"""Views for DwollaProvider"""
import requests
from django.conf import settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DwollaProvider
ENVIRONMENTS = {
'production': {
'auth_url': 'https://www.dwolla.com/oauth/v2/authenticate',
'token_url': 'https://www.dwolla.com/oauth/v2/token',
},
'sandbox': {
'auth_url': 'https://uat.dwolla.com/oauth/v2/authenticate',
'token_url': 'https://uat.dwolla.com/oauth/v2/token',
}
}
ENV = getattr(settings, 'SOCIALACCOUNT_PROVIDERS', {}).get(
'dwolla', {}).get('ENVIROMENT', 'production')
AUTH_URL = ENVIRONMENTS[ENV]['auth_url']
TOKEN_URL = ENVIRONMENTS[ENV]['token_url']
class DwollaOAuth2Adapter(OAuth2Adapter):
"""Dwolla Views Adapter"""
scope_delimiter = '|'
provider_id = DwollaProvider.id
access_token_url = TOKEN_URL
authorize_url = AUTH_URL
def complete_login(self, request, app, token, response, **kwargs):
resp = requests.get(
response['_links']['account']['href'],
headers={
'authorization': 'Bearer %s' % token.token,
'accept': 'application/vnd.dwolla.v1.hal+json',
},
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(
request,
extra_data
)
oauth2_login = OAuth2LoginView.adapter_view(DwollaOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DwollaOAuth2Adapter)
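# Added example (illustrative sketch, not part of the original module): selecting
# the sandbox environment from Django settings. Note that the lookup above reads
# the key spelled 'ENVIROMENT', so that exact spelling must be used.
#
# SOCIALACCOUNT_PROVIDERS = {
#     'dwolla': {
#         'ENVIROMENT': 'sandbox',
#     },
# }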
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
"""Tests the DBSCAN algorithm with balltree for neighbor calculation."""
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
"""DBSCAN.fit should accept a list of lists."""
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
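def test_cluster_counting_idiom():
    """Illustrative sketch (added example, not part of the original suite)."""
    # DBSCAN labels noise points as -1, so the cluster count used throughout
    # these tests is the number of distinct labels minus one when noise exists.
    labels = np.array([0, 0, 1, 1, -1])
    assert_equal(len(set(labels)) - int(-1 in labels), 2)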
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetDynamicSearchAdsSearchTermViewRequest',
},
)
class GetDynamicSearchAdsSearchTermViewRequest(proto.Message):
r"""Request message for
[DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView][google.ads.googleads.v7.services.DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView].
Attributes:
resource_name (str):
Required. The resource name of the dynamic
search ads search term view to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
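# Added example (illustrative, not part of the generated module): constructing the
# request message; the customer id and resource path below are hypothetical
# placeholders.
#
#   request = GetDynamicSearchAdsSearchTermViewRequest(
#       resource_name='customers/1234567890/dynamicSearchAdsSearchTermViews/123',
#   )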
'''
Created on Dec 11, 2013
:author: jzupka, astepano
:contact: Andrei Stepanov <astepano@redhat.com>
'''
import copy
class MessengerError(Exception):
"""
Represents an error in the messenger.
"""
def __init__(self, msg):
super(MessengerError, self).__init__(msg)
self.msg = msg
def __str__(self):
return "Messenger ERROR %s" % (self.msg)
class CommanderError(MessengerError):
"""
Represents an error in the Commander.
"""
def __init__(self, msg):
super(CommanderError, self).__init__(msg)
def __str__(self):
return "Commander ERROR %s" % (self.msg)
class CmdTraceBack(Exception):
"""
Represents a back-trace used for error tracing on the remote side.
"""
def __init__(self, msg):
super(CmdTraceBack, self).__init__(msg)
self.msg = msg
def __str__(self):
return "Cmd ERROR %s" % (self.msg)
class CmdMessage(object):
"""
Base cmd message class
"""
__slots__ = ["cmd_id"]
def __init__(self, cmd_id):
self.cmd_id = cmd_id
def __getstate__(self):
return (self.cmd_id,)  # single-element tuple so __setstate__ can index it
def __setstate__(self, state):
self.cmd_id = state[0]
def isCmdMsg(self):
return self.cmd_id is not None
def __eq__(self, other):
return self.cmd_id == other.cmd_id
class StdStream(CmdMessage):
"""
Represents string message data from the remote client.
"""
__slots__ = ["msg"]
def __init__(self, msg, cmd_id=None):
super(StdStream, self).__init__(cmd_id)
self.msg = msg
def __str__(self):
return (self.msg)
def __getstate__(self):
return (self.cmd_id, self.msg)
def __setstate__(self, state):
self.cmd_id = state[0]
self.msg = state[1]
class StdOut(StdStream):
"""
Represents stdout string data from the remote client.
"""
__slots__ = ["cmd_id", "msg"]
def __init__(self, msg, cmd_id=None):
super(StdOut, self).__init__(msg, cmd_id)
def __getstate__(self):
return (self.cmd_id, self.msg)
def __setstate__(self, state):
self.cmd_id = state[0]
self.msg = state[1]
class StdErr(StdStream):
"""
Represents stderr string data from the remote client.
"""
__slots__ = ["cmd_id", "msg"]
def __init__(self, msg, cmd_id=None):
super(StdErr, self).__init__(msg, cmd_id)
def __getstate__(self):
return (self.cmd_id, self.msg)
def __setstate__(self, state):
self.cmd_id = state[0]
self.msg = state[1]
class CmdQuery(object):
"""Command-msg-request from VM to avocado-vt test.
"""
def __init__(self, *args, **kargs):
"""
Command for asking from VM to avocado-vt.
:param args: Something picklable. Irrelevant for the messenger.
:param kargs: Something picklable. Irrelevant for the messenger.
"""
self.args = copy.deepcopy(args)
self.kargs = copy.deepcopy(kargs)
class CmdRespond(object):
"""Command-msg-answer from avocado-test to VM.
"""
def __init__(self, respond):
"""
Command for answering avocado-vt to VM.
:param respond: Something picklable. Irrelevant for the messenger.
"""
self.respond = respond # Must be picklable.
class BaseCmd(CmdMessage):
"""
Class used for moving information about commands between master and slave.
"""
__slots__ = ["func", "args", "kargs", "results", "_async", "_finished",
"nh_stdin", "nh_stdout", "nh_stderr", "cmd_hash"]
single_cmd_id = 0
def __init__(self, func_cmd, *args, **kargs):
self.cmd_id = BaseCmd.single_cmd_id
BaseCmd.single_cmd_id += 1
super(BaseCmd, self).__init__(self.cmd_id)
self.func = func_cmd
self.args = copy.deepcopy(args)
self.kargs = copy.deepcopy(kargs)
self.results = None
self._async = False
self._finished = False
self.nh_stdin = None
self.nh_stdout = None
self.nh_stderr = None
self.cmd_hash = None
def __getstate__(self):
return (self.cmd_id, self.func, self.args, self.kargs, self.results,
self._async, self._finished, self.nh_stdin, self.nh_stdout,
self.nh_stderr, self.cmd_hash)
def __setstate__(self, state):
self.cmd_id = state[0]
self.func = state[1]
self.args = state[2]
self.kargs = state[3]
self.results = state[4]
self._async = state[5]
self._finished = state[6]
self.nh_stdin = state[7]
self.nh_stdout = state[8]
self.nh_stderr = state[9]
self.cmd_hash = state[10]
def __str__(self):
str_args = []
for a in self.args: # Format str value in args to "val"
if type(a) is str:
str_args.append("\"%s\"" % a)
else:
str_args.append(a)
str_kargs = {}
for key, val in self.kargs.items(): # Format str value in kargs to "val"
if type(val) is str:
str_kargs[key] = "\"%s\"" % val
else:
str_kargs[key] = val
return ("base_cmd: %s(%s)" % (".".join(self.func),
", ".join(str_args) +
",".join(list(str_kargs.items()))))
def is_async(self):
"""
:return: True if command is async else False
"""
return self._async
def is_finished(self):
"""
:return: True if command is finished else False
"""
return self._finished
def update(self, basecmd):
"""
Sync the local instance with the instance moved over the messenger.
:param basecmd: basecmd from which should be sync data to this instance
:type basecmd: BaseCmd
"""
self.results = basecmd.results
self._finished = basecmd._finished
self._async = basecmd._async
def update_cmd_hash(self, basecmd):
if basecmd.cmd_hash is not None:
self.cmd_hash = basecmd.cmd_hash
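if __name__ == "__main__":
    # Added example (illustrative sketch, not part of the original module): BaseCmd
    # instances are meant to be pickled and moved between master and slave, which
    # is what __getstate__/__setstate__ support.
    import pickle
    cmd = BaseCmd(("os", "listdir"), "/tmp")
    restored = pickle.loads(pickle.dumps(cmd))
    assert restored.cmd_id == cmd.cmd_id
    assert restored.args == ("/tmp",)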
"""
TODO:
- Reimplement NODIR
- Option to merge path elements into one directory while file count
is below threshold.
"""
import time, os, sys
import re
import Params
import Runtime
import Rules
from util import *
import log
mainlog = log.get_log('main')
def load_backend_type(tp):
assert '.' in tp, "Specify backend as <module>.<backend-type>"
p = tp.split( '.' )
path, name = '.'.join( p[:-1] ), p[-1]
mod = __import__( path, locals(), globals(), [] )
return getattr( mod, name )
def suffix_ext(path, suffix):
x = re.match('.*\.([a-zA-Z0-9]+)$', path)
if x:
p = x.start(1)
path = path[:p-1] + suffix + path[p-1:]
else:
path += suffix
return path
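# Added example (illustrative): suffix_ext inserts the suffix just before the final
# extension when one is present, otherwise it appends the suffix:
#   suffix_ext('host/dir/page.html', '.incomplete') -> 'host/dir/page.incomplete.html'
#   suffix_ext('host/dir/page', '.incomplete')      -> 'host/dir/page.incomplete'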
class File(object):
"""
Simple cache that stores at path/filename taken from URL.
The PARTIAL suffix (see Params) is used for partial downloads.
Parameters ARCHIVE and ENCODE_PATHSEP also affect the storage location.
ARCHIVE is applied after ENCODE_PATHSEP.
"""
def __init__(self, path=None):
"""
The path is a `wget -r` path, meaning it has the parts:
host/path?query. The cache implementation will determine the final
local path name.
"""
super( File,self).__init__()
self.partial = None
self.full = None
self.fp = None
if path:
self.init(path)
def init(self, path):
assert Params.PARTIAL not in path
assert not path.startswith(os.sep), \
"File.init: saving in other roots not supported,"\
" only paths relative to Runtime.ROOT allowed."
# encode query and/or fragment parts
sep = min_pos(path.find('#'), path.find( '?' ))
# optional removal of directories in entire path
psep = Runtime.ENCODE_PATHSEP
if psep:
path = path.replace( '/', psep)
else:
# partial pathsep encode
if sep != -1:
path = path[ :sep ] + path[ sep: ].replace( '/', psep)
# make archive path
if Runtime.ARCHIVE:
path = time.strftime( Runtime.ARCHIVE, time.gmtime() ) + path
assert Runtime.PARTIAL not in path
# add default part
if path[-1] == os.sep:
path += Params.DEFAULT
self.path = path
mainlog.debug('%s: init %s',self, path)
self.fp = None
assert len(self.abspath()) < 255, \
"LBYL, cache location path to long for Cache.File! "
self.stat()
def full_path(self):
assert Runtime.ROOT
assert not self.path.startswith(Runtime.ROOT)
return os.path.join( Runtime.ROOT, self.path )
def partial_path(self):
assert Runtime.PARTIAL
assert not self.path.endswith(Runtime.PARTIAL)
return suffix_ext( self.full_path(), Runtime.PARTIAL )
def stat(self):
assert Runtime.PARTIAL not in self.path
abspath = self.full_path()
partial = self.partial_path()
if os.path.isfile( partial ):
self.full = False
self.partial = os.stat( partial )
elif os.path.isfile( abspath ):
self.full = os.stat( abspath )
self.partial = False
return self.partial or self.full
def abspath(self):
assert Runtime.PARTIAL not in self.path, self.path
if not (self.partial or self.full):
self.stat()
if self.full:
return self.full_path()
else:
return self.partial_path()
@property
def size(self):
stat = ( self.partial or self.full )
if stat:
return stat.st_size
@property
def mtime(self):
stat = ( self.partial or self.full )
if stat:
return stat.st_mtime
def utime(self, mtime):
os.utime( self.abspath(), ( mtime, mtime ) )
self.stat()
def open_new(self):
assert not self.fp
mainlog.note('%s: Preparing new file in cache', self)
new_file = self.abspath()
tdir = os.path.dirname( new_file )
if not os.path.exists( tdir ):
os.makedirs( tdir )
try:
self.fp = open( new_file, 'w+' )
except Exception, e:
mainlog.note('%s: Failed to open file: %s',self, e)
self.fp = os.tmpfile()
def open_partial(self, offset=-1):
assert not self.fp
self.fp = open( self.abspath(), 'a+' )
if offset >= 0:
assert offset <= self.tell(), 'range does not match file in cache'
self.fp.seek( offset )
self.fp.truncate()
mainlog.info('%s: Resuming partial file in cache at byte %s',self, self.tell())
def open_full(self):
assert not self.fp
self.fp = open( self.abspath(), 'r' )
# self.size = self.tell()
def open(self):
if self.full:
self.open_full()
elif self.partial:
self.open_partial()
else:
self.open_new()
def remove_full(self):
os.remove( self.abspath() )
mainlog.note('%s: Removed complete file from cache', self)
def remove_partial(self):
mainlog.note('%s: Removed partial file from cache', self)
os.remove( self.abspath() + Runtime.PARTIAL )
def read(self, pos, size):
self.fp.seek( pos )
return self.fp.read( size )
def write(self, chunk):
self.fp.seek( 0, 2 )
return self.fp.write( chunk )
def tell(self):
self.fp.seek( 0, 2 )
return self.fp.tell()
def close(self):
assert self.fp
self.fp.close()
self.fp = None
self.partial, self.full = None, None
mainlog.debug("%s: Closed %s",self, self.path)
# def __nonzero__(self):
# return ( self.complete() or self.partial ) != None
def __del__(self):
if self.fp:
try:
self.close()
mainlog.warn("%s: Forced close", self)
except Exception, e:
mainlog.warn("%s: Error on closing cache file: %s",self, e)
"""
Support for displaying the current CPU speed.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cpuspeed/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['py-cpuinfo==3.3.0']
_LOGGER = logging.getLogger(__name__)
ATTR_BRAND = 'Brand'
ATTR_HZ = 'GHz Advertised'
ATTR_ARCH = 'arch'
DEFAULT_NAME = 'CPU speed'
ICON = 'mdi:pulse'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
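# Added example (illustrative sketch): a minimal configuration.yaml entry for this
# platform; the name override is optional and falls back to DEFAULT_NAME.
#
#   sensor:
#     - platform: cpuspeed
#       name: CPU speed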
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the CPU speed sensor."""
name = config.get(CONF_NAME)
add_devices([CpuSpeedSensor(name)], True)
class CpuSpeedSensor(Entity):
"""Representation of a CPU sensor."""
def __init__(self, name):
"""Initialize the sensor."""
self._name = name
self._state = None
self.info = None
self._unit_of_measurement = 'GHz'
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.info is not None:
return {
ATTR_ARCH: self.info['arch'],
ATTR_BRAND: self.info['brand'],
ATTR_HZ: round(self.info['hz_advertised_raw'][0]/10**9, 2)
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the state."""
from cpuinfo import cpuinfo
self.info = cpuinfo.get_cpu_info()
self._state = round(float(self.info['hz_actual_raw'][0])/10**9, 2)
## A script for finding the Cox coefficient and p-value for every CESC lncRNA in the beta MiTranscriptome data set (normalized counts)
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were two clinical files with nonredundant data. V4.0 was found to be most up to date.
## Both files are loaded with the more up to date file getting preference
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if i[patient_column] not in [j[0] for j in clinical]:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##merging data and removing the empty value
clinical+=clinical2[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
clinical4=[]
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','clinical','nationwidechildrens.org_clinical_patient_cesc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the CESC patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','CESC','lncrna','CESC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
if i not in lncrna_dict:
data=eval(j.strip())
for index, k in enumerate(data):
lncrnas[index]=lncrnas[index]+[[i,float(k)]]
lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
if i[0] in patient_dict:
clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
temp=[]
for j in patient_dict[i[0]]:
temp.append(j)
if len(temp)==1:
ordered_lncrnas.append(temp[0])
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
temp=[]
for j in ordered_lncrnas:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(ordered_lncrnas)/4.0 and median>.1:
for index, kk in enumerate(temp):
final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','CESC','final_lncrnas.txt'),'w')
for i in final_lncrnas:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
kaplan=[]
lncrnas.append(final_lncrnas[0][i][0])
for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
##grade4
grade4=[]
for ii in kaplan:
if ii[2]==4:
grade4.append(1)
else:
grade4.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['grade4']=ro.IntVector(grade4)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + grade4 + age)') ## Perform Cox regression
# Parse the string of the result with python for the lncrna coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='lncrna':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','CESC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
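## Added note (illustrative sketch, not executed above): the rank-based inverse
## normal transformation delegated to R is equivalent to the following, assuming
## scipy is available:
##   from scipy.stats import rankdata, norm
##   inverse_norm = norm.ppf((rankdata(data) - 0.5) / len(data))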
# Copyright 2017 AT&T Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
https://developer.openstack.org/api-ref/identity/v3-ext/#os-ep-filter-api
"""
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class EndPointsFilterClient(rest_client.RestClient):
api_version = "v3"
ep_filter = "OS-EP-FILTER"
def list_projects_for_endpoint(self, endpoint_id):
"""List all projects that are associated with the endpoint."""
resp, body = self.get(self.ep_filter + '/endpoints/%s/projects' %
endpoint_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def add_endpoint_to_project(self, project_id, endpoint_id):
"""Add association between project and endpoint. """
body = None
resp, body = self.put(
self.ep_filter + '/projects/%s/endpoints/%s' %
(project_id, endpoint_id), body)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def check_endpoint_in_project(self, project_id, endpoint_id):
"""Check association of Project with Endpoint."""
resp, body = self.head(
self.ep_filter + '/projects/%s/endpoints/%s' %
(project_id, endpoint_id), None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_endpoints_in_project(self, project_id):
"""List Endpoints associated with Project."""
resp, body = self.get(self.ep_filter + '/projects/%s/endpoints'
% project_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_endpoint_from_project(self, project_id, endpoint_id):
"""Delete association between project and endpoint."""
resp, body = self.delete(
self.ep_filter + '/projects/%s/endpoints/%s'
% (project_id, endpoint_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
#!/usr/bin/env python
"""
Written by Dejanira Araiza-Illan July-August 2016
"""
import sys
import os
import rospy
import actionlib
import smach
import smach_ros
import math
import time
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.srv import GetModelState
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Vector3
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from sensor_msgs.msg import JointState
from std_msgs.msg import String
from std_msgs.msg import Int8
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
client = actionlib.SimpleActionClient("/play_motion", PlayMotionAction)
order='none'
stop = 0
## MOTIONS ##
def moveuntilreached(x,y):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1,latch=True)
phi = math.atan2(2*(data.pose.orientation.w*data.pose.orientation.z + data.pose.orientation.x*data.pose.orientation.y), 1 - 2*(math.pow(data.pose.orientation.y,2)+math.pow(data.pose.orientation.z,2)))
while abs(y - data.pose.position.y) > 0.02 and abs(x - data.pose.position.x) > 0.1:
#Correct angle
while abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x) - phi) > 0.1 :
if math.atan2(y - data.pose.position.y,x - data.pose.position.x) > 0 and phi > 0:
if math.atan2(y - data.pose.position.y,x - data.pose.position.x) > phi:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
else:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,-0.5))
rospy.sleep(0.25)
elif math.atan2(y - data.pose.position.y,x - data.pose.position.x) < 0 and phi < 0:
if abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x)) > abs(phi):
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,-0.5))
rospy.sleep(0.25)
else:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
elif math.atan2(y - data.pose.position.y,x - data.pose.position.x) > 0 and phi < 0:
if abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x)) > 1.5 and abs(phi) > 1.5:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,-0.5))
rospy.sleep(0.25)
elif abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x)) < 1.5 and abs(phi) < 1.5:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
else:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
else:
if abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x)) > 1.5 and abs(phi) > 1.5:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
elif abs(math.atan2(y - data.pose.position.y,x - data.pose.position.x)) < 1.5 and abs(phi) < 1.5:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,-0.5))
rospy.sleep(0.25)
else:
pub.publish(Vector3(0.0,0.0,0.0),Vector3(0.0,0.0,0.5))
rospy.sleep(0.25)
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
phi = math.atan2(2*(data.pose.orientation.w*data.pose.orientation.z + data.pose.orientation.x*data.pose.orientation.y), 1 - 2*(math.pow(data.pose.orientation.y,2)+math.pow(data.pose.orientation.z,2)))
#Then advance for a bit
for i in range(0,1):
pub.publish(Vector3(0.25,0.0,0.0),Vector3(0.0,0.0,0.0))
rospy.sleep(0.1)
#Check for collisions
rospy.Subscriber("collision_sensor", Int8, stopnow)
while stop != 0:
rospy.Subscriber("collision_sensor", Int8, stopnow)
rospy.sleep(0.1)
#Recheck if angle is not good anymore and fix
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
def stopnow(data):
global stop
if data.data == 1:
stop = 1
else:
stop = 0
##---------------------------------
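## Added note (illustrative, not used by the states below): the angle phi computed
## inline in several places is the standard quaternion-to-yaw conversion, which
## could be factored out as:
## def yaw_from_quaternion(q):
##     return math.atan2(2*(q.w*q.z + q.x*q.y), 1 - 2*(q.y**2 + q.z**2))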
class Recharge(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
#If in human
if abs(data.pose.position.x + 2.4) < 0.15 and abs(data.pose.position.y + 3.95) < 0.15:
#Move to centre of room
moveuntilreached(-0.25,-0.5)
#Continue to recharge
moveuntilreached(1.35,0.015)
else:
moveuntilreached(1.35,0.015)
return 'outcome1'
##----------------------------------
class Cooker(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
return 'outcome1'
## ---------------------------------
class Fridge(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1,latch=True)
phi = math.atan2(2*(data.pose.orientation.w*data.pose.orientation.z + data.pose.orientation.x*data.pose.orientation.y), 1 - 2*(math.pow(data.pose.orientation.y,2)+math.pow(data.pose.orientation.z,2)))
#If in human
if abs(data.pose.position.x + 2.4) < 0.15 and abs(data.pose.position.y + 3.95) < 0.15:
#Move to centre of room
moveuntilreached(-0.25,-0.5)
#Continue towards fridge
moveuntilreached(-2.8,0.405)
else:
moveuntilreached(-2.8,0.405)
return 'outcome1'
## -----------------------------
class Sink(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1,latch=True)
phi = math.atan2(2*(data.pose.orientation.w*data.pose.orientation.z + data.pose.orientation.x*data.pose.orientation.y), 1 - 2*(math.pow(data.pose.orientation.y,2)+math.pow(data.pose.orientation.z,2)))
#If in human
if abs(data.pose.position.x + 2.4) < 0.15 and abs(data.pose.position.y + 3.95) < 0.15:
#Move to centre of room
moveuntilreached(-0.25,-0.5)
#Continue towards sink
moveuntilreached(-2.03,3.055)
else:
moveuntilreached(-2.03,3.055)
pubfood = rospy.Publisher('food', Int8, queue_size=1,latch=True)
pubfood.publish(0)
rospy.sleep(0.1)
return 'outcome1'
## ------------------------------------
class Human(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
getmodel = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
data = getmodel('tiago_steel','')
pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1,latch=True)
phi = math.atan2(2*(data.pose.orientation.w*data.pose.orientation.z + data.pose.orientation.x*data.pose.orientation.y), 1 - 2*(math.pow(data.pose.orientation.y,2)+math.pow(data.pose.orientation.z,2)))
#Move to centre first
moveuntilreached(-0.25,-0.5)
#Approach table now
moveuntilreached(-2.4,-3.95)
pubfood = rospy.Publisher('food', Int8, queue_size=1,latch=True)
pubfood.publish(0)
rospy.sleep(0.1)
return 'outcome1'
## -------------------------------
class Grab(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
goal = PlayMotionGoal()
goal.motion_name = 'close_hand'
goal.skip_planning = True
global client
client.send_goal(goal)
client.wait_for_result(rospy.Duration(3.0))
rospy.loginfo("grabbed")
pubfood = rospy.Publisher('food', Int8, queue_size=1,latch=True)
pubfood.publish(1)
rospy.sleep(0.1)
return 'outcome1'
## -------------------------------------
class Drop(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
goal = PlayMotionGoal()
goal.motion_name = 'open_hand'
goal.skip_planning = True
global client
client.send_goal(goal)
client.wait_for_result(rospy.Duration(3.0))
rospy.loginfo("dropped")
return 'outcome1'
## -----------------------------------
class Tuck(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome1'])
def execute(self, userdata):
goal = PlayMotionGoal()
goal.motion_name = 'home'
goal.skip_planning = True
global client
client.send_goal(goal)
client.wait_for_result(rospy.Duration(10.0))
rospy.loginfo("Arm tucked.")
return 'outcome1'
## MAIN ##
def getgoal(data):
global order
order = data.data
print(order)
def main():
rospy.init_node('robot_navigation', anonymous=True) #Start node first
rospy.loginfo("Waiting for motion servers...")
#Variables
global client
global order
#Start connections
client.wait_for_server()
rospy.loginfo("...connected.")
rospy.wait_for_message("/joint_states", JointState)
rospy.sleep(3)
rospy.loginfo("Robot is ready")
thetimestart = 1000000000000000
#Listen to human and send orders to low level navigation control
while not rospy.is_shutdown():
rospy.Subscriber('orders',String,getgoal)
rospy.sleep(0.1)
rospy.loginfo(order)
if order == 'feed':
sm = smach.StateMachine(outcomes=['nextOrder'])
with sm:
#Move to fridge
smach.StateMachine.add('Fridge', Fridge(), transitions={'outcome1':'Grabfood'})
#Grab food from fridge
smach.StateMachine.add('Grabfood', Grab(), transitions={'outcome1':'Human'})
#Take it to person
smach.StateMachine.add('Human', Human(), transitions={'outcome1':'Recharge'})
#Go to recharge
smach.StateMachine.add('Recharge', Recharge(), transitions={'outcome1':'nextOrder'})
outcome = sm.execute()
pubdone = rospy.Publisher('done', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
thetimestart = time.time()
elif order == 'clean':
sm = smach.StateMachine(outcomes=['nextOrder'])
with sm:
#Move to person
smach.StateMachine.add('Human', Human(), transitions={'outcome1':'Grabfood'})
#Grab food from table
smach.StateMachine.add('Grabfood', Grab(), transitions={'outcome1':'Sink'})
#Move to sink
smach.StateMachine.add('Sink', Sink(), transitions={'outcome1':'Dropfood'})
#Drop into sink
smach.StateMachine.add('Dropfood', Drop(), transitions={'outcome1':'Recharge'})
#Go to recharge
smach.StateMachine.add('Recharge', Recharge(), transitions={'outcome1':'nextOrder'})
outcome = sm.execute()
pubdone = rospy.Publisher('done', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
thetimestart = time.time()
elif order == 'fridge':
sm = smach.StateMachine(outcomes=['nextOrder'])
with sm:
#Move to fridge
smach.StateMachine.add('Fridge', Fridge(), transitions={'outcome1':'Recharge'})
#Go to recharge
smach.StateMachine.add('Recharge', Recharge(), transitions={'outcome1':'nextOrder'})
outcome = sm.execute()
pubdone = rospy.Publisher('done', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
thetimestart = time.time()
elif order == 'sink':
sm = smach.StateMachine(outcomes=['nextOrder'])
with sm:
#Move to sink
smach.StateMachine.add('Sink', Sink(), transitions={'outcome1':'Recharge'})
#Go to recharge
smach.StateMachine.add('Recharge', Recharge(), transitions={'outcome1':'nextOrder'})
outcome = sm.execute()
pubdone = rospy.Publisher('done', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
thetimestart = time.time()
elif order == 'ended':
rospy.loginfo('ended')
break
else:
rospy.loginfo('I cannot do this')
#Check if idle for a time, then send to recharge
if time.time()-thetimestart > 30:
order = 'recharge'
pubdone = rospy.Publisher('idle', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
rospy.loginfo('idle')
sm = smach.StateMachine(outcomes=['nextOrder'])
with sm:
#Go to recharge
smach.StateMachine.add('Recharge', Recharge(), transitions={'outcome1':'nextOrder'})
outcome = sm.execute()
pubdone = rospy.Publisher('done', Int8, queue_size=1,latch=True)
pubdone.publish(1)
rospy.sleep(0.5)
pubdone.publish(0)
thetimestart = time.time()
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException: #to stop the code when pressing Ctr+c
pass
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from pinotdb import connect
from airflow.hooks.dbapi_hook import DbApiHook
class PinotDbApiHook(DbApiHook):
"""
Connect to Pinot DB (https://github.com/linkedin/pinot) to issue PQL queries.
"""
conn_name_attr = 'pinot_broker_conn_id'
default_conn_name = 'pinot_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super(PinotDbApiHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Establish a connection to the Pinot broker through the Pinot DB API.
"""
conn = self.get_connection(self.pinot_broker_conn_id)
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/pql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Established a connection to the Pinot '
'broker on {host}'.format(host=conn.host))
return pinot_broker_conn
def get_uri(self):
"""
Get the connection uri for pinot broker.
e.g: http://localhost:9000/pql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'pql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def get_records(self, sql):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
if six.PY2:
sql = sql.encode('utf-8')
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
if six.PY2:
sql = sql.encode('utf-8')
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
"""
Django settings for bookmarks project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jt358%r8dihus8-1=mr7!rww6lejg*t0%t#cq@@s2vvid5*z7^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bookmarks.urls'
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.abspath(os.path.join(BASE_DIR, 'static'))
STATICFILES_DIRS = (
STATIC_PATH,
)
TEMPLATE_PATH = os.path.abspath(os.path.join(BASE_DIR, 'templates'))
TEMPLATE_DIRS = (
TEMPLATE_PATH,
)
from twilio.rest.resources import NextGenInstanceResource, NextGenListResource
class Member(NextGenInstanceResource):
def update(self, role_sid, **kwargs):
"""
Updates the Member instance identified by sid
:param sid: Member instance identifier
:param role_sid: Member's Role Sid
:param identity: Member's Identity
:return: Updated instance
"""
kwargs['role_sid'] = role_sid
return self.update_instance(**kwargs)
def delete(self):
"""
Delete this member
"""
return self.delete_instance()
class Members(NextGenListResource):
name = "Members"
instance = Member
def list(self, **kwargs):
"""
Returns a page of :class:`Member` resources as a list.
For paging information see :class:`ListResource`.
**NOTE**: Due to the potentially voluminous amount of data in an
alert, the full HTTP request and response data is only returned
in the Member instance resource representation.
"""
return self.get_instances(kwargs)
def create(self, identity, **kwargs):
"""
Create a Member.
:param str identity: The identity of the user.
:param str role: The role to assign the member.
:return: A :class:`Member` object
"""
kwargs["identity"] = identity
return self.create_instance(kwargs)
def delete(self, sid):
"""
Delete a given Member
"""
return self.delete_instance(sid)
def update(self, sid, role_sid, **kwargs):
"""
Updates the Member instance identified by sid
:param sid: Member instance identifier
:param role_sid: Member's Role Sid
:param identity: Member's Identity
:return: Updated instance
"""
kwargs['role_sid'] = role_sid
return self.update_instance(sid, kwargs)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from util import load_gray
from util import imshow
class Elem(object):
def __init__(self, img, box, spcount, cls=-1):
self._img = img
self._box = box
self._cls = cls
self._prob = None
self._sc = spcount
@property
def img(self):
return self._img
@property
def box(self):
return self._box
@property
def sc(self):
return self._sc
@property
def cls(self):
return self._cls
@cls.setter
def cls(self, val):
self._cls = val
@property
def prob(self):
return self._prob
@prob.setter
def prob(self, val):
if val is not None:
self._prob = val
class RandomSplitPredicate():
def __init__(self, p=.6):
self._p = p
def run(self, elem):
p = self._p / (elem.sc + 1)**0.5
return np.random.randn() >= (1-p)
class SplitPredicate():
def __init__(self, pred, thmax=0.85, thmin=0.5, alpha=2):
self._pred = pred
self._thmax = thmax
self._thmin = thmin
self._alpha = alpha
def run(self, elem):
pred, prob = self._pred.predict(elem.img)
need_split = False
if pred == -1:
elem.cls = pred
elem.prob = 1
else:
prob = prob[0][pred]
elem.prob = prob
if prob >= self.__comp_thresh(elem.sc):
elem.cls = pred
elem.prob = prob
else:
need_split = True
return need_split
def __comp_thresh(self, sc):
D = self._thmax - self._thmin
y = D * np.exp(-1 * self._alpha * sc) + self._thmin
return y
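# Added note (illustrative): with the defaults thmax=0.85, thmin=0.5 and alpha=2,
# the split threshold computed above decays with split depth sc towards thmin:
#   sc=0 -> 0.85, sc=1 -> ~0.547, sc=2 -> ~0.506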
class DoSplit(object):
def __init__(self):
self._count = 0
def run(self, elem):
img, offset, sc = elem.img, elem.box[0], elem.sc
M, N = img.shape[:2]
blocks, i = [], 0
for c1 in range(2):
j = 0
for c2 in range(2):
timg = img[i:i+M/2, j:j+N/2]
box = ((offset[0] + i, offset[1] + j),
(offset[0] + i + M/2, offset[1] + j + N/2))
blocks.append(Elem(timg, box, sc+1))
j += N/2
i += M/2
self._count += 1
return blocks
@property
def count(self):
return self._count
class Split(object):
def __init__(self, predicate, msizeratio=4):
self._splitter = DoSplit()
self._splitpred = predicate
self._msr = msizeratio
self._img = []
self._blocks = []
self._min_size = []
def run(self, img):
self._img = img
self._min_size = (img.shape[0]/self._msr, img.shape[1]/self._msr)
elem = Elem(img, ((0, 0), img.shape[:2]), 0)
processlist = [elem]
regionlist = []
while processlist:
elem = processlist.pop()
if not self._splitpred.run(elem):
regionlist.append(elem)
elif self.__min_size(elem):
regionlist.append(elem)
else:
processlist += self._splitter.run(elem)
self._blocks = regionlist
return self._blocks
def __min_size(self, x):
M, N = x.img.shape[:2]
return M <= self._min_size[0] and N <= self._min_size[1]
def show(self, lw=4, off=16):
colors = ((0, 255, 0), (255, 0, 0), (0, 0, 0), ())
tmp = cv2.cvtColor(self._img, cv2.COLOR_GRAY2RGB)
for bl in self._blocks:
pred, prob = bl.cls, bl.prob
p1, p2 = bl.box[0], (bl.box[1][0] - lw, bl.box[1][1] - lw)
cv2.putText(tmp, str(prob), (p1[1] + off, p1[0] + int(2.5*off)),
cv2.FONT_HERSHEY_PLAIN, 2, colors[pred], 2)
cv2.rectangle(tmp, p1[::-1], p2[::-1], colors[pred], lw)
imshow(tmp)
if __name__ == '__main__':
from util import Crop
img = load_gray('test/test4.jpg')
crop = Crop()
img = crop.run(img)
split = Split(RandomSplitPredicate())
split.run(img)
split.show()
| 0 |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class CustomerAttribute(object):
def __init__(self, apiContext: ApiContext = None, mozuClient = None):
self.client = mozuClient or default_client();
if (apiContext is not None):
self.client.withApiContext(apiContext);
else:
self.client.withApiContext(ApiContext());
def getAccountAttribute(self,accountId, attributeFQN, responseFields = None):
""" Retrieves the contents of an attribute associated with the specified customer account.
Args:
| accountId (int) - Unique identifier of the customer account.
| attributeFQN (string) - The fully qualified name of the attribute, which is a user defined attribute identifier.
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| CustomerAttribute
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/customer/accounts/{accountId}/attributes/{attributeFQN}?responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("accountId", accountId);
url.formatUrl("attributeFQN", attributeFQN);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).execute();
return self.client.result();
def getAccountAttributes(self,accountId, startIndex = None, pageSize = None, sortBy = None, filter = None, responseFields = None):
""" Retrieves the list of customer account attributes.
Args:
| accountId (int) - Unique identifier of the customer account.
| startIndex (int) -
| pageSize (int) -
| sortBy (string) -
| filter (string) -
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| CustomerAttributeCollection
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/customer/accounts/{accountId}/attributes?startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&filter={filter}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("accountId", accountId);
url.formatUrl("filter", filter);
url.formatUrl("pageSize", pageSize);
url.formatUrl("responseFields", responseFields);
url.formatUrl("sortBy", sortBy);
url.formatUrl("startIndex", startIndex);
self.client.withResourceUrl(url).execute();
return self.client.result();
def addAccountAttribute(self,attribute, accountId, responseFields = None):
""" Applies a defined attribute to the customer account specified in the request and assigns a value to the customer attribute.
Args:
| attribute(attribute) - Properties of an attribute associated with a customer account.
| accountId (int) - Unique identifier of the customer account.
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| CustomerAttribute
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/customer/accounts/{accountId}/attributes?responseFields={responseFields}", "POST", UrlLocation.TenantPod, False);
url.formatUrl("accountId", accountId);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).withBody(attribute).execute();
return self.client.result();
def updateAccountAttribute(self,attribute, accountId, attributeFQN, responseFields = None):
""" Updates one or more details of a customer account attribute.
Args:
| attribute(attribute) - Properties of an attribute associated with a customer account.
| accountId (int) - Unique identifier of the customer account.
| attributeFQN (string) - The fully qualified name of the attribute, which is a user defined attribute identifier.
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| CustomerAttribute
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/customer/accounts/{accountId}/attributes/{attributeFQN}?responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False);
url.formatUrl("accountId", accountId);
url.formatUrl("attributeFQN", attributeFQN);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).withBody(attribute).execute();
return self.client.result();
def deleteAccountAttribute(self,accountId, attributeFQN):
""" Removes the attribute specified in the request from the customer account.
Args:
| accountId (int) - Unique identifier of the customer account.
| attributeFQN (string) - The fully qualified name of the attribute, which is a user defined attribute identifier.
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/customer/accounts/{accountId}/attributes/{attributeFQN}", "DELETE", UrlLocation.TenantPod, False);
url.formatUrl("accountId", accountId);
url.formatUrl("attributeFQN", attributeFQN);
self.client.withResourceUrl(url).execute();
| 0.047075 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import json
from alfanous.main import TraductionSearchEngine
LANGS = {
'el': 'Greek',
'eo': 'Esperanto',
'en': 'English',
'vi': 'Vietnamese',
'ca': 'Catalan',
'it': 'Italian',
'lb': 'Luxembourgish',
'eu': 'Basque',
'ar': 'Arabic',
'bg': 'Bulgarian',
'cs': 'Czech',
'et': 'Estonian',
'gl': 'Galician',
'id': 'Indonesian',
'ru': 'Russian',
'nl': 'Dutch',
'pt': 'Portuguese',
'no': 'Norwegian',
'tr': 'Turkish',
'lv': 'Latvian',
'lt': 'Lithuanian',
'th': 'Thai',
'es': 'Spanish',
'ro': 'Romanian',
'en_GB': 'British English',
'fr': 'French',
'hy': 'Armenian',
'uk': 'Ukrainian',
'pt_BR': 'Brazilian',
'hr': 'Croatian',
'de': 'German',
'da': 'Danish',
'fa': 'Persian',
'bs': 'Bosnian',
'fi': 'Finnish',
'hu': 'Hungarian',
'ja': 'Japanese',
'he': 'Hebrew',
'ka': 'Georgian',
'zh': 'Chinese',
'kk': 'Kazakh',
'sr': 'Serbian',
'sq': 'Albanian',
'ko': 'Korean',
'sv': 'Swedish',
'mk': 'Macedonian',
'sk': 'Slovak',
'pl': 'Polish',
'ms': 'Malay',
'sl': 'Slovenian',
'sw': 'Swahili',
'sd': 'Sindhi',
'ml': 'Malayalam',
'tg': 'Tajik',
'ta': 'Tamil',
'ur': 'Urdu',
'uz': 'Uzbek',
'hi': 'Hindi',
'tt': 'Tatar',
'so': 'Somali',
'az': 'Azerbaijani',
'bn': 'Bengali',
'dv': 'Divehi',
'ha': 'Hausa',
    'ug': 'Uyghur'
} #languages
def update_translations_list( TSE_index = "../../indexes/extend", translations_list_file = "../../resources/configs/translations.js" ):
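    # Build a JSON object mapping each translation id to "Language-Author" and write it to translations_list_file.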
TSE = TraductionSearchEngine( TSE_index )
list1 = [item for item in TSE.list_values( "id" ) if item]
list2 = []
list3 = []
for id in list1:
list2.extend( [item for item in TSE.list_values( "lang", conditions = [( "id", id )] ) if item] )
list3.extend( [item for item in TSE.list_values( "author", conditions = [( "id", id )] ) if item] )
list5 = map( lambda x: LANGS[x] if LANGS.has_key( x ) else x, list2 )
D = {}
for i in range( len( list3 ) ):
D[list1[i]] = list5[i] + "-" + list3[i]
TDICT = json.dumps( D )
f = open( translations_list_file, "w" )
f.write( TDICT )
return TDICT
| 0.063645 |
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.structs import StanzaHandler
from nbxmpp.task import iq_request_task
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.base import BaseModule
from nbxmpp.modules.util import raise_if_error
from nbxmpp.modules.util import finalize
from nbxmpp.modules.bookmarks.util import parse_bookmark
from nbxmpp.modules.bookmarks.util import build_conference_node
BOOKMARK_OPTIONS = {
'pubsub#notify_delete': 'true',
'pubsub#notify_retract': 'true',
'pubsub#persist_items': 'true',
'pubsub#max_items': 'max',
'pubsub#access_model': 'whitelist',
'pubsub#send_last_published_item': 'never',
}
class NativeBookmarks(BaseModule):
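    # Stores and retrieves bookmarks in the user's PubSub node (Namespace.BOOKMARKS_1).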
_depends = {
'retract': 'PubSub',
'publish': 'PubSub',
'request_items': 'PubSub',
}
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_pubsub_bookmarks,
ns=Namespace.PUBSUB_EVENT,
priority=16),
]
def _process_pubsub_bookmarks(self, _client, _stanza, properties):
if not properties.is_pubsub_event:
return
if properties.pubsub_event.node != Namespace.BOOKMARKS_1:
return
item = properties.pubsub_event.item
if item is None:
# Retract, Deleted or Purged
return
try:
bookmark_item = parse_bookmark(item)
except MalformedStanzaError as error:
self._log.warning(error)
self._log.warning(error.stanza)
raise NodeProcessed
pubsub_event = properties.pubsub_event._replace(data=bookmark_item)
self._log.info('Received bookmark item from: %s', properties.jid)
self._log.info(bookmark_item)
properties.pubsub_event = pubsub_event
@iq_request_task
def request_bookmarks(self):
_task = yield
items = yield self.request_items(Namespace.BOOKMARKS_1)
raise_if_error(items)
bookmarks = []
for item in items:
try:
bookmark_item = parse_bookmark(item)
except MalformedStanzaError as error:
self._log.warning(error)
self._log.warning(error.stanza)
continue
bookmarks.append(bookmark_item)
for bookmark in bookmarks:
self._log.info(bookmark)
yield bookmarks
@iq_request_task
def retract_bookmark(self, bookmark_jid):
task = yield
self._log.info('Retract Bookmark: %s', bookmark_jid)
result = yield self.retract(Namespace.BOOKMARKS_1, str(bookmark_jid))
yield finalize(task, result)
@iq_request_task
def store_bookmarks(self, bookmarks):
_task = yield
self._log.info('Store Bookmarks')
for bookmark in bookmarks:
self.publish(Namespace.BOOKMARKS_1,
build_conference_node(bookmark),
id_=str(bookmark.jid),
options=BOOKMARK_OPTIONS,
force_node_options=True)
yield True
| 0 |
'''
Button
======
.. image:: images/button.jpg
:align: right
The :class:`Button` is a :class:`~kivy.uix.label.Label` with associated actions
that are triggered when the button is pressed (or released after a
click/touch). To configure the button, the same properties (padding,
font_size, etc) and
:ref:`sizing system <kivy-uix-label-sizing-and-text-content>`
are used as for the :class:`~kivy.uix.label.Label` class::
button = Button(text='Hello world', font_size=14)
To attach a callback when the button is pressed (clicked/touched), use
:class:`~kivy.uix.widget.Widget.bind`::
def callback(instance):
print('The button <%s> is being pressed' % instance.text)
btn1 = Button(text='Hello world 1')
btn1.bind(on_press=callback)
btn2 = Button(text='Hello world 2')
btn2.bind(on_press=callback)
If you want to be notified every time the button state changes, you can bind
to the :attr:`Button.state` property::
def callback(instance, value):
print('My button <%s> state is <%s>' % (instance, value))
btn1 = Button(text='Hello world 1')
btn1.bind(state=callback)
'''
__all__ = ('Button', )
from kivy.uix.label import Label
from kivy.properties import StringProperty, ListProperty
from kivy.uix.behaviors import ButtonBehavior
class Button(ButtonBehavior, Label):
'''Button class, see module documentation for more information.
.. versionchanged:: 1.8.0
The behavior / logic of the button has been moved to
:class:`~kivy.uix.behaviors.ButtonBehaviors`.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
This acts as a *multiplier* to the texture colour. The default
texture is grey, so just setting the background color will give
a darker result. To set a plain color, set the
:attr:`background_normal` to ``''``.
.. versionadded:: 1.0.8
The :attr:`background_color` is a
:class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1].
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/button')
'''Background image of the button used for the default graphical
representation when the button is not pressed.
.. versionadded:: 1.0.4
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/button'.
'''
background_down = StringProperty(
'atlas://data/images/defaulttheme/button_pressed')
'''Background image of the button used for the default graphical
representation when the button is pressed.
.. versionadded:: 1.0.4
:attr:`background_down` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/button_pressed'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/button_disabled')
'''Background image of the button used for the default graphical
representation when the button is disabled and not pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled'.
'''
background_disabled_down = StringProperty(
'atlas://data/images/defaulttheme/button_disabled_pressed')
'''Background image of the button used for the default graphical
representation when the button is disabled and pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_down` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled_pressed'.
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_down`. Can be used for custom backgrounds.
    It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
| 0 |
#!/usr/bin/python
import random
def randomGraph(n, p):
    graph = dict() #we represent a graph as a dict of adjacency lists, one per node
for i in range(n):
graph[i] = [] #there are n nodes (n adjacency lists)
for i in range(n):
for j in range(i+1,n): #we start from i+1 because graph is undirected
r = random.random()
if r <= p : #the probability that a random value in [0,1] is at most p is exactly p
graph[i].append(j)
graph[j].append(i) #graph is undirected, thus if j is a neighbor of i, then i is a neighbor of j
return graph
def countTriangles(graph):
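    # Every triangle is counted once for each of its six ordered (i, j, k) walks, hence the division by 6.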
triangles = 0
for i in graph.keys():
for j in graph[i]:
for k in graph[j]:
if k in graph[i]:
triangles += 1
return int(triangles/6)
def diameter(graph):
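    # BFS from every node; the diameter is the largest distance found, or -1 if the graph is disconnected.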
n = len(graph)
diameter = -1
for i in graph.keys(): #check the max distance for every node i
visited = [] #keep a list of visited nodes to check if the graph is connected
max_distance = 0 #keep the max distance from i
###BFS###
queue = [i]
distance = dict()
for j in graph.keys():
distance[j] = -1
distance[i] = 0
while queue != []:
s = queue.pop(0)
visited.append(s)
for j in graph[s]:
if distance[j] < 0:
queue.append(j)
distance[j] = distance[s] + 1
if distance[j] > max_distance:
max_distance = distance[j]
###END###
if len(visited) < n: #graph is not connected
break
elif max_distance > diameter:
diameter = max_distance
return diameter
def averageClustering(graph):
n = len(graph)
total=0
for i in graph.keys():
triangles = 0
neigh_pairs = (len(graph[i])*(len(graph[i])-1))/2 #number of pairs of neighbors of node i
for j in graph[i]:
for k in graph[i]:
if k in graph[j]:
triangles += 1 #number of pairs of neighbors of node i that are adjacent
if neigh_pairs > 0:
total += float(triangles)/(2*neigh_pairs) # triangles/neighbors is the individual clustering of node i
return float(total)/n #the average clustering is the average of individual clusterings | 0.033288 |
#
# Copyright 2009 Claudio Pisa (claudio dot pisa at uniroma2 dot it)
#
# This file is part of SVEF (SVC Streaming Evaluation Framework).
#
# SVEF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SVEF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SVEF. If not, see <http://www.gnu.org/licenses/>.
#
import os
import commands
import sys
class NALUException(BaseException):
pass
class NALU:
"This class represents a NALU, i.e. a line in a tracefile"
startpos = ""
length = -1
lid = ""
tid = ""
qid = ""
packettype = ""
discardable = ""
truncatable = ""
timestamp = 0
frame_number = -1
parents = None #NALUs on which this NALU depends
children = None #NALUs that depend on this NALU
tracefileline = ""
def __init__(self, tracefileline):
"Take a line from a tracefile, parse it and populate this object's fields"
self.tracefileline = tracefileline
self.parents = list()
self.children = list()
try:
stuff = tracefileline.split()
try:
self.startpos = stuff[0]
except IndexError:
raise NALUException
try:
self.id = int(stuff[0], 16)
except ValueError:
self.id = -1
try:
self.length = int(stuff[1])
except ValueError:
self.length = -1
try:
self.lid = int(stuff[2])
except ValueError:
self.lid = -1
try:
self.tid = int(stuff[3])
except ValueError:
self.tid = -1
try:
self.qid = int(stuff[4])
except ValueError:
self.qid = -1
self.packettype = stuff[5]
self.discardable = stuff[6]
self.truncatable = stuff[7]
try:
self.frame_number = int(stuff[8])
except IndexError:
self.frame_number = -1
try:
self.timestamp = int(stuff[9])
except IndexError:
self.timestamp = 0
        except:
            # report any parsing problem uniformly as a NALUException
            raise NALUException
def __str__(self):
"The string representation of this object"
return "%s%8s%5s%5s%5s%14s%12s%12s%8s%20s" % \
(self.startpos, self.length, self.lid, \
self.tid, self.qid, self.packettype, self.discardable, \
self.truncatable, self.frame_number, self.timestamp)
def __repr__(self):
return str(self)
def __cmp__(self, other):
if self.id < other.id:
return -1
if self.id > other.id:
return 1
return 0
def isControlNALU(self):
"This object is a control NALU (i.e. type 6 or 14)?"
return self.length <= 20 and self.length > 0
def isGOPHead(self):
"This object is at the beginning of a GOP?"
return self.lid == 0 and self.tid == 0 and self.qid == 0
def getCoarseParentsIds(self):
"Returns a list of (lid, tid) of the NALUs on which the current object depends"
if self.lid == 0 and self.tid == 0:
return []
elif self.lid == 0:
return [(self.lid, self.tid - 1)]
elif self.tid == 0:
return [(self.lid - 1, self.tid)]
else:
return [(self.lid, self.tid - 1), (self.lid - 1, self.tid)]
def getMediumParentsIds(self):
"Returns a list of (tid, qid) of the NALUs on which the current object depends"
if self.tid == 0 and self.qid == 0:
return []
elif self.qid == 0:
return [(self.tid-1, 0)]
else:
return [(self.tid, self.qid - 1)]
def getAVCParentsIds(self):
"Returns a list of (tid,) of the NALUs on which the current object depends"
if self.tid == 0:
return []
else:
            return [(self.tid - 1,)]
def getMediumId(self):
"Returns a (tid, qid) tuple"
return (self.tid, self.qid)
def getCoarseId(self):
"Returns a (lid, tid) tuple"
return (self.lid, self.tid)
def getAVCId(self):
"Returns a (tid,) tuple"
return (self.tid,)
def alldata(self):
"String representation with the frame number"
return self.__str__()
def copy(self):
return NALU(self.tracefileline)
def meiosis(self, maxlen):
"return a list of NALUs, each with length less than maxlen, resulting from the division of this NALU"
res = []
numberofnalus = self.length / maxlen + 1
avglen = self.length / numberofnalus
for i in range(numberofnalus):
n = NALU(self.tracefileline)
n.length = avglen
n.id = n.id + avglen * i
n.startpos = "0x%08x" % n.id
res.append(n)
#the remainder
res[-1].length += self.length % numberofnalus
return res
class DecoderNALU:
frame = -1
lid = -1
tl = -1
ql = -1
type = ""
bid = -1
ap = -1
qp = -1
original = ""
def __init__(self, decoderline):
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# Frame 8 ( LId 1, TL 1, QL 0, SVC-P, BId 0, AP 1, QP 30 )
try:
fields = decoderline.split()
self.frame = int(fields[1])
self.lid = int(fields[4].strip(','))
self.tl = int(fields[6].strip(','))
self.ql = int(fields[8].strip(','))
self.type = fields[9].strip(',')
# self.bid = int(fields[11].strip(','))
# self.ap = int(fields[13].strip(','))
# self.qp = int(fields[15].strip(','))
except:
raise NALUException
def __str__(self):
return " Frame %d ( LId %d, TL %d, QL %d, %s, BId %d, AP %d, QP %d ) " % \
(self.frame, self.lid, self.tl, self.ql, self.type, self.bid, self.ap, self.qp)
def __repr__(self):
return str(self)
def alldata(self):
return str(self) + " %s %s" % (self.original, self.realframe)
def mince(filename, bytesperframe, tmpdir, filenames=[]):
"""Splits a YUV file called filename in bytesperframe big frames into directory tmpdir"""
sr = os.stat(filename)
assert sr.st_size % bytesperframe == 0
numberofframes = sr.st_size / bytesperframe
if filenames == []:
filenames = range(numberofframes)
print "%d vs. %d" % (numberofframes, len(filenames))
if numberofframes < len(filenames):
filenames=filenames[:numberofframes]
elif numberofframes > len(filenames):
numberofframes = len(filenames)
assert numberofframes == len(filenames)
thefile = open(filename, 'rb')
offset = 0
for f in filenames:
ofname = "%s/%d.yuv" % (tmpdir, f)
print ofname
offile = open(ofname, 'wb')
thefile.seek(offset)
data = thefile.read(bytesperframe)
offile.write(data)
offile.close()
# check that the frame size matches
srframe = os.stat(ofname)
assert srframe.st_size == bytesperframe
offset += bytesperframe
thefile.close()
def dothem(commandz, printoutput=True, printcommand=True, dummy=False, exitonerror=True, returnexitcode=False):
"execute a list of commands"
if not isinstance(commandz, list):
commandlist = [commandz]
else:
commandlist = commandz
for command in commandlist:
if printcommand:
print command
if not dummy:
ret, stdoe = commands.getstatusoutput(command)
else:
ret = 0
stdoe = ""
if printoutput:
print stdoe
if ret != 0:
print >> sys.stderr, "Execution Error!"
if exitonerror:
sys.exit(3)
if returnexitcode:
return stdoe, ret
else:
return stdoe
def makehashdict(yuvdir):
command = "md5sum %s/*.yuv" % yuvdir
md5out = dothem([command])
md5dict = {}
for line in md5out.split('\n'):
md5, file = line.split()
frameno = int(os.path.splitext(os.path.basename(file))[0])
md5dict.update({md5: frameno})
return md5dict
| 0.043329 |
#! /usr/bin/env python
__author__ = 'bbowman@pacificbiosciences.com'
import sys
import logging
LOG_FORMAT = "%(asctime)s [%(levelname)s - %(module)s] %(message)s"
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
FORMATTER = logging.Formatter( LOG_FORMAT, TIME_FORMAT )
def add_stream_handler( logger, stream=sys.stdout, log_level=logging.INFO ):
# Set up a simple Stream handler
stream_handler = logging.StreamHandler( stream=stream )
stream_handler.setFormatter( FORMATTER )
stream_handler.setLevel( log_level )
logger.addHandler( stream_handler )
def add_file_handler( logger, log_file='rna_pipeline.log', log_level=logging.INFO ):
# Set a second handler for the log file
file_handler = logging.FileHandler( log_file )
file_handler.setFormatter( FORMATTER )
file_handler.setLevel( log_level )
logger.addHandler( file_handler )
def initialize_logger( logger, stream=None, log_file=None, debug=False):
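    # Set the logger level and attach stream and/or file handlers as requested.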
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger.setLevel( log_level )
if stream:
add_stream_handler( logger, stream=stream, log_level=log_level )
if log_file:
add_file_handler( logger, log_file=log_file, log_level=log_level ) | 0.026688 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('--gzip', action='store_true')
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, 'r')
else:
return open(args.input, 'r', encoding='utf-8')
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == '__main__':
main()
| 0 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.common',
marshal='google.ads.googleads.v7',
manifest={
'UrlCollection',
},
)
class UrlCollection(proto.Message):
r"""Collection of urls that is tagged with a unique identifier.
Attributes:
url_collection_id (str):
Unique identifier for this UrlCollection
instance.
final_urls (Sequence[str]):
A list of possible final URLs.
final_mobile_urls (Sequence[str]):
A list of possible final mobile URLs.
tracking_url_template (str):
URL template for constructing a tracking URL.
"""
url_collection_id = proto.Field(
proto.STRING,
number=5,
optional=True,
)
final_urls = proto.RepeatedField(
proto.STRING,
number=6,
)
final_mobile_urls = proto.RepeatedField(
proto.STRING,
number=7,
)
tracking_url_template = proto.Field(
proto.STRING,
number=8,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 0 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
## The NodeSetEditor is a base class for all Editors which focus their
# editing on a subset of nodes beneath a ScriptNode. This set defaults
# to the ScriptNode.selection() but can be modified to be any Set of nodes.
class NodeSetEditor( GafferUI.EditorWidget ) :
def __init__( self, topLevelWidget, scriptNode, **kw ) :
self.__nodeSet = Gaffer.StandardSet()
self.__nodeSetChangedSignal = GafferUI.WidgetSignal()
GafferUI.EditorWidget.__init__( self, topLevelWidget, scriptNode, **kw )
self.__titleFormat = None
self.__updateScheduled = False
# allow derived classes to call _updateFromSet() themselves after construction,
# to avoid being called when they're only half constructed.
self.__setNodeSetInternal( self.scriptNode().selection(), callUpdateFromSet=False )
## Sets the nodes that will be displayed by this editor. As members are
# added to and removed from the set, the UI will be updated automatically
# to show them. If the set is not scriptNode.selection(), then an OrphanRemover
# will be applied automatically to the set so that deleted nodes are not
# visible in the UI.
# \todo Although the OrphanRemover behaviour is convenient for our current use cases
# where it prevents the callers of setNodeSet() from having to worry about nodes
# being deleted, it might not be ideal in all cases. For instance the same set may be
# reused across multiple NodeSetEditors and end up with multiple OrphanRemovers applied.
# We might like to consider an API where the behaviours applied to a given object can be
# queried, or we could make it the responsibility of the caller to apply an OrphanRemover
# explicitly where appropriate.
def setNodeSet( self, nodeSet ) :
self.__setNodeSetInternal( nodeSet, callUpdateFromSet=True )
def getNodeSet( self ) :
return self.__nodeSet
def nodeSetChangedSignal( self ) :
return self.__nodeSetChangedSignal
## Overridden to display the names of the nodes being edited.
# Derived classes should override _titleFormat() rather than
# reimplement this again.
def getTitle( self ) :
t = GafferUI.EditorWidget.getTitle( self )
if t :
return t
if self.__titleFormat is None :
self.__titleFormat = self._titleFormat()
self.__nameChangedConnections = []
for n in self.__titleFormat :
if isinstance( n, Gaffer.GraphComponent ) :
self.__nameChangedConnections.append( n.nameChangedSignal().connect( Gaffer.WeakMethod( self.__nameChanged ) ) )
result = ""
for t in self.__titleFormat :
if isinstance( t, basestring ) :
result += t
else :
result += t.getName()
return result
## Ensures that the specified node has a visible editor of this class type editing
# it, creating one if necessary.
## \todo User preferences for whether these are made floating, embedded, whether
# they are reused etc. This class should provide the behaviour, but the code for
# linking it to preferences should be in a startup file.
## \todo Consider how this relates to draggable editor tabs and editor floating
# once we implement that in CompoundEditor - perhaps acquire will become a method
# on CompoundEditor instead at this point.
@classmethod
def acquire( cls, node ) :
if isinstance( node, Gaffer.ScriptNode ) :
script = node
else :
script = node.scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
for editor in scriptWindow.getLayout().editors( type = cls ) :
if node.isSame( editor._lastAddedNode() ) :
editor.reveal()
return editor
childWindows = scriptWindow.childWindows()
for window in childWindows :
if isinstance( window, _EditorWindow ) :
if isinstance( window.getChild(), cls ) and node in window.getChild().getNodeSet() :
window.setVisible( True )
return window.getChild()
editor = cls( script )
editor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
window = _EditorWindow( scriptWindow, editor )
window.setVisible( True )
return editor
def _lastAddedNode( self ) :
if len( self.__nodeSet ) :
return self.__nodeSet[-1]
return None
## Called when the contents of getNodeSet() have changed and need to be
# reflected in the UI - so must be implemented by derived classes to update
    # their UI appropriately. Updates are performed lazily to avoid unnecessary
# work, but any pending updates can be performed immediately by calling
# _doPendingUpdate().
#
# All implementations must first call the base class implementation.
def _updateFromSet( self ) :
# flush information needed for making the title -
# we'll update it lazily in getTitle().
self.__nameChangedConnections = []
self.__titleFormat = None
self.titleChangedSignal()( self )
# May be called to ensure that _updateFromSet() is called
# immediately if a lazy update has been scheduled but not
# yet performed.
def _doPendingUpdate( self ) :
self.__updateTimeout()
## May be reimplemented by derived classes to specify a combination of
# strings and node names to use in building the title. The NodeSetEditor
# will take care of updating the title appropriately as the nodes are renamed.
def _titleFormat( self, _prefix = None, _maxNodes = 2, _reverseNodes = False, _ellipsis = True ) :
if _prefix is None :
result = [ IECore.CamelCase.toSpaced( self.__class__.__name__ ) ]
else :
result = [ _prefix ]
numNames = min( _maxNodes, len( self.__nodeSet ) )
if numNames :
result.append( " : " )
if _reverseNodes :
nodes = self.__nodeSet[len(self.__nodeSet)-numNames:]
nodes.reverse()
else :
nodes = self.__nodeSet[:numNames]
for i, node in enumerate( nodes ) :
result.append( node )
if i < numNames - 1 :
result.append( ", " )
if _ellipsis and len( self.__nodeSet ) > _maxNodes :
result.append( "..." )
return result
def __setNodeSetInternal( self, nodeSet, callUpdateFromSet ) :
if self.__nodeSet.isSame( nodeSet ) :
return
prevSet = self.__nodeSet
self.__nodeSet = nodeSet
self.__memberAddedConnection = self.__nodeSet.memberAddedSignal().connect( Gaffer.WeakMethod( self.__membersChanged ) )
self.__memberRemovedConnection = self.__nodeSet.memberRemovedSignal().connect( Gaffer.WeakMethod( self.__membersChanged ) )
if not self.__nodeSet.isSame( self.scriptNode().selection() ) :
self.__orphanRemover = Gaffer.Behaviours.OrphanRemover( self.__nodeSet )
else :
self.__orphanRemover = None
if callUpdateFromSet :
# only update if the nodes being held have actually changed,
# so we don't get unnecessary flicker in any of the uis.
needsUpdate = len( prevSet ) != len( self.__nodeSet )
if not needsUpdate :
for i in range( 0, len( prevSet ) ) :
if not prevSet[i].isSame( self.__nodeSet[i] ) :
needsUpdate = True
break
if needsUpdate :
self._updateFromSet()
self.__nodeSetChangedSignal( self )
def __nameChanged( self, node ) :
self.titleChangedSignal()( self )
def __membersChanged( self, set, member ) :
if self.__updateScheduled :
return
QtCore.QTimer.singleShot( 0, self.__updateTimeout )
self.__updateScheduled = True
def __updateTimeout( self ) :
if self.__updateScheduled :
self.__updateScheduled = False
self._updateFromSet()
class _EditorWindow( GafferUI.Window ) :
def __init__( self, parentWindow, editor, **kw ) :
GafferUI.Window.__init__( self, borderWidth = 8, **kw )
self.setChild( editor )
self.__titleChangedConnection = editor.titleChangedSignal().connect( Gaffer.WeakMethod( self.__updateTitle ) )
self.__nodeSetMemberRemovedConnection = editor.getNodeSet().memberRemovedSignal().connect( Gaffer.WeakMethod( self.__nodeSetMemberRemoved ) )
self.__closedConnection = self.closedSignal().connect( Gaffer.WeakMethod( self.__closed ) )
parentWindow.addChildWindow( self )
self.__updateTitle()
def __updateTitle( self, *unused ) :
self.setTitle( self.getChild().getTitle() )
def __nodeSetMemberRemoved( self, set, node ) :
if not len( set ) :
self.parent().removeChild( self )
def __closed( self, window ) :
assert( window is self )
self.parent().removeChild( self )
| 0.054678 |
# -*- coding: utf-8 -*-
from PIL import Image
import StringIO, random, zlib
class myd(dict):
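    # dict with attribute-style access: missing keys read as '', '+' merges two dicts, '-' drops a key.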
def __getattr__(self, key):
return self.get(key,'')
def __setattr__(self, key, value):
self[key] = value
def __add__(self, data):
return myd(self.items() + data.items())
def __sub__(self, key):
return myd((k,v) for (k,v) in self.items() if k != key)
def _loadpic(who,typ,pic):
return Image.open("./s/avem/%s%s%s.png" % ('' if typ=='bg' else who+'/', 'hair' if who=='boy' and typ=='head' else typ, pic) ).convert("RGBA")
def img(kv,typ):
if typ=='bg':
return _db.bg[int(kv.bg)]
else:
return _loadpic(kv.who,typ,int(kv[typ])+1)
_max, _db = myd(), myd(bg=[])
_max['girl'] = myd(bg=5,face=4,clothes=59,mouth=17,head=33,eye=53)
_max['boy'] = myd(bg=5,face=4,clothes=65,mouth=26,head=36,eye=32)
for n in range(6):
_db.bg.append( Image.open("./s/avem/bg%s.png" % n ).convert("RGBA") )
def compose_image(kv):
im_base = img(kv,'bg').copy()
for comp in ('face', 'clothes', 'mouth', 'head', 'eye'):
p = img(kv,comp)
im_base.paste(p, (0,0), p)
return im_base
def draw_item(kv):
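    # Compose the avatar, optionally downscale and grayscale it, and return PNG-encoded bytes.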
im = compose_image(kv)
width = kv.get('width',400)
if width: width = int(width)
if width < 400:
im = im.resize((width, width), Image.BICUBIC)
if kv.gray:
im = im.convert("LA")
output = StringIO.StringIO()
im.save(output, 'PNG')
contents = output.getvalue()
output.close()
return contents
def draw_byhash(kv,newseed=None):
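    # Derive a deterministic seed from 'hash' or 'seed' when given, then fill any missing part indices at random.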
if kv.hash:
newseed = zlib.crc32(kv.get('hash'),0)
elif kv.seed:
newseed = int(kv.seed)
if newseed:
l = []
for n in sorted(kv.items()):
l.append('/%s/%s' % n)
lnk = ''.join(l)
if not newseed: newseed = random.randrange(0,99999999)
random.seed(newseed)
kv.update(seed=newseed)
for k,v in _max[kv.who].items():
if not k in kv:
if k=='bg': kv[k] = random.randrange(v) + 1
else: kv[k] = random.randrange(v)
dat = draw_item(kv)
return dat
| 0.026066 |
# This module provides a class representing scalar, vector, and tensor fields.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-5
#
import Numeric, umath
import VectorModule, TensorModule
from Scientific.indexing import index_expression
from Scientific.Functions import Interpolation
#
# General tensor field base class
#
class TensorField(Interpolation.InterpolatingFunction):
"""Tensor field of arbitrary rank
A tensor field is described by a tensor at each point of
a three-dimensional rectangular grid. The grid spacing
may be non-uniform. Tensor fields are implemented as a subclass
of InterpolatingFunction from the module
Scientific.Functions.Interpolation and thus share all methods
defined in that class.
Constructor: TensorField(|rank|, |axes|, |values|, |default|='None')
Arguments:
|rank| -- a non-negative integer indicating the tensor rank
|axes| -- a sequence of three one-dimensional arrays, each
of which specifies one coordinate (x, y, z) of the
grid points
|values| -- an array of 'rank+3' dimensions. Its first
three dimensions correspond to the x, y, z
directions and must have lengths compatible with
the axis arrays. The remaining dimensions must
have length 3.
|default| -- the value of the field for points outside the grid.
A value of 'None' means that an exception will be
raised for an attempt to evaluate the field outside
the grid. Any other value must a tensor of the
correct rank.
Evaluation:
- 'tensorfield(x, y, z)' (three coordinates)
- 'tensorfield(coordinates)' (any sequence containing three coordinates)
"""
def __init__(self, rank, axes, values, default = None, check = 1):
if check:
if len(axes) != 3:
raise ValueError, 'Field must have three axes'
if len(values.shape) != 3 + rank:
raise ValueError, 'Values must have rank ' + `rank`
if values.shape[3:] != rank*(3,):
raise ValueError, 'Values must have dimension 3'
self.rank = rank
self.spacing = []
for axis in axes:
d = axis[1:]-axis[:-1]
self.spacing.append(d[0])
if check:
dmin = Numeric.minimum.reduce(d)
if abs(dmin-Numeric.maximum.reduce(d)) > 0.0001*dmin:
raise ValueError, 'Grid must be equidistant'
Interpolation.InterpolatingFunction.__init__(self, axes, values,
default)
def __call__(self, *points):
if len(points) == 1:
points = tuple(points[0])
value = apply(Interpolation.InterpolatingFunction.__call__,
(self, ) + points)
if self.rank == 0:
return value
elif self.rank == 1:
return VectorModule.Vector(value)
else:
return TensorModule.Tensor(value)
def __getitem__(self, index):
if type(index) == type(0):
index = (index,)
rank = self.rank - len(index)
if rank < 0:
raise ValueError, 'Number of indices too large'
index = index_expression[...] + index + rank*index_expression[::]
try: default = self.default[index]
except TypeError: default = None
if rank == 0:
return ScalarField(self.axes, self.values[index], default, 0)
elif rank == 1:
return VectorField(self.axes, self.values[index], default, 0)
else:
return TensorField(self.axes, rank, self.values[index], default, 0)
def zero(self):
"Returns a tensor of the correct rank with zero elements."
if self.rank == 0:
return 0.
else:
            return TensorModule.Tensor(Numeric.zeros(self.rank*(3,), Numeric.Float))
def derivative(self, variable):
"""Returns the derivative with respect to |variable|, which
must be one of 0, 1, or 2."""
ui = variable*index_expression[::] + \
index_expression[2::] + index_expression[...]
li = variable*index_expression[::] + \
index_expression[:-2:] + index_expression[...]
d_values = 0.5*(self.values[ui]-self.values[li])/self.spacing[variable]
diffaxis = self.axes[variable]
diffaxis = 0.5*(diffaxis[2:]+diffaxis[:-2])
d_axes = self.axes[:variable]+[diffaxis]+self.axes[variable+1:]
d_default = None
if self.default is not None:
d_default = Numeric.zeros(self.rank*(3,), Numeric.Float)
return self._constructor(d_axes, d_values, d_default, 0)
def allDerivatives(self):
"Returns all three derivatives (x, y, z)."
x = self.derivative(0)
x._reduceAxis(1)
x._reduceAxis(2)
y = self.derivative(1)
y._reduceAxis(0)
y._reduceAxis(2)
z = self.derivative(2)
z._reduceAxis(0)
z._reduceAxis(1)
return x, y, z
def _reduceAxis(self, variable):
self.axes[variable] = self.axes[variable][1:-1]
i = variable*index_expression[::] + \
index_expression[1:-1:] + index_expression[...]
self.values = self.values[i]
def _checkCompatibility(self, other):
pass
def __add__(self, other):
self._checkCompatibility(other)
if self.default is None or other.default is None:
default = None
else:
default = self.default + other.default
return self._constructor(self.axes, self.values+other.values,
default, 0)
def __sub__(self, other):
self._checkCompatibility(other)
if self.default is None or other.default is None:
default = None
else:
default = self.default - other.default
return self._constructor(self.axes, self.values-other.values,
default, 0)
#
# Scalar field class definition
#
class ScalarField(TensorField):
"""Scalar field (tensor field of rank 0)
Constructor: ScalarField(|axes|, |values|, |default|='None')
A subclass of TensorField.
"""
def __init__(self, axes, values, default = None, check = 1):
TensorField.__init__(self, 0, axes, values, default, check)
def gradient(self):
"Returns the gradient (a vector field)."
x, y, z = self.allDerivatives()
grad = Numeric.transpose(Numeric.array([x.values, y.values, z.values]),
[1,2,3,0])
if self.default is None:
default = None
else:
default = Numeric.zeros((3,), Numeric.Float)
return VectorField(x.axes, grad, default, 0)
def laplacian(self):
"Returns the laplacian (a scalar field)."
return self.gradient().divergence()
ScalarField._constructor = ScalarField
#
# Vector field class definition
#
class VectorField(TensorField):
"""Vector field (tensor field of rank 1)
Constructor: VectorField(|axes|, |values|, |default|='None')
A subclass of TensorField.
"""
def __init__(self, axes, values, default = None, check = 1):
TensorField.__init__(self, 1, axes, values, default, check)
def zero(self):
        return VectorModule.Vector(0., 0., 0.)
def _divergence(self, x, y, z):
return x[0] + y[1] + z[2]
def _curl(self, x, y, z):
curl_x = y.values[..., 2] - z.values[..., 1]
curl_y = z.values[..., 0] - x.values[..., 2]
curl_z = x.values[..., 1] - y.values[..., 0]
curl = Numeric.transpose(Numeric.array([curl_x, curl_y, curl_z]),
[1,2,3,0])
if self.default is None:
default = None
else:
default = Numeric.zeros((3,), Numeric.Float)
return VectorField(x.axes, curl, default, 0)
def _strain(self, x, y, z):
strain = Numeric.transpose(Numeric.array([x.values, y.values,
z.values]), [1,2,3,0,4])
strain = 0.5*(strain+Numeric.transpose(strain, [0,1,2,4,3]))
trace = (strain[..., 0,0] + strain[..., 1,1] + strain[..., 2,2])/3.
strain = strain - trace[..., Numeric.NewAxis, Numeric.NewAxis] * \
Numeric.identity(3)[Numeric.NewAxis, Numeric.NewAxis,
Numeric.NewAxis, :, :]
if self.default is None:
default = None
else:
default = Numeric.zeros((3, 3), Numeric.Float)
return TensorField(2, x.axes, strain, default, 0)
def divergence(self):
"Returns the divergence (a scalar field)."
x, y, z = self.allDerivatives()
return self._divergence(x, y, z)
def curl(self):
"Returns the curl (a vector field)."
x, y, z = self.allDerivatives()
return self._curl(x, y, z)
def strain(self):
"Returns the strain (a tensor field of rank 2)."
x, y, z = self.allDerivatives()
return self._strain(x, y, z)
def divergenceCurlAndStrain(self):
"Returns all derivative fields: divergence, curl, and strain."
x, y, z = self.allDerivatives()
return self._divergence(x, y, z), self._curl(x, y, z), \
self._strain(x, y, z)
def laplacian(self):
"Returns the laplacian (a vector field)."
x, y, z = self.allDerivatives()
return self._divergence(x, y, z).gradient()-self._curl(x, y, z).curl()
def length(self):
"""Returns a scalar field corresponding to the length (norm) of
the vector field."""
l = Numeric.sqrt(Numeric.add.reduce(self.values**2, -1))
try: default = Numeric.sqrt(Numeric.add.reduce(self.default))
except ValueError: default = None
return ScalarField(self.axes, l, default, 0)
VectorField._constructor = VectorField
# Sort indices for automatic document string extraction
TensorField._documentation_sort_index = 0
ScalarField._documentation_sort_index = 1
VectorField._documentation_sort_index = 2
#
# Test code
#
if __name__ == '__main__':
from Numeric import *
axis = arange(0., 1., 0.1)
values = zeros((10,10,10,3), Float)
zero = VectorField(3*(axis,), values)
div = zero.divergence()
curl = zero.curl()
strain = zero.strain()
| 0.031458 |
#!/usr/bin/env python
import numpy as np
# Dataset generator is a script to produce fake datasets in order to test averager
########################
# setup input parameters
########################
# Number of data points, measurements, systematic
nData = 10
nMes = 2
nSyst = 5
# part of non-emply measurement/systematics (between 0 and 1)
zMes = 0.99
zSyst = 0.99
# Min and mix number of measurements for data point (have to be smaller as total number of measurements)
#rMes = [1,3]
# Min and mix number of systematic uncertainties for measurement (have to be smaller as total number of systematic uncertainties)
#rSyst = [3,4]
# Parameters of uncertainties
# Minumal and maximal relative statistical/systematic uncertainty
vStat = [0.01,0.03]
vSyst = [0.08,0.11]
# array of truth data points
Tdata = nData*np.random.random_sample((nData))
# array of truth nuisanse parameters
Tshift = np.random.normal(0, 1, nSyst)
# array of statistical uncertainties.
# 2D data points vs measurements
Mstat = (vStat[1]-vStat[0]) * np.random.random_sample((nMes, nData)) + vStat[0]
# array of systematic uncertainties.
# 3D data points vs measurement vs systematic
Msyst = (vSyst[1]-vSyst[0]) * np.random.random_sample((nMes, nData, nSyst)) + vSyst[0]
# Add holes in the array of measurements and systematics
# - Some data points does not exists for a certain measurement
# - Some systematics does not exist for a certain measurement
# --------------------------------
Hdata = np.signbit(np.random.random_sample((nMes, nData))-zMes)*1
Hsyst = np.signbit(np.random.random_sample((nMes, 1, nSyst))-zSyst)*1
# array of measured data points
Gaus_ij = np.random.normal(0, 1, nMes*nData).reshape((nMes, nData))
Mdata = Tdata*(1+(Mstat*Gaus_ij)+np.sum(Msyst*Tshift*Hsyst,axis=2))*Hdata
np.savetxt('Tshift.out', Tshift, fmt='%1.3f')
np.savetxt('Tdata.out', Tdata, fmt='%1.3f')
# Write output files. Python format
# Loop over measurements
for m in range(nMes):
f = open('test'+str(m)+'.csv','w')
f.write('bin1,data,stat')
# Loop over systematics
for s in range(nSyst):
if(Hsyst[m][0][s]!=0):
f.write(',error%05i' % s)
f.write('\n')
# Loop over data point
for d in range(nData):
if(Mdata[m][d]!=0):
f.write('%4.0f,'% d)
f.write('%8.3f,'% (Mdata[m][d]))
f.write('%8.3f'% (Mstat[m][d]*Mdata[m][d]))
# Loop over systematics
for s in range(nSyst):
if(Hsyst[m][0][s]!=0):
f.write(',%8.3f'% (Msyst[m][d][s]*Mdata[m][d]))
f.write('\n')
f.close()
# Write output files. Fortran format
# Loop over measurements
for m in range(nMes):
f = open('test'+str(m)+'.dat','w')
f.write('&Data\n')
f.write(' Name = \'Data%i\'\n'%nData)
f.write(' NData = %i\n'% nData)
f.write(' NColumn = %i\n'%(nSyst+3))
f.write(' ColumnType = \'Bin\', \'Sigma\', %i*\'Error\'\n'%(nSyst+1))
f.write(' ColumnName = \'Y\', \'x-section\', \'stat\'')
for s in range(nSyst):
        if(Hsyst[m][0][s]!=0):  # only name columns for systematics that are actually written below
f.write(',\'error%05i\'' % s)
f.write('\n')
f.write(' Reaction = \'Bla\'\n')
f.write(' Percent = false')
for s in range(nSyst):
if(Hsyst[m][0][s]!=0):
f.write(',false')
f.write('\n')
f.write('&END\n')
# Loop over data point
for d in range(nData):
if(Mdata[m][d]!=0):
f.write('%4.0f '% d)
f.write('%8.3f '% Mdata[m][d])
f.write('%8.3f '% (Mstat[m][d]*Mdata[m][d]))
# Loop over systematics
for s in range(nSyst):
if(Hsyst[m][0][s]!=0):
f.write('%8.3f '% (Msyst[m][d][s]*Mdata[m][d]))
f.write('\n')
f.write('\n')
f.close()
| 0.026 |
#FILE NAME: BannerTool.py
#created by: Ciro Veneruso
#purpose: banner localization
#last edited by: Ciro Veneruso
#INSTALL: BeautifulSoup
#TODO: this code is a blob and must be refactored
import re
import mechanize
import socket
import urllib
from tools import BaseTool
from bs4 import BeautifulSoup
from pprint import pprint
from ipwhois import IPWhois, WhoisLookupError
from tld import get_tld
import urlparse
from tld.exceptions import TldIOError, TldDomainNotFound, TldBadUrl
from tools import ToolException
class BannerTool(BaseTool):
def __init__(self, config):
BaseTool.__init__(self, "BannerAnalyzer", config, needRefresh = True)
self.values = []
def run(self, browser):
try:
url = browser.url.replace('http://','')
print url+"\n"
#response = browser.open(url)
html = browser.httpResponse #response.get_data()
site_domain_name = get_tld(browser.url)
#print(site_domain_name)
soup = BeautifulSoup(html)
links = soup.findAll('a')
response_domain = ""
addr = ""
name = ""
state = ""
city = ""
description = ""
country = ""
foo_flag = 0
flag = 0
for link in links:
foo = link.findChild('img')
#print foo
if foo is not None:
foo_flag = 1
flag = 1
href = link.get('href')
if href is None:
continue
print(href+"\n")
if href.startswith('/'):
response_domain ="link interno"
print ("link interno\n")
elif href.startswith("http://"+url):
response_domain ="link interno"
print ("link interno\n")
elif href.startswith("https://"+url):
response_domain ="link interno"
print ("link interno\n")
else:
response_domain ="link esterno"
print ("link esterno... Geolocalizzazione:\n")
try:
banner_domain_name = get_tld(href)
print(banner_domain_name+"\n")
print(site_domain_name)
url = 'https://' + url if not banner_domain_name.startswith('http') else banner_domain_name.replace('http:', 'https:')
parsed = urlparse.urlparse(url)
hostname = "%s://%s" % (parsed.scheme, parsed.netloc)
url = url.split("//")[1]
url_s = url.split("/")[0]
ip = socket.gethostbyname(url_s)
#print(href)
#get ip by url
#ip = socket.gethostbyname(banner_domain_name)
#get information by ip
result = None
try:
obj = IPWhois(ip)
result = obj.lookup()
                            except Exception as e:
continue
addr = result['nets'][0]['address'] if result['nets'][0]['address'] != None else 'None'
name = result['nets'][0]['name'] if result['nets'][0]['name'] != None else 'None'
state = result['nets'][0]['state'] if result['nets'][0]['state'] != None else 'None'
city = result['nets'][0]['city'] if result['nets'][0]['city'] != None else 'None'
description = result['nets'][0]['description'] if result['nets'][0]['description'] != None else 'None'
country = result['nets'][0]['country'] if result['nets'][0]['country'] != None else 'None'
'''
self.values.append(["Link analyzed",href])
self.values.append(["Response",response_domain])
self.values.append(["Address", addr])
self.values.append(["Name", name])
self.values.append(["State", state])
self.values.append(["City", city])
self.values.append(["Description", description])
self.values.append(["Country", country])
print('Name: ' + name + '\n' + 'Description: ' + description + '\n' + 'Address: ' +
addr + '\n' + 'Country: ' + country + '\n' + 'State: ' + state + '\n' + 'City: ' + city)
'''
temp = {
"Url" : url,
"Address" : addr,
"Name" : name,
"State" : state,
"City" : city,
"Description" : description,
"Country" : country,
"Response" : response_domain
}
self.values.append({ "Link analyzed %s" % (href) : temp })
except TldBadUrl as e:
print ("Bad URL!")
if flag == 0:
print("There aren' t extra domain banners in this site")
if(foo_flag == 0):
print("There aren't banner in this site")
except WhoisLookupError as e:
raise ToolException(str(e))
return len(self.values) >= self.config.getInt("banner_count_treshold", 0)
def createModel(self):
return False, ["key","value"], self.values
| 0.041851 |
#!/usr/bin/env python3
import rospy
from lg_common import ManagedBrowser
from lg_msg_defs.msg import ApplicationState
from std_msgs.msg import String
if __name__ == '__main__':
rospy.init_node('browser')
browser = ManagedBrowser(
url='https://www.google.com',
force_device_scale_factor=2,
user_agent='Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; ' +
'en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) ' +
'Version/4.0.4 Mobile/7B314 Safari/531.21.10',
remote_debugging_port=10000,
)
# Start the process and converge its window.
browser.set_state(ApplicationState.VISIBLE)
# Provide a state topic for debugging.
rospy.Subscriber('/example_browser/state', ApplicationState,
browser.handle_state_msg)
# Provide a debug socket topic for debugging.
# i.e. {"id":1,"method":"Page.reload","params":{"ignoreCache":"True"}}
def handle_debug_sock_msg(msg):
browser.send_debug_sock_msg(msg.data)
rospy.Subscriber('/example_browser/debug', String, handle_debug_sock_msg)
rospy.spin()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 0 |
"""Tomography using the `kaczmarz` solver.
Solves the inverse problem
A(x) = g
Where ``A`` is a fan (cone) beam forward projector, ``x`` the result and
``g`` is given data.
In order to solve this using `kaczmarz`'s method, the operator is split into
several sub-operators (each representing a subset of the angles and detector
points). This allows a faster solution.
"""
import odl
# --- Set up the forward operator (ray transform) --- #
# Reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
space = odl.uniform_discr(
min_pt=[-20, -20], max_pt=[20, 20], shape=[128, 128], dtype='float32')
# Make a parallel beam geometry with flat detector
geometry = odl.tomo.parallel_beam_geometry(space)
# Here we split the geometry according to both angular subsets and
# detector subsets.
# For practical applications these choices should be fine tuned,
# these values are selected to give an illustrative visualization.
split = 'interlaced'
if split == 'block':
# Split the data into blocks:
# 111 222 333
n = 20
ns = geometry.angles.size // n
ray_trafos = [odl.tomo.RayTransform(space, geometry[i * ns:(i + 1) * ns])
for i in range(n)]
elif split == 'interlaced':
# Split the data into slices:
# 123 123 123
n = 20
ray_trafos = [odl.tomo.RayTransform(space, geometry[i::n])
for i in range(n)]
# Create one large ray transform from components
ray_trafo = odl.BroadcastOperator(*ray_trafos)
# --- Generate artificial data --- #
# Create phantom
phantom = odl.phantom.shepp_logan(space, modified=True)
# Create sinogram by forward projecting the phantom (no noise is added here)
data = ray_trafo(phantom)
# Compute the relaxation parameter (step length) from the operator norm
omega = n * odl.power_method_opnorm(ray_trafo) ** (-2)
# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
odl.solvers.CallbackShow())
# Choose a starting point
x = space.zero()
# Run the algorithm, call the callback in each iteration for visualization.
# Note that using only 5 iterations still gives a decent reconstruction.
odl.solvers.kaczmarz(
ray_trafos, x, data, niter=5, omega=omega,
callback=callback, callback_loop='inner')
# Display images
phantom.show(title='Original Image')
x.show(title='Reconstructed Image', force_show=True)
| 0 |
import argparse
import os
import numpy as np
from pele.storage import Database
from pele.utils.optim_compatibility import OptimDBConverter
def main():
parser = argparse.ArgumentParser(description="""
convert an OPTIM database to a pele sqlite database. Four files are needed. Normally they are called:
points.min : the coordinates of the minima in binary format
min.data : additional information about the minima (like the energy)
points.ts : the coordinates of the transition states
    ts.data : additional information about transition states (like which minima they connect)
Other file names can optionally be passed. Some fortran compilers use non-standard endianness to save the
binary data. If your coordinates are garbage, try changing the endianness.
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--ndof', help='Number of total degrees of freedom (e.g. 3*number of atoms). This is simply the length of a coordinates vector.',
type=int, default=None)
parser.add_argument('--Database','-d', help = 'Name of database to write into', type = str, default="optimdb.sqlite")
parser.add_argument('--Mindata','-m', help = 'Name of min.data file', type = str, default="min.data")
parser.add_argument('--Tsdata','-t', help = 'Name of ts.data file', type = str, default="ts.data")
parser.add_argument('--Pointsmin','-p', help = 'Name of points.min file', type = str, default="points.min")
parser.add_argument('--Pointsts','-q', help = 'Name of points.ts file', type = str, default="points.ts")
parser.add_argument('--endianness', help = 'set the endianness of the binary data. Can be "<" for little-endian or ">" for big-endian', type = str, default="=")
args = parser.parse_args()
db = Database(args.Database)
cv = OptimDBConverter(database=db, ndof=args.ndof, mindata=args.Mindata,
tsdata=args.Tsdata, pointsmin=args.Pointsmin, pointsts=args.Pointsts,
endianness=args.endianness)
cv.setAccuracy()
cv.convert()
cv.db.session.commit()
if __name__ == "__main__":
main()
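# Example invocation (a sketch, not part of the original script; the script name,
# the file names and the --ndof value below are assumptions that depend on the
# particular OPTIM run being converted):
#
#   python optim2pele.py --ndof 93 -d lj31.sqlite \
#       -m min.data -t ts.data -p points.min -q points.ts --endianness "<"
#
# The resulting sqlite file can afterwards be opened with pele's Database class.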
| 0.024646 |
def create_trap_details_table(curs):
sql = '\n'.join([
"CREATE TABLE trap_details (",
" trap_details_id INTEGER PRIMARY KEY,",
" section_id INTEGER NOT NULL,",
" cr TEXT,",
" trap_type TEXT,",
" perception TEXT,",
" disable_device TEXT,",
" duration TEXT,",
" effect TEXT,",
" trigger TEXT,",
" reset TEXT",
")"])
curs.execute(sql)
def create_trap_details_index(curs):
sql = '\n'.join([
"CREATE INDEX trap_details_section_id",
" ON trap_details (section_id)"])
curs.execute(sql)
def insert_trap_detail(curs, section_id, cr=None, trap_type=None, perception=None, disable_device=None, duration=None, effect=None, trigger=None, reset=None, **kwargs):
values = [section_id, cr, trap_type, perception, disable_device, duration, effect, trigger, reset]
sql = '\n'.join([
"INSERT INTO trap_details",
" (section_id, cr, trap_type, perception, disable_device, duration, effect, trigger, reset)",
" VALUES",
" (?, ?, ?, ?, ?, ?, ?, ?, ?)"])
curs.execute(sql, values)
def delete_trap_detail(curs, section_id):
values = [section_id]
sql = '\n'.join([
"DELETE FROM trap_details",
" WHERE section_id = ?"])
curs.execute(sql, values)
def fetch_trap_detail(curs, section_id):
values = [section_id]
sql = '\n'.join([
"SELECT *",
" FROM trap_details",
" WHERE section_id = ?"])
    curs.execute(sql, values)
    return curs.fetchone()
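# Minimal usage sketch (not part of the original module): exercises the helpers
# against an in-memory SQLite database. The section_id value of 1 and the stat
# strings are made up, and the sections table that section_id would normally
# reference is not created here.
if __name__ == "__main__":
    import sqlite3
    conn = sqlite3.connect(":memory:")
    curs = conn.cursor()
    create_trap_details_table(curs)
    create_trap_details_index(curs)
    insert_trap_detail(curs, 1, cr="5", trap_type="mechanical",
                       perception="DC 20", disable_device="DC 20")
    print(fetch_trap_detail(curs, 1))
    conn.close()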
| 0.032353 |
from django.core import signing
from filecreator.creator import Creator
import arrow
from BunqAPI.callbacks import callback
import datetime
class Installation:
def __init__(self, user, api_key, password, delete_user=True):
self.user = user
self.api_key = api_key
self.password = password
self._delete_user = delete_user
def register_api_key(self):
c = callback(api_key=self.api_key, user=self.user, decrypt=False)
installation = c.installation()
if installation['status']:
enc_string = signing.dumps(obj=installation['data'],
key=self.password)
now = datetime.datetime.now()
json = {
'secret': enc_string,
'username': self.user.username,
'created': arrow.get(now).format(fmt='DD-MM-YYYY HH:mm:ss')
}
Creator(user=self.user).user_json(data=json)
return True
else: # pragma: no cover
if self._delete_user:
self.user.delete()
return False
@property
def status(self):
if self.register_api_key():
return True
else: # pragma: no cover
return False
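# Hypothetical usage from a Django view (sketch only; `request.user`, `api_key`
# and `password` are assumed to come from the surrounding views and are not
# defined in this module):
#
#   installer = Installation(user=request.user, api_key=api_key, password=password)
#   if installer.status:
#       pass  # key registered; encrypted user JSON written via Creator
#   else:
#       pass  # registration failed; the user was deleted (delete_user=True)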
| 0 |
import csv
import logging
#import os
import sys
from ftplib import FTP as _ftp
from json import dumps, loads
if sys.version_info < (3, 0):
import ConfigParser
else:
import configparser as ConfigParser
#import warnings
#warnings.warn("period must be positive", RuntimeWarning)
'''
Kitchen Sink
CSV - Some web scraping returns csv files
FTP - web scraping
INI - Each program reads the ini file(s) produced by the previous program and produces its own.
This chaining allows for more rapid development both in execution and debugging.
'''
class INI_BASE(object) :
@classmethod
def init(cls) :
ret = ConfigParser.ConfigParser()
ret.optionxform=str
return ret
@classmethod
def dump_name(cls, ret) :
if not isinstance(ret,str) :
return ret
ret = ret.replace('%', '_pct_')
ret = ret.replace('=', '_eq_')
return ret
@classmethod
def load_name(cls, ret) :
if not isinstance(ret,str) :
return ret
ret = ret.replace('_pct_','%')
ret = ret.replace('_eq_','=')
return ret
@classmethod
def load(cls, ret) :
ret = ret.strip()
if ret.startswith('{') and ret.endswith('}') :
ret = ret.replace("'",'"')
ret = ret.replace("`","'")
ret = loads(ret)
return ret
if ',' not in ret :
return [ret]
ret = ret.split(',')
ret = map(lambda key : key.strip(), ret)
return list(ret)
@classmethod
def _dump(cls, ret) :
if not isinstance(ret,str) :
if isinstance(ret,dict) :
ret = dumps(ret)
else :
ret = str(ret)
ret = ret.replace("'","`")
ret = ret.replace('"',"'")
return ret
@classmethod
def dump(cls, ret) :
if isinstance(ret,list) :
return ",".join(ret)
return cls._dump(ret)
class INI_READ(object) :
@classmethod
def read(cls, *file_list) :
for i, ini_file in enumerate(sorted(file_list)) :
for name, key, value in cls.read_ini(ini_file) :
yield ini_file, name, key, value
@classmethod
def read_ini(cls, path) :
fp = open(path)
config = INI_BASE.init()
config.read_file(fp)
for name, key, value in cls.read_section(config) :
key = INI_BASE.load_name(key)
value = INI_BASE.load(value)
value = INI_BASE.load_name(value)
yield name, key, value
fp.close()
@classmethod
def read_section(cls, config) :
for i, name in enumerate(sorted(config._sections)) :
for key, value in config.items(name) :
if len(value) == 0 : continue
yield name, key, value
class INI_WRITE(object) :
@classmethod
def write(cls, filename,**data) :
config = INI_BASE.init()
cls.write_ini(config,**data)
fp = open(filename, 'w')
config.write(fp)
fp.close()
@classmethod
def write_ini(cls, config,**data) :
for i, section in enumerate(sorted(data.keys())) :
values = data.get(section,{})
cls.write_section(config,section,**values)
@classmethod
def write_section(cls, config,section,**data) :
config.add_section(section)
for i, key in enumerate(sorted(data.keys())) :
value = INI_BASE.dump(data[key])
value = INI_BASE.dump_name(value)
key = INI_BASE.dump_name(key)
config.set(section,key,value)
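# Round-trip sketch for the INI helpers above (illustrative only; the file name,
# section and keys are made up):
#
#   INI_WRITE.write('example.ini', tickers={'symbols': ['AAPL', 'MSFT'],
#                                           'weights': {'AAPL': 0.6, 'MSFT': 0.4}})
#   for ini_file, section, key, value in INI_READ.read('example.ini'):
#       print(ini_file, section, key, value)
#
# dump_name/load_name exist because '%' and '=' are significant to ConfigParser,
# so they are swapped for the '_pct_' and '_eq_' placeholders on disk.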
class FTP:
get = 'RETR {pwd}'
def __init__(self, connection):
self.connection = connection
self.data = []
def __call__(self,s):
self.data.append(s)
def __str__(self) :
return "\n".join(self.data)
@classmethod
def init(cls, **kwargs) :
target='server'
server = kwargs.get(target,'ftp.kernel.org')
target='user'
user = kwargs.get(target,None)
target='pass'
pswd = kwargs.get(target,None)
ret = cls.login(server,user,pswd)
ret = cls(ret)
return ret
@classmethod
def login(cls, server,user,pswd):
if user is None or pswd is None :
ret = _ftp(server)
ret.login()
else :
ret = _ftp(server, user, pswd)
return ret
@classmethod
def GET(cls, obj, **kwargs) :
obj.data = []
get = cls.get.format(**kwargs)
obj.connection.retrlines(get,obj)
return obj
@classmethod
def LIST(cls, obj, **kwargs) :
target = 'pwd'
pwd = kwargs.get(target, None)
if pwd is not None :
obj.connection.cwd(pwd)
return obj.connection.nlst()
class CSV :
@classmethod
def to_dict(cls, path) :
logging.info("reading file {}".format(path))
with open(path, 'rt') as csvfile:
row_list = csv.DictReader(csvfile)
#ret = {row[0]:row[1] for row in row_list}
for row in row_list :
yield row
@classmethod
def rows(cls, path) :
logging.info("reading file {}".format(path))
with open(path, 'rt') as csvfile:
row_list = csv.reader(csvfile)
for row in row_list :
yield row
@classmethod
def grep(cls, path, *arg_list) :
ret = {}
for row in CSV.rows(path) :
for arg in arg_list :
key, _row = CSV._grep(arg, row)
if key :
ret[key] = _row
return ret
@classmethod
def _grep(cls, key, row) :
flag = filter(lambda t : key == t, row)
flag = list(flag)
if len(flag) > 0 :
return key, row
alt_key = key.replace('-P','-')
flag = filter(lambda t : alt_key == t, row)
flag = list(flag)
if len(flag) > 0 :
return key, row
return None, None
if __name__ == "__main__" :
import sys
import logging
log_msg = '%(module)s.%(funcName)s(%(lineno)s) %(levelname)s - %(message)s'
logging.basicConfig(stream=sys.stdout, format=log_msg, level=logging.DEBUG)
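# Commented-out FTP/CSV examples (kept inert so running this module does not
# touch the network or require local files; the server is FTP.init's default
# and the paths/file names are only illustrative):
#
# ftp = FTP.init(server='ftp.kernel.org')
# print(FTP.LIST(ftp, pwd='/pub'))
# print(FTP.GET(ftp, pwd='/pub/README'))
# print(CSV.grep('prices.csv', 'AAPL'))  # prices.csv is a made-up file name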
| 0.04567 |
from boxbranding import getBoxType
from twisted.internet import threads
from enigma import eDBoxLCD, eTimer
from config import config, ConfigSubsection, ConfigSelection, ConfigSlider, ConfigYesNo, ConfigNothing
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists
import usb
def IconCheck(session=None, **kwargs):
if fileExists("/proc/stb/lcd/symbol_network") or fileExists("/proc/stb/lcd/symbol_usb"):
global networklinkpoller
networklinkpoller = IconCheckPoller()
networklinkpoller.start()
class IconCheckPoller:
def __init__(self):
self.timer = eTimer()
def start(self):
if self.iconcheck not in self.timer.callback:
self.timer.callback.append(self.iconcheck)
self.timer.startLongTimer(0)
def stop(self):
if self.iconcheck in self.timer.callback:
self.timer.callback.remove(self.iconcheck)
self.timer.stop()
def iconcheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(30)
def JobTask(self):
LinkState = 0
if fileExists('/sys/class/net/wlan0/operstate'):
LinkState = open('/sys/class/net/wlan0/operstate').read()
if LinkState != 'down':
LinkState = open('/sys/class/net/wlan0/operstate').read()
elif fileExists('/sys/class/net/eth0/operstate'):
LinkState = open('/sys/class/net/eth0/operstate').read()
if LinkState != 'down':
LinkState = open('/sys/class/net/eth0/carrier').read()
LinkState = LinkState[:1]
if fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write(str(LinkState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_network", "w")
f.write('0')
f.close()
USBState = 0
busses = usb.busses()
for bus in busses:
devices = bus.devices
for dev in devices:
if dev.deviceClass != 9 and dev.deviceClass != 2 and dev.idVendor > 0:
# print ' '
# print "Device:", dev.filename
# print " Number:", dev.deviceClass
# print " idVendor: %d (0x%04x)" % (dev.idVendor, dev.idVendor)
# print " idProduct: %d (0x%04x)" % (dev.idProduct, dev.idProduct)
USBState = 1
if fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '1':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write(str(USBState))
f.close()
elif fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '0':
f = open("/proc/stb/lcd/symbol_usb", "w")
f.write('0')
f.close()
self.timer.startLongTimer(30)
class LCD:
def __init__(self):
pass
def setBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
eDBoxLCD.getInstance().setLCDBrightness(value)
def setContrast(self, value):
value *= 63
value /= 20
if value > 63:
value = 63
eDBoxLCD.getInstance().setLCDContrast(value)
def setInverted(self, value):
if value:
value = 255
eDBoxLCD.getInstance().setInverted(value)
def setFlipped(self, value):
eDBoxLCD.getInstance().setFlipped(value)
def isOled(self):
return eDBoxLCD.getInstance().isOled()
def setMode(self, value):
print 'setLCDMode',value
f = open("/proc/stb/lcd/show_symbols", "w")
f.write(value)
f.close()
def setRepeat(self, value):
print 'setLCDRepeat',value
f = open("/proc/stb/lcd/scroll_repeats", "w")
f.write(value)
f.close()
def setScrollspeed(self, value):
print 'setLCDScrollspeed',value
f = open("/proc/stb/lcd/scroll_delay", "w")
f.write(str(value))
f.close()
def setLEDNormalState(self, value):
eDBoxLCD.getInstance().setLED(value, 0)
def setLEDDeepStandbyState(self, value):
eDBoxLCD.getInstance().setLED(value, 1)
def setLEDBlinkingTime(self, value):
eDBoxLCD.getInstance().setLED(value, 2)
def setLEDStandby(self, value):
file = open("/proc/stb/power/standbyled", "w")
file.write(value and "on" or "off")
file.close()
def setLCDMiniTVMode(self, value):
print 'setLCDMiniTVMode',value
f = open('/proc/stb/lcd/mode', "w")
f.write(value)
f.close()
def setLCDMiniTVPIPMode(self, value):
print 'setLCDMiniTVPIPMode',value
def setLCDMiniTVFPS(self, value):
print 'setLCDMiniTVFPS',value
f = open('/proc/stb/lcd/fps', "w")
f.write("%d \n" % value)
f.close()
def leaveStandby():
config.lcd.bright.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightness.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def standbyCounterChanged(configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
config.lcd.standby.apply()
if SystemInfo["LEDButtons"]:
config.lcd.ledbrightnessstandby.apply()
config.lcd.ledbrightnessdeepstandby.apply()
def InitLcd():
if getBoxType() in ('gb800se', 'gb800solo', 'iqonios300hd', 'tmsingle', 'tmnano2super', 'vusolo', 'vusolose', 'et4x00', 'et5x00', 'et6x00'):
detected = False
else:
detected = eDBoxLCD.getInstance().detected()
ilcd = LCD()
SystemInfo["Display"] = detected
config.lcd = ConfigSubsection()
if SystemInfo["StandbyLED"]:
def setLEDstandby(configElement):
ilcd.setLEDStandby(configElement.value)
config.usage.standbyLED = ConfigYesNo(default = True)
config.usage.standbyLED.addNotifier(setLEDstandby)
if SystemInfo["LEDButtons"]:
def setLEDnormalstate(configElement):
ilcd.setLEDNormalState(configElement.value)
def setLEDdeepstandby(configElement):
ilcd.setLEDDeepStandbyState(configElement.value)
def setLEDblinkingtime(configElement):
ilcd.setLEDBlinkingTime(configElement.value)
config.lcd.ledblinkingtime = ConfigSlider(default = 5, increment = 1, limits = (0,15))
config.lcd.ledblinkingtime.addNotifier(setLEDblinkingtime)
config.lcd.ledbrightnessdeepstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDdeepstandby)
config.lcd.ledbrightnessdeepstandby.apply = lambda : setLEDdeepstandby(config.lcd.ledbrightnessdeepstandby)
config.lcd.ledbrightnessstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15))
config.lcd.ledbrightnessstandby.addNotifier(setLEDnormalstate)
config.lcd.ledbrightnessstandby.apply = lambda : setLEDnormalstate(config.lcd.ledbrightnessstandby)
config.lcd.ledbrightness = ConfigSlider(default = 3, increment = 1, limits = (0,15))
config.lcd.ledbrightness.addNotifier(setLEDnormalstate)
config.lcd.ledbrightness.apply = lambda : setLEDnormalstate(config.lcd.ledbrightness)
config.lcd.ledbrightness.callNotifiersOnSaveAndCancel = True
if detected:
config.lcd.scroll_speed = ConfigSelection(default = "300", choices = [
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
config.lcd.scroll_delay = ConfigSelection(default = "10000", choices = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
def setLCDbright(configElement):
ilcd.setBright(configElement.value)
def setLCDcontrast(configElement):
ilcd.setContrast(configElement.value)
def setLCDinverted(configElement):
ilcd.setInverted(configElement.value)
def setLCDflipped(configElement):
ilcd.setFlipped(configElement.value)
def setLCDmode(configElement):
ilcd.setMode(configElement.value)
def setLCDrepeat(configElement):
ilcd.setRepeat(configElement.value)
def setLCDscrollspeed(configElement):
ilcd.setScrollspeed(configElement.value)
def setLCDminitvmode(configElement):
ilcd.setLCDMiniTVMode(configElement.value)
def setLCDminitvpipmode(configElement):
ilcd.setLCDMiniTVPIPMode(configElement.value)
def setLCDminitvfps(configElement):
ilcd.setLCDMiniTVFPS(configElement.value)
standby_default = 0
if not ilcd.isOled():
config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20))
config.lcd.contrast.addNotifier(setLCDcontrast)
else:
config.lcd.contrast = ConfigNothing()
standby_default = 1
config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10))
config.lcd.standby.addNotifier(setLCDbright)
config.lcd.standby.apply = lambda : setLCDbright(config.lcd.standby)
config.lcd.bright = ConfigSlider(default=5, limits=(0, 10))
config.lcd.bright.addNotifier(setLCDbright)
config.lcd.bright.apply = lambda : setLCDbright(config.lcd.bright)
config.lcd.bright.callNotifiersOnSaveAndCancel = True
config.lcd.invert = ConfigYesNo(default=False)
config.lcd.invert.addNotifier(setLCDinverted)
config.lcd.flip = ConfigYesNo(default=False)
config.lcd.flip.addNotifier(setLCDflipped)
if SystemInfo["LCDMiniTV"]:
config.lcd.minitvmode = ConfigSelection([("0", _("normal")), ("1", _("MiniTV")), ("2", _("OSD")), ("3", _("MiniTV with OSD"))], "0")
config.lcd.minitvmode.addNotifier(setLCDminitvmode)
config.lcd.minitvpipmode = ConfigSelection([("0", _("off")), ("5", _("PIP")), ("7", _("PIP with OSD"))], "0")
config.lcd.minitvpipmode.addNotifier(setLCDminitvpipmode)
config.lcd.minitvfps = ConfigSlider(default=30, limits=(0, 30))
config.lcd.minitvfps.addNotifier(setLCDminitvfps)
if fileExists("/proc/stb/lcd/scroll_delay"):
config.lcd.scrollspeed = ConfigSlider(default = 150, increment = 10, limits = (0, 500))
config.lcd.scrollspeed.addNotifier(setLCDscrollspeed)
else:
config.lcd.scrollspeed = ConfigNothing()
if fileExists("/proc/stb/lcd/scroll_repeats"):
config.lcd.repeat = ConfigSelection([("0", _("None")), ("1", _("1X")), ("2", _("2X")), ("3", _("3X")), ("4", _("4X")), ("500", _("Continues"))], "3")
config.lcd.repeat.addNotifier(setLCDrepeat)
else:
config.lcd.repeat = ConfigNothing()
if fileExists("/proc/stb/lcd/show_symbols"):
config.lcd.mode = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1")
config.lcd.mode.addNotifier(setLCDmode)
else:
config.lcd.mode = ConfigNothing()
else:
def doNothing():
pass
config.lcd.contrast = ConfigNothing()
config.lcd.bright = ConfigNothing()
config.lcd.standby = ConfigNothing()
config.lcd.bright.apply = lambda : doNothing()
config.lcd.standby.apply = lambda : doNothing()
config.lcd.mode = ConfigNothing()
config.lcd.repeat = ConfigNothing()
config.lcd.scrollspeed = ConfigNothing()
config.lcd.ledbrightness = ConfigNothing()
config.lcd.ledbrightness.apply = lambda : doNothing()
config.lcd.ledbrightnessstandby = ConfigNothing()
config.lcd.ledbrightnessstandby.apply = lambda : doNothing()
config.lcd.ledbrightnessdeepstandby = ConfigNothing()
config.lcd.ledbrightnessdeepstandby.apply = lambda : doNothing()
config.lcd.ledblinkingtime = ConfigNothing()
config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call = False)
| 0.031108 |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import oslo_messaging
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LE, _LI
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class ReqPublisher(zmq_publisher_base.PublisherBase):
def send_request(self, request):
if request.msg_type != zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
socket = self._connect_to_host(request.target)
self._send_request(socket, request)
return self._receive_reply(socket, request)
def _connect_to_host(self, target):
try:
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.REQ)
host = self.matchmaker.get_single_host(target)
connect_address = zmq_address.get_tcp_direct_address(host)
LOG.info(_LI("Connecting REQ to %s") % connect_address)
socket.connect(connect_address)
self.outbound_sockets[str(target)] = socket
return socket
except zmq.ZMQError as e:
errmsg = _LE("Error connecting to socket: %s") % str(e)
LOG.error(_LE("Error connecting to socket: %s") % str(e))
raise rpc_common.RPCException(errmsg)
@staticmethod
def _receive_reply(socket, request):
def _receive_method(socket):
return socket.recv_pyobj()
# NOTE(ozamiatin): Check for retry here (no retries now)
with contextlib.closing(zmq_async.get_reply_poller()) as poller:
poller.register(socket, recv_method=_receive_method)
reply, socket = poller.poll(timeout=request.timeout)
if reply is None:
raise oslo_messaging.MessagingTimeout(
"Timeout %s seconds was reached" % request.timeout)
if reply[zmq_names.FIELD_FAILURE]:
raise rpc_common.deserialize_remote_exception(
reply[zmq_names.FIELD_FAILURE],
request.allowed_remote_exmods)
else:
return reply[zmq_names.FIELD_REPLY]
def close(self):
# For contextlib compatibility
self.cleanup()
| 0 |
import random
import sqlite3
from game_variables import motivation, coach_off_iq, coach_def_iq, training, offense_playbook, defense_playbook, leadership, major_bonus
from coach_first_names import coach_first_names
from player_last_names import player_last_names
def create_stat(stat): # assumes a min/max tuple as input
min = stat[0] # helper function that aids in class object creation
max = stat[1]
selection = random.randrange(min, max)
return selection
#################################################
# Coach database structure for future reference
#
# database.execute('''CREATE TABLE coach_db (Id integer primary key,
# league_id REFERENCES league_table(Id),
# team_id REFERENCES team_db(Id),
# name, motivation, coach_off_iq, coach_def_iq, training,
# leadership, offense_playbook, defense_playbook, coach_rating)''')
#################################################
class Coach:
def __init__(self):
self.league_id = 0 # invalid pk value as default, needs to be overridden before being stuffed into db
self.team = 0 # invalid pk value as default, needs to be overridden before being stuffed into db
def update_coach(self): # updates every coach field except for name
connection = sqlite3.connect('league.db')
database = connection.cursor()
coach_attributes = (self.league_id, self.team, self.motivation, self.coach_off_iq, self.coach_def_iq, self.training, self.leadership, self.offense_playbook, self.defense_playbook, self.coach_rating, self.db_id)
database.execute('''UPDATE coach_db
SET league_id = ?,
team_id = ?,
motivation = ?,
coach_off_iq = ?,
coach_def_iq = ?,
training = ?,
leadership = ?,
offense_playbook = ?,
defense_playbook = ?,
coach_rating = ?
                            WHERE Id = ?''', coach_attributes)
print "coach", self.name, "updated"
connection.commit()
connection.close()
def create_coach(self, league_id):
self.league_id = league_id
self.name = random.choice(coach_first_names) + " " + random.choice(player_last_names)
self.motivation = create_stat(motivation)
self.coach_off_iq = create_stat(coach_off_iq)
self.coach_def_iq = create_stat(coach_def_iq)
self.training = create_stat(training)
self.leadership = create_stat(leadership)
self.offense_playbook = offense_playbook[str(random.randint(1,3))]
self.defense_playbook = defense_playbook[str(random.randint(1,3))]
def rating_boost(): # because every coach should be good at 1 thing in the very least
to_boost = random.randint(1,5)
if to_boost == 1:
major_bonus(self.motivation)
return self.motivation
elif to_boost == 2:
major_bonus(self.coach_off_iq)
return self.coach_off_iq
elif to_boost == 3:
major_bonus(self.coach_def_iq)
return self.coach_def_iq
elif to_boost == 4:
major_bonus(self.training)
return self.training
elif to_boost == 5:
major_bonus(self.leadership)
def coach_rating():
total = self.motivation + self.coach_off_iq + self.coach_def_iq + self.training + self.leadership
rating = int(total / 4.5)
return rating
def insert_coach(self): # puts the coach class object into the coach database table
connection = sqlite3.connect('league.db')
database = connection.cursor()
coach_attributes = (self.league_id, self.team, self.name, self.motivation, self.coach_off_iq, self.coach_def_iq, self.training, self.leadership, self.offense_playbook, self.defense_playbook, self.coach_rating)
database.execute('''INSERT INTO coach_db
(league_id, team_id, name, motivation, coach_off_iq, coach_def_iq, training,leadership, offense_playbook, defense_playbook, coach_rating)
VALUES(?,?,?,?,?,?,?,?,?,?,?)''', coach_attributes)
connection.commit()
self.db_id = database.lastrowid
connection.close()
rating_boost()
self.coach_rating = coach_rating()
insert_coach(self)
def load_coaches(league_pk):
connection = sqlite3.connect('league.db')
database = connection.cursor()
league_id = league_pk
coach_pool = []
    database.execute('''SELECT league_id, team_id, name, motivation, coach_off_iq, coach_def_iq, training,leadership, offense_playbook, defense_playbook, coach_rating, Id FROM coach_db WHERE league_id = ?''', (league_id,))
#for coach in range(number_of_coaches):
coach = 0
coach_attributes = database.fetchone()
while coach_attributes != None:
coach_pool.append(Coach())
print "attempting coach resurrection"
coach_pool[coach].league_id = coach_attributes[0]
coach_pool[coach].team = coach_attributes[1]
coach_pool[coach].name = coach_attributes[2]
coach_pool[coach].motivation = coach_attributes[3]
coach_pool[coach].coach_off_iq = coach_attributes[4]
coach_pool[coach].coach_def_iq = coach_attributes[5]
coach_pool[coach].training = coach_attributes[6]
coach_pool[coach].leadership = coach_attributes[7]
coach_pool[coach].offense_playbook = coach_attributes[8]
coach_pool[coach].defense_playbook = coach_attributes[9]
coach_pool[coach].coach_rating = coach_attributes[10]
coach_pool[coach].db_id = coach_attributes[11]
print coach_pool[coach].name, " resurrected"
coach += 1
coach_attributes = database.fetchone()
connection.commit()
connection.close()
return coach_pool
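# Sketch of the intended flow (assumes league.db already contains the coach_db
# table from the commented schema above; the league id of 1 is made up):
#
#   coach = Coach()
#   coach.create_coach(league_id=1)  # rolls stats, boosts one, inserts the row
#   coaches = load_coaches(1)        # re-hydrates every coach for that league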
| 0.026498 |
import sys
import re
import os
from collections import defaultdict, namedtuple
from numba.core.config import IS_WIN32, IS_OSX
from numba.misc.findlib import find_lib, find_file
_env_path_tuple = namedtuple('_env_path_tuple', ['by', 'info'])
def _find_valid_path(options):
"""Find valid path from *options*, which is a list of 2-tuple of
(name, path). Return first pair where *path* is not None.
If no valid path is found, return ('<unknown>', None)
"""
for by, data in options:
if data is not None:
return by, data
else:
return '<unknown>', None
def _get_libdevice_path_decision():
options = [
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home('nvvm', 'libdevice')),
('System', get_system_ctk('nvvm', 'libdevice')),
('Debian package', get_debian_pkg_libdevice()),
]
by, libdir = _find_valid_path(options)
return by, libdir
def _nvvm_lib_dir():
if IS_WIN32:
return 'nvvm', 'bin'
elif IS_OSX:
return 'nvvm', 'lib'
else:
return 'nvvm', 'lib64'
def _get_nvvm_path_decision():
options = [
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home(*_nvvm_lib_dir())),
('System', get_system_ctk(*_nvvm_lib_dir())),
]
by, path = _find_valid_path(options)
return by, path
def _get_libdevice_paths():
by, libdir = _get_libdevice_path_decision()
# Search for pattern
pat = r'libdevice(\.(?P<arch>compute_\d+))?(\.\d+)*\.bc$'
candidates = find_file(re.compile(pat), libdir)
# Grouping
out = defaultdict(list)
for path in candidates:
m = re.search(pat, path)
arch = m.group('arch')
out[arch].append(path)
# Keep only the max (most recent version) of the bitcode files.
out = {k: max(v) for k, v in out.items()}
return _env_path_tuple(by, out)
def _cudalib_path():
if IS_WIN32:
return 'bin'
elif IS_OSX:
return 'lib'
else:
return 'lib64'
def _get_cudalib_dir_path_decision():
options = [
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home(_cudalib_path())),
('System', get_system_ctk(_cudalib_path())),
]
by, libdir = _find_valid_path(options)
return by, libdir
def _get_cudalib_dir():
by, libdir = _get_cudalib_dir_path_decision()
return _env_path_tuple(by, libdir)
def get_system_ctk(*subdirs):
"""Return path to system-wide cudatoolkit; or, None if it doesn't exist.
"""
# Linux?
if sys.platform.startswith('linux'):
# Is cuda alias to /usr/local/cuda?
# We are intentionally not getting versioned cuda installation.
base = '/usr/local/cuda'
if os.path.exists(base):
return os.path.join(base, *subdirs)
def get_conda_ctk():
"""Return path to directory containing the shared libraries of cudatoolkit.
"""
is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
if not is_conda_env:
return
    # Assume the existence of NVVM implies that the CUDA toolkit is installed
paths = find_lib('nvvm')
if not paths:
return
# Use the directory name of the max path
return os.path.dirname(max(paths))
def get_cuda_home(*subdirs):
"""Get paths of CUDA_HOME.
If *subdirs* are the subdirectory name to be appended in the resulting
path.
"""
cuda_home = os.environ.get('CUDA_HOME')
if cuda_home is None:
# Try Windows CUDA installation without Anaconda
cuda_home = os.environ.get('CUDA_PATH')
if cuda_home is not None:
return os.path.join(cuda_home, *subdirs)
def _get_nvvm_path():
by, path = _get_nvvm_path_decision()
candidates = find_lib('nvvm', path)
path = max(candidates) if candidates else None
return _env_path_tuple(by, path)
def get_cuda_paths():
"""Returns a dictionary mapping component names to a 2-tuple
of (source_variable, info).
The returned dictionary will have the following keys and infos:
- "nvvm": file_path
- "libdevice": List[Tuple[arch, file_path]]
- "cudalib_dir": directory_path
Note: The result of the function is cached.
"""
# Check cache
if hasattr(get_cuda_paths, '_cached_result'):
return get_cuda_paths._cached_result
else:
# Not in cache
d = {
'nvvm': _get_nvvm_path(),
'libdevice': _get_libdevice_paths(),
'cudalib_dir': _get_cudalib_dir(),
}
# Cache result
get_cuda_paths._cached_result = d
return d
def get_debian_pkg_libdevice():
"""
Return the Debian NVIDIA Maintainers-packaged libdevice location, if it
exists.
"""
pkg_libdevice_location = '/usr/lib/nvidia-cuda-toolkit/libdevice'
if not os.path.exists(pkg_libdevice_location):
return None
return pkg_libdevice_location
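# Illustrative check of what this module resolves on the current machine (not
# part of the upstream source; safe on systems without CUDA, where the entries
# simply report info=None):
if __name__ == "__main__":
    for component, (by, info) in get_cuda_paths().items():
        print("%-12s found by %-20s -> %r" % (component, by, info))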
| 0 |
from Components.ActionMap import ActionMap, HelpableActionMap, NumberActionMap
from Components.Button import Button
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Components.SystemInfo import SystemInfo
from Components.config import config, ConfigSubsection, ConfigText, ConfigYesNo
from Components.PluginComponent import plugins
from Screens.ChoiceBox import ChoiceBox
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Tools.BoundFunction import boundFunction
from ServiceReference import ServiceReference
from enigma import eServiceReference
import os
def getHotkeys():
return [(_("Red") + " " + _("long"), "red_long", ""),
(_("Green") + " " + _("long"), "green_long", ""),
(_("Yellow") + " " + _("long"), "yellow_long", ""),
(_("Blue") + " " + _("long"), "blue_long", "Plugins/PLi/SoftcamSetup/1"),
("F1/LAN", "f1", ""),
("F1" + " " + _("long"), "f1_long", ""),
("F2", "f2", ""),
("F2" + " " + _("long"), "f2_long", ""),
("F3", "f3", ""),
("F3" + " " + _("long"), "f3_long", ""),
(_("Red"), "red", ""),
(_("Green"), "green", ""),
(_("Yellow"), "yellow", ""),
(_("Blue"), "blue", ""),
("Rec", "rec", ""),
("Radio", "radio", ""),
("TV", "showTv", ""),
("Teletext", "text", ""),
("Help", "displayHelp", ""),
("Help" + " " + _("long"), "displayHelp_long", ""),
("Subtitle", "subtitle", ""),
("Menu", "mainMenu", ""),
("Info (EPG)", "info", "Infobar/openEventView"),
("Info (EPG)" + " " + _("long"), "info_long", "Infobar/showEventInfoPlugins"),
("List/Fav/PVR", "list", ""),
("Back/Recall", "back", ""),
("Back/Recall" + " " + _("long"), "back_long", ""),
("End", "end", ""),
("Epg/Guide", "epg", "Plugins/Extensions/GraphMultiEPG/1"),
("Epg/Guide" + " " + _("long"), "epg_long", "Infobar/showEventInfoPlugins"),
("Left", "cross_left", ""),
("Right", "cross_right", ""),
("Up", "cross_up", ""),
("Down", "cross_down", ""),
("Ok", "ok", ""),
("Channel up", "channelup", ""),
("Channel down", "channeldown", ""),
("Next", "next", ""),
("Previous", "previous", ""),
("Audio", "audio", ""),
("Play", "play", ""),
("Playpause", "playpause", ""),
("Stop", "stop", ""),
("Pause", "pause", ""),
("Rewind", "rewind", ""),
("Fastforward", "fastforward", ""),
("Skip back", "skip_back", ""),
("Skip forward", "skip_forward", ""),
("activatePiP", "activatePiP", ""),
("Timer", "timer", ""),
("Timer" + " " + _("long"), "timer_long", ""),
("Playlist", "playlist", ""),
("Timeshift", "timeshift", ""),
("Search", "search", ""),
("Search" + " " + _("long"), "search_long", ""),
("Slow", "slow", ""),
("Mark/Portal/Playlist", "mark", ""),
("Mark/Portal/Playlist" + " " + _("long"), "mark_long", ""),
("Sleep", "sleep", ""),
("Sleep" + " " + _("long"), "sleep_long", ""),
("Context", "contextmenu", ""),
("Context" + " " + _("long"), "contextmenu_long", ""),
("Video Mode", "vmode", ""),
("Video Mode" + " " + _("long"), "vmode_long", ""),
("Home", "home", ""),
("Power", "power", ""),
("Power" + " " + _("long"), "power_long", ""),
("HDMIin", "HDMIin", "Infobar/HDMIIn"),
("HDMIin" + " " + _("long"), "HDMIin_long", SystemInfo["LcdLiveTV"] and "Infobar/ToggleLCDLiveTV" or "")]
config.misc.hotkey = ConfigSubsection()
config.misc.hotkey.additional_keys = ConfigYesNo(default=False)
for x in getHotkeys():
exec "config.misc.hotkey." + x[1] + " = ConfigText(default='" + x[2] + "')"
def getHotkeyFunctions():
hotkeyFunctions = []
twinPlugins = []
twinPaths = {}
pluginlist = plugins.getPlugins(PluginDescriptor.WHERE_EVENTINFO)
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path and 'selectedevent' not in plugin.__call__.func_code.co_varnames:
if twinPaths.has_key(plugin.path[24:]):
twinPaths[plugin.path[24:]] += 1
else:
twinPaths[plugin.path[24:]] = 1
hotkeyFunctions.append((plugin.name, plugin.path[24:] + "/" + str(twinPaths[plugin.path[24:]]) , "EPG"))
twinPlugins.append(plugin.name)
pluginlist = plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU, PluginDescriptor.WHERE_EXTENSIONSMENU])
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path:
if twinPaths.has_key(plugin.path[24:]):
twinPaths[plugin.path[24:]] += 1
else:
twinPaths[plugin.path[24:]] = 1
hotkeyFunctions.append((plugin.name, plugin.path[24:] + "/" + str(twinPaths[plugin.path[24:]]) , "Plugins"))
twinPlugins.append(plugin.name)
hotkeyFunctions.append((_("Main menu"), "Infobar/mainMenu", "InfoBar"))
hotkeyFunctions.append((_("Show help"), "Infobar/showHelp", "InfoBar"))
hotkeyFunctions.append((_("Show extension selection"), "Infobar/showExtensionSelection", "InfoBar"))
hotkeyFunctions.append((_("Zap down"), "Infobar/zapDown", "InfoBar"))
hotkeyFunctions.append((_("Zap up"), "Infobar/zapUp", "InfoBar"))
hotkeyFunctions.append((_("Switch channel up"), "Infobar/switchChannelUp", "InfoBar"))
hotkeyFunctions.append((_("Switch channel down"), "Infobar/switchChannelDown", "InfoBar"))
hotkeyFunctions.append((_("Show service list"), "Infobar/openServiceList", "InfoBar"))
hotkeyFunctions.append((_("Show movies"), "Infobar/showMovies", "InfoBar"))
hotkeyFunctions.append((_("Show servicelist or movies"), "Infobar/showServiceListOrMovies", "InfoBar"))
hotkeyFunctions.append((_("Show favourites list"), "Infobar/openFavouritesList", "InfoBar"))
hotkeyFunctions.append((_("History back"), "Infobar/historyBack", "InfoBar"))
hotkeyFunctions.append((_("History next"), "Infobar/historyNext", "InfoBar"))
hotkeyFunctions.append((_("Recall to previous service"), "Infobar/servicelist/recallPrevService", "InfoBar"))
hotkeyFunctions.append((_("Show eventinfo plugins"), "Infobar/showEventInfoPlugins", "EPG"))
hotkeyFunctions.append((_("Show event details"), "Infobar/openEventView", "EPG"))
hotkeyFunctions.append((_("Show single service EPG"), "Infobar/openSingleServiceEPG", "EPG"))
hotkeyFunctions.append((_("Show multi channel EPG"), "Infobar/openMultiServiceEPG", "EPG"))
hotkeyFunctions.append((_("Show Audioselection"), "Infobar/audioSelection", "InfoBar"))
hotkeyFunctions.append((_("Switch to radio mode"), "Infobar/showRadio", "InfoBar"))
hotkeyFunctions.append((_("Switch to TV mode"), "Infobar/showTv", "InfoBar"))
hotkeyFunctions.append((_("Instant record"), "Infobar/instantRecord", "InfoBar"))
hotkeyFunctions.append((_("Start instant recording"), "Infobar/startInstantRecording", "InfoBar"))
hotkeyFunctions.append((_("Activate timeshift End"), "Infobar/activateTimeshiftEnd", "InfoBar"))
hotkeyFunctions.append((_("Activate timeshift end and pause"), "Infobar/activateTimeshiftEndAndPause", "InfoBar"))
hotkeyFunctions.append((_("Start timeshift"), "Infobar/startTimeshift", "InfoBar"))
hotkeyFunctions.append((_("Stop timeshift"), "Infobar/stopTimeshift", "InfoBar"))
hotkeyFunctions.append((_("Start teletext"), "Infobar/startTeletext", "InfoBar"))
hotkeyFunctions.append((_("Show subservice selection"), "Infobar/subserviceSelection", "InfoBar"))
hotkeyFunctions.append((_("Show subtitle selection"), "Infobar/subtitleSelection", "InfoBar"))
hotkeyFunctions.append((_("Show InfoBar"), "Infobar/showFirstInfoBar", "InfoBar"))
hotkeyFunctions.append((_("Show second InfoBar"), "Infobar/showSecondInfoBar", "InfoBar"))
hotkeyFunctions.append((_("Toggle infoBar"), "Infobar/toggleShow", "InfoBar"))
hotkeyFunctions.append((_("Letterbox zoom"), "Infobar/vmodeSelection", "InfoBar"))
if SystemInfo["PIPAvailable"]:
hotkeyFunctions.append((_("Show PIP"), "Infobar/showPiP", "InfoBar"))
hotkeyFunctions.append((_("Swap PIP"), "Infobar/swapPiP", "InfoBar"))
hotkeyFunctions.append((_("Move PIP"), "Infobar/movePiP", "InfoBar"))
hotkeyFunctions.append((_("Toggle PIPzap"), "Infobar/togglePipzap", "InfoBar"))
hotkeyFunctions.append((_("Activate HbbTV (Redbutton)"), "Infobar/activateRedButton", "InfoBar"))
hotkeyFunctions.append((_("Toggle HDMI In"), "Infobar/HDMIIn", "InfoBar"))
if SystemInfo["LcdLiveTV"]:
hotkeyFunctions.append((_("Toggle LCD LiveTV"), "Infobar/ToggleLCDLiveTV", "InfoBar"))
hotkeyFunctions.append((_("HotKey Setup"), "Module/Screens.Hotkey/HotkeySetup", "Setup"))
hotkeyFunctions.append((_("Software update"), "Module/Screens.SoftwareUpdate/UpdatePlugin", "Setup"))
hotkeyFunctions.append((_("Latest Commits"), "Module/Screens.About/CommitInfo", "Setup"))
hotkeyFunctions.append((_("CI (Common Interface) Setup"), "Module/Screens.Ci/CiSelection", "Setup"))
hotkeyFunctions.append((_("Tuner Configuration"), "Module/Screens.Satconfig/NimSelection", "Scanning"))
hotkeyFunctions.append((_("Manual Scan"), "Module/Screens.ScanSetup/ScanSetup", "Scanning"))
hotkeyFunctions.append((_("Automatic Scan"), "Module/Screens.ScanSetup/ScanSimple", "Scanning"))
for plugin in plugins.getPluginsForMenu("scan"):
hotkeyFunctions.append((plugin[0], "MenuPlugin/scan/" + plugin[2], "Scanning"))
hotkeyFunctions.append((_("Network"), "Module/Screens.NetworkSetup/NetworkAdapterSelection", "Setup"))
hotkeyFunctions.append((_("Plugin Browser"), "Module/Screens.PluginBrowser/PluginBrowser", "Setup"))
hotkeyFunctions.append((_("Sleeptimer edit"), "Module/Screens.SleepTimerEdit/SleepTimerEdit", "Setup"))
hotkeyFunctions.append((_("Channel Info"), "Module/Screens.ServiceInfo/ServiceInfo", "Setup"))
hotkeyFunctions.append((_("Timer"), "Module/Screens.TimerEdit/TimerEditList", "Setup"))
for plugin in plugins.getPluginsForMenu("system"):
if plugin[2]:
hotkeyFunctions.append((plugin[0], "MenuPlugin/system/" + plugin[2], "Setup"))
hotkeyFunctions.append((_("Standby"), "Module/Screens.Standby/Standby", "Power"))
hotkeyFunctions.append((_("Restart"), "Module/Screens.Standby/TryQuitMainloop/2", "Power"))
hotkeyFunctions.append((_("Restart enigma"), "Module/Screens.Standby/TryQuitMainloop/3", "Power"))
hotkeyFunctions.append((_("Deep standby"), "Module/Screens.Standby/TryQuitMainloop/1", "Power"))
hotkeyFunctions.append((_("Usage Setup"), "Setup/usage", "Setup"))
hotkeyFunctions.append((_("User interface"), "Setup/userinterface", "Setup"))
hotkeyFunctions.append((_("Recording Setup"), "Setup/recording", "Setup"))
hotkeyFunctions.append((_("Harddisk Setup"), "Setup/harddisk", "Setup"))
hotkeyFunctions.append((_("Subtitles Settings"), "Setup/subtitlesetup", "Setup"))
hotkeyFunctions.append((_("Language"), "Module/Screens.LanguageSelection/LanguageSelection", "Setup"))
hotkeyFunctions.append((_("Memory Info"), "Module/Screens.About/MemoryInfo", "Setup"))
if os.path.isdir("/etc/ppanels"):
for x in [x for x in os.listdir("/etc/ppanels") if x.endswith(".xml")]:
x = x[:-4]
hotkeyFunctions.append((_("PPanel") + " " + x, "PPanel/" + x, "PPanels"))
if os.path.isdir("/usr/script"):
for x in [x for x in os.listdir("/usr/script") if x.endswith(".sh")]:
x = x[:-3]
hotkeyFunctions.append((_("Shellscript") + " " + x, "Shellscript/" + x, "Shellscripts"))
return hotkeyFunctions
class HotkeySetup(Screen):
def __init__(self, session, args=None):
Screen.__init__(self, session)
self.session = session
self.setTitle(_("Hotkey Setup"))
self["key_red"] = Button(_("Exit"))
self["key_green"] = Button(_("Toggle Extra Keys"))
self.list = []
self.hotkeys = getHotkeys()
self.hotkeyFunctions = getHotkeyFunctions()
for x in self.hotkeys:
self.list.append(ChoiceEntryComponent('',(x[0], x[1])))
self["list"] = ChoiceList(list=self.list[:config.misc.hotkey.additional_keys.value and len(self.hotkeys) or 10], selection = 0)
self["choosen"] = ChoiceList(list=[])
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MenuActions"],
{
"ok": self.keyOk,
"cancel": self.close,
"red": self.close,
"green": self.toggleAdditionalKeys,
"up": self.keyUp,
"down": self.keyDown,
"left": self.keyLeft,
"right": self.keyRight,
"menu": boundFunction(self.close, True),
}, -1)
self["NumberActions"] = NumberActionMap(["NumberActions"],
{
"0": self.keyNumberGlobal
})
self["HotkeyButtonActions"] = hotkeyActionMap(["HotkeyActions"], dict((x[1], self.hotkeyGlobal) for x in self.hotkeys))
self.longkeyPressed = False
self.onLayoutFinish.append(self.__layoutFinished)
self.onExecBegin.append(self.getFunctions)
def __layoutFinished(self):
self["choosen"].selectionEnabled(0)
def hotkeyGlobal(self, key):
if self.longkeyPressed:
self.longkeyPressed = False
else:
index = 0
for x in self.list[:config.misc.hotkey.additional_keys.value and len(self.hotkeys) or 10]:
if key == x[0][1]:
self["list"].moveToIndex(index)
if key.endswith("_long"):
self.longkeyPressed = True
break
index += 1
self.getFunctions()
def keyOk(self):
self.session.openWithCallback(self.HotkeySetupSelectCallback, HotkeySetupSelect, self["list"].l.getCurrentSelection())
def HotkeySetupSelectCallback(self, answer):
if answer:
self.close(True)
def keyLeft(self):
self["list"].instance.moveSelection(self["list"].instance.pageUp)
self.getFunctions()
def keyRight(self):
self["list"].instance.moveSelection(self["list"].instance.pageDown)
self.getFunctions()
def keyUp(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self.getFunctions()
def keyDown(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self.getFunctions()
def setDefaultHotkey(self, answer):
if answer:
for x in getHotkeys():
current_config = eval("config.misc.hotkey." + x[1])
current_config.value = str(x[2])
current_config.save()
self.getFunctions()
def keyNumberGlobal(self, number):
self.session.openWithCallback(self.setDefaultHotkey, MessageBox, _("Set all hotkey to default?"), MessageBox.TYPE_YESNO)
def toggleAdditionalKeys(self):
config.misc.hotkey.additional_keys.value = not config.misc.hotkey.additional_keys.value
config.misc.hotkey.additional_keys.save()
self["list"].setList(self.list[:config.misc.hotkey.additional_keys.value and len(self.hotkeys) or 10])
def getFunctions(self):
key = self["list"].l.getCurrentSelection()[0][1]
if key:
selected = []
for x in eval("config.misc.hotkey." + key + ".value.split(',')"):
if x.startswith("ZapPanic"):
selected.append(ChoiceEntryComponent('',((_("Panic to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x)))
elif x.startswith("Zap"):
selected.append(ChoiceEntryComponent('',((_("Zap to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x)))
else:
function = list(function for function in self.hotkeyFunctions if function[1] == x )
if function:
selected.append(ChoiceEntryComponent('',((function[0][0]), function[0][1])))
self["choosen"].setList(selected)
class HotkeySetupSelect(Screen):
def __init__(self, session, key, args=None):
Screen.__init__(self, session)
self.skinName="HotkeySetup"
self.session = session
self.key = key
self.setTitle(_("Hotkey Setup") + " " + key[0][0])
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Save"))
self.mode = "list"
self.hotkeyFunctions = getHotkeyFunctions()
self.config = eval("config.misc.hotkey." + key[0][1])
self.expanded = []
self.selected = []
for x in self.config.value.split(','):
if x.startswith("ZapPanic"):
self.selected.append(ChoiceEntryComponent('',((_("Panic to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x)))
elif x.startswith("Zap"):
self.selected.append(ChoiceEntryComponent('',((_("Zap to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x)))
else:
function = list(function for function in self.hotkeyFunctions if function[1] == x )
if function:
self.selected.append(ChoiceEntryComponent('',((function[0][0]), function[0][1])))
self.prevselected = self.selected[:]
self["choosen"] = ChoiceList(list=self.selected, selection=0)
self["list"] = ChoiceList(list=self.getFunctionList(), selection=0)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "KeyboardInputActions", "MenuActions"],
{
"ok": self.keyOk,
"cancel": self.cancel,
"red": self.cancel,
"green": self.save,
"up": self.keyUp,
"down": self.keyDown,
"left": self.keyLeft,
"right": self.keyRight,
"upRepeated": self.keyUp,
"downRepeated": self.keyDown,
"leftRepeated": self.keyLeft,
"rightRepeated": self.keyRight,
"pageUp": self.toggleMode,
"pageDown": self.toggleMode,
"moveUp": self.moveUp,
"moveDown": self.moveDown,
"menu": boundFunction(self.close, True),
}, -1)
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self["choosen"].selectionEnabled(0)
def getFunctionList(self):
functionslist = []
catagories = {}
for function in self.hotkeyFunctions:
if not catagories.has_key(function[2]):
catagories[function[2]] = []
catagories[function[2]].append(function)
for catagorie in sorted(list(catagories)):
if catagorie in self.expanded:
functionslist.append(ChoiceEntryComponent('expanded',((catagorie), "Expander")))
for function in catagories[catagorie]:
functionslist.append(ChoiceEntryComponent('verticalline',((function[0]), function[1])))
if catagorie == "InfoBar":
functionslist.append(ChoiceEntryComponent('verticalline',((_("Zap to")), "Zap")))
functionslist.append(ChoiceEntryComponent('verticalline',((_("Panic to")), "ZapPanic")))
else:
functionslist.append(ChoiceEntryComponent('expandable',((catagorie), "Expander")))
return functionslist
def toggleMode(self):
if self.mode == "list" and self.selected:
self.mode = "choosen"
self["choosen"].selectionEnabled(1)
self["list"].selectionEnabled(0)
elif self.mode == "choosen":
self.mode = "list"
self["choosen"].selectionEnabled(0)
self["list"].selectionEnabled(1)
def keyOk(self):
if self.mode == "list":
currentSelected = self["list"].l.getCurrentSelection()
if currentSelected[0][1] == "Expander":
if currentSelected[0][0] in self.expanded:
self.expanded.remove(currentSelected[0][0])
else:
self.expanded.append(currentSelected[0][0])
self["list"].setList(self.getFunctionList())
else:
if currentSelected[:2] in self.selected:
self.selected.remove(currentSelected[:2])
else:
if currentSelected[0][1].startswith("ZapPanic"):
from Screens.ChannelSelection import SimpleChannelSelection
self.session.openWithCallback(self.zaptoCallback, SimpleChannelSelection, _("Hotkey Panic") + " " + self.key[0][0], currentBouquet=True)
elif currentSelected[0][1].startswith("Zap"):
from Screens.ChannelSelection import SimpleChannelSelection
self.session.openWithCallback(self.zaptoCallback, SimpleChannelSelection, _("Hotkey zap") + " " + self.key[0][0], currentBouquet=True)
else:
self.selected.append(currentSelected[:2])
elif self.selected:
self.selected.remove(self["choosen"].l.getCurrentSelection())
if not self.selected:
self.toggleMode()
self["choosen"].setList(self.selected)
def zaptoCallback(self, *args):
if args:
currentSelected = self["list"].l.getCurrentSelection()[:]
currentSelected[1]=currentSelected[1][:-1] + (currentSelected[0][0] + " " + ServiceReference(args[0]).getServiceName(),)
self.selected.append([(currentSelected[0][0], currentSelected[0][1] + "/" + args[0].toString()), currentSelected[1]])
def keyLeft(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.pageUp)
def keyRight(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.pageDown)
def keyUp(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.moveUp)
def keyDown(self):
self[self.mode].instance.moveSelection(self[self.mode].instance.moveDown)
def moveUp(self):
self.moveChoosen(self.keyUp)
def moveDown(self):
self.moveChoosen(self.keyDown)
def moveChoosen(self, direction):
if self.mode == "choosen":
currentIndex = self["choosen"].getSelectionIndex()
swapIndex = (currentIndex + (direction == self.keyDown and 1 or -1)) % len(self["choosen"].list)
self["choosen"].list[currentIndex], self["choosen"].list[swapIndex] = self["choosen"].list[swapIndex], self["choosen"].list[currentIndex]
self["choosen"].setList(self["choosen"].list)
direction()
else:
return 0
def save(self):
configValue = []
for x in self.selected:
configValue.append(x[0][1])
self.config.value = ",".join(configValue)
self.config.save()
self.close(False)
def cancel(self):
if self.selected != self.prevselected:
self.session.openWithCallback(self.cancelCallback, MessageBox, _("are you sure to cancel all changes"), default=False)
else:
self.close(None)
def cancelCallback(self, answer):
answer and self.close(None)
class hotkeyActionMap(ActionMap):
def action(self, contexts, action):
if (action in tuple(x[1] for x in getHotkeys()) and self.actions.has_key(action)):
res = self.actions[action](action)
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class helpableHotkeyActionMap(HelpableActionMap):
def action(self, contexts, action):
if (action in tuple(x[1] for x in getHotkeys()) and self.actions.has_key(action)):
res = self.actions[action](action)
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class InfoBarHotkey():
def __init__(self):
self.hotkeys = getHotkeys()
self["HotkeyButtonActions"] = helpableHotkeyActionMap(self, "HotkeyActions",
dict((x[1],(self.hotkeyGlobal, boundFunction(self.getHelpText, x[1]))) for x in self.hotkeys), -10)
self.onExecBegin.append(self.clearLongkeyPressed)
def clearLongkeyPressed(self):
self.longkeyPressed = False
def getKeyFunctions(self, key):
if key in ("play", "playpause", "Stop", "stop", "pause", "rewind", "next", "previous", "fastforward", "skip_back", "skip_forward") and (self.__class__.__name__ == "MoviePlayer" or hasattr(self, "timeshiftActivated") and self.timeshiftActivated()):
return False
selection = eval("config.misc.hotkey." + key + ".value.split(',')")
selected = []
for x in selection:
if x.startswith("ZapPanic"):
selected.append(((_("Panic to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x))
elif x.startswith("Zap"):
selected.append(((_("Zap to") + " " + ServiceReference(eServiceReference(x.split("/", 1)[1]).toString()).getServiceName()), x))
else:
function = list(function for function in getHotkeyFunctions() if function[1] == x )
if function:
selected.append(function[0])
return selected
def getHelpText(self, key):
selected = self.getKeyFunctions(key)
if not selected:
return
if len(selected) == 1:
return selected[0][0]
else:
return _("Hotkey") + " " + tuple(x[0] for x in self.hotkeys if x[1] == key)[0]
def hotkeyGlobal(self, key):
if self.longkeyPressed:
self.longkeyPressed = False
else:
selected = self.getKeyFunctions(key)
if not selected:
return 0
elif len(selected) == 1:
self.longkeyPressed = key.endswith("_long")
return self.execHotkey(selected[0])
else:
key = tuple(x[0] for x in self.hotkeys if x[1] == key)[0]
self.session.openWithCallback(self.execHotkey, ChoiceBox, _("Hotkey") + " " + key, selected)
def execHotkey(self, selected):
if selected:
selected = selected[1].split("/")
if selected[0] == "Plugins":
twinPlugins = []
twinPaths = {}
pluginlist = plugins.getPlugins(PluginDescriptor.WHERE_EVENTINFO)
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path and 'selectedevent' not in plugin.__call__.func_code.co_varnames:
if twinPaths.has_key(plugin.path[24:]):
twinPaths[plugin.path[24:]] += 1
else:
twinPaths[plugin.path[24:]] = 1
if plugin.path[24:] + "/" + str(twinPaths[plugin.path[24:]]) == "/".join(selected):
self.runPlugin(plugin)
return
twinPlugins.append(plugin.name)
pluginlist = plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU, PluginDescriptor.WHERE_EXTENSIONSMENU])
pluginlist.sort(key=lambda p: p.name)
for plugin in pluginlist:
if plugin.name not in twinPlugins and plugin.path:
if twinPaths.has_key(plugin.path[24:]):
twinPaths[plugin.path[24:]] += 1
else:
twinPaths[plugin.path[24:]] = 1
if plugin.path[24:] + "/" + str(twinPaths[plugin.path[24:]]) == "/".join(selected):
self.runPlugin(plugin)
return
twinPlugins.append(plugin.name)
elif selected[0] == "MenuPlugin":
for plugin in plugins.getPluginsForMenu(selected[1]):
if plugin[2] == selected[2]:
self.runPlugin(plugin[1])
return
elif selected[0] == "Infobar":
if hasattr(self, selected[1]):
exec "self." + ".".join(selected[1:]) + "()"
else:
return 0
elif selected[0] == "Module":
try:
exec "from " + selected[1] + " import *"
exec "self.session.open(" + ",".join(selected[2:]) + ")"
except:
print "[Hotkey] error during executing module %s, screen %s" % (selected[1], selected[2])
elif selected[0] == "Setup":
exec "from Screens.Setup import *"
exec "self.session.open(Setup, \"" + selected[1] + "\")"
elif selected[0].startswith("Zap"):
if selected[0] == "ZapPanic":
self.servicelist.history = []
self.pipShown() and self.showPiP()
self.servicelist.servicelist.setCurrent(eServiceReference("/".join(selected[1:])))
self.servicelist.zap(enable_pipzap = True)
if hasattr(self, "lastservice"):
self.lastservice = eServiceReference("/".join(selected[1:]))
self.close()
else:
self.show()
from Screens.MovieSelection import defaultMoviePath
moviepath = defaultMoviePath()
if moviepath:
config.movielist.last_videodir.value = moviepath
elif selected[0] == "PPanel":
ppanelFileName = '/etc/ppanels/' + selected[1] + ".xml"
if os.path.isfile(ppanelFileName) and os.path.isdir('/usr/lib/enigma2/python/Plugins/Extensions/PPanel'):
from Plugins.Extensions.PPanel.ppanel import PPanel
self.session.open(PPanel, name=selected[1] + ' PPanel', node=None, filename=ppanelFileName, deletenode=None)
elif selected[0] == "Shellscript":
command = '/usr/script/' + selected[1] + ".sh"
if os.path.isfile(command) and os.path.isdir('/usr/lib/enigma2/python/Plugins/Extensions/PPanel'):
from Plugins.Extensions.PPanel.ppanel import Execute
self.session.open(Execute, selected[1] + " shellscript", None, command)
def showServiceListOrMovies(self):
if hasattr(self, "openServiceList"):
self.openServiceList()
elif hasattr(self, "showMovies"):
self.showMovies()
def ToggleLCDLiveTV(self):
config.lcd.showTv.value = not config.lcd.showTv.value
| 0.025632 |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import role
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
class RoleValidator(BasicValidator):
@classmethod
def validate_delete(cls, release, role_name):
clusters = [cluster.id for cluster in release.clusters]
node = db().query(models.Node).filter(
models.Node.cluster_id.in_(clusters)
).filter(sa.or_(
models.Node.roles.any(role_name),
models.Node.pending_roles.any(role_name)
)).first()
if node:
raise errors.CannotDelete(
"Can't delete roles that is assigned to some node.")
@classmethod
def validate(cls, data, instance=None):
parsed = super(RoleValidator, cls).validate(data)
cls.validate_schema(parsed, role.SCHEMA)
return parsed
@classmethod
def validate_update(cls, data, instance):
parsed = cls.validate(data, instance=instance)
allowed_ids = [m['id'] for m in instance.volumes_metadata['volumes']]
missing_volume_ids = []
for volume in parsed['volumes_roles_mapping']:
if volume['id'] not in allowed_ids:
missing_volume_ids.append(volume['id'])
if missing_volume_ids:
raise errors.InvalidData(
"Wrong data in volumes_roles_mapping. Volumes with ids {0} are"
" not in the list of allowed volumes {1}".format(
missing_volume_ids, allowed_ids))
return parsed
@classmethod
def validate_create(cls, data):
return cls.validate(data)
| 0 |
"""
Utility functions to deal with ppm (qemu screendump format) files.
:copyright: Red Hat 2008-2009
"""
import os
import struct
import time
import re
import glob
import logging
try:
from PIL import Image
except ImportError:
Image = None
logging.warning('No python imaging library installed. Windows guest '
'BSOD detection disabled. In order to enable it, '
'please install python-imaging or the equivalent for your '
'distro.')
try:
import hashlib
except ImportError:
import md5
# Some directory/filename utils, for consistency
def md5eval(data):
"""
Returns a md5 hash evaluator. This function is implemented in order to
encapsulate objects in a way that is compatible with python 2.4 and
python 2.6 without warnings.
:param data: Optional input string that will be used to update the object.
"""
try:
hsh = hashlib.new('md5')
except NameError:
hsh = md5.new()
if data:
hsh.update(data)
return hsh
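# Illustrative usage sketch: md5eval behaves like a plain hashlib md5 object,
# e.g. md5eval("abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"
# (the standard RFC 1321 test vector).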
def find_id_for_screendump(md5sum, data_dir):
"""
    Search data_dir for a PPM file whose name ends with md5sum.
    :param md5sum: md5 sum string
    :param data_dir: Directory that holds the PPM files.
:return: The file's basename without any preceding path, e.g.
``20080101_120000_d41d8cd98f00b204e9800998ecf8427e.ppm``
"""
try:
files = os.listdir(data_dir)
except OSError:
files = []
for fl in files:
exp = re.compile(r"(.*_)?" + md5sum + r"\.ppm", re.IGNORECASE)
if exp.match(fl):
return fl
def generate_id_for_screendump(md5sum, data_dir):
"""
Generate a unique filename using the given MD5 sum.
:return: Only the file basename, without any preceding path. The
filename consists of the current date and time, the MD5 sum and a
``.ppm`` extension, e.g.
``20080101_120000_d41d8cd98f00b204e9800998ecf8427e.ppm``.
"""
filename = time.strftime("%Y%m%d_%H%M%S") + "_" + md5sum + ".ppm"
return filename
def get_data_dir(steps_filename):
"""
Return the data dir of the given steps filename.
"""
filename = os.path.basename(steps_filename)
return os.path.join(os.path.dirname(steps_filename), "..", "steps_data",
filename + "_data")
# Functions for working with PPM files
def image_read_from_ppm_file(filename):
"""
Read a PPM image.
:return: A 3 element tuple containing the width, height and data of the
image.
"""
fin = open(filename, "rb")
fin.readline()
l2 = fin.readline()
fin.readline()
data = fin.read()
fin.close()
(w, h) = map(int, l2.split())
return (w, h, data)
def image_write_to_ppm_file(filename, width, height, data):
"""
Write a PPM image with the given width, height and data.
:param filename: PPM file path
:param width: PPM file width (pixels)
:param height: PPM file height (pixels)
"""
fout = open(filename, "wb")
fout.write("P6\n")
fout.write("%d %d\n" % (width, height))
fout.write("255\n")
fout.write(data)
fout.close()
def image_crop(width, height, data, x1, y1, dx, dy):
"""
Crop an image.
:param width: Original image width
:param height: Original image height
:param data: Image data
:param x1: Desired x coordinate of the cropped region
:param y1: Desired y coordinate of the cropped region
:param dx: Desired width of the cropped region
:param dy: Desired height of the cropped region
:return: A 3-tuple containing the width, height and data of the
cropped image.
"""
if x1 > width - 1:
x1 = width - 1
if y1 > height - 1:
y1 = height - 1
if dx > width - x1:
dx = width - x1
if dy > height - y1:
dy = height - y1
newdata = ""
index = (x1 + y1 * width) * 3
for _ in range(dy):
newdata += data[index:(index + dx * 3)]
index += width * 3
return (dx, dy, newdata)
def image_md5sum(width, height, data):
"""
Return the md5sum of an image.
:param width: PPM file width
:param height: PPM file height
:param data: PPM file data
"""
header = "P6\n%d %d\n255\n" % (width, height)
hsh = md5eval(header)
hsh.update(data)
return hsh.hexdigest()
def get_region_md5sum(width, height, data, x1, y1, dx, dy,
cropped_image_filename=None):
"""
Return the md5sum of a cropped region.
:param width: Original image width
:param height: Original image height
:param data: Image data
:param x1: Desired x coord of the cropped region
:param y1: Desired y coord of the cropped region
:param dx: Desired width of the cropped region
:param dy: Desired height of the cropped region
:param cropped_image_filename: if not None, write the resulting cropped
image to a file with this name
"""
(cw, ch, cdata) = image_crop(width, height, data, x1, y1, dx, dy)
# Write cropped image for debugging
if cropped_image_filename:
image_write_to_ppm_file(cropped_image_filename, cw, ch, cdata)
return image_md5sum(cw, ch, cdata)
def image_verify_ppm_file(filename):
"""
Verify the validity of a PPM file.
:param filename: Path of the file being verified.
:return: True if filename is a valid PPM image file. This function
reads only the first few bytes of the file so it should be rather
fast.
"""
try:
size = os.path.getsize(filename)
fin = open(filename, "rb")
assert(fin.readline().strip() == "P6")
(width, height) = map(int, fin.readline().split())
assert(width > 0 and height > 0)
assert(fin.readline().strip() == "255")
size_read = fin.tell()
fin.close()
assert(size - size_read == width * height * 3)
return True
except Exception:
return False
def image_comparison(width, height, data1, data2):
"""
Generate a green-red comparison image from two given images.
:param width: Width of both images
:param height: Height of both images
:param data1: Data of first image
:param data2: Data of second image
:return: A 3-element tuple containing the width, height and data of the
generated comparison image.
:note: Input images must be the same size.
"""
newdata = ""
i = 0
while i < width * height * 3:
# Compute monochromatic value of current pixel in data1
pixel1_str = data1[i:i + 3]
temp = struct.unpack("BBB", pixel1_str)
value1 = int((temp[0] + temp[1] + temp[2]) / 3)
# Compute monochromatic value of current pixel in data2
pixel2_str = data2[i:i + 3]
temp = struct.unpack("BBB", pixel2_str)
value2 = int((temp[0] + temp[1] + temp[2]) / 3)
# Compute average of the two values
value = int((value1 + value2) / 2)
# Scale value to the upper half of the range [0, 255]
value = 128 + value / 2
# Compare pixels
if pixel1_str == pixel2_str:
# Equal -- give the pixel a greenish hue
newpixel = [0, value, 0]
else:
# Not equal -- give the pixel a reddish hue
newpixel = [value, 0, 0]
newdata += struct.pack("BBB", newpixel[0], newpixel[1], newpixel[2])
i += 3
return (width, height, newdata)
def image_fuzzy_compare(width, height, data1, data2):
"""
Return the degree of equality of two given images.
:param width: Width of both images
:param height: Height of both images
:param data1: Data of first image
:param data2: Data of second image
:return: Ratio equal_pixel_count / total_pixel_count.
:note: Input images must be the same size.
"""
equal = 0.0
different = 0.0
i = 0
while i < width * height * 3:
pixel1_str = data1[i:i + 3]
pixel2_str = data2[i:i + 3]
# Compare pixels
if pixel1_str == pixel2_str:
equal += 1.0
else:
different += 1.0
i += 3
return equal / (equal + different)
def image_average_hash(image, img_wd=8, img_ht=8):
"""
    Resize the image and convert it to grayscale, then compute its average
    hash from the pixel data.
:param image: an image path or an opened image object
"""
if not isinstance(image, Image.Image):
image = Image.open(image)
image = image.resize((img_wd, img_ht), Image.ANTIALIAS).convert('L')
avg = reduce(lambda x, y: x + y, image.getdata()) / (img_wd * img_ht)
def _hta(i):
if i < avg:
return 0
else:
return 1
return reduce(lambda x, (y, z): x | (z << y),
enumerate(map(_hta, image.getdata())), 0)
def cal_hamming_distance(h1, h2):
"""
Calculate the hamming distance
"""
h_distance, distance = 0, h1 ^ h2
while distance:
h_distance += 1
distance &= distance - 1
return h_distance
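# Note: the loop above relies on the n &= n - 1 trick -- each pass clears the
# lowest set bit of the XOR difference, so the number of passes equals the
# number of differing bits. For example, cal_hamming_distance(0b1011, 0b0010)
# returns 2, because bits 0 and 3 differ.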
def img_ham_distance(base_img, comp_img):
"""
Calculate two images hamming distance
"""
base_img_ahash = image_average_hash(base_img)
comp_img_ahash = image_average_hash(comp_img)
return cal_hamming_distance(comp_img_ahash, base_img_ahash)
def img_similar(base_img, comp_img, threshold=10):
"""
    Check whether two images are similar, based on the hamming distance of
    their average hashes.
"""
try:
hamming_distance = img_ham_distance(base_img, comp_img)
except IOError:
return False
if hamming_distance < threshold:
return True
else:
return False
def have_similar_img(base_img, comp_img_path, threshold=10):
"""
    Check whether comp_img_path contains an image that looks like base_img.
"""
    support_img_format = ['jpg', 'jpeg', 'gif', 'png', 'ppm']
comp_images = []
if os.path.isdir(comp_img_path):
for ext in support_img_format:
comp_images.extend([os.path.join(comp_img_path, x) for x in
glob.glob1(comp_img_path, '*.%s' % ext)])
else:
comp_images.append(comp_img_path)
for img in comp_images:
if img_similar(base_img, img, threshold):
return True
return False
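# Minimal usage sketch (the file names below are placeholders, not part of the
# original module): read two screendumps, hash a 100x100 region of the first,
# and report how similar the full frames are when their sizes match.
if __name__ == "__main__":
    w1, h1, d1 = image_read_from_ppm_file("before.ppm")
    w2, h2, d2 = image_read_from_ppm_file("after.ppm")
    print(get_region_md5sum(w1, h1, d1, 0, 0, 100, 100))
    if (w1, h1) == (w2, h2):
        print(image_fuzzy_compare(w1, h1, d1, d2))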
| 0 |
from functions import Textile
class TextileFactory(object):
"""
Use TextileFactory to create a Textile object which can be re-used
to process multiple strings with the same settings.
>>> f = TextileFactory()
>>> f.process("some text here")
'\\t<p>some text here</p>'
>>> f = TextileFactory(restricted=True)
>>> f.process("more text here")
'\\t<p>more text here</p>'
Certain parameter values are not permitted because they are illogical:
>>> f = TextileFactory(lite=True)
Traceback (most recent call last):
...
ValueError: lite can only be enabled in restricted mode
>>> f = TextileFactory(head_offset=7)
Traceback (most recent call last):
...
ValueError: head_offset must be 0-6
>>> f = TextileFactory(html_type='html5')
Traceback (most recent call last):
...
ValueError: html_type must be 'html' or 'xhtml'
"""
def __init__(self, restricted=False, lite=False, sanitize=False,
noimage=None, auto_link=False, get_sizes=False,
head_offset=0, html_type='xhtml'):
self.class_parms = {}
self.method_parms = {}
if lite and not restricted:
raise ValueError("lite can only be enabled in restricted mode")
if restricted:
self.class_parms['restricted'] = True
self.class_parms['lite'] = lite
self.method_parms['rel'] = 'nofollow'
if noimage is None:
if restricted:
noimage = True
else:
noimage = False
self.class_parms['noimage'] = noimage
self.method_parms['sanitize'] = sanitize
self.class_parms['auto_link'] = auto_link
self.class_parms['get_sizes'] = get_sizes
        if int(head_offset) not in range(0, 7):
raise ValueError("head_offset must be 0-6")
else:
self.method_parms['head_offset'] = head_offset
if html_type not in ['html', 'xhtml']:
raise ValueError("html_type must be 'html' or 'xhtml'")
else:
self.method_parms['html_type'] = html_type
def process(self, text):
return Textile(**self.class_parms).textile(text, **self.method_parms)
| 0 |
# In The Name Of God
# ========================================
# [] File Name : fibonacci
#
# [] Creation Date : 17-04-2015
#
# [] Created By : Parham Alvani (parham.alvani@gmail.com)
# =======================================
__author__ = 'Parham Alvani'
class Fibonacci:
"""
    Simple class for computing Fibonacci numbers, caching previously
    computed values (dynamic programming / memoization).
"""
def __init__(self):
"""
build fibonacci object for you
:return: nothing
"""
        self.list = [0, 1]
def get_fibonacci(self, index):
"""
:param index: fibonacci sequence number
:return: fibonacci at sequence number
        :raise: TypeError if index is not an integer
"""
if not isinstance(index, int):
raise TypeError
        if len(self.list) - 1 >= index:
            return self.list[index]
        else:
            while len(self.list) - 1 < index:
                self.list.append(self.list[-1] + self.list[-2])
            return self.list[index]
fibonacci = Fibonacci()
var = input("Please enter fibonacci sequence number ")
var = int(var)
print(fibonacci.get_fibonacci(var))
# Test string operations
s = str("Hello world of %s" % "python")
print(s)
print(fibonacci.list)
| 0 |
import numpy
from chainer import cuda
from chainer import function
from chainer.functions.array import reshape
from chainer.functions.math import inv
from chainer.functions.math import matmul
from chainer import utils
from chainer.utils import type_check
def _det_gpu(b):
    # We do a batched LU decomposition on the GPU and compute the
    # determinant by multiplying the diagonal entries.
# Change the shape of the array to be size=1 minibatch if necessary.
    # Also copy the matrix as the elements will be modified in-place.
a = matmul._as_batch_mat(b).copy()
n = a.shape[1]
n_matrices = len(a)
# Pivot array
p = cuda.cupy.zeros((n_matrices, n), dtype='int32')
# Output array
# These arrays hold information on the execution success
# or if the matrix was singular.
info = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)
ap = matmul._mat_ptrs(a)
_, lda = matmul._get_ld(a)
cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,
p.data.ptr, info.data.ptr, n_matrices)
det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)
# The determinant is equal to the product of the diagonal entries
# of `a` where the sign of `a` is flipped depending on whether
# the pivot array is equal to its index.
rng = cuda.cupy.arange(1, n + 1, dtype='int32')
parity = cuda.cupy.sum(p != rng, axis=1) % 2
sign = 1. - 2. * parity.astype('float32')
return det * sign, info
class BatchDet(function.Function):
@property
def label(self):
return 'det'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
a_type, = in_types
type_check.expect(a_type.dtype.kind == 'f')
# Only a minibatch of 2D array shapes allowed.
type_check.expect(a_type.ndim == 3)
        # The determinant is only defined for square matrices,
        # so assert the last two dimensions are equal.
type_check.expect(a_type.shape[-1] == a_type.shape[-2])
def forward_cpu(self, x):
self.detx = utils.force_array(numpy.linalg.det(x[0]))
return self.detx,
def forward_gpu(self, x):
self.detx, _ = _det_gpu(x[0])
return self.detx,
def backward_cpu(self, x, gy):
x, = x
gy, = gy
try:
inv_x = numpy.linalg.inv(x.transpose((0, 2, 1)))
except numpy.linalg.LinAlgError:
raise ValueError('Input has singular matrices.')
grad = gy[:, None, None] * self.detx[:, None, None] * inv_x
return utils.force_array(grad),
def backward_gpu(self, x, gy):
x, = x
gy, = gy
inv_x, info = inv._inv_gpu(x.transpose((0, 2, 1)))
if cuda.cupy.any(info != 0):
raise ValueError('Input has singular matrices.')
grad = gy[:, None, None] * self.detx[:, None, None] * inv_x
return utils.force_array(grad),
def batch_det(a):
"""Computes the determinant of a batch of square matrices.
Args:
a (Variable): Input array to compute the determinant for.
            The first dimension indexes the matrices in the batch, i.e. its
            length equals the batch size.
Returns:
~chainer.Variable: vector of determinants for every matrix
in the batch.
"""
return BatchDet()(a)
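# Illustrative sketch (assumed shapes): the determinants of two stacked 2x2
# identity matrices are both 1.
#
#   import numpy as np
#   from chainer import Variable
#   x = Variable(np.stack([np.eye(2, dtype=np.float32)] * 2))
#   batch_det(x).data   # -> array([ 1.,  1.], dtype=float32)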
def det(a):
"""Computes the determinant of a single square matrix.
Args:
a (Variable): Input array to compute the determinant for.
Returns:
~chainer.Variable: Scalar determinant of the matrix a.
"""
shape = (1, a.shape[0], a.shape[1])
batched_a = reshape.Reshape(shape)(a)
batched_det = BatchDet()(batched_a)
return reshape.Reshape(())(batched_det)
| 0 |
from __future__ import unicode_literals
def device_from_request(request):
"""
    Determines the device name from the request by first looking for an
    overriding cookie, and if not found then matching the user agent.
Used at both the template level for choosing the template to load and
also at the cache level as a cache key prefix.
"""
from mezzanine.conf import settings
try:
# If a device was set via cookie, match available devices.
for (device, _) in settings.DEVICE_USER_AGENTS:
if device == request.COOKIES["mezzanine-device"]:
return device
except KeyError:
# If a device wasn't set via cookie, match user agent.
try:
user_agent = request.META["HTTP_USER_AGENT"].lower()
except KeyError:
pass
else:
try:
user_agent = user_agent.decode("utf-8")
except AttributeError:
pass
for (device, ua_strings) in settings.DEVICE_USER_AGENTS:
for ua_string in ua_strings:
if ua_string.lower() in user_agent:
return device
return ""
def templates_for_device(request, templates):
"""
Given a template name (or list of them), returns the template names
as a list, with each name prefixed with the device directory
    inserted before its associated default in the list.
"""
from mezzanine.conf import settings
if not isinstance(templates, (list, tuple)):
templates = [templates]
device = device_from_request(request)
device_templates = []
for template in templates:
if device:
device_templates.append("%s/%s" % (device, template))
if settings.DEVICE_DEFAULT and settings.DEVICE_DEFAULT != device:
default = "%s/%s" % (settings.DEVICE_DEFAULT, template)
device_templates.append(default)
device_templates.append(template)
return device_templates
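# Example (hedged sketch): with settings.DEVICE_DEFAULT == "desktop" and a
# request that resolves to the "mobile" device, templates_for_device(request,
# "index.html") returns ["mobile/index.html", "desktop/index.html",
# "index.html"], letting the template loader fall back from the
# device-specific variant to the default one.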
| 0 |
from chainer.dataset import dataset_mixin
class TransformDataset(dataset_mixin.DatasetMixin):
"""Dataset that indexes the base dataset and transforms the data.
This dataset wraps the base dataset by modifying the behavior of the base
dataset's :meth:`__getitem__`. Arrays returned by :meth:`__getitem__` of
the base dataset with integer as an argument are transformed by the given
function :obj:`transform`.
Also, :meth:`__len__` returns the integer returned by the base dataset's
:meth:`__len__`.
The function :obj:`transform` takes, as an argument, :obj:`in_data`, which
is the output of the base dataset's :meth:`__getitem__`, and returns
the transformed arrays as output. Please see the following example.
>>> from chainer.datasets import get_mnist
>>> from chainer.datasets import TransformDataset
>>> dataset, _ = get_mnist()
>>> def transform(in_data):
... img, label = in_data
    ...     img -= 0.5 # scale to [-0.5, 0.5]
... return img, label
>>> dataset = TransformDataset(dataset, transform)
Args:
dataset: The underlying dataset. The index of this dataset corresponds
to the index of the base dataset. This object needs to support
functions :meth:`__getitem__` and :meth:`__len__` as described
above.
transform (callable): A function that is called to transform values
returned by the underlying dataset's :meth:`__getitem__`.
"""
def __init__(self, dataset, transform):
self._dataset = dataset
self._transform = transform
def __len__(self):
return len(self._dataset)
def get_example(self, i):
in_data = self._dataset[i]
return self._transform(in_data)
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .. import base
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class WebRootTestCase(base.TestCase):
def testAccessWebRoot(self):
"""
Requests the webroot and tests the existence of several
elements in the returned html
"""
resp = self.request(path='/', method='GET', isJson=False, prefix='')
self.assertStatus(resp, 200)
body = self.getBody(resp)
self.assertTrue('girder_app.min.js' in body)
self.assertTrue('girder_lib.min.js' in body)
| 0 |
#=========================================================================
# DotProductRTL
#=========================================================================
from pymtl import *
from pclib.ifcs import InValRdyBundle, OutValRdyBundle
from pclib.ifcs import ParentReqRespBundle, ChildReqRespBundle
nmul_stages = 4
#------------------------------------------------------------------------------
# DotProductRTL
#------------------------------------------------------------------------------
class DotProductRTL( Model ):
def __init__( s, mem_ifc_types, cpu_ifc_types ):
s.cpu_ifc = ChildReqRespBundle ( cpu_ifc_types )
s.mem_ifc = ParentReqRespBundle( mem_ifc_types )
s.dpath = DotProductDpath( mem_ifc_types, cpu_ifc_types )
s.ctrl = DotProductCtrl ( mem_ifc_types, cpu_ifc_types )
s.connect_auto(s.dpath, s.ctrl)
def line_trace( s ):
return "| {} {} {} {}|".format(s.ctrl.state, s.dpath.count, s.dpath.accum_A, s.ctrl.pause)
def elaborate_logic( s ):
pass
#------------------------------------------------------------------------------
# Select Constants
#------------------------------------------------------------------------------
y = Bits( 1, 1 )
n = Bits( 1, 0 )
# mem_type
na = Bits( 2, 0 )
ld = Bits( 2, 1 )
st = Bits( 2, 2 )
load = Bits( 1, 0 )
# data_sel
#zer = Bits( 1, 0 )
acm = Bits( 1, 1 )
# offset_sel_M
zro = Bits( 1, 0 )
cnt = Bits( 1, 1 )
# baddr_sel_M
xxx = Bits( 2, 0 )
row = Bits( 2, 0 )
vec = Bits( 2, 1 )
dst = Bits( 2, 2 )
src0 = Bits( 2, 0 )
size = Bits(2, 0)
src0 = Bits(2, 1)
src1 = Bits(2, 2)
# TODO: total hack
Model.tick_rtl = Model.posedge_clk
#------------------------------------------------------------------------------
# DotProductDpath
#------------------------------------------------------------------------------
class DotProductDpath( Model ):
def __init__( s, mem_ifc_types, cpu_ifc_types ):
s.cpu_ifc = ChildReqRespBundle ( cpu_ifc_types )
s.mem_ifc = ParentReqRespBundle( mem_ifc_types )
s.cs = InPort ( CtrlSignals() )
s.ss = OutPort( StatusSignals() )
#--- Stage M: Memory Request ------------------------------
s.count = Wire( cpu_ifc_types.req .data.nbits )
s.size = Wire( cpu_ifc_types.req .data.nbits )
s.src0_addr_M = Wire( mem_ifc_types.req .addr.nbits )
s.src1_addr_M = Wire( mem_ifc_types.req .addr.nbits )
s.src0_data_R = Wire( mem_ifc_types.resp.data.nbits )
s.src1_data_R = Wire( mem_ifc_types.resp.data.nbits )
@s.tick_rtl
def stage_seq_M():
ctrl_msg = s.cpu_ifc.req_msg .ctrl_msg
cpu_data = s.cpu_ifc.req_msg .data
if s.cs.update_M:
if ctrl_msg == 1: s.size .next = cpu_data
elif ctrl_msg == 2: s.src0_addr_M.next = cpu_data
elif ctrl_msg == 3: s.src1_addr_M.next = cpu_data
elif ctrl_msg == 0: s.ss.go .next = True
else: s.ss.go.next = False
if s.cs.count_clear_M: s.count.next = 0
elif s.cs.count_en_M: s.count.next = s.count + 1
@s.combinational
def stage_comb_M():
# base_addr mux
if s.cs.baddr_sel_M == src0: base_addr_M = s.src0_addr_M
else: base_addr_M = s.src1_addr_M
# memory request
s.mem_ifc.req_msg.type_.value = 0
s.mem_ifc.req_msg.addr.value = base_addr_M + (s.count<<2)
# last item status signal
s.ss.last_item_M.value = s.count == (s.size - 1)
#--- Stage R: Memory Response -----------------------------
@s.tick_rtl
    def stage_seq_R():
mem_data = s.mem_ifc.resp_msg.data
if s.cs.src0_en_R: s.src0_data_R.next = mem_data
if s.cs.src1_en_R: s.src1_data_R.next = mem_data
#--- Stage X: Execute Multiply ----------------------------
s.result_X = Wire( cpu_ifc_types.req.data.nbits )
s.mul = IntPipelinedMultiplier(
nbits = cpu_ifc_types.req.data.nbits,
nstages = 4,
)
s.connect_dict( { s.mul.op_a : s.src0_data_R,
s.mul.op_b : s.src1_data_R,
s.mul.product : s.result_X } )
#--- Stage A: Accumulate ----------------------------------
s.accum_A = Wire( cpu_ifc_types.resp.data.nbits )
s.accum_out = Wire( cpu_ifc_types.resp.data.nbits )
@s.tick_rtl
def stage_seq_A():
if s.reset or s.cs.accum_clear_A:
s.accum_A.next = 0
elif s.cs.accum_en_A:
s.accum_A.next = s.accum_out
@s.combinational
def stage_comb_A():
s.accum_out.value = s.result_X + s.accum_A
s.cpu_ifc.resp_msg.value = s.accum_A
def elaborate_logic( s ):
pass
#------------------------------------------------------------------------------
# State Machine Constants
#------------------------------------------------------------------------------
IDLE = 0
SEND_OP_LDA = 1
SEND_OP_LDB = 2
SEND_OP_ST = 3
DONE = 4
#------------------------------------------------------------------------------
# DotProductCtrl
#------------------------------------------------------------------------------
class DotProductCtrl( Model ):
def __init__( s, mem_ifc_types, cpu_ifc_types ):
s.cpu_ifc = ChildReqRespBundle ( cpu_ifc_types )
s.mem_ifc = ParentReqRespBundle( mem_ifc_types )
s.cs = OutPort( CtrlSignals() )
s.ss = InPort ( StatusSignals() )
s.nmul_stages = nmul_stages
s.state = Wire( 3 )
s.state_next = Wire( 3 )
s.pause = Wire( 1 )
s.stall_M = Wire( 1 )
s.stall_R = Wire( 1 )
s.any_stall = Wire( 1 )
s.valid = Wire( 3 )
#--------------------------------------------------------------------------
# State Machine
#--------------------------------------------------------------------------
@s.posedge_clk
def state_update_M():
if s.reset: s.state.next = IDLE
elif s.stall_M: s.state.next = s.state
else: s.state.next = s.state_next
@s.combinational
def state_transition():
send_req = s.mem_ifc.req_val and s.mem_ifc.req_rdy
recv_resp = s.mem_ifc.resp_val and s.mem_ifc.resp_rdy
go = s.ss.go
s.state_next.value = s.state
if s.state == IDLE and go:
s.state_next.value = SEND_OP_LDA
elif s.state == SEND_OP_LDA and send_req:
s.state_next.value = SEND_OP_LDB
elif s.state == SEND_OP_LDB and send_req:
if s.ss.last_item_M:
s.state_next.value = SEND_OP_ST
else:
s.state_next.value = SEND_OP_LDA
elif s.state == SEND_OP_ST and not s.any_stall:
s.state_next.value = DONE
elif s.state == DONE and s.cpu_ifc.resp_rdy:
s.state_next.value = IDLE
#--------------------------------------------------------------------------
# Control Signal Pipeline
#--------------------------------------------------------------------------
s.ctrl_signals_M = Wire( 14 )
s.ctrl_signals_R = Wire( 14 )
s.ctrl_signals_X = [Wire( 14 ) for x in range(s.nmul_stages)]
s.ctrl_signals_A = Wire( 14 )
@s.posedge_clk
def ctrl_regs():
if s.stall_R: s.ctrl_signals_R.next = s.ctrl_signals_R
elif s.stall_M: s.ctrl_signals_R.next = 0
else: s.ctrl_signals_R.next = s.ctrl_signals_M
if s.stall_R: s.ctrl_signals_X[0].next = 0
else: s.ctrl_signals_X[0].next = s.ctrl_signals_R
for i in range( 1, s.nmul_stages ):
s.ctrl_signals_X[i].next = s.ctrl_signals_X[i-1]
s.ctrl_signals_A.next = s.ctrl_signals_X[s.nmul_stages-1]
s.L = Wire( 1 )
@s.combinational
def state_to_ctrl():
# TODO: cannot infer temporaries when an inferred temporary on the RHS!
s.L.value = s.ss.last_item_M
# TODO: multiple assignments to a temporary results in duplicate decl error!
# Encode signals sent down the pipeline based on State
#
# up count last acm mul b a mem data off baddr
# en rst item en en en en type sel sel sel
if s.state == IDLE: cs = concat(y, n, y, n, n, n, n, n, na, zro, zro, xxx)
elif s.state == SEND_OP_LDA: cs = concat(n, n, n, n, n, n, n, y, ld, zro, cnt, row)
elif s.state == SEND_OP_LDB: cs = concat(n, y, n, s.L, y, y, y, n, ld, zro, cnt, vec)
elif s.state == SEND_OP_ST: cs = concat(n, y, n, n, n, n, n, n, na, acm, zro, dst)
elif s.state == DONE: cs = concat(n, n, n, n, n, n, n, n, na, xxx, zro, dst)
s.ctrl_signals_M.value = cs
s.cpu_ifc.req_rdy.value = s.state == IDLE
s.cpu_ifc.resp_val.value = s.state == DONE
if s.state == DONE or s.reset:
s.valid.value = 0
@s.combinational
def ctrl_to_dpath():
# Stall conditions
s.pause.value = s.state == SEND_OP_ST and not s.ctrl_signals_A[10]
req_en = s.ctrl_signals_M[4:6] > 0
resp_en = s.ctrl_signals_R[4:6] > 0
s.stall_M.value = (req_en and not s.mem_ifc.req_rdy) or s.pause
s.stall_R.value = (resp_en and not s.mem_ifc.resp_val)
s.any_stall.value = s.stall_M or s.stall_R
# M Stage
s.cs.baddr_sel_M .value = s.ctrl_signals_M[0:2]
s.cs.offset_sel_M .value = s.ctrl_signals_M[ 2]
s.cs.count_clear_M .value = s.ctrl_signals_M[ 11]
s.cs.accum_clear_A .value = s.cs.count_clear_M
s.cs.count_en_M .value = s.ctrl_signals_M[ 12] and not s.any_stall
s.cs.update_M .value = s.ctrl_signals_M[ 13] and s.cpu_ifc.req_val
s.mem_ifc.req_val.value = req_en and not s.any_stall
# R Stage
s.cs.src0_en_R .value = s.ctrl_signals_R[ 6]
s.cs.src1_en_R .value = s.ctrl_signals_R[ 7]
s.mem_ifc.resp_rdy.value = resp_en and not s.stall_R
# X Stage
for i in range( s.nmul_stages ):
s.cs.mul_reg_en_R[i].value = s.ctrl_signals_X[i][8]
# A Stage
s.cs.accum_en_A.value = s.ctrl_signals_A[9]
def elaborate_logic( s ):
pass
#------------------------------------------------------------------------------
# CtrlSignals
#------------------------------------------------------------------------------
class CtrlSignals( BitStructDefinition ):
def __init__( s ):
# M Stage Signals
s.baddr_sel_M = BitField (2)
s.offset_sel_M = BitField (1)
s.count_clear_M = BitField (1)
s.count_en_M = BitField (1)
# R Stage Signals
s.src0_en_R = BitField (1)
s.src1_en_R = BitField (1)
# X Stage Signals
s.mul_reg_en_R = BitField (nmul_stages)
# A Stage Signals
s.accum_en_A = BitField (1)
s.accum_clear_A = BitField (1)
#IDLE State Signals
s.update_M = BitField (1)
#------------------------------------------------------------------------------
# StatusSignals
#------------------------------------------------------------------------------
class StatusSignals( BitStructDefinition ):
def __init__(s):
s.last_item_M = BitField (1)
s.go = BitField (1)
#------------------------------------------------------------------------------
# MatrixVecCOP_mul
#------------------------------------------------------------------------------
# A dummy multiplier module, acts as a placeholder for DesignWare components.
class IntPipelinedMultiplier( Model ):
def __init__( s, nbits, nstages ):
s.op_a = InPort ( nbits )
s.op_b = InPort ( nbits )
s.product = OutPort( nbits )
s.nbits = nbits
s.nstages = nstages
def elaborate_logic( s ):
s.regs = [ Wire( s.nbits ) for x in range( s.nstages ) ]
@s.posedge_clk
def mult_logic():
if s.reset:
for i in range( s.nstages ):
s.regs[i].next = 0
else:
s.regs[0].next = s.op_a * s.op_b
for i in range( 1, s.nstages ):
s.regs[i].next = s.regs[i-1]
s.connect( s.product, s.regs[-1] )
| 0.033514 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Task Operations")
@click.pass_context
def task(ctx):
"""
\b
    Provides a number of task operational commands, including list and query.
For more details, please check out the help text.
"""
pass
@task.command("list", short_help="List Task Command")
@cli_args.LIMIT
@click.pass_context
def list_task(ctx, **kwargs):
"""
\b
- DESCRIPTION:
List Task description
\b
- USAGE:
flow task list
flow task list -l 25
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/list/task', config_data)
@task.command("query", short_help="Query Task Command")
@cli_args.JOBID
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.COMPONENT_NAME
@cli_args.STATUS
@click.pass_context
def query(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Task Command.
\b
- USAGE:
flow task query -j $JOB_ID -p 9999 -r guest
flow task query -cpn hetero_feature_binning_0 -s success
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/task/query', config_data)
| 0 |
# function to call the main analysis/synthesis functions in software/models/hpsModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import hpsModel as HPS
def main(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
stocf: decimation factor used for the stochastic approximation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the harmonic plus stochastic model of the whole sound
hfreq, hmag, hphase, stocEnv = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
# synthesize a sound from the harmonic plus stochastic representation
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, hphase, stocEnv, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel_sines.wav'
outputFileStochastic = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel_stochastic.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'
# write sounds files for harmonics, stochastic, and the sum
UF.wavwrite(yh, fs, outputFileSines)
UF.wavwrite(yst, fs, outputFileStochastic)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot spectrogram stochastic component
plt.subplot(3,1,2)
numFrames = int(stocEnv[:,0].size)
sizeEnv = int(stocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = harms.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
if __name__ == "__main__":
main()
| 0.028655 |
import subprocess;
import sys;
def generate_tcl(simulate, wave_ena, tb_model_list, srclist, inclist, log_name, save_tcl):
wr_string = "transcript file " + log_name + "\n";
#Init & Command
cmd = "vlog -work work "
wr_string = wr_string + "vlib work\n";
#Include
incdir = "";
fh = open(inclist, 'r');
for line in fh:
incdir = incdir + "+incdir+../" + line.rstrip() + " ";
fh.close();
#Make-TB Mode
fh = open(tb_model_list, 'r');
for line in fh:
wr_string = wr_string + cmd + incdir + "" + line + "\n";
fh.close();
#Make-Src
fh = open(srclist, 'r');
for line in fh:
wr_string = wr_string + cmd + incdir + "" + line + "\n";
fh.close();
if(simulate == True):
#For Wave save
if(wave_ena == True):
wr_string = wr_string + "radix -hexadecimal\n";
wr_string = wr_string + "log -r /*\n";
wr_string = wr_string + "run -all\n";
#Quit
wr_string = wr_string + "quit\n"
#Save
fh = open(save_tcl, "w");
fh.write(wr_string);
fh.close();
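# Rough sketch of the output (not part of the original script): with
# simulate=False the generated TCL is just the transcript/vlib preamble plus
# one "vlog -work work +incdir+... <file>" line per entry in the testbench and
# source list files; with simulate=True it also appends wave logging (when
# wave_ena is True), "run -all" and "quit".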
if __name__ == "__main__":
if(len(sys.argv) != 7):
print("Error : Parameter Miss");
sys.exit();
generate_tcl(False, (sys.argv[1] == 'True'), sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]); | 0.060759 |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import ssm
mpl.rcParams['figure.figsize'] = (16,10)
SILENT_OUTPUT = bool(sys.argv[1] if len(sys.argv)>1 else 0)
if SILENT_OUTPUT:
run_name = '_'+sys.argv[2] if len(sys.argv)>2 else ''
fout = open('demo_nile'+run_name+'_out.txt','w')
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.dpi'] = 150
mpl.rcParams['savefig.format'] = 'png'
mpl.rcParams['savefig.pad_inches'] = 0.1
fig_count = 0
def _output_fig(count):
count += 1
plt.savefig('demo_nile'+run_name+("_out%02d" % count)+'.png')
plt.close()
return count
output_fig = lambda : _output_fig(fig_count)
else:
fout = sys.stdout
output_fig = plt.show
fout.write('\n')
#-- Load data --#
y = np.loadtxt('data/nile.dat')[:,None].T
time = list(range(1871,1971))
#-- Maximum loglikelihood estimation --#
llm = ssm.model_llm()
llm = ssm.estimate(y,llm,np.log([10000,5000])/2,method='Nelder-Mead',options={'disp':not SILENT_OUTPUT})[0]
logL,fvar = ssm.loglik(y,llm)
fout.write("Loglikelihood = %g, variance = %g.\n" % (logL,fvar))
fout.write("epsilon variance = %g, eta variance = %g.\n" % (llm['H']['mat'][0,0],llm['Q']['mat'][0,0]))
#-- Kalman filtering --#
a,P,v,F = ssm.kalman(y,llm)
# Reshape output for plotting
a = a.squeeze()
P = P.squeeze()
sqrtP = np.sqrt(P)
v = v.squeeze()
F = F.squeeze()
fig = plt.figure(num='Filtered state')
ax1 = plt.subplot(221)
plt.plot(time,y.tolist()[0],'r:',label='nile')
plt.plot(time+[1971],a,'b-',label='filt. state')
plt.plot(time+[1971],a+1.645*sqrtP,'g:',label='90% conf. +')
plt.plot(time+[1971],a-1.645*sqrtP,'g:',label='90% conf. -')
plt.title('Filtered state'); plt.ylim([450,1400])
plt.legend()
ax2 = plt.subplot(222)
plt.plot(time+[1971],P)
plt.title('Filtered state variance'); plt.ylim([5000,17500])
ax3 = plt.subplot(223)
plt.plot(time,v)
plt.title('Prediction errors'); plt.ylim([-450,400])
ax4 = plt.subplot(224)
plt.plot(time,F)
plt.title('Prediction error variance'); plt.ylim([20000,32500])
fig_count = output_fig()
#-- State smoothing --#
alphahat,V,r,N = ssm.statesmo(y,llm)
# Reshape output for plotting
alphahat = alphahat.squeeze()
V = V.squeeze()
r = r.squeeze()
N = N.squeeze()
fig = plt.figure(num='Smoothed state')
ax1 = plt.subplot(221)
plt.plot(time,y.tolist()[0],'r:',label='nile')
plt.plot(time,alphahat,label='smo. state')
plt.plot(time,alphahat+1.645*np.sqrt(V),'g:',label='90% conf. +')
plt.plot(time,alphahat-1.645*np.sqrt(V),'g:',label='90% conf. -')
plt.title('Smoothed state'); plt.ylim([450,1400]); plt.legend()
ax2 = plt.subplot(222)
plt.plot(time,V)
plt.title('Smoothed state variance'); plt.ylim([2300,4100])
ax3 = plt.subplot(223)
plt.plot(time,r)
plt.title('Smoothing cumulant'); plt.ylim([-0.036,0.024])
ax4 = plt.subplot(224)
plt.plot(time,N)
plt.title('Smoothing variance cumulant'); plt.ylim([0,0.000105])
fig_count = output_fig()
#-- Disturbance smoothing --#
epshat,etahat,epsvarhat,etavarhat = ssm.disturbsmo(y,llm)
# Reshape output for plotting
epshat = epshat.squeeze()
etahat = etahat.squeeze()
epsvarhat = epsvarhat.squeeze()
etavarhat = etavarhat.squeeze()
fig = plt.figure(num='Smoothed disturbances')
ax1 = plt.subplot(221)
plt.plot(time,epshat)
plt.title('Observation error'); plt.ylim([-360,280])
ax2 = plt.subplot(222)
plt.plot(time,epsvarhat)
plt.title('Observation error variance'); plt.ylim([2300,4100])
ax3 = plt.subplot(223)
plt.plot(time,etahat)
plt.title('State error'); plt.ylim([-50,35])
ax4 = plt.subplot(224)
plt.plot(time,etavarhat)
plt.title('State error variance'); plt.ylim([1225,1475])
fig_count = output_fig()
#-- Simulation smoothing --#
NN = 5 if SILENT_OUTPUT else 1
for ii in range(NN):
alphatilde,epstilde,etatilde,alphaplus = ssm.simsmo(1,y,llm)
alphatilde = alphatilde.squeeze()
epstilde = epstilde.squeeze()
etatilde = etatilde.squeeze()
alphaplus = alphaplus.squeeze()
fig = plt.figure(num='Simulation')
ax1 = plt.subplot(221)
plt.plot(time,alphahat,label='samp. state')
plt.scatter(time,alphaplus+alphahat[0]-alphaplus[0],8,'r','s','filled',label='nile')
plt.title('Unconditioned sampled state'); plt.legend()
ax2 = plt.subplot(222)
plt.plot(time, alphahat,label='disp. samp. state')
plt.scatter(time,alphatilde, 8, 'r', 's', 'filled',label='nile')
plt.title('Conditioned sampled state'); plt.ylim([740,1160]); plt.legend()
ax3 = plt.subplot(223)
plt.plot(time, epshat,label='smo. obs. disturb.')
plt.scatter(time,epstilde,8,'r','s','filled',label='samp. obs. disturb.')
plt.title('Conditioned sampled observation error'); plt.ylim([-440,280]); plt.legend()
ax4 = plt.subplot(224)
plt.plot(time, etahat,label='smo. state disturb.')
plt.scatter(time,etatilde,8,'r','s','filled',label='samp. state disturb.')
plt.title('Conditioned sampled state error'); plt.ylim([-440,280]); plt.legend()
if SILENT_OUTPUT:
if ii==0: fig_count += 1
plt.savefig('demo_nile'+run_name+("_out%02d-"%fig_count)+str(ii)+'.png')
plt.close()
else:
plt.show()
#-- Missing Observations --#
ymis = y.astype(float).copy()
ymis[:,list(range(21,41))+list(range(61,81))] = np.nan
amis,Pmis = ssm.kalman(ymis,llm)[:2]
alphahatmis,Vmis = ssm.statesmo(ymis,llm)[:2]
amis = amis.squeeze()
Pmis = Pmis.squeeze()
alphahatmis = alphahatmis.squeeze()
Vmis = Vmis.squeeze()
fig = plt.figure(num='Filtering and smoothing of data with missing observations')
ax1 = plt.subplot(221)
plt.plot(time,ymis.tolist()[0],'r:',label='nile w/ miss. values')
plt.plot(time+[1971],amis,label='filt. state')
plt.title('Filtered state (extrapolation)'); plt.ylim([450,1400]); plt.legend()
ax2 = plt.subplot(222)
plt.plot(time+[1971],Pmis)
plt.title('Filtered state variance'); plt.ylim([4000,36000])
ax3 = plt.subplot(223)
plt.plot(time, ymis.tolist()[0], 'r:',label='nile w/ miss. values')
plt.plot(time, alphahatmis,label='smo. state')
plt.title('Smoothed state (interpolation)'); plt.ylim([450,1400]); plt.legend()
ax4 = plt.subplot(224)
plt.plot(time, Vmis)
plt.title('Smoothed state variance'); plt.ylim([2000,10000])
fig_count = output_fig()
#-- Forecasting (equivalent to future missing values) --#
yforc = np.hstack([y,np.tile(np.nan,(1,50))])
aforc,Pforc,vforc,Fforc = ssm.kalman(yforc,llm)
# Reshape output for plotting
aforc = aforc.squeeze()
Pforc = Pforc.squeeze()
sqrtPforc = np.sqrt(Pforc)
vforc = vforc.squeeze()
Fforc = Fforc.squeeze()
fig = plt.figure(num='Forecasting')
ax1 = plt.subplot(221)
time2 = time + list(range(1972,2022))
plt.plot(time2, yforc.tolist()[0], 'r:',label='nile')
plt.plot(time2+[2023], aforc,label='forecast')
plt.plot(time2+[2023], np.hstack([np.tile(np.nan,len(time)),aforc[-51:]+0.675*sqrtPforc[-51:]]), 'g:',label='50% conf. +')
plt.title('State forecast'); plt.xlim([1868,2026]); plt.ylim([450,1400])
plt.plot(time2+[2023], np.hstack([np.tile(np.nan,len(time)),aforc[-51:]-0.675*sqrtPforc[-51:]]), 'g:',label='50% conf. -')
plt.title('State forecast'); plt.xlim([1868,2026]); plt.ylim([450,1400]); plt.legend()
ax2 = plt.subplot(222)
plt.plot(time2+[2023], Pforc)
plt.title('State variance'); plt.xlim([1868,2026]); plt.ylim([4000,80000])
ax3 = plt.subplot(223)
plt.plot(time2+[2023], aforc)
plt.title('Observation forecast'); plt.xlim([1868,2026]); plt.ylim([700,1200])
ax4 = plt.subplot(224)
plt.plot(time2, Fforc)
plt.title('Observation forecast variance'); plt.xlim([1868,2026]); plt.ylim([20000,96000])
fig_count = output_fig()
fout.write('\n')
if SILENT_OUTPUT: fout.close()
| 0.033368 |
import os
import logging
import stat
import thread
log = logging.getLogger(__name__)
class mqCache():
def __init__( self, root ):
self.root = root
""" Fetch a BLOB
"""
def get(self, id):
if not self.is_available( id ):
return None
try:
hndFile = open( self.construct_name( id ), "rb" )
bytes = hndFile.read()
hndFile.close()
except:
return None
return bytes
""" Add/Update a BLOB
"""
def put(self, id, data):
try:
os.makedirs( self.construct_path( id ) )
except:
pass
if not data:
return None
tmp_filename = self.root + ".tmp/"
try:
os.makedirs(tmp_filename)
except:
pass
# construct temporary file name using PID and thread ID so that
# concurrent requests can't overwrite each other.
tmp_filename += "." + str(os.getpid()) + "_" + str(thread.get_ident())
""" For a atomic copy, we are using a temp file and then moving it using the os.rename
"""
log.debug ("saving to " + tmp_filename)
with open(tmp_filename, "wb") as fout:
fout.write(data)
fout.close()
filename = self.construct_name( id )
log.debug ("saving to " + filename )
os.rename(tmp_filename, filename)
return None
""" Get location of a BLOB
"""
def get_location(self, id):
return self.construct_path( id )
""" Check Availability
"""
def is_available(self, id):
return os.path.exists( self.construct_name( id ) )
""" get modified time
"""
def get_modified_time(self, id):
if self.is_available( id ):
return os.stat( self.construct_name( id ) )[stat.ST_MTIME]
return None
""" set modified time
"""
def set_modified_time(self, id, time):
if self.is_available( id ):
os.utime( self.construct_name( id ), ( time, time ) )
return True
return False
def path_split(self, i):
return [ "%03d" % x for x in [i / 1000000, (i / 1000) % 1000, i % 1000] ]
def construct_path( self, id ):
return os.path.join( self.root, id.replica, id.type, str( id.zoom ), *(self.path_split(id.x) + self.path_split(id.y)[:-1]))
def construct_name( self, id ):
return os.path.join( self.construct_path( id ), "%s.%s" % ( self.path_split(id.y)[-1], id.extension ) )
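# Illustrative usage sketch (not part of the original module). It assumes the
# tile id object exposes the attributes that construct_path()/construct_name()
# read: replica, type, zoom, x, y and extension; the TileID namedtuple below
# is a hypothetical stand-in for whatever object the real caller provides.
if __name__ == "__main__":
    from collections import namedtuple
    TileID = namedtuple("TileID", "replica type zoom x y extension")
    cache = mqCache("/tmp/mqcache/")
    tile = TileID(replica="default", type="map", zoom=12,
                  x=1234567, y=7654321, extension="png")
    cache.put(tile, b"fake tile bytes")   # written atomically via a temp file
    print(cache.is_available(tile))       # True if the write succeeded
    print(cache.get_location(tile))       # directory that holds the blob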
| 0.049016 |
import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
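# The classes below follow the airspeed velocity (asv) benchmark conventions:
# setup()/setup_cache() prepare fixtures, methods prefixed with time_ are
# timed, and params/param_names parametrize each benchmark class.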
class NumericSeriesIndexing(object):
params = [
(Int64Index, UInt64Index, Float64Index),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indices = {
'unique_monotonic_inc': index(range(N)),
'nonunique_monotonic_inc': index(
list(range(55)) + [54] + list(range(55, N - 1))),
}
self.data = Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(10000)
self.array_list = self.array.tolist()
def time_getitem_scalar(self, index, index_structure):
self.data[800000]
def time_getitem_slice(self, index, index_structure):
self.data[:800000]
def time_getitem_list_like(self, index, index_structure):
self.data[[800000]]
def time_getitem_array(self, index, index_structure):
self.data[self.array]
def time_getitem_lists(self, index, index_structure):
self.data[self.array_list]
def time_iloc_array(self, index, index_structure):
self.data.iloc[self.array]
def time_iloc_list_like(self, index, index_structure):
self.data.iloc[[800000]]
def time_iloc_scalar(self, index, index_structure):
self.data.iloc[800000]
def time_iloc_slice(self, index, index_structure):
self.data.iloc[:800000]
def time_ix_array(self, index, index_structure):
self.data.ix[self.array]
def time_ix_list_like(self, index, index_structure):
self.data.ix[[800000]]
def time_ix_scalar(self, index, index_structure):
self.data.ix[800000]
def time_ix_slice(self, index, index_structure):
self.data.ix[:800000]
def time_loc_array(self, index, index_structure):
self.data.loc[self.array]
def time_loc_list_like(self, index, index_structure):
self.data.loc[[800000]]
def time_loc_scalar(self, index, index_structure):
self.data.loc[800000]
def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
class NonNumericSeriesIndexing(object):
params = [
('string', 'datetime'),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indexes = {'string': tm.makeStringIndex(N),
'datetime': date_range('1900', periods=N, freq='s')}
index = indexes[index]
if index_structure == 'nonunique_monotonic_inc':
index = index.insert(item=index[2], loc=2)[:-1]
self.s = Series(np.random.rand(N), index=index)
self.lbl = index[80000]
def time_getitem_label_slice(self, index, index_structure):
self.s[:self.lbl]
def time_getitem_pos_slice(self, index, index_structure):
self.s[:80000]
def time_get_value(self, index, index_structure):
with warnings.catch_warnings(record=True):
self.s.get_value(self.lbl)
def time_getitem_scalar(self, index, index_structure):
self.s[self.lbl]
def time_getitem_list_like(self, index, index_structure):
self.s[[self.lbl]]
class DataFrameStringIndexing(object):
def setup(self):
index = tm.makeStringIndex(1000)
columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=index,
columns=columns)
self.idx_scalar = index[100]
self.col_scalar = columns[10]
self.bool_indexer = self.df[self.col_scalar] > 0
self.bool_obj_indexer = self.bool_indexer.astype(object)
def time_get_value(self):
with warnings.catch_warnings(record=True):
self.df.get_value(self.idx_scalar, self.col_scalar)
def time_ix(self):
self.df.ix[self.idx_scalar, self.col_scalar]
def time_loc(self):
self.df.loc[self.idx_scalar, self.col_scalar]
def time_getitem_scalar(self):
self.df[self.col_scalar][self.idx_scalar]
def time_boolean_rows(self):
self.df[self.bool_indexer]
def time_boolean_rows_object(self):
self.df[self.bool_obj_indexer]
class DataFrameNumericIndexing(object):
def setup(self):
self.idx_dupe = np.array(range(30)) * 99
self.df = DataFrame(np.random.randn(10000, 5))
self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])
self.bool_indexer = [True] * 5000 + [False] * 5000
def time_iloc_dups(self):
self.df_dup.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df_dup.loc[self.idx_dupe]
def time_iloc(self):
self.df.iloc[:100, 0]
def time_loc(self):
self.df.loc[:100, 0]
def time_bool_indexer(self):
self.df[self.bool_indexer]
class Take(object):
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': Int64Index(np.arange(N)),
'datetime': date_range('2011-01-01', freq='S', periods=N)}
index = indexes[index]
self.s = Series(np.random.rand(N), index=index)
self.indexer = [True, False, True, True, False] * 20000
def time_take(self, index):
self.s.take(self.indexer)
class MultiIndexing(object):
def setup(self):
mi = MultiIndex.from_product([range(1000), range(1000)])
self.s = Series(np.random.randn(1000000), index=mi)
self.df = DataFrame(self.s)
n = 100000
self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000),
n),
'B': np.random.choice(range(10, 400), n),
'C': np.random.choice(range(1, 150), n),
'D': np.random.choice(range(10000, 45000), n),
'x': np.random.choice(range(400), n),
'y': np.random.choice(range(25), n)})
self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]
self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index()
def time_series_ix(self):
self.s.ix[999]
def time_frame_ix(self):
self.df.ix[999]
def time_index_slice(self):
self.mdt.loc[self.idx, :]
class IntervalIndexing(object):
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
monotonic = Series(np.arange(1000000), index=idx)
return monotonic
def time_getitem_scalar(self, monotonic):
monotonic[80000]
def time_loc_scalar(self, monotonic):
monotonic.loc[80000]
def time_getitem_list(self, monotonic):
monotonic[80000:]
def time_loc_list(self, monotonic):
monotonic.loc[80000:]
class CategoricalIndexIndexing(object):
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**5
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': CategoricalIndex(values),
'monotonic_decr': CategoricalIndex(reversed(values)),
'non_monotonic': CategoricalIndex(list('abc' * N))}
self.data = indices[index]
self.int_scalar = 10000
self.int_list = list(range(10000))
self.cat_scalar = 'b'
self.cat_list = ['a', 'c']
def time_getitem_scalar(self, index):
self.data[self.int_scalar]
def time_getitem_slice(self, index):
self.data[:self.int_scalar]
def time_getitem_list_like(self, index):
self.data[[self.int_scalar]]
def time_getitem_list(self, index):
self.data[self.int_list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
def time_get_loc_scalar(self, index):
self.data.get_loc(self.cat_scalar)
def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
class PanelIndexing(object):
def setup(self):
with warnings.catch_warnings(record=True):
self.p = Panel(np.random.randn(100, 100, 100))
self.inds = range(0, 100, 10)
def time_subset(self):
with warnings.catch_warnings(record=True):
self.p.ix[(self.inds, self.inds, self.inds)]
class MethodLookup(object):
def setup_cache(self):
s = Series()
return s
def time_lookup_iloc(self, s):
s.iloc
def time_lookup_ix(self, s):
s.ix
def time_lookup_loc(self, s):
s.loc
class GetItemSingleColumn(object):
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
self.df_int_col = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df_string_col['A']
def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
class AssignTimeseriesIndex(object):
def setup(self):
N = 100000
idx = date_range('1/1/2000', periods=N, freq='H')
self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
class InsertColumns(object):
def setup(self):
self.N = 10**3
self.df = DataFrame(index=range(self.N))
def time_insert(self):
np.random.seed(1234)
for i in range(100):
self.df.insert(0, i, np.random.randn(self.N),
allow_duplicates=True)
def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
from .pandas_vb_common import setup # noqa: F401
| 0 |
#!/usr/bin/env python
#
# This ensures that the deletion statements come after the regular log
# statements for each node.
#
#
#
import sys, math, re
from sets import Set
from datetime import datetime , timedelta
infilename = sys.argv[1]
icfile = open( infilename, 'r') # r for reading
outfilename = infilename + 'reordered'
ofile = open( outfilename, 'w') # w for writing
nodelist = []
#parse times
regexp = re.compile('(\d*)-(\d*)-(\d*)\W(\d*):(\d*):(\d*).(\d*)')
rev_line_list = []
for line in icfile:
rev_line_list.insert(0,line)
# time --> connection disconnection log statement list
log_dictionary = {}
# node --> deletion time
deletion_time = {}
last_log = {}
one_sec = timedelta(seconds=1)
for line in rev_line_list:
parsed_line = line.split()
tmp_date_time = None
datestring = "%s %s" % (parsed_line[0],parsed_line[1])
match = regexp.match(datestring)
if match:
year = int(match.group(1) )
month = int( match.group(2) )
day = int(match.group(3) )
hour = int( match.group(4) )
min = int( match.group(5) )
sec = int( match.group(6) )
mic = int( match.group(7) )
tmp_date_time = datetime(year,month,day,hour,min,sec,mic)
else:
print "NO MATCH"
if parsed_line[2] == 'deletion' :
if parsed_line[4] not in deletion_time:
deletion_time[parsed_line[4]] = None
print "Added to deletion table", parsed_line[4]
else:
pass
#print "ERROR- multiple deletion", parsed_line[4]
else:
tmp_list = []
if tmp_date_time in log_dictionary:
tmp_list = log_dictionary[tmp_date_time]
tmp_list.append(line)
log_dictionary[tmp_date_time] = tmp_list
else:
tmp_list.append(line)
log_dictionary[tmp_date_time] = tmp_list
l_a = parsed_line[4]
r_a = parsed_line[5]
#print parsed_line, l_a , r_a
if l_a not in last_log:
last_log[l_a] = tmp_date_time + one_sec
if r_a not in last_log:
last_log[r_a] = tmp_date_time + one_sec
del_keys = deletion_time.keys()
for dk in del_keys:
tmp_list = []
if dk in last_log:
t_time = last_log[dk]
deletion_line = "%s deletion deletion %s %s deletion\n" % \
(t_time.isoformat(' ') ,dk ,dk)
if t_time in log_dictionary:
tmp_list = log_dictionary[ t_time ]
tmp_list.append(deletion_line)
log_dictionary[t_time] = tmp_list
else:
tmp_list.append(deletion_line)
log_dictionary[t_time] = tmp_list
sorted_keys = log_dictionary.keys()
sorted_keys.sort()
for t_dt in sorted_keys:
t_l = log_dictionary[t_dt]
for line in t_l:
ofile.write(line)
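# Example invocation (illustrative; the script name is whatever this file is
# saved as). Given "connection.log" as input, the reordered output is written
# next to it as "connection.logreordered":
#   python reorder_deletions.py connection.log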
| 0.030864 |
#!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
The class task_gen encapsulates the creation of task objects (low-level code)
The instances can have various parameters, but the creation of task nodes (Task.py)
is delayed. To achieve this, various methods are called from the method "apply"
The class task_gen contains lots of methods, and a configuration table:
* the methods to call (self.meths) can be specified dynamically (removing, adding, ..)
* the order of the methods (self.prec or by default task_gen.prec) is configurable
* new methods can be inserted dynamically without pasting old code
Additionally, task_gen provides the method apply_core
* file extensions are mapped to methods: def meth(self, name_or_node)
* if a mapping is not found in self.mappings, it is searched in task_gen.mappings
* when called, the functions may modify self.allnodes to re-add source to process
* the mappings can map an extension or a filename (see the code below)
WARNING: subclasses must reimplement the clone method
"""
import os, traceback, copy
import Build, Task, Utils, Logs, Options
from Logs import debug, error, warn
from Constants import *
typos = {
'sources':'source',
'targets':'target',
'include':'includes',
'define':'defines',
'importpath':'importpaths',
'install_var':'install_path',
'install_subdir':'install_path',
'inst_var':'install_path',
'inst_dir':'install_path',
'feature':'features',
}
class register_obj(type):
"""no decorators for classes, so we use a metaclass
we store into task_gen.classes the classes that inherit task_gen
and whose names end in '_taskgen'
"""
def __init__(cls, name, bases, dict):
super(register_obj, cls).__init__(name, bases, dict)
name = cls.__name__
suffix = '_taskgen'
if name.endswith(suffix):
task_gen.classes[name.replace(suffix, '')] = cls
class task_gen(object):
"""
Most methods are of the form 'def meth(self):' without any parameters
there are many of them, and they do many different things:
* task creation
* task results installation
* environment modification
* attribute addition/removal
The inheritance approach is complicated
* mixing several languages at once
* subclassing is needed even for small changes
* inserting new methods is complicated
This new class uses a configuration table:
* adding new methods easily
* obtaining the order in which to call the methods
* postponing the method calls (post() -> apply)
Additionally, a 'traits' static attribute is provided:
* this list contains methods
* the methods can remove or add methods from self.meths
Example1: the attribute 'staticlib' is set on an instance
a method set in the list of traits is executed when the
instance is posted, it finds that flag and adds another method for execution
Example2: a method set in the list of traits finds the msvc
compiler (from self.env['MSVC']==1); more methods are added to self.meths
"""
__metaclass__ = register_obj
mappings = {}
mapped = {}
prec = Utils.DefaultDict(list)
traits = Utils.DefaultDict(set)
classes = {}
def __init__(self, *kw, **kwargs):
self.prec = Utils.DefaultDict(list)
"map precedence of function names to call"
# so we will have to play with directed acyclic graphs
# detect cycles, etc
self.source = ''
self.target = ''
		# list of methods to execute - do not touch it by hand unless you know what you are doing
self.meths = []
# list of mappings extension -> function
self.mappings = {}
# list of features (see the documentation on traits)
self.features = list(kw)
# not always a good idea
self.tasks = []
self.default_chmod = O644
self.default_install_path = None
# kind of private, beware of what you put in it, also, the contents are consumed
self.allnodes = []
self.bld = kwargs.get('bld', Build.bld)
self.env = self.bld.env.copy()
self.path = self.bld.path # emulate chdir when reading scripts
self.name = '' # give a name to the target (static+shlib with the same targetname ambiguity)
# provide a unique id
self.idx = self.bld.idx[self.path.id] = self.bld.idx.get(self.path.id, 0) + 1
for key, val in kwargs.iteritems():
setattr(self, key, val)
self.bld.task_manager.add_task_gen(self)
self.bld.all_task_gen.append(self)
def __str__(self):
return ("<task_gen '%s' of type %s defined in %s>"
% (self.name or self.target, self.__class__.__name__, str(self.path)))
def __setattr__(self, name, attr):
real = typos.get(name, name)
if real != name:
warn('typo %s -> %s' % (name, real))
if Logs.verbose > 0:
traceback.print_stack()
object.__setattr__(self, real, attr)
def to_list(self, value):
"helper: returns a list"
if isinstance(value, str): return value.split()
else: return value
def apply(self):
"order the methods to execute using self.prec or task_gen.prec"
keys = set(self.meths)
# add the methods listed in the features
self.features = Utils.to_list(self.features)
for x in self.features + ['*']:
st = task_gen.traits[x]
if not st:
warn('feature %r does not exist - bind at least one method to it' % x)
keys.update(st)
# copy the precedence table
prec = {}
prec_tbl = self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x] = prec_tbl[x]
# elements disconnected
tmp = []
for a in keys:
for x in prec.values():
if a in x: break
else:
tmp.append(a)
# topological sort
out = []
while tmp:
e = tmp.pop()
if e in keys: out.append(e)
try:
nlst = prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec: raise Utils.WafError("graph has a cycle %s" % str(prec))
out.reverse()
self.meths = out
# then we run the methods in order
debug('task_gen: posting %s %d', self, id(self))
for x in out:
try:
v = getattr(self, x)
except AttributeError:
raise Utils.WafError("tried to retrieve %s which is not a valid method" % x)
debug('task_gen: -> %s (%d)', x, id(self))
v()
def post(self):
"runs the code to create the tasks, do not subclass"
if not self.name:
if isinstance(self.target, list):
self.name = ' '.join(self.target)
else:
self.name = self.target
if getattr(self, 'posted', None):
#error("OBJECT ALREADY POSTED" + str( self))
return
self.apply()
debug('task_gen: posted %s', self.name)
self.posted = True
def get_hook(self, ext):
try: return self.mappings[ext]
except KeyError:
try: return task_gen.mappings[ext]
except KeyError: return None
# TODO waf 1.6: always set the environment
# TODO waf 1.6: create_task(self, name, inputs, outputs)
def create_task(self, name, src=None, tgt=None, env=None):
env = env or self.env
task = Task.TaskBase.classes[name](env.copy(), generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def name_to_obj(self, name):
return self.bld.name_to_obj(name, self.env)
def find_sources_in_dirs(self, dirnames, excludes=[], exts=[]):
"""
The attributes "excludes" and "exts" must be lists to avoid the confusion
find_sources_in_dirs('a', 'b', 'c') <-> find_sources_in_dirs('a b c')
do not use absolute paths
do not use paths outside of the source tree
		files or folders whose names begin with '.' are not returned
# TODO: remove in Waf 1.6
"""
err_msg = "'%s' attribute must be a list"
if not isinstance(excludes, list):
raise Utils.WscriptError(err_msg % 'excludes')
if not isinstance(exts, list):
raise Utils.WscriptError(err_msg % 'exts')
lst = []
		# make sure dirnames is a list; this helps with directory names containing spaces
dirnames = self.to_list(dirnames)
ext_lst = exts or list(self.mappings.keys()) + list(task_gen.mappings.keys())
for name in dirnames:
anode = self.path.find_dir(name)
if not anode or not anode.is_child_of(self.bld.srcnode):
raise Utils.WscriptError("Unable to use '%s' - either because it's not a relative path" \
", or it's not child of '%s'." % (name, self.bld.srcnode))
self.bld.rescan(anode)
for name in self.bld.cache_dir_contents[anode.id]:
# ignore hidden files
if name.startswith('.'):
continue
(base, ext) = os.path.splitext(name)
if ext in ext_lst and not name in lst and not name in excludes:
lst.append((anode.relpath_gen(self.path) or '.') + os.path.sep + name)
lst.sort()
self.source = self.to_list(self.source)
if not self.source: self.source = lst
else: self.source += lst
def clone(self, env):
""
newobj = task_gen(bld=self.bld)
for x in self.__dict__:
if x in ['env', 'bld']:
continue
elif x in ["path", "features"]:
setattr(newobj, x, getattr(self, x))
else:
setattr(newobj, x, copy.copy(getattr(self, x)))
newobj.__class__ = self.__class__
if isinstance(env, str):
newobj.env = self.bld.all_envs[env].copy()
else:
newobj.env = env.copy()
return newobj
def get_inst_path(self):
return getattr(self, '_install_path', getattr(self, 'default_install_path', ''))
def set_inst_path(self, val):
self._install_path = val
install_path = property(get_inst_path, set_inst_path)
def get_chmod(self):
return getattr(self, '_chmod', getattr(self, 'default_chmod', O644))
def set_chmod(self, val):
self._chmod = val
chmod = property(get_chmod, set_chmod)
def declare_extension(var, func):
try:
for x in Utils.to_list(var):
task_gen.mappings[x] = func
except:
raise Utils.WscriptError('declare_extension takes either a list or a string %r' % var)
task_gen.mapped[func.__name__] = func
def declare_order(*k):
assert(len(k) > 1)
n = len(k) - 1
for i in xrange(n):
f1 = k[i]
f2 = k[i+1]
if not f1 in task_gen.prec[f2]:
task_gen.prec[f2].append(f1)
def declare_chain(name='', action='', ext_in='', ext_out='', reentrant=True, color='BLUE',
install=0, before=[], after=[], decider=None, rule=None, scan=None):
"""
see Tools/flex.py for an example
while i do not like such wrappers, some people really do
"""
action = action or rule
if isinstance(action, str):
act = Task.simple_task_type(name, action, color=color)
else:
act = Task.task_type_from_func(name, action, color=color)
act.ext_in = tuple(Utils.to_list(ext_in))
act.ext_out = tuple(Utils.to_list(ext_out))
act.before = Utils.to_list(before)
act.after = Utils.to_list(after)
act.scan = scan
def x_file(self, node):
if decider:
ext = decider(self, node)
else:
ext = ext_out
if isinstance(ext, str):
out_source = node.change_ext(ext)
if reentrant:
self.allnodes.append(out_source)
elif isinstance(ext, list):
out_source = [node.change_ext(x) for x in ext]
if reentrant:
for i in xrange((reentrant is True) and len(out_source) or reentrant):
self.allnodes.append(out_source[i])
else:
# XXX: useless: it will fail on Utils.to_list above...
raise Utils.WafError("do not know how to process %s" % str(ext))
tsk = self.create_task(name, node, out_source)
if node.__class__.bld.is_install:
tsk.install = install
declare_extension(act.ext_in, x_file)
def bind_feature(name, methods):
lst = Utils.to_list(methods)
task_gen.traits[name].update(lst)
"""
All the following decorators are registration decorators, i.e. they add an attribute to the current class
(task_gen and its derivatives), with same name as func, which points to func itself.
For example:
@taskgen
def sayHi(self):
print("hi")
Now taskgen.sayHi() may be called
If python were really smart, it could infer itself the order of methods by looking at the
attributes. A prerequisite for execution is to have the attribute set before.
Intelligent compilers binding aspect-oriented programming and parallelization, what a nice topic for studies.
"""
def taskgen(func):
setattr(task_gen, func.__name__, func)
return func
def feature(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for name in k:
task_gen.traits[name].update([func.__name__])
return func
return deco
def before(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
def after(*k):
def deco(func):
setattr(task_gen, func.__name__, func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
def extension(var):
def deco(func):
setattr(task_gen, func.__name__, func)
try:
for x in Utils.to_list(var):
task_gen.mappings[x] = func
except:
raise Utils.WafError('extension takes either a list or a string %r' % var)
task_gen.mapped[func.__name__] = func
return func
return deco
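# Illustrative sketch (an assumption, not taken from the waf sources): how a
# tool could use the decorators above to register an extension hook and a
# feature method. The '.demo' extension, the 'demo' feature and the attribute
# names are hypothetical.
@extension('.demo')
def process_demo(self, node):
    # extension hooks receive each source node whose suffix matches '.demo';
    # this sketch only records the node instead of creating a task
    self.demo_nodes = getattr(self, 'demo_nodes', []) + [node]
@feature('demo')
@before('apply_core')
def apply_demo(self):
    # feature methods run when the generator is posted; @before('apply_core')
    # orders this method ahead of apply_core
    self.demo_opt = getattr(self, 'demo_opt', None)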
# TODO make certain the decorators may be used here
def apply_core(self):
"""Process the attribute source
transform the names into file nodes
try to process the files by name first, later by extension"""
# get the list of folders to use by the scanners
# all our objects share the same include paths anyway
find_resource = self.path.find_resource
for filename in self.to_list(self.source):
# if self.mappings or task_gen.mappings contains a file of the same name
x = self.get_hook(filename)
if x:
x(self, filename)
else:
node = find_resource(filename)
if not node: raise Utils.WafError("source not found: '%s' in '%s'" % (filename, str(self.path)))
self.allnodes.append(node)
for node in self.allnodes:
# self.mappings or task_gen.mappings map the file extension to a function
x = self.get_hook(node.suffix())
if not x:
raise Utils.WafError("Cannot guess how to process %s (got mappings %r in %r) -> try conf.check_tool(..)?" % \
(str(node), self.__class__.mappings.keys(), self.__class__))
x(self, node)
feature('*')(apply_core)
def exec_rule(self):
"""Process the attribute rule, when provided the method apply_core will be disabled
"""
if not getattr(self, 'rule', None):
return
# someone may have removed it already
try:
self.meths.remove('apply_core')
except ValueError:
pass
# get the function and the variables
func = self.rule
vars2 = []
if isinstance(func, str):
# use the shell by default for user-defined commands
(func, vars2) = Task.compile_fun('', self.rule, shell=getattr(self, 'shell', True))
func.code = self.rule
# create the task class
name = getattr(self, 'name', None) or self.target or self.rule
if not isinstance(name, str):
name = str(self.idx)
cls = Task.task_type_from_func(name, func, getattr(self, 'vars', vars2))
# now create one instance
tsk = self.create_task(name)
dep_vars = getattr(self, 'dep_vars', ['ruledeps'])
if dep_vars:
tsk.dep_vars = dep_vars
if isinstance(self.rule, str):
tsk.env.ruledeps = self.rule
else:
# only works if the function is in a global module such as a waf tool
tsk.env.ruledeps = Utils.h_fun(self.rule)
# we assume that the user knows that without inputs or outputs
#if not getattr(self, 'target', None) and not getattr(self, 'source', None):
# cls.quiet = True
if getattr(self, 'target', None):
cls.quiet = True
tsk.outputs = [self.path.find_or_declare(x) for x in self.to_list(self.target)]
if getattr(self, 'source', None):
cls.quiet = True
tsk.inputs = []
for x in self.to_list(self.source):
y = self.path.find_resource(x)
if not y:
raise Utils.WafError('input file %r could not be found (%r)' % (x, self.path.abspath()))
tsk.inputs.append(y)
if self.allnodes:
tsk.inputs.extend(self.allnodes)
if getattr(self, 'scan', None):
cls.scan = self.scan
if getattr(self, 'install_path', None):
tsk.install_path = self.install_path
if getattr(self, 'cwd', None):
tsk.cwd = self.cwd
if getattr(self, 'on_results', None):
Task.update_outputs(cls)
if getattr(self, 'always', None):
Task.always_run(cls)
for x in ['after', 'before', 'ext_in', 'ext_out']:
setattr(cls, x, getattr(self, x, []))
feature('*')(exec_rule)
before('apply_core')(exec_rule)
def sequence_order(self):
"""
add a strict sequential constraint between the tasks generated by task generators
it uses the fact that task generators are posted in order
it will not post objects which belong to other folders
there is also an awesome trick for executing the method in last position
to use:
bld(features='javac seq')
bld(features='jar seq')
to start a new sequence, set the attribute seq_start, for example:
obj.seq_start = True
"""
if self.meths and self.meths[-1] != 'sequence_order':
self.meths.append('sequence_order')
return
if getattr(self, 'seq_start', None):
return
# all the tasks previously declared must be run before these
if getattr(self.bld, 'prev', None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev = self
feature('seq')(sequence_order)
| 0.027312 |
# coding=utf-8
"""Field Mapping Dialog Implementation."""
import logging
from PyQt4.QtCore import pyqtSignature, pyqtSlot, QSettings
from PyQt4.QtGui import (
QDialog, QHBoxLayout, QLabel, QDialogButtonBox, QMessageBox, QIcon)
from qgis.gui import QgsMapLayerComboBox, QgsMapLayerProxyModel
from parameters.parameter_exceptions import InvalidValidationException
from safe.common.exceptions import (
NoKeywordsFoundError,
KeywordNotFoundError,
MetadataReadError,
InaSAFEError)
from safe.definitions.constants import RECENT
from safe.definitions.layer_purposes import (
layer_purpose_exposure, layer_purpose_hazard)
from safe.definitions.utilities import get_field_groups
from safe.gui.tools.help.field_mapping_help import field_mapping_help
from safe.gui.widgets.field_mapping_widget import FieldMappingWidget
from safe.utilities.default_values import set_inasafe_default_value_qsetting
from safe.utilities.i18n import tr
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.qgis_utilities import display_warning_message_box
from safe.utilities.resources import (
get_ui_class, html_footer, html_header, resources_path)
from safe.utilities.unicode import get_string
from safe.utilities.utilities import get_error_message
FORM_CLASS = get_ui_class('field_mapping_dialog_base.ui')
LOGGER = logging.getLogger('InaSAFE')
class FieldMappingDialog(QDialog, FORM_CLASS):
"""Dialog implementation class for the InaSAFE field mapping tool."""
def __init__(self, parent=None, iface=None, setting=None):
"""Constructor."""
QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle(self.tr('InaSAFE Field Mapping Tool'))
icon = resources_path('img', 'icons', 'show-mapping-tool.svg')
self.setWindowIcon(QIcon(icon))
self.parent = parent
self.iface = iface
if setting is None:
setting = QSettings()
self.setting = setting
self.keyword_io = KeywordIO()
self.layer = None
self.metadata = {}
self.layer_input_layout = QHBoxLayout()
self.layer_label = QLabel(tr('Layer'))
self.layer_combo_box = QgsMapLayerComboBox()
# Filter only for Polygon and Point
self.layer_combo_box.setFilters(
QgsMapLayerProxyModel.PolygonLayer |
QgsMapLayerProxyModel.PointLayer)
        # Filter out layers that don't have any field groups
excepted_layers = []
for i in range(self.layer_combo_box.count()):
layer = self.layer_combo_box.layer(i)
try:
keywords = self.keyword_io.read_keywords(layer)
except (KeywordNotFoundError, NoKeywordsFoundError):
excepted_layers.append(layer)
continue
layer_purpose = keywords.get('layer_purpose')
if not layer_purpose:
excepted_layers.append(layer)
continue
if layer_purpose == layer_purpose_exposure['key']:
layer_subcategory = keywords.get('exposure')
elif layer_purpose == layer_purpose_hazard['key']:
layer_subcategory = keywords.get('hazard')
else:
layer_subcategory = None
field_groups = get_field_groups(layer_purpose, layer_subcategory)
if len(field_groups) == 0:
excepted_layers.append(layer)
continue
self.layer_combo_box.setExceptedLayerList(excepted_layers)
# Select the active layer.
if self.iface.activeLayer():
found = self.layer_combo_box.findText(
self.iface.activeLayer().name())
if found > -1:
self.layer_combo_box.setLayer(self.iface.activeLayer())
self.field_mapping_widget = None
self.main_stacked_widget.setCurrentIndex(1)
# Input
self.layer_input_layout.addWidget(self.layer_label)
self.layer_input_layout.addWidget(self.layer_combo_box)
self.header_label = QLabel()
self.header_label.setWordWrap(True)
self.main_layout.addWidget(self.header_label)
self.main_layout.addLayout(self.layer_input_layout)
# Signal
self.layer_combo_box.layerChanged.connect(self.set_layer)
if self.layer_combo_box.currentLayer():
self.set_layer(self.layer_combo_box.currentLayer())
# Set up things for context help
self.help_button = self.button_box.button(QDialogButtonBox.Help)
# Allow toggling the help button
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self.help_toggled)
# Set up things for ok button
self.ok_button = self.button_box.button(QDialogButtonBox.Ok)
self.ok_button.clicked.connect(self.accept)
# Set up things for cancel button
self.cancel_button = self.button_box.button(QDialogButtonBox.Cancel)
self.cancel_button.clicked.connect(self.reject)
def set_layer(self, layer=None, keywords=None):
"""Set layer and update UI accordingly.
:param layer: A QgsVectorLayer.
:type layer: QgsVectorLayer
:param keywords: Keywords for the layer.
:type keywords: dict, None
"""
if self.field_mapping_widget is not None:
self.field_mapping_widget.setParent(None)
self.field_mapping_widget.close()
self.field_mapping_widget.deleteLater()
self.main_layout.removeWidget(self.field_mapping_widget)
if layer:
self.layer = layer
else:
self.layer = self.layer_combo_box.currentLayer()
if not self.layer:
return
if keywords is not None:
self.metadata = keywords
else:
# Always read from metadata file.
try:
self.metadata = self.keyword_io.read_keywords(self.layer)
except (
NoKeywordsFoundError,
KeywordNotFoundError,
MetadataReadError) as e:
raise e
if 'inasafe_default_values' not in self.metadata:
self.metadata['inasafe_default_values'] = {}
if 'inasafe_fields' not in self.metadata:
self.metadata['inasafe_fields'] = {}
self.field_mapping_widget = FieldMappingWidget(
parent=self, iface=self.iface)
self.field_mapping_widget.set_layer(self.layer, self.metadata)
self.field_mapping_widget.show()
self.main_layout.addWidget(self.field_mapping_widget)
# Set header label
group_names = [
self.field_mapping_widget.tabText(i) for i in range(
self.field_mapping_widget.count())]
if len(group_names) == 0:
header_text = tr(
'There is no field group for this layer. Please select '
'another layer.')
self.header_label.setText(header_text)
return
elif len(group_names) == 1:
pretty_group_name = group_names[0]
elif len(group_names) == 2:
pretty_group_name = group_names[0] + tr(' and ') + group_names[1]
else:
pretty_group_name = ', '.join(group_names[:-1])
pretty_group_name += tr(', and {0}').format(group_names[-1])
header_text = tr(
'Please fill the information for every tab to determine the '
'attribute for {0} group.').format(pretty_group_name)
self.header_label.setText(header_text)
@pyqtSlot()
@pyqtSignature('bool') # prevents actions being handled twice
def help_toggled(self, flag):
"""Show or hide the help tab in the stacked widget.
.. versionadded: 3.2.1
:param flag: Flag indicating whether help should be shown or hidden.
:type flag: bool
"""
if flag:
self.help_button.setText(self.tr('Hide Help'))
self.show_help()
else:
self.help_button.setText(self.tr('Show Help'))
self.hide_help()
def hide_help(self):
"""Hide the usage info from the user.
.. versionadded: 3.2.1
"""
self.main_stacked_widget.setCurrentIndex(1)
def show_help(self):
"""Show usage info to the user."""
# Read the header and footer html snippets
self.main_stacked_widget.setCurrentIndex(0)
header = html_header()
footer = html_footer()
string = header
message = field_mapping_help()
string += message.to_html()
string += footer
self.help_web_view.setHtml(string)
def save_metadata(self):
"""Save metadata based on the field mapping state."""
metadata = self.field_mapping_widget.get_field_mapping()
for key, value in metadata['fields'].items():
# Delete the key if it's set to None
if key in self.metadata['inasafe_default_values']:
self.metadata['inasafe_default_values'].pop(key)
if value is None or value == []:
if key in self.metadata['inasafe_fields']:
self.metadata['inasafe_fields'].pop(key)
else:
self.metadata['inasafe_fields'][key] = value
for key, value in metadata['values'].items():
# Delete the key if it's set to None
if key in self.metadata['inasafe_fields']:
self.metadata['inasafe_fields'].pop(key)
if value is None:
if key in self.metadata['inasafe_default_values']:
self.metadata['inasafe_default_values'].pop(key)
else:
self.metadata['inasafe_default_values'][key] = value
# Save metadata
try:
self.keyword_io.write_keywords(
layer=self.layer, keywords=self.metadata)
except InaSAFEError, e:
error_message = get_error_message(e)
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
QMessageBox.warning(
self, self.tr('InaSAFE'),
((self.tr(
'An error was encountered when saving the following '
'keywords:\n %s') % error_message.to_html())))
        # Update the setting for the recent value
if self.metadata.get('inasafe_default_values'):
for key, value in \
self.metadata['inasafe_default_values'].items():
set_inasafe_default_value_qsetting(
self.setting, key, RECENT, value)
def accept(self):
"""Method invoked when OK button is clicked."""
try:
self.save_metadata()
except InvalidValidationException as e:
display_warning_message_box(
self, tr('Invalid Field Mapping'), get_string(e.message))
return
super(FieldMappingDialog, self).accept()
| 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import attachments as attachment
from cinder.api.v3.views import attachments as attachment_views
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import attachments as attachment_policy
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(200)
@validation.schema(attachment.create)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (ie Nova BootFromVolume) the
create can be called with just the volume-uuid and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance, the server_id parameter
referenced below is the UUID of the Instance, for non-nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE all that's required for a reserve is volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment']['instance_uuid']
volume_uuid = body['attachment']['volume_uuid']
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except (exception.NotAuthorized,
exception.InvalidVolume):
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@validation.schema(attachment.update)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment']['connector']
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
        # TODO(jdg): Test this out some more, do we want to return an object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/Deletes the specified attachment, returns a list of any
known shared attachment-id's for the effected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(204)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
context.authorize(attachment_policy.COMPLETE_POLICY,
target_obj=attachment_ref)
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
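# Illustrative request body for the "reserve" flow described in create() above
# (endpoint path and values are placeholders, not taken from this module):
#   POST /v3/{project_id}/attachments
#   {"attachment": {"volume_uuid": "<volume-uuid>",
#                   "instance_uuid": "<server-uuid>",
#                   "connector": null}}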
| 0 |
# Copyright (c) 2006-2021 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pkg_resources import get_distribution, EntryPoint
def add_entry_point(group, name, module, attr):
pybtex_dist = get_distribution('pybtex')
entry_point = EntryPoint(name, module, (attr,), dist=pybtex_dist)
entry_point_map = pybtex_dist.get_entry_map()
entry_point_map.setdefault(group, {})[name] = entry_point
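# Illustrative call (the module and class below are hypothetical; the group
# name follows the pybtex plugin-group convention): register 'mystyle.MyStyle'
# as a formatting style at runtime.
if __name__ == '__main__':
    add_entry_point('pybtex.style.formatting', 'mystyle', 'mystyle', 'MyStyle')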
| 0 |
# encoding: utf-8
"""
Provides a general interface to a *physical* OPC package, such as a zip file.
"""
from __future__ import absolute_import
import os
from zipfile import ZipFile, is_zipfile, ZIP_DEFLATED
from pptx.exceptions import PackageNotFoundError
from .packuri import CONTENT_TYPES_URI
class PhysPkgReader(object):
"""
Factory for physical package reader objects.
"""
def __new__(cls, pkg_file):
# if *pkg_file* is a string, treat it as a path
if isinstance(pkg_file, basestring):
if os.path.isdir(pkg_file):
reader_cls = _DirPkgReader
elif is_zipfile(pkg_file):
reader_cls = _ZipPkgReader
else:
raise PackageNotFoundError(
"Package not found at '%s'" % pkg_file
)
else: # assume it's a stream and pass it to Zip reader to sort out
reader_cls = _ZipPkgReader
return super(PhysPkgReader, cls).__new__(reader_cls)
class PhysPkgWriter(object):
"""
Factory for physical package writer objects.
"""
def __new__(cls, pkg_file):
return super(PhysPkgWriter, cls).__new__(_ZipPkgWriter)
class _DirPkgReader(PhysPkgReader):
"""
Implements |PhysPkgReader| interface for an OPC package extracted into a
directory.
"""
def __init__(self, path):
"""
*path* is the path to a directory containing an expanded package.
"""
super(_DirPkgReader, self).__init__()
self._path = os.path.abspath(path)
def blob_for(self, pack_uri):
"""
Return contents of file corresponding to *pack_uri* in package
directory.
"""
path = os.path.join(self._path, pack_uri.membername)
with open(path, 'rb') as f:
blob = f.read()
return blob
def close(self):
"""
Provides interface consistency with |ZipFileSystem|, but does
        nothing; a directory file system doesn't need closing.
"""
pass
@property
def content_types_xml(self):
"""
Return the `[Content_Types].xml` blob from the package.
"""
return self.blob_for(CONTENT_TYPES_URI)
def rels_xml_for(self, source_uri):
"""
Return rels item XML for source with *source_uri*, or None if the
item has no rels item.
"""
try:
rels_xml = self.blob_for(source_uri.rels_uri)
except IOError:
rels_xml = None
return rels_xml
class _ZipPkgReader(PhysPkgReader):
"""
Implements |PhysPkgReader| interface for a zip file OPC package.
"""
def __init__(self, pkg_file):
super(_ZipPkgReader, self).__init__()
self._zipf = ZipFile(pkg_file, 'r')
def blob_for(self, pack_uri):
"""
Return blob corresponding to *pack_uri*. Raises |ValueError| if no
matching member is present in zip archive.
"""
return self._zipf.read(pack_uri.membername)
def close(self):
"""
Close the zip archive, releasing any resources it is using.
"""
self._zipf.close()
@property
def content_types_xml(self):
"""
Return the `[Content_Types].xml` blob from the zip package.
"""
return self.blob_for(CONTENT_TYPES_URI)
def rels_xml_for(self, source_uri):
"""
Return rels item XML for source with *source_uri* or None if no rels
item is present.
"""
try:
rels_xml = self.blob_for(source_uri.rels_uri)
except KeyError:
rels_xml = None
return rels_xml
class _ZipPkgWriter(PhysPkgWriter):
"""
Implements |PhysPkgWriter| interface for a zip file OPC package.
"""
def __init__(self, pkg_file):
super(_ZipPkgWriter, self).__init__()
self._zipf = ZipFile(pkg_file, 'w', compression=ZIP_DEFLATED)
def close(self):
"""
Close the zip archive, flushing any pending physical writes and
releasing any resources it's using.
"""
self._zipf.close()
def write(self, pack_uri, blob):
"""
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
"""
self._zipf.writestr(pack_uri.membername, blob)
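# Minimal round-trip sketch (illustrative, not part of the original package):
# write a blob into a fresh zip package and read it back. Any object exposing
# a 'membername' attribute satisfies what write()/blob_for() need; real code
# passes PackURI instances. Note the relative import above means this module
# is not meant to be executed directly, so read this as a sketch only.
if __name__ == '__main__':
    class _FakeUri(object):
        def __init__(self, membername):
            self.membername = membername
    uri = _FakeUri('ppt/slides/slide1.xml')
    writer = PhysPkgWriter('demo.pptx')   # resolves to _ZipPkgWriter
    writer.write(uri, '<p:sld/>')
    writer.close()
    reader = PhysPkgReader('demo.pptx')   # resolves to _ZipPkgReader
    print(reader.blob_for(uri))
    reader.close()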
| 0 |
#!/usr/bin/env python3
import subprocess
import atexit
import urllib
from urllib import request
import http.client
class Application(object):
def __init__(self, *a):
self.p = None
def spawn(self):
assert(not self.running())
self.p = subprocess.Popen(['./app/bin/run.sh'])
print(f'Spawned process: {self.p.pid}')
def running(self):
if self.p is None:
return False
return self.p.poll() is None
def stop(self):
if self.running():
print(f'killing {self.p.pid}')
self.p.kill()
self.p.wait()
def __call__(self, env, start_fn):
if not self.running():
self.spawn()
# print(repr(env))
body_stream = env['wsgi.input']
headers = {}
for k,v in env.items():
if k.startswith('HTTP_'):
http_key = k[5:].replace('_', '-')
headers[http_key] = v
# print(repr(headers))
conn = http.client.HTTPConnection("localhost:2055")
conn.request(env['REQUEST_METHOD'], env['RAW_URI'], body=body_stream, headers=headers)
response = conn.getresponse()
start_fn(f'{response.status} {response.version}', response.getheaders())
try:
while True:
# print('reading chunk')
chunk = response.read(1024)
# print('> ' + repr(chunk))
if not chunk:
break
else:
yield chunk
finally:
response.close()
app = Application()
atexit.register(app.stop)
app.spawn()
| 0.033911 |
import sys
def matrixChainOrder(p):
n = len(p)-1
m = {}
s = {}
for i in xrange(1, n+1):
m[(i, i)] = 0
for l in range(2, n+1):
for i in range(1, n-l+2):
j = i+l-1
m[(i, j)] = sys.maxint
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i-1]*p[k]*p[j]
if q < m[(i, j)]:
m[(i, j)] = q
s[(i, j)] = k
return (m, s)
def printOptimalParens(s, i, j):
if i == j:
print ("A[%d"%i)+']',
else:
print "(",
printOptimalParens(s, i, s[i, j])
printOptimalParens(s, s[i, j]+1, j)
print ")",
def matrixMultiply(A, B):
if A.columns != B.rows:
print "incompatible dimensions"
return
else:
        C = [[0 for col in range(B.columns)] for row in range(A.rows)]
for i in range(0, A.rows):
for j in xrange(0, B.columns):
C[i][j] = 0
for k in xrange(0,A.columns):
C[i][j] = C[i][j]+A[i][k]*B[k][j]
return C
def matrixChainMultiply(A, s, i, j):
if i == j:
return A[i]
else:
        return matrixMultiply(matrixChainMultiply(A, s, i, s[i, j]), matrixChainMultiply(A, s, s[i, j]+1, j))
# p = [30,35,15,5,10,20,25]
# (m, s) = matrixChainOrder(p)
# printOptimalParens(s, 1, len(p)-1)
# mul = [[0]*5]*3
# print mul
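# Quick self-check using the values from the commented example above (the
# classic CLRS chain); the optimal cost is 15125 scalar multiplications.
if __name__ == "__main__":
    p = [30, 35, 15, 5, 10, 20, 25]
    (m, s) = matrixChainOrder(p)
    print m[(1, len(p) - 1)]   # expected: 15125
    printOptimalParens(s, 1, len(p) - 1)
    print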
| 0.040517 |
#!/usr/bin/env python3.3
# -*- coding: utf8 -*-
#
# Converts the nmap xml file to the stripped down CVE-Search json
# format
# Copyright (c) 2015 NorthernSec
# Copyright (c) 2015 Pieter-Jan Moreels
# This software is licensed under the Original BSD License
# Imports
import os
import sys
runpath=os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runpath, '..'))
try:
from libnmap.parser import NmapParser
except:
sys.exit("Missing dependencies!")
import argparse
from lib.Toolkit import writeJson
def parseNMap(file=None, string=None):
try:
if file: report = NmapParser.parse_fromfile(file)
elif string: report = NmapParser.parse_fromstring(string)
else: raise(Exception)
except:
raise(Exception)
systems = []
for h in report.hosts:
system = {'mac':h.mac, 'ip':h.address, 'status':h.status, 'hostnames': h.hostnames,
'vendor':h.vendor, 'distance':h.distance}
cpeList = []
for c in h.os_match_probabilities():
for x in c.get_cpe():
cpeList.append(x)
cpeList=list(set(cpeList))
if len(cpeList)>0:
system['cpes']=cpeList
services = []
for s in h.services:
service={'port':s.port, 'banner':s.banner, 'protocol':s.protocol, 'name':s.service,
'state':s.state, 'reason':s.reason}
if s.cpelist:
service['cpe'] = s.cpelist[0].cpestring
services.append(service)
system['services']=services
systems.append(system)
scan={"systems":systems, "scan": {"time": report.endtime,
"type": report._nmaprun["args"]}}
return scan
if __name__ == '__main__':
# argument parser
description='''Converts the nmap xml file to the stripped down
CVE-Search json format'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('inp', metavar='input', type=str, help='Input nmap xml file')
parser.add_argument('out', metavar='output', type=str, help='Output file')
args = parser.parse_args()
# input
try:
syslist=parseNMap(file=args.inp)
except:
exit("Invalid Nmap xml!")
writeJson(args.out, syslist)
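# Example invocation (illustrative; the actual script name is whatever this
# file is saved as):
#   python nmap2cve.py scan.xml scan.json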
| 0.026977 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from sqlalchemy.dialects.postgresql import ARRAY
from indico.core.db import db
from indico.core.db.sqlalchemy.custom.utcdatetime import UTCDateTime
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
class ReservationEditLog(db.Model):
__tablename__ = 'reservation_edit_logs'
__table_args__ = {'schema': 'roombooking'}
id = db.Column(
db.Integer,
primary_key=True
)
timestamp = db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
info = db.Column(
ARRAY(db.String),
nullable=False
)
user_name = db.Column(
db.String,
nullable=False
)
reservation_id = db.Column(
db.Integer,
db.ForeignKey('roombooking.reservations.id'),
nullable=False,
index=True
)
# relationship backrefs:
# - reservation (Reservation.edit_logs)
@return_ascii
def __repr__(self):
return u'<ReservationEditLog({0}, {1}, {2}, {3})>'.format(
self.user_name,
self.reservation_id,
self.timestamp,
self.info
)
| 0 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((14490.9, 3029.12, 3060.83), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((15116.6, 3760.52, 2692.79), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((13383.1, 4090.52, 3479.5), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11225.1, 4429.85, 4322.38), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((10554.9, 4586.88, 4596.63), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((12326, 4639.28, 5766.95), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((11370.4, 6195.88, 6398.83), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((12294.8, 7399.36, 7205.16), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((11975.4, 8902.22, 7425.72), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((11370.5, 10459.7, 8176.74), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((11026.4, 11464.8, 6823.83), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((12450.8, 13012.5, 6529.84), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((13874.2, 14489.8, 6268.22), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((14235.6, 13007.5, 5753.75), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((13215.9, 13710.7, 6892.86), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((11980.5, 13227.6, 7763.68), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((11432, 11904.7, 8032.92), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((10650.9, 10579.8, 8390.35), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((11842.6, 9383.56, 9068.83), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((11007.5, 8508.02, 10007.1), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9961.01, 7958.91, 11407.6), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9762.12, 8128.95, 13085.8), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9105.04, 8055.92, 11818.5), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8098.96, 8945.37, 10136.6), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7113.44, 10486.1, 9008.71), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6624.41, 11283.8, 8528.05), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5149.07, 9466.37, 7162.96), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3504.19, 8941.41, 6449.57), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3197.3, 7852.11, 7001.47), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2170.99, 6197.96, 8273.91), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2865.17, 6382.4, 7807.34), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1678.42, 6425.8, 7006.61), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((135.134, 4970.61, 6205.73), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((571.58, 4380.79, 4957.66), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1541.14, 3448.69, 4309.93), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1849.42, 1776.94, 3839.31), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2383.37, 290.48, 4448.44), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2421.7, 991.476, 5898.51), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1032.67, 1633.14, 5847.07), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1237.16, 3532.79, 5257.35), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((553.737, 3534.85, 4213.84), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1601.81, 4124.33, 4851.28), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2243.52, 3644.67, 4704.5), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1631.44, 3925.35, 4647.59), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((1142.92, 5369.1, 5674.5), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2626.92, 7819.11, 5319.34), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3047.34, 9026.17, 3950.98), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3457.58, 9145.33, 2883.25), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2644.32, 8950.16, 1073.97), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((846.027, 9108.82, -893.839), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((-6.51386, 7800.76, -208.332), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((-40.7095, 8772.6, 2535.54), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((421.267, 8145, 2491.85), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((115.381, 6285.36, 2181.62), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((658.606, 4858.23, 1911.62), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2406.42, 4665.42, 2429.8), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| 0.025203 |
#!/usr/bin/env python3
#initialize and scan for cards
# 1 Addr x XOR
#always use bcast addr
def setup(ser):
print("Setup: Emptying card buffers")
readanddiscard(ser)
command=1
addr=1
data =0
xor=command^addr^data
toSend= bytes([command, addr, data, xor])
ser.write(toSend)
return parseSetup(ser)
#switch relais on
# 6 Addr data XOR
# function does not take board address, but instead numbers relais starting
# from 0 across all boards
# data is a bitmask for all relais to be switched on, but we only support
# switching one relais at a time
def relaisOn(num,cards,ser):
command=6
addr=int((int(num)/8)+1)
bit=int(num)%8
data= 1 << bit
xor=command^addr^data
if addr < 1 or addr > len(cards):
return(False,"No such port. "+str(num)+" requested, max is "+str(len(cards)*8-1))
toSend=bytes([command, addr, data, xor])
ser.write(toSend)
data=ser.read(4)
if len(data)==0:
return(False,"Communication Error")
if data[0] == 0xff:
return(False,"Command Error")
if not data[0] == 249:
return(False,"Unknown response: "+str(data))
return (True, "OK")
#switch relais off
# 7 Addr data XOR
# function does not take board address, but instead numbers relais starting
# from 0 across all boards
# data is a bitmask for all relais to be switched off, but we only support
# switching one relais at a time
def relaisOff(num, cards, ser):
command=7
addr=int((int(num)/8)+1)
bit=int(num)%8
data= 1 << bit
xor=command^addr^data
if addr < 1 or addr > len(cards):
return(False,"No such port. "+str(num)+" requested, max is "+str(len(cards)*8-1))
toSend=bytes([command, addr, data, xor])
ser.write(toSend)
data=ser.read(4)
if len(data)==0:
return(False,"Communication Error")
if data[0] == 0xff:
return(False,"Command Error")
if not data[0] == 248:
return(False,"Unknown response: "+str(data))
return (True, "OK")
#Get current port state from all cards in cards. Ports in order of cards
# 2 Addr x XOR
def getPortState(cards, ser):
ports=[]
for card in cards:
command=2
addr=int(card['address'])
data=0
xor=command^addr^data
toSend=bytes([command, addr, data, xor])
#print("Query port :"+str(toSend))
ser.write(toSend)
data=ser.read(4)
if len(data)==0:
return(False,"Communication Error", ports)
if data[0] == 0xff:
return(False,"Command Error", ports)
if not data[0] == 253:
return(False,"Unknown response: "+str(data),ports)
if not xorok(data):
return(False,"Checksum wrong",ports)
boardports=data[2]
for i in range(0,8):
ports.append( (boardports>>i)&0x01 )
return(True, "OK", ports)
#parse response after setup, return card structure
def parseSetup(ser):
cards=[]
currAddr=1
while True:
card={}
data=ser.read(4)
if len(data)==0:
print("All boards found")
break
print("Got SETUP answer:", end="")
for byte in data:
print(""+format(byte, '02x'), end="")
print("")
if len(data) != 4:
print("Short read. Expected 4 got "+str(len(data)))
continue
if data[0] == 1: #last card broadcasting back to server
print("Last board in chain reached")
#could break, but for safety read again
continue
if data[0] != 254:
print("Unknown SETUP response: "+str(data[0]))
continue
if xorok(data):
print("Found board")
card['address']=currAddr
card['firmware']=data[2]
card['xorok']= xorok(data)
cards.append(card)
currAddr=currAddr+1
else:
print("Checksum error in MSG: ", end="")
return cards
# In case of a failure read and discard data until no more data coming from relais
# read at most 1024 bytes (256 msgs)
def readanddiscard(ser):
command=0
addr=0
data =0
xor=command^addr^data
toSend= bytes([command, addr, data, xor])
ser.write(toSend)
data=ser.read(1024)
print("Ignored "+str(len(data))+" bytes from relaiscard")
def xorok(data):
return ( data[3]==(data[0]^data[1]^data[2]))
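# Minimal usage sketch: the functions above only need a pyserial-like object with
# read()/write(). The port name and baud rate below are placeholders, not values
# taken from any card documentation -- adjust them to the actual hardware setup.
if __name__ == '__main__':
    import serial  # pyserial, assumed to be installed
    ser = serial.Serial('/dev/ttyUSB0', 19200, timeout=1)  # placeholder settings
    cards = setup(ser)
    print("Found "+str(len(cards))+" card(s)")
    ok, msg, ports = getPortState(cards, ser)
    print("Port state: "+msg+" "+str(ports))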
| 0.02617 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((11248.6, 1687.25, 2687.38), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((10161.3, 3059.47, 3154.38), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((9148.67, 2959.77, 4804.99), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11139.5, 1720.32, 5034.55), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((10738.5, 883.725, 6535.47), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((8436.61, 1249.5, 7087.47), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((6722.89, 1118.39, 6985.83), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((7367.24, 572.61, 7152.59), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5019.84, 1648.91, 6379.04), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((3875.33, 656.111, 5651.68), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2764.36, 2122.16, 5251.58), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3536.42, 2756.35, 4450.95), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3890.95, 3596.09, 3136.54), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((4958.86, 2559.78, 2691.92), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((5186.2, 2747.27, 461.175), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((6523.65, 4987.61, -1237.44), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((7084.94, 6153.6, 214.589), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5618.5, 6358.49, 358.408), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4656.19, 5342.07, 1352.14), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3230.06, 4762.23, 1367.89), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((3726.44, 4349.46, 3710.08), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((3544.73, 5373.67, 1954.4), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((3503.84, 6026.15, 2401.57), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4008.44, 7068.51, 1793.7), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5225.7, 6820, 1222.46), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((5685.62, 7197.54, -222.223), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5226.02, 6265.17, 784.631), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((5304.28, 4769.84, 2328.61), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4061.37, 5410.74, 2881.29), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3272.37, 5038.25, 3845.39), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((3754.57, 5755.57, 4050.55), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3540.18, 4049.32, 4248.08), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((2535.17, 5214.29, 3456.17), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3624.43, 6185.9, 3048.26), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4138.83, 6184.87, 1835), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((3941.15, 6314.96, 513.212), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4031.48, 5247.3, 2698.91), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4124.54, 6654.1, 1407.82), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((3534.51, 6808.94, 2754.24), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1965.05, 7009.1, 3189.23), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((1954.3, 6382.64, 4964.46), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((811.898, 5551.69, 6434.17), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((-873.953, 7130.72, 8232.58), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((14.07, 5532.02, 9074.3), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((604.676, 6415.67, 7819.58), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2108.65, 5573.9, 6862.28), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2957.82, 7295.57, 7183.92), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((1128.11, 8102.38, 6757.96), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2722.5, 7404.68, 7355.86), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((3869.59, 5910.24, 7600.57), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((2847.7, 6068.8, 8269.91), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((3569.84, 4810.99, 7436.87), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((4621.46, 3855.59, 6320.3), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((4467.46, 2623.8, 7254.41), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4290.89, 2864.64, 7872.03), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((4435.07, 4941.7, 7484.84), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((6564.19, 5342.68, 7799.42), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((8044.38, 7367.52, 8046.93), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((8342.75, 7898.85, 8076.58), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((8768.06, 7318, 8478.33), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((7936.39, 7244.87, 8355.84), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((8197.96, 7300.82, 9208.25), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((7223.76, 6295.71, 7918.92), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((8572.41, 6625.26, 9233.36), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((10054.9, 7586.16, 10273.5), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((8396.3, 7602.05, 10980.6), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((8428.51, 9268.04, 10845), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((7537.95, 7304.67, 9626.43), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((9323.43, 8120.69, 10081.5), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((8723.48, 9425.56, 10803.8), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((7639.76, 8774.86, 10166), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| 0.025108 |
#!/usr/bin/env python
import struct
from cStringIO import StringIO
TAG_TYPE_METADATA = 18
##################################################
# AMF0
##################################################
AMF_TYPE_NUMBER = 0x00
AMF_TYPE_BOOLEAN = 0x01
AMF_TYPE_STRING = 0x02
AMF_TYPE_OBJECT = 0x03
AMF_TYPE_MOVIECLIP = 0x04
AMF_TYPE_NULL = 0x05
AMF_TYPE_UNDEFINED = 0x06
AMF_TYPE_REFERENCE = 0x07
AMF_TYPE_MIXED_ARRAY = 0x08
AMF_TYPE_END_OF_OBJECT = 0x09
AMF_TYPE_ARRAY = 0x0A
AMF_TYPE_DATE = 0x0B
AMF_TYPE_LONG_STRING = 0x0C
AMF_TYPE_UNSUPPORTED = 0x0D
AMF_TYPE_RECORDSET = 0x0E
AMF_TYPE_XML = 0x0F
AMF_TYPE_CLASS_OBJECT = 0x10
AMF_TYPE_AMF3_OBJECT = 0x11
class ECMAObject:
def __init__(self, max_number):
self.max_number = max_number
self.data = []
self.map = {}
def put(self, k, v):
self.data.append((k, v))
self.map[k] = v
def get(self, k):
return self.map[k]
def set(self, k, v):
for i in range(len(self.data)):
if self.data[i][0] == k:
self.data[i] = (k, v)
break
else:
raise KeyError(k)
self.map[k] = v
def keys(self):
return self.map.keys()
def __str__(self):
return 'ECMAObject<'+repr(self.map)+'>'
def __eq__(self, other):
return self.max_number == other.max_number and self.data == other.data
def read_amf_number(stream):
return struct.unpack('>d', stream.read(8))[0]
def read_amf_boolean(stream):
b = read_byte(stream)
assert b in (0, 1)
return bool(b)
def read_amf_string(stream):
xx = stream.read(2)
if xx == '':
# dirty fix for the invalid Qiyi flv
return None
n = struct.unpack('>H', xx)[0]
s = stream.read(n)
assert len(s) == n
return s.decode('utf-8')
def read_amf_object(stream):
obj = {}
while True:
k = read_amf_string(stream)
if not k:
assert read_byte(stream) == AMF_TYPE_END_OF_OBJECT
break
v = read_amf(stream)
obj[k] = v
return obj
def read_amf_mixed_array(stream):
max_number = read_uint(stream)
mixed_results = ECMAObject(max_number)
while True:
k = read_amf_string(stream)
if k is None:
# dirty fix for the invalid Qiyi flv
break
if not k:
assert read_byte(stream) == AMF_TYPE_END_OF_OBJECT
break
v = read_amf(stream)
mixed_results.put(k, v)
assert len(mixed_results.data) == max_number
return mixed_results
def read_amf_array(stream):
n = read_uint(stream)
v = []
for i in range(n):
v.append(read_amf(stream))
return v
amf_readers = {
AMF_TYPE_NUMBER: read_amf_number,
AMF_TYPE_BOOLEAN: read_amf_boolean,
AMF_TYPE_STRING: read_amf_string,
AMF_TYPE_OBJECT: read_amf_object,
AMF_TYPE_MIXED_ARRAY: read_amf_mixed_array,
AMF_TYPE_ARRAY: read_amf_array,
}
def read_amf(stream):
return amf_readers[read_byte(stream)](stream)
def write_amf_number(stream, v):
stream.write(struct.pack('>d', v))
def write_amf_boolean(stream, v):
if v:
stream.write('\x01')
else:
stream.write('\x00')
def write_amf_string(stream, s):
s = s.encode('utf-8')
stream.write(struct.pack('>H', len(s)))
stream.write(s)
def write_amf_object(stream, o):
for k in o:
write_amf_string(stream, k)
write_amf(stream, o[k])
write_amf_string(stream, '')
write_byte(stream, AMF_TYPE_END_OF_OBJECT)
def write_amf_mixed_array(stream, o):
write_uint(stream, o.max_number)
for k, v in o.data:
write_amf_string(stream, k)
write_amf(stream, v)
write_amf_string(stream, '')
write_byte(stream, AMF_TYPE_END_OF_OBJECT)
def write_amf_array(stream, o):
write_uint(stream, len(o))
for v in o:
write_amf(stream, v)
amf_writers_tags = {
float: AMF_TYPE_NUMBER,
bool: AMF_TYPE_BOOLEAN,
unicode: AMF_TYPE_STRING,
dict: AMF_TYPE_OBJECT,
ECMAObject: AMF_TYPE_MIXED_ARRAY,
list: AMF_TYPE_ARRAY,
}
amf_writers = {
AMF_TYPE_NUMBER: write_amf_number,
AMF_TYPE_BOOLEAN: write_amf_boolean,
AMF_TYPE_STRING: write_amf_string,
AMF_TYPE_OBJECT: write_amf_object,
AMF_TYPE_MIXED_ARRAY: write_amf_mixed_array,
AMF_TYPE_ARRAY: write_amf_array,
}
def write_amf(stream, v):
if isinstance(v, ECMAObject):
tag = amf_writers_tags[ECMAObject]
else:
tag = amf_writers_tags[type(v)]
write_byte(stream, tag)
amf_writers[tag](stream, v)
##################################################
# FLV
##################################################
def read_int(stream):
return struct.unpack('>i', stream.read(4))[0]
def read_uint(stream):
return struct.unpack('>I', stream.read(4))[0]
def write_uint(stream, n):
stream.write(struct.pack('>I', n))
def read_byte(stream):
return ord(stream.read(1))
def write_byte(stream, b):
stream.write(chr(b))
def read_unsigned_medium_int(stream):
x1, x2, x3 = struct.unpack('BBB', stream.read(3))
return (x1 << 16) | (x2 << 8) | x3
def read_tag(stream):
# header size: 15 bytes = 4-byte previous tag size + 11-byte tag header
header = stream.read(15)
if len(header) == 4:
# only the trailing previous-tag-size remains: end of stream
return
x = struct.unpack('>IBBBBBBBBBBB', header)
previous_tag_size = x[0]
data_type = x[1]
body_size = (x[2] << 16) | (x[3] << 8) | x[4]
assert body_size < 1024*1024*128, 'tag body size too big (> 128MB)'
timestamp = (x[5] << 16) | (x[6] << 8) | x[7]
timestamp += x[8] << 24
assert x[9:] == (0, 0, 0)
body = stream.read(body_size)
return (data_type, timestamp, body_size, body, previous_tag_size)
#previous_tag_size = read_uint(stream)
#data_type = read_byte(stream)
#body_size = read_unsigned_medium_int(stream)
#assert body_size < 1024*1024*128, 'tag body size too big (> 128MB)'
#timestamp = read_unsigned_medium_int(stream)
#timestamp += read_byte(stream) << 24
#assert read_unsigned_medium_int(stream) == 0
#body = stream.read(body_size)
#return (data_type, timestamp, body_size, body, previous_tag_size)
def write_tag(stream, tag):
data_type, timestamp, body_size, body, previous_tag_size = tag
write_uint(stream, previous_tag_size)
write_byte(stream, data_type)
write_byte(stream, body_size>>16 & 0xff)
write_byte(stream, body_size>>8 & 0xff)
write_byte(stream, body_size & 0xff)
write_byte(stream, timestamp>>16 & 0xff)
write_byte(stream, timestamp>>8 & 0xff)
write_byte(stream, timestamp & 0xff)
write_byte(stream, timestamp>>24 & 0xff)
stream.write('\0\0\0')
stream.write(body)
def read_flv_header(stream):
assert stream.read(3) == 'FLV'
header_version = read_byte(stream)
assert header_version == 1
type_flags = read_byte(stream)
assert type_flags == 5
data_offset = read_uint(stream)
assert data_offset == 9
def write_flv_header(stream):
stream.write('FLV')
write_byte(stream, 1)
write_byte(stream, 5)
write_uint(stream, 9)
def read_meta_data(stream):
meta_type = read_amf(stream)
meta = read_amf(stream)
return meta_type, meta
def read_meta_tag(tag):
data_type, timestamp, body_size, body, previous_tag_size = tag
assert data_type == TAG_TYPE_METADATA
assert timestamp == 0
assert previous_tag_size == 0
return read_meta_data(StringIO(body))
def write_meta_data(stream, meta_type, meta_data):
assert isinstance(meta_type, basestring)
write_amf(stream, meta_type)
write_amf(stream, meta_data)
def write_meta_tag(stream, meta_type, meta_data):
buffer = StringIO()
write_amf(buffer, meta_type)
write_amf(buffer, meta_data)
body = buffer.getvalue()
write_tag(stream, (TAG_TYPE_METADATA, 0, len(body), body, 0))
##################################################
# main
##################################################
def guess_output(inputs):
import os.path
inputs = map(os.path.basename, inputs)
n = min(map(len, inputs))
for i in reversed(range(1, n)):
if len(set(s[:i] for s in inputs)) == 1:
return inputs[0][:i] + '.flv'
return 'output.flv'
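# For example, inputs named 'clip-part1.flv' and 'clip-part2.flv' share the
# prefix 'clip-part', so guess_output returns 'clip-part.flv'; with no common
# prefix it falls back to 'output.flv'.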
def concat_flvs(flvs, output=None):
assert flvs, 'no flv file found'
import os.path
if not output:
output = guess_output(flvs)
elif os.path.isdir(output):
output = os.path.join(output, guess_output(flvs))
print 'Joining %s into %s' % (', '.join(flvs), output)
ins = [open(flv, 'rb') for flv in flvs]
for stream in ins:
read_flv_header(stream)
meta_tags = map(read_tag, ins)
metas = map(read_meta_tag, meta_tags)
meta_types, metas = zip(*metas)
assert len(set(meta_types)) == 1
meta_type = meta_types[0]
# must merge fields: duration
# TODO: check other meta info, update other meta info
total_duration = sum(meta.get('duration') for meta in metas)
meta_data = metas[0]
meta_data.set('duration', total_duration)
out = open(output, 'wb')
write_flv_header(out)
write_meta_tag(out, meta_type, meta_data)
timestamp_start = 0
for stream in ins:
while True:
tag = read_tag(stream)
if tag:
data_type, timestamp, body_size, body, previous_tag_size = tag
timestamp += timestamp_start
tag = data_type, timestamp, body_size, body, previous_tag_size
write_tag(out, tag)
else:
break
timestamp_start = timestamp
write_uint(out, previous_tag_size)
return output
def usage():
print 'python flv_join.py --output target.flv flv...'
def main():
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:", ["help", "output="])
except getopt.GetoptError, err:
usage()
sys.exit(1)
output = None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output = a
else:
usage()
sys.exit(1)
if not args:
usage()
sys.exit(1)
concat_flvs(args, output)
if __name__ == '__main__':
main()
| 0.033359 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os
import os.path as op
import numpy as np
from distutils.version import LooseVersion
from ...utils import (_fetch_file, verbose, _TempDir, _check_pandas_installed,
_on_missing)
from ..utils import _get_path
AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv')
TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__),
'temazepam_records.csv')
TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls' # noqa: E501
TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885'
AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls' # noqa: E501
AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f'
sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS')
def _fetch_one(fname, hashsum, path, force_update, base_url):
# Fetch the file
url = base_url + '/' + fname
destination = op.join(path, fname)
if not op.isfile(destination) or force_update:
if op.isfile(destination):
os.remove(destination)
if not op.isdir(op.dirname(destination)):
os.makedirs(op.dirname(destination))
_fetch_file(url, destination, print_destination=False,
hash_=hashsum, hash_type='sha1')
return destination
@verbose
def _data_path(path=None, force_update=False, update_path=None, verbose=None):
"""Get path to local copy of EEG Physionet age Polysomnography dataset URL.
This is a low-level function useful for getting a local copy of a
remote Polysomnography dataset :footcite:`KempEtAl2000` which is available
at PhysioNet :footcite:`GoldbergerEtAl2000`.
Parameters
----------
path : None | str
Location of where to look for the data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_PHYSIONET_SLEEP_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
path : list of str
Local path to the given data file. This path is contained inside a list
of length one, for compatibility.
References
----------
.. footbibliography::
""" # noqa: E501
key = 'PHYSIONET_SLEEP_PATH'
name = 'PHYSIONET_SLEEP'
path = _get_path(path, key, name)
return op.join(path, 'physionet-sleep-data')
def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS):
"""Help function to download Physionet's temazepam dataset records."""
pd = _check_pandas_installed()
tmp = _TempDir()
# Download subjects info.
subjects_fname = op.join(tmp, 'ST-subjects.xls')
_fetch_file(url=TEMAZEPAM_RECORDS_URL,
file_name=subjects_fname,
hash_=TEMAZEPAM_RECORDS_URL_SHA1,
hash_type='sha1')
# Load and Massage the checksums.
sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
select_age_records = (sha1_df.fname.str.startswith('ST') &
sha1_df.fname.str.endswith('edf'))
sha1_df = sha1_df[select_age_records]
sha1_df['id'] = [name[:6] for name in sha1_df.fname]
# Load and massage the data.
data = pd.read_excel(subjects_fname, header=[0, 1])
if LooseVersion(pd.__version__) >= LooseVersion('0.24.0'):
data = data.set_index(('Subject - age - sex', 'Nr'))
data.index.name = 'subject'
data.columns.names = [None, None]
data = (data.set_index([('Subject - age - sex', 'Age'),
('Subject - age - sex', 'M1/F2')], append=True)
.stack(level=0).reset_index())
data = data.rename(columns={('Subject - age - sex', 'Age'): 'age',
('Subject - age - sex', 'M1/F2'): 'sex',
'level_3': 'drug'})
data['id'] = ['ST7{:02d}{:1d}'.format(s, n)
for s, n in zip(data.subject, data['night nr'])]
data = pd.merge(sha1_df, data, how='outer', on='id')
data['record type'] = (data.fname.str.split('-', expand=True)[1]
.str.split('.', expand=True)[0]
.astype('category'))
data = data.set_index(['id', 'subject', 'age', 'sex', 'drug',
'lights off', 'night nr', 'record type']).unstack()
data.columns = [l1 + '_' + l2 for l1, l2 in data.columns]
if LooseVersion(pd.__version__) < LooseVersion('0.21.0'):
data = data.reset_index().drop(labels=['id'], axis=1)
else:
data = data.reset_index().drop(columns=['id'])
data['sex'] = (data.sex.astype('category')
.cat.rename_categories({1: 'male', 2: 'female'}))
data['drug'] = data['drug'].str.split(expand=True)[0]
data['subject_orig'] = data['subject']
data['subject'] = data.index // 2 # to make sure index is from 0 to 21
# Save the data.
data.to_csv(fname, index=False)
def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS):
"""Help function to download Physionet's age dataset records."""
pd = _check_pandas_installed()
tmp = _TempDir()
# Download subjects info.
subjects_fname = op.join(tmp, 'SC-subjects.xls')
_fetch_file(url=AGE_RECORDS_URL,
file_name=subjects_fname,
hash_=AGE_RECORDS_URL_SHA1,
hash_type='sha1')
# Load and Massage the checksums.
sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
select_age_records = (sha1_df.fname.str.startswith('SC') &
sha1_df.fname.str.endswith('edf'))
sha1_df = sha1_df[select_age_records]
sha1_df['id'] = [name[:6] for name in sha1_df.fname]
# Load and massage the data.
data = pd.read_excel(subjects_fname)
data = data.rename(index=str, columns={'sex (F=1)': 'sex',
'LightsOff': 'lights off'})
data['sex'] = (data.sex.astype('category')
.cat.rename_categories({1: 'female', 2: 'male'}))
data['id'] = ['SC4{:02d}{:1d}'.format(s, n)
for s, n in zip(data.subject, data.night)]
data = data.set_index('id').join(sha1_df.set_index('id')).dropna()
data['record type'] = (data.fname.str.split('-', expand=True)[1]
.str.split('.', expand=True)[0]
.astype('category'))
if LooseVersion(pd.__version__) < LooseVersion('0.21.0'):
data = data.reset_index().drop(labels=['id'], axis=1)
else:
data = data.reset_index().drop(columns=['id'])
data = data[['subject', 'night', 'record type', 'age', 'sex', 'lights off',
'sha', 'fname']]
# Save the data.
data.to_csv(fname, index=False)
def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'):
"""Check whether subjects are available.
Parameters
----------
subjects : list
Subject numbers to be checked.
n_subjects : int
Number of subjects available.
missing : list | None
Subject numbers that are missing.
on_missing : 'raise' | 'warn' | 'ignore'
What to do if one or several subjects are not available. Valid keys
are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing
is 'warn' it will proceed but warn, if 'ignore' it will proceed
silently.
"""
valid_subjects = np.arange(n_subjects)
if missing is not None:
valid_subjects = np.setdiff1d(valid_subjects, missing)
unknown_subjects = np.setdiff1d(subjects, valid_subjects)
if unknown_subjects.size > 0:
subjects_list = ', '.join([str(s) for s in unknown_subjects])
msg = (f'This dataset contains subjects 0 to {n_subjects - 1} with '
f'missing subjects {missing}. Unknown subjects: '
f'{subjects_list}.')
_on_missing(on_missing, msg)
| 0 |
"""
## Hypothesis Testing Stuff
### Standard Stuff
#### Standard Headers
"""
from __future__ import division
import sys, random, math, os
sys.dont_write_bytecode = True
sys.path.append(os.path.abspath("."))
"""
#### Standard Utils
"""
class o():
"Anonymous container"
def __init__(i,**fields) :
i.override(fields)
def override(i,d): i.__dict__.update(d); return i
def __repr__(i):
d = i.__dict__
name = i.__class__.__name__
return name+'{'+' '.join([':%s %s' % (k,d[k])
for k in i.show()])+ '}'
def show(i):
return [k for k in sorted(i.__dict__.keys())
if not "_" in k]
"""
Misc functions:
"""
rand = random.random
any = random.choice
seed = random.seed
exp = lambda n: math.e**n
ln = lambda n: math.log(n,math.e)
g = lambda n: round(n,2)
def median(lst,ordered=False):
if not ordered: lst= sorted(lst)
n = len(lst)
p = n//2
if n % 2: return lst[p]
q = p - 1
q = max(0,min(q,n))
return (lst[p] + lst[q])/2
def msecs(f):
import time
t1 = time.time()
f()
return (time.time() - t1) * 1000
def pairs(lst):
"Return all pairs of items i,i+1 from a list."
last=lst[0]
for i in lst[1:]:
yield last,i
last = i
def xtile(num,lo=0,hi=100,width=40,
#chops=[0.1 ,0.3,0.5,0.7,0.9],
chops=[0.25,0.5,0.75],
#marks=[" ","-","-"," "],
marks=["-","-"],
bar="|",star="*",show=" %3.0f"):
"""The function _xtile_ takes a list of (possibly)
unsorted numbers and presents them as a horizontal
xtile chart (in ascii format). The default is a
contracted _quartile_ view that shows the
25,50,75 breaks in the data (but this can be
changed- see the optional flags of the function).
"""
lst = num.all
def pos(p) : return ordered[int(len(ordered)*p)]
def place(x) :
return int(width*float((x - lo))/(hi - lo+0.00001))
def pretty(lst) :
return ', '.join([show % x for x in lst])
ordered = sorted(lst)
#lo = min(lo,ordered[0])
#hi = max(hi,ordered[-1])
what = num.quartiles_noround()
where = [place(n) for n in what]
out = [" "] * width
for one,two in pairs(where):
for i in range(one,two):
if i<len(out):
out[i] = marks[0]
marks = marks[1:]
out[int(width/2)] = bar
out[place(pos(0.5))] = star
#print(lst)
return '('+''.join(out) + ")," + pretty(what)
def _tileX() :
import random
random.seed(1)
nums = [random.random()**2 for _ in range(100)]
print xtile(nums,lo=0,hi=1.0,width=25,show=" %5.2f")
"""
### Standard Accumulator for Numbers
Note the _lt_ method: this accumulator can be sorted by median values.
Warning: this accumulator keeps _all_ numbers. Might be better to use
a bounded cache.
"""
class Num:
"An Accumulator for numbers"
def __init__(i,name,inits=[]):
i.n = i.m2 = i.mu = 0.0
i.all=[]
i._median=None
i.name = name
i.rank = 0
for x in inits: i.add(x)
def s(i) : return (i.m2/(i.n - 1))**0.5
def add(i,x):
i._median=None
i.n += 1
i.all += [x]
delta = x - i.mu
i.mu += delta*1.0/i.n
i.m2 += delta*(x - i.mu)
def __add__(i,j):
return Num(i.name + j.name,i.all + j.all)
def quartiles(i):
def p(x) : return int(round(100*xs[x]))
i.median()
xs = i.all
n = int(len(xs)*0.25)
return p(n) , p(2*n) , p(3*n)
def quartiles_noround(i):
def p(x) : return round(g(xs[x]), 2)
i.median()
xs = i.all
n = int(len(xs)*0.25)
return p(n) , p(2*n) , p(3*n)
def median(i):
if not i._median:
i.all = sorted(i.all)
i._median=median(i.all)
return i._median
def __lt__(i,j):
return i.median() < j.median()
def spread(i):
i.all=sorted(i.all)
n1=i.n*0.25
n2=i.n*0.75
if len(i.all) <= 1:
return 0
if len(i.all) == 2:
return i.all[1] - i.all[0]
else:
return i.all[int(n2)] - i.all[int(n1)]
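"""
A small sketch of the accumulator in action (the numbers are arbitrary):
"""
def _numed():
    n = Num("demo", [2, 4, 4, 4, 5, 5, 7, 9])
    print n.name, g(n.mu), g(n.s()), n.median(), n.spread()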
"""
### The A12 Effect Size Test
"""
def a12slow(lst1,lst2):
"how often is x in lst1 more than y in lst2?"
more = same = 0.0
for x in lst1:
for y in lst2:
if x == y : same += 1
elif x > y : more += 1
x= (more + 0.5*same) / (len(lst1)*len(lst2))
return x
def a12(lst1,lst2):
"how often is x in lst1 more than y in lst2?"
def loop(t,t1,t2):
while t1.j < t1.n and t2.j < t2.n:
h1 = t1.l[t1.j]
h2 = t2.l[t2.j]
h3 = t2.l[t2.j+1] if t2.j+1 < t2.n else None
if h1> h2:
t1.j += 1; t1.gt += t2.n - t2.j
elif h1 == h2:
if h3 and h1 > h3 :
t1.gt += t2.n - t2.j - 1
t1.j += 1; t1.eq += 1; t2.eq += 1
else:
t2,t1 = t1,t2
return t.gt*1.0, t.eq*1.0
#--------------------------
lst1 = sorted(lst1, reverse=True)
lst2 = sorted(lst2, reverse=True)
n1 = len(lst1)
n2 = len(lst2)
t1 = o(l=lst1,j=0,eq=0,gt=0,n=n1)
t2 = o(l=lst2,j=0,eq=0,gt=0,n=n2)
gt,eq= loop(t1, t1, t2)
return gt/(n1*n2) + eq/2/(n1*n2)
def _a12():
def f1(): return a12slow(l1,l2)
def f2(): return a12(l1,l2)
for n in [100,200,400,800,1600,3200,6400]:
l1 = [rand() for _ in xrange(n)]
l2 = [rand() for _ in xrange(n)]
t1 = msecs(f1)
t2 = msecs(f2)
print n, g(f1()),g(f2()),int((t1/t2))
"""Output:
````
n a12(slow) a12(fast) tslow / tfast
--- --------------- -------------- --------------
100 0.53 0.53 4
200 0.48 0.48 6
400 0.49 0.49 28
800 0.5 0.5 26
1600 0.51 0.51 72
3200 0.49 0.49 109
6400 0.5 0.5 244
````
## Non-Parametric Hypothesis Testing
The following _bootstrap_ method was introduced in
1979 by Bradley Efron at Stanford University. It
was inspired by earlier work on the
jackknife.
Improved estimates of the variance were [developed later][efron01].
[efron01]: http://goo.gl/14n8Wf "Bradley Efron and R.J. Tibshirani. An Introduction to the Bootstrap (Chapman & Hall/CRC Monographs on Statistics & Applied Probability), 1993"
To check if two populations _(y0,z0)_
are different, sample with replacement from both, many times,
to generate _(y1,z1), (y2,z2), (y3,z3)_, etc.
"""
def sampleWithReplacement(lst):
"returns a list same size as list"
def any(n) : return random.uniform(0,n)
def one(lst): return lst[ int(any(len(lst))) ]
return [one(lst) for _ in lst]
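"""
For example (an illustrative sketch, my addition): resampling a small made-up
list keeps the same length but usually repeats some items and drops others,
which is what gives each bootstrap sample its variability.
"""
def _sampleDemo():
  import random
  random.seed(1)
  print sampleWithReplacement([1,2,3,4,5])
  print sampleWithReplacement([1,2,3,4,5])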
"""
Then, for all those samples,
check if some *testStatistic* in the original pair
holds for all the other pairs. If it does more than (say) 99%
of the time, then we are 99% confident that the
populations are the same.
In such a _bootstrap_ hypothesis test, the *some property*
is the difference between the two populations, muted by the
joint standard deviation of the populations.
"""
def testStatistic(y,z):
"""Checks if two means are different, tempered
by the sample size of 'y' and 'z'"""
tmp1 = tmp2 = 0
for y1 in y.all: tmp1 += (y1 - y.mu)**2
for z1 in z.all: tmp2 += (z1 - z.mu)**2
s1 = (float(tmp1)/(y.n - 1))**0.5
s2 = (float(tmp2)/(z.n - 1))**0.5
delta = z.mu - y.mu
if s1+s2:
delta = delta/((s1/y.n + s2/z.n)**0.5)
return delta
"""
The rest is just details:
+ Efron advises
making the mean of the populations the same (see
the _yhat,zhat_ stuff shown below).
+ The class _total_ is just a quick and dirty accumulation class.
+ For more details see [the Efron text][efron01].
"""
def bootstrap(y0,z0,conf=0.05,b=1000):
"""The bootstrap hypothesis test from
p220 to 223 of Efron's book 'An
  introduction to the bootstrap'."""
class total():
"quick and dirty data collector"
def __init__(i,some=[]):
i.sum = i.n = i.mu = 0 ; i.all=[]
for one in some: i.put(one)
def put(i,x):
i.all.append(x);
i.sum +=x; i.n += 1; i.mu = float(i.sum)/i.n
def __add__(i1,i2): return total(i1.all + i2.all)
y, z = total(y0), total(z0)
x = y + z
tobs = testStatistic(y,z)
yhat = [y1 - y.mu + x.mu for y1 in y.all]
zhat = [z1 - z.mu + x.mu for z1 in z.all]
bigger = 0.0
for i in range(b):
if testStatistic(total(sampleWithReplacement(yhat)),
total(sampleWithReplacement(zhat))) > tobs:
bigger += 1
return bigger / b < conf
"""
#### Examples
"""
def _bootstraped():
def worker(n=1000,
mu1=10, sigma1=1,
mu2=10.2, sigma2=1):
def g(mu,sigma) : return random.gauss(mu,sigma)
x = [g(mu1,sigma1) for i in range(n)]
y = [g(mu2,sigma2) for i in range(n)]
return n,mu1,sigma1,mu2,sigma2,\
'different' if bootstrap(x,y) else 'same'
# very different means, same std
print worker(mu1=10, sigma1=10,
mu2=100, sigma2=10)
# similar means and std
print worker(mu1= 10.1, sigma1=1,
mu2= 10.2, sigma2=1)
# slightly different means, same std
print worker(mu1= 10.1, sigma1= 1,
mu2= 10.8, sigma2= 1)
  # difference in mu eaten by large std
print worker(mu1= 10.1, sigma1= 10,
mu2= 10.8, sigma2= 1)
"""
Output:
````
_bootstraped()
(1000, 10, 10, 100, 10, 'different')
(1000, 10.1, 1, 10.2, 1, 'same')
(1000, 10.1, 1, 10.8, 1, 'different')
(1000, 10.1, 10, 10.8, 1, 'same')
````
Warning: the above took eight seconds to generate since we used 1000 bootstraps.
As to how many bootstraps are enough, that depends on the data. There are
results saying 200 to 400 are enough but, since I am a suspicious man, I run it for 1000.
This means that the runtime associated with bootstrapping is a significant issue.
To reduce that runtime, I avoid things like an all-pairs comparison of all treatments
(see below: Scott-Knott). Also, BEFORE I do the bootstrap, I first run
the effect size test (and only go to bootstrapping if the effect size test passes):
"""
def different(l1,l2):
#return bootstrap(l1,l2) and a12(l2,l1)
return a12(l2,l1) and bootstrap(l1,l2)
#return a12(l2,l1)
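"""
An illustrative sketch (my addition, using made-up samples): _different_ runs
the cheap _a12_ effect size test first and only then the slower bootstrap, so
clearly separated samples should report a difference while samples drawn from
the same distribution should not.
"""
def _differentDemo():
  import random
  random.seed(1)
  l1 = [random.gauss(10,1) for _ in range(50)]
  l2 = [random.gauss(20,1) for _ in range(50)]
  l3 = [random.gauss(10,1) for _ in range(50)]
  print different(l1,l2)  # usually truthy: the means are far apart
  print different(l1,l3)  # usually falsy: same underlying distribution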
"""
## Saner Hypothesis Testing
The following code, which you should use verbatim, does the following:
+ All treatments are clustered into _ranks_. In practice, dozens
of treatments end up generating just a handful of ranks.
+ The number of calls to the hypothesis tests is minimized:
+ Treatments are sorted by their median value.
+ Treatments are divided into two groups such that the
expected difference between the overall mean and the group means _after_ the split is maximized;
+ Hypothesis tests are called to check if the two groups are truly different.
+ All hypothesis tests are non-parametric and include (1) effect size tests
and (2) tests for statistically significant numbers;
+ Slow bootstraps are executed only if the faster _A12_ tests pass;
In practice, this means that the hypothesis tests (with confidence of, say, 99%)
are called only a logarithmic number of times. So...
+ With this method, 16 treatments can be studied using less than _∑<sub>1,2,4,8,16</sub>log<sub>2</sub>i =15_ hypothesis tests and confidence _0.99<sup>15</sup>=0.86_.
+ But if we did this with the 120 all-pairs comparisons of the 16 treatments, we would have total confidence _0.99<sup>120</sup>=0.30_ (a quick sanity check of this arithmetic appears just after this note).
For examples on using this code, see _rdivDemo_ (below).
"""
def scottknott(data,cohen=0.3,small=3, useA12=False,epsilon=0.01):
"""Recursively split data, maximizing delta of
the expected value of the mean before and
after the splits.
Reject splits with under 3 items"""
all = reduce(lambda x,y:x+y,data)
same = lambda l,r: abs(l.median() - r.median()) <= all.s()*cohen
if useA12:
same = lambda l, r: not different(l.all,r.all)
big = lambda n: n > small
return rdiv(data,all,minMu,big,same,epsilon)
def rdiv(data, # a list of class Nums
all, # all the data combined into one num
div, # function: find the best split
big, # function: rejects small splits
same, # function: rejects similar splits
epsilon): # small enough to split two parts
"""Looks for ways to split sorted data,
Recurses into each split. Assigns a 'rank' number
to all the leaf splits found in this way.
"""
def recurse(parts,all,rank=0):
"Split, then recurse on each part."
cut,left,right = maybeIgnore(div(parts,all,big,epsilon),
same,parts)
if cut:
# if cut, rank "right" higher than "left"
rank = recurse(parts[:cut],left,rank) + 1
rank = recurse(parts[cut:],right,rank)
else:
# if no cut, then all get same rank
for part in parts:
part.rank = rank
return rank
recurse(sorted(data),all)
return data
def maybeIgnore((cut,left,right), same,parts):
if cut:
if same(sum(parts[:cut],Num('upto')),
sum(parts[cut:],Num('above'))):
cut = left = right = None
return cut,left,right
def minMu(parts,all,big,epsilon):
"""Find a cut in the parts that maximizes
the expected value of the difference in
the mean before and after the cut.
Reject splits that are insignificantly
different or that generate very small subsets.
"""
cut,left,right = None,None,None
before, mu = 0, all.mu
for i,l,r in leftRight(parts,epsilon):
if big(l.n) and big(r.n):
n = all.n * 1.0
now = l.n/n*(mu- l.mu)**2 + r.n/n*(mu- r.mu)**2
if now > before:
before,cut,left,right = now,i,l,r
return cut,left,right
def leftRight(parts,epsilon=0.01):
"""Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
"""
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one
"""
## Putting it All Together
Driver for the demos:
"""
def rdivDemo(data):
def z(x):
return int(100 * (x - lo) / (hi - lo + 0.00001))
data = map(lambda lst:Num(lst[0],lst[1:]),
data)
ranks=[]
maxMedian = -1
for x in scottknott(data,useA12=True):
q1,q2,q3 = x.quartiles()
maxMedian = max(maxMedian, x.median())
ranks += [(x.rank,q2,q3 - q1, x)]
all=[]
for _,__,___,x in sorted(ranks): all += x.all
all = sorted(all)
lo, hi = all[0], all[-1]
line = "----------------------------------------------------"
last = None
ret_list = []
header = ('%4s , %22s , %s , %4s ' % ('rank', 'name', 'med', 'iqr'))+ "\n"+ line
print(header)
ret_list.append(header)
for _,__,___,x in sorted(ranks, key=lambda a: (a[0], a[1], a[2])):
q1,q2,q3 = x.quartiles()
#xtile(x.all,lo=lo,hi=hi,width=30,show="%5.2f")
print_stmt = ('%1s , %22s , %4s , %4s ' % (x.rank+1, x.name, q2, q3 - q1))
ret_list.append(print_stmt)
print(print_stmt)
last = x.rank
return "\n".join(ret_list)
"""
The demos:
"""
def rdiv0():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",6, 7, 8, 9] ])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 51 , 11 (* | ), 0.34, 0.49, 0.51, 0.51, 0.60
2 , x2 , 800 , 200 ( | ---- *-- ), 6.00, 7.00, 8.00, 8.00, 9.00
````
"""
def rdiv1():
rdivDemo([
["x1",0.1, 0.2, 0.3, 0.4],
["x2",0.1, 0.2, 0.3, 0.4],
["x3",6, 7, 8, 9] ])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 30 , 20 (* | ), 0.10, 0.20, 0.30, 0.30, 0.40
1 , x2 , 30 , 20 (* | ), 0.10, 0.20, 0.30, 0.30, 0.40
2 , x3 , 800 , 200 ( | ---- *-- ), 6.00, 7.00, 8.00, 8.00, 9.00
````
"""
def rdiv2():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",0.6, 0.7, 0.8, 0.9],
["x3",0.15, 0.25, 0.4, 0.35],
["x4",0.6, 0.7, 0.8, 0.9],
["x5",0.1, 0.2, 0.3, 0.4] ])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x5 , 30 , 20 (--- *--- | ), 0.10, 0.20, 0.30, 0.30, 0.40
1 , x3 , 35 , 15 ( ---- *- | ), 0.15, 0.25, 0.35, 0.35, 0.40
2 , x1 , 51 , 11 ( ------ *-- ), 0.34, 0.49, 0.51, 0.51, 0.60
3 , x2 , 80 , 20 ( | ---- *-- ), 0.60, 0.70, 0.80, 0.80, 0.90
3 , x4 , 80 , 20 ( | ---- *-- ), 0.60, 0.70, 0.80, 0.80, 0.90
````
"""
def rdiv3():
rdivDemo([
["x1",101, 100, 99, 101, 99.5],
["x2",101, 100, 99, 101, 100],
["x3",101, 100, 99.5, 101, 99],
["x4",101, 100, 99, 101, 100] ])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 10000 , 150 (------- *| ),99.00, 99.50, 100.00, 101.00, 101.00
1 , x2 , 10000 , 100 (--------------*| ),99.00, 100.00, 100.00, 101.00, 101.00
1 , x3 , 10000 , 150 (------- *| ),99.00, 99.50, 100.00, 101.00, 101.00
1 , x4 , 10000 , 100 (--------------*| ),99.00, 100.00, 100.00, 101.00, 101.00
````
"""
def rdiv4():
rdivDemo([
["x1",11,12,13],
["x2",14,31,22],
["x3",23,24,31],
["x5",32,33,34]])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 1100 , 0 ( * | ),11.00, 11.00, 12.00, 13.00, 13.00
1 , x2 , 1400 , 0 ( *| ),14.00, 14.00, 22.00, 31.00, 31.00
2 , x3 , 2300 , 0 ( |* ),23.00, 23.00, 24.00, 31.00, 31.00
2 , x5 , 3200 , 0 ( | * ),32.00, 32.00, 33.00, 34.00, 34.00
````
"""
def rdiv5():
rdivDemo([
["x1",11,11,11],
["x2",11,11,11],
["x3",11,11,11]])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 1100 , 0 (* | ),11.00, 11.00, 11.00, 11.00, 11.00
1 , x2 , 1100 , 0 (* | ),11.00, 11.00, 11.00, 11.00, 11.00
1 , x3 , 1100 , 0 (* | ),11.00, 11.00, 11.00, 11.00, 11.00
````
"""
def rdiv6():
rdivDemo([
["x1",11,11,11],
["x2",11,11,11],
["x4",32,33,34,35]])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x1 , 1100 , 0 (* | ),11.00, 11.00, 11.00, 11.00, 11.00
1 , x2 , 1100 , 0 (* | ),11.00, 11.00, 11.00, 11.00, 11.00
2 , x4 , 3400 , 200 ( | - * ),32.00, 33.00, 34.00, 34.00, 35.00
````
"""
def rdiv7():
rdivDemo([
["x1"] + [rand()**0.5 for _ in range(256)],
["x2"] + [rand()**2 for _ in range(256)],
["x3"] + [rand() for _ in range(256)]
])
"""
````
rank , name , med , iqr
----------------------------------------------------
1 , x2 , 25 , 50 (-- * -|--------- ), 0.01, 0.09, 0.25, 0.47, 0.86
2 , x3 , 49 , 47 ( ------ *| ------- ), 0.08, 0.29, 0.49, 0.66, 0.89
3 , x1 , 73 , 37 ( ------|- * --- ), 0.32, 0.57, 0.73, 0.86, 0.95
````
"""
def _rdivs():
seed(1)
rdiv0(); rdiv1(); rdiv2(); rdiv3();
rdiv4(); rdiv5(); rdiv6(); rdiv7()
#_rdivs() | 0.032907 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI
from cinder.image import glance
from cinder import objects
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, scheduler_rpcapi):
requires = ['filter_properties', 'image_id', 'request_spec',
'snapshot_id', 'volume_id', 'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
]
def execute(self, **kwargs):
pass
def _reschedule(self, context, cause, request_spec, filter_properties,
snapshot_id, image_id, volume_id, **kwargs):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, CONF.volume_topic, volume_id,
snapshot_id=snapshot_id, image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, context, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume_id)
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Reset the volume state.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
update = {
'status': 'creating',
'scheduled_at': timeutils.utcnow(),
'host': None
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let resetting the status cause the rescheduling to fail.
LOG.exception(_LE("Volume %s: resetting 'creating' "
"status failed."),
volume_id)
def revert(self, context, result, flow_failures, **kwargs):
# NOTE(dulek): Revert is occurring and manager need to know if
# rescheduling happened. We're injecting this information into
# exception that will be caught there. This is ugly and we need
# TaskFlow to support better way of returning data from reverted flow.
cause = list(flow_failures.values())[0]
cause.exception.rescheduled = False
# Check if we have a cause which can tell us not to reschedule.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
return
volume_id = kwargs['volume_id']
# Use a different context when rescheduling.
if self.reschedule_context:
context = self.reschedule_context
try:
self._pre_reschedule(context, volume_id)
self._reschedule(context, cause, **kwargs)
self._post_reschedule(context, volume_id)
# Inject information that we rescheduled
cause.exception.rescheduled = True
except exception.CinderException:
LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'volume_ref'
def __init__(self, db, host):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
        # the volume can not be deleted while it's still being created?
volume_ref = self.db.volume_get(context, volume_id)
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
if isinstance(result, ft.Failure):
return
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['image_id', 'snapshot_id', 'source_volid',
'source_replicaid']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume_ref, **kwargs):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume_ref['name']
volume_size = utils.as_int(volume_ref['size'], quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume_ref['status'],
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image]
'volume_id': volume_ref['id'],
'volume_name': volume_name,
'volume_size': volume_size,
}
if kwargs.get('snapshot_id'):
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': kwargs['snapshot_id'],
})
elif kwargs.get('source_volid'):
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = kwargs['source_volid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_volid': source_volid,
            # This is captured in case we have to revert and we want to set
# back the source volume status to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref['status'],
'type': 'source_vol',
})
elif kwargs.get('source_replicaid'):
# We are making a clone based on the replica.
#
# NOTE(harlowja): This will likely fail if the replica
# disappeared by the time this call occurred.
source_volid = kwargs['source_replicaid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_replicaid': source_volid,
'source_replicastatus': source_volume_ref['status'],
'type': 'source_replica',
})
elif kwargs.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = kwargs['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
            # NOTE(harlowja): if we later have to recover this task's output
            # on another 'node', this object won't be able to be serialized,
            # so we will have to recreate it on demand in the future.
'image_service': image_service,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, ft.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
host=volume_ref['host'])
except exception.CinderException:
            # If notification sending or volume database entry reading fails
            # then we shouldn't error out the whole workflow since this is
            # not always information that must be sent for volumes to operate.
LOG.exception(_LE("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s"),
{'event': self.event_suffix, 'volume_id': volume_id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume'
def __init__(self, db, driver):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
        Caller should provide one and only one of snapshot_id, source_volid
        and image_id. If an image_id is specified, an image_meta should also
        be provided; otherwise it will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
self._enable_bootable_flag(context, volume_id)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume_id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume_id)
elif kwargs.get('source_replicaid'):
src_type = 'source replica'
src_id = kwargs['source_replicaid']
source_replicaid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_replicaid,
volume_id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self._capture_volume_image_metadata(context, volume_id,
image_id, image_meta)
except exception.GlanceMetadataNotFound:
            # If the volume is not created from an image, no glance metadata
            # would be available for that volume in the
            # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.exception(exception_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
**kwargs):
volume_id = volume_ref['id']
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
model_update = self.driver.create_volume_from_snapshot(volume_ref,
snapshot)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = self.db.volume_get(context,
snapshot.volume_id)
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
"bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference"),
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot.volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume_id,
snapshot_id=snapshot_id)
return model_update
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
"flag to true"), {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
source_volid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_volid)
model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
source_volid=source_volid)
return model_update
def _create_from_source_replica(self, context, volume_ref,
source_replicaid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_replicaid)
model_update = self.driver.create_replica_test_volume(volume_ref,
srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context,
volume_ref['id'],
source_replicaid=source_replicaid)
return model_update
def _copy_image_to_volume(self, context, volume_ref,
image_id, image_location, image_service):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"),
{'volume_id': volume_id})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
# NOTE(harlowja): The best way for this to happen would be in bulk,
# but that doesn't seem to exist (yet), so we go through one by one
# which means we can have partial create/update failure.
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
for (key, value) in volume_metadata.items():
try:
self.db.volume_glance_metadata_create(context, volume_id,
key, value)
except exception.GlanceMetadataExists:
pass
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
        # NOTE (singn): two params need to be returned: a dict containing
        # provider_location for the cloned volume, and the clone status.
model_update, cloned = self.driver.clone_image(context,
volume_ref,
image_location,
image_meta,
image_service)
if not cloned:
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
#
# Create the volume and then download the image onto the volume.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(updates)s"),
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_raw_volume(self, context, volume_ref, **kwargs):
return self.driver.create_volume(volume_ref)
def execute(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume_ref['id']
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.exception(_LE("Unable to create volume. "
"Volume driver %s not initialized"), driver_name)
# NOTE(flaper87): Set the error status before
# raising any exception.
self.db.volume_update(context, volume_id, dict(status='error'))
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s"),
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'source_replica':
model_update = self._create_from_source_replica(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume_ref=volume_ref,
**volume_spec)
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
"with creation provided model %(model)s"),
{'volume_id': volume_id, 'model': model_update})
raise
return volume_ref
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
volume_id = volume['id']
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'building' if this fails)??
volume_ref = self.db.volume_update(context, volume_id, update)
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(update)s"), {'volume_id': volume_id,
'update': update})
# Even if the update fails, the volume is ready.
LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
"created successfully"),
{'volume_name': volume_spec['volume_name'],
'volume_id': volume_id})
def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
allow_reschedule, reschedule_context, request_spec,
filter_properties, snapshot_id=None, image_id=None,
source_volid=None, source_replicaid=None,
consistencygroup_id=None, cgsnapshot_id=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
status & notify or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
    5. Notifies that the volume has started to be created.
6. Creates a volume from the extracted volume specification.
    7. Attaches an on-success *only* task that notifies that the volume creation
has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'image_id': image_id,
'request_spec': request_spec,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'volume_id': volume_id,
'source_replicaid': source_replicaid,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
}
volume_flow.add(ExtractVolumeRefTask(db, host))
retry = filter_properties.get('retry', None)
if allow_reschedule and request_spec and retry:
volume_flow.add(OnFailureRescheduleTask(reschedule_context,
db, scheduler_rpcapi))
LOG.debug("Volume reschedule parameters: %(allow)s "
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(db, driver),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
| 0 |
"""Server trace events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import enum # pylint: disable=wrong-import-order
import logging
from .. import _events
_LOGGER = logging.getLogger(__name__)
class ServerTraceEvent(_events.TraceEvent):
"""Parent class of all server trace events.
Contains the basic attributes of all events as well as the factory method
    `from_data` that instantiates an event object from its data representation.
All server event classes must derive from this class.
"""
__slots__ = (
'event_type',
'timestamp',
'source',
'servername',
'payload',
)
def __init__(self,
timestamp=None, source=None, servername=None, payload=None):
self.event_type = ServerTraceEventTypes(self.__class__).name
if timestamp is None:
self.timestamp = None
else:
self.timestamp = float(timestamp)
self.source = source
self.payload = payload
self.servername = servername
@property
@abc.abstractmethod
def event_data(self):
"""Return an event's event_data.
"""
@classmethod
def _class_from_type(cls, event_type):
"""Return the class for a given event_type.
"""
etype = getattr(ServerTraceEventTypes, event_type, None)
if etype is None:
_LOGGER.warning('Unknown event type %r', event_type)
return None
eclass = etype.value
return eclass
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
"""Intantiate an event from given event data.
"""
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass.from_data(
timestamp=timestamp,
source=source,
servername=servername,
event_type=event_type,
event_data=event_data,
payload=payload
)
except Exception: # pylint: disable=broad-except
_LOGGER.warning('Failed to parse event type %r:', event_type,
exc_info=True)
event = None
return event
def to_data(self):
"""Return a 6 tuple represtation of an event.
"""
event_data = self.event_data
if event_data is None:
event_data = ''
return (
self.timestamp,
self.source,
self.servername,
self.event_type,
event_data,
self.payload
)
@classmethod
def from_dict(cls, event_data):
"""Instantiate an event from a dict of its data.
"""
event_type = event_data.pop('event_type')
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass(**event_data)
except Exception: # pylint: disable=broad-except
            _LOGGER.warning('Failed to instantiate event type %r:', event_type,
exc_info=True)
event = None
return event
def to_dict(self):
"""Return a dictionary representation of an event.
"""
return {
k: getattr(self, k)
for k in super(self.__class__, self).__slots__ + self.__slots__
}
class ServerStateTraceEvent(ServerTraceEvent):
"""Event emitted when server state changes.
"""
__slots__ = (
'state',
)
def __init__(self, state,
timestamp=None, source=None, servername=None, payload=None):
super(ServerStateTraceEvent, self).__init__(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
self.state = state
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload,
state=event_data
)
@property
def event_data(self):
return self.state
class ServerBlackoutTraceEvent(ServerTraceEvent):
"""Event emitted when server is blackedout.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerBlackoutClearedTraceEvent(ServerTraceEvent):
"""Event emitted when server blackout is cleared.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerTraceEventTypes(enum.Enum):
"""Enumeration of all server event type names.
"""
server_state = ServerStateTraceEvent
server_blackout = ServerBlackoutTraceEvent
server_blackout_cleared = ServerBlackoutClearedTraceEvent
class ServerTraceEventHandler(_events.TraceEventHandler):
"""Base class for processing server trace events.
"""
DISPATCH = {
ServerStateTraceEvent:
lambda self, event: self.on_server_state(
when=event.timestamp,
servername=event.servername,
state=event.state
),
ServerBlackoutTraceEvent:
lambda self, event: self.on_server_blackout(
when=event.timestamp,
servername=event.servername
),
ServerBlackoutClearedTraceEvent:
lambda self, event: self.on_server_blackout_cleared(
when=event.timestamp,
servername=event.servername
),
}
def dispatch(self, event):
"""Dispatch event to one of the handler methods.
"""
return self.DISPATCH.get(type(event), None)
@abc.abstractmethod
def on_server_state(self, when, servername, state):
"""Invoked when server state changes.
"""
@abc.abstractmethod
def on_server_blackout(self, when, servername):
"""Invoked when server is blackedout.
"""
@abc.abstractmethod
def on_server_blackout_cleared(self, when, servername):
"""Invoked when server blackout is cleared.
"""
| 0 |
"""aospy.Run objects for simulations from various GFDL models."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
# SM2.1
sm2_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie'
'_rerun6.YIM/pp'),
data_dur=20,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(120, 12, 31),
),
)
sm2_aero = Run(
name='aero',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie2'
'_rerun6.YIM/pp'),
data_dur=100,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(100, 12, 31),
),
)
sm2_gas = Run(
name='gas',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie3'
'_rerun8.YIM/pp'),
data_dur=5,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(80, 12, 31),
),
)
sm2_both = Run(
name='both',
data_loader=GFDLDataLoader(
data_direc=('/archive/Yi.Ming/sm2.1_fixed/SM2.1U_Control-1860_lm2_aie4'
'_rerun6.YIM/pp'),
data_dur=100,
data_start_date=datetime.datetime(1, 1, 1),
data_end_date=datetime.datetime(100, 12, 31),
),
)
# c48-HiRAM
hiram_c48_0 = Run(
name='ming0',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0/'
'gfdl.ncrc2-intel-prod/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1995, 12, 31),
),
)
hiram_c48_0_p2K = Run(
name='ming0_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0'
'_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_1 = Run(
name='ming1',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0b/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_1_p2K = Run(
name='ming1_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0b_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_2 = Run(
name='ming2',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0e/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_2_p2K = Run(
name='ming2_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0e_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_3 = Run(
name='ming3',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0f/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_3_p2K = Run(
name='ming3_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0f_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_4 = Run(
name='ming4',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X0c/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_4_p2K = Run(
name='ming4_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X0c_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_5 = Run(
name='ming5',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X01/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_5_p2K = Run(
name='ming5_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X01_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_6 = Run(
name='ming6',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X02/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_6_p2K = Run(
name='ming6_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X02_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_7 = Run(
name='ming7',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X03/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_7_p2K = Run(
name='ming7_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X03_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_8 = Run(
name='ming8',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/c48l32_him_X04/'
'gfdl.ncrc2-intel-prod/pp'),
),
)
hiram_c48_8_p2K = Run(
name='ming8_p2K',
data_loader=GFDLDataLoader(
template=hiram_c48_0.data_loader,
data_direc=('/archive/Ming.Zhao/hiramdp/siena_201204/'
'c48l32_him_X04_p2K/gfdl.ncrc2-intel-prod/pp'),
),
)
# AM3_c90
am3c90_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1990, 12, 31),
),
)
am3c90_p2K = Run(
name='p2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/h1g/FMS/siena_201203/c90L48_am3p10_v6_clim_p2k/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(1990, 12, 31),
),
)
# AM2.5
am2p5_cont = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/hiramdp/siena_201204/c180l32_am2_C0/'
'gfdl.ncrc2-intel-prod/pp'),
data_dur=10,
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
),
)
am2p5_p2K = Run(
name='p2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/hiramdp/siena_201204/c180l32_am2_C0_p2K/'
'gfdl.ncrc2-intel-prod/pp'),
data_dur=10,
data_start_date=datetime.datetime(1981, 1, 1),
data_end_date=datetime.datetime(2000, 12, 31),
),
)
# AM4 prototypes
am4_a1c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a1p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/Ming.Zhao/awg/tikal_201403/c96L48_am4a1_'
'2000climo_highsen1_p2K/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a2c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_'
'2000climo/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_a2p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/cjg/awg/tikal_201403/c96L48_am4a2r1_'
'2000climo_p2K/gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_c1c = Run(
name='cont',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo/'
'gfdl.ncrc2-intel-prod-openmp/pp'),
),
)
am4_c1p2k = Run(
name='+2K',
data_loader=GFDLDataLoader(
data_direc=('/archive/miz/tikal_201409_awgUpdates_mom6_2014.08.29/'
'c96L48_am4c1r2_2000climo_p2K/gfdl.ncrc2-intel-prod-'
'openmp/pp'),
),
)
| 0 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage.  If not, see <http://www.gnu.org/licenses/>.
""" This file shows the project application views using a state machine. """
from django.conf import settings
from django.http import HttpResponseBadRequest
from karaage.common.decorators import login_required
from karaage.plugins.kgapplications.views import base
from .models import SoftwareApplication
def get_application_state_machine():
""" Get the default state machine for applications. """
config = settings.APPLICATION_SOFTWARE
state_machine = base.StateMachine(config)
return state_machine
def register():
base.setup_application_type(
SoftwareApplication, get_application_state_machine())
@login_required
def new_application(request, software_license):
# Called automatically by hook.
assert software_license is not None
if request.method != 'POST':
return HttpResponseBadRequest("<h1>Bad Request</h1>")
application = SoftwareApplication()
application.new_applicant = None
application.existing_person = request.user
application.software_license = software_license
application.save()
state_machine = get_application_state_machine()
response = state_machine.start(request, application, {})
return response
| 0 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
gre_opts = [
cfg.ListOpt('tunnel_id_ranges',
default=[],
help=_("Comma-separated list of <tun_min>:<tun_max> tuples "
"enumerating ranges of GRE tunnel IDs that are "
"available for tenant network allocation"))
]
cfg.CONF.register_opts(gre_opts, "ml2_type_gre")
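# For example (illustrative values only, not defaults), the matching section
# in the ML2 plugin configuration (typically ml2_conf.ini) would look like:
#
# [ml2_type_gre]
# tunnel_id_ranges = 1:1000,2000:2999
#
# Each <tun_min>:<tun_max> pair is parsed by _parse_tunnel_ranges() into
# self.gre_id_ranges and then mirrored into the ml2_gre_allocations table by
# _sync_gre_allocations() below.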
class GreAllocation(model_base.BASEV2):
__tablename__ = 'ml2_gre_allocations'
gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False)
class GreEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_gre_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
def __repr__(self):
return "<GreTunnelEndpoint(%s)>" % self.ip_address
class GreTypeDriver(type_tunnel.TunnelTypeDriver):
def get_type(self):
return p_const.TYPE_GRE
def initialize(self):
self.gre_id_ranges = []
self._parse_tunnel_ranges(
cfg.CONF.ml2_type_gre.tunnel_id_ranges,
self.gre_id_ranges,
p_const.TYPE_GRE
)
self._sync_gre_allocations()
def reserve_provider_segment(self, session, segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
with session.begin(subtransactions=True):
try:
alloc = (session.query(GreAllocation).
filter_by(gre_id=segmentation_id).
with_lockmode('update').
one())
if alloc.allocated:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
LOG.debug(_("Reserving specific gre tunnel %s from pool"),
segmentation_id)
alloc.allocated = True
except sa_exc.NoResultFound:
LOG.debug(_("Reserving specific gre tunnel %s outside pool"),
segmentation_id)
alloc = GreAllocation(gre_id=segmentation_id)
alloc.allocated = True
session.add(alloc)
def allocate_tenant_segment(self, session):
with session.begin(subtransactions=True):
alloc = (session.query(GreAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if alloc:
LOG.debug(_("Allocating gre tunnel id %(gre_id)s"),
{'gre_id': alloc.gre_id})
alloc.allocated = True
return {api.NETWORK_TYPE: p_const.TYPE_GRE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.gre_id}
def release_segment(self, session, segment):
gre_id = segment[api.SEGMENTATION_ID]
with session.begin(subtransactions=True):
try:
alloc = (session.query(GreAllocation).
filter_by(gre_id=gre_id).
with_lockmode('update').
one())
alloc.allocated = False
for lo, hi in self.gre_id_ranges:
if lo <= gre_id <= hi:
LOG.debug(_("Releasing gre tunnel %s to pool"),
gre_id)
break
else:
session.delete(alloc)
LOG.debug(_("Releasing gre tunnel %s outside pool"),
gre_id)
except sa_exc.NoResultFound:
LOG.warning(_("gre_id %s not found"), gre_id)
def _sync_gre_allocations(self):
"""Synchronize gre_allocations table with configured tunnel ranges."""
# determine current configured allocatable gres
gre_ids = set()
for gre_id_range in self.gre_id_ranges:
tun_min, tun_max = gre_id_range
if tun_max + 1 - tun_min > 1000000:
LOG.error(_("Skipping unreasonable gre ID range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
gre_ids |= set(xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
allocs = (session.query(GreAllocation).all())
for alloc in allocs:
try:
# see if tunnel is allocatable
gre_ids.remove(alloc.gre_id)
except KeyError:
                    # it's not allocatable, so check if it's allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing tunnel %s from pool"),
alloc.gre_id)
session.delete(alloc)
# add missing allocatable tunnels to table
for gre_id in sorted(gre_ids):
alloc = GreAllocation(gre_id=gre_id)
session.add(alloc)
def get_gre_allocation(self, session, gre_id):
return session.query(GreAllocation).filter_by(gre_id=gre_id).first()
def get_endpoints(self):
"""Get every gre endpoints from database."""
LOG.debug(_("get_gre_endpoints() called"))
session = db_api.get_session()
with session.begin(subtransactions=True):
gre_endpoints = session.query(GreEndpoints)
return [{'ip_address': gre_endpoint.ip_address}
for gre_endpoint in gre_endpoints]
def add_endpoint(self, ip):
LOG.debug(_("add_gre_endpoint() called for ip %s"), ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
gre_endpoint = (session.query(GreEndpoints).
filter_by(ip_address=ip).one())
LOG.warning(_("Gre endpoint with ip %s already exists"), ip)
except sa_exc.NoResultFound:
gre_endpoint = GreEndpoints(ip_address=ip)
session.add(gre_endpoint)
return gre_endpoint
| 0 |
# LCOETurbineEvaluator.py
#
# Given turbine blade size, hub height and maximum rotational speed, compute the
# Levelized Cost of Energy (in $/kWh). The annual energy is computed by
# multiplying the Power Curve by the Weibull Distribution.
#
#
# Author: Lewis Li (lewisli@stanford.edu)
# Original Date: November 28th 2015
#
#
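#
# A minimal sketch of the Weibull-weighted AEP idea described above, assuming a
# power curve P in MW sampled on a wind-speed grid v (the real computation is
# delegated to WindDistribution.CalculateAEPWeibull below and may differ):
#
#   pdf = (k / A) * (v / A) ** (k - 1) * np.exp(-(v / A) ** k)  # Weibull PDF
#   AEP = 8760.0 * np.trapz(P * pdf, v)                         # MWh per year
#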
import os
from math import pi
import numpy as np
from pyopt_driver.pyopt_driver import pyOptDriver
from commonse.WindWaveDrag import FluidLoads, AeroHydroLoads, TowerWindDrag, \
TowerWaveDrag
from commonse.environment import WindBase, WaveBase # , SoilBase
from commonse import Tube
from commonse.utilities import check_gradient_unit_test
from commonse.UtilizationSupplement import fatigue, hoopStressEurocode, \
shellBucklingEurocode, bucklingGL, vonMisesStressUtilization
from rotorse.rotor import RotorSE
from rotorse.rotoraero import Coefficients, SetupRunVarSpeed, \
RegulatedPowerCurve, AEP
from rotorse.rotoraerodefaults import RotorAeroVSVPWithCCBlade,GeometrySpline, \
CCBladeGeometry, CCBlade, CSMDrivetrain, WeibullCDF, \
WeibullWithMeanCDF, RayleighCDF
from rotorse.precomp import Profile, Orthotropic2DMaterial, CompositeSection
from drivese.hub import HubSE
from drivese.drive import Drive4pt
from towerse.tower import TowerSE
from turbine_costsse.turbine_costsse.turbine_costsse import Turbine_CostsSE
from plant_costsse.nrel_csm_bos.nrel_csm_bos import bos_csm_assembly
from plant_costsse.nrel_csm_opex.nrel_csm_opex import opex_csm_assembly
from WindDistribution import CalculateAEPConstantWind
from WindDistribution import CalculateAEPWeibull
from WindDistribution import ComputeScaleFunction
from WindDistribution import EstimateCapacity
from WindDistribution import ComputeLCOE
from openmdao.main.api import VariableTree, Component, Assembly, set_as_top
from openmdao.main.datatypes.api import Int, Float, Array, VarTree, Bool, Slot
from openmdao.lib.casehandlers.api import DumpCaseRecorder
from fusedwind.turbine.tower import TowerFromCSProps
from fusedwind.interface import implement_base
import frame3dd
import matplotlib.pyplot as plt
def EvaluateLCOE(BladeLength, HubHeight, MaximumRotSpeed,Verbose=False):
############################################################################
    # Define baseline parameters used for scaling
    ReferenceBladeLength = 35
ReferenceTowerHeight = 95
WindReferenceHeight = 50
WindReferenceMeanVelocity = 3
WeibullShapeFactor = 2.0
ShearFactor = 0.25
RatedPower = 1.5e6
# Years used for analysis
Years = 25
DiscountRate = 0.08
############################################################################
############################################################################
### 1. Aerodynamic and structural performance using RotorSE
rotor = RotorSE()
# -------------------
# === blade grid ===
# (Array): initial aerodynamic grid on unit radius
rotor.initial_aero_grid = np.array([0.02222276, 0.06666667, 0.11111057, \
0.16666667, 0.23333333, 0.3, 0.36666667, 0.43333333, 0.5, 0.56666667, \
0.63333333, 0.7, 0.76666667, 0.83333333, 0.88888943, 0.93333333, \
0.97777724])
# (Array): initial structural grid on unit radius
rotor.initial_str_grid = np.array([0.0, 0.00492790457512, 0.00652942887106,
0.00813095316699, 0.00983257273154, 0.0114340970275, 0.0130356213234,
0.02222276, 0.024446481932, 0.026048006228, 0.06666667, 0.089508406455,
0.11111057, 0.146462614229, 0.16666667, 0.195309105255, 0.23333333,
0.276686558545, 0.3, 0.333640766319,0.36666667, 0.400404310407, 0.43333333,
0.5, 0.520818918408, 0.56666667, 0.602196371696, 0.63333333,
0.667358391486, 0.683573824984, 0.7, 0.73242031601, 0.76666667, 0.83333333,
0.88888943, 0.93333333, 0.97777724, 1.0])
# (Int): first idx in r_aero_unit of non-cylindrical section,
# constant twist inboard of here
rotor.idx_cylinder_aero = 3
# (Int): first idx in r_str_unit of non-cylindrical section
rotor.idx_cylinder_str = 14
# (Float): hub location as fraction of radius
rotor.hubFraction = 0.025
# ------------------
# === blade geometry ===
# (Array): new aerodynamic grid on unit radius
rotor.r_aero = np.array([0.02222276, 0.06666667, 0.11111057, 0.2, 0.23333333,
0.3, 0.36666667, 0.43333333, 0.5, 0.56666667, 0.63333333, 0.64, 0.7,
0.83333333, 0.88888943, 0.93333333, 0.97777724])
# (Float): location of max chord on unit radius
rotor.r_max_chord = 0.23577
# (Array, m): chord at control points. defined at hub, then at linearly spaced
# locations from r_max_chord to tip
ReferenceChord = [3.2612, 4.5709, 3.3178, 1.4621]
rotor.chord_sub = [x * np.true_divide(BladeLength,ReferenceBladeLength) \
for x in ReferenceChord]
# (Array, deg): twist at control points. defined at linearly spaced locations
# from r[idx_cylinder] to tip
rotor.theta_sub = [13.2783, 7.46036, 2.89317, -0.0878099]
# (Array, m): precurve at control points. defined at same locations at chord,
# starting at 2nd control point (root must be zero precurve)
rotor.precurve_sub = [0.0, 0.0, 0.0]
# (Array, m): adjustment to precurve to account for curvature from loading
rotor.delta_precurve_sub = [0.0, 0.0, 0.0]
# (Array, m): spar cap thickness parameters
rotor.sparT = [0.05, 0.047754, 0.045376, 0.031085, 0.0061398]
# (Array, m): trailing-edge thickness parameters
rotor.teT = [0.1, 0.09569, 0.06569, 0.02569, 0.00569]
# (Float, m): blade length (if not precurved or swept)
# otherwise length of blade before curvature
rotor.bladeLength = BladeLength
# (Float, m): adjustment to blade length to account for curvature from
# loading
rotor.delta_bladeLength = 0.0
rotor.precone = 2.5 # (Float, deg): precone angle
rotor.tilt = 5.0 # (Float, deg): shaft tilt
rotor.yaw = 0.0 # (Float, deg): yaw error
rotor.nBlades = 3 # (Int): number of blades
# ------------------
# === airfoil files ===
basepath = os.path.join(os.path.dirname(\
os.path.realpath(__file__)), '5MW_AFFiles')
# load all airfoils
airfoil_types = [0]*8
airfoil_types[0] = os.path.join(basepath, 'Cylinder1.dat')
airfoil_types[1] = os.path.join(basepath, 'Cylinder2.dat')
airfoil_types[2] = os.path.join(basepath, 'DU40_A17.dat')
airfoil_types[3] = os.path.join(basepath, 'DU35_A17.dat')
airfoil_types[4] = os.path.join(basepath, 'DU30_A17.dat')
airfoil_types[5] = os.path.join(basepath, 'DU25_A17.dat')
airfoil_types[6] = os.path.join(basepath, 'DU21_A17.dat')
airfoil_types[7] = os.path.join(basepath, 'NACA64_A17.dat')
# place at appropriate radial stations
af_idx = [0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7]
n = len(af_idx)
af = [0]*n
for i in range(n):
af[i] = airfoil_types[af_idx[i]]
rotor.airfoil_files = af # (List): names of airfoil file
# ----------------------
# === atmosphere ===
rotor.rho = 1.225 # (Float, kg/m**3): density of air
rotor.mu = 1.81206e-5 # (Float, kg/m/s): dynamic viscosity of air
rotor.shearExp = 0.25 # (Float): shear exponent
rotor.hubHt = HubHeight # (Float, m): hub height
rotor.turbine_class = 'I' # (Enum): IEC turbine class
    rotor.turbulence_class = 'B' # (Enum): IEC turbulence class
rotor.cdf_reference_height_wind_speed = 30.0
rotor.g = 9.81 # (Float, m/s**2): acceleration of gravity
# ----------------------
# === control ===
rotor.control.Vin = 3.0 # (Float, m/s): cut-in wind speed
rotor.control.Vout = 26.0 # (Float, m/s): cut-out wind speed
rotor.control.ratedPower = RatedPower # (Float, W): rated power
# (Float, rpm): minimum allowed rotor rotation speed
# (Float, rpm): maximum allowed rotor rotation speed
rotor.control.minOmega = 0.0
rotor.control.maxOmega = MaximumRotSpeed
# (Float): tip-speed ratio in Region 2 (should be optimized externally)
rotor.control.tsr = 7
# (Float, deg): pitch angle in region 2 (and region 3 for fixed pitch machines)
rotor.control.pitch = 0.0
# (Float, deg): worst-case pitch at survival wind condition
rotor.pitch_extreme = 0.0
# (Float, deg): worst-case azimuth at survival wind condition
rotor.azimuth_extreme = 0.0
    # (Float): fraction of rated speed at which the deflection is assumed to be
# representative throughout the power curve calculation
rotor.VfactorPC = 0.7
# ----------------------
# === aero and structural analysis options ===
# (Int): number of sectors to divide rotor face into in computing thrust and power
rotor.nSector = 4
# (Int): number of points to evaluate aero analysis at
rotor.npts_coarse_power_curve = 20
# (Int): number of points to use in fitting spline to power curve
rotor.npts_spline_power_curve = 200
# (Float): availability and other losses (soiling, array, etc.)
rotor.AEP_loss_factor = 1.0
rotor.drivetrainType = 'geared' # (Enum)
# (Int): number of natural frequencies to compute
rotor.nF = 5
# (Float): a dynamic amplification factor to adjust the static deflection
# calculation
rotor.dynamic_amplication_tip_deflection = 1.35
# ----------------------
# === materials and composite layup ===
basepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), \
'5MW_PrecompFiles')
materials = Orthotropic2DMaterial.listFromPreCompFile(os.path.join(basepath,\
'materials.inp'))
ncomp = len(rotor.initial_str_grid)
upper = [0]*ncomp
lower = [0]*ncomp
webs = [0]*ncomp
profile = [0]*ncomp
# (Array): array of leading-edge positions from a reference blade axis
# (usually blade pitch axis). locations are normalized by the local chord
# length. e.g. leLoc[i] = 0.2 means leading edge is 0.2*chord[i] from reference
# axis. positive in -x direction for airfoil-aligned coordinate system
rotor.leLoc = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.498, 0.497,
0.465, 0.447, 0.43, 0.411, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4])
# (Array): index of sector for spar (PreComp definition of sector)
rotor.sector_idx_strain_spar = [2]*ncomp
# (Array): index of sector for trailing-edge (PreComp definition of sector)
rotor.sector_idx_strain_te = [3]*ncomp
web1 = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.4114, 0.4102,
0.4094, 0.3876, 0.3755, 0.3639, 0.345, 0.3342, 0.3313, 0.3274, 0.323,
0.3206, 0.3172, 0.3138, 0.3104, 0.307, 0.3003, 0.2982, 0.2935, 0.2899,
0.2867, 0.2833, 0.2817, 0.2799, 0.2767, 0.2731, 0.2664, 0.2607, 0.2562,
0.1886, -1.0])
web2 = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.5886, 0.5868,
0.5854, 0.5508, 0.5315, 0.5131, 0.4831, 0.4658, 0.4687, 0.4726, 0.477,
0.4794, 0.4828, 0.4862, 0.4896, 0.493, 0.4997, 0.5018, 0.5065, 0.5101,
0.5133, 0.5167, 0.5183, 0.5201, 0.5233, 0.5269, 0.5336, 0.5393, 0.5438,
0.6114, -1.0])
web3 = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0])
# (Array, m): chord distribution for reference section, thickness of structural
# layup scaled with reference thickness (fixed t/c for this case)
rotor.chord_str_ref = np.array([3.2612, 3.3100915356, 3.32587052924,
3.34159388653, 3.35823798667, 3.37384375335, 3.38939112914, 3.4774055542,
3.49839685, 3.51343645709, 3.87017220335, 4.04645623801, 4.19408216643,
4.47641008477, 4.55844487985, 4.57383098262, 4.57285771934, 4.51914315648,
4.47677655262, 4.40075650022, 4.31069949379, 4.20483735936, 4.08985563932,
3.82931757126, 3.74220276467, 3.54415796922, 3.38732428502, 3.24931446473,
3.23421422609, 3.22701537997, 3.21972125648, 3.08979310611, 2.95152261813,
2.330753331, 2.05553464181, 1.82577817774, 1.5860853279, 1.4621])* \
np.true_divide(BladeLength,ReferenceBladeLength)
for i in range(ncomp):
webLoc = []
if web1[i] != -1:
webLoc.append(web1[i])
if web2[i] != -1:
webLoc.append(web2[i])
if web3[i] != -1:
webLoc.append(web3[i])
upper[i], lower[i], webs[i] = CompositeSection.initFromPreCompLayupFile\
(os.path.join(basepath, 'layup_' + str(i+1) + '.inp'), webLoc, materials)
profile[i] = Profile.initFromPreCompFile(os.path.join(basepath, 'shape_' \
+ str(i+1) + '.inp'))
# (List): list of all Orthotropic2DMaterial objects used in
# defining the geometry
rotor.materials = materials
# (List): list of CompositeSection objections defining the properties for
# upper surface
rotor.upperCS = upper
# (List): list of CompositeSection objections defining the properties for
# lower surface
rotor.lowerCS = lower
# (List): list of CompositeSection objections defining the properties for
# shear webs
rotor.websCS = webs
# (List): airfoil shape at each radial position
rotor.profile = profile
# --------------------------------------
# === fatigue ===
# (Array): nondimensional radial locations of damage equivalent moments
rotor.rstar_damage = np.array([0.000, 0.022, 0.067, 0.111, 0.167, 0.233, 0.300,
0.367, 0.433, 0.500, 0.567, 0.633, 0.700, 0.767, 0.833, 0.889, 0.933, 0.978])
# (Array, N*m): damage equivalent moments about blade c.s. x-direction
rotor.Mxb_damage = 1e3*np.array([2.3743E+003, 2.0834E+003, 1.8108E+003,
1.5705E+003, 1.3104E+003, 1.0488E+003, 8.2367E+002, 6.3407E+002,
4.7727E+002, 3.4804E+002, 2.4458E+002, 1.6339E+002, 1.0252E+002,
5.7842E+001, 2.7349E+001, 1.1262E+001, 3.8549E+000, 4.4738E-001])
# (Array, N*m): damage equivalent moments about blade c.s. y-direction
rotor.Myb_damage = 1e3*np.array([2.7732E+003, 2.8155E+003, 2.6004E+003,
2.3933E+003, 2.1371E+003, 1.8459E+003, 1.5582E+003, 1.2896E+003,
1.0427E+003, 8.2015E+002, 6.2449E+002, 4.5229E+002, 3.0658E+002,
1.8746E+002, 9.6475E+001, 4.2677E+001, 1.5409E+001, 1.8426E+000])
rotor.strain_ult_spar = 1.0e-2 # (Float): ultimate strain in spar cap
    # (Float): ultimate strain in trailing-edge panels, note that I am putting a
# factor of two for the damage part only.
rotor.strain_ult_te = 2500*1e-6 * 2
rotor.eta_damage = 1.35*1.3*1.0 # (Float): safety factor for fatigue
rotor.m_damage = 10.0 # (Float): slope of S-N curve for fatigue analysis
# (Float): number of cycles used in fatigue analysis
rotor.N_damage = 365*24*3600*20.0
# ----------------
# from myutilities import plt
# === run and outputs ===
rotor.run()
# Evaluate AEP Using Lewis' Functions
# Weibull Wind Parameters
WindReferenceHeight = 50
WindReferenceMeanVelocity = 7.5
WeibullShapeFactor = 2.0
ShearFactor = 0.25
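    # The reference wind speed presumably gets extrapolated to hub height with
    # the usual power-law shear profile before the Weibull scale is fitted, i.e.
    #   V_hub = WindReferenceMeanVelocity * (HubHeight / WindReferenceHeight) ** ShearFactor
    # (the exact handling is inside the imported WindDistribution helpers).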
PowerCurve = rotor.P/1e6
PowerCurveVelocity = rotor.V
HubHeight = rotor.hubHt
AEP,WeibullScale = CalculateAEPWeibull(PowerCurve,PowerCurveVelocity, HubHeight, \
BladeLength,WeibullShapeFactor, WindReferenceHeight, \
WindReferenceMeanVelocity, ShearFactor)
NamePlateCapacity = EstimateCapacity(PowerCurve,PowerCurveVelocity, \
rotor.ratedConditions.V)
# AEP At Constant 7.5m/s Wind used for benchmarking...
#AEP = CalculateAEPConstantWind(PowerCurve, PowerCurveVelocity, 7.5)
if (Verbose ==True):
print '################### ROTORSE ######################'
print 'AEP = %d MWH' %(AEP)
print 'NamePlateCapacity = %fMW' %(NamePlateCapacity)
print 'diameter =', rotor.diameter
print 'ratedConditions.V =', rotor.ratedConditions.V
print 'ratedConditions.Omega =', rotor.ratedConditions.Omega
print 'ratedConditions.pitch =', rotor.ratedConditions.pitch
print 'mass_one_blade =', rotor.mass_one_blade
print 'mass_all_blades =', rotor.mass_all_blades
print 'I_all_blades =', rotor.I_all_blades
print 'freq =', rotor.freq
print 'tip_deflection =', rotor.tip_deflection
print 'root_bending_moment =', rotor.root_bending_moment
print '#########################################################'
#############################################################################
### 2. Hub Sizing
# Specify hub parameters based off rotor
# Load default hub model
hubS = HubSE()
hubS.rotor_diameter = rotor.Rtip*2 # m
hubS.blade_number = rotor.nBlades
hubS.blade_root_diameter = rotor.chord_sub[0]*1.25
hubS.L_rb = rotor.hubFraction*rotor.diameter
hubS.MB1_location = np.array([-0.5, 0.0, 0.0])
hubS.machine_rating = rotor.control.ratedPower
hubS.blade_mass = rotor.mass_one_blade
hubS.rotor_bending_moment = rotor.root_bending_moment
hubS.run()
RotorTotalWeight = rotor.mass_all_blades + hubS.spinner.mass + \
hubS.hub.mass + hubS.pitchSystem.mass
if (Verbose==True):
print '##################### Hub SE ############################'
print "Estimate of Hub Component Sizes:"
print "Hub Components"
print ' Hub: {0:8.1f} kg'.format(hubS.hub.mass)
print ' Pitch system: {0:8.1f} kg'.format(hubS.pitchSystem.mass)
print ' Nose cone: {0:8.1f} kg'.format(hubS.spinner.mass)
print 'Rotor Total Weight = %d kg' %RotorTotalWeight
print '#########################################################'
############################################################################
### 3. Drive train + Nacelle Mass estimation
nace = Drive4pt()
nace.rotor_diameter = rotor.Rtip *2 # m
nace.rotor_speed = rotor.ratedConditions.Omega # #rpm m/s
nace.machine_rating = hubS.machine_rating/1000
nace.DrivetrainEfficiency = 0.95
# 6.35e6 #4365248.74 # Nm
nace.rotor_torque = rotor.ratedConditions.Q
nace.rotor_thrust = rotor.ratedConditions.T # N
nace.rotor_mass = 0.0 #accounted for in F_z # kg
nace.rotor_bending_moment_x = rotor.Mxyz_0[0]
nace.rotor_bending_moment_y = rotor.Mxyz_0[1]
nace.rotor_bending_moment_z = rotor.Mxyz_0[2]
nace.rotor_force_x = rotor.Fxyz_0[0] # N
nace.rotor_force_y = rotor.Fxyz_0[1]
nace.rotor_force_z = rotor.Fxyz_0[2] # N
# geared 3-stage Gearbox with induction generator machine
nace.drivetrain_design = 'geared'
nace.gear_ratio = 96.76 # 97:1 as listed in the 5 MW reference document
nace.gear_configuration = 'eep' # epicyclic-epicyclic-parallel
nace.crane = True # onboard crane present
nace.shaft_angle = 5.0 #deg
nace.shaft_ratio = 0.10
nace.Np = [3,3,1]
nace.ratio_type = 'optimal'
nace.shaft_type = 'normal'
nace.uptower_transformer=False
nace.shrink_disc_mass = 333.3*nace.machine_rating/1000.0 # estimated
nace.mb1Type = 'CARB'
nace.mb2Type = 'SRB'
nace.flange_length = 0.5 #m
nace.overhang = 5.0
nace.gearbox_cm = 0.1
nace.hss_length = 1.5
#0 if no fatigue check, 1 if parameterized fatigue check,
#2 if known loads inputs
nace.check_fatigue = 0
nace.blade_number=rotor.nBlades
nace.cut_in=rotor.control.Vin #cut-in m/s
nace.cut_out=rotor.control.Vout #cut-out m/s
nace.Vrated=rotor.ratedConditions.V #rated windspeed m/s
    nace.weibull_k = WeibullShapeFactor # windspeed distribution shape parameter
# windspeed distribution scale parameter
nace.weibull_A = WeibullScale
nace.T_life=20. #design life in years
nace.IEC_Class_Letter = 'B'
# length from hub center to main bearing, leave zero if unknown
nace.L_rb = hubS.L_rb
# NREL 5 MW Tower Variables
nace.tower_top_diameter = 3.78 # m
nace.run()
if (Verbose==True):
print '##################### Drive SE ############################'
print "Estimate of Nacelle Component Sizes"
print 'Low speed shaft: {0:8.1f} kg'.format(nace.lowSpeedShaft.mass)
print 'Main bearings: {0:8.1f} kg'.format(\
nace.mainBearing.mass + nace.secondBearing.mass)
print 'Gearbox: {0:8.1f} kg'.format(nace.gearbox.mass)
print 'High speed shaft & brakes: {0:8.1f} kg'.format\
(nace.highSpeedSide.mass)
print 'Generator: {0:8.1f} kg'.format(nace.generator.mass)
print 'Variable speed electronics: {0:8.1f} kg'.format(\
nace.above_yaw_massAdder.vs_electronics_mass)
print 'Overall mainframe:{0:8.1f} kg'.format(\
nace.above_yaw_massAdder.mainframe_mass)
print ' Bedplate: {0:8.1f} kg'.format(nace.bedplate.mass)
print 'Electrical connections: {0:8.1f} kg'.format(\
nace.above_yaw_massAdder.electrical_mass)
print 'HVAC system: {0:8.1f} kg'.format(\
nace.above_yaw_massAdder.hvac_mass )
print 'Nacelle cover: {0:8.1f} kg'.format(\
nace.above_yaw_massAdder.cover_mass)
print 'Yaw system: {0:8.1f} kg'.format(nace.yawSystem.mass)
print 'Overall nacelle: {0:8.1f} kg'.format(nace.nacelle_mass, \
nace.nacelle_cm[0], nace.nacelle_cm[1], nace.nacelle_cm[2], \
nace.nacelle_I[0], nace.nacelle_I[1], nace.nacelle_I[2])
print '#########################################################'
############################################################################
### 4. Tower Mass
# --- tower setup ------
from commonse.environment import PowerWind
tower = set_as_top(TowerSE())
# ---- tower ------
tower.replace('wind1', PowerWind())
tower.replace('wind2', PowerWind())
# onshore (no waves)
# --- geometry ----
tower.z_param = [0.0, HubHeight*0.5, HubHeight]
TowerRatio = np.true_divide(HubHeight,ReferenceTowerHeight)
tower.d_param = [6.0*TowerRatio, 4.935*TowerRatio, 3.87*TowerRatio]
tower.t_param = [0.027*1.3*TowerRatio, 0.023*1.3*TowerRatio, \
0.019*1.3*TowerRatio]
n = 10
tower.z_full = np.linspace(0.0, HubHeight, n)
tower.L_reinforced = 15.0*np.ones(n) # [m] buckling length
tower.theta_stress = 0.0*np.ones(n)
tower.yaw = 0.0
# --- material props ---
tower.E = 210e9*np.ones(n)
tower.G = 80.8e9*np.ones(n)
tower.rho = 8500.0*np.ones(n)
tower.sigma_y = 450.0e6*np.ones(n)
# --- spring reaction data. Use float('inf') for rigid constraints. ---
tower.kidx = [0] # applied at base
tower.kx = [float('inf')]
tower.ky = [float('inf')]
tower.kz = [float('inf')]
tower.ktx = [float('inf')]
tower.kty = [float('inf')]
tower.ktz = [float('inf')]
# --- extra mass ----
tower.midx = [n-1] # RNA mass at top
tower.m = [0.8]
tower.mIxx = [1.14930678e+08]
tower.mIyy = [2.20354030e+07]
tower.mIzz = [1.87597425e+07]
tower.mIxy = [0.00000000e+00]
tower.mIxz = [5.03710467e+05]
tower.mIyz = [0.00000000e+00]
tower.mrhox = [-1.13197635]
tower.mrhoy = [0.]
tower.mrhoz = [0.50875268]
tower.addGravityLoadForExtraMass = False
# -----------
# --- wind ---
tower.wind_zref = 90.0
tower.wind_z0 = 0.0
tower.wind1.shearExp = 0.14
tower.wind2.shearExp = 0.14
# ---------------
# # --- loading case 1: max Thrust ---
tower.wind_Uref1 = 11.73732
tower.plidx1 = [n-1] # at tower top
tower.Fx1 = [0.19620519]
tower.Fy1 = [0.]
tower.Fz1 = [-2914124.84400512]
tower.Mxx1 = [3963732.76208099]
tower.Myy1 = [-2275104.79420872]
tower.Mzz1 = [-346781.68192839]
# # ---------------
# # --- loading case 2: max wind speed ---
tower.wind_Uref2 = 70.0
    tower.plidx2 = [n-1] # at tower top
    tower.Fx2 = [930198.60063279]
    tower.Fy2 = [0.]
    tower.Fz2 = [-2883106.12368949]
    tower.Mxx2 = [-1683669.22411597]
    tower.Myy2 = [-2522475.34625363]
    tower.Mzz2 = [147301.97023764]
# # ---------------
# # --- run ---
tower.run()
if (Verbose==True):
print '##################### Tower SE ##########################'
print 'mass (kg) =', tower.mass
print 'f1 (Hz) =', tower.f1
print 'f2 (Hz) =', tower.f2
print 'top_deflection1 (m) =', tower.top_deflection1
print 'top_deflection2 (m) =', tower.top_deflection2
print '#########################################################'
############################################################################
    ## 5. Turbine capital costs analysis
turbine = Turbine_CostsSE()
# NREL 5 MW turbine component masses based on Sunderland model approach
# Rotor
# inline with the windpact estimates
turbine.blade_mass = rotor.mass_one_blade
turbine.hub_mass = hubS.hub.mass
turbine.pitch_system_mass = hubS.pitchSystem.mass
turbine.spinner_mass = hubS.spinner.mass
# Drivetrain and Nacelle
turbine.low_speed_shaft_mass = nace.lowSpeedShaft.mass
turbine.main_bearing_mass=nace.mainBearing.mass
turbine.second_bearing_mass = nace.secondBearing.mass
turbine.gearbox_mass = nace.gearbox.mass
turbine.high_speed_side_mass = nace.highSpeedSide.mass
turbine.generator_mass = nace.generator.mass
turbine.bedplate_mass = nace.bedplate.mass
turbine.yaw_system_mass = nace.yawSystem.mass
# Tower
turbine.tower_mass = tower.mass*0.5
# Additional non-mass cost model input variables
turbine.machine_rating = hubS.machine_rating/1000
turbine.advanced = False
turbine.blade_number = rotor.nBlades
turbine.drivetrain_design = 'geared'
turbine.crane = False
turbine.offshore = False
# Target year for analysis results
turbine.year = 2010
turbine.month = 12
turbine.run()
if (Verbose==True):
print '##################### TurbinePrice SE ####################'
print "Overall rotor cost with 3 advanced blades is ${0:.2f} USD"\
.format(turbine.rotorCC.cost)
print "Blade cost is ${0:.2f} USD".format(turbine.rotorCC.bladeCC.cost)
print "Hub cost is ${0:.2f} USD".format(turbine.rotorCC.hubCC.cost)
print "Pitch system cost is ${0:.2f} USD".format(turbine.rotorCC.pitchSysCC.cost)
print "Spinner cost is ${0:.2f} USD".format(turbine.rotorCC.spinnerCC.cost)
print
print "Overall nacelle cost is ${0:.2f} USD".format(turbine.nacelleCC.cost)
print "LSS cost is ${0:.2f} USD".format(turbine.nacelleCC.lssCC.cost)
print "Main bearings cost is ${0:.2f} USD".format(turbine.nacelleCC.bearingsCC.cost)
print "Gearbox cost is ${0:.2f} USD".format(turbine.nacelleCC.gearboxCC.cost)
print "Hight speed side cost is ${0:.2f} USD".format(turbine.nacelleCC.hssCC.cost)
print "Generator cost is ${0:.2f} USD".format(turbine.nacelleCC.generatorCC.cost)
print "Bedplate cost is ${0:.2f} USD".format(turbine.nacelleCC.bedplateCC.cost)
print "Yaw system cost is ${0:.2f} USD".format(turbine.nacelleCC.yawSysCC.cost)
print
print "Tower cost is ${0:.2f} USD".format(turbine.towerCC.cost)
print
print "The overall turbine cost is ${0:.2f} USD".format(turbine.turbine_cost)
print '#########################################################'
############################################################################
## 6. Operating Expenses
# A simple test of nrel_csm_bos model
bos = bos_csm_assembly()
# Set input parameters
bos.machine_rating = hubS.machine_rating/1000
bos.rotor_diameter = rotor.diameter
bos.turbine_cost = turbine.turbine_cost
bos.hub_height = HubHeight
bos.turbine_number = 1
bos.sea_depth = 0
bos.year = 2009
bos.month = 12
bos.multiplier = 1.0
bos.run()
om = opex_csm_assembly()
om.machine_rating = rotor.control.ratedPower/1000
# Need to manipulate input or underlying component will not execute
om.net_aep = AEP*10e4
om.sea_depth = 0
om.year = 2009
om.month = 12
om.turbine_number = 100
om.run()
if (Verbose==True):
print '##################### Operating Costs ####################'
print "BOS cost per turbine: ${0:.2f} USD".format(bos.bos_costs / \
bos.turbine_number)
print "Average annual operational expenditures"
print "OPEX on shore with 100 turbines ${:.2f}: USD".format(\
om.avg_annual_opex)
print "Preventative OPEX by turbine: ${:.2f} USD".format(\
om.opex_breakdown.preventative_opex / om.turbine_number)
print "Corrective OPEX by turbine: ${:.2f} USD".format(\
om.opex_breakdown.corrective_opex / om.turbine_number)
print "Land Lease OPEX by turbine: ${:.2f} USD".format(\
om.opex_breakdown.lease_opex / om.turbine_number)
print '#########################################################'
CapitalCost = turbine.turbine_cost + bos.bos_costs / bos.turbine_number
OperatingCost = om.opex_breakdown.preventative_opex / om.turbine_number + \
om.opex_breakdown.lease_opex / om.turbine_number + \
om.opex_breakdown.corrective_opex / om.turbine_number
LCOE = ComputeLCOE(AEP, CapitalCost, OperatingCost, DiscountRate, Years)
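    # For reference, a standard discounted-cash-flow LCOE of the form assumed
    # here (ComputeLCOE may differ in detail) is
    #   LCOE = (CapitalCost + sum_t OperatingCost/(1+r)**t) / (sum_t AEP/(1+r)**t)
    # with t = 1..Years and r = DiscountRate; with AEP in MWh this yields $/MWh,
    # hence the division by 1000 below to report $/kWh.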
print '######################***********************###################'
print "Levelized Cost of Energy over %d years \
is $%f/kWH" %(Years,LCOE/1000)
print '######################***********************###################'
    return LCOE/1000
| 0.031706 |
import frappe
from erpnext.manufacturing.doctype.production_order.production_order \
import make_timesheet, add_timesheet_detail
def execute():
frappe.reload_doc('projects', 'doctype', 'task')
frappe.reload_doc('projects', 'doctype', 'timesheet')
if not frappe.db.table_exists("Time Log"):
return
for data in frappe.db.sql("select * from `tabTime Log`", as_dict=1):
if data.task:
company = frappe.db.get_value("Task", data.task, "company")
elif data.production_order:
company = frappe.db.get_value("Prodction Order", data.production_order, "company")
else:
company = frappe.db.get_single_value('Global Defaults', 'default_company')
time_sheet = make_timesheet(data.production_order)
args = get_timelog_data(data)
add_timesheet_detail(time_sheet, args)
if data.docstatus == 2:
time_sheet.docstatus = 0
else:
time_sheet.docstatus = data.docstatus
time_sheet.employee = data.employee
time_sheet.note = data.note
time_sheet.company = company
time_sheet.set_status()
time_sheet.set_dates()
time_sheet.update_cost()
time_sheet.calculate_total_amounts()
time_sheet.flags.ignore_validate = True
time_sheet.flags.ignore_links = True
time_sheet.save(ignore_permissions=True)
# To ignore validate_mandatory_fields function
if data.docstatus == 1:
time_sheet.db_set("docstatus", 1)
for d in time_sheet.get("time_logs"):
d.db_set("docstatus", 1)
time_sheet.update_production_order(time_sheet.name)
time_sheet.update_task_and_project()
if data.docstatus == 2:
time_sheet.db_set("docstatus", 2)
for d in time_sheet.get("time_logs"):
d.db_set("docstatus", 2)
def get_timelog_data(data):
return {
'billable': data.billable,
'from_time': data.from_time,
'hours': data.hours,
'to_time': data.to_time,
'project': data.project,
'task': data.task,
'activity_type': data.activity_type,
'operation': data.operation,
'operation_id': data.operation_id,
'workstation': data.workstation,
'completed_qty': data.completed_qty,
'billing_rate': data.billing_rate,
'billing_amount': data.billing_amount,
'costing_rate': data.costing_rate,
'costing_amount': data.costing_amount
}
| 0.028493 |
import urlparse
from urllib import urlencode
from django.conf import settings
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from notification import models as notification
from persistent_messages.models import Message
def clear_notifications_template_cache(username):
key = make_template_fragment_key('notifications_table', [username])
cache.delete(key)
def push_notification(recipients, from_user, notice_type, data):
data.update({
'notices_url': settings.BASE_URL + '/',
'base_url': settings.BASE_URL,
})
notification.send(recipients, notice_type, data, sender=from_user)
for recipient in recipients:
clear_notifications_template_cache(recipient.username)
def get_notification_url_params_for_email(from_user=None):
return dict(
utm_source='astrobin',
utm_medium='email',
utm_campaign='notification',
from_user=from_user.pk if from_user else None
)
def build_notification_url(url, from_user=None):
params = get_notification_url_params_for_email(from_user)
url_parse = urlparse.urlparse(url)
query = url_parse.query
url_dict = dict(urlparse.parse_qsl(query))
url_dict.update(params)
url_new_query = urlencode(url_dict)
url_parse = url_parse._replace(query=url_new_query)
return urlparse.urlunparse(url_parse)
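# Illustrative example (URL and user are hypothetical):
#   build_notification_url('https://www.astrobin.com/12345/?nc=1', from_user=sender)
#   -> 'https://www.astrobin.com/12345/?nc=1&utm_source=astrobin&utm_medium=email'
#      '&utm_campaign=notification&from_user=<sender.pk>'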
| 0 |
from django import forms
from django.utils.translation import ugettext as _
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from corehq.apps.app_manager.models import get_apps_in_domain
class CreateFormExportForm(forms.Form):
application = forms.ChoiceField()
module = forms.ChoiceField()
form = forms.ChoiceField()
def __init__(self, domain, *args, **kwargs):
super(CreateFormExportForm, self).__init__(*args, **kwargs)
apps = get_apps_in_domain(domain)
self.fields['application'].choices = ([
('', _('Select Application...')),
] if len(apps) > 1 else []) + [
(app._id, app.name) for app in apps
]
self.fields['module'].choices = [
(module.unique_id, module.name)
for app in apps
for module in app.modules
]
self.fields['form'].choices = [
(form.get_unique_id(), form.name)
for app in apps
for form in app.get_forms()
]
self.helper = FormHelper()
self.helper.form_id = "create-export-form"
self.helper.form_class = "form-horizontal"
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_('Select Form'),
crispy.Field(
'application',
data_bind='value: appId',
),
crispy.Div(
crispy.Field(
'module',
data_bind=(
"options: moduleOptions, "
"optionsText: 'text', "
"optionsValue: 'value', "
"value: moduleId"
),
),
data_bind="visible: appId",
),
crispy.Div(
crispy.Field(
'form',
data_bind=(
"options: formOptions, "
"optionsText: 'text', "
"optionsValue: 'value', "
"value: formId"
),
),
data_bind="visible: moduleId",
),
),
crispy.Div(
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'create_export',
_('Next'),
),
),
),
data_bind="visible: formId",
),
)
class CreateCaseExportForm(forms.Form):
application = forms.ChoiceField()
case_type = forms.ChoiceField()
def __init__(self, domain, *args, **kwargs):
super(CreateCaseExportForm, self).__init__(*args, **kwargs)
apps = get_apps_in_domain(domain)
self.fields['application'].choices = ([
('', _('Select Application...')),
] if len(apps) > 1 else []) + [
(app._id, app.name) for app in apps
]
self.fields['case_type'].choices = [
(module.case_type, module.case_type)
for app in apps
for module in app.modules
if module.case_type
]
self.helper = FormHelper()
self.helper.form_id = "create-export-form"
self.helper.form_class = "form-horizontal"
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_('Select Case Type'),
crispy.Field(
'application',
data_bind='value: appId',
),
crispy.Div(
crispy.Field(
'case_type',
data_bind=(
"options: caseTypeOptions, "
"optionsText: 'text', "
"optionsValue: 'value', "
"value: case_type"
),
),
data_bind="visible: appId",
),
),
crispy.Div(
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'create_export',
_('Next'),
),
),
),
data_bind="visible: case_type",
),
)
| 0 |
# -*- coding: utf-8 -*-
import abc
import os
import warnings
import concierge.core.processor
import concierge.endpoints.cli
import concierge.endpoints.templates
import concierge.notifications
import concierge.templater
import concierge.utils
LOG = concierge.utils.logger(__name__)
class App(metaclass=abc.ABCMeta):
@classmethod
def specify_parser(cls, parser):
return parser
def __init__(self, options):
if options.use_templater is None:
warnings.warn(
"--use-templater flag and therefore implicit templater "
"autoresolve are deprecated. Please use explicit "
"templater in both concierge-check and concierge.",
FutureWarning)
if options.no_templater:
warnings.warn(
"Flag --no-templater is deprecated. "
"Please use 'dummy' templater instead.",
DeprecationWarning)
self.source_path = options.source_path
self.destination_path = options.destination_path
self.boring_syntax = options.boring_syntax
self.add_header = options.add_header
self.no_templater = getattr(options, "no_templater", False)
self.templater_name = options.use_templater
if options.no_desktop_notifications:
self.notificator = concierge.notifications.dummy_notifier
else:
self.notificator = concierge.notifications.notifier
try:
self.templater = concierge.templater.resolve_templater(
self.templater_name)
except KeyError:
raise ValueError(
"Cannot find templater for {0}".format(options.use_templater))
if self.add_header is None:
self.add_header = options.destination_path is not None
concierge.utils.configure_logging(
options.debug,
options.verbose,
self.destination_path is None)
@abc.abstractmethod
def do(self):
pass
def output(self):
content = self.get_new_config()
if self.destination_path is None:
print(content)
return
try:
with concierge.utils.topen(self.destination_path, True) as destfp:
destfp.write(content)
except Exception as exc:
self.log_error("Cannot write to file %s: %s",
self.destination_path, exc)
raise
def get_new_config(self):
content = self.fetch_content()
if not self.no_templater:
content = self.apply_template(content)
else:
LOG.info("No templating is used.")
if not self.boring_syntax:
content = self.process_syntax(content)
else:
LOG.info("Boring syntax was choosen, not processing is applied.")
if self.add_header:
content = self.attach_header(content)
else:
LOG.info("No need to attach header.")
return content
def fetch_content(self):
LOG.info("Fetching content from %s", self.source_path)
try:
content = concierge.utils.get_content(self.source_path)
except Exception as exc:
self.log_error("Cannot fetch content from %s: %s",
self.source_path, exc)
raise
LOG.info("Original content of %s:\n%s", self.source_path, content)
return content
def apply_template(self, content):
LOG.info("Applying templater to content of %s.", self.source_path)
try:
content = self.templater.render(content)
except Exception as exc:
self.log_error("Cannot process template (%s) in source file %s.",
self.source_path, self.templater.name, exc)
raise
LOG.info("Templated content of %s:\n%s", self.source_path, content)
return content
def process_syntax(self, content):
try:
return concierge.core.processor.process(content)
except Exception as exc:
self.log_error("Cannot parse content of source file %s: %s",
self.source_path, exc)
raise
def attach_header(self, content):
header = concierge.endpoints.templates.make_header(
rc_file=self.source_path)
content = header + content
return content
def log_error(self, template, *args):
LOG.error(template, *args)
self.notificator(template % args)
def main(app_class):
def main_func():
parser = concierge.endpoints.cli.create_parser()
parser = app_class.specify_parser(parser)
options = parser.parse_args()
app = app_class(options)
LOG.debug("Options: %s", options)
try:
return app.do()
except KeyboardInterrupt:
pass
except Exception as exc:
LOG.exception("Failed with error %s", exc)
return os.EX_SOFTWARE
return main_func
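# A minimal usage sketch (the subclass shown is hypothetical; the real App
# subclasses live elsewhere in concierge.endpoints):
#
#   class ProcessApp(App):
#       def do(self):
#           return self.output()
#
#   main_func = main(ProcessApp)  # suitable as a console_scripts entry point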
| 0 |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from myuw.dao.term import get_specific_term, is_past
from myuw.dao.card_display_dates import in_show_grades_period
from myuw.logger.timer import Timer
from myuw.views.api.base_schedule import StudClasSche
from myuw.views.error import invalid_future_term, handle_exception
logger = logging.getLogger(__name__)
class StudClasScheFutureQuar(StudClasSche):
"""
Performs actions on resource at
/api/v1/schedule/<year>,<quarter>(,<summer_term>)?
"""
def get(self, request, *args, **kwargs):
"""
GET returns 200 with course section schedule details of
the given year, quarter.
Return the course sections of full term and matched term
if a specific summer-term is given
@return class schedule data in json format
status 404: no schedule found (not registered)
status 543: data error
"""
timer = Timer()
year = kwargs.get("year")
quarter = kwargs.get("quarter")
summer_term = kwargs.get("summer_term", "full-term")
try:
request_term = get_specific_term(year, quarter)
if is_past(request_term, request):
if not in_show_grades_period(request_term, request):
return invalid_future_term("{},{}".format(year, quarter))
return self.make_http_resp(
timer, request_term, request, summer_term=summer_term)
except Exception:
return handle_exception(logger, timer, traceback)
| 0 |
#!/usr/bin/env python
# Copyright 2014 Johns Hopkins University (Authors: Vijayaditya Peddinti). Apache 2.0.
# script to read rir files from air database
import sys, numpy as np, argparse, scipy.signal as signal, os.path, glob, scipy.io, scipy.io.wavfile
def read_raw(input_filename, precision = np.float32):
# assuming numpy return littleendian data
file_handle = open(input_filename, 'rb')
data = np.fromfile(file_handle, dtype = precision)
#sys.stderr.write("Read file of length {0} and type {1}\n".format(len(data), precision))
return data
def wav_write(file_handle, fs, data):
if str(data.dtype) in set(['float64', 'float32']):
data = (0.99 * data / np.max(np.abs(data))) * (2 ** 15)
data = data.astype('int16', copy = False)
elif str(data.dtype) == 'int16':
pass
else:
raise Exception('Not implemented for '+str(data.dtype))
scipy.io.wavfile.write(file_handle, fs, data)
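# Illustrative use: a float32 impulse response returned by read_raw() can be
# written as 16 bit PCM (peak-normalized to ~99% of full scale), e.g.
#   wav_write(open('rir.wav', 'wb'), 8000, read_raw('rir.bin'))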
def usage():
return """This is a python script to read impulse responses stored in custom formats. It handles AIR database."""
if __name__ == "__main__":
#sys.stderr.write(" ".join(sys.argv)+"\n")
parser = argparse.ArgumentParser(usage())
parser.add_argument('--output-sampling-rate', type = int, default = 8000, help = 'sampling rate of the output')
parser.add_argument('type', type = str, default = None, help = 'database type', choices = ['air'])
parser.add_argument('input', type = str, default = None, help = 'directory containing the multi-channel data for a particular recording, or file name or file-regex-pattern')
parser.add_argument('output_filename', type = str, default = None, help = 'output filename (if "-" then output is written to output pipe)')
params = parser.parse_args()
if params.output_filename == "-":
output = sys.stdout
else:
output = open(params.output_filename, 'wb')
if params.type == 'air':
files = glob.glob(params.input)
# there are just two files which vary in the channel id (0,1)
assert(len(files)==2)
sr = -1
data = []
for file in files:
mat_data = scipy.io.loadmat(file)
data.append(mat_data['h_air'][0,:])
sr = mat_data['air_info']['fs'][0][0][0][0]
data = np.array(data)
data = data.transpose()
assert(data.shape[1] == 2)
if params.output_sampling_rate != sr:
data = signal.resample(data, int(params.output_sampling_rate * float(data.shape[0]) / sr), axis = 0)
wav_write(output, params.output_sampling_rate, data)
| 0.031452 |
from PyQt5.QtCore import QObject, pyqtSignal
class Player(QObject):
    """
    Represents a player the client knows about.
    """
    updated = pyqtSignal(object, object)
    newCurrentGame = pyqtSignal(object, object, object)
def __init__(self,
id_,
login,
global_rating=(1500, 500),
ladder_rating=(1500, 500),
number_of_games=0,
avatar=None,
country=None,
clan=None,
league=None):
        """
        Initialize a Player
        """
        QObject.__init__(self)
# Required fields
self.id = int(id_)
self.login = login
self.global_rating = global_rating
self.ladder_rating = ladder_rating
self.number_of_games = number_of_games
self.avatar = avatar
self.country = country
self.clan = clan
self.league = league
# The game the player is currently playing
self._currentGame = None
def copy(self):
s = self
p = Player(s.id, s.login, s.global_rating, s.ladder_rating,
s.number_of_games, s.avatar, s.country, s.clan, s.league)
p.currentGame = self._currentGame
return p
def update(self,
id_=None,
login=None,
global_rating=None,
ladder_rating=None,
number_of_games=None,
avatar=None,
country=None,
clan=None,
league=None):
old_data = self.copy()
        # Ignore id and login (they are immutable)
# Login should be mutable, but we look up things by login right now
if global_rating is not None:
self.global_rating = global_rating
if ladder_rating is not None:
self.ladder_rating = ladder_rating
if number_of_games is not None:
self.number_of_games = number_of_games
if avatar is not None:
self.avatar = avatar
if country is not None:
self.country = country
if clan is not None:
self.clan = clan
if league is not None:
self.league = league
self.updated.emit(self, old_data)
def __hash__(self):
"""
Index by id
"""
return self.id.__hash__()
def __index__(self):
return self.id
def __eq__(self, other):
"""
Equality by id
:param other: player object to compare with
"""
if not isinstance(other, Player):
return False
return other.id == self.id
def rounded_rating_estimate(self):
"""
        Get the conservative estimate of the player's global trueskill rating,
        rounded to the nearest 100
"""
return round((self.rating_estimate()/100))*100
def rating_estimate(self):
"""
        Get the conservative estimate of the player's global trueskill rating
"""
return int(max(0, (self.global_rating[0] - 3 * self.global_rating[1])))
def ladder_estimate(self):
"""
        Get the conservative estimate of the player's ladder trueskill rating
"""
return int(max(0, (self.ladder_rating[0] - 3 * self.ladder_rating[1])))
@property
def rating_mean(self):
return self.global_rating[0]
@property
def rating_deviation(self):
return self.global_rating[1]
@property
def ladder_rating_mean(self):
return self.ladder_rating[0]
@property
def ladder_rating_deviation(self):
return self.ladder_rating[1]
def __repr__(self):
return self.__str__()
def __str__(self):
return ("Player(id={}, login={}, global_rating={}, "
"ladder_rating={})").format(
self.id,
self.login,
self.global_rating,
self.ladder_rating
)
@property
def currentGame(self):
return self._currentGame
@currentGame.setter
def currentGame(self, game):
self.set_current_game_defer_signal(game)()
def set_current_game_defer_signal(self, game):
if self.currentGame == game:
return lambda: None
old = self._currentGame
self._currentGame = game
return lambda: self._emit_game_change(game, old)
def _emit_game_change(self, game, old):
self.newCurrentGame.emit(self, game, old)
if old is not None:
old.ingamePlayerRemoved.emit(old, self)
if game is not None:
game.ingamePlayerAdded.emit(game, self)
| 0 |
# -*- coding: utf-8 -*-
# ****************************************************************************
# Original work Copyright (C) 2013-2015 SUNCAT
# Modified work Copyright 2015-2017 Lukasz Mentel
#
# This file is distributed under the terms of the
# GNU General Public License. See the file 'COPYING'
# in the root directory of the present distribution,
# or http://www.gnu.org/copyleft/gpl.txt .
# ****************************************************************************
from __future__ import print_function, absolute_import
from builtins import object
import numpy as np
from ase.calculators.calculator import FileIOCalculator
from .espresso import Espresso
__version__ = "0.3.4"
class Vibespresso(FileIOCalculator, object):
"""
Special espresso calculator, which expects the first calculation to
be performed for a structure without displacements. All subsequent
calculations are then initialized with the Kohn-Sham potential of
the first calculation to speed up vibrational calculations.
"""
def __init__(self, outdirprefix="out", **kwargs):
"""
In addition to the parameters of a standard espresso calculator,
outdirprefix (default: 'out') can be specified, which will be the
prefix of the output of the calculations for different displacements
"""
self.arg = kwargs.copy()
self.outdirprefix = outdirprefix
self.counter = 0
self.equilibriumdensity = outdirprefix + "_equi.tgz"
self.firststep = True
self.ready = False
self.atoms = None
def update(self, atoms):
if self.atoms is not None:
x = atoms.positions - self.atoms.positions
if np.max(x) > 1.0e-13 or np.min(x) < -1.0e-13:
self.ready = False
else:
self.atoms = atoms.copy()
self.runcalc(atoms)
if atoms is not None:
self.atoms = atoms.copy()
def runcalc(self, atoms):
if not self.ready:
self.arg["outdir"] = self.outdirprefix + "_%04d" % self.counter
self.counter += 1
if self.firststep:
self.esp = Espresso(**self.arg)
self.esp.set_atoms(atoms)
self.esp.get_potential_energy(atoms)
self.esp.save_chg(self.equilibriumdensity)
self.firststep = False
else:
self.arg["startingpot"] = "file"
self.esp = Espresso(**self.arg)
self.esp.set_atoms(atoms)
self.esp.initialize(atoms)
self.esp.load_chg(self.equilibriumdensity)
self.esp.get_potential_energy(atoms)
self.esp.stop()
self.ready = True
def get_potential_energy(self, atoms, force_consistent=False):
self.update(atoms)
if force_consistent:
return self.esp.energy_free
else:
return self.esp.energy_zero
def get_forces(self, atoms):
self.update(atoms)
return self.esp.forces
def get_name(self):
return "VibEspresso"
def get_version(self):
return __version__
| 0 |
ALPHABET = ["A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S"]
def makeKMerList(k):
if k == 1:
# The k-mer list is the alphabet
return ALPHABET
kMinusOneMerList = makeKMerList(k - 1)
kMerList = []
for kMinusOneMer in kMinusOneMerList:
# Iterate through the list of k-1-mers and add each letter of the alphabet to each
for letter in ALPHABET:
# Iterate through the letters of the alphabet and add each to the current k-1-mer
kMer = kMinusOneMer + letter
kMerList.append(kMer)
return kMerList
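# e.g. makeKMerList(2) returns the 16*16 = 256 two-letter strings over ALPHABET,
# in the order ['AA', 'AC', 'AD', ..., 'SR', 'SS']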
def initializeKMerDict(k):
# Initialize the k-mer dictionary
kMerList = makeKMerList(k)
kMerDict = {}
for kMer in kMerList:
# Make an entry in the dictionary for each k-mer
kMerDict[kMer] = 0
return kMerDict
def makeSampleImageKMerCounts(sequence, k, pngFileNamePrefix, sampleCount):
# Make an image from k-mer counts in the sequence
kMerDict = initializeKMerDict(k)
for i in range(len(sequence) - k + 1):
# Iterate through the k-mers and increment the appropriate counts
if "X" not in sequence[i:i+k]:
# The sequence does not have a wild card, so count it
kMerDict[sequence[i:i+k]] = kMerDict[sequence[i:i+k]] + 1
sequenceArray = numpy.zeros((len(kMerDict), 1))
numSeqCount = 0
for val in kMerDict.values():
# Iterate through the values in the dictionary and put each into the sequence array
sequenceArray[numSeqCount] = float(val)/float(255)
if val > 255:
print "PROBLEM!"
numSeqCount = numSeqCount + 1
pngFileName = pngFileNamePrefix + "_" + str(sampleCount) + ".png"
misc.imsave(pngFileName, sequenceArray)
return pngFileName
def makeSequenceInputsKMerCountsBaseline(sequenceAlphabetFileName, peakHeightFileName, pngFileNamePrefix, valueFileNamePrefix, outputFileName, k):
# Convert each alphabet sequence to k-mer counts, and record them all as png examples
# ASSUMES THAT 16-LETTER ALPHABET AND ALL 4 COMBINATIONS FOR EACH SEQUENCE ARE IN sequenceAlphabetFile
# ASSUMES THAT EACH PEAK HEIGHT CORRESPONDS TO THE ALPHABET ENTRIES WITH THE SAME INDEX
sequenceAlphabetFile = open(sequenceAlphabetFileName)
peakHeightFile = open(peakHeightFileName)
outputFile = open(outputFileName, 'w+')
sampleCount = 0
valueFileName = ""
for line in sequenceAlphabetFile:
# Iterate through the alphabet sequences and count the kmers in each
peakHeight = peakHeightFile.readline().strip()
sampleCount = sampleCount + 1
if sampleCount % 4 == 1:
valueFileName = valueFileNamePrefix + "_" + str(sampleCount) + "-" + str(sampleCount + 3) + ".txt"
valueFile = open(valueFileName, 'w+')
valueFile.write(peakHeight + "\n")
valueFile.close()
pngFileName = makeSampleImageKMerCounts(line.strip(), k, pngFileNamePrefix, sampleCount)
outputFile.write(pngFileName + "\t" + valueFileName + "\t" + "0" + "\n")
peakHeightFile.close()
outputFile.close()
if __name__=="__main__":
import sys
import numpy
from scipy import misc
sequenceAlphabetFileName = sys.argv[1]
peakHeightFileName = sys.argv[2]
pngFileNamePrefix = sys.argv[3]
valueFileNamePrefix = sys.argv[4]
outputFileName = sys.argv[5]
k = int(sys.argv[6])
makeSequenceInputsKMerCountsBaseline(sequenceAlphabetFileName, peakHeightFileName, pngFileNamePrefix, valueFileNamePrefix, outputFileName, k)
| 0.025464 |
import csv
datasets = ['fu-converted', 'poly1a-converted', 'poly2b-converted', 'poly3b-converted', 'poly4b-converted']
genetics = ['-100-1-999999-50-', '-100-1-999999-100-', '-100-1-999999-200-']
hill = ['-hill-100-1-']
tabu = ['-tabu-100-1-']
time = ['60000', '120000', '300000']
end = '-info.txt'
def prettytime(string):
if string == '60000': return '1'
if string == '120000': return '2'
if string == '300000': return '5'
def prettyname(string):
if '-100-1-999999-50-' in string: return 'G_p50'
if '-100-1-999999-100-' in string: return 'G_p100'
if '-100-1-999999-200-' in string: return 'G_p200'
if 'hill' in string: return 'hill'
if 'tabu' in string: return 'tabu'
def get_row(file):
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
            return row
def process_genetics(partfile):
for x in time:
row = get_row(partfile+x+end)
print(','.join([row[0], row[7], prettytime(x), prettyname(partfile)]))
def process_tabu(partfile):
for x in time:
row = get_row(partfile+x+end)
print(','.join([row[0], row[6], prettytime(x), prettyname(partfile)]))
def process_hill(partfile):
for x in time:
row = get_row(partfile+x+end)
print(','.join([row[0], row[6], prettytime(x), prettyname(partfile)]))
for a in datasets:
for b in genetics:
process_genetics(a+b)
for b in hill:
process_hill(a+b)
for b in tabu:
        process_tabu(a+b)
| 0.035915 |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# cfg tool
oneline = "Convert LAMMPS snapshots to AtomEye CFG format"
docstr = """
c = cfg(d) d = object containing atom coords (dump, data)
c.one() write all snapshots to tmp.cfg
c.one("new") write all snapshots to new.cfg
c.many() write snapshots to tmp0000.cfg, tmp0001.cfg, etc
c.many("new") write snapshots to new0000.cfg, new0001.cfg, etc
c.single(N) write snapshot for timestep N to tmp.cfg
c.single(N,"file") write snapshot for timestep N to file.cfg
"""
# History
# 11/06, Aidan Thompson (SNL): original version
# ToDo list
# should decide if dump is scaled or not, since CFG prints in scaled coords
# this creates a simple AtomEye CFG format
# there is a more complex format we could write out
# which allows for extra atom info, e.g. to do atom coloring on
# how to dump for a triclinic box, since AtomEye accepts this
# Variables
# data = data file to read from
# Imports and external programs
import sys
# Class definition
class cfg:
# --------------------------------------------------------------------
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def one(self,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
f = open(file,"w")
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
n += 1
f.close()
print "\nwrote %d snapshots to %s in CFG format" % (n,file)
# --------------------------------------------------------------------
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".cfg"
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
f.close()
n += 1
print "\nwrote %s snapshots in CFG format" % n
# --------------------------------------------------------------------
def single(self,time,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
which = self.data.findtime(time)
time,box,atoms,bonds,tris,lines = self.data.viz(which)
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
f.close()
| 0.026176 |
import itertools
from django.db.models import Case
from django.db.models import F, Q
from django.db.models import Sum
from django.db.models import When
from django.db.models import Value
from registrasion.models import commerce
from registrasion.models import inventory
from .batch import BatchController
from .category import CategoryController
from .flag import FlagController
class ProductController(object):
def __init__(self, product):
self.product = product
@classmethod
def available_products(cls, user, category=None, products=None):
        ''' Returns a list of all the products from the given category and/or
        product list that are available to the user under the flag conditions
        and per-user limits. '''
if category is None and products is None:
raise ValueError("You must provide products or a category")
if category is not None:
all_products = inventory.Product.objects.filter(category=category)
all_products = all_products.select_related("category")
else:
all_products = []
if products is not None:
all_products = set(itertools.chain(all_products, products))
category_remainders = CategoryController.user_remainders(user)
product_remainders = ProductController.user_remainders(user)
passed_limits = set(
product
for product in all_products
if category_remainders[product.category.id] > 0
if product_remainders[product.id] > 0
)
failed_and_messages = FlagController.test_flags(
user, products=passed_limits
)
failed_conditions = set(i[0] for i in failed_and_messages)
out = list(passed_limits - failed_conditions)
out.sort(key=lambda product: product.order)
return out
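    # Usage sketch (hypothetical names; assumes an authenticated ``user`` and an
    # inventory.Category instance ``cat``):
    #   available = ProductController.available_products(user, category=cat)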
@classmethod
@BatchController.memoise
def user_remainders(cls, user):
'''
Return:
Mapping[int->int]: A dictionary that maps the product ID to the
user's remainder for that product.
'''
products = inventory.Product.objects.all()
cart_filter = (
Q(productitem__cart__user=user) &
Q(productitem__cart__status=commerce.Cart.STATUS_PAID)
)
quantity = When(
cart_filter,
then='productitem__quantity'
)
quantity_or_zero = Case(
quantity,
default=Value(0),
)
remainder = Case(
When(limit_per_user=None, then=Value(99999999)),
default=F('limit_per_user') - Sum(quantity_or_zero),
)
products = products.annotate(remainder=remainder)
return dict((product.id, product.remainder) for product in products)
| 0 |
## src/common/GnuPG.py
##
## Copyright (C) 2003-2008 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2005 Alex Mauer <hawke AT hawkesnest.net>
## Copyright (C) 2005-2006 Nikos Kouremenos <kourem AT gmail.com>
## Copyright (C) 2005-2008 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2007 Stephan Erb <steve-e AT h3c.de>
## Copyright (C) 2008 Jean-Marie Traissard <jim AT lapin.org>
## Jonathan Schleifer <js-gajim AT webkeks.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import gajim
from os import tmpfile
from common import helpers
if gajim.HAVE_GPG:
import GnuPGInterface
class GnuPG(GnuPGInterface.GnuPG):
def __init__(self, use_agent = False):
GnuPGInterface.GnuPG.__init__(self)
self.use_agent = use_agent
self._setup_my_options()
def _setup_my_options(self):
self.options.armor = 1
self.options.meta_interactive = 0
self.options.extra_args.append('--no-secmem-warning')
if self.use_agent:
self.options.extra_args.append('--use-agent')
def _read_response(self, child_stdout):
# Internal method: reads all the output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
# (See doc/DETAILS in the GPG distribution for info on GPG's
# output when --status-fd is specified.)
#
# Returns a dictionary, mapping GPG's keywords to the arguments
# for that keyword.
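		# Example status lines (illustrative):
		#   "[GNUPG:] GOOD_PASSPHRASE"          -> resp['GOOD_PASSPHRASE'] = ''
		#   "[GNUPG:] GOODSIG <keyid> <userid>" -> resp['GOODSIG'] = '<keyid> <userid>'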
resp = {}
while True:
line = helpers.temp_failure_retry(child_stdout.readline)
if line == "": break
line = line.rstrip()
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
resp[ keyword ] = L[1]
else:
resp[ keyword ] = ""
return resp
def encrypt(self, str_, recipients):
self.options.recipients = recipients # a list!
proc = self.run(['--encrypt'], create_fhs=['stdin', 'stdout', 'status',
'stderr'])
proc.handles['stdin'].write(str_)
try:
proc.handles['stdin'].close()
except IOError:
pass
output = proc.handles['stdout'].read()
try:
proc.handles['stdout'].close()
except IOError:
pass
stat = proc.handles['status']
resp = self._read_response(stat)
try:
proc.handles['status'].close()
except IOError:
pass
error = proc.handles['stderr'].read()
proc.handles['stderr'].close()
try: proc.wait()
except IOError: pass
if 'BEGIN_ENCRYPTION' in resp and 'END_ENCRYPTION' in resp:
# Encryption succeeded, even if there is output on stderr. Maybe
# verbose is on
error = ''
return self._stripHeaderFooter(output), error
def decrypt(self, str_, keyID):
proc = self.run(['--decrypt', '-q', '-u %s'%keyID], create_fhs=['stdin', 'stdout'])
enc = self._addHeaderFooter(str_, 'MESSAGE')
proc.handles['stdin'].write(enc)
proc.handles['stdin'].close()
output = proc.handles['stdout'].read()
proc.handles['stdout'].close()
try: proc.wait()
except IOError: pass
return output
def sign(self, str_, keyID):
proc = self.run(['-b', '-u %s'%keyID], create_fhs=['stdin', 'stdout', 'status', 'stderr'])
proc.handles['stdin'].write(str_)
try:
proc.handles['stdin'].close()
except IOError:
pass
output = proc.handles['stdout'].read()
try:
proc.handles['stdout'].close()
proc.handles['stderr'].close()
except IOError:
pass
stat = proc.handles['status']
resp = self._read_response(stat)
try:
proc.handles['status'].close()
except IOError:
pass
try: proc.wait()
except IOError: pass
if 'GOOD_PASSPHRASE' in resp or 'SIG_CREATED' in resp:
return self._stripHeaderFooter(output)
return 'BAD_PASSPHRASE'
def verify(self, str_, sign):
if str_ is None:
return ''
f = tmpfile()
fd = f.fileno()
f.write(str_)
f.seek(0)
proc = self.run(['--verify', '--enable-special-filenames', '-', '-&%s'%fd], create_fhs=['stdin', 'status', 'stderr'])
f.close()
sign = self._addHeaderFooter(sign, 'SIGNATURE')
proc.handles['stdin'].write(sign)
proc.handles['stdin'].close()
proc.handles['stderr'].close()
stat = proc.handles['status']
resp = self._read_response(stat)
proc.handles['status'].close()
try: proc.wait()
except IOError: pass
keyid = ''
if 'GOODSIG' in resp:
keyid = resp['GOODSIG'].split()[0]
return keyid
def get_keys(self, secret = False):
if secret:
opt = '--list-secret-keys'
else:
opt = '--list-keys'
proc = self.run(['--with-colons', opt],
create_fhs=['stdout'])
output = proc.handles['stdout'].read()
proc.handles['stdout'].close()
keys = {}
lines = output.split('\n')
for line in lines:
sline = line.split(':')
if (sline[0] == 'sec' and secret) or \
(sline[0] == 'pub' and not secret):
# decode escaped chars
name = eval('"' + sline[9].replace('"', '\\"') + '"')
# make it unicode instance
keys[sline[4][8:]] = helpers.decode_string(name)
		try: proc.wait()
		except IOError: pass
		return keys
def get_secret_keys(self):
return self.get_keys(True)
def _stripHeaderFooter(self, data):
"""Remove header and footer from data"""
if not data: return ''
lines = data.split('\n')
while lines[0] != '':
lines.remove(lines[0])
while lines[0] == '':
lines.remove(lines[0])
i = 0
for line in lines:
if line:
if line[0] == '-': break
i = i+1
line = '\n'.join(lines[0:i])
return line
def _addHeaderFooter(self, data, type_):
"""Add header and footer from data"""
out = "-----BEGIN PGP %s-----\n" % type_
out = out + "Version: PGP\n"
out = out + "\n"
out = out + data + "\n"
out = out + "-----END PGP %s-----\n" % type_
return out
# vim: se ts=3:
| 0.034477 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import title # noqa
from django.template.loader import render_to_string
from horizon.utils import filters
def stack_info(stack, stack_image):
stack.stack_status_desc = title(
filters.replace_underscores(stack.stack_status))
if stack.stack_status_reason:
stack.stack_status_reason = title(
filters.replace_underscores(stack.stack_status_reason)
)
context = {}
context['stack'] = stack
context['stack_image'] = stack_image
return render_to_string('project/stacks/_stack_info.html',
context)
def resource_info(resource):
resource.resource_status_desc = title(
filters.replace_underscores(resource.resource_status)
)
if resource.resource_status_reason:
resource.resource_status_reason = title(
filters.replace_underscores(resource.resource_status_reason)
)
context = {}
context['resource'] = resource
return render_to_string('project/stacks/_resource_info.html',
context)
| 0 |
#!/usr/bin/env python
# encoding: utf-8
# Jérôme Carretero 2011 (zougloub)
# QNX neutrino compatibility functions
import sys, os
from waflib import Utils
class Popen(object):
"""
Popen cannot work on QNX from a threaded program:
Forking in threads is not implemented in neutrino.
	Python's os.popen / spawn / fork won't work when running in threads (they will if running in the main program thread).
	In waf, this happens mostly during builds, and those use cases can be replaced by os.system() calls.
"""
__slots__ = ["prog", "kw", "popen", "verbose"]
verbose = 0
def __init__(self, prog, **kw):
try:
self.prog = prog
self.kw = kw
self.popen = None
if Popen.verbose:
sys.stdout.write("Popen created: %r, kw=%r..." % (prog, kw))
do_delegate = kw.get('stdout', None) == -1 and kw.get('stderr', None) == -1
if do_delegate:
if Popen.verbose:
print("Delegating to real Popen")
self.popen = self.real_Popen(prog, **kw)
else:
if Popen.verbose:
print("Emulating")
except Exception as e:
if Popen.verbose:
print("Exception: %s" % e)
raise
def __getattr__(self, name):
if Popen.verbose:
sys.stdout.write("Getattr: %s..." % name)
if name in Popen.__slots__:
if Popen.verbose:
print("In slots!")
			return object.__getattribute__(self, name)
else:
if self.popen is not None:
if Popen.verbose:
print("from Popen")
return getattr(self.popen, name)
else:
if name == "wait":
return self.emu_wait
else:
raise Exception("subprocess emulation: not implemented: %s" % name)
def emu_wait(self):
if Popen.verbose:
print("emulated wait (%r kw=%r)" % (self.prog, self.kw))
if isinstance(self.prog, str):
cmd = self.prog
else:
cmd = " ".join(self.prog)
if 'cwd' in self.kw:
cmd = 'cd "%s" && %s' % (self.kw['cwd'], cmd)
return os.system(cmd)
if sys.platform == "qnx6":
Popen.real_Popen = Utils.subprocess.Popen
Utils.subprocess.Popen = Popen
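# Usage sketch (illustrative): on qnx6 the shim above transparently replaces
# Utils.subprocess.Popen, so existing waf code keeps working unchanged:
#   p = Utils.subprocess.Popen(['gcc', '--version'])
#   ret = p.wait()   # no PIPEd stdout/stderr, so this is emulated via os.system()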
| 0.032242 |
import sys
import unittest
from dynd import nd, ndt
"""
class TestInt128(unittest.TestCase):
def test_pyconvert(self):
# Conversions to/from python longs
a = nd.empty(ndt.int128)
a[...] = 1
self.assertEqual(nd.as_py(a), 1)
a[...] = 12345
self.assertEqual(nd.as_py(a), 12345)
a[...] = -12345
self.assertEqual(nd.as_py(a), -12345)
a[...] = -2**127
self.assertEqual(nd.as_py(a), -2**127)
a[...] = 2**127 - 1
self.assertEqual(nd.as_py(a), 2**127 - 1)
def test_pyconvert_overflow(self):
a = nd.empty(ndt.int128)
def assign_val(x, val):
x[...] = val
self.assertRaises(OverflowError, assign_val, a, -2**127 - 1)
self.assertRaises(OverflowError, assign_val, a, 2**127)
class TestUInt128(unittest.TestCase):
def test_pyconvert(self):
# Conversions to/from python longs
a = nd.empty(ndt.uint128)
a[...] = 1
self.assertEqual(nd.as_py(a), 1)
a[...] = 12345
self.assertEqual(nd.as_py(a), 12345)
a[...] = 0
self.assertEqual(nd.as_py(a), 0)
a[...] = 2**128 - 1
self.assertEqual(nd.as_py(a), 2**128 - 1)
def test_pyconvert_overflow(self):
a = nd.empty(ndt.uint128)
def assign_val(x, val):
x[...] = val
self.assertRaises(OverflowError, assign_val, a, -1)
self.assertRaises(OverflowError, assign_val, a, -2**127 - 1)
self.assertRaises(OverflowError, assign_val, a, 2**128)
"""
if __name__ == '__main__':
unittest.main(verbosity=2)
| 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.desk.form.meta
import frappe.desk.form.load
from frappe.utils.html_utils import clean_email_html
from frappe import _
from six import string_types
@frappe.whitelist()
def remove_attach():
"""remove attachment"""
import frappe.utils.file_manager
fid = frappe.form_dict.get('fid')
return frappe.utils.file_manager.remove_file(fid)
@frappe.whitelist()
def validate_link():
"""validate link when updated by user"""
import frappe
import frappe.utils
value, options, fetch = frappe.form_dict.get('value'), frappe.form_dict.get('options'), frappe.form_dict.get('fetch')
# no options, don't validate
if not options or options=='null' or options=='undefined':
frappe.response['message'] = 'Ok'
return
valid_value = frappe.db.sql("select name from `tab%s` where name=%s" % (frappe.db.escape(options),
'%s'), (value,))
if valid_value:
valid_value = valid_value[0][0]
# get fetch values
if fetch:
# escape with "`"
fetch = ", ".join(("`{0}`".format(frappe.db.escape(f.strip())) for f in fetch.split(",")))
fetch_value = None
try:
fetch_value = frappe.db.sql("select %s from `tab%s` where name=%s"
% (fetch, frappe.db.escape(options), '%s'), (value,))[0]
except Exception as e:
error_message = str(e).split("Unknown column '")
fieldname = None if len(error_message)<=1 else error_message[1].split("'")[0]
frappe.msgprint(_("Wrong fieldname <b>{0}</b> in add_fetch configuration of custom script").format(fieldname))
frappe.errprint(frappe.get_traceback())
if fetch_value:
frappe.response['fetch_values'] = [frappe.utils.parse_val(c) for c in fetch_value]
frappe.response['valid_value'] = valid_value
frappe.response['message'] = 'Ok'
@frappe.whitelist()
def add_comment(doc):
"""allow any logged user to post a comment"""
doc = frappe.get_doc(json.loads(doc))
doc.content = clean_email_html(doc.content)
if not (doc.doctype=="Communication" and doc.communication_type=='Comment'):
frappe.throw(_("This method can only be used to create a Comment"), frappe.PermissionError)
doc.insert(ignore_permissions=True)
return doc.as_dict()
@frappe.whitelist()
def update_comment(name, content):
"""allow only owner to update comment"""
doc = frappe.get_doc('Communication', name)
if frappe.session.user not in ['Administrator', doc.owner]:
frappe.throw(_('Comment can only be edited by the owner'), frappe.PermissionError)
doc.content = content
doc.save(ignore_permissions=True)
@frappe.whitelist()
def get_next(doctype, value, prev, filters=None, order_by="modified desc"):
prev = not int(prev)
sort_field, sort_order = order_by.split(" ")
if not filters: filters = []
if isinstance(filters, string_types):
filters = json.loads(filters)
# condition based on sort order
condition = ">" if sort_order.lower()=="desc" else "<"
# switch the condition
if prev:
condition = "<" if condition==">" else "<"
else:
sort_order = "asc" if sort_order.lower()=="desc" else "desc"
# add condition for next or prev item
	if not sort_field in [f[1] for f in filters]:
filters.append([doctype, sort_field, condition, value])
res = frappe.get_list(doctype,
fields = ["name"],
filters = filters,
order_by = sort_field + " " + sort_order,
limit_start=0, limit_page_length=1, as_list=True)
if not res:
frappe.msgprint(_("No further records"))
return None
else:
return res[0][0]
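# Example (sketch): with the default order_by "modified desc",
#   get_next("ToDo", some_timestamp, prev=0)
# returns the name of the ToDo whose "modified" value comes immediately after
# some_timestamp in descending order; prev=1 walks in the opposite direction.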
def get_pdf_link(doctype, docname, print_format='Standard', no_letterhead=0):
return '/api/method/frappe.utils.print_format.download_pdf?doctype={doctype}&name={docname}&format={print_format}&no_letterhead={no_letterhead}'.format(
doctype = doctype,
docname = docname,
print_format = print_format,
no_letterhead = no_letterhead
) | 0.030761 |
def _get_leaf_members(leaf):
"""
Return an iterator to members of @leaf, if it is a multiple leaf
"""
try:
return leaf.get_multiple_leaf_representation()
except AttributeError:
return (leaf, )
def action_valid_for_item(action, leaf):
return all(action.valid_for_item(L) for L in _get_leaf_members(leaf))
def actions_for_item(leaf, sourcecontroller):
if leaf is None:
return []
actions = None
for L in _get_leaf_members(leaf):
l_actions = set(L.get_actions())
l_actions.update(sourcecontroller.get_actions_for_leaf(L))
if actions is None:
actions = l_actions
else:
actions.intersection_update(l_actions)
return actions
def iobject_source_for_action(action, for_item):
for leaf in _get_leaf_members(for_item):
return action.object_source(leaf)
def iobjects_valid_for_action(action, for_item):
"""
Return a filtering *function* that will let through
those leaves that are good iobjects for @action and @for_item.
"""
def valid_object(leaf, for_item):
_valid_object = action.valid_object
for L in _get_leaf_members(leaf):
for I in _get_leaf_members(for_item):
if not _valid_object(L, for_item=I):
return False
return True
types = tuple(action.object_types())
def type_obj_check(iobjs):
for i in iobjs:
if (isinstance(i, types) and valid_object(i, for_item=for_item)):
yield i
def type_check(itms):
for i in itms:
if isinstance(i, types):
yield i
if hasattr(action, "valid_object"):
return type_obj_check
else:
return type_check
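# Usage sketch (hypothetical variable names): filter candidate indirect objects
# before offering them for an action:
#   accepts = iobjects_valid_for_action(action, leaf)
#   usable = list(accepts(candidate_leaves))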
| 0.034983 |
# This script is intended to easily allow checking of many sim setups in debug to catch bugs.
import subprocess as sb
import time
simc_bin = "simc.exe"
iterations=4
threads=1
output_dir="d:/dev/simc/debug/"
classes = [
#("priest", ["shadow","discipline","holy"]),
("priest", ["shadow",]),
("shaman", ["elemental", "enhancement", "restoration"]),
#("druid",["guardian","feral","balance","restoration"]),
("druid",["guardian","feral","balance"]),
("warrior",["fury","arms","protection"]),
("paladin",["holy","retribution","protection"]),
("rogue",["subtlety","combat","assassination"]),
("warlock",["demonology","affliction","destruction"]),
#("monk",["brewmaster","windwalker","mistweaver"]),
("monk",["brewmaster","windwalker"]),
]
for wowclass in classes:
for spec in wowclass[1]:
try:
command = "{bin} {wclass}=foo spec={spec} iterations={iterations} threads={threads} output={output}" \
.format( bin=simc_bin, wclass=wowclass[0], spec=spec, iterations=iterations, threads=threads, output=output_dir+ wowclass[0] + "_" + spec + ".txt" )
print( "Simulating {wclass} spec {spec}".format(wclass=wowclass[0], spec=spec) )
print( command )
time.sleep(0.1)
sb.check_call( command )
time.sleep(2)
except sb.CalledProcessError as e:
print ("Exited with non-zero return code: " + str(e.returncode) )
| 0.025907 |
import os
from django.conf import settings
from django.contrib import messages
from django.core import exceptions
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ungettext, ugettext_lazy as _
from django.views.generic import (
ListView, DeleteView, CreateView, UpdateView, View)
from oscar.views.generic import BulkEditMixin
from oscar.core.loading import get_classes, get_model
Range = get_model('offer', 'Range')
RangeProduct = get_model('offer', 'RangeProduct')
RangeProductFileUpload = get_model('offer', 'RangeProductFileUpload')
Product = get_model('catalogue', 'Product')
RangeForm, RangeProductForm = get_classes('dashboard.ranges.forms',
['RangeForm', 'RangeProductForm'])
class RangeListView(ListView):
model = Range
context_object_name = 'ranges'
template_name = 'dashboard/ranges/range_list.html'
class RangeCreateView(CreateView):
model = Range
template_name = 'dashboard/ranges/range_form.html'
form_class = RangeForm
def get_success_url(self):
if 'action' in self.request.POST:
return reverse('dashboard:range-products',
kwargs={'pk': self.object.id})
else:
msg = render_to_string(
'dashboard/ranges/messages/range_saved.html',
{'range': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:range-list')
def get_context_data(self, **kwargs):
ctx = super(RangeCreateView, self).get_context_data(**kwargs)
ctx['title'] = _("Create range")
return ctx
class RangeUpdateView(UpdateView):
model = Range
template_name = 'dashboard/ranges/range_form.html'
form_class = RangeForm
def get_object(self):
obj = super(RangeUpdateView, self).get_object()
if not obj.is_editable:
raise exceptions.PermissionDenied("Not allowed")
return obj
def get_success_url(self):
if 'action' in self.request.POST:
return reverse('dashboard:range-products',
kwargs={'pk': self.object.id})
else:
msg = render_to_string(
'dashboard/ranges/messages/range_saved.html',
{'range': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:range-list')
def get_context_data(self, **kwargs):
ctx = super(RangeUpdateView, self).get_context_data(**kwargs)
ctx['range'] = self.object
ctx['title'] = self.object.name
return ctx
class RangeDeleteView(DeleteView):
model = Range
template_name = 'dashboard/ranges/range_delete.html'
context_object_name = 'range'
def get_success_url(self):
messages.warning(self.request, _("Range deleted"))
return reverse('dashboard:range-list')
class RangeProductListView(BulkEditMixin, ListView):
model = Product
template_name = 'dashboard/ranges/range_product_list.html'
context_object_name = 'products'
actions = ('remove_selected_products', 'add_products')
form_class = RangeProductForm
def post(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
if request.POST.get('action', None) == 'add_products':
return self.add_products(request)
return super(RangeProductListView, self).post(request, *args, **kwargs)
def get_range(self):
if not hasattr(self, '_range'):
self._range = get_object_or_404(Range, id=self.kwargs['pk'])
return self._range
def get_queryset(self):
products = self.get_range().included_products.all()
return products.order_by('rangeproduct__display_order')
def get_context_data(self, **kwargs):
ctx = super(RangeProductListView, self).get_context_data(**kwargs)
range = self.get_range()
ctx['range'] = range
if 'form' not in ctx:
ctx['form'] = self.form_class(range)
return ctx
def remove_selected_products(self, request, products):
range = self.get_range()
for product in products:
range.remove_product(product)
num_products = len(products)
messages.success(request, ungettext("Removed %d product from range",
"Removed %d products from range",
num_products) % num_products)
return HttpResponseRedirect(self.get_success_url(request))
def add_products(self, request):
range = self.get_range()
form = self.form_class(range, request.POST, request.FILES)
if not form.is_valid():
ctx = self.get_context_data(form=form,
object_list=self.object_list)
return self.render_to_response(ctx)
self.handle_query_products(request, range, form)
self.handle_file_products(request, range, form)
return HttpResponseRedirect(self.get_success_url(request))
def handle_query_products(self, request, range, form):
products = form.get_products()
if not products:
return
for product in products:
range.add_product(product)
num_products = len(products)
messages.success(request, ungettext("%d product added to range",
"%d products added to range",
num_products) % num_products)
dupe_skus = form.get_duplicate_skus()
if dupe_skus:
messages.warning(
request,
_("The products with SKUs or UPCs matching %s are already "
"in this range") % ", ".join(dupe_skus))
missing_skus = form.get_missing_skus()
if missing_skus:
messages.warning(
request,
_("No product(s) were found with SKU or UPC matching %s") %
", ".join(missing_skus))
def handle_file_products(self, request, range, form):
if 'file_upload' not in request.FILES:
return
upload = self.create_upload_object(request, range)
upload.process()
if not upload.was_processing_successful():
messages.error(request, upload.error_message)
else:
msg = render_to_string(
'dashboard/ranges/messages/range_products_saved.html',
{'range': range,
'upload': upload})
messages.success(request, msg, extra_tags='safe noicon block')
upload.delete_file()
def create_upload_object(self, request, range):
f = request.FILES['file_upload']
destination_path = os.path.join(settings.OSCAR_UPLOAD_ROOT, f.name)
with open(destination_path, 'wb+') as dest:
for chunk in f.chunks():
dest.write(chunk)
upload = RangeProductFileUpload.objects.create(
range=range,
uploaded_by=request.user,
filepath=destination_path,
size=f.size
)
return upload
class RangeReorderView(View):
def post(self, request, pk):
order = dict(request.POST).get('product[]')
self._save_page_order(order)
return HttpResponse(status=200)
def _save_page_order(self, order):
"""
Save the order of the products within range.
"""
range = get_object_or_404(Range, pk=self.kwargs['pk'])
for index, item in enumerate(order):
entry = RangeProduct.objects.get(range=range, product__pk=item)
if entry.display_order != index:
entry.display_order = index
entry.save()
| 0 |
# -*- coding: utf-8 -*-
'''
Loading and unloading of kernel modules
=======================================
The Kernel modules on a system can be managed cleanly with the kmod state
module:
.. code-block:: yaml
kvm_amd:
kmod.present
pcspkr:
kmod.absent
'''
def __virtual__():
'''
Only load if the kmod module is available in __salt__
'''
return 'kmod.available' in __salt__
def present(name, persist=False):
'''
Ensure that the specified kernel module is loaded
name
The name of the kernel module to verify is loaded
persist
Also add module to ``/etc/modules``
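    For example, to also load a module at boot (``zfs`` here is only an
    illustrative module name):
    .. code-block:: yaml
        zfs:
          kmod.present:
            - persist: True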
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mods = __salt__['kmod.mod_list']()
if persist:
persist_mods = __salt__['kmod.mod_list'](True)
        # Intersection of persistent and non-persistent modules
mods_set = list(set(mods) & set(persist_mods))
else:
mods_set = mods
if name in mods_set:
ret['comment'] = ('Kernel module {0} is already present'
.format(name))
return ret
# Module is not loaded, verify availability
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Module {0} is set to be loaded'.format(name)
return ret
if name not in __salt__['kmod.available']():
ret['comment'] = 'Kernel module {0} is unavailable'.format(name)
ret['result'] = False
return ret
load_result = __salt__['kmod.load'](name, persist)
if isinstance(load_result, list):
if len(load_result) > 0:
for mod in load_result:
ret['changes'][mod] = 'loaded'
ret['comment'] = 'Loaded kernel module {0}'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to load kernel module {0}'.format(name)
else:
ret['result'] = False
ret['comment'] = load_result
return ret
def absent(name, persist=False, comment=True):
'''
Verify that the named kernel module is not loaded
name
The name of the kernel module to verify is not loaded
persist
Delete module from ``/etc/modules``
comment
Don't remove module from ``/etc/modules``, only comment it
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mods = __salt__['kmod.mod_list']()
if persist:
persist_mods = __salt__['kmod.mod_list'](True)
mods = list(set(mods) | set(persist_mods))
if name in mods:
# Found the module, unload it!
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Module {0} is set to be unloaded'.format(name)
return ret
for mod in __salt__['kmod.remove'](name, persist, comment):
ret['changes'][mod] = 'removed'
for change in ret['changes']:
if name in change:
ret['comment'] = 'Removed kernel module {0}'.format(name)
return ret
ret['result'] = False
ret['comment'] = ('Module {0} is present but failed to remove'
.format(name))
return ret
else:
ret['comment'] = 'Kernel module {0} is already absent'.format(name)
return ret
| 0 |
# -*- coding: utf-8 -*-
"""
flask_raven.resource
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 Daniel Chatfield
:license: Artistic 2.0
"""
from datetime import datetime
from flask import request
from Crypto.Hash.SHA import SHA1Hash
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from .errors import (AuthenticationError, ResponseError, SignatureError,
TimestampError, UrlError, UserCancelledError)
from .helpers import get_config, get_key, b64decode, remove_query_arg
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
__all__ = ['RavenResponse', 'RavenRequest']
class RavenResponse(object):
""" Class representing the response from raven
The response fields and their associated values are:
Field Value
--------- ---------------------------------------------------------------
ver [REQUIRED] The version of the WLS protocol in use. This document
describes versions 1, 2 and 3 of the protocol. This will not be
greater than the 'ver' parameter supplied in the request
status [REQUIRED] A three digit status code indicating the status of
the authentication request. '200' indicates success, other
possible values are listed below.
msg [OPTIONAL] A text message further describing the status of the
authentication request, suitable for display to end-user.
issue [REQUIRED] The date and time that this authentication response
was created.
id [REQUIRED] An identifier for this response. 'id', combined
with 'issue' provides a unique identifier for this
response. 'id' is not unguessable.
url [REQUIRED] The value of 'url' supplied in the 'authentication
request' and used to form the 'authentication response'.
principal [REQUIRED if status is '200', otherwise required to be
empty] If present, indicates the authenticated identity of
the browser user
ptags [OPTIONAL in a version 3 response, MUST be entirely
omitted otherwise] A potentially empty sequence of text
tokens separated by ',' indicating attributes or
properties of the identified principal. Possible values of
this tag are not standardised and are a matter for local
definition by individual WLS operators (see note
below). WAA SOULD ignore values that they do not
recognise.
auth [REQUIRED if authentication was successfully established by
interaction with the user, otherwise required to be empty]
This indicates which authentication type was used. This
value consists of a single text token as described below.
sso [REQUIRED if 'auth' is empty] Authentication must have been
established based on previous successful authentication
interaction(s) with the user. This indicates which
authentication types were used on these occasions. This
value consists of a sequence of text tokens as described
below, separated by ','.
life [OPTIONAL] If the user has established an authenticated
'session' with the WLS, this indicates the remaining life
(in seconds) of that session. If present, a WAA SHOULD use
this to establish an upper limit to the lifetime of any
session that it establishes.
params [REQUIRED to be a copy of the params parameter from the
request]
kid [REQUIRED if a signature is present] A string which identifies
the RSA key which will be used to form a signature
supplied with the response. Typically these will be small
integers.
sig [REQUIRED if status is 200, OPTIONAL otherwise] A public-key
signature of the response data constructed from the entire
parameter value except 'kid' and 'signature' (and their
separating ':' characters) using the private key
identified by 'kid', the SHA-1 hash algorithm and the
'RSASSA-PKCS1-v1_5' scheme as specified in PKCS #1 v2.1
[RFC 3447] and the resulting signature encoded using the
base64 scheme [RFC 1521] except that the characters '+',
'/', and '=' are replaced by '-', '.' and '_' to reduce
the URL-encoding overhead.
"""
_response_fields = ("ver", "status", "msg", "issue", "id", "url",
"principal", "ptags", "auth", "sso", "life", "params",
"kid", "sig")
def __init__(self, response):
""" Parses a string response and returns a RavenResponse object
or throws an exception if something went wrong.
"""
self.raw_response = response
values = self._split_response(response)
# Strip the kid and sig to obtain the payload used to generate the
# signature
self.payload = '!'.join(values[:-2])
for key, value in zip(self._response_fields, values):
setattr(self, key, value)
self.issue = datetime.strptime(self.issue, "%Y%m%dT%H%M%SZ")
self.status = int(self.status)
replace = {
'-': '+',
'.': '/',
'_': '='
}
        for search, replace_with in replace.items():
self.sig = self.sig.replace(search, replace_with)
try:
self.sig = b64decode(self.sig, validate=True)
except (TypeError, ValueError):
raise SignatureError('Signature was malformed')
self.check_request_url()
self.check_timestamp()
self.check_signature()
if self.status == 410:
raise UserCancelledError()
elif self.status != 200:
raise AuthenticationError()
def _split_response(self, response_string):
values = response_string.split('!')
if len(values) != len(self._response_fields):
raise ResponseError(
"Incorrect number of response fields, expecting %d but got %d"
% (len(self._response_fields), len(values)))
return values
def check_request_url(self):
if self.url != remove_query_arg():
raise UrlError(
"The requested url does not match the url returned by raven")
def check_timestamp(self):
now = datetime.utcnow()
diff = now - self.issue
if diff.seconds > get_config('RAVEN_RESPONSE_TIMESTAMP_DIFF'):
raise TimestampError(
"Too much time has elapsed since this auth request was made")
def check_signature(self):
key = RSA.importKey(get_key())
mhash = SHA1Hash(self.payload)
verifier = PKCS1_v1_5.new(key)
if not verifier.verify(mhash, self.sig):
raise SignatureError(
"Signature mismatch - response has been tampered with")
class RavenRequest(object):
""" Class representing a request to the raven server """
def __init__(self, url=None):
if url is None:
url = request.url
self.ver = 3
self.url = url
@property
def redirect_url(self):
params = {
'ver': self.ver,
'url': self.url
}
return get_config('RAVEN_AUTH_ENDPOINT') + '?' + urlencode(params)
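# Usage sketch (hypothetical view code; assumes Flask routing and that the WLS
# returns its response in the standard "WLS-Response" query parameter):
#   req = RavenRequest()
#   return redirect(req.redirect_url)                     # send the user to Raven
#   ...
#   resp = RavenResponse(request.args['WLS-Response'])    # verify on return
#   crsid = resp.principal                                # authenticated identity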
| 0 |
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case7_1_2(Case):
DESCRIPTION = """Send two close frames"""
EXPECTATION = """Clean close with normal code. Second close frame ignored."""
def init(self):
self.suppressClose = True
def onConnectionLost(self, failedByMe):
Case.onConnectionLost(self, failedByMe)
if self.behaviorClose == Case.WRONG_CODE:
self.behavior = Case.FAILED
self.passed = False
self.result = self.resultClose
def onOpen(self):
payload = "Hello World!"
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
self.p.sendFrame(opcode = 8)
self.p.killAfter(1)
| 0.026397 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((4560.3, 2615.96, 5188.81), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((4782.09, 2505.79, 4805.8), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4697.55, 2368.67, 4408.95), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((5114.57, 2278.57, 4341.53), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5524.63, 2357.25, 4120.04), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((5028.95, 2259.9, 3794.75), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((4641.55, 2226.8, 3420.85), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((4987.99, 2601.42, 3211.98), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5402.53, 2939.81, 2939.19), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5083.08, 2694.01, 2884.9), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((4801.91, 2328.19, 3113.1), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((4316.04, 1953.96, 3303.56), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4258.18, 1561.16, 3483.21), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((4561.88, 1544.55, 3594.59), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((4655.13, 1187.34, 3709.83), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((4676.83, 800.233, 4067.69), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((4578.06, 381.959, 4514.28), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((4239.25, 100.446, 4839.36), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3919.19, -99.9129, 5274.58), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3877.05, -331.172, 5928.22), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((3565.64, -41.2717, 5454.21), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((3480.04, -199.154, 5043.68), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((3273.86, -515.943, 4851.12), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((2954.25, -478.16, 4888.19), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((2694.61, -282.096, 4938.75), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2571.3, 7.06416, 5056.33), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2796.02, 146.968, 4728.48), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2926.64, 654.904, 4463.73), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2987.59, 1104.26, 4163.96), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3229.09, 1264.61, 3851.03), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((3257.2, 1278.63, 3553.32), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3127.94, 1048.75, 3755.85), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3218.35, 1235.86, 3990.81), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3260.03, 1384.9, 4212.1), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((3115.95, 1444.51, 4337.47), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((3095.19, 1465.03, 4694.92), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2227.35, 869.461, 4714.04), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1557.32, 617.16, 4560.36), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1187.53, 315.733, 4446.88), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1426.78, -189.245, 4165.27), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((1950.41, 265.942, 3738.52), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((2612.08, 1037.17, 3617.88), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2865.49, 1721.05, 3637.05), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((2901.72, 2099.26, 3662.98), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((3145.38, 2598.51, 3359.62), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3167.82, 3021.6, 3454.22), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3002.13, 2568.01, 3989.35), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3106.31, 3280.71, 3910.92), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((3102.63, 4014.9, 4008.66), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((2714.18, 3702.53, 3806.72), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((2631.82, 3131.96, 3641.52), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2611.46, 2994.92, 3477.45), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((2549.49, 3613.84, 3393.18), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((2526.67, 4106.38, 3254.64), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((2564.33, 4516.12, 3086.61), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2712.65, 4035.68, 2686.8), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((2596.89, 3539.07, 2705.23), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2468.23, 2996.45, 2845.73), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2421.32, 2529.99, 3093.97), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2296.11, 2610.91, 3375.63), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2297.07, 2695.63, 3447.31), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2245.29, 3043.43, 3149.34), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((2006.41, 3668.72, 2713.44), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((1635.93, 4463.55, 2242.68), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((2168.49, 4075.57, 2234.38), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((2502.2, 3814.92, 2407.3), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((2341.42, 3679.37, 2610.96), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((2397.61, 3463.74, 2842.18), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((2441.4, 3388.8, 3217.65), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2495.25, 3420.46, 3690.04), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((2413.58, 3564.79, 4206.1), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((2191.18, 3848.66, 4309.78), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((1994.68, 4152.26, 3646.38), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((1903.39, 4441.09, 2877.29), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((2005.68, 4576.73, 2224.16), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((2176.97, 4493.42, 1862.63), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((1648.76, 4485.69, 1917.78), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((1383.48, 4578.23, 2316.4), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((1275.4, 4767.45, 2364.14), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((960.747, 4870.99, 2171.27), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((758.341, 4187.51, 1887.55), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((1016.26, 3502.8, 2004.1), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((1219.96, 3093.5, 2414.55), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((1116.32, 2837.9, 2611.13), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((522.386, 3309.4, 2162.83), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((929.084, 3614.04, 2129.96), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((1475.32, 3668.07, 2268.41), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((1904.74, 3695.44, 2220.39), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((1738.18, 4005.86, 1750.58), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((1534.34, 4054.22, 2081.04), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((1633.06, 4051.28, 2501.65), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((1894.13, 3834.52, 2557.36), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((2249.73, 3824.38, 2353.55), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((2742.96, 3916.34, 2273.44), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((3294.95, 4317.76, 2331.38), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((3530.81, 4766.28, 2579.25), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((3044.56, 4642.94, 2924.95), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((2724.65, 4302.41, 2752.52), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((2835.77, 4267.44, 2411.83), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((2775.65, 4766.47, 2268.26), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((2349.78, 4018.52, 2782.82), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2010.25, 3058.94, 3142.71), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2017.35, 2555.02, 2984.69), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((1972.21, 2827.03, 2710.45), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((2009.69, 3191.24, 2570), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((2106.65, 3582.21, 2518.48), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((2346.11, 3898.52, 2623.02), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((2682.79, 3974.88, 2441.67), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((2949.46, 4003.92, 2243.38), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((2885.65, 3774.29, 2096.34), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((2687.82, 3598.75, 2011.48), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((2705.83, 3632.16, 1683.08), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((2647.48, 3229.22, 1774.4), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2586.83, 2795.35, 1880.49), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2330.58, 2696.74, 2204.55), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((1805.85, 2734.76, 2385.02), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((1302.7, 2620.69, 2612.04), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((640.744, 2764.77, 2404.04), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((194.243, 3077.23, 2288.05), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((655.363, 2916.78, 2151.24), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((1049.25, 2942.43, 2208.83), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((1500.22, 3122.37, 2243.52), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((1727.14, 3627.09, 2180.57), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((2179.83, 3801.74, 2178.02), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((2614.4, 3971.38, 2075.21), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((3164.39, 3957.86, 2012.76), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((3725.23, 4275.68, 1908.43), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((4141.13, 4700.2, 1936.71), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((3714.93, 4803.3, 2149.22), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((3158.66, 4648.58, 2452.04), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((2795.45, 4532.31, 2280.71), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((2501.52, 4218.04, 2165.46), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((2332.7, 3890.37, 1952.75), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((2144.63, 3861.44, 1576.72), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((2139.76, 3544.64, 1418.67), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((2052.21, 3320.08, 1514.7), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((2209.22, 3330.69, 1956.77), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2438.27, 3050.36, 2349.56), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2710.69, 2675.32, 2555.92), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2906.64, 2714.12, 2656.8), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2615.36, 3024.94, 2602.93), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((2365.31, 3310.67, 2556.37), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((2258.28, 3375.51, 2247.99), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((2079.49, 3466.38, 1997.94), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((1822.31, 3647.86, 1794.48), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((1832.68, 3393.82, 2115.15), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((1967.9, 3172.83, 2274.31), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((2179.24, 3342.78, 2263.57), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2397.01, 3084.58, 2532.98), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2659.59, 2911.45, 2673.21), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2666.23, 2806.09, 2335.52), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2722.77, 2901.58, 1859.6), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((2829.35, 2981.25, 1505.82), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2608.41, 2773.07, 1629.18), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2577.83, 2575.41, 2162.01), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2479.9, 2508.6, 2620.59), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2461.5, 2800.36, 2828.21), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((2333.65, 3122.06, 2616.48), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((2017.78, 3446.92, 2398.3), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((1675.01, 3261.46, 2569.47), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((1563, 2822.14, 2805), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((1570.39, 2531.37, 2515.8), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((1313.55, 2882.1, 2055.3), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((925.016, 3423.92, 1471.77), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((1086.8, 3370.62, 1173.83), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((1416.14, 3261.84, 1225.76), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((1474.64, 2993.8, 1077.61), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((1711.75, 2865.86, 1621.69), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((1951.3, 2739.24, 2184.07), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((1956.99, 2804.87, 2534.32), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((1683.28, 3050.67, 2218.28), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((1418.52, 3298.07, 1900.35), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((1322.19, 3361.9, 2116.48), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((1350.08, 3101.35, 2010.18), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((1331.83, 2995.77, 1684.31), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((1292, 3306.24, 1549.2), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((1324.54, 3729.88, 1835.28), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((1485.12, 4238.91, 2128.62), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((1614.75, 4251.36, 2677.02), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((1709.4, 3876.93, 2960.87), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((1829.19, 3268.43, 2957.72), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((1890.93, 2810.22, 2942.08), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((1680.03, 3004.71, 2675.58), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((1300.73, 3352.64, 2336.58), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((1237.78, 3271.68, 1967.96), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((1132.55, 3025.87, 2137.01), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((928.141, 3093.8, 2347.08), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((596.433, 3304.34, 2373.53), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((1070.77, 3026.34, 2014.31), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((1564.15, 3084.59, 1805.19), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((1925.22, 3315.95, 1678.16), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((2488.12, 3381.22, 1432.42), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((2944.44, 3051.73, 1661.37), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((3410.76, 3103.91, 1734.01), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((3833.64, 3402.83, 1592.89), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((4352.44, 4013.64, 1095.6), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((4011.1, 4390.44, 1303.84), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((3358.49, 4283.08, 1719.97), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((2902.6, 3750.1, 2396.32), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((2589.13, 3269.01, 2704.38), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((2264.92, 3388.41, 2498.28), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((2142.79, 3488.67, 2090.37), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((2113.8, 3392.92, 1655.45), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2299.68, 3086.72, 1616.07), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((2073.88, 3195.52, 1762.11), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((1799, 3169.86, 1701.24), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2037.68, 2801.83, 2246.55), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2281.38, 2804.6, 2698.42), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((2329.91, 3605.74, 2367.78), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((2334.36, 4275.89, 1545.96), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((1854.26, 3919.33, 1354.55), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((1240.7, 3695.11, 1531.8), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((884.139, 3651.44, 2216.55), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((416.721, 3290.04, 2411.8), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((34.4919, 2792.92, 2232.33), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((86.1928, 2554.47, 1891.18), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((514.903, 2446.26, 2319.8), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((619.06, 2455.85, 2765.13), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((855.178, 2427.53, 3065.08), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((905.458, 2772.85, 3131.86), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((772.487, 3278.16, 2823.15), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((978.463, 3563.12, 2354.17), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((1447.71, 3607.9, 2474.88), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((1824.92, 3116.57, 2922.08), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((1808.23, 2915.8, 3095.64), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((1591.14, 2928.89, 2876), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((1683.04, 3275.3, 2708.19), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((1804.01, 2992.48, 2369.64), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2002.08, 2562.56, 2306.36), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2243.32, 2185.41, 2098.18), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2259.73, 2197.84, 1644.5), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2113.71, 2209.96, 1367.08), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((1979.82, 1959.32, 1557.98), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((2004.5, 1746.14, 1748.74), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2081.56, 2153.48, 1957.41), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((1901.95, 2583.16, 1964.85), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((1469.11, 2952, 1681.52), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((950.694, 3124.62, 1614.55), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((665.674, 2959.81, 1324.93), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((1103.26, 3090.12, 1336.03), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((1461.7, 2493.6, 1425.6), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((1859.69, 2249.23, 1651.18), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((1956.42, 2410.4, 1401.95), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2120.76, 2512.94, 1758.14), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2008.28, 2254.97, 1993.29), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((1781.98, 2076.14, 2130.03), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((1916.42, 1988.93, 2310.29), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2395.06, 1978.22, 2284.8), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2290.64, 2198.1, 2007.82), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((1932.61, 2288.61, 1785.93), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((1638.21, 2135.24, 1952.72), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((1520.78, 2092.5, 2320.15), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((1239.91, 2140.65, 2653.86), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((904.779, 2260.7, 2834.97), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((1034.56, 2128.21, 2576.84), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((1484.18, 2216.59, 2647.83), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((1908.61, 2141.76, 2570.62), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((1839.94, 2151.53, 2830.37), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((1970.27, 2229.56, 2787.99), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((1709.62, 2534.06, 2551.23), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((1703.81, 2772.84, 2420.4), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2065.21, 2510.45, 2446.14), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2434.05, 2102.72, 2669.05), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((2788.03, 1814.09, 2864.59), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((2773.92, 1961.53, 3189.6), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2571.14, 2189.82, 3184.22), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((2683.46, 1948.4, 3207.93), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((2642.02, 1894.83, 3259.18), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((2421.66, 1801.6, 3090.34), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((2058.07, 1666.71, 2695.58), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((1944.52, 1496.55, 2930.39), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((2172.22, 1482.99, 3259.14), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((1643.64, 1439.15, 2578.84), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((921.314, 1422.89, 1930.92), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((920.247, 1746.57, 1894.5), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((1261.6, 1977.68, 1890.03), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((1544.44, 2125.95, 1878.22), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((1958.02, 2121.77, 2334.59), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((2316.66, 2027.09, 2823.68), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((2381.18, 1984.35, 2801.67), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2405.2, 2036.38, 2343.33), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((2919.92, 1725.2, 2425.26), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((3112.31, 1241, 2668.57), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((3167.07, 1065.01, 2664.18), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((3232.15, 1051.57, 2494.18), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((3031.34, 668.636, 2156.97), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((3323.59, 197.548, 1973.55), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((3693.62, 357.615, 2099.26), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((3941.49, 793.473, 2267.19), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((4427.87, 1008.66, 1929.76), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((5006.86, 987.238, 1344.67), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((4733.41, 909.505, 969.097), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((3671.32, 702.833, 1869.21), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((4643.54, 1255.72, 1724.65), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((5081.8, 1414.21, 1833.83), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((4947.4, 1124.89, 1837.88), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((5034.4, 757.553, 1595.89), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((4450.73, 720.171, 1807.67), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((3835.53, 906.797, 2318.32), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((3412.43, 894.299, 2395.71), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((3465.1, 715.687, 2533.99), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((3268.75, 399.259, 2327.94), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((3070.09, 325.16, 2011.13), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((2918.32, 402.63, 1673.95), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((2782.17, 37.3409, 1797.02), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
  s=new_marker_set('particle_305 geometry')
  marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((2852.37, -355.565, 1668.45), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
  s=new_marker_set('particle_306 geometry')
  marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((3093.73, -486.671, 1665.92), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
  s=new_marker_set('particle_307 geometry')
  marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((3075.15, -30.2755, 2067.55), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
  s=new_marker_set('particle_308 geometry')
  marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((3148.7, 545.106, 2332.03), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
  s=new_marker_set('particle_309 geometry')
  marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((3129.69, 1009.35, 2242.14), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
  s=new_marker_set('particle_310 geometry')
  marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((3162.33, 1098.51, 2467.72), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
  s=new_marker_set('particle_311 geometry')
  marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((3137.75, 1088.55, 2425.97), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
  s=new_marker_set('particle_312 geometry')
  marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((3166.31, 824.039, 2508.03), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
  s=new_marker_set('particle_313 geometry')
  marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((2816.44, 346.26, 2629.34), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
  s=new_marker_set('particle_314 geometry')
  marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((3194.82, 209.125, 2847.2), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
  s=new_marker_set('particle_315 geometry')
  marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((3211.66, 471.058, 2792.46), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
  s=new_marker_set('particle_316 geometry')
  marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((2894.46, 657.053, 2583.16), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
  s=new_marker_set('particle_317 geometry')
  marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((2602.68, 669.316, 1938.15), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
  s=new_marker_set('particle_318 geometry')
  marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((2776.22, 797.717, 1200.18), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
  s=new_marker_set('particle_319 geometry')
  marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((3143.25, 1213.97, 1103.54), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
  s=new_marker_set('particle_320 geometry')
  marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((3414.98, 1623.2, 1275.22), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
  s=new_marker_set('particle_321 geometry')
  marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((3780.25, 1938.68, 1333.01), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
  s=new_marker_set('particle_322 geometry')
  marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((4196.88, 2092.21, 1339.64), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
  s=new_marker_set('particle_323 geometry')
  marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((4711.88, 2132.84, 1175.49), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
  s=new_marker_set('particle_324 geometry')
  marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((4226.69, 1749.63, 1231.13), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
  s=new_marker_set('particle_325 geometry')
  marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((3782.1, 1586.47, 1396.23), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
  s=new_marker_set('particle_326 geometry')
  marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((3572.97, 1865.05, 1069.04), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
  s=new_marker_set('particle_327 geometry')
  marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((3410.2, 1953.58, 659.713), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
  s=new_marker_set('particle_328 geometry')
  marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((3397.94, 2206.34, 1031.41), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
  s=new_marker_set('particle_329 geometry')
  marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((3557.03, 2343.18, 1327.26), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
  s=new_marker_set('particle_330 geometry')
  marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((3762.31, 2377.17, 1286.08), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
  s=new_marker_set('particle_331 geometry')
  marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((4114.72, 2320.6, 1067.8), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
  s=new_marker_set('particle_332 geometry')
  marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((4483.13, 2235.5, 797.468), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
  s=new_marker_set('particle_333 geometry')
  marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((4125.75, 2040.09, 934.54), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
  s=new_marker_set('particle_334 geometry')
  marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((3694.23, 1710.16, 1239.89), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
  s=new_marker_set('particle_335 geometry')
  marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((3201.04, 1587.51, 1680.76), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
  s=new_marker_set('particle_336 geometry')
  marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((2813.85, 1294.19, 1841.59), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
  s=new_marker_set('particle_337 geometry')
  marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((2565.73, 817.626, 1677.43), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
  s=new_marker_set('particle_338 geometry')
  marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((2588.36, 327.336, 1600.37), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
  s=new_marker_set('particle_339 geometry')
  marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((2612.94, 70.1497, 2035.23), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
  s=new_marker_set('particle_340 geometry')
  marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((2958.03, -162.765, 2137.48), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
  s=new_marker_set('particle_341 geometry')
  marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((2782.72, -367.341, 1846.57), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
  s=new_marker_set('particle_342 geometry')
  marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((2474.63, -145.432, 1962.96), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
  s=new_marker_set('particle_343 geometry')
  marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((2407.92, 232.549, 1860.23), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
  s=new_marker_set('particle_344 geometry')
  marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((2656.2, 481.969, 1493.57), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
  s=new_marker_set('particle_345 geometry')
  marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((2702.11, 396.421, 876.772), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
  s=new_marker_set('particle_346 geometry')
  marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((2972.37, 788.599, 612.417), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
  s=new_marker_set('particle_347 geometry')
  marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((3177.61, 1393.68, 764.856), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
  s=new_marker_set('particle_348 geometry')
  marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((3628.89, 1557.13, 961.411), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
  s=new_marker_set('particle_349 geometry')
  marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((3948.71, 1763.18, 952.459), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
  s=new_marker_set('particle_350 geometry')
  marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((3739.59, 2048.36, 891.954), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
  s=new_marker_set('particle_351 geometry')
  marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((3312.26, 1863.04, 950.244), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
  s=new_marker_set('particle_352 geometry')
  marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((3039.03, 1427.83, 1127.46), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
  s=new_marker_set('particle_353 geometry')
  marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((2823.42, 797.828, 1202.03), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
  s=new_marker_set('particle_354 geometry')
  marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((2746.83, 283.395, 1271.47), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
  s=new_marker_set('particle_355 geometry')
  marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((2582.55, 71.4996, 1507.94), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
  s=new_marker_set('particle_356 geometry')
  marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((2635.31, 291.013, 2075.4), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
  s=new_marker_set('particle_357 geometry')
  marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((2492.76, 425.996, 2792.3), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
  chimera.openModels.add([surf_sets[k]])
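
# Every particle block above follows the same pattern: look up (or create) a
# named marker set, then place a single sphere marker in it.  The helper below
# is a minimal sketch of how that pattern could be factored out; it reuses only
# the new_marker_set / marker_sets / place_marker names already defined and
# used in this script, while the helper's name, default color, and default
# radius are hypothetical additions, not part of the generated output.
def add_particle_marker(name, xyz, color=(0.7, 0.7, 0.7), radius=100.0):
    # Create the named marker set once; later calls reuse the cached set.
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    s = marker_sets[name]
    # place_marker(position, rgb_color, radius) adds one sphere marker,
    # exactly as in the repeated blocks above.
    return s.place_marker(xyz, color, radius)

# Hypothetical usage with the same call shape as the generated blocks:
# add_particle_marker('particle_358 geometry', (2500.0, 400.0, 2800.0), radius=150.0)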
| 0.024518 |