| text (string, lengths 681 to 1.05M) | score (float64, 0 to 0.27) |
|---|---|
import os
from . import utils
def _trimpath(path):
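    # Keep only the last three path components and normalise separators to forward slashes.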
comps = []
for _ in range(3):
path, c = os.path.split(path)
comps.append(c)
return os.path.join(*reversed(comps)).replace('\\', '/')
def concatenate_input_files(input_files, output_file, release=False):
assert isinstance(input_files, (list, tuple))
if len(input_files) == 1 and input_files[0] == output_file:
return
for f in input_files:
assert f != output_file, 'Concatenate input file is same as output.'
if output_file:
utils.ensure_path_exists(os.path.dirname(output_file))
try:
utils.logv('>>> concat {} > {}'.format(' '.join(input_files), output_file))
with open(output_file, 'w') as output:
for input_file in input_files:
with open(input_file, 'r') as input:
output.write(input.read())
if not release:
path = _trimpath(input_file)
output.write('\n\n'
'/*\n'
' * {caret}\n'
' * {path} \n'
' * {caret}\n'
' */\n\n\n'.format(path=path, caret='^' * len(path)))
except IOError as e:
utils.ensure_deleted(output_file)
raise utils.StaticCompilerError('Failed to concatenate to {}'.format(output_file), error=str(e))
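# Illustrative usage (hypothetical paths):
# concatenate_input_files(['lib/a.js', 'lib/b.js'], 'build/bundle.js', release=False)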
| 0.002063 |
import os
import urlparse
import requests
import redis
from PIL import Image
from StringIO import StringIO
from flask import Flask, request, send_file, redirect, render_template
from media import sanitze, ratio, measurements
import random
REDIS_URL = urlparse.urlparse(os.environ.get('REDISCLOUD_URL', 'redis://:@localhost:6379/'))
r = redis.StrictRedis(host=REDIS_URL.hostname, port=REDIS_URL.port, password=REDIS_URL.password)
app = Flask(__name__)
app.config['DEBUG'] = True
"""
if os.environ.get('DEVELOPMENT'):
app.config['DEBUG'] = True
else:
app.config['DEBUG'] = False
"""
ext2conttype2 = {
"jpg": "JPEG",
"jpeg": "JPEG",
"png": "PNG",
"gif": "GIF",
"image/jpeg": "JPEG",
"image/png": "PNG",
"image/gif": "GIF"
}
ext2conttype = {"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"png": "image/png",
"gif": "image/gif"
}
@app.route('/media/upload', methods=['GET', 'POST'])
def uploader():
if request.method=='POST':
try:
file = request.files['file']
if file and file.filename:
filename = file.filename
extension = filename[filename.rfind(".")+1:].lower()
content_type = ext2conttype[extension]
image = Image.open(file)
buff_img = StringIO()
image.seek(0)
image.save(buff_img, ext2conttype2[extension])
key = "%s.%s.%s" % (random.random(), random.random(), random.random())
print "KEY:", key
r.set("Image-%s" % key, buff_img.getvalue())
r.set("Content-type-%s" % key, content_type)
buff_img.seek(0)
#return send_file(buff_img, mimetype='image/jpg')
return key
except:
return 'Image Upload did not go well', 500
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/media/get/<img_id>', methods=['GET'])
def get_image(img_id):
data = r.get('Image-%s' % img_id)
content_type = r.get('Content-type-%s' % img_id)
buff = StringIO()
buff.write(data)
buff.seek(0)
print content_type
return send_file(buff, mimetype=content_type)
@app.route('/media/get/thumbnail/<img_id>', methods=['GET'])
def get_image_thumbnail(img_id):
height = request.args.get('height')
width = request.args.get('width')
data = r.get('Image-%s' % img_id)
content_type = r.get('Content-type-%s' % img_id)
buff = StringIO()
buff.write(data)
buff.seek(0)
if height is None or width is None:
return send_file(buff, mimetype=content_type)
image = Image.open(buff)
desired_width, desired_height = measurements(image, width, height)
buffer_image = StringIO()
print "Numbers is", desired_width, desired_height
resized_image = image.resize((desired_width, desired_height), Image.ANTIALIAS)
resized_image.save(buffer_image, ext2conttype2[content_type], quality=90)
buffer_image.seek(0)
return send_file(buffer_image, mimetype=content_type)
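# `measurements` comes from the local `media` module, which is not shown here.
# A minimal sketch of an aspect-ratio-preserving version might look like the
# following (an assumption -- the real helper may behave differently):
def _measurements_sketch(image, width, height):
    # width/height arrive as strings from request.args
    orig_w, orig_h = image.size
    scale = min(float(width) / orig_w, float(height) / orig_h)
    return int(orig_w * scale), int(orig_h * scale)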
if __name__ == '__main__':
app.run()
| 0.02421 |
import h5py,numpy, os, operator, pylab, time
p='F:/CHESS2011_h5MAIN'
px='F:/CHESS2011XRD_asimported'
fnxrd_fnpnsc=[
#('2011Jun01b_AuSiCu.h5',['AuSiCuheat1','AuSiCuheat2','AuSiCuheat3','AuSiCuheat4']),\
#('2011Jun01b_NiTiHf.h5',['NiTiHfheat1','NiTiHfheat1_MA','NiTiHfheat1_fast','NiTiHfheat1_slow','NiTiHfheat2','NiTiHfheat2_MA']),\
#('2011Jun01A_ZrCuAl_heat0.dat.h5', '2011Jun01a.h5', ['ZrCuAlheat1', 'ZrCuAlheat2', 'ZrCuAlheat3', 'ZrCuAlheat4', 'ZrCuAlheat5']), \
#('2011Oct02D_AuSiCu.h5', ['AuSiCuheats']), \
('2011Oct02D_BiInSn.h5', ['Bi_DCheats', 'Bi_ACheats', 'In_DCheats', 'In_ACheats', 'Sn_DCheats', 'Sn_ACheats']), \
#('2011Oct10B_NiTiHf.h5', ['NiTiHfheat1', 'NiTiHfheat2', 'NiTiHfheat3', 'NiTiHfheat1_MA', 'NiTiHfheat2_MA', 'NiTiHfheat3_MA']), \
#('2011Oct10B_FeNi.h5', ['DCheats', 'ACheats']), \
#('2011Oct10C.h5', ['borides']), \
#('2011Oct10D.h5', ['DCheats', 'ACheats']), \
#('BackgroundImages.dat.h5', serp), \
#('nosampleconfigurations.dat.h5', serp), \
]
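# tryattr returns pnt.attrs[s] when the attribute exists, otherwise None.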
tryattr=lambda pnt, s:(s in pnt.attrs.keys() and (pnt.attrs[s],) or (None,))[0]
x=[]
y=[]
for fn, expgrplist in fnxrd_fnpnsc:
if not fn.endswith('.h5'):
continue
f=h5py.File(os.path.join(p, fn), mode='r')
hppnt_epoch=[]
#for node in f['Calorimetry'].values():
for expgrp in expgrplist:
node=f['Calorimetry'][expgrp]
# for nn in node.values():
# print nn, 'samplecurrent' in nn
# if 'samplecurrent' in nn:
# nam=nn.name.rpartition('/')[2]
# f.copy(node[nam], node['measurement/HeatProgram'])
# del node[nam]
if not 'measurement/HeatProgram' in node:
continue
for node2 in node['measurement/HeatProgram'].itervalues():
sptup=tuple()
for k in ['specscan','prespecscan','postspecscan']:
t=tryattr(node2,k)
if not t is None:
pnt=f[`t`]['measurement/scalar_data/Seconds']
try:
sec=pnt[0]
except:
sec=pnt.value
t=(t, sec)
sptup+=(t,)
hppnt_epoch+=[(node2.attrs['epoch'], node2.attrs['CELLNUMBER'], node2.attrs['segment_ms'][-1], numpy.max(node2['samplecurrent'][:, :]))+sptup]
f.close()
eparr=numpy.array([tup[0] for tup in hppnt_epoch])
hppnt_epoch=[hppnt_epoch[i] for i in numpy.argsort(eparr)]
carr=numpy.array([tup[1] for tup in hppnt_epoch])
allcells=sorted(list(set(carr)))
maxnumexp=max([(carr==c).sum() for c in allcells])
sc=[]
se=[[] for i in range(maxnumexp)]
for count, cell in enumerate(allcells):
inds=numpy.where(carr==cell)[0]
sc+=['cell%02d' %cell]
for l, i in zip(se, inds):
e, c, maxms, maxma, stup, prestup, poststup=hppnt_epoch[i]
s='%.2fs PnSC up to %.1fmA' %(maxms/1000., maxma)
if not stup is None:
s+=' with insitu XRD'
if not prestup is None:
s+=', %ds preXRD' %prestup[1]
if not poststup is None:
s+=', %ds postXRD' %poststup[1]
l+=[s]
print fn
print '\t'.join(sc)
for l in se:
print '\t'.join(l)
print 'done'
| 0.02766 |
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .utils.theano_utils import sharedX, shared_zeros
def get_fans(shape):
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
fan_out = shape[1] if len(shape) == 2 else shape[0]
return fan_in, fan_out
def uniform(shape, scale=0.05):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def normal(shape, scale=0.05):
return sharedX(np.random.randn(*shape) * scale)
def lecun_uniform(shape):
''' Reference: LeCun 98, Efficient Backprop
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
'''
fan_in, fan_out = get_fans(shape)
scale = 1./np.sqrt(fan_in)
return uniform(shape, scale)
def glorot_normal(shape):
''' Reference: Glorot & Bengio, AISTATS 2010
'''
fan_in, fan_out = get_fans(shape)
s = np.sqrt(2. / (fan_in + fan_out))
return normal(shape, s)
def glorot_uniform(shape):
fan_in, fan_out = get_fans(shape)
s = np.sqrt(2. / (fan_in + fan_out))
return uniform(shape, s)
def he_normal(shape):
''' Reference: He et al., http://arxiv.org/abs/1502.01852
'''
fan_in, fan_out = get_fans(shape)
s = np.sqrt(2. / fan_in)
return normal(shape, s)
def he_uniform(shape):
fan_in, fan_out = get_fans(shape)
s = np.sqrt(2. / fan_in)
return uniform(shape, s)
def orthogonal(shape, scale=1.1):
''' From Lasagne
'''
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(scale * q[:shape[0], :shape[1]])
def identity(shape, scale=1):
if len(shape) != 2 or shape[0] != shape[1]:
raise Exception("Identity matrix initialization can only be used for 2D square matrices")
else:
return sharedX(scale * np.identity(shape[0]))
def zero(shape):
return shared_zeros(shape)
from .utils.generic_utils import get_from_module
def get(identifier):
return get_from_module(identifier, globals(), 'initialization')
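# Illustrative usage (hypothetical shapes):
# W = glorot_uniform((256, 128))      # shared variable for a dense layer's weights
# init_fn = get('he_normal')          # look an initializer up by name
# W_conv = init_fn((64, 3, 3, 3))     # fan_in is computed from shape[1:] for >2D shapes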
| 0.006824 |
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume snapshot interface (1.1 extension)."""
import six
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from cinderclient import base
class Snapshot(base.Resource):
"""A Snapshot is a point-in-time snapshot of an openstack volume."""
def __repr__(self):
return "<Snapshot: %s>" % self.id
def delete(self):
"""Delete this snapshot."""
self.manager.delete(self)
def update(self, **kwargs):
"""Update the name or description for this snapshot."""
self.manager.update(self, **kwargs)
@property
def progress(self):
return self._info.get('os-extended-snapshot-attributes:progress')
@property
def project_id(self):
return self._info.get('os-extended-snapshot-attributes:project_id')
def reset_state(self, state):
"""Update the snapshot with the provided state."""
self.manager.reset_state(self, state)
class SnapshotManager(base.ManagerWithFind):
"""Manage :class:`Snapshot` resources."""
resource_class = Snapshot
def create(self, volume_id, force=False,
name=None, description=None):
"""Create a snapshot of the given volume.
:param volume_id: The ID of the volume to snapshot.
:param force: If force is True, create a snapshot even if the volume is
attached to an instance. Default is False.
:param name: Name of the snapshot
:param description: Description of the snapshot
:rtype: :class:`Snapshot`
"""
body = {'snapshot': {'volume_id': volume_id,
'force': force,
'name': name,
'description': description}}
return self._create('/snapshots', body, 'snapshot')
def get(self, snapshot_id):
"""Get a snapshot.
:param snapshot_id: The ID of the snapshot to get.
:rtype: :class:`Snapshot`
"""
return self._get("/snapshots/%s" % snapshot_id, "snapshot")
def list(self, detailed=True, search_opts=None):
"""Get a list of all snapshots.
:rtype: list of :class:`Snapshot`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/snapshots%s%s" % (detail, query_string),
"snapshots")
def delete(self, snapshot):
"""Delete a snapshot.
:param snapshot: The :class:`Snapshot` to delete.
"""
self._delete("/snapshots/%s" % base.getid(snapshot))
def update(self, snapshot, **kwargs):
"""Update the name or description for a snapshot.
:param snapshot: The :class:`Snapshot` to update.
"""
if not kwargs:
return
body = {"snapshot": kwargs}
self._update("/snapshots/%s" % base.getid(snapshot), body)
def reset_state(self, snapshot, state):
"""Update the specified snapshot with the provided state."""
return self._action('os-reset_status', snapshot, {'status': state})
def _action(self, action, snapshot, info=None, **kwargs):
"""Perform a snapshot action."""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/snapshots/%s/action' % base.getid(snapshot)
return self.api.client.post(url, body=body)
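# Illustrative usage (assumes an authenticated cinderclient Client instance named `client`):
# snap = client.volume_snapshots.create('volume-uuid', force=True, name='nightly')
# available = client.volume_snapshots.list(search_opts={'status': 'available'})
# client.volume_snapshots.delete(snap)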
| 0 |
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import tools, api
from openerp.osv import osv, fields
class nfeio_servicomunicipio(osv.Model):
_name = 'nfeio.servicomunicipio'
_description = ''
_order = 'name'
_columns = {
'name': fields.char('Code', size=64, required=True),
'description': fields.char('Description', size=256),
'country_id': fields.many2one('res.country', 'Country'),
'state_id': fields.many2one('res.country.state', 'UF',
domain="[('country_id','=',country_id)]"),
'l10n_br_city_id': fields.many2one(
'l10n_br_base.city', 'Municipio',
domain="[('state_id','=',state_id)]"),
'cnae_id': fields.many2one('l10n_br_account.cnae', u'CNAE'),
'code_federal': fields.char('Federal Code', size=128),
'code_municipal': fields.char('Municipal Code', size=128),
'iss': fields.float('ISS'),
'rt_ir': fields.float('IR Withholding'),
'rt_pis': fields.float('PIS Withholding'),
'rt_cofins': fields.float('COFINS Withholding'),
'rt_csl': fields.float('CSL Withholding'),
'rt_inss': fields.float('INSS Withholding'),
'rt_iss': fields.float('ISS Withholding'),
'date_start':fields.datetime('Date Start'),
'date_end':fields.datetime('Date End'),
'state':fields.selection([('updated','Updated'),('changed','Changed'),('inactive','Inactive')],)
}
_defaults = {
'country_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.country_id.id,
}
_sql_constraints = [
('description_unique', 'unique(name)', u'The code must be unique.'),
]
| 0.006159 |
"""
=====================================================
An introduction to the Probabilistic Direction Getter
=====================================================
Probabilistic fiber tracking is a way of reconstructing white matter
connections using diffusion MR imaging. Like deterministic fiber tracking, the
probabilistic approach follows the trajectory of a possible pathway step by
step starting at a seed, however, unlike deterministic tracking, the tracking
direction at each point along the path is chosen at random from a distribution.
The distribution at each point is different and depends on the observed
diffusion data at that point. The distribution of tracking directions at each
point can be represented as a probability mass function (PMF) if the possible
tracking directions are restricted to a discrete set of well-distributed
points on a sphere.
This example is an extension of the :ref:`example_tracking_introduction_eudx`
example. We'll begin by repeating a few steps from that example, loading the
data and fitting a Constrained Spherical Deconvolution (CSD) model.
"""
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response)
from dipy.tracking import utils
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
from dipy.viz import window, actor, colormap, has_fury
# Enables/disables interactive visualization
interactive = False
hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')
data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
seed_mask = (labels == 2)
white_matter = (labels == 1) | (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, affine, density=1)
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
csd_fit = csd_model.fit(data, mask=white_matter)
"""
We use the GFA of the CSA model to build a stopping criterion.
"""
from dipy.reconst.shm import CsaOdfModel
csa_model = CsaOdfModel(gtab, sh_order=6)
gfa = csa_model.fit(data, mask=white_matter).gfa
stopping_criterion = ThresholdStoppingCriterion(gfa, .25)
"""
The Fiber Orientation Distribution (FOD) of the CSD model estimates the
distribution of small fiber bundles within each voxel. We can use this
distribution for probabilistic fiber tracking. One way to do this is to
represent the FOD using a discrete sphere. This discrete FOD can be used by the
``ProbabilisticDirectionGetter`` as a PMF for sampling tracking directions. We
need to clip the FOD to use it as a PMF because the latter cannot have negative
values. Ideally, the FOD should be strictly positive, but because of noise
and/or model failures sometimes it can have negative values.
"""
from dipy.direction import ProbabilisticDirectionGetter
from dipy.data import small_sphere
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_trk
fod = csd_fit.odf(small_sphere)
pmf = fod.clip(min=0)
prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
sphere=small_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_probabilistic_dg_pmf.trk")
if has_fury:
r = window.Renderer()
r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
window.record(r, out_path='tractogram_probabilistic_dg_pmf.png',
size=(800, 800))
if interactive:
window.show(r)
"""
.. figure:: tractogram_probabilistic_dg_pmf.png
:align: center
**Corpus Callosum using probabilistic direction getter from PMF**
"""
"""
One disadvantage of using a discrete PMF to represent possible tracking
directions is that it tends to take up a lot of memory (RAM). The size of the
PMF, the FOD in this case, must be equal to the number of possible tracking
directions on the hemisphere, and every voxel has a unique PMF. In this case
the data is ``(81, 106, 76)`` and ``small_sphere`` has 181 directions so the
FOD is ``(81, 106, 76, 181)``. One way to avoid sampling the PMF and holding it
in memory is to build the direction getter directly from the spherical harmonic
(SH) representation of the FOD. By using this approach, we can also use a
larger sphere, like ``default_sphere`` which has 362 directions on the
hemisphere, without having to worry about memory limitations.
"""
from dipy.data import default_sphere
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_probabilistic_dg_sh.trk")
if has_fury:
r = window.Renderer()
r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
window.record(r, out_path='tractogram_probabilistic_dg_sh.png',
size=(800, 800))
if interactive:
window.show(r)
"""
.. figure:: tractogram_probabilistic_dg_sh.png
:align: center
**Corpus Callosum using probabilistic direction getter from SH**
"""
"""
Not all model fits have the ``shm_coeff`` attribute because not all models use
this basis to represent the data internally. However we can fit the ODF of any
model to the spherical harmonic basis using the ``peaks_from_model`` function.
"""
from dipy.direction import peaks_from_model
peaks = peaks_from_model(csd_model, data, default_sphere, .5, 25,
mask=white_matter, return_sh=True, parallel=True)
fod_coeff = peaks.shm_coeff
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(fod_coeff, max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_probabilistic_dg_sh_pfm.trk")
if has_fury:
r = window.Renderer()
r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
window.record(r, out_path='tractogram_probabilistic_dg_sh_pfm.png',
size=(800, 800))
if interactive:
window.show(r)
"""
.. figure:: tractogram_probabilistic_dg_sh_pfm.png
:align: center
**Corpus Callosum using probabilistic direction getter from SH (
peaks_from_model)**
"""
"""
.. include:: ../links_names.inc
"""
| 0.000944 |
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = 4.59536484786e-20,
J = 1.66765790329e-16,
J_df = 0,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 8,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( docvis - exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})), instruments(incomet ssiratio aget aget2 educyr actlim totchr) onestep vce(robust)",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "onestep",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "incomet ssiratio aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "docvis - exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )",
properties = "b V",
)
params_table = np.array([
.62093805844748, .35860052573857, 1.731559252928, .08335206643438,
-.08190605683724, 1.3237821737322, np.nan, 1.9599639845401,
0, .68895699568302, .43817618784254, 1.5723286997298,
.11587434043505, -.1698525513714, 1.5477665427374, np.nan,
1.9599639845401, 0, .25750627258076, .05009451793791,
5.1404082358855, 2.741421857e-07, .15932282159956, .35568972356197,
np.nan, 1.9599639845401, 0, -.05352997420414,
.01103202674353, -4.8522339048464, 1.220785200e-06, -.07515234929795,
-.03190759911034, np.nan, 1.9599639845401, 0,
.03106248018916, .01032090201131, 3.0096671933432, .00261534090329,
.01083388395902, .05129107641931, np.nan, 1.9599639845401,
0, .14175365608301, .0494498280382, 2.8666157539212,
.00414886404159, .04483377408643, .23867353807958, np.nan,
1.9599639845401, 0, .23128095221422, .01565221628818,
14.776243054406, 2.084750820e-49, .20060317201116, .26195873241727,
np.nan, 1.9599639845401, 0, .34763567088735,
.31615794015526, 1.0995633091379, .27152243570261, -.27202250524333,
.96729384701803, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.12859433705998, .13265896898444, .00910916927048, -.00144786113189,
-.00037337560793, -.00152379041042, -.00336772308907, -.09899309651531,
.13265896898444, .19199837159222, .00979636564963, -.00135323134276,
.00180599814286, -.00930935415071, -.00460031335865, -.13429156867927,
.00910916927048, .00979636564963, .00250946072743, -.00052373946978,
5.155389870e-07, -.00016461502154, -.00025816911604, -.00869892550441,
-.00144786113189, -.00135323134276, -.00052373946978, .00012170561407,
8.334416260e-06, -.00002526568199, .00003797456789, .00131001446811,
-.00037337560793, .00180599814286, 5.155389870e-07, 8.334416260e-06,
.00010652101833, -.00026856403693, -.00003344387872, -.00122933496346,
-.00152379041042, -.00930935415071, -.00016461502154, -.00002526568199,
-.00026856403693, .00244528549301, .00003610001892, .00527355381855,
-.00336772308907, -.00460031335865, -.00025816911604, .00003797456789,
-.00003344387872, .00003610001892, .00024499187473, .00300075896709,
-.09899309651531, -.13429156867927, -.00869892550441, .00131001446811,
-.00122933496346, .00527355381855, .00300075896709, .09995584312322
]).reshape(8,8)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
for i,att in enumerate(['params', 'bse', 'tvalues', 'pvalues']):
self[att] = self.params_table[:,i]
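# Bunch exposes its keys as attributes and copies the first four columns of
# params_table into params, bse, tvalues and pvalues for convenient access.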
results_addonestep = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = 6.09567389485e-33,
J = 2.21212005644e-29,
J_df = 0,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 8,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( docvis - exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})), instruments(incomet ssiratio aget aget2 educyr actlim totchr) twostep vce(robust)",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "twostep",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "incomet ssiratio aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "docvis - exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )",
properties = "b V",
)
params_table = np.array([
.6209380584426, .35860052570457, 1.7315592530786, .08335206640755,
-.08190605677548, 1.3237821736607, np.nan, 1.9599639845401,
0, .68895699501744, .43817618789764, 1.5723286980131,
.11587434083298, -.16985255214498, 1.5477665421799, np.nan,
1.9599639845401, 0, .25750627271754, .05009451794125,
5.1404082382732, 2.741421823e-07, .15932282172979, .35568972370529,
np.nan, 1.9599639845401, 0, -.05352997423123,
.01103202674378, -4.8522339071944, 1.220785186e-06, -.07515234932551,
-.03190759913694, np.nan, 1.9599639845401, 0,
.03106248018903, .01032090201422, 3.0096671924822, .0026153409107,
.01083388395319, .05129107642488, np.nan, 1.9599639845401,
0, .14175365616691, .04944982804302, 2.8666157553386,
.00414886402301, .04483377416089, .23867353817294, np.nan,
1.9599639845401, 0, .23128095224221, .01565221628892,
14.776243055497, 2.084750786e-49, .20060317203771, .26195873244672,
np.nan, 1.9599639845401, 0, .34763567064032,
.31615794015859, 1.099563308345, .27152243604826, -.27202250549689,
.96729384677754, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.12859433703559, .1326589689683, .00910916927021, -.00144786113188,
-.00037337560766, -.00152379040753, -.00336772308885, -.09899309649807,
.1326589689683, .1919983716405, .00979636565235, -.00135323134324,
.00180599814488, -.00930935415256, -.00460031335946, -.13429156869395,
.00910916927021, .00979636565235, .00250946072777, -.00052373946983,
5.155391569e-07, -.00016461502162, -.00025816911611, -.00869892550672,
-.00144786113188, -.00135323134324, -.00052373946983, .00012170561408,
8.334416227e-06, -.00002526568198, .0000379745679, .00131001446858,
-.00037337560766, .00180599814488, 5.155391569e-07, 8.334416227e-06,
.00010652101839, -.00026856403706, -.00003344387875, -.00122933496459,
-.00152379040753, -.00930935415256, -.00016461502162, -.00002526568198,
-.00026856403706, .00244528549348, .00003610001887, .00527355381795,
-.00336772308885, -.00460031335946, -.00025816911611, .0000379745679,
-.00003344387875, .00003610001887, .00024499187475, .00300075896724,
-.09899309649807, -.13429156869395, -.00869892550672, .00131001446858,
-.00122933496459, .00527355381795, .00300075896724, .09995584312533
]).reshape(8,8)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
results_addtwostep = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = .0002538911897719,
J = .9213711276820714,
J_df = 1,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 9,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) onestep vce(robust)",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "onestep",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",
properties = "b V",
)
params_table = np.array([
.67045580921478, .25039046077656, 2.6776411814389, .00741425985435,
.17969952402034, 1.1612120944092, np.nan, 1.9599639845401,
0, .28551241628798, .10358919281318, 2.7561988710819,
.00584774303307, .08248132918657, .4885435033894, np.nan,
1.9599639845401, 0, .2672004738793, .05203985579809,
5.1345352476769, 2.828420839e-07, .16520423075439, .36919671700421,
np.nan, 1.9599639845401, 0, -.0560702624564,
.01191485946838, -4.7059105149509, 2.527353692e-06, -.07942295789528,
-.03271756701753, np.nan, 1.9599639845401, 0,
.01448379701656, .00782559934942, 1.8508227127214, .06419506241955,
-.00085409586574, .02982168989887, np.nan, 1.9599639845401,
0, .18130374188096, .0382173439987, 4.7440173206998,
2.095209222e-06, .10639912405874, .25620835970318, np.nan,
1.9599639845401, 0, .28146161235562, .01380395117777,
20.389931022715, 2.054354003e-92, .25440636520284, .30851685950839,
np.nan, 1.9599639845401, 0, .51399857133918,
.10262653035745, 5.0084375799215, 5.487366567e-07, .31285426798028,
.71514287469808, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.0626953828479, .02323594786658, .00535172023578, -.00103050587759,
-.00154311442856, .00154515839603, -.00043159973572, -.01570852578318,
.02323594786658, .01073072086769, .00207768328305, -.00039713375955,
-.00049396171685, .00027652302157, -.00020408147523, -.00701276303887,
.00535172023578, .00207768328305, .00270814659149, -.00059652725999,
-.00012298559534, .00021079055266, -.00004341699196, -.0031278522429,
-.00103050587759, -.00039713375955, -.00059652725999, .00014196387615,
.00002481291175, -.00006035908648, .00001093157006, .00059187926133,
-.00154311442856, -.00049396171685, -.00012298559534, .00002481291175,
.00006124000518, -.00001857594061, .00001436652009, .00008106194688,
.00154515839603, .00027652302157, .00021079055266, -.00006035908648,
-.00001857594061, .00146056538231, -.00016708887634, -.00074321753343,
-.00043159973572, -.00020408147523, -.00004341699196, .00001093157006,
.00001436652009, -.00016708887634, .00019054906812, -.00028024031412,
-.01570852578318, -.00701276303887, -.0031278522429, .00059187926133,
.00008106194688, -.00074321753343, -.00028024031412, .01053220473321
]).reshape(8,8)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
results_multonestep = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = .0002589826272982,
J = .9398479544653281,
J_df = 1,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 9,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep vce(robust)",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "twostep",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",
properties = "b V",
)
params_table = np.array([
.67815288158883, .25053953449054, 2.7067699433856, .00679413212727,
.18710441728393, 1.1692013458937, np.nan, 1.9599639845401,
0, .28872837589732, .1032733938985, 2.7957672833051,
.00517766683505, .08631624329503, .49114050849961, np.nan,
1.9599639845401, 0, .27067071818542, .05199695467114,
5.2055109745809, 1.934635127e-07, .16875855972422, .37258287664662,
np.nan, 1.9599639845401, 0, -.05690856524563,
.01189861686254, -4.7827882772482, 1.728801925e-06, -.08022942576205,
-.03358770472921, np.nan, 1.9599639845401, 0,
.01438118999252, .00783219080428, 1.8361644081315, .06633334485657,
-.00096962190392, .02973200188896, np.nan, 1.9599639845401,
0, .18038262255626, .03826653224544, 4.7138481584715,
2.430818311e-06, .10538159754195, .25538364757056, np.nan,
1.9599639845401, 0, .28251027986119, .01378475918788,
20.494393555287, 2.415775858e-93, .25549264831739, .30952791140498,
np.nan, 1.9599639845401, 0, .5077134442587,
.10235830367214, 4.9601588346456, 7.043556343e-07, .30709485554269,
.7083320329747, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.06277005834274, .02315710174743, .00533574120292, -.00102544979294,
-.00154463417995, .0015508406274, -.00043796451278, -.01559999387335,
.02315710174743, .01066539388732, .00206217803508, -.00039331197813,
-.00049172930967, .00027603135609, -.00020644763374, -.00694810289238,
.00533574120292, .00206217803508, .00270368329507, -.0005950942106,
-.00012276584915, .00021462173623, -.00004681980342, -.00310767551047,
-.00102544979294, -.00039331197813, -.0005950942106, .00014157708324,
.00002474211336, -.00006134660609, .00001178280314, .00058658157366,
-.00154463417995, -.00049172930967, -.00012276584915, .00002474211336,
.00006134321279, -.00001855941375, .00001443470174, .0000776612477,
.0015508406274, .00027603135609, .00021462173623, -.00006134660609,
-.00001855941375, .00146432749009, -.00016643326394, -.00074847803836,
-.00043796451278, -.00020644763374, -.00004681980342, .00001178280314,
.00001443470174, -.00016643326394, .00019001958587, -.00027573517109,
-.01559999387335, -.00694810289238, -.00310767551047, .00058658157366,
.0000776612477, -.00074847803836, -.00027573517109, .01047722233064
]).reshape(8,8)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
results_multtwostep = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = .0002590497181628,
J = .940091427212973,
J_df = 1,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 9,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep wmatrix(robust) vce(unadjusted) center",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "twostep",
wmatrix = "robust",
vce = "unadjusted",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",
properties = "b V",
)
params_table = np.array([
.67815486150911, .25018082946574, 2.7106587781218, .00671496899138,
.1878094461339, 1.1685002768843, np.nan, 1.9599639845401,
0, .28872920226215, .10311429027815, 2.8000891193967,
.00510884999633, .08662890702558, .49082949749873, np.nan,
1.9599639845401, 0, .27067161407481, .0518802415232,
5.2172388972735, 1.816099638e-07, .16898820918009, .37235501896953,
np.nan, 1.9599639845401, 0, -.05690878166227,
.0118728670827, -4.7931793783164, 1.641587211e-06, -.08017917353758,
-.03363838978695, np.nan, 1.9599639845401, 0,
.01438116368432, .00781887593806, 1.8392878718448, .0658728559523,
-.00094355155385, .0297058789225, np.nan, 1.9599639845401,
0, .18038238197017, .03819661477822, 4.7224703816696,
2.329970297e-06, .10551839267351, .25524637126682, np.nan,
1.9599639845401, 0, .28251055147828, .01376659609161,
20.521452768591, 1.385109204e-93, .25552851894901, .30949258400755,
np.nan, 1.9599639845401, 0, .50771182444237,
.10208891085993, 4.9732318639284, 6.584582712e-07, .30762123593598,
.70780241294876, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0'.split()
cov = np.array([
.06259044743217, .02308524749042, .00531802921719, -.0010223122446,
-.00154027662468, .00154945994717, -.00043816683551, -.01554486097815,
.02308524749042, .01063255685957, .00205438168765, -.00039193802388,
-.00049039628782, .0002760841411, -.0002064504141, -.00691934867666,
.00531802921719, .00205438168765, .00269155946051, -.00059250696972,
-.00012247118567, .00021403084056, -.00004749600121, -.00308951213731,
-.0010223122446, -.00039193802388, -.00059250696972, .00014096497276,
.00002468288871, -.00006115240604, .00001190303672, .00058327928125,
-.00154027662468, -.00049039628782, -.00012247118567, .00002468288871,
.00006113482093, -.00001854325518, .00001439868646, .00007784185009,
.00154945994717, .0002760841411, .00021403084056, -.00006115240604,
-.00001854325518, .00145898138052, -.00016596475072, -.00074697007542,
-.00043816683551, -.0002064504141, -.00004749600121, .00001190303672,
.00001439868646, -.00016596475072, .00018951916795, -.00027350320218,
-.01554486097815, -.00691934867666, -.00308951213731, .00058327928125,
.00007784185009, -.00074697007542, -.00027350320218, .01042214572057
]).reshape(8,8)
cov_colnames = 'xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0'.split()
cov_rownames = 'xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0'.split()
results_multtwostepdefault = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
import numpy as np
est = dict(
rank = 8,
N = 3629,
Q = .0002590497181628,
J = .940091427212973,
J_df = 1,
k_1 = 8,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 8,
n_moments = 9,
k_aux = 8,
k_eq_model = 0,
k_eq = 8,
cmdline = "gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep wmatrix(robust) center",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "twostep",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
inst_1 = "income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
params_1 = "xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",
sexp_1 = "(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",
properties = "b V",
)
params_table = np.array([
.67815486150911, .25053960844836, 2.7067770469869, .00679398676131,
.18710625224955, 1.1692034707687, np.nan, 1.9599639845401,
0, .28872920226215, .10327332768441, 2.7957770775479,
.00517750993835, .08631719943712, .49114120508719, np.nan,
1.9599639845401, 0, .27067161407481, .05199697557915,
5.2055261110869, 1.934477426e-07, .16875941463467, .37258381351495,
np.nan, 1.9599639845401, 0, -.05690878166227,
.01189862079945, -4.7828048831437, 1.728659059e-06, -.08022964989488,
-.03358791342965, np.nan, 1.9599639845401, 0,
.01438116368432, .00783219272776, 1.8361605982125, .06633390816397,
-.00096965198207, .02973197935072, np.nan, 1.9599639845401,
0, .18038238197017, .03826654814775, 4.71383991244,
2.430916736e-06, .10538132578791, .25538343815243, np.nan,
1.9599639845401, 0, .28251055147828, .01378476509846,
20.494404471929, 2.415234157e-93, .25549290834996, .3095281946066,
np.nan, 1.9599639845401, 0, .50771182444237,
.10235828870929, 4.960143734762, 7.044103886e-07, .307093265053,
.70833038383174, np.nan, 1.9599639845401, 0
]).reshape(8,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.06277009540146, .02315708886727, .00533574465012, -.0010254503134,
-.00154463481696, .00155084007911, -.00043796389511, -.01559997980204,
.02315708886727, .01066538021101, .00206217721135, -.00039331175814,
-.00049172883672, .00027603038575, -.00020644729789, -.00694809209467,
.00533574465012, .00206217721135, .00270368546938, -.00059509464294,
-.000122765895, .00021462183651, -.00004681968717, -.003107676362,
-.0010254503134, -.00039331175814, -.00059509464294, .00014157717693,
.00002474211983, -.00006134664668, .00001178278294, .00058658166731,
-.00154463481696, -.00049172883672, -.000122765895, .00002474211983,
.00006134324292, -.00001855938213, .00001443468876, .00007766055925,
.00155084007911, .00027603038575, .00021462183651, -.00006134664668,
-.00001855938213, .00146432870714, -.00016643336248, -.00074847778305,
-.00043796389511, -.00020644729789, -.00004681968717, .00001178278294,
.00001443468876, -.00016643336248, .00019001974882, -.00027573582025,
-.01559997980204, -.00694809209467, -.003107676362, .00058658166731,
.00007766055925, -.00074847778305, -.00027573582025, .0104772192675
]).reshape(8,8)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons'.split()
results_multtwostepcenter = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
| 0.019034 |
"""
Takes the raw text and runs the sentence and keyword extraction.
Copyright (C) 2015 Nicholas Rutherford
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from sentenceSelection import selectSentences
from keywordExtraction import extractKeywords
import sumUtil
import os
TEXT_DIR = "./hnSummarized/text/"
SUM_DIR = "./hnSummarized/summaries/"
NUM_SENTENCES = 10
NUM_KEYWORDS = 4
def hasFailedBefore(downfile):
"""Checks if a file has failed before
Args:
downfile (str) - The current file to check
Returns:
Bool - Whether the file has failed to summarize previously
"""
failed = sumUtil.loadFailed()
return downfile in failed
def checkIn(downfile):
"""Mark a file as failing.
Args:
downfile (str) - The current file to check
Notes:
The file is marked as failed, then it is summarized. If the summary
runs fine, then the file is removed from the failing list. If the
summary runs into a segfault, then the file will remain marked
as failed, and will not be tried again.
"""
failed = sumUtil.loadFailed()
failed.append(downfile)
sumUtil.saveFailed(failed)
def checkOut(downfile):
"""Mark a file as completed succesfully.
Args:
downfile (str) - The current file to check
"""
failed = sumUtil.loadFailed()
failed.remove(downfile)
sumUtil.saveFailed(failed)
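# Intended flow (sketch): checkIn() marks the file before summarising, so a crash
# (e.g. a segfault in the summariser) leaves it marked and it is skipped next run;
# checkOut() is only reached on success and removes the mark.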
def summariseAll():
for folder in sumUtil.listDirectory(TEXT_DIR):
for downFile in sumUtil.listDirectory(TEXT_DIR + folder):
if not os.path.isfile(SUM_DIR + folder + "/" + downFile):
print downFile
if (not hasFailedBefore(downFile)):
checkIn(downFile)
path = TEXT_DIR + folder + "/" + downFile
rawText = sumUtil.loadFile(path)
# Summarise
summary = selectSentences(rawText, NUM_SENTENCES)
# Key Words
keyWordsList = extractKeywords(rawText, NUM_KEYWORDS)
keyWords = " | ".join(keyWordsList)
toSave = keyWords + "\n" + summary
sumUtil.saveAndMakePath(SUM_DIR + folder + "/",
downFile, toSave)
checkOut(downFile)
if __name__ == "__main__":
summariseAll()
| 0 |
"""
*** Marc file program runner ***
1: Output HI
2: Output Python
3: Output Hello world
Use the Marc file assembler to compile your ".masm" file to ".mcf"
"""
class EmulatedSystem:
    # Registers and machine state (class-level defaults)
    regA = regB = regC = regD = regX = 0
    accumulator = 0
    programCounter = 0
    stackPointer = 0
    heap = [0x00 for i in range(0xFFF)]
    stack = []

    def emulate(self, codeBuffer):
        # Goes through the buffer and checks the opcodes
        for raw in codeBuffer:
            opcode = int(str(raw).strip())
            if opcode == 0x00:  # OUT: print the X register and stop
                print(self.regX)
                break
            elif opcode == 0x01:  # IN: read a value into the X register
                check = int(input("Program requires input... "))
                # The original upper bound was truncated; 0x100 is an assumption
                if check > -1 and check < 0x100:
                    self.regX = check
            elif opcode == 0x02:
                print("Hello world!")  # Execution 3: prints "Hello world!"
            else:
                # Prints if the instruction is not recognised
                print("Corrupt file, please consult the comment reference for opcodes.")
                # Waits for input
                temp = input("Press any key to quit.")
                # Preferred exit method
                sys.exit(1)
#Import system library - Needed for exiting
import sys
#Init contents
contents = []
#Get input
filename = input('Please select your compiled binary ".mcf": ')
#Checks last 4 letters are .mcf, and prints an error message if it's not
if not filename.endswith(".mcf"):
#Prints an error message
print('FATAL ERROR, please use a ".mcf" file.')
#Waits for input
temp = input("Press any key to quit.")
#Preferred exit method
sys.exit(1)
#Opens file and reads contents
with open(filename, "r") as target:
#Reads first line of file
contents = target.readline()
#Closes the file
target.close()
#Goes through file contents and checks the opcodes
for i in contents:
    i = i.strip()
    # Skip the trailing newline returned by readline() instead of treating it as corrupt
    if not i:
        continue
    if i == "1":
        print("HI")  # Execution 1: prints "HI"
    elif i == "2":
        print("Python!")  # Execution 2: prints "Python!"
    elif i == "3":
        print("Hello world!")  # Execution 3: prints "Hello world!"
    else:
        # Prints if the instruction is not recognised
        print("Corrupt file, please consult the comment reference for opcodes.")
        # Waits for input
        temp = input("Press any key to quit.")
        # Preferred exit method
        sys.exit(1)
#Waits for input
temp = input("\nPress any key to quit.")
| 0.029206 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
import logging
import os
import time
from kafka import KafkaConsumer
from kser import KSER_METRICS_ENABLED
from kser.controller import Controller
logger = logging.getLogger(__name__)
class Consumer(object):
REGISTRY = Controller
def __init__(self, config, topics):
self.client = KafkaConsumer(**config)
self.client.subscribe(topics)
self.clean_lock()
def __del__(self):
for lockfile in (os.environ['LOCK_FILE'], os.environ['RUNNING_FILE']):
if os.path.exists(lockfile):
logger.debug("Cleaning existing lock file: {}".format(lockfile))
os.remove(lockfile)
def clean_lock(self):
if os.path.exists(os.environ['LOCK_FILE']):
logger.debug("Cleaning existing pause file: {}".format(
os.environ['LOCK_FILE']
))
os.remove(os.environ['LOCK_FILE'])
def is_active(self):
return not os.path.exists(os.environ['LOCK_FILE'])
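    # Consumption is paused while the file at $LOCK_FILE exists and resumes once it
    # is removed; run() re-checks this flag on every loop iteration.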
def run(self):
""" Run consumer
"""
if KSER_METRICS_ENABLED == "yes":
from prometheus_client import start_http_server
logger.info("Metric.Starting...")
start_http_server(
os.getenv("KSER_METRICS_PORT", 8888),
os.getenv("KSER_METRICS_ADDRESS", "0.0.0.0")
)
logger.info("{}.Starting...".format(self.__class__.__name__))
while True:
if self.is_active() is True:
msg = next(self.client)
data = msg.value.decode('utf-8')
if self.client.config['enable_auto_commit'] is False:
self.client.commit()
logger.debug("{}: Manual commit done.".format(
self.__class__.__name__
))
self.REGISTRY.run(data)
else:
logger.warning("Consumer is paused")
time.sleep(60)
| 0.000483 |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
from catkin_pkg.packages import find_packages
from catkin_tools.argument_parsing import add_context_args
from catkin_tools.context import Context
from catkin_tools.metadata import find_enclosing_workspace
from catkin_tools.terminal_color import ColorMapper
color_mapper = ColorMapper()
clr = color_mapper.clr
def prepare_arguments(parser):
add_context_args(parser) # Adds the --profile option, possibly other things.
# Behavior
behavior_group = parser.add_argument_group('Behavior')
add = behavior_group.add_argument
add('-e', '--existing-only', action='store_true',
help="Only print paths to existing directories.")
add('-r', '--relative', action='store_true',
help="Print relative paths instead of the absolute paths.")
add('-q', '--quiet', action='store_true',
help="Suppress warning output.")
# Path options
dir_group = parser.add_argument_group(
'Sub-Space Options',
'Get the absolute path to one of the following locations in the given '
'workspace with the given profile.')
dir_group_mut = dir_group.add_mutually_exclusive_group()
add = dir_group_mut.add_argument
add('-s', '--src', dest='space', action='store_const', const='src',
help="Get the path to the source space.")
add('-b', '--build', dest='space', action='store_const', const='build',
help="Get the path to the build space.")
add('-d', '--devel', dest='space', action='store_const', const='devel',
help="Get the path to the devel space.")
add('-i', '--install', dest='space', action='store_const', const='install',
help="Get the path to the install space.")
pkg_group = parser.add_argument_group(
'Package Directories',
"Get the absolute path to package directories in the given workspace "
"and sub-space. By default this will output paths in the workspace's "
"source space. If the -b (--build) flag is given, it will output the "
"path to the package's build directory. If the -d or -i (--devel or "
"--install) flags are given, it will output the path to the package's "
"share directory in that space. If no package is provided, the base "
"space paths are printed, e.g. `catkin locate -s` might return "
"`/path/to/ws/src` and `catkin locate -s foo` might return "
"`/path/to/ws/src/foo`.")
add = pkg_group.add_argument
add('package', metavar='PACKAGE', nargs='?',
help="The name of a package to locate.")
special_group = parser.add_argument_group(
'Special Directories',
'Get the absolute path to a special catkin location')
add = special_group.add_argument
add('--shell-verbs', action='store_true',
help="Get the path to the shell verbs script.")
add('--examples', action='store_true',
help="Get the path to the examples directory.")
return parser
def main(opts):
# Initialize dictionary version of opts namespace
opts_vars = vars(opts) if opts else {}
# Check for special locations
root_resource_path = os.path.join(os.path.dirname(__file__), '..', '..')
if opts.shell_verbs:
shell_verbs = os.path.join(root_resource_path, 'verbs', 'catkin_shell_verbs.bash')
print(os.path.normpath(shell_verbs))
sys.exit(0)
elif opts.examples:
shell_verbs = os.path.join(root_resource_path, '..', 'docs', 'examples')
print(os.path.normpath(shell_verbs))
sys.exit(0)
# Get the workspace (either the given directory or the enclosing ws)
workspace_hint = opts_vars.get('workspace', None) or os.getcwd()
workspace = find_enclosing_workspace(workspace_hint)
if not workspace:
if not opts.quiet:
print(clr("@{rf}ERROR: No workspace found containing '%s'@|" % workspace_hint), file=sys.stderr)
sys.exit(1)
# Load the context to get the subspaces
ctx = Context.load(workspace, opts.profile, opts, load_env=False)
path = None
if opts.space:
# Get the subspace
if opts.space == 'src':
path = ctx.source_space_abs
elif opts.space == 'build':
path = ctx.build_space_abs
elif opts.space == 'devel':
path = ctx.devel_space_abs
elif opts.space == 'install':
path = ctx.install_space_abs
if opts.package:
# Get the path to the given package
path = path or ctx.source_space_abs
if opts.space == 'build':
path = os.path.join(path, opts.package)
elif opts.space in ['devel', 'install']:
path = os.path.join(path, 'share', opts.package)
else:
try:
packages = find_packages(path, warnings=[])
catkin_package = [pkg_path for pkg_path, p in packages.items() if p.name == opts.package]
if catkin_package:
path = os.path.join(path, catkin_package[0])
else:
print(clr("@{rf}ERROR: Could not locate a package named '%s' in path '%s'@|" %
(opts.package, path)), file=sys.stderr)
sys.exit(2)
except RuntimeError as e:
print(clr('@{rf}ERROR: %s@|' % str(e)), file=sys.stderr)
sys.exit(1)
elif not opts.space:
# Get the path to the workspace root
path = workspace
# Check if the path exists
if opts.existing_only and not os.path.exists(path):
print(clr("@{rf}ERROR: Requested path '%s' does not exist.@|" % path), file=sys.stderr)
sys.exit(1)
# Make the path relative if desired
if opts.relative:
path = os.path.relpath(path, os.getcwd())
# Print the path
print(path)
| 0.001087 |
#! /usr/bin/env python
import roslib
from datetime import datetime, timedelta
roslib.load_manifest('nextfood_tasks')
import rospy
import actionlib
import time
from client_pwm_driver_x6.srv import OperatePwm
from client_relay_x1.srv import OperateRelay
from nextfood_tasks.msg import *
class DoWaterPumpingServer:
_pump_startup_time = 0
_device_valve_1 = 1
_device_valve_2 = 2
_device_valve_3 = 3
def __init__(self, name):
self._action_name = name
self.connect_server()
def connect_server(self):
rospy.loginfo("Registering and starting action server: " + self._action_name)
self._server = actionlib.SimpleActionServer(self._action_name, DoWaterPumpingAction, self.execute, False)
self._server.start()
def execute(self, goal):
success = True
rospy.loginfo("Starting Water Pumping. Water Pumping for {} s".format(goal.water_pumping_time))
set_pump = None
try:
set_pump = rospy.ServiceProxy('growbox_water_pump/relay', OperateRelay)
pump_time = time.time()
# Setup
rospy.loginfo("Enabling Water Pumping.")
set_pump(True)
time.sleep(goal.water_pumping_time)
set_pump(False)
rospy.loginfo("Shutting off Water Pump: Elapsed {} s".format(time.time()-pump_time))
total_pump_time = time.time() - pump_time
res = DoWaterPumpingResult()
res.total_pump_time = total_pump_time
rospy.loginfo('Water Pumping action server has finished its goal. Total water pump time {} s.'.format(total_pump_time))
self._server.set_succeeded(res)
        except rospy.ServiceException as e:
rospy.logerr("Service call for Water Pumping failed: {}".format(e))
if set_pump:
set_pump(False)
self.connect_server()
except BaseException as e:
rospy.logerr("Water Pumping execution failed: " + str(e))
if set_pump:
set_pump(False)
self.connect_server()
if __name__ == '__main__':
rospy.init_node('water_pumping_server')
server = DoWaterPumpingServer(rospy.get_name())
rospy.spin()
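# Minimal client sketch (assumes the generated DoWaterPumpingGoal message exposes
# the water_pumping_time field used above; not part of the original file):
#   client = actionlib.SimpleActionClient('water_pumping_server', DoWaterPumpingAction)
#   client.wait_for_server()
#   client.send_goal(DoWaterPumpingGoal(water_pumping_time=10.0))
#   client.wait_for_result()  # result carries total_pump_time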
| 0.011021 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
Distributed training with sync replicas optimizer. See
`convnet.train_mnist_distributed_sync_replicas` for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.contrib.kfac.examples import convnet
FLAGS = flags.FLAGS
flags.DEFINE_integer("task", -1, "Task identifier")
flags.DEFINE_string("data_dir", "/tmp/mnist", "local mnist dir")
flags.DEFINE_string(
"cov_inv_op_strategy", "chief_worker",
"In dist training mode run the cov, inv ops on chief or dedicated workers."
)
flags.DEFINE_string("master", "local", "Session master.")
flags.DEFINE_integer("ps_tasks", 2,
"Number of tasks in the parameter server job.")
flags.DEFINE_integer("replicas_to_aggregate", 5,
"Number of replicas to aggregate.")
flags.DEFINE_integer("worker_replicas", 5, "Number of replicas in worker job.")
flags.DEFINE_integer("num_epochs", None, "Number of epochs.")
def _is_chief():
"""Determines whether a job is the chief worker."""
if "chief_worker" in FLAGS.brain_jobs:
return FLAGS.brain_job_name == "chief_worker"
else:
return FLAGS.task == 0
def main(unused_argv):
_ = unused_argv
convnet.train_mnist_distributed_sync_replicas(
FLAGS.task, _is_chief(), FLAGS.worker_replicas, FLAGS.ps_tasks,
FLAGS.master, FLAGS.data_dir, FLAGS.num_epochs, FLAGS.cov_inv_op_strategy)
if __name__ == "__main__":
tf.app.run(main=main)
| 0.003549 |
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
"""
nums = sorted(nums)
ans = set()
print(nums)
for a_index in range(0, len(nums)-2):
b_index = a_index+1
c_index = len(nums)-1
while b_index < c_index:
print(a_index, b_index, c_index)
if nums[a_index] + nums[b_index] + nums[c_index] == 0:
ans.add((nums[a_index], nums[b_index], nums[c_index]))
print(ans)
b_index += 1
c_index -= 1
# break
elif nums[a_index] + nums[b_index] + nums[c_index] < 0:
b_index += 1
else:
c_index -= 1
print(ans)
ans_list = []
for i in ans:
ans_list.append(list(i))
print(list(ans_list))
return list(ans_list)
soln = Solution()
#soln.threeSum([-1, 0, 1, 2, -1, -4])
#soln.threeSum([0, 0, 0, 0])
soln.threeSum([-2,0,1,1,2])
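# Expected result for the call above (triplet/element order may vary, since a set
# of sorted tuples is used internally): [[-2, 0, 2], [-2, 1, 1]]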
| 0.005319 |
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import codecs
import os
import sys
from setuptools import find_packages, setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
# __version__ = "0.9"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
long_description = read('README.rst')
setup(
name="pip",
version=get_version("src/pip/__init__.py"),
description="The PyPA recommended tool for installing Python packages.",
long_description=long_description,
license='MIT',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
url='https://pip.pypa.io/',
keywords='distutils easy_install egg setuptools wheel virtualenv',
author='The pip developers',
author_email='pypa-dev@groups.google.com',
package_dir={"": "src"},
packages=find_packages(
where="src",
exclude=["contrib", "docs", "tests*", "tasks"],
),
package_data={
"pip._vendor.certifi": ["*.pem"],
"pip._vendor.requests": ["*.pem"],
"pip._vendor.distlib._backport": ["sysconfig.cfg"],
"pip._vendor.distlib": ["t32.exe", "t64.exe", "w32.exe", "w64.exe"],
},
entry_points={
"console_scripts": [
"pip=pip._internal.cli.main:main",
"pip{}=pip._internal.cli.main:main".format(sys.version_info[0]),
"pip{}.{}=pip._internal.cli.main:main".format(
*sys.version_info[:2]
),
],
},
zip_safe=False,
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
| 0 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
seed: The graph-level random seed.
"""
def __init__(self, name, read_only_collections=True):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
read_only_collections: whether to not write function graph collections
back to default graph. Defaults to True.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.structured_outputs = None
self._read_only_collections = read_only_collections
self._weak_variables = []
self.outer_graph = ops.get_default_graph()
self.captures = collections.OrderedDict()
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
device_type = context.context().device_spec.device_type
self._xla_compile = (device_type == "TPU" or device_type == "XLA_GPU"
or device_type == "XLA_CPU")
else:
self.seed = graph.seed
self._xla_compile = getattr(graph, "_xla_compile", False)
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if not self._read_only_collections:
self._collections = graph._collections # pylint: disable=protected-access
else:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
old_device_stack = self._device_function_stack
if context.executing_eagerly():
if self._distribution_strategy_stack or self._xla_compile:
self._add_device_to_stack(context.context().device_name)
else:
if (self._distribution_strategy_stack
or self._xla_compile
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
with outer_cm as g:
try:
yield g
finally:
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def create_op(
self,
op_type,
inputs,
dtypes,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self.captures.values())
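# FuncGraph.capture, illustrated (comment only, not part of the original file):
# if `t` comes from an outer graph or the eager context, then
#     ph = func_graph.capture(t)
# creates a placeholder `ph` in `func_graph`, records {t: ph} in `captures`, and
# appends `ph` to `inputs`; a second capture(t) returns the same placeholder.
# Tensors already belonging to `func_graph` are returned unchanged.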
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
add_control_dependencies=True,
arg_names=None,
op_return_value=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies
else:
control_manager = ops.NullContextmanager
with func_graph.as_default(), control_manager() as a:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
func_args = _get_defun_inputs_from_args(args, arg_names)
func_kwargs = _get_defun_inputs_from_kwargs(kwargs)
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, nest.flatten(func_kwargs))
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_indexed_slices(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
this_tape = tape.push_new_tape()
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
# Note: functions annotated with @tf.function should always be
# converted even though they would meet autograph's whitelisting
# criteria.
# If this assumption is ever broken, converted_call will need to
# handle the possibility of original_func still being a shim, e.g.
# bound to WeakrefSelf.
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
verbose=autograph.Verbosity.BRIEF,
recursive=True,
strip_decorators=(def_function.function,),
optional_features=(),
force_conversion=True,
), *args, **kwargs)
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
tf_decorator.rewrap(python_func, original_func, converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, IndexedSlices,
# SparseTensors, TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
tape.pop_tape(this_tape)
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
tape_variables = this_tape.watched_variables()
arg_variables = set()
inputs = []
for arg in nest.flatten(func_args) + nest.flatten(func_kwargs):
if isinstance(arg, resource_variable_ops.ResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle)
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in tape_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures.items():
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):
if arg1 is not arg2:
raise ValueError(errmsg)
def flatten(sequence):
"""Like `nest.flatten` but also unpacks other Tensor-like objects.
Flattens non-tensor objects into their constituent tensors.
Args:
sequence: A nested structure of Tensors, IndexedSlices, SparseTensors and
TensorArrays.
Returns:
A list of tensors.
"""
# TODO(akshayka): Support `SparseTensor` in a similar fashion.
flat_sequence = nest.flatten(sequence)
outputs = []
for item in flat_sequence:
if isinstance(item, ops.IndexedSlices):
if item.dense_shape is not None:
outputs.extend([item.values, item.indices, item.dense_shape])
else:
outputs.extend([item.values, item.indices])
elif isinstance(item, sparse_tensor.SparseTensor):
outputs.extend([item.indices, item.values, item.dense_shape])
elif isinstance(item, tensor_array_ops.TensorArray):
outputs.append(item.flow)
else:
outputs.append(item)
return outputs
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also packs other Tensor-like objects.
Args:
structure: The structure to pack into. May contain Tensors, IndexedSlices,
TensorArrays or SparseTensors.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flattened_structure = nest.flatten(structure)
flat_sequence_with_slices_and_tas = []
index = 0
for t in flattened_structure:
if isinstance(t, ops.IndexedSlices):
if t.dense_shape is not None:
flat_sequence_with_slices_and_tas.append(
ops.IndexedSlices(*flat_sequence[index:index + 3]))
index += 3
else:
flat_sequence_with_slices_and_tas.append(
ops.IndexedSlices(*flat_sequence[index:index + 2]))
index += 2
elif isinstance(t, sparse_tensor.SparseTensor):
flat_sequence_with_slices_and_tas.append(
sparse_tensor.SparseTensor(*flat_sequence[index:index + 3]))
index += 3
elif isinstance(t, tensor_array_ops.TensorArray):
flow = flat_sequence[index]
ta = tensor_array_ops.build_ta_with_new_flow(t, flow)
flat_sequence_with_slices_and_tas.append(ta)
index += 1
else:
flat_sequence_with_slices_and_tas.append(flat_sequence[index])
index += 1
assert len(flattened_structure) == len(flat_sequence_with_slices_and_tas)
return nest.pack_sequence_as(structure, flat_sequence_with_slices_and_tas)
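# For example, an IndexedSlices with a dense_shape flattens to three tensors
# [values, indices, dense_shape] and is rebuilt from the next three entries of
# flat_sequence; a TensorArray contributes (and consumes) only its `flow` tensor.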
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(args, names, structure=args)
def _get_defun_inputs(flat_args, names, structure):
"""Maps python function args to graph-construction inputs.
Args:
flat_args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`flat_args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
Returns:
Placeholders with the same structure as `structure`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(flat_args)
for arg_value, name in zip(flat_args, names):
for arg in nest.flatten(arg_value):
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder = graph_placeholder(
arg.dtype, arg.shape,
name=requested_name)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.ResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs)
def _get_defun_inputs_from_kwargs(kwargs):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, flat_args = zip(*sorted(kwargs.items()))
else:
names = []
flat_args = []
return _get_defun_inputs(flat_args, names, structure=kwargs)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while func_graph.captures:
func_graph.captures.popitem()
memory.dismantle_ordered_dict(func_graph.captures)
ops.dismantle_graph(func_graph)
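# Rough tracing sketch using the helper above (names and values are illustrative;
# TF 1.x-era internals assumed):
#   def two_x(x):
#     return 2.0 * x
#   fg = func_graph_from_py_func("two_x", two_x,
#                                args=(ops.convert_to_tensor(3.0),), kwargs={})
#   # fg.inputs holds the generated placeholder, fg.outputs the traced result,
#   # and fg.captures maps externally captured tensors to their placeholders.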
| 0.007343 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import queue
import sys
import threading
import time
from sqlalchemy.exc import ProgrammingError, OperationalError
from flexget.task import TaskAbort
log = logging.getLogger('task_queue')
class TaskQueue(object):
"""
Task processing thread.
    Only executes one task at a time; if more are requested, they are queued up and run in turn.
"""
def __init__(self):
self.run_queue = queue.PriorityQueue()
self._shutdown_now = False
self._shutdown_when_finished = False
self.current_task = None
# We don't override `threading.Thread` because debugging this seems unsafe with pydevd.
# Overriding __len__(self) seems to cause a debugger deadlock.
self._thread = threading.Thread(target=self.run, name='task_queue')
self._thread.daemon = True
def start(self):
self._thread.start()
def run(self):
while not self._shutdown_now:
# Grab the first job from the run queue and do it
try:
self.current_task = self.run_queue.get(timeout=0.5)
except queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
try:
self.current_task.execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (self.current_task.name, e))
except (ProgrammingError, OperationalError):
log.critical('Database error while running a task. Attempting to recover.')
self.current_task.manager.crash_report()
except Exception:
log.critical('BUG: Unhandled exception during task queue run loop.')
self.current_task.manager.crash_report()
finally:
self.run_queue.task_done()
self.current_task = None
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning(
'task queue shut down with %s tasks remaining in the queue to run.'
% remaining_jobs
)
else:
log.debug('task queue shut down')
def is_alive(self):
return self._thread.is_alive()
def put(self, task):
"""Adds a task to be executed to the queue."""
self.run_queue.put(task)
def __len__(self):
return self.run_queue.qsize()
def shutdown(self, finish_queue=True):
"""
Request shutdown.
:param bool finish_queue: Should all tasks be finished before ending thread.
"""
log.debug('task queue shutdown requested')
if finish_queue:
self._shutdown_when_finished = True
if self.run_queue.qsize():
log.verbose(
'There are %s tasks to execute. Shutdown will commence when they have completed.'
% self.run_queue.qsize()
)
else:
self._shutdown_now = True
def wait(self):
"""
Waits for the thread to exit.
Allows abortion of task queue with ctrl-c
"""
if sys.version_info >= (3, 4):
# Due to python bug, Thread.is_alive doesn't seem to work properly under our conditions on python 3.4+
# http://bugs.python.org/issue26793
# TODO: Is it important to have the clean abortion? Do we need to find a better way?
self._thread.join()
return
try:
while self._thread.is_alive():
time.sleep(0.5)
except KeyboardInterrupt:
log.error('Got ctrl-c, shutting down after running task (if any) completes')
self.shutdown(finish_queue=False)
# We still wait to finish cleanly, pressing ctrl-c again will abort
while self._thread.is_alive():
time.sleep(0.5)
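# Typical usage sketch (Task objects come from the FlexGet manager; illustrative):
#   task_queue = TaskQueue()
#   task_queue.start()
#   task_queue.put(task)                # queue a flexget.task.Task for execution
#   task_queue.shutdown(finish_queue=True)
#   task_queue.wait()                   # block until the worker thread exits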
| 0.002458 |
# This script embeds the PortMidi Portable MIDI library via ctypes.
# It is based on Grant Yoshida's 2007 version with some updates from
# Christopher Arndt from 2009 with some bugs fixed and restructured a bit.
import os
import sys
from ctypes import (CDLL, CFUNCTYPE, POINTER, Structure, byref, c_char_p,
c_int32, c_uint, c_void_p, cast, create_string_buffer)
import macosx
# path to portmidi in the standalone .app bundle
_PM_MACOSX_APP = '../Frameworks/libportmidi.dylib'
# the basename of the portmidi/porttime libraries on different platforms
_PM_DLL = dict(
win32 = 'libportmidi-0',
)
_PT_DLL = dict(
win32 = 'libporttime-0',
)
if sys.platform.startswith('win'):
# ctypes.util.find_library() does not implement the full Windows DLL search
# order, so we have to provide it ourselves, so that the PortMidi DLL can
# be found. See the docstring of the find_library() function for more
# information.
from ctypes import windll, c_wchar_p, create_unicode_buffer
def get_system_directory():
"""Return the path of the Windos system directory as a unicode string."""
try:
windll.kernel32.GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
windll.kernel32.GetSystemDirectoryW.restype = c_uint
except AttributeError:
return None
else:
path = create_unicode_buffer(256)
plen = windll.kernel32.GetSystemDirectoryW(path, 256)
return plen and path.value or None
def get_windows_directory():
"""Return the path of the Windos directory as a unicode string."""
try:
windll.kernel32.GetWindowsDirectoryW.argtypes = [c_wchar_p, c_uint]
windll.kernel32.GetWindowsDirectoryW.restype = c_uint
except AttributeError:
return None
else:
path = create_unicode_buffer(256)
plen = windll.kernel32.GetWindowsDirectoryW(path, 256)
return plen and path.value or None
def find_library(name, prepend_paths=None):
r"""Find and return the path of the given DLL using the DLL search order.
'name' should be the basename of the DLL with or without the '.dll'
extension. If 'prepend_paths' is specified, it should be a list of
directories to be searched before the default ones.
The default search order searches these directories:
- The directory from where the application (i.e. the main Python script)
is loaded
- The Windows system directory (e.g. C:\Windows\System32)
- The Windows 16-bit system directory (e.g. C:\Windows\System)
- The Windows directory (e.g. C:\Windows)
- The current directory
- Any directory named on the PATH environment variable
"""
windir = get_windows_directory()
search_paths = (prepend_paths or []) + [
os.path.dirname(sys.argv[0]),
get_system_directory(),
os.path.join(windir, 'System'),
windir,
os.curdir
] + [p for p in os.environ['PATH'].split(os.pathsep) if p]
for directory in search_paths:
fname = os.path.join(directory, name)
if os.path.exists(fname):
return fname
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.exists(fname):
return fname
return None
dll_name = find_library(_PM_DLL['win32'], [os.path.dirname(__file__)])
elif sys.platform.startswith('darwin') and macosx.inside_app_bundle() and os.path.exists(_PM_MACOSX_APP):
dll_name = _PM_MACOSX_APP
else:
from ctypes.util import find_library
dll_name = find_library(_PM_DLL.get(sys.platform, 'portmidi'))
if dll_name is None:
raise ImportError("Couldn't find the PortMidi library.")
libpm = CDLL(dll_name)
# The portmidi library may be linked against porttime but not export its
# symbols. Then we need to load the porttime library as well.
if hasattr(libpm, 'Pt_Time'):
libpt = libpm
else:
libpt = CDLL(find_library(_PT_DLL.get(sys.platform, 'porttime')))
# portmidi.h
PmError = c_int32
# PmError enum
pmNoError = 0
pmHostError = -10000
pmInvalidDeviceId = -9999
pmInsufficientMemory = -9998
pmBufferTooSmall = -9997
pmBufferOverflow = -9996
pmBadPtr = -9995
pmBadData = -9994
pmInternalError = -9993
pmBufferMaxSize = -9992
libpm.Pm_Initialize.restype = PmError
libpm.Pm_Terminate.restype = PmError
PmDeviceID = c_int32
PortMidiStreamPtr = c_void_p
PmStreamPtr = PortMidiStreamPtr
PortMidiStreamPtrPtr = POINTER(PortMidiStreamPtr)
libpm.Pm_HasHostError.restype = c_int32
libpm.Pm_HasHostError.argtypes = [PortMidiStreamPtr]
libpm.Pm_GetErrorText.restype = c_char_p
libpm.Pm_GetErrorText.argtypes = [PmError]
libpm.Pm_GetHostErrorText.argtypes = [c_char_p, c_uint]
pmNoDevice = -1
class PmDeviceInfo(Structure):
_fields_ = [("structVersion", c_int32),
("interf", c_char_p),
("name", c_char_p),
("input", c_int32),
("output", c_int32),
("opened", c_int32)]
PmDeviceInfoPtr = POINTER(PmDeviceInfo)
libpm.Pm_CountDevices.restype = c_int32
libpm.Pm_GetDefaultOutputDeviceID.restype = PmDeviceID
libpm.Pm_GetDefaultInputDeviceID.restype = PmDeviceID
PmTimestamp = c_int32
PmTimeProcPtr = CFUNCTYPE(PmTimestamp, c_void_p)
NullTimeProcPtr = cast(None, PmTimeProcPtr)
# PmBefore is not defined
libpm.Pm_GetDeviceInfo.argtypes = [PmDeviceID]
libpm.Pm_GetDeviceInfo.restype = PmDeviceInfoPtr
libpm.Pm_OpenInput.restype = PmError
libpm.Pm_OpenInput.argtypes = [PortMidiStreamPtrPtr,
PmDeviceID,
c_void_p,
c_int32,
PmTimeProcPtr,
c_void_p]
libpm.Pm_OpenOutput.restype = PmError
libpm.Pm_OpenOutput.argtypes = [PortMidiStreamPtrPtr,
PmDeviceID,
c_void_p,
c_int32,
PmTimeProcPtr,
c_void_p,
c_int32]
libpm.Pm_SetFilter.restype = PmError
libpm.Pm_SetFilter.argtypes = [PortMidiStreamPtr, c_int32]
libpm.Pm_SetChannelMask.restype = PmError
libpm.Pm_SetChannelMask.argtypes = [PortMidiStreamPtr, c_int32]
libpm.Pm_Abort.restype = PmError
libpm.Pm_Abort.argtypes = [PortMidiStreamPtr]
libpm.Pm_Close.restype = PmError
libpm.Pm_Close.argtypes = [PortMidiStreamPtr]
PmMessage = c_int32
class PmEvent(Structure):
_fields_ = [("message", PmMessage),
("timestamp", PmTimestamp)]
PmEventPtr = POINTER(PmEvent)
libpm.Pm_Read.restype = PmError
libpm.Pm_Read.argtypes = [PortMidiStreamPtr, PmEventPtr, c_int32]
libpm.Pm_Poll.restype = PmError
libpm.Pm_Poll.argtypes = [PortMidiStreamPtr]
libpm.Pm_Write.restype = PmError
libpm.Pm_Write.argtypes = [PortMidiStreamPtr, PmEventPtr, c_int32]
libpm.Pm_WriteShort.restype = PmError
libpm.Pm_WriteShort.argtypes = [PortMidiStreamPtr, PmTimestamp, c_int32]
libpm.Pm_WriteSysEx.restype = PmError
libpm.Pm_WriteSysEx.argtypes = [PortMidiStreamPtr, PmTimestamp, c_char_p]
# porttime.h
# PtError enum
PtError = c_int32
ptNoError = 0
ptHostError = -10000
ptAlreadyStarted = -9999
ptAlreadyStopped = -9998
ptInsufficientMemory = -9997
PtTimestamp = c_int32
PtCallback = CFUNCTYPE(PmTimestamp, c_void_p)
libpt.Pt_Start.restype = PtError
libpt.Pt_Start.argtypes = [c_int32, PtCallback, c_void_p]
libpt.Pt_Stop.restype = PtError
libpt.Pt_Started.restype = c_int32
libpt.Pt_Time.restype = PtTimestamp
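# Quick smoke test using only the bindings declared above (sketch):
#   libpm.Pm_Initialize()
#   for i in range(libpm.Pm_CountDevices()):
#       info = libpm.Pm_GetDeviceInfo(i).contents
#       print(info.name, '(input)' if info.input else '(output)')
#   libpm.Pm_Terminate()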
| 0.003258 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Organization'
db.create_table('morello_organization', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('address_street', self.gf('django.db.models.fields.CharField')(max_length=50)),
('address_city', self.gf('django.db.models.fields.CharField')(max_length=60)),
('address_state', self.gf('django.db.models.fields.CharField')(max_length=30)),
('address_zip', self.gf('django.db.models.fields.CharField')(max_length=50)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal('morello', ['Organization'])
# Adding model 'Person'
db.create_table('morello_person', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name_first', self.gf('django.db.models.fields.CharField')(max_length=50)),
('name_middle', self.gf('django.db.models.fields.CharField')(max_length=50)),
('name_last', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('morello', ['Person'])
# Adding model 'Client'
db.create_table('morello_client', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('persons', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['morello.Person'], unique=True)),
('race_ethnicity', self.gf('django.db.models.fields.CharField')(max_length=1)),
('languages_spoken', self.gf('django.db.models.fields.CharField')(max_length=1)),
('marital_status', self.gf('django.db.models.fields.CharField')(max_length=1)),
('immigration_status', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('morello', ['Client'])
# Adding model 'Client_NextDoor'
db.create_table('morello_client_nextdoor', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('clients', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['morello.Client'], unique=True)),
('why_cx', self.gf('django.db.models.fields.CharField')(max_length=1)),
('arrested', self.gf('django.db.models.fields.CharField')(max_length=1)),
('convicted', self.gf('django.db.models.fields.CharField')(max_length=1)),
('employer_refused', self.gf('django.db.models.fields.CharField')(max_length=1)),
('employer_declined', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('morello', ['Client_NextDoor'])
# Adding model 'Notes'
db.create_table('morello_notes', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('sticky', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('morello', ['Notes'])
def backwards(self, orm):
# Deleting model 'Organization'
db.delete_table('morello_organization')
# Deleting model 'Person'
db.delete_table('morello_person')
# Deleting model 'Client'
db.delete_table('morello_client')
# Deleting model 'Client_NextDoor'
db.delete_table('morello_client_nextdoor')
# Deleting model 'Notes'
db.delete_table('morello_notes')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'morello.client': {
'Meta': {'object_name': 'Client'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immigration_status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'languages_spoken': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'persons': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['morello.Person']", 'unique': 'True'}),
'race_ethnicity': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'morello.client_nextdoor': {
'Meta': {'object_name': 'Client_NextDoor'},
'arrested': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'clients': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['morello.Client']", 'unique': 'True'}),
'convicted': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'employer_declined': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'employer_refused': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'why_cx': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'morello.notes': {
'Meta': {'object_name': 'Notes'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'morello.organization': {
'Meta': {'object_name': 'Organization'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'morello.person': {
'Meta': {'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_first': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name_last': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name_middle': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
    complete_apps = ['morello']
 | 0.008192 |
#!/usr/bin/env python
#
# THE KITTI VISION BENCHMARK SUITE: ROAD BENCHMARK
#
# File: simpleExample_evalTrainResults.py
#
# Copyright (C) 2013
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
#
# Authors: Tobias Kuehnl <tkuehnl@cor-lab.uni-bielefeld.de>
# Jannik Fritsch <jannik.fritsch@honda-ri.de>
#
import os, sys
import computeBaseline, evaluateRoad
#########################################################################
# test script to evaluate training data in perspective domain
#########################################################################
if __name__ == "__main__":
#datasetDir = '/hri/storage/user/rtds/KITTI_Road_Data'
#outputDir = '/hri/recordings/KITTI/road_dataset/'
# check for correct number of arguments.
if len(sys.argv)<2:
print "Usage: python simpleExample_evalTrainResults.py <datasetDir> <outputDir>"
print "<datasetDir> = base directory of the KITTI Road benchmark dataset (has to contain training and testing), e.g., /home/elvis/kitti_road/"
print "<outputDir> = Here the baseline results will be saved, e.g., /home/elvis/kitti_road/results/"
sys.exit(1)
# parse parameters
datasetDir = sys.argv[1]
assert os.path.isdir(datasetDir), 'Error <datasetDir>=%s does not exist' %datasetDir
if len(sys.argv)>2:
outputDir = sys.argv[2]
else:
# default
outputDir = os.path.join(datasetDir, 'results')
# Run computeBaseline script to generate example classification results on training set
trainDir = os.path.join(datasetDir, 'training')
outputDir_perspective = os.path.join(outputDir, 'baseline_perspective_train')
computeBaseline.main(trainDir, trainDir, outputDir_perspective)
# Toy example running evaluation on perspective train data
# Final evaluation on server is done in BEV space and uses a 'valid_map'
# indicating the BEV areas that are invalid
# (no correspondence in perspective space)
evaluateRoad.main(outputDir_perspective, trainDir)
| 0.010129 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutExceptions(Koan):
class MySpecialError(RuntimeError):
pass
def test_exceptions_inherit_from_exception(self):
mro = self.MySpecialError.__mro__
self.assertEqual('RuntimeError', mro[1].__name__)
self.assertEqual('StandardError', mro[2].__name__)
self.assertEqual('Exception', mro[3].__name__)
self.assertEqual('BaseException', mro[4].__name__)
def test_try_clause(self):
result = None
try:
self.fail("Oops")
except StandardError as ex:
result = 'exception handled'
self.assertEqual('exception handled', result)
self.assertEqual(True, isinstance(ex, StandardError))
self.assertEqual(False, isinstance(ex, RuntimeError))
self.assertTrue(issubclass(RuntimeError, StandardError), \
"RuntimeError is a subclass of StandardError")
self.assertEqual('Oops', ex[0])
def test_raising_a_specific_error(self):
result = None
try:
raise self.MySpecialError, "My Message"
except self.MySpecialError as ex:
result = 'exception handled'
self.assertEqual('exception handled', result)
self.assertEqual("My Message", ex[0])
def test_else_clause(self):
result = None
try:
pass
except RuntimeError:
result = 'it broke'
pass
else:
result = 'no damage done'
self.assertEqual('no damage done', result)
def test_finally_clause(self):
result = None
try:
self.fail("Oops")
except:
# no code here
pass
finally:
result = 'always run'
self.assertEqual('always run', result)
| 0.007606 |
# Copyright 2004-2005 Joe Wreschnig, Michael Urman
# 2015-2020 Nick Boultbee,
# 2016 Ryan Dellenbaugh,
# 2019 Peter Strulo
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from __future__ import annotations
from enum import Enum, auto
from typing import Optional, Type, Iterable, TypeVar
from quodlibet import print_d, config
from quodlibet.util import re_escape, cached_property
from . import _match as match
from ._match import Error, Node, False_
from ._parser import QueryParser
T = TypeVar("T")
class QueryType(Enum):
TEXT = auto()
VALID = auto()
INVALID = auto()
def __repr__(self):
# Compact representation
return self._name_
def __str__(self):
return self._name_
class Query(Node):
STAR: Iterable[str] = ["artist", "album", "title"]
"""Default tags to search in, use/extend and pass to Query()"""
Error: Type[Exception] = Error
"""Base error type"""
type: Optional[QueryType] = None
"""The QueryType value: VALID or TEXT"""
string: Optional[str] = None
"""The original string which was used to create this query"""
def __init__(self, string: str, star: Optional[Iterable[str]] = None):
"""Parses the query string and returns a match object.
:param string: The text to parse
:param star: Tags to look in, if none are specified in the query.
Defaults to those specified in `STAR`.
This parses the query language as well as some tagless shortcuts:
"foo bar" -> &(star1,star2=foo,star1,star2=bar)
"!foo" -> !star1,star2=foo
"&(foo, bar)" -> &(star1,star2=foo, star1,star2=bar)
"&(foo, !bar)" -> &(star1,star2=foo, !star1,star2=bar)
"|(foo, bar)" -> |(star1,star2=foo, star1,star2=bar)
"!&(foo, bar)" -> !&(star1,star2=foo, star1,star2=bar)
"!(foo, bar)" -> !star1,star2=(foo, bar)
etc...
"""
print_d(f"Creating query {string!r}")
if star is None:
star = self.STAR
assert isinstance(string, str)
self.star = list(star)
self.string = string
self.type = QueryType.VALID
try:
self._match = QueryParser(string, star=star).StartQuery()
if not self._match.valid:
self.type = QueryType.INVALID
return
except self.Error:
pass
if not set("#=").intersection(string):
for c in config.get("browsers", "ignored_characters"):
string = string.replace(c, "")
parts = ["/%s/d" % re_escape(s) for s in string.split()]
string = "&(" + ",".join(parts) + ")"
self.string = string
try:
self.type = QueryType.TEXT
self._match = QueryParser(string, star=star).StartQuery()
return
except self.Error:
pass
print_d("Query '%s' is invalid" % string)
self.type = QueryType.INVALID
self._match = False_()
@classmethod
def StrictQueryMatcher(cls, string):
"""Returns a Matcher for a strict, valid (non-freetext) Query,
or `None` if this fails.
"""
try:
return QueryParser(string).StartQuery()
except Error:
return None
def __repr__(self) -> str:
return "<Query string=%r type=%r star=%r>" % (
self.string, self.type, self.star)
@cached_property
def search(self):
return self._match.search
@cached_property
def filter(self):
return self._match.filter
@property
def valid(self) -> bool:
"""Whether a query is a valid full (not free-text) query"""
return self.type == QueryType.VALID
@property
def matches_all(self) -> bool:
"""Whether the resulting query will not filter anything"""
return isinstance(self._match, match.True_)
@property
def is_parsable(self) -> bool:
"""Whether the text can be parsed at all"""
return self.type is not QueryType.INVALID
def _unpack(self) -> Node:
# so that other classes can see the wrapped one and optimize
# the result using the type information
return self._match
def __or__(self, other: Node) -> Node:
return self._match.__or__(other)
def __and__(self, other: Node) -> Node:
return self._match.__and__(other)
def __neg__(self) -> Node:
return self._match.__neg__()
def __eq__(self, other) -> bool:
return (self.string == other.string and self.star == other.star and
self.type == other.type)
@classmethod
def validator(cls, string: str) -> Optional[bool]:
"""Returns True/False for a query, None for a text only query"""
query = cls(string)
type_ = query.type
if type_ == QueryType.VALID:
# in case of an empty but valid query we say it's "text"
if query.matches_all:
return None
return True
elif type_ == QueryType.INVALID:
return False
return None
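# Illustrative usage sketch (added for clarity, not part of the original module;
# the query strings are made-up examples):
#   q = Query("artist = 'bowie'")   # expected to parse fully -> QueryType.VALID
#   q = Query("bowie")              # plain text falls back to a search over the STAR tags -> QueryType.TEXT
#   Query.validator(text)           # True / False / None as described in its docstring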
| 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import curses
import pdb
import sys
import time
screen = None
def init(stdscr):
global screen
screen = stdscr
def finalize(stdscr=None):
if not stdscr and not screen:
raise Exception('either call init() first or provide a window object')
stdscr = screen if screen and not stdscr else stdscr
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
def debug(stdscr=None):
if not stdscr and not screen:
raise Exception('either call init() first or provide a window object')
stdscr = screen if screen and not stdscr else stdscr
finalize(stdscr)
debugger = pdb.Pdb()
debugger.reset()
debugger.do_where(None)
users_frame = sys._getframe().f_back # One frame up, outside this function
debugger.interaction(users_frame, None)
def log(msg):
with open('../giterm.log', 'a') as f:
full_msg = '{:<18}'.format(str(time.time())) + ': ' + str(msg)
full_msg = full_msg + '\n' if full_msg[-1] != '\n' else full_msg
f.write(full_msg)
# Use with:
# import cursutils
# cursutils.init(stdscr) # where stdscr is a `curses` Window object
# cursutils.debug()
| 0 |
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, tornado.ioloop
from tornado_app import TornadoApp
from proton import Message
from proton.handlers import CHandshaker
class Send:
def __init__(self, host, message):
self.host = host
self.message = message
# Use the handlers property to add some default handshaking
# behaviour.
self.handlers = [CHandshaker()]
def on_connection_init(self, event):
conn = event.connection
conn.hostname = self.host
# Every session or link could have their own handler(s) if we
# wanted simply by setting the "handler" slot on the
# given session or link.
ssn = conn.session()
# If a link doesn't have an event handler, the events go to
# its parent session. If the session doesn't have a handler
# the events go to its parent connection. If the connection
# doesn't have a handler, the events go to the reactor.
snd = ssn.sender("sender")
conn.open()
ssn.open()
snd.open()
def on_link_flow(self, event):
snd = event.sender
if snd.credit > 0:
dlv = snd.send(self.message)
dlv.settle()
snd.close()
snd.session.close()
snd.connection.close()
class Program:
def __init__(self, hostname, content):
self.hostname = hostname
self.content = content
def on_reactor_init(self, event):
# You can use the connection method to create AMQP connections.
# This connection's handler is the Send object. All the events
# for this connection will go to the Send object instead of
# going to the reactor. If you were to omit the Send object,
# all the events would go to the reactor.
event.reactor.connection(Send(self.hostname, Message(self.content)))
args = sys.argv[1:]
hostname = args.pop() if args else "localhost"
content = args.pop() if args else "Hello World!"
TornadoApp(Program(hostname, content))
tornado.ioloop.IOLoop.instance().start()
| 0.001401 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <TrentM@ActiveState.com>
@author: Jorge Orpinel <jorge@orpinel.com>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'vendor' in dirs:
dirs.remove('vendor')
if '_output' in dirs:
dirs.remove('_output')
if '_gopath' in dirs:
dirs.remove('_gopath')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
for name in files:
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', '_gopath', 'third_party', '.git', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
if f.endswith(".md"):
continue
if f.endswith(".yaml"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
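# Illustrative example (added for clarity, not part of the original script):
# for a known flag such as "log-flush-frequency", flags_to_re() contributes the
# fragment
#   [^\w${]log[-_]flush[-_]frequency[^\w]
# so both the dash and the underscore spellings are caught by line_has_bad_flag().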
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
print("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
    # walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| 0.005174 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
import logging
import datetime
from email.utils import parseaddr, formataddr
try:
from backports.configparser import ConfigParser
except ImportError:
from configparser import ConfigParser
from isso.compat import text_type as str
logger = logging.getLogger("isso")
# Python 2.6 compatibility
def total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def timedelta(string):
"""
Parse :param string: into :class:`datetime.timedelta`, you can use any
(logical) combination of Nw, Nd, Nh and Nm, e.g. `1h30m` for 1 hour, 30
minutes or `3w` for 3 weeks.
Raises a ValueError if the input is invalid/unparseable.
>>> print(timedelta("3w"))
21 days, 0:00:00
>>> print(timedelta("3w 12h 57m"))
21 days, 12:57:00
>>> print(timedelta("1h30m37s"))
1:30:37
>>> print(timedelta("1asdf3w"))
Traceback (most recent call last):
...
ValueError: invalid human-readable timedelta
"""
keys = ["weeks", "days", "hours", "minutes", "seconds"]
regex = "".join(["((?P<%s>\d+)%s ?)?" % (k, k[0]) for k in keys])
kwargs = {}
for k, v in re.match(regex, string).groupdict(default="0").items():
kwargs[k] = int(v)
rv = datetime.timedelta(**kwargs)
if rv == datetime.timedelta():
raise ValueError("invalid human-readable timedelta")
return datetime.timedelta(**kwargs)
class Section(object):
"""A wrapper around :class:`IssoParser` that returns a partial configuration
section object.
>>> conf = new({"foo": {"bar": "spam"}})
>>> section = conf.section("foo")
>>> conf.get("foo", "bar") == section.get("bar")
True
"""
def __init__(self, conf, section):
self.conf = conf
self.section = section
def get(self, key):
return self.conf.get(self.section, key)
def getint(self, key):
return self.conf.getint(self.section, key)
def getlist(self, key):
return self.conf.getlist(self.section, key)
def getiter(self, key):
return self.conf.getiter(self.section, key)
def getboolean(self, key):
return self.conf.getboolean(self.section, key)
class IssoParser(ConfigParser):
"""Parse INI-style configuration with some modifications for Isso.
* parse human-readable timedelta such as "15m" as "15 minutes"
* handle indented lines as "lists"
"""
def getint(self, section, key):
try:
delta = timedelta(self.get(section, key))
except ValueError:
return super(IssoParser, self).getint(section, key)
else:
try:
return int(delta.total_seconds())
except AttributeError:
return int(total_seconds(delta))
def getlist(self, section, key):
return list(map(str.strip, self.get(section, key).split(',')))
def getiter(self, section, key):
for item in map(str.strip, self.get(section, key).split('\n')):
if item:
yield item
def section(self, section):
return Section(self, section)
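# Illustrative example (added for clarity, not part of the original module),
# assuming an INI source that contains:
#   [general]
#   max-age = 15m
# IssoParser.getint("general", "max-age") returns 900: the human-readable
# timedelta is converted to whole seconds (plain integers are still parsed as-is).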
def new(options=None):
cp = IssoParser(allow_no_value=True)
if options:
cp.read_dict(options)
return cp
def load(default, user=None):
# return set of (section, option)
setify = lambda cp: set((section, option) for section in cp.sections()
for option in cp.options(section))
parser = new()
parser.read(default)
a = setify(parser)
if user:
parser.read(user)
for item in setify(parser).difference(a):
logger.warn("no such option: [%s] %s", *item)
if item in (("server", "host"), ("server", "port")):
logger.warn("use `listen = http://$host:$port` instead")
if item == ("smtp", "ssl"):
logger.warn("use `security = none | starttls | ssl` instead")
if item == ("general", "session-key"):
logger.info("Your `session-key` has been stored in the "
"database itself, this option is now unused")
if not parseaddr(parser.get("smtp", "from"))[0]:
parser.set("smtp", "from",
formataddr(("Ich schrei sonst!", parser.get("smtp", "from"))))
return parser
| 0.001146 |
"""
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on a ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.grid_search import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| 0.001735 |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example runs a report similar to the "Orders report" on the DFP
website with additional attributes and can filter to include just one order. To
download the report run download_report.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import time
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
report_service = client.GetService('ReportService', version='v201208')
order_id = 'INSERT_ORDER_ID_HERE'
# Create statement object to filter for an order.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
filter_statement = {'query': 'WHERE ORDER_ID = :id',
'values': values}
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['ORDER'],
'dimensionAttributes': ['ORDER_TRAFFICKER', 'ORDER_START_DATE_TIME',
'ORDER_END_DATE_TIME'],
'statement': filter_statement,
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS',
'AD_SERVER_CTR', 'AD_SERVER_CPM_AND_CPC_REVENUE',
'AD_SERVER_AVERAGE_ECPM'],
'dateRangeType': 'LAST_MONTH'
}
}
# Run report.
report_job = report_service.RunReportJob(report_job)[0]
# Wait for report to complete.
status = report_job['reportJobStatus']
while status != 'COMPLETED' and status != 'FAILED':
print 'Report job with \'%s\' id is still running.' % report_job['id']
time.sleep(30)
status = report_service.GetReportJob(report_job['id'])[0]['reportJobStatus']
if status == 'FAILED':
print ('Report job with id \'%s\' failed to complete successfully.'
% report_job['id'])
else:
print 'Report job with id \'%s\' completed successfully.' % report_job['id']
| 0.0033 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import config
import nodes
class document_davdir(osv.osv):
_inherit = 'document.directory'
_columns = {
# Placed here just for a reference
'dav_prop_ids': fields.one2many('document.webdav.dir.property', 'dir_id', 'DAV properties'),
}
def get_node_class(self, cr, uid, ids, dbro=None, dynamic=False, context=None):
# Note: in this function, nodes come from document_webdav/nodes.py !
if dbro is None:
dbro = self.browse(cr, uid, ids, context=context)
if dynamic:
return nodes.node_res_obj
elif dbro.type == 'directory':
return nodes.node_dir
elif dbro.type == 'ressource':
return nodes.node_res_dir
else:
raise ValueError("Directory node for %s type.", dbro.type)
def _prepare_context(self, cr, uid, nctx, context=None):
nctx.node_file_class = nodes.node_file
# We can fill some more fields, but avoid any expensive function
# that might be not worth preparing.
nctx.extra_ctx['webdav_path'] = '/'+config.get_misc('webdav','vdir','webdav')
usr_obj = self.pool.get('res.users')
res = usr_obj.read(cr, uid, uid, ['login','lang'])
if res:
nctx.extra_ctx['username'] = res['login']
nctx.extra_ctx['lang'] = res['lang']
# TODO group
return
def _locate_child(self, cr, uid, root_id, uri, nparent, ncontext):
""" try to locate the node in uri,
Return a tuple (node_dir, remaining_path)
"""
return (nodes.node_database(context=ncontext), uri)
class dav_dir_property(osv.osv):
""" Arbitrary WebDAV properties, attached to document.directories.
Some DAV properties have to be settable at directories, depending
on the database directory structure.
Example would be the principal-URL.
There _can_ be properties without a directory, which means that they
globally apply to all the directories (aka. collections) of the
present database.
"""
_name = 'document.webdav.dir.property'
_columns = {
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
'dir_id': fields.many2one('document.directory', 'Directory', required=False, select=1),
'namespace': fields.char('Namespace', size=127, required=True),
'name': fields.char('Name', size=64, required=True),
'value': fields.text('Value'),
'do_subst': fields.boolean('Substitute', required=True),
}
_defaults = {
'do_subst': False,
}
class dav_file_property(osv.osv):
""" Arbitrary WebDAV properties, attached to ir.attachments.
A special case is the locks that can be applied on file nodes.
There _can_ be properties without a file (RFC?), which means that they
globally apply to all the attachments of the present database.
TODO access permissions, per property.
"""
_name = 'document.webdav.file.property'
_columns = {
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
'file_id': fields.many2one('ir.attachment', 'Document', required=False, select=1),
'namespace': fields.char('Namespace', size=127, required=True),
'name': fields.char('Name', size=64, required=True),
'value': fields.text('Value'),
'do_subst': fields.boolean('Substitute', required=True),
}
_defaults = {
'do_subst': False,
}
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0.002765 |
# -*- coding: utf-8 -*-
import numpy as np
class Tuner():
def __init__(self, data):
self.data = data
@property
def size(self):
return len(self.data)
def penalty(self, order):
'''Return sum of distances between units
'''
x1 = np.take(self.data, order)
x2 = np.roll(x1, 1)
d = np.sum(abs(x1 - x2)[1:]) # (x1 - x2)[0] is the distance between the first and the last points
return d
def roll_to_max_distance(self, order):
        # Roll the order so that the first and the last points
        # are the most distant pair
x1 = np.take(self.data, order)
x2 = np.roll(x1, 1)
d = abs(x1 - x2)
idx = np.argmax(d)
return np.roll(order, -idx)
    def permute(self, index, order):
        '''Return all orders obtained by moving the point at `index`
        to every possible position
        '''
        tests = np.empty((self.size, self.size), dtype=np.int)
num = order[index]
base = np.delete(order, index)
for i in range(self.size):
tests[i, :] = np.insert(base, i, num)
return tests
def local_opt(self, init_order):
'''Reorder some points to find local optimum
'''
best_penalty = self.penalty(init_order)
best_order = init_order
final = False
while not final:
final = True
for idx in range(self.size):
for order in self.permute(idx, best_order):
if self.penalty(order) < best_penalty:
best_order = order
best_penalty = self.penalty(order)
final = False
break
return (best_penalty, best_order)
def reorder(self, init_order):
        # List of candidates (good start points) for optimization.
# Candidate 1: eliminate the biggest distance from the point list
cand1 = self.roll_to_max_distance(init_order)
if np.all(cand1 == init_order):
candidates = [init_order]
else:
candidates = [init_order, cand1]
        # TODO: This line can be done in 2 threads
penalties = [self.local_opt(order) for order in candidates]
if len(penalties) == 1:
return penalties[0][1]
if penalties[0][0] < penalties[1][0]:
return penalties[0][1]
else:
return penalties[1][1]
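# Illustrative worked example (added for clarity, not part of the original
# module), using made-up data:
#   t = Tuner(np.array([1, 5, 2]))
#   t.penalty([0, 2, 1])   # visits 1 -> 2 -> 5, so |2-1| + |5-2| = 4
#   t.penalty([1, 0, 2])   # visits 5 -> 1 -> 2, so |1-5| + |2-1| = 5
#   t.reorder([0, 1, 2])   # searches for the order with the smallest penalty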
| 0.000852 |
import cdms2, cdtime, string, numpy, os
def binary2ascii(var, fpath, opath=None, dlat=None, dlon=None, freq='daily',
                missing_value='default', speedup=True):
"""
    binary2ascii : Convert a binary file such as nc, ctl or pp into ascii csv
                   files. It creates an individual file for each year.
                   Each csv file contains the month, day, lat & lon information
                   along with the corresponding data.
                   The data extraction and writing are optimised by using the
                   numpy.tofile() function: each latitude grid's longitude
                   values are extracted as a one-dimensional array and written
                   to the file object in a single call, which is faster.
Inputs :
var - variable name
fpath - binary file input absolute path
opath - output directory path. Inside this folder, it should create
csv files with variable name along with year. If user didnt
pass any value for this, then it should create variable name
as folder name for the output in the current working
directory path.
dlat - need data lat shape in ascii. eg (0, 40)
dlon - need data lon shape in ascii. eg (60, 100)
freq - it takes either 'daily' or 'monthly'.
                It is just to speed up the time dimension loop by skipping 365
                days (daily) or 12 months (monthly) when moving to the
                next year's dataset.
missing_value - if missing_value passed by user, then that value
should be set while writing into csv file. By default it takes
'default' value, i.e. it will take fill_value from the binary
file information itself.
        speedup - This binary2ascii.py works fine only for full 12-month or
                 365-day data. If some months are missing in between,
                 it will fail to work, so in that case switch off this
                 speedup option.
        todo - to get the available years, we need to use the timeutils.py module.
               in that case, the above speedup option would not be needed.
Written By : Arulalan.T
Date : 22.08.2012
"""
inf = cdms2.open(fpath)
ftime = inf[var].getTime().asComponentTime()
latitude = inf[var].getLatitude()
longitude = inf[var].getLongitude()
lon = numpy.array(longitude)
preyear = None
premon = None
outf = None
if speedup:
if freq == 'daily':
ftime = ftime[::365]
elif freq == 'monthly':
ftime = ftime[::12]
else:
pass
if opath is None:
if not os.path.isdir(var):
os.mkdir(var)
print "Created Directory called ,", var
# end of if not os.path.isdir(var):
print "All the output files will be written inside the directory, ", var
opath = var
# end of if opath is None:
for ytime in ftime:
# loop through available years in the time axis
year = ytime.year
if preyear == year:
continue
else:
if outf and preyear:
print "The file writing finished for the year ", preyear
outf.close()
# end of if outf and preyear:
fname = var + '_' + str(year) + '.csv'
outf = open(opath + '/' + fname, 'w')
preyear = year
print "The file has created ", fname
print "Writing ..."
# year period
startPeriod = cdtime.comptime(year, 1, 1, ytime.hour)
endPeriod = cdtime.comptime(year, 12, 31, ytime.hour)
# get the data of one/each year & load into memory
if dlat and dlon:
# extract specified lat, lon for one/each year
data = inf(var, time=(startPeriod, endPeriod), latitude=dlat,
longitude=dlon)
# get the lat, lon axis w.r.t user need
latitude = data.getLatitude()
longitude = data.getLongitude()
lon = numpy.array(longitude)
else:
# extract all lat, lon for one/each year
data = inf(var, time=(startPeriod, endPeriod))
dtime = data.getTime()
if missing_value != 'default':
data.missing_value = missing_value
# end of if missing_value != 'default':
# make it as filled value and reset its axis informations
data = data.filled()
data = cdms2.createVariable(data)
# set the time, lat, lon axis w.r.t extracted shape of data
data.setAxisList([dtime, latitude, longitude])
# end of if preyear != year:
for ctime in dtime.asComponentTime():
# loop thorugh daily time for one/each year. i.e. 365/366
year = str(year)
mon = str(ctime.month)
day = str(ctime.day)
if premon != mon:
lonstr = string.joinfields(['Year', 'Mon', 'Day', 'Lat/Lon'], ',')
outf.write('\n' + lonstr + ',')
lon.tofile(outf, sep=',')
outf.write('\n')
premon = mon
print "Writing Month, ", mon
# end of if premon != mon:
latbegingstr = string.joinfields([year, mon, day], ',')
for lat in latitude:
latstr = latbegingstr + ',' + str(lat)
outf.write(latstr + ',')
# get the particular lat and all the longitude grid data.
val = numpy.array(data(time=ctime, latitude=lat))
# write the numpy array into fileobject with separation of
# comma. It is optimised one.
val.tofile(outf, sep=',')
outf.write('\n')
# end of for lat in latitude:
# end of for ctime in dtime.asComponentTime():
del data
# end of for time in ftime:
inf.close()
# end of def binary2ascii(var, fpath, opath):
if __name__ == '__main__':
binary2ascii('precip', 'srb1.xml')
| 0.001737 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.workspace import Workspace
from webkitpy.common.system.executive_mock import MockExecutive
class WorkspaceTest(unittest.TestCase):
def test_find_unused_filename(self):
filesystem = MockFileSystem({
"dir/foo.jpg": "",
"dir/foo-1.jpg": "",
"dir/foo-2.jpg": "",
})
workspace = Workspace(filesystem, None)
self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar")
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None)
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None)
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg")
def test_create_zip(self):
workspace = Workspace(None, MockExecutive(should_log=True))
expected_logs = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"
class MockZipFile(object):
def __init__(self, path):
self.filename = path
archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
self.assertEqual(archive.filename, "/zip/path")
def test_create_zip_exception(self):
workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path
Workspace.create_zip failed in /source/path:
MOCK ScriptError
MOCK output of child process
"""
class MockZipFile(object):
def __init__(self, path):
self.filename = path
archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
self.assertIsNone(archive)
| 0.003047 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from unittest import mock
from azure.identity.aio._credentials.app_service import AppServiceCredential
from azure.identity._constants import EnvironmentVariables
import pytest
from helpers_async import await_test
from recorded_test_case import RecordedTestCase
from test_app_service import PLAYBACK_URL
class RecordedTests(RecordedTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.is_live:
url = os.environ.get(EnvironmentVariables.MSI_ENDPOINT)
if not (url and EnvironmentVariables.MSI_SECRET in os.environ):
pytest.skip("Recording requires values for $MSI_ENDPOINT and $MSI_SECRET")
else:
self.scrubber.register_name_pair(url, PLAYBACK_URL)
self.patch = mock.MagicMock() # no need to patch anything when recording
else:
# in playback we need to set environment variables and clear any that would interfere
# (MSI_SECRET ends up in a header; vcr.py doesn't match headers, so the value doesn't matter)
env = {EnvironmentVariables.MSI_ENDPOINT: PLAYBACK_URL, EnvironmentVariables.MSI_SECRET: "redacted"}
self.patch = mock.patch.dict(os.environ, env, clear=True)
@await_test
async def test_system_assigned(self):
with self.patch:
credential = AppServiceCredential()
token = await credential.get_token(self.scope)
assert token.token
assert isinstance(token.expires_on, int)
@pytest.mark.usefixtures("user_assigned_identity_client_id")
@await_test
async def test_user_assigned(self):
with self.patch:
credential = AppServiceCredential(client_id=self.user_assigned_identity_client_id)
token = await credential.get_token(self.scope)
assert token.token
assert isinstance(token.expires_on, int)
| 0.002913 |
import textwrap
import pytest
from pre_commit_hooks.string_fixer import main
TESTS = (
# Base cases
("''", "''", 0),
('""', "''", 1),
(r'"\'"', r'"\'"', 0),
(r'"\""', r'"\""', 0),
(r"'\"\"'", r"'\"\"'", 0),
# String somewhere in the line
('x = "foo"', "x = 'foo'", 1),
# Test escaped characters
(r'"\'"', r'"\'"', 0),
# Docstring
('""" Foo """', '""" Foo """', 0),
(
textwrap.dedent(
"""
x = " \\
foo \\
"\n
""",
),
textwrap.dedent(
"""
x = ' \\
foo \\
'\n
""",
),
1,
),
('"foo""bar"', "'foo''bar'", 1),
)
@pytest.mark.parametrize(('input_s', 'output', 'expected_retval'), TESTS)
def test_rewrite(input_s, output, expected_retval, tmpdir):
path = tmpdir.join('file.py')
path.write(input_s)
retval = main([str(path)])
assert path.read() == output
assert retval == expected_retval
def test_rewrite_crlf(tmpdir):
f = tmpdir.join('f.py')
f.write_binary(b'"foo"\r\n"bar"\r\n')
assert main((str(f),))
assert f.read_binary() == b"'foo'\r\n'bar'\r\n"
| 0 |
import os
import time
import datetime
import re
from random import randint
from logging.handlers import BaseRotatingHandler
# sibling module than handles all the ugly platform-specific details of file locking
from portalocker import lock, unlock, LOCK_EX
__version__ = '0.0.1'
__author__ = "yorks"
class MultProcTimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, debug=False):
"""
* interval, backupCount is not working!!! *
Just Copied from logging.handlers.TimedRotatingFileHandler
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
"""
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.debug = debug
self.mylogfile = "%s.%08d" % ('/tmp/mptfhanldler', randint(0,99999999))
self.interval = 1 # datetime timedelta only have, days, seconds, microseconds
if self.when == 'S':
#self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
#self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
self.when = 'D' # MIDNIGHT is day, use day only
elif self.when.startswith('W'):
#self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch)
#self.interval = self.interval * interval # multiply by units requested
self.interval = self.interval * 1 # interval arg is not working
# lock file, contain next rollover timestamp
self.stream_lock = None
self.lock_file = self._getLockFile()
        # read the next rollover time from the lock file first, so this process inherits it from the first process
# if it is the first process, please remove the lock file by hand first
self.nextRolloverTime = self.getNextRolloverTime()
if not self.nextRolloverTime:
self.nextRolloverTime = self.computerNextRolloverTime()
self.saveNextRolloverTime()
def _log2mylog(self, msg):
time_str=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
msg=str(msg)
content="%s [%s]\n"% (time_str, msg)
fa = open( self.mylogfile, 'a')
fa.write(content)
fa.close()
def _getLockFile(self):
# Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
if self.baseFilename.endswith(".log"):
lock_file = self.baseFilename[:-4]
else:
lock_file = self.baseFilename
lock_file += ".lock"
return lock_file
def _openLockFile(self):
lock_file = self._getLockFile()
self.stream_lock = open(lock_file, 'w')
def computerNextRolloverTime(self):
""" Work out the next rollover time. """
nextTime = None
currentDateTime = datetime.datetime.now()
if self.utc:
currentDateTime = datetime.datetime.utcnow()
if self.when == 'D' :
nextDateTime = currentDateTime + datetime.timedelta(days=self.interval)
nextDate = nextDateTime.date()
nextTime = int( time.mktime(nextDate.timetuple()) )
elif self.when.startswith('W'):
days = 0
currentWeekDay = currentDateTime.weekday()
if currentWeekDay == self.dayOfWeek:
days = ( self.interval + 7 )
elif currentWeekDay < self.dayOfWeek:
days = self.dayOfWeek - currentWeekDay
else:
days = 6 - currentWeekDay + self.dayOfWeek + 1
nextDateTime = currentDateTime + datetime.timedelta(days=days)
nextDate = nextDateTime.date()
nextTime = int( time.mktime(nextDate.timetuple()) )
else:
tmpNextDateTime = currentDateTime + datetime.timedelta(seconds=self.interval)
nextDateTime = tmpNextDateTime.replace(microsecond=0)
if self.when == 'H':
nextDateTime = tmpNextDateTime.replace(minute=0, second=0, microsecond=0)
elif self.when == 'M':
nextDateTime = tmpNextDateTime.replace(second=0, microsecond=0)
nextTime = int( time.mktime(nextDateTime.timetuple()) )
return nextTime
def getNextRolloverTime(self):
""" get next rollover time stamp from lock file """
try:
fp = open(self.lock_file, 'r')
c = fp.read()
fp.close()
return int(c)
except:
return False
def saveNextRolloverTime(self):
""" save the nextRolloverTimestamp to lock file
            this is a flag to prevent multiple processes from rotating
            the log file again at the same rollover time.
"""
if not self.nextRolloverTime:
return 0
content = "%d"% self.nextRolloverTime
if not self.stream_lock:
self._openLockFile()
lock(self.stream_lock, LOCK_EX)
try:
self.stream_lock.seek(0)
self.stream_lock.write(content)
self.stream_lock.flush()
except:
if self.debug:self._log2mylog('saveNextRT exception!!!')
pass
finally:
unlock(self.stream_lock)
if self.debug:self._log2mylog('saveNextRT:%s'% content)
def acquire(self):
""" Acquire thread and file locks.
Copid from ConcurrentRotatingFileHandler
"""
# handle thread lock
BaseRotatingHandler.acquire(self)
# Issue a file lock. (This is inefficient for multiple active threads
# within a single process. But if you're worried about high-performance,
# you probably aren't using this log handler.)
if self.stream_lock:
# If stream_lock=None, then assume close() was called or something
# else weird and ignore all file-level locks.
if self.stream_lock.closed:
# Daemonization can close all open file descriptors, see
# https://bugzilla.redhat.com/show_bug.cgi?id=952929
# Try opening the lock file again. Should we warn() here?!?
try:
self._openLockFile()
except Exception:
# Don't try to open the stream lock again
self.stream_lock = None
return
lock(self.stream_lock, LOCK_EX)
# Stream will be opened as part by FileHandler.emit()
def release(self):
""" Release file and thread locks.
"""
try:
if self.stream_lock and not self.stream_lock.closed:
unlock(self.stream_lock)
except Exception:
pass
finally:
# release thread lock
BaseRotatingHandler.release(self)
def _close_stream(self):
""" Close the log file stream """
if self.stream:
try:
if not self.stream.closed:
self.stream.flush()
self.stream.close()
finally:
self.stream = None
def _close_stream_lock(self):
""" Close the lock file stream """
if self.stream_lock:
try:
if not self.stream_lock.closed:
self.stream_lock.flush()
self.stream_lock.close()
finally:
self.stream_lock = None
def close(self):
"""
Close log stream and stream_lock. """
try:
self._close_stream()
self._close_stream_lock()
finally:
self.stream = None
self.stream_lock = None
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
Copied from std lib
"""
t = int(time.time())
if t >= self.nextRolloverTime:
return 1
#print "No need to rollover: %d, %d" % (t, self.rolloverAt)
return 0
def doRollover(self):
""" Do a rollover,
0. close stream, stream_lock file handle
1. get lock
2. mv log log.$date
3. setting up nextRolloverTime
4. relese lock
"""
if self.debug:self._log2mylog('do Rollover')
self._close_stream()
self.acquire()
try:
fileNextRolloverTime = self.getNextRolloverTime()
if not fileNextRolloverTime:
if self.debug:self._log2mylog('getNextRolloverTime False, skip rotate!')
self.release()
return 0
# avoid other process do rollover again.
if self.nextRolloverTime < fileNextRolloverTime:
self.nextRolloverTime = fileNextRolloverTime
if self.debug:self._log2mylog('already rotated, skip this proc to rotate!')
self.release()
return 0
        except Exception:
pass
        # because the log content is older than self.nextRolloverTime,
        # we need to rename the old log to the dated filename.
        # do not use time.time()-1, because between the last rollover and
        # nextRolloverTime*N there may have been no log records at all.
time_tuple = time.localtime( self.nextRolloverTime - 1 )
dfn = self.baseFilename + "." + time.strftime(self.suffix, time_tuple)
# rename
if os.path.exists( dfn ):
bakname = dfn + ".bak"
while os.path.exists(bakname):
bakname = "%s.%08d" % (bakname, randint(0,99999999))
try:
os.rename(dfn, bakname)
except:
pass
if os.path.exists(self.baseFilename):
try:
if self.debug:self._log2mylog('rename %s to %s'% (self.baseFilename, dfn))
os.rename(self.baseFilename, dfn)
except:
pass
# set new nextRolloverTime
self.nextRolloverTime = self.computerNextRolloverTime()
self.saveNextRolloverTime()
if not self.delay:
self.stream = self._open()
self.release()
import logging.handlers
logging.handlers.MultProcTimedRotatingFileHandler = MultProcTimedRotatingFileHandler
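# Minimal usage sketch (added for clarity; the path and format string are made up):
#   import logging
#   handler = MultProcTimedRotatingFileHandler('/var/log/app.log', when='D')
#   handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
#   logging.getLogger().addHandler(handler)
# Processes sharing the same log file also share the '<basename>.lock' file,
# which stores the next rollover timestamp so only one process renames the log.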
| 0.00808 |
#!/usr/bin/env python
"""
This script verifies the Limited-Knowledge(LK) and No-Knowledge(NK)
benchmark sets. The entry points for such verification are the
following two methods:
verify_LK_benchmark:
This method need to be invoked to verify LK-benchmark files.
The arguments for calling this method are the following:
t1_iea_handle: file handle to a t1_ea file
t1_exp_handle: file handle to a t1_exp file
t2_exp_handle: file handle to a t2_exp file
output_filename_LK_bpo: file name to a LK-bpo benchmark set
output_filename_LK_cco: file name to a LK-cco benchmark set
output_filename_LK_mfo: file name to a LK-mfo benchmark set
verify_NK_benchmark:
This method need to be invoked to verify NK-benchmark files. The
arguments for calling this method are the following:
t1_iea_handle: file handle to a t1_ea file
t1_exp_handle: file handle to a t1_exp file
t2_exp_handle: file handle to a t2_exp file
output_filename_LK_bpo: file name to a NK-bpo benchmark set
output_filename_LK_cco: file name to a NK-cco benchmark set
output_filename_LK_mfo: file name to a NK-mfo benchmark set
The following methods are invoked by the above two methods to create
required data structures and perform the verification:
create_iea_ann_dict:
This method builds a dictionary for <protein, GO ID> tuples
from t1_iea file.
create_exp_ann_dict:
This method builds three dictionaries in BPO, CCO, and MFO
categories for <protein, GO ID> tuples from a
t1_exp or t2_exp file.
check_LK_benchmark_creation(t1_iea_dict,
t1_xxo_dict,
t2_xxo_dict,
benchmark_fh):
This method verifies the benchmark entries in the benchmark file
passed by the file handle benchmark_fh.
Meaning of xxo: xxo is replaced runtime by bpo, cco, or mfo to
make this method specific to a certain type of benchmarks.
check_NK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t1_cco_dict,
t1_mfo_dict,
t2_xxo_dict,
benchmark_fh):
This method verifies the benchmark entries in the benchmark file
passed by the file handle benchmark_fh.
Meaning of xxo: xxo is replaced runtime by bpo, cco, or mfo to
make this method specific to a certain type of benchmarks.
"""
import os.path
import sys
from collections import defaultdict
import FormatChecker as fc
def create_iea_ann_dict(goa_iea_handle):
"""
This method builds a dictionary for <protein, GO ID> tuples
from t1_iea file.
"""
# Initialize a dictionar which will later be populated with
# <protein, GO ID> tuples from t1_iea file:
dict_iea = defaultdict(lambda:set())
# Populate the dictionary for t1_iea with <protein, GO terms> as
# <key, values> pairs from the entries with NOn-Experimental Evidence
# at time t1:
for lines in goa_iea_handle:
cols = lines.strip().split('\t')
if len(cols) < 15:
continue
dict_iea[cols[1]].add(cols[4])
# Column 1: protein name, Column 4: GO ID
return dict_iea
def create_exp_ann_dict(goa_exp_handle):
"""
This method builds three dictionaries in BPO, CCO, and MFO categories
for <protein, GO ID> tuples from UniProt-GOA file without the header
string.
"""
# Initialize three dictionaries in BPO, CCO, and MFO ontology groups
# which will later be populated with <protein, GO ID> tuples from
# t1_exp file ontology groups:
dict_bpo = defaultdict(lambda:set())
dict_cco = defaultdict(lambda:set())
dict_mfo = defaultdict(lambda:set())
# Populate the three dictionaries for t1_exp with <protein, GO terms>
# as <key, values> pairs from the entries with Non-Experimental Evidence
# at time t1:
for lines in goa_exp_handle:
cols = lines.strip().split('\t')
if len(cols) < 15:
continue
if cols[8] == 'F': # Column 8: Ontology group
dict_mfo[cols[1]].add(cols[4])
# Column 1: protein name, Column 4: GO ID
elif cols[8] == 'P':
dict_bpo[cols[1]].add(cols[4])
elif cols[8] == 'C':
dict_cco[cols[1]].add(cols[4])
return (dict_bpo, dict_cco, dict_mfo)
def check_LK_benchmark_creation(t1_iea_dict,
t1_xxo_dict,
t2_xxo_dict,
benchmark_fh):
"""
This method verifies the benchmark entries in the benchmark file
passed by the file handle benchmark_fh.
Meaning of xxo: xxo is replaced runtime by bpo, cco, or mfo to make
this method specific to a certain type of benchmarks.
"""
err_msg = ''
for lines in benchmark_fh:
cols = lines.strip().split('\t')
if cols[0] not in t1_iea_dict:
err_msg = '\t\tan undesired protein ' + cols[0] + ' got selected ' + \
'in the benchmark file.'
break
elif cols[0] in t1_xxo_dict:
err_msg = '\t\tselected protein ' + cols[0] + ' in the ' + \
'benchmark file already had\n' + \
'\t\texperimental evidence at time t1.'
break
elif cols[0] not in t2_xxo_dict or cols[1] not in t2_xxo_dict[cols[0]]:
err_msg = '\t\tselected protein ' + cols[0] + ' in the ' + \
                      'benchmark file has not gained\n' + \
'\t\texperimental evidence at time t2.'
break
return err_msg
def verify_LK_benchmark(t1_iea_handle,
t1_exp_handle,
t2_exp_handle,
benchmark_fh,
ontType):
"""
This method verifies Limited-Knowledge benchmark sets.
"""
# Create a dictionary for the <protein, GO ID> tuples from t1_iea file:
t1_iea_dict = create_iea_ann_dict(t1_iea_handle)
# Create BPO, CCO and MFO dictionaries for the
# <protein, GO ID> tuples from t1_exp file:
t1_bpo_dict, t1_cco_dict, t1_mfo_dict = create_exp_ann_dict(t1_exp_handle)
# Create BPO, CCO and MFO dictionaries for the
# <protein, GO ID> tuples from t2_exp file:
t2_bpo_dict, t2_cco_dict, t2_mfo_dict = create_exp_ann_dict(t2_exp_handle)
# Check file format for LK_bpo benchmark file.
# LK_bpo is True when the filename is non-empty, file exists, file size
# is non-zero and file in correct format:
err_msg = '' # Error message holders
if ontType == 'BPO':
# Verify LK-BPO benchmarks
err_msg = check_LK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t2_bpo_dict,
benchmark_fh)
elif ontType == 'CCO':
# Verify LK-CCO benchmarks
err_msg = check_LK_benchmark_creation(t1_iea_dict,
t1_cco_dict,
t2_cco_dict,
benchmark_fh)
elif ontType == 'MFO':
# Verify LK-MFO benchmarks
err_msg = check_LK_benchmark_creation(t1_iea_dict,
t1_mfo_dict,
t2_mfo_dict,
benchmark_fh)
return err_msg
def check_NK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t1_cco_dict,
t1_mfo_dict,
t2_xxo_dict,
benchmark_fh):
"""
This method verifies the benchmark entries in the benchmark file
passed by the file handle benchmark_fh.
Meaning of xxo: xxo is replaced runtime by bpo, cco, or mfo to make
this method specific to a certain type of benchmarks.
"""
err_msg = ''
for lines in benchmark_fh:
cols = lines.strip().split('\t')
if cols[0] not in t1_iea_dict:
err_msg = '\t\tan undesired protein ' + cols[0] + \
' got selected in the benchmark file.'
break
elif cols[0] in t1_bpo_dict or \
cols[0] in t1_cco_dict or \
cols[0] in t1_mfo_dict:
err_msg = '\t\tselected protein ' + cols[0] + ' in the ' + \
'benchmark file already had\n' +\
'\t\texperimental evidence at t1.'
break
elif cols[0] not in t2_xxo_dict:
err_msg = '\t\tselected protein ' + cols[0] + ' in the '+ \
'benchmark file has not gained\n' + \
'\t\texperimental evidence at time t2.'
break
return err_msg
def verify_NK_benchmark(t1_iea_handle,
t1_exp_handle,
t2_exp_handle,
benchmark_fh,
ontType):
"""
This method verifies No-Knowledge benchmark sets.
"""
# Create a dictionary for the
# <protein, GO ID> tuples from t1_iea file:
t1_iea_dict = create_iea_ann_dict(t1_iea_handle)
# Create BPO, CCO and MFO dictionaries for the
# <protein, GO ID> tuples from t1_exp file:
t1_bpo_dict, t1_cco_dict, t1_mfo_dict = create_exp_ann_dict(t1_exp_handle)
# Create BPO, CCO and MFO dictionaries for the
# <protein, GO ID> tuples from t2_exp file:
t2_bpo_dict, t2_cco_dict, t2_mfo_dict = create_exp_ann_dict(t2_exp_handle)
    err_msg = ''  # Error message holder
if (ontType == 'BPO'):
# Verify NK-BPO benchmarks:
err_msg = check_NK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t1_cco_dict,
t1_mfo_dict,
t2_bpo_dict,
benchmark_fh)
elif (ontType == 'CCO'):
# Verify NK-CCO benchmarks:
err_msg = check_NK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t1_cco_dict,
t1_mfo_dict,
t2_cco_dict,
benchmark_fh)
elif (ontType == 'MFO'):
# Verify NK-MFO benchmarks:
err_msg = check_NK_benchmark_creation(t1_iea_dict,
t1_bpo_dict,
t1_cco_dict,
t1_mfo_dict,
t2_mfo_dict,
benchmark_fh)
return err_msg
if __name__ == "__main__":
print (sys.argv[0] + ':')
print (__doc__)
sys.exit(0)
| 0.003889 |
from celery import task
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from webparticipation.apps.utils.views import get_url
from webparticipation.apps.ureport_auth.models import PasswordReset
from webparticipation.apps.ureporter.models import Ureporter
@task()
def send_forgot_password_email(email):
user = User.objects.filter(email=email).first()
if user:
expiry = timezone.now() + timezone.timedelta(days=settings.PASSWORD_RESET_EXPIRY_DAYS)
password_reset = PasswordReset.objects.filter(user_id=user.id).first()
if password_reset:
password_reset.set_expiry(expiry)
password_reset.generate_password_reset_token()
else:
password_reset = PasswordReset.objects.create(expiry=expiry, user=user)
password_reset.generate_password_reset_token()
subject = _('U-Report Password Recovery')
email_content = construct_forgotten_password_email(email)
recipients = [email]
message = EmailMessage(subject, email_content, to=recipients)
message.content_subtype = 'html'
message.send()
def construct_forgotten_password_email(email):
ureporter = Ureporter.objects.get(user__email=email)
password_reset = PasswordReset.objects.get(user_id=ureporter.user.id)
password_reset_link = get_url('/password-reset/%s/' % password_reset.token)
unsubscribe_link = get_url('/profile/unsubscribe/%s' % ureporter.unsubscribe_token)
body = '<p>Hello from U-Report,</p>' \
           '<p>You recently requested a reset of your U-Report account password.</p>' \
'<p>To do this, please click this password recovery link to change your password: %s</p>'\
'<p>-----</p>' \
'<p>Thanks,</p>' % password_reset_link
signature = _('<p>Your friendly U-Report team</p>')
footer = '<hr>' \
'<p>Please click <a href="%s">unsubscribe</a> ' \
'to stop receiving email notifications</p>' % unsubscribe_link
return '%s%s%s' % (body, signature, footer)
| 0.002275 |
# coding=utf-8
from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
import pyphysio as ph
import numpy as np
class _MouseSelectionFilter(object):
def __init__(self, onselect):
self._select = onselect
self._last_press = None
def on_move(self, event):
self._last_press = None
def on_press(self, event):
x, y = event.xdata, event.ydata
self._last_press = x, y, event.button
def on_release(self, event):
x, y = event.xdata, event.ydata
if self._last_press is not None:
xx, yy, b = self._last_press
if x == xx and y == yy and event.button == b:
self._select(event)
class _ItemManager(object):
def __init__(self, snap_func, select, unselect, add, delete):
self._snap_func = snap_func
self._select = select
self._unselect = unselect
self._delete = delete
self._add = add
self.selection = -1
def unselect(self):
self._unselect(self.selection)
self.selection = None
def on_select(self, ev):
if ev.xdata is not None and ev.ydata is not None:
x, y, item, new = self._snap_func(ev.xdata, ev.ydata)
# print("on_select: %d, %d: %d" % (x, y, item))
if self.selection is not None:
self.unselect()
if ev.button == 1:
if new:
self._add(x, y, item)
else:
self.selection = item
self._select(item)
class Annotate(object):
def __init__(self, ecg, ibi):
self.plots = None
self.peaks_t = None
self.done = False
self.ecg = ecg
self.ibi = ibi
self.fig = plt.figure()
self.p_sig = self.fig.add_subplot(2, 1, 1)
self.p_res = self.fig.add_subplot(2, 1, 2, sharex=self.p_sig)
self.max = ph.Max()(self.ecg)
self.min = ph.Min()(self.ecg)
self.margin = ph.Range()(self.ecg) * .1
self.max += self.margin
self.min -= self.margin
if isinstance(ibi, ph.UnevenlySignal):
self.peaks_t = self.ibi.get_times()
self.peaks_v = self.ibi.get_values()
else:
self.peaks_t = np.empty(0)
self.peaks_v = np.empty(0)
self.p_sig.plot(self.ecg.get_times(), self.ecg.get_values(), 'b')
        self.p_res.plot(self.peaks_t, self.peaks_v, 'b')
self.p_res.plot(self.peaks_t, self.peaks_v, 'go')
self.replot()
class Cursor(object):
left = None
right = None
radius = .3
radiusi = int(radius * self.ecg.get_sampling_freq())
@staticmethod
def on_move(event):
Cursor.draw(event)
@staticmethod
def on_scroll(event):
if event.button == "up":
Cursor.radiusi += 3
elif event.button == "down":
Cursor.radiusi -= 7
Cursor.radius = Cursor.radiusi / self.ecg.get_sampling_freq()
Cursor.draw(event)
@staticmethod
def draw(event):
if Cursor.left is not None:
Cursor.left.remove()
Cursor.right.remove()
Cursor.left = None
Cursor.right = None
if event.xdata is not None: # TODO (Andrea): not do this if speed (dxdata/dt) is high
Cursor.left = self.p_sig.vlines(event.xdata - Cursor.radius, self.min - self.margin * 2,
self.max + self.margin * 2, 'k')
Cursor.right = self.p_sig.vlines(event.xdata + Cursor.radius, self.min - self.margin * 2,
self.max + self.margin * 2, 'k')
self.fig.canvas.draw()
def find_peak(s):
return np.argmax(s)
def snap(xdata, ydata):
nearest_after = self.peaks_t.searchsorted(xdata)
nearest_prev = nearest_after - 1
dist_after = self.peaks_t[nearest_after] - xdata if 0 <= nearest_after < len(self.peaks_t) else None
dist_prev = xdata - self.peaks_t[nearest_prev] if 0 <= nearest_prev < len(self.peaks_t) else None
if dist_after is None or dist_prev < dist_after:
if dist_prev is not None and dist_prev < Cursor.radius:
return self.peaks_t[nearest_prev], ydata, nearest_prev, False
elif dist_prev is None or dist_after < dist_prev:
if dist_after is not None and dist_after < Cursor.radius:
return self.peaks_t[nearest_after], ydata, nearest_after, False
s = self.ecg.segment_time(xdata - Cursor.radius, xdata + Cursor.radius)
s = np.array(s)
m = find_peak(s)
return xdata - Cursor.radius + m / self.ecg.get_sampling_freq(), ydata, nearest_after, True
class Selector(object):
selector = None
@staticmethod
def select(item):
# print("select: %d" % item)
Selector.selector = self.p_sig.vlines(self.peaks_t[item], self.min - self.margin, self.max + self.margin, 'g')
@staticmethod
def unselect(item):
if Selector.selector is not None:
# print("unselect: %d" % item)
Selector.selector.remove()
# it is correct that the computation of the values is done at the end (line 186)
def add(time, y, pos):
self.peaks_t = np.insert(self.peaks_t, pos, time)
self.replot()
def delete(item):
self.peaks_t = np.delete(self.peaks_t, item)
self.replot()
im = _ItemManager(snap, Selector.select, Selector.unselect, add, delete)
mf = _MouseSelectionFilter(im.on_select)
def press(ev):
# print(ev.key)
if ev.key == "d" and im.selection is not None:
delete(im.selection)
im.unselect()
def handle_close(ev):
self.done = True
return
clim = self.fig.canvas.mpl_connect('motion_notify_event', lambda e: (mf.on_move(e), Cursor.on_move(e)))
clip = self.fig.canvas.mpl_connect('button_press_event', mf.on_press)
clir = self.fig.canvas.mpl_connect('button_release_event', mf.on_release)
clis = self.fig.canvas.mpl_connect('scroll_event', Cursor.on_scroll)
clik = self.fig.canvas.mpl_connect('key_press_event', press)
ccls = self.fig.canvas.mpl_connect('close_event', handle_close)
        while not self.done:
# print('waiting')
plt.pause(1)
plt.close(self.fig)
# it is correct that the computation of the values is done at the end!
# do not change!
self.peaks_v = np.diff(self.peaks_t)
self.peaks_v = np.r_[self.peaks_v[0], self.peaks_v]
if isinstance(ibi, ph.UnevenlySignal):
self.ibi_ok = ph.UnevenlySignal(values=self.peaks_v,
sampling_freq=self.ibi.get_sampling_freq(),
signal_type=self.ibi.get_signal_type(),
start_time=self.ibi.get_start_time(),
x_values=self.peaks_t,
x_type='instants',
duration=self.ibi.get_duration())
else:
self.ibi_ok = ph.UnevenlySignal(values=self.peaks_v,
sampling_freq=self.ecg.get_sampling_freq(),
signal_type=self.ecg.get_signal_type(),
start_time=self.ecg.get_start_time(),
x_values=self.peaks_t,
x_type='instants',
duration=self.ecg.get_duration())
def __call__(self):
return self.ibi_ok
def replot(self):
if self.plots is not None:
self.plots.remove()
if self.peaks_t is not None:
self.plots = self.p_sig.vlines(self.peaks_t, self.min, self.max, 'y')
self.fig.canvas.draw()
| 0.005407 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""Handler for the map review page."""
__author__ = 'shakusa@google.com (Steve Hakusa)'
import json
import urllib
import base_handler
import config
import model
import perms
# Takes a hex color (rgb or rrggbb) as a %-substitution
_ICON_URL_TEMPLATE = ('https://chart.googleapis.com/chart?'
'chst=d_map_xpin_letter'
'&chld=pin%%7C+%%7C%s%%7C000%%7CF00')
def _MakeIconUrl(report, choice_colors):
"""Returns a URL to render an icon for the given report.
Args:
report: A model.CrowdReport.
choice_colors: A dict; keys are (question_id, choice_id) pairs, values
are hex color strings.
Returns:
A string, a URL for a marker icon colored by the first answer, if any.
"""
color = report.answers and choice_colors.get(report.answers.items()[0])
return _ICON_URL_TEMPLATE % (color or 'aaa').strip('#')
def _NoneIfTrueElseFalse(value):
return None if value.lower() in {'true', 'yes', '1'} else False
def _NoneIfFalseElseTrue(value):
return value.lower() in {'true', 'yes', '1'} or None
class _MapReview(base_handler.BaseHandler):
"""Administration page for reviewing crowd reports on a map.
Supported query params:
query: An optional string; used to filter crowd reports by keywords.
May also contain structured search expressions as explained in
model.CrowdReport.search.
id: An optional string; If present, ignore other filter parameters
and return the crowd report with the given identifier
author: An optional string; if present, review only those crowd reports
created on this map by the author with this id. If the author
string is not a full URL, it is assumed to be a user ID or
anonymous user token created via GetCurrentUserUrl
topic: An optional string topic ID. If present, review only those crowd
reports to belonging to the topic with this ID.
hidden: An optional string; If 'true', 'yes', or '1', review only those
crowd reports with hidden set to True, otherwise review both hidden
and non-hidden reports.
reviewed: An optional string; If 'true', 'yes', or '1', review crowd
reports regardless of whether they have already been reviewed,
otherwise, and by default, review only those crowd reports with
reviewed = False.
count: An optional integer; Return this many crowd reports to review.
Defaults to 50.
skip: An optional integer for paging. Skip this many crowd reports before
returning count.
"""
# Query params used in _GetUrl and also passed to map_review.js
params = ['query', 'id', 'author', 'topic', 'hidden', 'reviewed',
'count', 'skip']
def _GetUrl(self, **kwargs):
"""Gets a URL for the review page with params set from self.request.
Args:
**kwargs: key, value pairs used to override params in self.request
Returns:
A URL for the review page with params set from self.request.
"""
for param in self.params:
kwargs[param] = kwargs.get(param, self.request.get(param, ''))
return 'review?' + urllib.urlencode(
[(k, v) for k, v in kwargs.iteritems() if v])
def RenderReviewPage(self, map_object):
"""Renders the map review page.
Args:
map_object: The model.Map instance being reviewed.
"""
perms.AssertAccess(perms.Role.MAP_REVIEWER, map_object)
self.count = int(self.request.get('count') or 50)
self.skip = int(self.request.get('skip') or 0)
self.reviewed = _NoneIfTrueElseFalse(self.request.get('reviewed'))
self.hidden = _NoneIfFalseElseTrue(self.request.get('hidden'))
self.report_id = self.request.get('id', '').strip()
self.query = self.request.get('query', '').strip()
self.author = self.request.get('author', '').strip() or None
self.topic_id = self.request.get('topic')
prev_skip = max(0, self.skip - self.count)
prev_url = self._GetUrl(skip=prev_skip) if self.skip else None
next_skip = 0
next_url = None
map_id = map_object.key.name()
map_root = map_object.map_root
topic_ids = []
report_dicts = []
if 'topics' in map_root:
topic_ids, report_dicts = self._ExtractTopicsAndReports(map_id, map_root)
if len(report_dicts) > self.count:
report_dicts = report_dicts[:self.count]
next_skip = self.skip + self.count
next_url = self._GetUrl(skip=next_skip)
self._RenderTemplate(map_object, report_dicts, topic_ids,
prev_url, next_url, next_skip)
def _RenderTemplate(self, map_object, report_dicts, topic_ids,
prev_url, next_url, next_skip):
"""Renders the map review template.
Args:
map_object: The model.Map instance being reviewed.
report_dicts: An array of dicts representing reports to review.
topic_ids: An array of topic IDs representing the map topics.
prev_url: A string, the URL to review the previous page of reports.
next_url: A string, the URL to review the next page of reports.
next_skip: An int, the number of reports to skip when rendering next_url.
"""
self.response.out.write(self.RenderTemplate('map_review.html', {
'map': map_object,
'params_json': json.dumps(self.params),
'reports': report_dicts,
'reports_json': json.dumps(report_dicts),
'topic_id': self.topic_id,
'topic_ids': topic_ids,
'id': self.report_id,
'query': self.query,
'author': self.request.get('author'),
'prev_url': prev_url,
'next_url': next_url,
'first': self.skip + 1,
'last': self.skip + len(report_dicts),
'skip': next_skip,
'hidden': self.hidden and 'true' or '',
'reviewed': self.reviewed is None and 'true' or '',
}))
def _ExtractTopicsAndReports(self, map_id, map_root):
"""Extracts topics from MapRoot and loads reports to review from datastore.
Args:
map_id: A string, the id of the map being reviewed.
map_root: The MapRoot definition of the map being reviewed.
Returns:
A pair (topic_ids, reports) where topic_ids is a list of the map's topic
IDs and reports is a list of dicts representing reports to review.
"""
topic_ids = []
question_types = {}
question_titles = {}
choice_colors = {}
choice_labels = {}
for topic in map_root['topics']:
topic_ids.append(topic['id'])
for question in topic.get('questions', []):
question_id = '%s.%s.%s' % (map_id, topic['id'], question['id'])
question_types[question_id] = question.get('type', '')
question_titles[question_id] = title = question.get('title', '')
for choice in question.get('choices', []):
choice_labels[question_id, choice['id']] = (
choice.get('label', '') or title + ': ' + choice.get('title', ''))
choice_colors[question_id, choice['id']] = choice.get('color', '')
def _DescribeAnswer((question_id, answer)):
if question_types.get(question_id) == 'CHOICE':
return choice_labels.get((question_id, answer))
return '%s: %s' % (question_titles[question_id], answer)
return topic_ids, [{
'id': report.id,
'url': '../%s?ll=%.5f,%.5f&z=17' % (
map_id, report.location.lat, report.location.lon),
'author': (('%s/.users/' % self.request.root_url) in report.author and
report.author.split('/')[-1] or report.author),
'text': report.text,
'location': '(%.3f, %.3f)' % (report.location.lat, report.location.lon),
'lat': report.location.lat,
'lon': report.location.lon,
'icon_url': _MakeIconUrl(report, choice_colors),
'updated': report.updated.strftime('%Y-%m-%dT%H:%M:%SZ'),
'topics': ','.join(tid.split('.')[1] for tid in report.topic_ids),
'answers': ', '.join(
map(_DescribeAnswer, json.loads(report.answers_json).items())),
'hidden': report.hidden,
'votes': u'\u2191%d \u2193%d (%.1f)' % (
report.upvote_count or 0, report.downvote_count or 0,
report.score or 0)
} for report in self._QueryForReports(map_id, topic_ids)]
def _QueryForReports(self, map_id, topic_ids):
"""Queries datastore for reports.
Args:
map_id: A string, the id of the map being reviewed.
topic_ids: An array of topic IDs for which to restrict the query.
Returns:
A iterable of model.CrowdReport.
"""
if self.report_id:
report = model.CrowdReport.Get(self.report_id)
return [report] if report else []
else:
if self.topic_id and self.topic_id in topic_ids:
tids = ['%s.%s' % (map_id, self.topic_id)]
else:
tids = ['%s.%s' % (map_id, tid) for tid in topic_ids]
if self.query:
# Restrict the search to topics for this map.
# Note that the query itself can be arbitrarily complex, following
# the syntax at
# developers.google.com/appengine/docs/python/search/query_strings
# We don't validate the query here, and an invalid query currently
# will render an error page.
restricted_query = [
self.query,
'topic_id:(%s)' % (' OR '.join('"%s"' % tid for tid in tids))]
if self.hidden is not None:
restricted_query.append('hidden: %s' % self.hidden)
if self.reviewed is not None:
restricted_query.append('reviewed: %s' % self.reviewed)
return model.CrowdReport.Search(' '.join(restricted_query),
self.count + 1, self.skip)
else:
if self.author and not self.author.startswith('http'):
author = '%s/.users/%s' % (self.request.root_url, self.author)
else:
author = self.author
return model.CrowdReport.GetForTopics(tids, self.count + 1, self.skip,
author, self.hidden,
self.reviewed)
def HandlePost(self, map_object):
"""Handles a POST.
Possible user actions are marking the set of input reports reviewed,
upvoted or downvoted.
Upon success, the user is redirected to the review page.
Args:
map_object: The model.Map instance being reviewed.
"""
perms.AssertAccess(perms.Role.MAP_REVIEWER, map_object)
to_accept = self.request.get_all('accept')
to_downvote = self.request.get_all('downvote')
to_upvote = self.request.get_all('upvote')
model.CrowdReport.MarkAsReviewed(to_accept + to_downvote + to_upvote)
for report_id in to_downvote:
model.CrowdVote.Put(report_id, self.GetCurrentUserUrl(), 'REVIEWER_DOWN')
for report_id in to_upvote:
model.CrowdVote.Put(report_id, self.GetCurrentUserUrl(), 'REVIEWER_UP')
self.redirect(self._GetUrl())
class MapReviewByLabel(_MapReview):
"""A version of MapReview that expects a map label in the URL."""
def _GetMap(self, label, domain):
"""Loads the model.Map instance being reviewed by label and domain.
Args:
label: A string, the published label for the map.
domain: A string, the domain in which the map was created, eg gmail.com.
Returns:
The model.Map instance being reviewed
Raises:
      base_handler.Error: If the map cannot be found.
"""
domain = domain or config.Get('primary_domain') or ''
entry = model.CatalogEntry.Get(domain, label)
if not entry:
raise base_handler.Error(404, 'Map %r not found.' % label)
map_object = model.Map.Get(entry.map_id)
if not map_object:
raise base_handler.Error(404, 'Map %r not found.' % label)
return map_object
def Get(self, label, domain=None):
"""Renders the map review page by domain and map label.
Args:
label: A string, the published label for the map.
domain: A string, the domain in which the map was created, eg gmail.com.
"""
self.RenderReviewPage(self._GetMap(label, domain))
def Post(self, label, domain=None):
"""Updates report statuses for the map at the given domain and map label.
Args:
label: A string, the published label for the map.
domain: A string, the domain in which the map was created, eg gmail.com.
"""
self.HandlePost(self._GetMap(label, domain))
class MapReviewById(_MapReview):
"""A version of MapReview that expects a map_id in the URL."""
def _GetMap(self, map_id):
"""Loads the model.Map instance being reviewed by ID.
Args:
map_id: A string, the id of the map being reviewed.
Returns:
The model.Map instance being reviewed
Raises:
      base_handler.Error: If the map cannot be found.
"""
map_object = model.Map.Get(map_id)
if not map_object:
raise base_handler.Error(404, 'Map %r not found.' % map_id)
return map_object
def Get(self, map_id, domain=None):
"""Renders the map review page by map ID.
Args:
map_id: A string, the id of the map being reviewed.
domain: A string, the domain in which the map was created, eg gmail.com.
"""
self.RenderReviewPage(self._GetMap(map_id))
def Post(self, map_id, domain=None):
"""Updates report statuses for the map at the given map ID.
Args:
map_id: A string, the id of the map being reviewed.
domain: A string, the domain in which the map was created, eg gmail.com.
"""
self.HandlePost(self._GetMap(map_id))
| 0.00354 |
# -:- encoding: utf-8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mapproxy.compat import iteritems
def caches(cap, sources, srs_grids):
caches = {}
for name, source in iteritems(sources):
conf = for_source(name, source, srs_grids)
if not conf:
continue
caches[name[:-len('_wms')] + '_cache'] = conf
return caches
def for_source(name, source, srs_grids):
cache = {
'sources': [name]
}
grids = []
for srs in source['supported_srs']:
if srs in srs_grids:
grids.append(srs_grids[srs])
if not grids:
return None
cache['grids'] = grids
return cache
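# A small usage sketch (names below are illustrative, not from a real config):
# for_source() keeps only the grids whose SRS the source supports, and
# caches() renames '<name>_wms' to '<name>_cache'.
#
#   sources = {'osm_wms': {'supported_srs': ['EPSG:3857', 'EPSG:4326']}}
#   srs_grids = {'EPSG:3857': 'webmercator'}
#   caches(None, sources, srs_grids)
#   # -> {'osm_cache': {'sources': ['osm_wms'], 'grids': ['webmercator']}}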
| 0.002358 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
adapted for nanscrapers
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from ..common import clean_title, random_agent, clean_search, replaceHTMLCodes, filter_host, get_rd_domains
from ..scraper import Scraper
import requests
import xbmc
class Releasebb(Scraper):
domains = ['rlsbb.com']
name = "Releasebb"
def __init__(self):
self.domains = ['rlsbb.com']
self.base_link = 'http://rlsbb.ru'
self.search_base_link = 'http://search.rlsbb.ru'
self.search_header_link = {'X-Requested-With': 'XMLHttpRequest', 'Cookie': 'serach_mode=light'}
self.search_link = '/lib/search526049.php?phrase=%s&pindex=1&content=true'
self.search_link2 = '/search/%s'
def scrape_movie(self, title, year, imdb, debrid=False):
try:
if not debrid:
return []
url = self.movie(imdb, title, year)
sources = self.sources(url, [], [])
for source in sources:
source["scraper"] = source["provider"]
return sources
except:
return []
def scrape_episode(self, title, show_year, year, season, episode,
imdb, tvdb, debrid=False):
try:
if not debrid:
return []
show_url = self.tvshow(imdb, tvdb, title, show_year)
url = self.episode(show_url, imdb, tvdb, title,
year, season, episode)
sources = self.sources(url, [], [])
for source in sources:
source["scraper"] = source["provider"]
return sources
except:
return []
def movie(self, imdb, title, year):
try:
self.elysium_url = []
query = clean_search(title)
cleanmovie = clean_title(title)
query = "%s+%s" % (urllib.quote_plus(query), year)
query = self.search_link % query
query = urlparse.urljoin(self.search_base_link, query)
headers = self.search_header_link
headers["referer"] = query
r = requests.get(query, headers=headers).content
posts = []
dupes = []
print ("RELEASEBB QUERY", r)
try:
posts += json.loads(re.findall('({.+?})$', r)[0])['results']
except:
pass
for post in posts:
try:
name = post['post_title'].encode('utf-8')
url = post['post_name'].encode('utf-8')
if url in dupes:
raise Exception()
dupes.append(url)
print ("RELEASEBB 2", name, url)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if cleanmovie not in clean_title(name) or not year in name:
raise Exception()
print ("RELEASEBB 3 PASSED", t)
content = post['post_content']
url = [i for i in parse_dom(content, 'a', ret='href')]
size = get_size(content)
quality = quality_tag(name)
self.elysium_url.append([size, quality, url])
except:
pass
print("RELEASEBB PASSED", self.elysium_url)
return self.elysium_url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
self.elysium_url = []
if url is None:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
title = clean_search(title)
cleanmovie = clean_title(title)
data['season'], data['episode'] = season, episode
ep_query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
titlecheck = cleanmovie+ep_query.lower()
query = "%s+%s" % (urllib.quote_plus(title), ep_query)
query = self.search_link % query
query = urlparse.urljoin(self.search_base_link, query)
headers = self.search_header_link
headers["referer"] = query
r = requests.get(query, headers=headers).content
posts = []
dupes = []
print ("RELEASEBB QUERY", r)
try:
posts += json.loads(re.findall('({.+?})$', r)[0])['results']
except:
pass
for post in posts:
try:
name = post['post_title'].encode('utf-8')
url = post['post_name'].encode('utf-8')
if url in dupes:
raise Exception()
dupes.append(url)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
print ("RELEASEBB 3 TV", t)
if titlecheck not in clean_title(name):
raise Exception()
print ("RELEASEBB 3 PASSED", t)
content = post['post_content']
url = [i for i in parse_dom(content, 'a', ret='href')]
size = get_size(content)
quality = 'getbyurl'
self.elysium_url.append([size, quality, url])
except:
pass
print("RELEASEBB PASSED", self.elysium_url)
return self.elysium_url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
for size, q, urls in self.elysium_url:
for url in urls:
try:
print ("RELEASEBB SOURCES", size, q, url)
url = url.encode('utf-8')
if q == 'getbyurl':
quality = quality_tag(url)
else:
quality = q
loc = urlparse.urlparse(url).netloc
if not filter_host(loc):
rd_domains = get_rd_domains()
if loc not in rd_domains:
continue
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Releasebb', 'url': url, 'info': size, 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
def _getDOMContent(html, name, match, ret):
end_str = "</%s" % (name)
start_str = '<%s' % (name)
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
if ret:
endstr = html[end:html.find(">", html.find(end_str)) + 1]
result = match + result + endstr
return result
def _getDOMAttributes(match, name, ret):
pattern = '''<%s[^>]* %s\s*=\s*(?:(['"])(.*?)\\1|([^'"].*?)(?:>|\s))''' % (name, ret)
results = re.findall(pattern, match, re.I | re.M | re.S)
return [result[1] if result[1] else result[2] for result in results]
def _getDOMElements(item, name, attrs):
if not attrs:
pattern = '(<%s(?: [^>]*>|/?>))' % (name)
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key in attrs:
pattern = '''(<%s [^>]*%s=['"]%s['"][^>]*>)''' % (name, key, attrs[key])
            this_list = re.findall(pattern, item, re.M | re.S | re.I)
if not this_list and ' ' not in attrs[key]:
pattern = '''(<%s [^>]*%s=%s[^>]*>)''' % (name, key, attrs[key])
                this_list = re.findall(pattern, item, re.M | re.S | re.I)
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
def parse_dom(html, name='', attrs=None, ret=False):
if attrs is None:
attrs = {}
if isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
print "none"
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
return ''
if not name.strip():
return ''
if not isinstance(attrs, dict):
return ''
ret_lst = []
for item in html:
for match in re.findall('(<[^>]*\n[^>]*>)', item):
item = item.replace(match, match.replace('\n', ' ').replace('\r', ' '))
lst = _getDOMElements(item, name, attrs)
if isinstance(ret, str):
lst2 = []
for match in lst:
lst2 += _getDOMAttributes(match, name, ret)
lst = lst2
else:
lst2 = []
for match in lst:
temp = _getDOMContent(item, name, match, ret).strip()
item = item[item.find(temp, item.find(match)):]
lst2.append(temp)
lst = lst2
ret_lst += lst
# log_utils.log("Done: " + repr(ret_lst), xbmc.LOGDEBUG)
return ret_lst
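# A quick illustration of parse_dom() above on toy HTML (made-up URLs):
# with ret='href' it returns attribute values, without ret it returns the
# element contents.
#
#   html = '<div><a href="http://host/x">one</a><a href="http://host/y">two</a></div>'
#   parse_dom(html, 'a', ret='href')  # -> [u'http://host/x', u'http://host/y']
#   parse_dom(html, 'a')              # -> [u'one', u'two']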
def get_size(txt):
try:
txt = re.findall('(\d+(?:\.|/,|)?\d+(?:\s+|)(?:GB|GiB|MB|MiB))', txt)
txt = txt[0].encode('utf-8')
except:
txt = ''
return txt
def quality_tag(txt):
if any(value in txt for value in ['1080', '1080p','1080P']):
quality = "1080p"
elif any(value in txt for value in ['720', '720p','720P']):
quality = "HD"
else:
quality = "SD"
return quality
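# Tiny sanity examples for the two helpers above (made-up release names):
#   get_size('Some.Movie.2016.720p 4.37 GB x264')  # -> '4.37 GB'
#   quality_tag('Some.Movie.2016.720p.BluRay')     # -> 'HD'
#   quality_tag('Some.Movie.2016.DVDRip')          # -> 'SD'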
| 0.0065 |
# -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from PyQt4 import QtCore
class Simbolos(object):
def __init__(self, pilas):
self.pilas = pilas
self.IZQUIERDA = 1
self.DERECHA = 2
self.ARRIBA = 3
self.ABAJO = 4
self.BOTON = 5
self.SELECCION = 6
self.ESPACIO = 32
self.CTRL=17
self.ALTGR=19
self.ALT=18
self.SHIFT=16
self.CAPSLOCK=20
self.F1='F1'
self.F2='F2'
self.F3='F3'
self.F4='F4'
self.F5='F5'
self.F6='F6'
self.F7='F7'
self.F8='F8'
self.F9='F9'
self.F10='F10'
self.F11='F11'
self.F12='F12'
self.a='a'
self.b='b'
self.c='c'
self.d='d'
self.e='e'
self.f='f'
self.g='g'
self.h='h'
self.i='i'
self.j='j'
self.k='k'
self.l='l'
self.m='m'
self.n='n'
self.o='o'
self.p='p'
self.q='q'
self.r='r'
self.s='s'
self.t='t'
self.u='u'
self.v='v'
self.w='w'
self.x='x'
self.y='y'
self.z='z'
| 0.032093 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from datetime import datetime, date, time
from decimal import Decimal
from weboob.tools.browser import BasePage
from weboob.capabilities.bill import Detail, Bill
__all__ = ['HistoryPage', 'DetailsPage']
def convert_price(div):
try:
price = div.find('div[@class="horsForfait"]/p/span').text
price = price.encode('utf-8', 'replace').replace('€', '').replace(',', '.')
return Decimal(price)
except:
return Decimal(0)
class DetailsPage(BasePage):
def on_loaded(self):
self.details = {}
self.datebills = {}
for div in self.document.xpath('//div[@class="infosLigne pointer"]'):
phonenumber = div.text
phonenumber = phonenumber.split("-")[-1].strip()
virtualnumber = div.attrib['onclick'].split('(')[1][1]
self.details['num' + str(phonenumber)] = virtualnumber
for div in self.document.xpath('//div[@class="infosConso"]'):
num = div.attrib['id'].split('_')[1][0]
self.details[num] = []
# National parsing
divnat = div.xpath('div[@class="national"]')[0]
self.parse_div(divnat, "National : %s | International : %s", num, False)
# International parsing
divint = div.xpath('div[@class="international hide"]')[0]
self.parse_div(divint, u"Appels émis : %s | Appels reçus : %s", num, True)
for divbill in self.document.xpath('//div[@class="facture"]'):
for trbill in divbill.xpath('table/tr'):
mydate = unicode(trbill.find('td').text.split(":")[1].strip())
for alink in trbill.xpath('td/a'):
bill = Bill()
bill.label = unicode(mydate)
billid = mydate.replace('-', '')
billid = billid[4:8] + billid[2:4] + billid[0:2]
bill.id = billid
bill.date = date(*reversed([int(x)
for x in mydate.split("-")]))
bill.format = u"pdf"
bill._url = alink.attrib.get('href')
if "pdfrecap" in alink.attrib.get('href'):
bill.id = "recap-" + bill.id
localid = re.search('&l=(?P<id>\d*)&id',
alink.attrib.get('href')).group('id')
if localid not in self.datebills:
self.datebills[localid] = []
self.datebills[localid].append(bill)
def parse_div(self, divglobal, string, num, inter=False):
divs = divglobal.xpath('div[@class="detail"]')
# Two informations in one div...
div = divs.pop(0)
voice = self.parse_voice(div, string, num, inter)
self.details[num].append(voice)
self.iter_divs(divs, num, inter)
def iter_divs(self, divs, num, inter=False):
for div in divs:
detail = Detail()
detail.label = unicode(div.find('div[@class="titre"]/p').text_content())
detail.id = "-" + detail.label.split(' ')[1].lower()
if inter:
detail.label = detail.label + u" (international)"
detail.id = detail.id + "-inter"
detail.infos = unicode(div.find('div[@class="conso"]/p').text_content().lstrip())
detail.price = convert_price(div)
self.details[num].append(detail)
def parse_voice(self, div, string, num, inter=False):
voice = Detail()
voice.id = "-voice"
voicediv = div.xpath('div[@class="conso"]')[0]
voice.label = unicode(div.find('div[@class="titre"]/p').text_content())
if inter:
voice.label = voice.label + " (international)"
voice.id = voice.id + "-inter"
voice.price = convert_price(div)
voice1 = voicediv.xpath('.//span[@class="actif"]')[0].text
voice2 = voicediv.xpath('.//span[@class="actif"]')[1].text
voice.infos = unicode(string) % (voice1, voice2)
return voice
def get_details(self, subscription):
num = self.details['num' + subscription.id]
for detail in self.details[num]:
detail.id = subscription.id + detail.id
yield detail
def date_bills(self, subscription):
for bill in self.datebills[subscription._login]:
bill.id = subscription.id + '.' + bill.id
yield bill
def get_renew_date(self, subscription):
login = subscription._login
div = self.document.xpath('//div[@login="%s"]' % login)[0]
mydate = div.xpath('.//span[@class="actif"]')[0].text
mydate = date(*reversed([int(x) for x in mydate.split("/")]))
if mydate.month == 12:
mydate = mydate.replace(month=1)
mydate = mydate.replace(year=mydate.year + 1)
else:
mydate = mydate.replace(month=mydate.month + 1)
return mydate
def _get_date(detail):
return detail.datetime
class HistoryPage(BasePage):
def on_loaded(self):
self.calls = []
for tr in self.document.xpath('//tr'):
tds = tr.xpath('td')
if tds[0].text is None or tds[0].text == "Date":
pass
else:
detail = Detail()
mydate = date(*reversed([int(x) for x in tds[0].text.split(' ')[0].split("/")]))
mytime = time(*[int(x) for x in tds[0].text.split(' ')[2].split(":")])
detail.datetime = datetime.combine(mydate, mytime)
detail.label = u' '.join([unicode(td.text.strip()) for td in tds[1:4] if td.text is not None])
try:
detail.price = Decimal(tds[4].text[0:4].replace(',', '.'))
except:
detail.price = Decimal(0)
self.calls.append(detail)
def get_calls(self):
return sorted(self.calls, key=_get_date, reverse=True)
| 0.001942 |
from typing import List, Optional
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
def post_process_advantages(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[List[SampleBatch]] = None,
episode: Optional[MultiAgentEpisode] = None) -> SampleBatch:
"""Adds the "advantages" column to `sample_batch`.
Args:
policy (Policy): The Policy object to do post-processing for.
sample_batch (SampleBatch): The actual sample batch to post-process.
other_agent_batches (Optional[List[SampleBatch]]): Optional list of
other agents' SampleBatch objects.
episode (MultiAgentEpisode): The multi-agent episode object, from which
`sample_batch` was generated.
Returns:
SampleBatch: The SampleBatch enhanced by the added ADVANTAGES field.
"""
# Calculates advantage values based on the rewards in the sample batch.
# The value of the last observation is assumed to be 0.0 (no value function
# estimation at the end of the sampled chunk).
return compute_advantages(
rollout=sample_batch,
last_r=0.0,
gamma=policy.config["gamma"],
use_gae=False,
use_critic=False)
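# Worked example (toy numbers) of what this post-processing produces: with
# use_gae=False and use_critic=False, compute_advantages falls back to plain
# discounted returns, i.e. advantages[t] = sum_k gamma**k * rewards[t + k].
# For rewards [1, 0, 1] and gamma = 0.5:
#   advantages = [1 + 0.5*0 + 0.25*1, 0 + 0.5*1, 1] = [1.25, 0.5, 1.0]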
| 0 |
from __future__ import absolute_import
import functools
from time import time
def memoize(timeout, dynamic_timeout=False):
"""
Memoization decorator with support for timeout.
If dynamic_timeout is set, the cache timeout is doubled if the cached function
takes longer time to run than the timeout time
"""
cache = {"timeout": timeout}
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time()
if (not "time" in cache) or (start - cache["time"] > cache["timeout"]):
# cache miss
cache["result"] = func(*args, **kwargs)
cache["time"] = time()
if dynamic_timeout and cache["time"] - start > cache["timeout"]:
cache["timeout"] *= 2
return cache["result"]
def clear_cache():
if "time" in cache:
del cache["time"]
if "result" in cache:
del cache["result"]
wrapper.clear_cache = clear_cache
return wrapper
return decorator
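# A minimal usage sketch (load_config is a hypothetical function, not part of
# this module): the first call runs the function, later calls within `timeout`
# seconds return the cached result, and clear_cache() forces a recompute.
#
#   @memoize(timeout=60)
#   def load_config():
#       return {"refreshed_at": time()}
#
#   first = load_config()
#   second = load_config()     # within 60s: served from the cache
#   load_config.clear_cache()
#   third = load_config()      # recomputed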
| 0.00363 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
gdaltindex.py
---------------------
Date : February 2015
Copyright : (C) 2015 by Pedro Venancio
Email : pedrongvenancio at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Pedro Venancio'
__date__ = 'February 2015'
__copyright__ = '(C) 2015, Pedro Venancio'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.outputs import OutputVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterString
from processing.tools import dataobjects
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class gdaltindex(GdalAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD_NAME = 'FIELD_NAME'
PROJ_DIFFERENCE = 'PROJ_DIFFERENCE'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'tiles.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Tile Index')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Miscellaneous')
self.addParameter(ParameterMultipleInput(self.INPUT,
self.tr('Input layers'), dataobjects.TYPE_RASTER))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Tile index field'),
'location', optional=True))
self.addParameter(ParameterBoolean(self.PROJ_DIFFERENCE,
self.tr('Skip files with different projection reference'), False))
self.addOutput(OutputVector(gdaltindex.OUTPUT, self.tr('Tile index')))
def getConsoleCommands(self):
fieldName = str(self.getParameterValue(self.FIELD_NAME))
arguments = []
if len(fieldName) > 0:
arguments.append('-tileindex')
arguments.append(fieldName)
if self.getParameterValue(gdaltindex.PROJ_DIFFERENCE):
arguments.append('-skip_different_projection')
arguments.append(str(self.getOutputValue(gdaltindex.OUTPUT)))
arguments.extend(str(self.getParameterValue(gdaltindex.INPUT)).split(';'))
return ['gdaltindex', GdalUtils.escapeAndJoin(arguments)]
| 0.001222 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pysnmp.sf.net/license.html
#
import random
random.seed()
class Integer(object):
"""Return a next value in a reasonably MT-safe manner"""
def __init__(self, maximum, increment=256):
self.__maximum = maximum
if increment >= maximum:
increment = maximum
self.__increment = increment
self.__threshold = increment // 2
e = random.randrange(self.__maximum - self.__increment)
self.__bank = list(range(e, e + self.__increment))
def __repr__(self):
return '%s(%d, %d)' % (
self.__class__.__name__,
self.__maximum,
self.__increment
)
def __call__(self):
v = self.__bank.pop(0)
if v % self.__threshold:
return v
else:
# this is MT-safe unless too many (~ increment/2) threads
# bump into this code simultaneously
e = self.__bank[-1] + 1
if e > self.__maximum:
e = 0
self.__bank.extend(range(e, e + self.__threshold))
return v
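# A small usage sketch (variable names are illustrative only): a rolling
# request-ID generator whose values stay below the configured maximum.
#
#   request_id = Integer(maximum=0x7fffffff)
#   first = request_id()
#   second = request_id()   # typically first + 1; values wrap before maximum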
| 0 |
# ---------------------------------------------------------------------
#
# Copyright (C) 2016 by the deal.II authors
#
# This file is part of the deal.II library.
#
# The deal.II library is free software; you can use it, redistribute
# it, and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE at
# the top level of the deal.II distribution.
#
# ---------------------------------------------------------------------
import unittest
import math
from PyDealII.Debug import *
class TestPointWrapper(unittest.TestCase):
def test_2d_point(self):
p1 = Point([0., 1.])
self.assertEqual(p1.x, 0.)
self.assertEqual(p1.y, 1.)
p1.x = 1.
p1.y = 2.
self.assertEqual(p1.x, 1.)
self.assertEqual(p1.y, 2.)
p2 = Point([0., 2.])
self.assertEqual(p1.distance(p2), 1.)
self.assertEqual(p2.norm(), 2.)
self.assertEqual(p2.norm_square(), 4.)
self.assertEqual(p1 != p2, True)
self.assertEqual(p1 == p2, False)
self.assertEqual(p1*p2, 4.)
p3 = p1 + p2
self.assertEqual(p3.x, p1.x + p2.x)
self.assertEqual(p3.y, p1.y + p2.y)
p3 = p1 - p2
self.assertEqual(p3.x, p1.x - p2.x)
self.assertEqual(p3.y, p1.y - p2.y)
p3 = -p2;
self.assertEqual(p3.x, -p2.x)
self.assertEqual(p3.y, -p2.y)
p3 = p2 / 2.;
self.assertEqual(p3.x, p2.x / 2.)
self.assertEqual(p3.y, p2.y / 2.)
p3 = p2 * 2.;
self.assertEqual(p3.x, p2.x * 2.)
self.assertEqual(p3.y, p2.y * 2.)
p2 += p1
self.assertEqual(p2.x, 1.)
self.assertEqual(p2.y, 4.)
p2 -= p1
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 2.)
p2 /= 2.;
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 1.)
p2 *= 2.;
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 2.)
def test_3d_point(self):
p1 = Point([0., 1., 2.])
self.assertEqual(p1.x, 0.)
self.assertEqual(p1.y, 1.)
self.assertEqual(p1.z, 2.)
p1.x = 1.
p1.y = 2.
p1.z = 3.
self.assertEqual(p1.x, 1.)
self.assertEqual(p1.y, 2.)
self.assertEqual(p1.z, 3.)
p2 = Point([0., 1., 2.])
self.assertAlmostEqual(p1.distance(p2), math.sqrt(3))
self.assertAlmostEqual(p2.norm(), math.sqrt(5))
self.assertEqual(p2.norm_square(), 5.)
self.assertEqual(p1 != p2, True)
self.assertEqual(p1 == p2, False)
self.assertEqual(p1*p2, 8)
dim = 3
p3 = p1 + p2
self.assertEqual(p3.x, p1.x + p2.x)
self.assertEqual(p3.y, p1.y + p2.y)
self.assertEqual(p3.z, p1.z + p2.z)
p3 = p1 - p2
self.assertEqual(p3.x, p1.x - p2.x)
self.assertEqual(p3.y, p1.y - p2.y)
self.assertEqual(p3.z, p1.z - p2.z)
p3 = -p2;
self.assertEqual(p3.x, -p2.x)
self.assertEqual(p3.y, -p2.y)
self.assertEqual(p3.z, -p2.z)
p3 = p2 / 2.;
self.assertEqual(p3.x, p2.x / 2.)
self.assertEqual(p3.y, p2.y / 2.)
self.assertEqual(p3.z, p2.z / 2.)
p3 = p2 * 2.;
self.assertEqual(p3.x, p2.x * 2.)
self.assertEqual(p3.y, p2.y * 2.)
self.assertEqual(p3.z, p2.z * 2.)
p2 += p1
self.assertEqual(p2.x, 1.)
self.assertEqual(p2.y, 3.)
self.assertEqual(p2.z, 5.)
p2 -= p1
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 1.)
self.assertEqual(p2.z, 2.)
p2 /= 2.
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 0.5)
self.assertEqual(p2.z, 1.)
p2 *= 2.
self.assertEqual(p2.x, 0.)
self.assertEqual(p2.y, 1.)
self.assertEqual(p2.z, 2.)
if __name__ == '__main__':
unittest.main()
| 0.003453 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for attention mechanism for images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import expert_utils
import tensorflow as tf
class AttentionType(object):
"""Types of attention type used in cia."""
LOCAL_1D = "local_1d"
LOCAL_2D = "local_2d"
GLOBAL = "global"
GLOCAL = "global_local"
DILATED = "dilated"
MOE_LOCAL_1D = "moe_local1d"
LOCAL_BLOCK = "local_block"
NON_CAUSAL_1D = "local_1d_noncausal"
RELATIVE_LOCAL_1D = "rel_local_1d"
@staticmethod
def get_choices():
return [
AttentionType.GLOBAL,
AttentionType.GLOCAL,
AttentionType.MOE_LOCAL_1D,
AttentionType.LOCAL_1D,
AttentionType.LOCAL_2D,
AttentionType.LOCAL_BLOCK,
AttentionType.DILATED,
AttentionType.NON_CAUSAL_1D,
AttentionType.RELATIVE_LOCAL_1D,
]
class DistributionType(object):
"""Types of distributions used in cia."""
CAT = "cat"
DMOL = "dmol"
@staticmethod
def get_choices():
return [
DistributionType.CAT,
DistributionType.DMOL,
]
def maybe_reshape_4d_to_3d(x):
"""Reshape input from 4D to 3D if necessary."""
x_shape = common_layers.shape_list(x)
is_4d = False
if len(x_shape) == 4:
x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]])
is_4d = True
return x, x_shape, is_4d
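# Shape-only illustration of the helper above (toy sizes): a [batch, height,
# width, depth] tensor is flattened along its spatial dims so the 1-D
# attention ops below can treat it as [batch, length, depth].
#
#   x of shape [2, 8, 8, 64]  -> x of shape [2, 64, 64], is_4d == True
#   x of shape [2, 64, 64]    -> returned unchanged,     is_4d == False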
def local_attention_2d(x, hparams, attention_type="local_attention_2d"):
"""Local 2d, self attention layer."""
# self-attention
with tf.variable_scope("local_2d_self_att"):
y = common_attention.multihead_attention_2d(
x,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
attention_type=attention_type,
query_shape=hparams.query_shape,
memory_flange=hparams.memory_flange,
name="self_attention")
return y
def local_within_block_attention(x,
self_attention_bias,
hparams,
attention_type="local_within_block_mask_right",
q_padding="VALID",
kv_padding="VALID"):
"""Local within block self attention."""
x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("local_within_block"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x_new, hparams),
None,
self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
name="local_within_block")
if is_4d:
y = tf.reshape(y, x_shape)
return y
def local_attention_1d(x,
hparams,
attention_type="local_unmasked",
q_padding="VALID",
kv_padding="VALID"):
"""Local 1d self attention."""
# self-attention
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("local_1d_self_att"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
shared_rel=hparams.shared_rel,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
make_image_summary=False,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
return y
def dilated_attention_1d(x,
hparams,
attention_type="masked_dilated_1d",
q_padding="VALID",
kv_padding="VALID",
gap_size=2):
"""Dilated 1d self attention."""
# self-attention
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("masked_dilated_1d"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
gap_size=gap_size,
num_memory_blocks=hparams.num_memory_blocks,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
y.set_shape([None, None, None, hparams.hidden_size])
return y
def local_global_attention(x,
self_attention_bias,
hparams,
q_padding="LEFT",
kv_padding="LEFT"):
"""Local and global 1d self attention."""
with tf.variable_scope("self_local_global_att"):
[x_global, x_local] = tf.split(x, 2, axis=-1)
split_hidden_size = int(hparams.hidden_size / 2)
split_heads = int(hparams.num_heads / 2)
if self_attention_bias is not None:
self_attention_bias = get_self_attention_bias(x)
y_global = common_attention.multihead_attention(
x_global,
None,
self_attention_bias,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="global_self_att")
y_local = common_attention.multihead_attention(
x_local,
None,
None,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
attention_type="local_masked",
block_length=hparams.block_length,
block_width=hparams.block_width,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="local_self_att")
y = tf.concat([y_global, y_local], axis=-1)
return y
def full_self_attention(x,
self_attention_bias,
hparams,
q_padding="LEFT",
kv_padding="LEFT"):
"""Full self-attention layer."""
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
if self_attention_bias is not None:
self_attention_bias = get_self_attention_bias(x)
with tf.variable_scope("self_att"):
y = common_attention.multihead_attention(
x,
None,
self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="self_att")
if is_4d:
y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]])
y.set_shape([None, None, None, hparams.hidden_size])
return y
def encdec_attention_1d(x,
encoder_output,
encoder_decoder_attention_bias,
hparams):
"""Local 1d self attention."""
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
encoder_output, _, _ = maybe_reshape_4d_to_3d(encoder_output)
with tf.variable_scope("encdec_attention"):
# Encoder Decoder attention
y = common_attention.multihead_attention(
x,
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
name="encdec_attention")
if is_4d:
y = tf.reshape(y, x_shape)
y.set_shape([None, None, None, hparams.hidden_size])
return y
def transformer_decoder_layers(inputs,
encoder_output,
num_layers,
hparams,
self_attention_bias=None,
encoder_decoder_attention_bias=None,
attention_type=AttentionType.LOCAL_2D,
losses=None,
name="transformer"):
"""Multi layer transformer."""
x = inputs
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
if attention_type == AttentionType.DILATED:
assert len(hparams.gap_sizes) == num_layers
for layer in range(num_layers):
with tf.variable_scope("%s_layer_%d" % (name, layer)):
# self-attention + skip connections
if attention_type == AttentionType.LOCAL_2D:
y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="masked_local_attention_2d")
elif attention_type == AttentionType.LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_mask_right",
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.RELATIVE_LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="rel_local_mask_right",
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.NON_CAUSAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_unmasked",
q_padding="VALID", kv_padding="VALID")
elif attention_type == AttentionType.LOCAL_BLOCK:
y = local_within_block_attention(
common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
attention_type="local_within_block_mask_right",
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.GLOCAL:
y = local_global_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.DILATED:
y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams, q_padding="LEFT",
kv_padding="LEFT",
gap_size=hparams.gap_sizes[layer])
elif attention_type == AttentionType.GLOBAL:
y = full_self_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding="LEFT", kv_padding="LEFT")
x = common_layers.layer_postprocess(x, y, hparams)
# enc-dec attention + skip connections
if encoder_output is not None:
y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams)
x = common_layers.layer_postprocess(x, y, hparams)
# feed-fwd layers + skip connections
y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams,
losses=losses)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams)
def transformer_encoder_layers(inputs,
num_layers,
hparams,
attention_type=AttentionType.GLOBAL,
self_attention_bias=None,
q_padding="VALID",
kv_padding="VALID",
name="transformer"):
"""Multi layer transformer encoder."""
x = inputs
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
for layer in range(num_layers):
# attention layers + skip connections
with tf.variable_scope("%s_layer_%d" % (name, layer)):
if attention_type == AttentionType.LOCAL_2D:
y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_attention_2d")
elif attention_type == AttentionType.LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_unmasked",
q_padding=q_padding, kv_padding=kv_padding)
elif attention_type == AttentionType.GLOBAL:
y = full_self_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding=q_padding, kv_padding=kv_padding)
x = common_layers.layer_postprocess(x, y, hparams)
# feed-fwd layer + skip connections
y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams)
def ffn_layer(x, hparams, losses=None):
"""ffn layer transformer."""
with tf.variable_scope("ffn"):
if hparams.ffn_layer == "none":
return x
if hparams.ffn_layer == "conv_hidden_relu":
y = common_layers.dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
elif hparams.ffn_layer == "normed_conv_hidden_relu":
y = common_layers.normed_conv_hidden_relu(
x,
hparams.norm_type,
hparams.layer_norm_epsilon,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
norm_name="convnorm")
elif hparams.ffn_layer == "self_attention_ffn":
x_shape = tf.shape(x)
x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size])
y = common_attention.ffn_self_attention_layer(
x, hparams.filter_size, hparams.hidden_size, hparams.num_parts,
hparams.attention_dropout, hparams.share_kv)
y = tf.reshape(y, x_shape)
elif hparams.ffn_layer == "local_moe_tpu":
overhead = (hparams.moe_overhead_train
if hparams.mode == tf.estimator.ModeKeys.TRAIN
else hparams.moe_overhead_eval)
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
y, loss = expert_utils.local_moe_tpu(
x, hparams.filter_size // 2,
hparams.hidden_size,
hparams.moe_num_experts, overhead=overhead,
loss_coef=hparams.moe_loss_coef)
if is_4d:
y = tf.reshape(y, x_shape)
if losses is None:
raise ValueError(
"transformer_ffn_layer with type local_moe_tpu must pass in "
"a losses list")
losses.append(loss)
else:
assert hparams.ffn_layer == "glu_ffn"
y = common_layers.gated_linear_unit_layer(x)
return y
def get_self_attention_bias(x):
"""Creates masked self attention bias.
Args:
x: A tensor of shape [batch, length, depth]
Returns:
    self_attention_bias: A lower-triangular bias tensor of shape
      [1, 1, length, length], suitable for adding to attention logits.
"""
x_shape = common_layers.shape_list(x)
self_attention_bias = common_attention.attention_bias_lower_triangle(
x_shape[1])
return self_attention_bias
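# A minimal, hedged shape sketch (illustration only, not part of the library):
# the bias above is added to attention logits of shape [batch, heads, length,
# length]. The constants below are assumptions chosen for the example.
if __name__ == "__main__":
  _x = tf.zeros([2, 5, 8])  # [batch, length, depth]
  _bias = get_self_attention_bias(_x)
  print(_bias.shape)  # expected: (1, 1, 5, 5)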
def postprocess_image(x, rows, cols, hparams):
"""Postprocessing after decoding.
Args:
x: Tensor of shape [batch, ...], where ... can be any rank such that the
number of elements in x is batch * rows * cols * hparams.hidden_size.
rows: Integer representing number of rows in a 2-D data point.
cols: Integer representing number of columns in a 2-D data point.
hparams: tf.contrib.training.HParams set.
Returns:
Tensor of shape [batch, rows, cols, depth], where depth is
hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In
the special case of inference and block raster scan order, it is a Tensor
of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width,
depth].
"""
batch = common_layers.shape_list(x)[0]
x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])
likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
if likelihood == DistributionType.DMOL:
depth = hparams.num_mixtures * 10
targets = tf.layers.dense(x,
depth,
use_bias=False,
activation=None,
name="output_conv")
else:
depth = 256
targets = tf.layers.dense(x,
depth,
use_bias=True,
activation=None,
name="output_conv")
if (hparams.mode == tf.contrib.learn.ModeKeys.INFER and
hparams.block_raster_scan):
y = targets
yshape = common_layers.shape_list(y)
block_length = hparams.query_shape[0]
block_width = hparams.query_shape[1]
# Break into block row wise.
y = tf.reshape(y,
[batch, yshape[1] // block_length, block_length,
yshape[2], depth])
yshape = common_layers.shape_list(y)
# Break into blocks width wise.
y_blocks = tf.reshape(y,
[batch, yshape[1], yshape[2],
yshape[3] // block_width, block_width, depth])
# Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length,
# block_width, depth].
targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])
return targets
def prepare_encoder(inputs, hparams, attention_type="local_1d"):
"""Prepare encoder for images."""
x = prepare_image(inputs, hparams, name="enc_channels")
# Add position signals.
x = add_pos_signals(x, hparams, "enc_pos")
x_shape = common_layers.shape_list(x)
if attention_type == "local_1d":
x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size])
x.set_shape([None, None, hparams.hidden_size])
elif attention_type == "local_2d":
x.set_shape([None, None, None, hparams.hidden_size])
return x
def prepare_decoder(targets, hparams):
"""Prepare decoder for images."""
targets_shape = common_layers.shape_list(targets)
channels = hparams.num_channels
curr_infer_length = None
# during training, images are [batch, IMG_LEN, IMG_LEN, 3].
# At inference, they are [batch, curr_infer_length, 1, 1]
if hparams.mode == tf.contrib.learn.ModeKeys.INFER:
curr_infer_length = targets_shape[1]
if hparams.block_raster_scan:
assert hparams.img_len*channels % hparams.query_shape[1] == 0
assert hparams.img_len % hparams.query_shape[0] == 0
total_block_width = hparams.img_len*channels
# Decoding is in block raster scan order. We divide the image into
# hparams.query_shape blocks and then decode each block in raster scan.
# To make that compatible with our inference pipeline, pad the target so
# that rows is a multiple of query_shape and columns is a multiple of
# hparams.img_len*channels
curr_infer_length = targets_shape[1]
block_padding_factor = total_block_width * hparams.query_shape[0]
targets = tf.pad(targets, [
[0, 0], [0, -curr_infer_length % block_padding_factor],
[0, 0], [0, 0]])
num_blocks = total_block_width // hparams.query_shape[1]
# Reshape the image to represent blocks
target_blocks = tf.reshape(
targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0],
hparams.query_shape[1]])
# Transpose to read the image in 2D fashion.
targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4])
else:
# add padding to make sure the size of targets is a multiple of img_height
# times number of channels. This is needed for positional encodings and
# for doing the RGB lookup.
padding_factor = channels * hparams.img_len
targets = tf.pad(targets, [
[0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]])
targets = tf.reshape(targets,
[targets_shape[0], -1, hparams.img_len, channels])
# Preprocess image
x = prepare_image(targets, hparams, name="dec_channels")
x_shape = common_layers.shape_list(x)
if (hparams.dec_attention_type == AttentionType.LOCAL_2D or
hparams.dec_attention_type == AttentionType.LOCAL_BLOCK):
x = common_attention.right_shift_blockwise(x, hparams.query_shape)
x = add_pos_signals(x, hparams, "dec_pos")
else:
# Add position signals
x = tf.reshape(x, [targets_shape[0],
x_shape[1]*x_shape[2], hparams.hidden_size])
x = common_layers.shift_right_3d(x)
x = tf.reshape(x, [targets_shape[0],
x_shape[1], x_shape[2], hparams.hidden_size])
x = add_pos_signals(x, hparams, "dec_pos")
x = common_layers.cast_like(x, targets)
return x, x_shape[1], x_shape[2]
def prepare_image(inputs, hparams, name=None):
"""Prepare image."""
inputs_shape = common_layers.shape_list(inputs)
batch = inputs_shape[0]
orig_rows = inputs_shape[1]
orig_cols = inputs_shape[2]
channels = hparams.num_channels
hidden_size = hparams.hidden_size
# TODO(trandustin): Check via modalities.IdentityModality and not its name.
# The current implementation is to avoid circular imports, modalities ->
# discretization -> common_image_attention -> modalities.
if "targets" in hparams.modality:
target_modality_name = hparams.modality["targets"].__name__
else:
target_modality_name = None
if target_modality_name == "IdentityModality":
inputs = tf.to_int32(inputs)
x = get_channel_embeddings(channels, inputs, hidden_size, name=name)
else:
x = inputs
x = tf.reshape(x, [batch, orig_rows, orig_cols * channels, hidden_size])
return x
def create_output(decoder_output, rows, cols, targets, hparams):
"""Creates output from decoder output and vars.
Args:
decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
that the number of elements is batch * rows * cols * hparams.hidden_size.
rows: Integer representing number of rows in a 2-D data point.
cols: Integer representing number of columns in a 2-D data point.
targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
hparams.num_channels].
hparams: tf.contrib.training.HParams set.
Returns:
Tensor of shape [batch, hparams.img_len, hparams.img_len,
hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
[batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
In the special case of predict mode, it is a Tensor of rank 5.
"""
decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
depth = common_layers.shape_list(decoded_image)[-1]
batch, height, width, channels = common_layers.shape_list(targets)
likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
if hparams.mode == tf.estimator.ModeKeys.PREDICT:
y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
output = y[:, :height, :, :, :]
elif likelihood == DistributionType.CAT:
# Unpack the cols dimension of the Categorical.
output = tf.reshape(decoded_image,
[batch, height, width, channels, depth])
else:
output = decoded_image
return output
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"):
"""Get separate embedding for each of the channels."""
targets_split = tf.split(targets, io_depth, axis=3)
rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
[256 * io_depth, hidden_size])
rgb_embedding_var = tf.identity(rgb_embedding_var)
rgb_embedding_var *= float(hidden_size)**0.5
channel_target_embs = []
for i in range(io_depth):
# Adding the channel offsets to get the right embedding since the
# embedding tensor has shape 256 * io_depth, hidden_size
target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256
target_embs = common_layers.gather(rgb_embedding_var, target_ids)
channel_target_embs.append(target_embs)
return tf.concat(channel_target_embs, axis=-1)
def add_pos_signals(x, hparams, name="pos_emb"):
with tf.variable_scope(name, reuse=False):
if hparams.pos == "timing":
x = common_attention.add_timing_signal_nd(x)
else:
assert hparams.pos == "emb"
x = common_attention.add_positional_embedding_nd(
x, hparams.max_length, name)
return x
| 0.007596 |
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
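# Hedged usage sketch (illustration only; the Flask app below is an assumption,
# not part of this module): wrap the WSGI callable so the X-Script-Name and
# X-Scheme headers set by the proxy take effect.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.wsgi_app = ReverseProxied(app.wsgi_app)
    app.run()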
| 0.002562 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.ml2 import config as config
from neutron.tests.unit import test_db_plugin as test_plugin
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'test'],
'ml2')
self.addCleanup(config.cfg.CONF.reset)
super(PortBindingTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.start_rpc_listener()
def _check_response(self, port, vif_type, has_port_filter, bound):
self.assertEqual(port[portbindings.VIF_TYPE], vif_type)
vif_details = port[portbindings.VIF_DETAILS]
if bound:
# TODO(rkukura): Replace with new VIF security details
self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
has_port_filter)
def _test_port_binding(self, host, vif_type, has_port_filter, bound):
host_arg = {portbindings.HOST_ID: host}
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
self._check_response(port['port'], vif_type, has_port_filter,
bound)
port_id = port['port']['id']
details = self.plugin.callbacks.get_device_details(
None, agent_id="theAgentId", device=port_id)
if bound:
self.assertEqual(details['network_type'], 'local')
else:
self.assertNotIn('network_type', details)
def test_unbound(self):
self._test_port_binding("",
portbindings.VIF_TYPE_UNBOUND,
False, False)
def test_binding_failed(self):
self._test_port_binding("host-fail",
portbindings.VIF_TYPE_BINDING_FAILED,
False, False)
def test_binding_no_filter(self):
self._test_port_binding("host-ovs-no_filter",
portbindings.VIF_TYPE_OVS,
False, True)
def test_binding_filter(self):
self._test_port_binding("host-bridge-filter",
portbindings.VIF_TYPE_BRIDGE,
True, True)
def _test_update_port_binding(self, host, new_host=None):
with mock.patch.object(self.plugin,
'_notify_port_updated') as notify_mock:
host_arg = {portbindings.HOST_ID: host}
update_body = {'name': 'test_update'}
if new_host is not None:
update_body[portbindings.HOST_ID] = new_host
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
neutron_context = context.get_admin_context()
updated_port = self._update('ports', port['port']['id'],
{'port': update_body},
neutron_context=neutron_context)
port_data = updated_port['port']
if new_host is not None:
self.assertEqual(port_data[portbindings.HOST_ID],
new_host)
else:
self.assertEqual(port_data[portbindings.HOST_ID], host)
if new_host is not None and new_host != host:
notify_mock.assert_called_once_with(mock.ANY)
else:
self.assertFalse(notify_mock.called)
def test_update_with_new_host_binding_notifies_agent(self):
self._test_update_port_binding('host-ovs-no_filter',
'host-bridge-filter')
def test_update_with_same_host_binding_does_not_notify(self):
self._test_update_port_binding('host-ovs-no_filter',
'host-ovs-no_filter')
def test_update_without_binding_does_not_notify(self):
self._test_update_port_binding('host-ovs-no_filter')
    def test_update_from_empty_to_host_binding_notifies_agent(self):
self._test_update_port_binding('', 'host-ovs-no_filter')
def test_update_from_host_to_empty_binding_notifies_agent(self):
self._test_update_port_binding('host-ovs-no_filter', '')
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.forms import fields, widgets
class DynamicSelectWidget(widgets.Select):
"""
A subclass of the ``Select`` widget which renders extra attributes for use
in callbacks to handle dynamic changes to the available choices.
"""
_data_add_url_attr = "data-add-item-url"
def render(self, *args, **kwargs):
add_item_url = self.get_add_item_url()
if add_item_url is not None:
self.attrs.update({self._data_add_url_attr: add_item_url})
return super(DynamicSelectWidget, self).render(*args, **kwargs)
def get_add_item_url(self):
if callable(self.add_item_link):
return self.add_item_link()
try:
if self.add_item_link_args:
return urlresolvers.reverse(self.add_item_link,
args=[self.add_item_link_args])
else:
return urlresolvers.reverse(self.add_item_link)
except urlresolvers.NoReverseMatch:
return self.add_item_link
class DynamicChoiceField(fields.ChoiceField):
"""
A subclass of ``ChoiceField`` with additional properties that make
dynamically updating its elements easier.
Notably, the field declaration takes an extra argument, ``add_item_link``
which may be a string or callable defining the URL that should be used
for the "add" link associated with the field.
"""
widget = DynamicSelectWidget
def __init__(self,
add_item_link=None,
add_item_link_args=None,
*args,
**kwargs):
super(DynamicChoiceField, self).__init__(*args, **kwargs)
self.widget.add_item_link = add_item_link
self.widget.add_item_link_args = add_item_link_args
class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField):
""" Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``. """
pass
| 0 |
# stdlib
from hashlib import md5
import time
# 3rd party
import requests
# project
from checks import AgentCheck
class Mesos(AgentCheck):
SERVICE_CHECK_NAME = "mesos.can_connect"
def check(self, instance):
if 'url' not in instance:
raise Exception('Mesos instance missing "url" value.')
# Load values from the instance config
url = instance['url']
instance_tags = instance.get('tags', [])
default_timeout = self.init_config.get('default_timeout', 5)
timeout = float(instance.get('timeout', default_timeout))
response = self.get_master_roles(url, timeout)
if response is not None:
for role in response['roles']:
tags = ['role:' + role['name']] + instance_tags
self.gauge('mesos.role.frameworks', len(role['frameworks']), tags=tags)
self.gauge('mesos.role.weight', role['weight'], tags=tags)
resources = role['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.role.' + attr, resources[attr], tags=tags)
response = self.get_master_stats(url, timeout)
if response is not None:
tags = instance_tags
for key in iter(response):
self.gauge('mesos.stats.' + key, response[key], tags=tags)
response = self.get_master_state(url, timeout)
if response is not None:
tags = instance_tags
for attr in ['deactivated_slaves','failed_tasks','finished_tasks','killed_tasks','lost_tasks','staged_tasks','started_tasks']:
self.gauge('mesos.state.' + attr, response[attr], tags=tags)
for framework in response['frameworks']:
tags = ['framework:' + framework['id']] + instance_tags
resources = framework['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.state.framework.' + attr, resources[attr], tags=tags)
for slave in response['slaves']:
tags = ['mesos','slave:' + slave['id']] + instance_tags
resources = slave['resources']
for attr in ['cpus','mem','disk']:
if attr in resources:
self.gauge('mesos.state.slave.' + attr, resources[attr], tags=tags)
def get_master_roles(self, url, timeout):
return self.get_json(url + "/master/roles.json", timeout)
def get_master_stats(self, url, timeout):
return self.get_json(url + "/master/stats.json", timeout)
def get_master_state(self, url, timeout):
return self.get_json(url + "/master/state.json", timeout)
def get_json(self, url, timeout):
# Use a hash of the URL as an aggregation key
aggregation_key = md5(url).hexdigest()
tags = ["url:%s" % url]
msg = None
status = None
try:
r = requests.get(url, timeout=timeout)
if r.status_code != 200:
self.status_code_event(url, r, aggregation_key)
status = AgentCheck.CRITICAL
msg = "Got %s when hitting %s" % (r.status_code, url)
else:
status = AgentCheck.OK
msg = "Mesos master instance detected at %s " % url
except requests.exceptions.Timeout as e:
# If there's a timeout
self.timeout_event(url, timeout, aggregation_key)
msg = "%s seconds timeout when hitting %s" % (timeout, url)
status = AgentCheck.CRITICAL
except Exception as e:
msg = str(e)
status = AgentCheck.CRITICAL
finally:
self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg)
if status is AgentCheck.CRITICAL:
self.warning(msg)
return None
return r.json()
def timeout_event(self, url, timeout, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
'msg_title': 'URL timeout',
'msg_text': '%s timed out after %s seconds.' % (url, timeout),
'aggregation_key': aggregation_key
})
def status_code_event(self, url, r, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
            'msg_title': 'Invalid response code for %s' % url,
'msg_text': '%s returned a status of %s' % (url, r.status_code),
'aggregation_key': aggregation_key
})
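# Hedged configuration sketch (values are illustrative, not prescriptive).
# The check reads `url`, and optionally `tags` and `timeout`, from each
# instance, plus `default_timeout` from init_config, e.g.:
#
#   init_config:
#     default_timeout: 5
#
#   instances:
#     - url: http://localhost:5050
#       tags:
#         - cluster:dev
#       timeout: 10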
| 0.003625 |
import mxnet as mx
from common import legacy_conv_act_layer
from common import multibox_layer
def get_symbol_train(num_classes=20, nms_thresh=0.5, force_suppress=False, nms_topk=400):
"""
    Single-shot multi-box detection (SSD) with a VGG-16 ConvNet backbone.
    This is a modified version in which the fc6/fc7 layers are replaced by
    convolutional layers, so the network is slightly smaller than the original
    VGG-16. This variant is the training network and includes the losses.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name="label")
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), \
pooling_convention="full", name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(3, 3), stride=(1, 1),
pad=(1,1), name="pool5")
# group 6
conv6 = mx.symbol.Convolution(
data=pool5, kernel=(3, 3), pad=(6, 6), dilate=(6, 6),
num_filter=1024, name="conv6")
relu6 = mx.symbol.Activation(data=conv6, act_type="relu", name="relu6")
# drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
conv7 = mx.symbol.Convolution(
data=relu6, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="conv7")
relu7 = mx.symbol.Activation(data=conv7, act_type="relu", name="relu7")
# drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
### ssd extra layers ###
conv8_1, relu8_1 = legacy_conv_act_layer(relu7, "8_1", 256, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv8_2, relu8_2 = legacy_conv_act_layer(relu8_1, "8_2", 512, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv9_1, relu9_1 = legacy_conv_act_layer(relu8_2, "9_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv9_2, relu9_2 = legacy_conv_act_layer(relu9_1, "9_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv10_1, relu10_1 = legacy_conv_act_layer(relu9_2, "10_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv10_2, relu10_2 = legacy_conv_act_layer(relu10_1, "10_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv11_1, relu11_1 = legacy_conv_act_layer(relu10_2, "11_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_2, relu11_2 = legacy_conv_act_layer(relu11_1, "11_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv12_1, relu12_1 = legacy_conv_act_layer(relu11_2, "12_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv12_2, relu12_2 = legacy_conv_act_layer(relu12_1, "12_2", 256, kernel=(4,4), pad=(1,1), \
stride=(1,1), act_type="relu", use_batchnorm=False)
# specific parameters for VGG16 network
from_layers = [relu4_3, relu7, relu8_2, relu9_2, relu10_2, relu11_2, relu12_2]
sizes = [[.07, .1025], [.15,.2121], [.3, .3674], [.45, .5196], [.6, .6708], \
[.75, .8216], [.9, .9721]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1, -1]
steps = [ x / 512.0 for x in [8, 16, 32, 64, 128, 256, 512]]
num_channels = [512]
loc_preds, cls_preds, anchor_boxes = multibox_layer(from_layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_channels, clip=False, interm_layer=0, steps=steps)
tmp = mx.contrib.symbol.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.contrib.symbol.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False, nms_topk=400):
"""
    Single-shot multi-box detection (SSD) with a VGG-16 ConvNet backbone.
    This is a modified version in which the fc6/fc7 layers are replaced by
    convolutional layers, so the network is slightly smaller than the original
    VGG-16. This variant is the inference (detection) network.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
cls_preds = net.get_internals()["multibox_cls_pred_output"]
loc_preds = net.get_internals()["multibox_loc_pred_output"]
anchor_boxes = net.get_internals()["multibox_anchors_output"]
cls_prob = mx.symbol.SoftmaxActivation(data=cls_preds, mode='channel', \
name='cls_prob')
out = mx.contrib.symbol.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
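# Hedged usage sketch (illustrative values only): build the training symbol
# for a 20-class setup and list its grouped outputs.
if __name__ == '__main__':
    net = get_symbol_train(num_classes=20, nms_thresh=0.45)
    print(net.list_outputs())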
| 0.015313 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keyring based Storage.
A Storage for Credentials that uses the keyring module.
"""
import threading
import keyring
from oauth2client import client
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
class Storage(client.Storage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is
not installed with oauth2client by default because it does not work on all
the platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a
    cross-platform library for accessing the keyring capabilities of the local
system. The user will be prompted for their keyring password when this
module is used, and the manner in which the user is prompted will vary per
platform.
Usage::
from oauth2client import keyring_storage
s = keyring_storage.Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the
credentials are stored.
user_name: string, The name of the user to store credentials for.
"""
super(Storage, self).__init__(lock=threading.Lock())
self._service_name = service_name
self._user_name = user_name
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
| 0 |
from time import clock
from itertools import tee
from inspect import getsourcelines
from os import getcwd
import readline as rl
class ArgLess(object):
def __init__(self, func, args=[], kwargs={}, doneStr='', verbose=False):
self.f = func
self.d = doneStr
self.a = args
self.k = kwargs
self.v = verbose
def __call__(self, *args, **kwargs):
kwargs.update(self.k)
if args:
return self.f(*args, **kwargs)
return self.f(*self.a, **kwargs)
    def __repr__(self):
        # __repr__ must always return a string; non-verbose callables
        # (e.g. history) print for themselves, so fall back to the done-string.
        res = self()
        if self.v:
            if not self.d:
                return str(res)
            print(res, end='')
        return self.d
def history(start=0, end=None, concat=False):
if end is None:
end = rl.get_current_history_length()
if start < 0:
start += rl.get_current_history_length()
if concat:
print(';'.join(rl.get_history_item(i+1) for i in range(start, end)))
return
for i in range(start, end):
print(str(i+1)+":", rl.get_history_item(i+1))
hist = ArgLess(history)
pwd = ArgLess(getcwd, verbose=True)
def whist(fname):
rl.write_history_file(fname)
def rhist(fname):
code = []
with open(fname) as fd:
for line in fd:
if not line.strip():
continue
code.append('try:')
code.append(' '+line.strip())
code.append(' rl.add_history("'+line.strip().replace('"', '\\"')+'")')
code.append('except: pass')
while True:
try:
exec("\n".join(code).encode(), globals())
break
except SyntaxError as e:
print(e, code[e.lineno-1])
for _ in range(4):
del code[e.lineno-2]
return code
def printlist(lst, width=None, delim="\n"):
try:
if width is not None:
form = ('{:<'+str(width)+'}')*len(lst[0])
print(delim.join(form.format(*y) for y in lst))
else:
print(delim.join((" ".join((str(x) for x in y)) for y in lst)))
except TypeError:
print(delim.join((str(y) for y in lst)))
def printfunc(func):
print(''.join(getsourcelines(func)[0]))
def lookin(module, string, nodir=False):
lst = dir(module)
if nodir:
lst = module
for x in lst:
if string.lower() in x.lower():
print(x)
def frange(stop, start=None, step=1, decimals=None):
if start is not None:
start, stop = stop, start
else:
start = 0
if decimals is None:
decimals = 0
for x in (start, step):
if x is None:
continue
strx = str(x).split('.')
cdec = 0 if len(strx) == 1 else len(strx[1])
decimals = max(decimals, cdec)
while start < stop:
yield round(start, decimals)
start += step
def raToSex(coord):
h = coord*24/360.0
m = (h-int(h))*60
s = (m-int(m))*60
return int(h), int(m), s
def decToSex(coord):
sign = coord/abs(coord)
deg = abs(coord)
m = (deg-int(deg))*60
s = (m-int(m))*60
deg = sign*int(deg)
if not deg:
m = sign*int(m)
if not m:
s = sign*s
return deg, int(m), s
def timer(com, iters=1):
got_e = None
t = 0
for _ in range(iters):
begin = clock()
try:
com()
end = clock()
except Exception as e:
end = clock()
got_e = e
t += (end-begin)
if got_e is not None:
print("Got exception", got_e)
return t/iters
def timelist(l):
def check():
for _ in l:
pass
return check
class RomanConversion(object):
numerals = (('M', 1000), ('D', 500), ('C', 100), ('L', 50), ('X', 10), ('V', 5), ('I', 1))
@staticmethod
def toRoman(num):
numerals = RomanConversion.numerals
div = 5
result = ''
for index, (numeral, value) in enumerate(numerals):
div = 5 if div == 2 else 2
amount = num//value
if div == 2 and amount == 4 and numeral != 'M':
# If amount > 4 we have a problem
result += numeral + numerals[index-1][0]
elif (div == 5 and numeral != 'I' and num//numerals[index+1][1] == 9
and numeral != 'M'):
result += numerals[index+1][0] + numerals[index-1][0]
value = numerals[index+1][1]
else:
result += numeral * amount # 3 tops, if not M
num %= value
return result
@staticmethod
def toInt(numeral):
numeral = numeral.upper()
numerals = dict(RomanConversion.numerals)
res = 0
skip = False
for i, roman in enumerate(numeral):
if skip:
skip = False
continue
if i < len(numeral)-1 and numerals[roman] < numerals[numeral[i+1]]:
res += numerals[numeral[i+1]] - numerals[roman]
skip = True
else:
res += numerals[roman]
return res
def closest(lst, value):
    if value <= lst[0]:
        raise ValueError("Value too low")
    if value >= lst[-1]:
        raise ValueError("Value too high")
    start = 0
    end = len(lst)-1
    while end - start > 1:
        mid = (end+start)//2
if value == lst[mid]:
return mid
if value >= lst[mid]:
start = mid
else:
end = mid
return (start, end, (lst[end]-value)/(lst[end]-lst[start]),
(value-lst[start])/(lst[end]-lst[start]))
def nwise(iterable, n=2, overlap=True):
if overlap:
iterators = tee(iterable, n)
for i in range(len(iterators)):
for j in range(i):
next(iterators[i], None)
return zip(*iterators)
return zip(*[iter(iterable)]*n)
def lmap(a, b):
return list(map(a, b))
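if __name__ == '__main__':
    # Hedged usage sketch (illustration only) for a few of the helpers above.
    print(lmap(tuple, nwise(range(5), 2)))                 # [(0, 1), (1, 2), (2, 3), (3, 4)]
    print(lmap(tuple, nwise(range(6), 3, overlap=False)))  # [(0, 1, 2), (3, 4, 5)]
    print(list(frange(0, 1, 0.25)))                        # [0, 0.25, 0.5, 0.75]
    print(RomanConversion.toRoman(42), RomanConversion.toInt('XLII'))  # XLII 42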
| 0.000839 |
##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
import Gaffer
import GafferUI
import GafferCortexUI
class ClassParameterValueWidget( GafferCortexUI.CompoundParameterValueWidget ) :
def __init__( self, parameterHandler, collapsible=None, **kw ) :
GafferCortexUI.CompoundParameterValueWidget.__init__(
self,
parameterHandler,
collapsible,
_PlugValueWidget,
**kw
)
class _PlugValueWidget( GafferCortexUI.CompoundParameterValueWidget._PlugValueWidget ) :
def __init__( self, parameterHandler, collapsed ) :
GafferCortexUI.CompoundParameterValueWidget._PlugValueWidget.__init__( self, parameterHandler, collapsed )
def _headerWidget( self ) :
result = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 0 )
# label
label = GafferUI.Label(
"Class" if self._collapsible() is not None else self._parameterLabelText( self.parameterHandler() ),
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right
)
## \todo Decide how we allow this sort of tweak using the public
# interface. Perhaps we should have a SizeableContainer or something?
label._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
label.setToolTip( self._parameterToolTip( self._parameterHandler() ) )
result.append( label )
# space
result.append( GafferUI.Spacer( imath.V2i( 8, 1 ) ) )
# class button
className, classVersion = self._parameter().getClass( True )[1:3]
classButton = GafferUI.MenuButton( className if className else "Choose...", hasFrame=False )
classButton.setMenu( self.__classMenu() )
result.append( classButton )
# version button
if className :
versionButton = GafferUI.MenuButton( " v%d" % classVersion if className else "", hasFrame=False )
versionButton.setMenu( self.__versionMenu() )
result.append( versionButton )
# a spacer to stop the buttons expanding
result.append( GafferUI.Spacer( imath.V2i( 1, 1 ), imath.V2i( 9999999, 1 ) ), expand=True )
return result
def __classMenu( self ) :
md = IECore.MenuDefinition()
classInfo = self._parameter().getClass( True )
classNameFilter = "*"
with IECore.IgnoredExceptions( KeyError ) :
classNameFilter = self._parameter().userData()["UI"]["classNameFilter"].value
menuPathStart = max( 0, classNameFilter.find( "*" ) )
if classInfo[1] :
md.append(
"/Remove", { "command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), "", 0 ) }
)
md.append( "/RemoveDivider", { "divider" : True } )
loader = IECore.ClassLoader.defaultLoader( classInfo[3] )
for className in loader.classNames( classNameFilter ) :
classVersions = loader.versions( className )
for classVersion in classVersions :
menuPath = "/" + className[menuPathStart:]
if len( classVersions ) > 1 :
menuPath += "/v%d" % classVersion
md.append(
menuPath,
{
"command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), className, classVersion ),
"active" : className != classInfo[1] or classVersion != classInfo[2]
},
)
return GafferUI.Menu( md )
def __versionMenu( self ) :
md = IECore.MenuDefinition()
classInfo = self._parameter().getClass( True )
if classInfo[1] :
loader = IECore.ClassLoader.defaultLoader( classInfo[3] )
for version in loader.versions( classInfo[1] ) :
md.append(
"/v%d" % version,
{
"command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), classInfo[1], version ),
"active" : version != classInfo[2],
},
)
return GafferUI.Menu( md )
def __setClass( self, className, classVersion ) :
with self.getPlug().node().parameterModificationContext() :
self._parameter().setClass( className, classVersion )
GafferCortexUI.ParameterValueWidget.registerType( IECore.ClassParameter, ClassParameterValueWidget )
| 0.040834 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 12,500 ADN:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 12400 ADN to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 12190)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 290)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 12400
outputs[change_address] = 12480 - 12400 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two spends, one from the "foo" account and one from "bar"
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500ADN for another
# matured block, minus 400, minus 200, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 12190+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 1000ADN for
# two more matured blocks, minus 12400 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 1000 - 12400 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 12190)
assert_equal(self.nodes[0].getbalance("bar"), 290)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-12190
- 290
-12400
+ 1000
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 12400)
if __name__ == '__main__':
TxnMallTest().main()
| 0.004419 |
import calendar
import json
from datetime import date, timedelta, datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import os
import random
import time
import sys
def convert_list_to_dict(recs, col_name):
new_dict = {}
for r in recs:
new_dict[r[col_name]] = r
return new_dict
def get_last_price(price_recs, today):
current_day = today
symbol = price_recs.values()[0]['symbol']
for x in range(0, -60, -1):
try:
return price_recs[current_day]['price']
except KeyError:
current_day = add_days_to_date(today, x)
continue
print('[stock_common] No closer price for {0} {1}'.format(symbol, today))
def add_months(sourcedate, months):
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    # `date` is imported from datetime above; calling datetime.date() here would fail.
    return date(year, month, day)
def convert_datetime_to_num(date_recs):
    # date2num comes from matplotlib; each record is a dict with a 'date' key.
    from matplotlib.dates import date2num
    dates = [dd['date'] for dd in date_recs]
    dates = convert_str_to_datetime(dates)
    return [date2num(dd) for dd in dates]
def convert_str_to_datetime(dates_strings, slash_format=True):
dates = []
for date_string in dates_strings:
if not slash_format:
dates.append(datetime.strptime(date_string, "%d-%m-%Y"))
else:
dates.append(datetime.strptime(date_string, "%m/%d/%Y %H"))
return dates
def find_market_open_day_backward(today):
return find_market_open_day(today, False)
def find_market_open_day_forward(today):
return find_market_open_day(today, True)
def find_market_open_day(today, is_forward):
if is_forward:
inc = 1
num_days = 10
else:
inc = -1
num_days = -10
current_day = today
for x in range(0, num_days, inc):
if is_forward:
print('-==== {0}'.format(current_day))
if is_market_open(current_day):
return current_day
else:
current_day = add_days_to_date(today, x)
print('[stock_common] Market closed for a little too long {0}'.format(today))
sys.exit(0)
def is_market_open(today):
return os.path.isfile('/Users/osarood/work/cool_predictions/cool_predictions/price_by_date/'+today+'.json')
def get_year(date_str):
return date_str.split('-')[0]
def get_epoch_time(date_str, slash_format=True, is_hourly=False):
if not slash_format:
if is_hourly:
pattern = '%Y-%m-%d %H'
else:
pattern = '%Y-%m-%d'
else:
if is_hourly:
pattern = '%m/%d/%Y %H'
else:
pattern = '%m/%d/%Y'
#pattern = '%m/%d/%Y'
return int(time.mktime(time.strptime(date_str, pattern)))
def get_random_stocks(stock_list, NUM_STOCKS):
if NUM_STOCKS > len(stock_list):
print('We don\'t have enough history!')
sys.exit(0)
# return [s['symbol'] for s in stock_list[NUM_STOCKS:]]
random_stocks = []
while True:
random_stock = random.choice(stock_list)
if random_stock['symbol'] not in random_stocks:
random_stocks.append({'symbol': random_stock['symbol']})
if len(random_stocks) == NUM_STOCKS:
return random_stocks
def calculate_price_gains(price_recs):
base_price, start_date = starting_price(price_recs)
for price_rec in price_recs.values():
price_rec['price_gain'] = (float(price_rec['price']) - base_price)/base_price * 100.0
price_rec['base_price'] = base_price
return price_recs, base_price, start_date
def starting_price(price_recs):
prices_recs_sorted_date = sort_list([x for x in price_recs.values()], 'date', False)
return prices_recs_sorted_date[0]['price'], prices_recs_sorted_date[0]['date']
def ending_price(price_recs):
Y = [x for x in price_recs.values()]
return sort_list(Y, 'date', False)[len(price_recs)-1]['price']
def current_market_cap(price_recs):
Y = [x for x in price_recs.values()]
#print('==dsada= {0}'.format(sort_list(Y, 'date', False)[-1]))
return sort_list(Y, 'date', False)[-1]['market_cap']
def get_date_iterator(start, end):
start_date_list = start.split('-')
end_date_list = end.split('-')
return dategenerator(date(int(start_date_list[0]), int(start_date_list[1]), int(start_date_list[2])),
date(int(end_date_list[0]), int(end_date_list[1]), int(end_date_list[2])))
def get_historical_data(symbol, start, end, historical_data_mgr):
all_historical = historical_data_mgr.get_stock_historical_data(symbol)
#print('{0}'.format(all_historical))
date_range = {}
start_date_list = start.split('-')
end_date_list = end.split('-')
for dt in get_date_iterator(start, end):
date_str = dt.strftime("%Y-%m-%d")
day_data = all_historical.get(date_str, 'missing_data')
#print('DT: {0} {1}'.format(dt.strftime("%Y-%m-%d"), day_data))
if day_data != 'missing_data':
date_range[date_str] = day_data
if len(date_range) == 0:
return None, None
#print('====> {0} {1}'.format(symbol, date_range))
return all_historical, date_range
#TODO: This method should go to HistoricalDataManager
def get_stats_for_period(filtered_stock_list, start, end, historical_data_mgr):
days_active = {}
active_stock_list = []
for idx, stock_info in enumerate(filtered_stock_list):
stock = {'symbol': stock_info['symbol']}
all_historical, price_recs = get_historical_data(stock_info['symbol'], start, end, historical_data_mgr)
# no recs found or 1 price record found
if price_recs == None or len(price_recs) <= 1:
days_active[stock['symbol']] = 0
else:
days_active[stock['symbol']] = len(price_recs)
price_list = [price_rec['price'] for price_rec in price_recs.values()]
start_price, start_date = starting_price(price_recs)
end_price = ending_price(price_recs)
if stock['symbol'] not in ['^IXIC']:
try:
stock['market_cap'] = current_market_cap(price_recs)
except KeyError:
stock['market_cap'] = 0.0
#print('[{4}] ===== {0} {1} {2} {3}'.format(stock_symbol, start_price, end_price, start_date, idx))
stock['gain'] = (end_price - start_price) / start_price * 100.0
stock['price_list'] = price_recs
stock['name'] = stock_info['name']
stock['all_history'] = all_historical
stock['price_list'], stock['cost_price'], \
stock['start_date'] = calculate_price_gains(stock['price_list'])
active_stock_list.append(stock)
if not active_stock_list:
print('ERROR: ******** No stock active ****** ({0}, {1})'.format(start, end))
sys.exit(0)
else:
print('[stock_common] Got {0} stocks. Number of active stocks {1}'.format(
len(filtered_stock_list),
len(active_stock_list)
))
return remove_stocks_with_missing_prices_data(days_active, active_stock_list)
def remove_stocks_with_missing_prices_data(days_active, active_stock_list):
# filter 'new' stocks which have less price records
MAX_DAYS_ACTIVE = median([x for x in days_active.values()])
final_stock_list = []
for idx, stock in enumerate(active_stock_list):
if days_active[stock['symbol']] >= MAX_DAYS_ACTIVE:
final_stock_list.append(stock)
else:
if False: #disable this print
print("Removing NEW stock {0} recs: {1} {2}".format(stock['symbol'],
days_active[stock['symbol']],
MAX_DAYS_ACTIVE
))
print('Num stock after checking for missing data ---> {0}'.format(len(final_stock_list)))
    if not final_stock_list:
        print('ALERT!!! Returning empty list after filtering out stocks with missing price data')
return final_stock_list
def add_days_to_date(BASE_DATE_STR, days_to_add):
b_date = BASE_DATE_STR.split('-')
BASE_END = str(date(int(b_date[0]), int(b_date[1]), int(b_date[2])) + \
relativedelta(days=days_to_add)
)
return str(BASE_END)
def add_months_to_date(BASE_DATE_STR, months_to_add):
b_date = BASE_DATE_STR.split('-')
BASE_END = str(date(int(b_date[0]), int(b_date[1]), int(b_date[2])) + \
relativedelta(months=months_to_add)
)
return str(BASE_END)
def dategenerator(start, end):
current = start
while current <= end:
yield current
current += timedelta(days=1)
def adjust_stock_split(price_recs, split_date, split_factor):
for date, price_rec in price_recs.iteritems():
if price_rec['date'] < split_date:
price_rec['price'] = price_rec['price'] / split_factor
return price_recs
def detect_split(price_recs):
Y = [x for x in price_recs.values()]
prices_sorted_by_date = [x for x in sort_list(Y, 'date', False)]
idx = 1
while True:
split_mult = prices_sorted_by_date[idx-1]['price'] / prices_sorted_by_date[idx]['price']
# use 1.9 since stock can jump a bit after split. For example GOOGL
if split_mult >= 1.9:
# divide all prices older than price_rec[idx]['date'] by split_mult
return True, prices_sorted_by_date[idx]['date'], split_mult
if idx == len(prices_sorted_by_date)-1:
return False, None, None
else:
idx += 1
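# Illustrative helper (editor's addition, not part of the original module):
# shows how detect_split and adjust_stock_split above are meant to be combined
# so that all prices are back-adjusted for every detected split. Assumes
# price_recs holds at least two records, as in get_stats_for_period.
def _normalize_price_recs_for_splits(price_recs):
    has_split, split_date, split_factor = detect_split(price_recs)
    while has_split:
        price_recs = adjust_stock_split(price_recs, split_date, split_factor)
        has_split, split_date, split_factor = detect_split(price_recs)
    return price_recs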
def median(mylist):
sorts = sorted(mylist)
length = len(sorts)
if length == 0:
return None
if length == 1:
return sorts[0]
if not length % 2:
return (sorts[length / 2] + sorts[length / 2 - 1]) / 2.0
return sorts[length / 2]
def basic_stock_info_sorted(file_name, sort_field, current_symbols):
stock_list = read_stock_list(file_name)
stock_list = [stock for stock in stock_list if stock['symbol'] not in current_symbols]
print('Read {0} companies info'.format(len(stock_list)))
filtered_stock_list = stock_list
return sort_list(filtered_stock_list, 'market_cap', True)
def sort_list(stock_list, key_to_sort, s_order=False):
return sorted(stock_list, key=itemgetter(key_to_sort), reverse=s_order)
| 0.007422 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping from schema to backend properties."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
import copy
import json
class Property(object):
"""Property."""
def __init__(
self, name, label, property_type, select_data=None, description=None,
optional=False, extra_schema_dict_values=None):
self._name = name
self._label = label
self._property_type = property_type
self._select_data = select_data
self._description = description
self._optional = optional
self._extra_schema_dict_values = extra_schema_dict_values or {}
def __str__(self):
return '%s#%s' % (self._name, self._property_type)
@property
def type(self):
return self._property_type
@property
def name(self):
return self._name
@property
def description(self):
return self._description
@property
def extra_schema_dict_values(self):
return self._extra_schema_dict_values
@property
def label(self):
return self._label
def set_select_data(self, select_data):
self._select_data = select_data
def get_display_dict(self):
return {
'name': self._name,
'label': self._label,
'description': self._description,
}
class Registry(object):
"""Registry is a collection of Property's."""
def __init__(self, title, description=None, extra_schema_dict_values=None):
self._title = title
self._registry = {'id': title, 'type': 'object'}
self._description = description
if description:
self._registry['description'] = description
self._extra_schema_dict_values = extra_schema_dict_values
self._properties = []
self._sub_registries = collections.OrderedDict()
@property
def title(self):
return self._title
@property
def sub_registries(self):
return self._sub_registries
def add_property(self, schema_field):
"""Add a Property to this Registry."""
self._properties.append(schema_field)
def get_property(self, property_name):
for prop in self._properties:
if prop.name == property_name:
return prop
return None
def get_sub_registry(self, sub_registry_name):
return self._sub_registries.get(sub_registry_name)
def remove_property(self, property_name):
prop = self.get_property(property_name)
if prop:
return self._properties.pop(self._properties.index(prop))
def add_sub_registry(
self, name, title=None, description=None, registry=None):
"""Add a sub registry to for this Registry."""
if not registry:
registry = Registry(title, description)
self._sub_registries[name] = registry
return registry
def has_subregistries(self):
return True if self._sub_registries else False
def get_display_dict(self):
return {
'title': self._title,
'properties': [p.get_display_dict() for p in self._properties],
'registries': [r.get_display_dict()
for r in self._sub_registries.itervalues()],
}
def clone_only_items_named(self, paths):
"""Clone only the selected items from a registry.
Args:
paths: Each item is a path into the schema, with slashes as
separators. E.g., "foo" would match things at the top level
named "foo". Similarly, 'foo/bar/baz' looks in sub-schema
"foo" for a sub-schema "bar", and within that, "baz." The
returned schema would include not just the leaf item, but
sub-registry 'foo' containing 'bar', containing 'baz'.
NOTE - Schema hierarchy components are stored separately from
properties, and so "foo" may well match _both_ a subschema
_and_ a property, if someone were unwise enough to build
a schema with overloaded names.
Also note that colons in names are not special to this function,
though they may well have special meaning to, e.g., the
course schema mapping to course.yaml dict hierarchy. Picking
out a single such field would use a name such as
"registration/course:send_welcome_notifications".
Returns:
A schema with only the named items present.
"""
# Arbitrary depth instantiate-on-reference dict constructor
treebuilder = lambda: collections.defaultdict(treebuilder)
# Build a tree of nodes from the given paths.
root = treebuilder()
for path in paths:
parts = path.split('/')
node = root
for part in parts:
node = node[part]
registry = copy.deepcopy(self)
def delete_all_but(registry, node):
# pylint: disable-msg=protected-access
# Copy so deleting does not wreck iterator.
for prop in copy.copy(registry._properties):
if prop.name not in node:
registry._properties.remove(prop)
            # Iterate over a copy of the items so the deletion below does not
            # invalidate the iterator.
            for name, value in registry._sub_registries.items():
# If this subregistry is not named at all, remove it.
if name not in node:
del registry._sub_registries[name]
# If the paths-to-save gives sub-entries within this
# node, then proceed into the node to prune its members.
# Otherwise, do nothing, leaving the node and all its
# children in place.
elif node[name]:
delete_all_but(value, node[name])
delete_all_but(registry, root)
return registry
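# Illustrative sketch (editor's addition, not part of the original module): the
# registry layout below ('foo' -> 'bar' -> 'baz') mirrors the docstring example
# of clone_only_items_named and is made up for demonstration only.
def _example_clone_only_items_named():
    root = Registry('root')
    root.add_property(Property('top_level', 'Top level', 'string'))
    foo = root.add_sub_registry('foo', title='Foo')
    bar = foo.add_sub_registry('bar', title='Bar')
    bar.add_property(Property('baz', 'Baz', 'string'))
    # 'top_level' is pruned; the foo -> bar -> baz chain survives.
    return root.clone_only_items_named(['foo/bar/baz']).get_display_dict()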
class SchemaField(Property):
"""SchemaField defines a simple field."""
def __init__(
self, name, label, property_type, select_data=None, description=None,
optional=False, hidden=False, editable=True, i18n=None,
extra_schema_dict_values=None, validator=None):
Property.__init__(
self, name, label, property_type, select_data=select_data,
description=description, optional=optional,
extra_schema_dict_values=extra_schema_dict_values)
self._hidden = hidden
self._editable = editable
self._validator = validator
self._i18n = i18n
@property
def hidden(self):
return self._hidden
@property
def editable(self):
return self._editable
@property
def i18n(self):
return self._i18n
def get_json_schema_dict(self):
"""Get the JSON schema for this field."""
prop = {}
prop['type'] = self._property_type
if self._optional:
prop['optional'] = self._optional
if self._description:
prop['description'] = self._description
if self._i18n:
prop['i18n'] = self._i18n
return prop
def _get_schema_dict(self, prefix_key):
"""Get Schema annotation dictionary for this field."""
if self._extra_schema_dict_values:
schema = self._extra_schema_dict_values
else:
schema = {}
schema['label'] = self._label
if self._hidden:
schema['_type'] = 'hidden'
elif not self._editable:
schema['_type'] = 'uneditable'
elif self._select_data and '_type' not in schema:
schema['_type'] = 'select'
        if self._property_type == 'date':
if 'dateFormat' not in schema:
schema['dateFormat'] = 'Y/m/d'
if 'valueFormat' not in schema:
schema['valueFormat'] = 'Y/m/d'
elif self._select_data:
choices = []
for value, label in self._select_data:
choices.append(
{'value': value, 'label': unicode(label)})
schema['choices'] = choices
if self._description:
schema['description'] = self._description
return [(prefix_key + ['_inputex'], schema)]
def validate(self, value, errors):
if self._validator:
self._validator(value, errors)
class FieldArray(SchemaField):
"""FieldArray is an array with object or simple items."""
def __init__(
self, name, label, description=None, item_type=None,
extra_schema_dict_values=None):
super(FieldArray, self).__init__(
name, label, 'array', description=description,
extra_schema_dict_values=extra_schema_dict_values)
self._item_type = item_type
@property
def item_type(self):
return self._item_type
def get_json_schema_dict(self):
json_schema = super(FieldArray, self).get_json_schema_dict()
json_schema['items'] = self._item_type.get_json_schema_dict()
return json_schema
def _get_schema_dict(self, prefix_key):
dict_list = super(FieldArray, self)._get_schema_dict(prefix_key)
# pylint: disable-msg=protected-access
dict_list += self._item_type._get_schema_dict(prefix_key + ['items'])
# pylint: enable-msg=protected-access
return dict_list
class FieldRegistry(Registry):
"""FieldRegistry is an object with SchemaField properties."""
def add_sub_registry(
self, name, title=None, description=None, registry=None):
"""Add a sub registry to for this Registry."""
if not registry:
registry = FieldRegistry(title, description=description)
self._sub_registries[name] = registry
return registry
def get_json_schema_dict(self):
schema_dict = dict(self._registry)
schema_dict['properties'] = collections.OrderedDict()
for schema_field in self._properties:
schema_dict['properties'][schema_field.name] = (
schema_field.get_json_schema_dict())
for key in self._sub_registries.keys():
schema_dict['properties'][key] = (
self._sub_registries[key].get_json_schema_dict())
return schema_dict
def get_json_schema(self):
"""Get the json schema for this API."""
return json.dumps(self.get_json_schema_dict())
def _get_schema_dict(self, prefix_key):
"""Get schema dict for this API."""
title_key = list(prefix_key)
title_key.append('title')
schema_dict = [(title_key, self._title)]
if self._extra_schema_dict_values:
key = list(prefix_key)
key.append('_inputex')
schema_dict.append([key, self._extra_schema_dict_values])
base_key = list(prefix_key)
base_key.append('properties')
# pylint: disable-msg=protected-access
for schema_field in self._properties:
key = base_key + [schema_field.name]
schema_dict += schema_field._get_schema_dict(key)
# pylint: enable-msg=protected-access
for key in self._sub_registries.keys():
sub_registry_key_prefix = list(base_key)
sub_registry_key_prefix.append(key)
sub_registry = self._sub_registries[key]
# pylint: disable-msg=protected-access
for entry in sub_registry._get_schema_dict(sub_registry_key_prefix):
schema_dict.append(entry)
# pylint: enable-msg=protected-access
return schema_dict
def get_schema_dict(self):
"""Get schema dict for this API."""
return self._get_schema_dict(list())
@classmethod
def _add_entry(cls, key_part_list, value, entity):
if len(key_part_list) == 1:
entity[key_part_list[0]] = value
return
key = key_part_list.pop()
if not entity.has_key(key):
entity[key] = {}
else:
assert type(entity[key]) == type(dict())
cls._add_entry(key_part_list, value, entity[key])
@classmethod
def convert_json_to_entity(cls, json_entry, entity):
assert type(json_entry) == type(dict())
for key in json_entry.keys():
if type(json_entry[key]) == type(dict()):
cls.convert_json_to_entity(json_entry[key], entity)
else:
key_parts = key.split(':')
key_parts.reverse()
cls._add_entry(key_parts, json_entry[key], entity)
@classmethod
def _get_field_name_parts(cls, field_name):
field_name_parts = field_name.split(':')
field_name_parts.reverse()
return field_name_parts
@classmethod
def _get_field_value(cls, key_part_list, entity):
if len(key_part_list) == 1:
if type(entity) == dict and entity.has_key(key_part_list[0]):
return entity[key_part_list[0]]
return None
key = key_part_list.pop()
if entity.has_key(key):
return cls._get_field_value(key_part_list, entity[key])
return None
def convert_entity_to_json_entity(self, entity, json_entry):
for schema_field in self._properties:
field_name = schema_field.name
field_name_parts = self._get_field_name_parts(field_name)
value = self._get_field_value(field_name_parts, entity)
if type(value) != type(None):
json_entry[field_name] = value
for key in self._sub_registries.keys():
json_entry[key] = {}
self._sub_registries[key].convert_entity_to_json_entity(
entity, json_entry[key])
def validate(self, payload, errors):
for schema_field in self._properties:
field_name_parts = self._get_field_name_parts(schema_field.name)
value = self._get_field_value(field_name_parts, payload)
schema_field.validate(value, errors)
for registry in self._sub_registries.values():
registry.validate(payload, errors)
@classmethod
def is_complex_name(cls, name):
return ':' in name
@classmethod
def compute_name(cls, parent_names):
"""Computes non-indexed and indexed entity name given parent names."""
parts = []
for parent_name in parent_names:
if parent_name[0] == '[' and parent_name[-1] == ']':
parts.append('[]')
else:
parts.append(parent_name)
return ':'.join(parts), ':'.join(parent_names)
class SchemaFieldValue(object):
"""This class represents an instance of a field value."""
def __init__(self, name, field, value, setter):
"""An object that name, value and type of a field.
Args:
name: a name of the value
field: SchemaField object that holds the type
value: Python object that holds the value
setter: a function which sets the value in the underlying data
structure
"""
self._name = name
self._field = field
self._value = value
self._setter = setter
@property
def name(self):
return self._name
@property
def field(self):
return self._field
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
self._setter(new_value)
class FieldRegistryIndex(object):
"""Helper class that allows fast access to values and their fields."""
def __init__(self, registry):
self._registry = registry
self._names_in_order = []
self._complex_name_to_field = {}
self._computed_name_to_field = {}
@property
def registry(self):
return self._registry
@property
def names_in_order(self):
return self._names_in_order
def _inspect_registry(self, parent_names, registry):
"""Inspects registry and adds its items to the index."""
for field in registry._properties: # pylint: disable=protected-access
name = field.name
if isinstance(field, FieldArray):
self._inspect_registry(
parent_names + [name, '[]'], field.item_type)
if registry.is_complex_name(field.name):
complex_name = field.name
if complex_name in self._complex_name_to_field:
raise KeyError('Field already defined: %s.' % complex_name)
self._complex_name_to_field[complex_name] = field
self._names_in_order.append(complex_name)
else:
computed_name = ':'.join(parent_names + [field.name])
if computed_name in self._computed_name_to_field:
raise KeyError('Field already defined: %s.' % computed_name)
self._computed_name_to_field[computed_name] = field
self._names_in_order.append(computed_name)
# pylint: disable=protected-access
for name, registry in registry._sub_registries.items():
self._inspect_registry(parent_names + [name], registry)
def rebuild(self):
"""Build an index."""
self._inspect_registry([], self._registry)
def find(self, name):
"""Finds and returns a field given field name."""
field = self._complex_name_to_field.get(name)
return field if field else self._computed_name_to_field.get(name)
class FieldFilter(object):
"""Filter for collections of schema fields."""
def __init__(
self, type_names=None, hidden_values=None, i18n_values=None,
editable_values=None):
self._type_names = type_names
self._hidden_values = hidden_values
self._i18n_values = i18n_values
self._editable_values = editable_values
def _filter(self, named_field_list):
"""Filters a list of name, SchemaField pairs."""
result = set()
for name, field in named_field_list:
if self._type_names and field.type not in self._type_names:
continue
if self._hidden_values and field.hidden not in self._hidden_values:
continue
if self._editable_values and (
field.editable not in self._editable_values):
continue
if self._i18n_values and field.i18n not in self._i18n_values:
continue
result.add(name)
return result
def filter_value_to_type_binding(self, binding):
"""Returns a set of value names that pass the criterion."""
named_field_list = [
(field_value.name, field_value.field)
for field_value in binding.value_list]
return self._filter(named_field_list)
def filter_field_registry_index(self, index):
"""Returns the field names in the schema that pass the criterion."""
named_field_list = [
(name, index.find(name)) for name in index.names_in_order]
return self._filter(named_field_list)
class ValueToTypeBinding(object):
"""This class provides mapping of entity attributes to their types."""
def __init__(self):
self.value_list = [] # a list of all encountered SchemaFieldValues
self.name_to_value = {} # field name to SchemaFieldValue mapping
self.name_to_field = {} # field name to SchemaField mapping
self.unmapped_names = set() # a set of field names where mapping failed
self.index = None # the indexed set of schema names
def find_value(self, name):
return self.name_to_value[name]
def find_field(self, name):
return self.name_to_field[name]
@classmethod
def _get_setter(cls, entity, key):
def setter(value):
entity[key] = value
return setter
@classmethod
def _visit_dict(cls, index, parent_names, entity, binding):
"""Visit dict entity."""
for _name, _value in entity.items():
cls._decompose_entity(
index, parent_names + [_name], _value, binding,
cls._get_setter(entity, _name))
@classmethod
def _visit_list(cls, index, parent_names, entity, binding, setter):
"""Visit list entity."""
name_no_index, name = index.registry.compute_name(parent_names)
_field = index.find(name_no_index)
if _field:
assert isinstance(_field, FieldArray)
assert name not in binding.name_to_field
binding.name_to_field[name] = _field
assert name not in binding.name_to_value, name
binding.name_to_value[name] = SchemaFieldValue(
name, _field, entity, setter)
for _index, _item in enumerate(entity):
_item_name = '[%s]' % _index
cls._decompose_entity(
index, parent_names + [_item_name], _item, binding,
cls._get_setter(entity, _index))
else:
assert name not in binding.unmapped_names
binding.unmapped_names.add(name)
@classmethod
def _visit_attribute(cls, index, parent_names, entity, binding, setter):
"""Visit simple attribute."""
name_no_index, name = index.registry.compute_name(parent_names)
_field = index.find(name_no_index)
if _field:
_value = SchemaFieldValue(name, _field, entity, setter)
binding.value_list.append(_value)
assert name not in binding.name_to_value, name
binding.name_to_value[name] = _value
assert name not in binding.name_to_field
binding.name_to_field[name] = _field
else:
assert name not in binding.unmapped_names, name
binding.unmapped_names.add(name)
@classmethod
def _decompose_entity(
cls, index, parent_names, entity, binding, setter):
"""Recursively decomposes entity."""
if isinstance(entity, dict):
cls._visit_dict(index, parent_names, entity, binding)
elif isinstance(entity, list):
cls._visit_list(index, parent_names, entity, binding, setter)
else:
cls._visit_attribute(index, parent_names, entity, binding, setter)
@classmethod
def bind_entity_to_schema(cls, json_dumpable_entity, registry):
"""Connects schema field type information to the entity attributes.
Args:
json_dumpable_entity: a Python dict recursively containing other
dict, list and primitive objects
registry: a FieldRegistry that holds entity type information
Returns:
an instance of ValueToTypeBinding object that maps entity attributes
to their types
"""
binding = ValueToTypeBinding()
index = FieldRegistryIndex(registry)
index.rebuild()
cls._decompose_entity(
index, [], json_dumpable_entity, binding, None)
binding.index = index
return binding
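# Illustrative usage sketch (editor's addition, not part of the original
# module): build a small FieldRegistry, bind a matching entity to it and return
# the attribute names that were mapped. The field names used here are made up.
def _example_bind_entity_to_schema():
    registry = FieldRegistry('Course', description='Example schema')
    registry.add_property(SchemaField('title', 'Title', 'string'))
    settings = registry.add_sub_registry('settings', title='Settings')
    settings.add_property(SchemaField('locale', 'Locale', 'string'))
    entity = {'title': 'Intro', 'settings': {'locale': 'en_US'}}
    binding = ValueToTypeBinding.bind_entity_to_schema(entity, registry)
    return sorted(binding.name_to_value.keys())  # ['settings:locale', 'title']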
| 0.000803 |
import setuptools
with open('VERSION.txt', 'r') as f:
version = f.read().strip()
setuptools.setup(
name="odoo8-addons-oca-account-invoicing",
description="Meta package for oca-account-invoicing Odoo addons",
version=version,
install_requires=[
'odoo8-addon-account_group_invoice_lines',
'odoo8-addon-account_invoice_force_number',
'odoo8-addon-account_invoice_kanban',
'odoo8-addon-account_invoice_line_description',
'odoo8-addon-account_invoice_line_price_subtotal_gross',
'odoo8-addon-account_invoice_line_sort',
'odoo8-addon-account_invoice_merge',
'odoo8-addon-account_invoice_merge_payment',
'odoo8-addon-account_invoice_merge_purchase',
'odoo8-addon-account_invoice_partner',
'odoo8-addon-account_invoice_period_usability',
'odoo8-addon-account_invoice_pricelist',
'odoo8-addon-account_invoice_pricelist_sale',
'odoo8-addon-account_invoice_pricelist_sale_stock',
'odoo8-addon-account_invoice_pricelist_stock_account',
'odoo8-addon-account_invoice_rounding',
'odoo8-addon-account_invoice_rounding_by_currency',
'odoo8-addon-account_invoice_shipping_address',
'odoo8-addon-account_invoice_supplier_number_info',
'odoo8-addon-account_invoice_supplier_ref_unique',
'odoo8-addon-account_invoice_supplierinfo_update',
'odoo8-addon-account_invoice_supplierinfo_update_discount',
'odoo8-addon-account_invoice_supplierinfo_update_on_validate',
'odoo8-addon-account_invoice_supplierinfo_update_variant',
'odoo8-addon-account_invoice_transmit_method',
'odoo8-addon-account_invoice_triple_discount',
'odoo8-addon-account_invoice_uom',
'odoo8-addon-account_invoice_validation_workflow',
'odoo8-addon-account_invoice_zero_autopay',
'odoo8-addon-account_outstanding_payment',
'odoo8-addon-account_payment_term_extension',
'odoo8-addon-invoice_fiscal_position_update',
'odoo8-addon-invoice_margin',
'odoo8-addon-sale_order_line_price_subtotal_gross',
'odoo8-addon-stock_picking_invoice_product_group',
'odoo8-addon-stock_picking_invoicing_incoterm',
'odoo8-addon-stock_picking_invoicing_incoterm_sale',
'odoo8-addon-stock_picking_invoicing_unified',
],
classifiers=[
'Programming Language :: Python',
'Framework :: Odoo',
]
)
| 0 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import deferred_delete
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import test
class FakeRequest(object):
def __init__(self, context):
self.environ = {'nova.context': context}
class DeferredDeleteExtensionTest(test.NoDBTestCase):
def setUp(self):
super(DeferredDeleteExtensionTest, self).setUp()
self.extension = deferred_delete.DeferredDeleteController()
self.fake_input_dict = {}
self.fake_uuid = 'fake_uuid'
self.fake_context = context.RequestContext('fake', 'fake')
self.fake_req = FakeRequest(self.fake_context)
def test_force_delete(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
self.mox.StubOutWithMock(compute_api.API, 'force_delete')
fake_instance = 'fake_instance'
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndReturn(
fake_instance)
compute_api.API.force_delete(self.fake_context, fake_instance)
self.mox.ReplayAll()
res = self.extension._force_delete(self.fake_req, self.fake_uuid,
self.fake_input_dict)
self.assertEqual(res.status_int, 202)
def test_force_delete_instance_not_found(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndRaise(
exception.InstanceNotFound(instance_id='instance-0000'))
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPNotFound,
self.extension._force_delete,
self.fake_req,
self.fake_uuid,
self.fake_input_dict)
def test_force_delete_raises_conflict_on_invalid_state(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
self.mox.StubOutWithMock(compute_api.API, 'force_delete')
fake_instance = 'fake_instance'
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndReturn(
fake_instance)
exc = exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
compute_api.API.force_delete(self.fake_context, fake_instance)\
.AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPConflict,
self.extension._force_delete, self.fake_req, self.fake_uuid,
self.fake_input_dict)
def test_restore(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
self.mox.StubOutWithMock(compute_api.API, 'restore')
fake_instance = 'fake_instance'
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndReturn(
fake_instance)
compute_api.API.restore(self.fake_context, fake_instance)
self.mox.ReplayAll()
res = self.extension._restore(self.fake_req, self.fake_uuid,
self.fake_input_dict)
self.assertEqual(res.status_int, 202)
def test_restore_instance_not_found(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndRaise(
exception.InstanceNotFound(instance_id='instance-0000'))
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPNotFound, self.extension._restore,
self.fake_req, self.fake_uuid,
self.fake_input_dict)
def test_restore_raises_conflict_on_invalid_state(self):
self.mox.StubOutWithMock(compute_api.API, 'get')
self.mox.StubOutWithMock(compute_api.API, 'restore')
fake_instance = 'fake_instance'
exc = exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
compute_api.API.get(self.fake_context, self.fake_uuid,
expected_attrs=None, want_objects=True).AndReturn(
fake_instance)
compute_api.API.restore(self.fake_context, fake_instance).AndRaise(
exc)
self.mox.ReplayAll()
self.assertRaises(webob.exc.HTTPConflict, self.extension._restore,
self.fake_req, self.fake_uuid, self.fake_input_dict)
| 0.001289 |
# -*- coding: utf-8 -*-
from .baseapi import BaseAPI, POST, DELETE, PUT
class Record(BaseAPI):
def __init__(self, domain_name=None, *args, **kwargs):
self.domain = domain_name if domain_name else ""
self.id = None
self.type = None
self.name = None
self.data = None
self.priority = None
self.port = None
self.weight = None
super(Record, self).__init__(*args, **kwargs)
@classmethod
def get_object(cls, api_token, domain, record_id):
"""
Class method that will return a Record object by ID and the domain.
"""
record = cls(token=api_token, domain=domain, id=record_id)
record.load()
return record
def create(self):
"""
Create a record for a domain
"""
input_params = {
"type": self.type,
"data": self.data,
"name": self.name,
"priority": self.priority,
"port": self.port,
"weight": self.weight
}
data = self.get_data(
"domains/%s/records" % (self.domain),
type=POST,
params=input_params,
)
if data:
self.id = data['domain_record']['id']
def destroy(self):
"""
Destroy the record
"""
return self.get_data(
"domains/%s/records/%s" % (self.domain, self.id),
type=DELETE,
)
def save(self):
"""
Save existing record
"""
data = {
"type": self.type,
"data": self.data,
"name": self.name,
"priority": self.priority,
"port": self.port,
"weight": self.weight,
}
return self.get_data(
"domains/%s/records/%s" % (self.domain, self.id),
type=PUT,
params=data
)
def load(self):
url = "domains/%s/records/%s" % (self.domain, self.id)
record = self.get_data(url)
if record:
record = record[u'domain_record']
# Setting the attribute values
for attr in record.keys():
setattr(self, attr, record[attr])
def __str__(self):
return "%s %s" % (self.id, self.domain)
| 0 |
# -*- coding: utf-8 -*-
"""
github3.auths
=============
This module contains the Authorization object.
"""
from __future__ import unicode_literals
from .decorators import requires_basic_auth
from .models import GitHubCore
class Authorization(GitHubCore):
"""The :class:`Authorization <Authorization>` object.
Two authorization instances can be checked like so::
a1 == a2
a1 != a2
And is equivalent to::
a1.id == a2.id
a1.id != a2.id
See also: http://developer.github.com/v3/oauth/#oauth-authorizations-api
"""
def _update_attributes(self, auth):
self._api = auth.get('url')
#: Details about the application (name, url)
self.app = auth.get('app', {})
#: Returns the Authorization token
self.token = auth.get('token', '')
#: App name
self.name = self.app.get('name', '')
#: URL about the note
self.note_url = auth.get('note_url') or ''
#: Note about the authorization
self.note = auth.get('note') or ''
#: List of scopes this applies to
self.scopes = auth.get('scopes', [])
#: Unique id of the authorization
self.id = auth.get('id', 0)
#: datetime object representing when the authorization was created.
self.created_at = self._strptime(auth.get('created_at'))
#: datetime object representing when the authorization was updated.
self.updated_at = self._strptime(auth.get('updated_at'))
def _repr(self):
return '<Authorization [{0}]>'.format(self.name)
def _update(self, scopes_data, note, note_url):
"""Helper for add_scopes, replace_scopes, remove_scopes."""
if note is not None:
scopes_data['note'] = note
if note_url is not None:
scopes_data['note_url'] = note_url
json = self._json(self._post(self._api, data=scopes_data), 200)
if json:
self._update_attributes(json)
return True
return False
@requires_basic_auth
def add_scopes(self, scopes, note=None, note_url=None):
"""Adds the scopes to this authorization.
.. versionadded:: 1.0
:param list scopes: Adds these scopes to the ones present on this
authorization
:param str note: (optional), Note about the authorization
:param str note_url: (optional), URL to link to when the user views
the authorization
:returns: True if successful, False otherwise
:rtype: bool
"""
return self._update({'add_scopes': scopes}, note, note_url)
@requires_basic_auth
def delete(self):
"""Delete this authorization."""
return self._boolean(self._delete(self._api), 204, 404)
@requires_basic_auth
def remove_scopes(self, scopes, note=None, note_url=None):
"""Remove the scopes from this authorization.
.. versionadded:: 1.0
:param list scopes: Remove these scopes from the ones present on this
authorization
:param str note: (optional), Note about the authorization
:param str note_url: (optional), URL to link to when the user views
the authorization
:returns: True if successful, False otherwise
:rtype: bool
"""
return self._update({'rm_scopes': scopes}, note, note_url)
@requires_basic_auth
def replace_scopes(self, scopes, note=None, note_url=None):
"""Replace the scopes on this authorization.
.. versionadded:: 1.0
:param list scopes: Use these scopes instead of the previous list
:param str note: (optional), Note about the authorization
:param str note_url: (optional), URL to link to when the user views
the authorization
:returns: True if successful, False otherwise
:rtype: bool
"""
return self._update({'scopes': scopes}, note, note_url)
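# Illustrative sketch (editor's addition): given an Authorization obtained with
# basic-auth credentials, widen and then shrink its scopes using the methods
# defined above. Defining this helper performs no request by itself.
def _example_scope_update(auth):
    auth.add_scopes(['repo', 'gist'], note='ci token')
    auth.remove_scopes(['gist'])
    return auth.scopes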
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('./.libs/')
sys.path.append('../../lib/pyUniSet/.libs/')
sys.path.append('../../lib/pyUniSet/')
from pyUniSet import *
if __name__ == "__main__":
lst = Params_inst()
for i in range(0, len(sys.argv)):
if i >= Params.max:
            break
lst.add( sys.argv[i] )
try:
uniset_init_params( lst, str("test.xml") )
print "getShortName: id=%d name=%s" % (1, getShortName(1))
print " getName: id=%d name=%s" % (1, getName(1))
print " getTextName: id=%d name=%s" % (1, getTextName(1))
print "\n"
print "getShortName: id=%d name=%s" % (2, getShortName(2))
print " getName: id=%d name=%s" % (2, getName(2))
print " getTextName: id=%d name=%s" % (2, getTextName(2))
try:
print "getValue: %d=%d" % ( 1, getValue(1) )
except UException, e:
print "getValue exception: " + str(e.getError())
try:
print "setValue: %d=%d" % (14,setValue(14,22))
except UException, e:
print "setValue exception: " + str(e.getError())
except UException, e:
print "(testUI): catch exception: " + str(e.getError())
| 0.038669 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort, url_for, redirect, request
from flask_babel import gettext
from flask_login import current_user
from pony.orm import db_session
from mini_fiction.utils.misc import Paginator
from mini_fiction.utils.views import admin_required
from mini_fiction.validation import ValidationError
from mini_fiction.forms.admin.tag_categories import TagCategoryForm
from mini_fiction.models import TagCategory
bp = Blueprint('admin_tag_categories', __name__)
@bp.route('/page/last/', defaults={'page': -1})
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>/')
@db_session
@admin_required
def index(page):
objects = TagCategory.select().order_by(TagCategory.id)
page_obj = Paginator(page, objects.count(), per_page=100)
objects = page_obj.slice_or_404(objects)
return render_template(
'admin/tag_categories/index.html',
page_title=gettext('Tag categories'),
tag_categories=objects,
page_obj=page_obj,
)
@bp.route('/create/', methods=('GET', 'POST'))
@db_session
@admin_required
def create():
form = TagCategoryForm()
if form.validate_on_submit():
try:
tag_category = TagCategory.bl.create(current_user, form.data)
except ValidationError as exc:
form.set_errors(exc.errors)
else:
return redirect(url_for('admin_tag_categories.update', pk=tag_category.id))
return render_template(
'admin/tag_categories/work.html',
page_title=gettext('Create'),
form=form,
edit=False,
)
@bp.route('/<int:pk>/', methods=('GET', 'POST'))
@db_session
@admin_required
def update(pk):
tag_category = TagCategory.get(id=pk)
if not tag_category:
abort(404)
form = TagCategoryForm(data={
'name': tag_category.name,
'description': tag_category.description,
})
saved = False
if form.validate_on_submit():
try:
tag_category.bl.update(current_user, form.data)
except ValidationError as exc:
form.set_errors(exc.errors)
else:
saved = True
return render_template(
'admin/tag_categories/work.html',
page_title=tag_category.name,
tag_category=tag_category,
form=form,
edit=True,
saved=saved,
)
@bp.route('/<int:pk>/delete/', methods=('GET', 'POST'))
@db_session
@admin_required
def delete(pk):
tag_category = TagCategory.get(id=pk)
if not tag_category:
abort(404)
if request.method == 'POST':
try:
tag_category.bl.delete(current_user)
except ValidationError:
abort(403)
else:
return redirect(url_for('admin_tag_categories.index'))
return render_template(
'admin/tag_categories/delete.html',
page_title=gettext('Delete'),
tag_category=tag_category,
)
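# Illustrative sketch (editor's addition): how a blueprint like this one is
# typically wired into the Flask application. The URL prefix below is an
# assumption, not taken from mini_fiction itself.
def _example_register(app):
    app.register_blueprint(bp, url_prefix='/admin/tag_categories')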
| 0.000337 |
# -*- coding: utf-8 -*-
'''manual balancing call chain test mixins'''
try:
import unittest2 as unittest
except ImportError:
import unittest
class Manning(unittest.TestCase):
def _false_true_false(self, manq, expr, comp=None):
manq = manq.back()
self.assertFalse(manq.balanced)
manq.sync()
self.assertTrue(manq.balanced)
if comp is not None:
expr(manq.value(), comp)
else:
expr(manq.value())
self.assertFalse(manq.balanced)
def _true_true_false(self, manq, expr, comp=None):
manq = manq.back()
self.assertTrue(manq.balanced)
manq.sync()
self.assertTrue(manq.balanced)
if comp is not None:
expr(manq.value(), comp)
else:
expr(manq.value())
self.assertFalse(manq.balanced)
def _false_true_true(self, manq, expr, comp=None):
manq = manq.back()
self.assertFalse(manq.balanced)
manq.sync()
self.assertTrue(manq.balanced)
if comp is not None:
expr(manq.value(), comp)
else:
expr(manq.value())
self.assertTrue(manq.balanced)
| 0 |
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_ip_pool
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS IP Pool
description:
    - Apstra AOS IP Pool module lets you manage your IP Pools easily. You can
      create and delete IP Pools by name, ID or by using a JSON file. This module
      is idempotent and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the IP Pool to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the IP Pool to manage (can't be used to create a new IP Pool),
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the IP Pool to manage. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the IP Pool (present or not).
default: present
choices: ['present', 'absent']
subnets:
description:
- List of subnet that needs to be part of the IP Pool.
'''
EXAMPLES = '''
- name: "Create an IP Pool with one subnet"
aos_ip_pool:
session: "{{ aos_session }}"
name: "my-ip-pool"
subnets: [ 172.10.0.0/16 ]
state: present
- name: "Create an IP Pool with multiple subnets"
aos_ip_pool:
session: "{{ aos_session }}"
name: "my-other-ip-pool"
    subnets: [ 172.10.0.0/16, 192.168.0.0/24 ]
state: present
- name: "Check if an IP Pool exist with same subnets by ID"
aos_ip_pool:
session: "{{ aos_session }}"
name: "45ab26fc-c2ed-4307-b330-0870488fa13e"
    subnets: [ 172.10.0.0/16, 192.168.0.0/24 ]
state: present
- name: "Delete an IP Pool by name"
aos_ip_pool:
session: "{{ aos_session }}"
name: "my-ip-pool"
state: absent
- name: "Delete an IP pool by id"
aos_ip_pool:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save an IP Pool to a file
- name: "Access IP Pool 1/3"
aos_ip_pool:
session: "{{ aos_session }}"
name: "my-ip-pool"
subnets: [ 172.10.0.0/16, 172.12.0.0/16 ]
state: present
register: ip_pool
- name: "Save Ip Pool into a file in JSON 2/3"
copy:
content: "{{ ip_pool.value | to_nice_json }}"
dest: ip_pool_saved.json
- name: "Save Ip Pool into a file in YAML 3/3"
copy:
content: "{{ ip_pool.value | to_nice_yaml }}"
dest: ip_pool_saved.yaml
- name: "Load IP Pool from a JSON file"
aos_ip_pool:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/ip_pool_saved.json') }}"
state: present
- name: "Load IP Pool from a YAML file"
aos_ip_pool:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/ip_pool_saved.yaml') }}"
state: present
- name: "Load IP Pool from a Variable"
aos_ip_pool:
session: "{{ aos_session }}"
content:
display_name: my-ip-pool
id: 4276738d-6f86-4034-9656-4bff94a34ea7
subnets:
- network: 172.10.0.0/16
- network: 172.12.0.0/16
state: present
'''
RETURNS = '''
name:
description: Name of the IP Pool
returned: always
type: str
sample: Server-IpAddrs
id:
description: AOS unique ID assigned to the IP Pool
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
def get_list_of_subnets(ip_pool):
subnets = []
for subnet in ip_pool.value['subnets']:
subnets.append(subnet['network'])
return subnets
def create_new_ip_pool(ip_pool, name, subnets):
# Create value
datum = dict(display_name=name, subnets=[])
for subnet in subnets:
datum['subnets'].append(dict(network=subnet))
ip_pool.datum = datum
## Write to AOS
return ip_pool.write()
#########################################################
# State Processing
#########################################################
def ip_pool_absent(module, aos, my_pool):
margs = module.params
    # If the IP Pool does not exist, return directly
if my_pool.exists is False:
module.exit_json(changed=False, name=margs['name'], id='', value={})
## Check if object is currently in Use or Not
# If in Use, return an error
if my_pool.value:
if my_pool.value['status'] != 'not_in_use':
module.fail_json(msg="unable to delete this ip Pool, currently in use")
else:
module.fail_json(msg="Ip Pool object has an invalid format, value['status'] must be defined")
# If not in check mode, delete Ip Pool
if not module.check_mode:
try:
my_pool.delete()
except:
module.fail_json(msg="An error occured, while trying to delete the IP Pool")
module.exit_json( changed=True,
name=my_pool.name,
id=my_pool.id,
value={} )
def ip_pool_present(module, aos, my_pool):
margs = module.params
# if content is defined, create object from Content
try:
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.IpPools, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
except:
module.fail_json(msg="Unable to load resource from content, something went wrong")
# if ip_pool doesn't exist already, create a new one
if my_pool.exists is False and 'name' not in margs.keys():
module.fail_json(msg="Name is mandatory for module that don't exist currently")
elif my_pool.exists is False:
if not module.check_mode:
try:
my_new_pool = create_new_ip_pool(my_pool, margs['name'], margs['subnets'])
my_pool = my_new_pool
except:
module.fail_json(msg="An error occured while trying to create a new IP Pool ")
module.exit_json( changed=True,
name=my_pool.name,
id=my_pool.id,
value=my_pool.value )
# if pool already exist, check if list of network is the same
# if same just return the object and report change false
if set(get_list_of_subnets(my_pool)) == set(margs['subnets']):
module.exit_json( changed=False,
name=my_pool.name,
id=my_pool.id,
value=my_pool.value )
else:
module.fail_json(msg="ip_pool already exist but value is different, currently not supported to update a module")
#########################################################
# Main Function
#########################################################
def ip_pool(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'] )
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
try:
my_pool = find_collection_item(aos.IpPools,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
ip_pool_absent(module, aos, my_pool)
elif margs['state'] == 'present':
ip_pool_present(module, aos, my_pool)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False ),
id=dict(required=False ),
content=dict(required=False, type="json"),
state=dict( required=False,
choices=['present', 'absent'],
default="present"),
subnets=dict(required=False, type="list")
),
mutually_exclusive = [('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
ip_pool(module)
if __name__ == "__main__":
main()
| 0.004925 |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
############################################################################
# Utility functions for debugging eigen.py
from __future__ import division
from vistrails.core.data_structures.point import Point
def smart_sum(v):
try:
fst = v.next()
return sum(v, fst)
except Exception:
pass
fst = v[0]
return sum(v[1:], fst)
def pipeline_centroid(pipeline):
"""Returns the centroid of a given pipeline."""
return (smart_sum(x.location for
x in pipeline.modules.itervalues()) *
(1.0 / len(pipeline.modules)))
def pipeline_bbox(pipeline):
mn_x = 1000000000.0
mn_y = 1000000000.0
mx_x = -1000000000.0
mx_y = -1000000000.0
for m in pipeline.modules.itervalues():
mn_x = min(mn_x, m.location.x)
mn_y = min(mn_y, m.location.y)
mx_x = max(mx_x, m.location.x)
mx_y = max(mx_y, m.location.y)
return (Point(mn_x, mn_y), Point(mx_x, mx_y))
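# Illustrative sketch (editor's addition): pipeline_bbox only needs a
# ``modules`` mapping whose values carry a ``location`` Point, so the tiny
# stand-in classes below (made up for the example) are enough to exercise it.
class _FakeModule(object):
    def __init__(self, x, y):
        self.location = Point(x, y)
class _FakePipeline(object):
    def __init__(self, points):
        self.modules = dict(enumerate(_FakeModule(x, y) for x, y in points))
def _example_bbox():
    pipeline = _FakePipeline([(0.0, 0.0), (10.0, 5.0), (4.0, -2.0)])
    return pipeline_bbox(pipeline)  # -> (Point(0.0, -2.0), Point(10.0, 5.0))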
| 0.010399 |
# Copyright (C) 2016 Swift Navigation Inc.
#
# Contact: Valeri Atamaniouk <valeri@swiftnav.com>
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from peregrine.tracking_file_utils import collectTrackingOutputFileEntries
from peregrine.tracking_file_utils import createTrackingDumpOutputFileName
from peregrine.tracking_file_utils import createTrackingOutputFileNames
from peregrine.tracking_file_utils import PickleLoadObject
from peregrine.tracking_file_utils import removeTrackingOutputFiles
from peregrine.tracking_file_utils import TrackingResults
from peregrine.tracking_file_utils import TrackResultFile
from peregrine.tracking import TrackResults
def __testSetup():
'''
Test utility
'''
removeTrackingOutputFiles("test_output.bin")
tr1 = TrackResults(500, 0, 'l1ca')
for i in range(500):
tr1.ms_tracked[i] = i * 2
tr1.absolute_sample[i] = i * 2
tr1.status = 'A'
tr1.dump('test_output.bin', 500)
tr2 = TrackResults(500, 1, 'l1ca')
for i in range(500):
tr2.ms_tracked[i] = i * 2 + 1
tr2.absolute_sample[i] = i * 2 + 1
tr2.status = 'B'
tr2.dump('test_output.bin', 500)
def test_OutputFileNames0():
'''
Name mangling test
'''
aName, rName = createTrackingOutputFileNames("output.bin", 1, "l1ca")
assert aName == "output.PRN-1.l1ca.bin"
assert rName == "output.PRN-1.l1ca.bin.track_results"
def test_OutputFileNames1():
'''
Name mangling test
'''
aName, rName = createTrackingOutputFileNames("/mnt/usr/output.bin", 2, "l2c")
assert aName == "/mnt/usr/output.PRN-2.l2c.bin"
assert rName == "/mnt/usr/output.PRN-2.l2c.bin.track_results"
def test_DumpOutputFileName():
'''
Name mangling test
'''
fname = createTrackingDumpOutputFileName('output.bin')
assert fname == 'output.combined_track.bin'
def test_CollectTrackingOutputFileEntries0():
'''
Test for locating tracking results (empty)
'''
removeTrackingOutputFiles("test_output.bin")
entries = collectTrackingOutputFileEntries("test_output.bin")
assert isinstance(entries, list)
assert len(entries) == 0
def test_CollectTrackingOutputFileEntries1():
'''
Test for locating tracking results (non-empty)
'''
removeTrackingOutputFiles("test_output.bin")
aName1, rName1 = createTrackingOutputFileNames("test_output.bin", 1, "l1ca")
aName2, rName2 = createTrackingOutputFileNames("test_output.bin", 2, "l1ca")
aName3, rName3 = createTrackingOutputFileNames("test_output.bin", 1, "l2c")
for f in [aName1, aName2, aName3, rName1, rName2, rName3]:
with file(f, "wb"):
pass
entries = collectTrackingOutputFileEntries("test_output.bin")
assert isinstance(entries, list)
assert len(entries) == 3
assert entries[0]['filename'] == rName1
assert entries[1]['filename'] == rName2
assert entries[2]['filename'] == rName3
removeTrackingOutputFiles("test_output.bin")
def test_RemoveTrackingOutputFiles():
'''
File system cleanup test
'''
aName1, rName1 = createTrackingOutputFileNames("test_output.bin", 1, "l1ca")
aName2, rName2 = createTrackingOutputFileNames("test_output.bin", 2, "l1ca")
aName3, rName3 = createTrackingOutputFileNames("test_output.bin", 1, "l2c")
for f in [aName1, aName2, aName3, rName1, rName2, rName3]:
with file(f, "wb"):
pass
removeTrackingOutputFiles("test_output.bin")
entries = collectTrackingOutputFileEntries("test_output.bin")
assert isinstance(entries, list)
assert len(entries) == 0
def test_PickleLoadObject():
'''
Test for PickleLoadObject object
'''
tr = TrackResults(500, 1, 'l1ca')
for i in range(500):
tr.ms_tracked[i] = i
tr.absolute_sample[i] = i
tr.dump('test_output.bin', 500)
for i in range(500):
tr.ms_tracked[i] = i + 500
tr.absolute_sample[i] = i + 500
tr.dump('test_output.bin', 500)
loadObj = PickleLoadObject('test_output.PRN-2.l1ca.bin.track_results')
it = iter(loadObj)
o0 = it.next()
o1 = it.next()
try:
it.next()
assert False
except StopIteration:
pass
try:
it.next()
assert False
except StopIteration:
pass
assert isinstance(o0, TrackResults)
for i in range(500):
assert o0.ms_tracked[i] == i
assert o0.absolute_sample[i] == i
assert isinstance(o1, TrackResults)
for i in range(500):
assert o1.ms_tracked[i] == i + 500
assert o1.absolute_sample[i] == i + 500
def test_TrackResultsFile():
'''
Test for TrackResults object
'''
tr = TrackResults(500, 1, 'l1ca')
for i in range(500):
tr.ms_tracked[i] = i
tr.absolute_sample[i] = i
tr.status = 'A'
tr.dump('test_output.bin', 500)
for i in range(500):
tr.ms_tracked[i] = i + 500
tr.absolute_sample[i] = i + 500
tr.status = 'B'
tr.dump('test_output.bin', 500)
obj = TrackResultFile(
PickleLoadObject('test_output.PRN-2.l1ca.bin.track_results'))
it = iter(obj)
for i in range(500):
o, idx = it.next()
assert o.status == 'A'
assert idx == i
assert o.ms_tracked[i] == i
assert o.absolute_sample[i] == i
for i in range(500):
o, idx = it.next()
assert o.status == 'B'
assert idx == i
assert o.ms_tracked[i] == i + 500
assert o.absolute_sample[i] == i + 500
try:
it.next()
assert False
except StopIteration:
pass
try:
it.next()
assert False
except StopIteration:
pass
def test_TrackResultsObj0():
removeTrackingOutputFiles("test_output.bin")
tr = TrackingResults('test_output.bin')
assert tr.channelCount() == 0
assert isinstance(tr.getEntries(), list)
assert len(tr.getEntries()) == 0
co = tr.combinedResult()
assert isinstance(co, TrackingResults.MultiChannel)
try:
iter(co).next()
assert False
except StopIteration:
pass
def test_TrackResultsObj1():
'''
Test for combined channel data iterations.
'''
__testSetup()
tr = TrackingResults('test_output.bin')
assert tr.channelCount() == 2
assert isinstance(tr.getEntries(), list)
assert len(tr.getEntries()) == 2
co = tr.combinedResult()
assert isinstance(co, TrackingResults.MultiChannel)
it = iter(co)
for i in range(500):
tr1, idx1 = it.next()
tr2, idx2 = it.next()
assert tr1.status == 'A'
assert tr2.status == 'B'
assert idx1 == idx2 == i
assert tr1.ms_tracked[i] == i * 2
assert tr2.ms_tracked[i] == i * 2 + 1
try:
it.next()
assert False
except StopIteration:
pass
removeTrackingOutputFiles("test_output.bin")
def test_TrackResultsObj_single1():
'''
Test for individual channel data iterations.
'''
__testSetup()
tr = TrackingResults('test_output.bin')
assert tr.channelCount() == 2
assert isinstance(tr.getEntries(), list)
assert len(tr.getEntries()) == 2
c0 = tr.channelResult(0)
c1 = tr.channelResult(1)
assert isinstance(c0, TrackingResults.SingleChannel)
assert isinstance(c1, TrackingResults.SingleChannel)
it1 = iter(c0)
it2 = iter(c1)
for i in range(500):
tr1, idx1 = it1.next()
tr2, idx2 = it2.next()
assert tr1.status == 'A'
assert tr2.status == 'B'
assert idx1 == idx2 == i
assert tr1.ms_tracked[i] == i * 2
assert tr2.ms_tracked[i] == i * 2 + 1
try:
it1.next()
assert False
except StopIteration:
pass
try:
it2.next()
assert False
except StopIteration:
pass
removeTrackingOutputFiles("test_output.bin")
def test_TrackResultsObj_dump():
'''
Sanity test for combined data output in a textual form.
'''
__testSetup()
tr = TrackingResults('test_output.bin')
tr.dump()
removeTrackingOutputFiles("test_output.bin")
| 0.016429 |
from datetime import datetime, timedelta
import pytest
import pytz
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
class TestCronTrigger(object):
def test_cron_trigger_1(self, timezone):
trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009/2', month='1/3', day='5-13')>"
assert str(trigger) == "cron[year='2009/2', month='1/3', day='5-13']"
start_date = timezone.localize(datetime(2008, 12, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 5))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_trigger_2(self, timezone):
trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone)
start_date = timezone.localize(datetime(2009, 10, 14))
correct_next_date = timezone.localize(datetime(2011, 1, 5))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_trigger_3(self, timezone):
trigger = CronTrigger(year='2009', month='2', hour='8-10', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='2', hour='8-10')>"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 1, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_trigger_4(self, timezone):
trigger = CronTrigger(year='2012', month='2', day='last', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2012', month='2', day='last')>"
start_date = timezone.localize(datetime(2012, 2, 1))
correct_next_date = timezone.localize(datetime(2012, 2, 29))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_zero_value(self, timezone):
trigger = CronTrigger(year=2009, month=2, hour=0, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='2', hour='0')>"
def test_cron_year_list(self, timezone):
trigger = CronTrigger(year='2009,2008', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009,2008')>"
assert str(trigger) == "cron[year='2009,2008']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 1))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_start_date(self, timezone):
trigger = CronTrigger(year='2009', month='2', hour='8-10', start_date='2009-02-03 11:00:00', timezone=timezone)
assert repr(trigger) == \
"<CronTrigger (year='2009', month='2', hour='8-10', start_date='2009-02-03 11:00:00 CET')>"
assert str(trigger) == "cron[year='2009', month='2', hour='8-10']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_overlap(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='2-4', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='6-10', day_of_week='2-4')>"
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='2-4']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 7))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_nomatch(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='0,6', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='6-10', day_of_week='0,6')>"
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='0,6']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = None
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_positional(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='4th wed', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='4th wed')>"
assert str(trigger) == "cron[year='2009', month='1', day='4th wed']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 28))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_week_1(self, timezone):
trigger = CronTrigger(year=2009, month=2, week=8, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='2', week='8')>"
assert str(trigger) == "cron[year='2009', month='2', week='8']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 16))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_week_2(self, timezone):
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', week='15', day_of_week='2')>"
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_extra_coverage(self, timezone):
# This test has no value other than patching holes in test coverage
trigger = CronTrigger(day='6,8', timezone=timezone)
assert repr(trigger) == "<CronTrigger (day='6,8')>"
assert str(trigger) == "cron[day='6,8']"
start_date = timezone.localize(datetime(2009, 12, 31))
correct_next_date = timezone.localize(datetime(2010, 1, 6))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_faulty_expr(self, timezone):
pytest.raises(ValueError, CronTrigger, year='2009-fault', timezone=timezone)
def test_cron_increment_weekday(self, timezone):
"""
Tests that incrementing the weekday field in the process of calculating the next matching date won't cause
problems.
"""
trigger = CronTrigger(hour='5-6', timezone=timezone)
assert repr(trigger) == "<CronTrigger (hour='5-6')>"
assert str(trigger) == "cron[hour='5-6']"
start_date = timezone.localize(datetime(2009, 9, 25, 7))
correct_next_date = timezone.localize(datetime(2009, 9, 26, 5))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_bad_kwarg(self, timezone):
pytest.raises(TypeError, CronTrigger, second=0, third=1, timezone=timezone)
def test_timezone_from_start_date(self, timezone):
"""Tests that the trigger takes the timezone from the start_date parameter if no timezone is supplied."""
start_date = timezone.localize(datetime(2014, 4, 13, 5, 30))
trigger = CronTrigger(year=2014, hour=4, start_date=start_date)
assert trigger.timezone == start_date.tzinfo
def test_end_date(self, timezone):
end_date = timezone.localize(datetime(2014, 4, 13, 3))
trigger = CronTrigger(year=2014, hour=4, end_date=end_date)
start_date = timezone.localize(datetime(2014, 4, 13, 2, 30))
assert trigger.get_next_fire_time(None, start_date - timedelta(1)) == \
start_date.replace(day=12, hour=4, minute=0)
assert trigger.get_next_fire_time(None, start_date) is None
def test_different_tz(self, timezone):
alter_tz = pytz.FixedOffset(-600)
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', week='15', day_of_week='2')>"
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = alter_tz.localize(datetime(2008, 12, 31, 22))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_dst_change(self):
"""
Making sure that CronTrigger works correctly when crossing the DST switch threshold.
Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
would test for equality in the UTC timezone.
"""
eastern = pytz.timezone('US/Eastern')
trigger = CronTrigger(minute='*/30', timezone=eastern)
datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=True)
correct_next_date = eastern.localize(datetime(2013, 11, 3, 1, 30), is_dst=True)
assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)
datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 35), is_dst=True)
correct_next_date = eastern.localize(datetime(2013, 11, 3, 1), is_dst=False)
assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)
def test_timezone_change(self, timezone):
"""
Ensure that get_next_fire_time method returns datetimes in the timezone of the trigger and not in the timezone
of the passed in start_date.
"""
est = pytz.FixedOffset(-300)
cst = pytz.FixedOffset(-360)
trigger = CronTrigger(hour=11, minute='*/5', timezone=est)
start_date = cst.localize(datetime(2009, 9, 26, 10, 16))
correct_next_date = est.localize(datetime(2009, 9, 26, 11, 20))
assert str(trigger.get_next_fire_time(None, start_date)) == str(correct_next_date)
class TestDateTrigger(object):
@pytest.mark.parametrize('run_date,alter_tz,previous,now,expected', [
(datetime(2009, 7, 6), None, None, datetime(2008, 5, 4), datetime(2009, 7, 6)),
(datetime(2009, 7, 6), None, None, datetime(2009, 7, 6), datetime(2009, 7, 6)),
(datetime(2009, 7, 6), None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
('2009-7-6', None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
(datetime(2009, 7, 6), None, datetime(2009, 7, 6), datetime(2009, 9, 2), None),
(datetime(2009, 7, 5, 22), pytz.FixedOffset(-60), datetime(2009, 7, 6), datetime(2009, 7, 6), None)
], ids=['earlier', 'exact', 'later', 'as text', 'previously fired', 'alternate timezone'])
def test_get_next_fire_time(self, run_date, alter_tz, previous, now, expected, timezone):
trigger = DateTrigger(run_date, alter_tz or timezone)
previous = timezone.localize(previous) if previous else None
now = timezone.localize(now)
expected = timezone.localize(expected) if expected else None
assert trigger.get_next_fire_time(previous, now) == expected
@pytest.mark.parametrize('is_dst', [True, False], ids=['daylight saving', 'standard time'])
def test_dst_change(self, is_dst):
"""
Making sure that DateTrigger works during the ambiguous "fall-back" DST period.
Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
would test for equality in the UTC timezone.
"""
eastern = pytz.timezone('US/Eastern')
run_date = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=is_dst)
fire_date = eastern.normalize(run_date + timedelta(minutes=55))
trigger = DateTrigger(run_date=fire_date, timezone=eastern)
assert str(trigger.get_next_fire_time(None, fire_date)) == str(fire_date)
def test_repr(self, timezone):
trigger = DateTrigger(datetime(2009, 7, 6), timezone)
assert repr(trigger) == "<DateTrigger (run_date='2009-07-06 00:00:00 CEST')>"
def test_str(self, timezone):
trigger = DateTrigger(datetime(2009, 7, 6), timezone)
assert str(trigger) == "date[2009-07-06 00:00:00 CEST]"
class TestIntervalTrigger(object):
@pytest.fixture()
def trigger(self, timezone):
return IntervalTrigger(seconds=1, start_date=datetime(2009, 8, 4, second=2), timezone=timezone)
def test_invalid_interval(self, timezone):
pytest.raises(TypeError, IntervalTrigger, '1-6', timezone=timezone)
def test_before(self, trigger, timezone):
"""Tests that if "start_date" is later than "now", it will return start_date."""
now = trigger.start_date - timedelta(seconds=2)
assert trigger.get_next_fire_time(None, now) == trigger.start_date
def test_within(self, trigger, timezone):
"""Tests that if "now" is between "start_date" and the next interval, it will return the next interval."""
now = trigger.start_date + timedelta(microseconds=1000)
assert trigger.get_next_fire_time(None, now) == trigger.start_date + trigger.interval
def test_no_start_date(self, timezone):
trigger = IntervalTrigger(seconds=2, timezone=timezone)
now = datetime.now(timezone)
assert (trigger.get_next_fire_time(None, now) - now) <= timedelta(seconds=2)
def test_different_tz(self, trigger, timezone):
alter_tz = pytz.FixedOffset(-60)
start_date = alter_tz.localize(datetime(2009, 8, 3, 22, second=2, microsecond=1000))
correct_next_date = timezone.localize(datetime(2009, 8, 4, 1, second=3))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_end_date(self, timezone):
"""Tests that the interval trigger won't return any datetimes past the set end time."""
start_date = timezone.localize(datetime(2014, 5, 26))
trigger = IntervalTrigger(minutes=5, start_date=start_date, end_date=datetime(2014, 5, 26, 0, 7),
timezone=timezone)
assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=2)) == start_date.replace(minute=5)
assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=6)) is None
def test_dst_change(self):
"""
Making sure that IntervalTrigger works during the ambiguous "fall-back" DST period.
Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
would test for equality in the UTC timezone.
"""
eastern = pytz.timezone('US/Eastern')
start_date = datetime(2013, 6, 1) # Start within EDT
trigger = IntervalTrigger(hours=1, start_date=start_date, timezone=eastern)
datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=True)
correct_next_date = eastern.normalize(datetime_edt + timedelta(minutes=55))
assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)
datetime_est = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=False)
correct_next_date = eastern.normalize(datetime_est + timedelta(minutes=55))
assert str(trigger.get_next_fire_time(None, datetime_est)) == str(correct_next_date)
def test_repr(self, trigger):
assert repr(trigger) == \
"<IntervalTrigger (interval=datetime.timedelta(0, 1), start_date='2009-08-04 00:00:02 CEST')>"
def test_str(self, trigger):
assert str(trigger) == "interval[0:00:01]"
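# The following sketch (a hypothetical test, not tied to any trigger class)
# isolates the string-comparison idiom the DST tests above rely on: two
# datetimes can denote the same UTC instant, so "==" cannot tell them apart,
# while their wall-clock renderings still differ.
def test_dst_string_comparison_idiom():
    eastern = pytz.timezone('US/Eastern')
    edt = eastern.localize(datetime(2013, 11, 3, 1, 30), is_dst=True)
    shifted = edt + timedelta(hours=1)        # still tagged EDT: 02:30-04:00
    normalized = eastern.normalize(shifted)   # the real wall clock: 01:30-05:00
    assert shifted == normalized              # equal as UTC instants
    assert str(shifted) != str(normalized)    # but distinguishable as strings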
| 0.005416 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Run processes of a Thermos task.
This module contains the Process class, used to manage the execution of the constituent processes of
a Thermos task. Each process is represented by a "coordinator" process, which fires off the actual
commandline in a subprocess of its own.
"""
import grp
import os
import pwd
import signal
import subprocess
import sys
import time
from abc import abstractmethod
from twitter.common import log
from twitter.common.dirutil import lock_file, safe_mkdir, safe_open
from twitter.common.lang import Interface
from twitter.common.quantity import Amount, Time
from twitter.common.recordio import ThriftRecordReader, ThriftRecordWriter
from gen.apache.thermos.ttypes import ProcessState, ProcessStatus, RunnerCkpt
class Platform(Interface):
"""Abstract representation of a platform encapsulating system-level functions"""
@abstractmethod
def clock(self):
pass
@abstractmethod
def fork(self):
pass
@abstractmethod
def getpid(self):
pass
class ProcessBase(object):
"""
Encapsulate a running process for a task.
"""
class Error(Exception): pass
class UnknownUserError(Error): pass
class CheckpointError(Error): pass
class UnspecifiedSandbox(Error): pass
class PermissionError(Error): pass
CONTROL_WAIT_CHECK_INTERVAL = Amount(100, Time.MILLISECONDS)
MAXIMUM_CONTROL_WAIT = Amount(1, Time.MINUTES)
def __init__(self, name, cmdline, sequence, pathspec, sandbox_dir, user=None, platform=None):
"""
required:
name = name of the process
cmdline = cmdline of the process
sequence = the next available sequence number for state updates
pathspec = TaskPath object for synthesizing path names
sandbox_dir = the sandbox in which to run the process
platform = Platform providing fork, clock, getpid
optional:
        user = the user to run as (if unspecified, defaults to the current user).
               running as a user other than the current user requires root access
"""
self._name = name
self._cmdline = cmdline
self._pathspec = pathspec
self._seq = sequence
self._sandbox = sandbox_dir
if self._sandbox:
safe_mkdir(self._sandbox)
self._pid = None
self._fork_time = None
self._stdout = None
self._stderr = None
self._user = user
self._ckpt = None
self._ckpt_head = -1
if platform is None:
raise ValueError("Platform must be specified")
self._platform = platform
def _log(self, msg):
log.debug('[process:%5s=%s]: %s' % (self._pid, self.name(), msg))
def _getpwuid(self):
"""Returns a tuple of the user (i.e. --user) and current user."""
uid = os.getuid()
try:
current_user = pwd.getpwuid(uid)
except KeyError:
raise self.UnknownUserError('Unknown uid %s!' % uid)
try:
user = pwd.getpwnam(self._user) if self._user is not None else current_user
except KeyError:
raise self.UnknownUserError('Unable to get pwent information!')
return user, current_user
def _ckpt_write(self, msg):
self._init_ckpt_if_necessary()
self._log("child state transition [%s] <= %s" % (self.ckpt_file(), msg))
self._ckpt.write(msg)
def _write_process_update(self, **kw):
"""Write a process update to the coordinator's checkpoint stream."""
process_status = ProcessStatus(**kw)
process_status.seq = self._seq
process_status.process = self.name()
self._ckpt_write(RunnerCkpt(process_status=process_status))
self._seq += 1
def _write_initial_update(self):
self._write_process_update(state=ProcessState.FORKED,
fork_time=self._fork_time,
coordinator_pid=self._pid)
def cmdline(self):
return self._cmdline
def name(self):
return self._name
def pid(self):
"""pid of the coordinator"""
return self._pid
def rebind(self, pid, fork_time):
"""rebind Process to an existing coordinator pid without forking"""
self._pid = pid
self._fork_time = fork_time
def ckpt_file(self):
return self._pathspec.getpath('process_checkpoint')
def _setup_ckpt(self):
"""Set up the checkpoint: must be run on the parent."""
self._log('initializing checkpoint file: %s' % self.ckpt_file())
ckpt_fp = lock_file(self.ckpt_file(), "a+")
if ckpt_fp in (None, False):
raise self.CheckpointError('Could not acquire checkpoint permission or lock for %s!' %
self.ckpt_file())
self._ckpt_head = os.path.getsize(self.ckpt_file())
ckpt_fp.seek(self._ckpt_head)
self._ckpt = ThriftRecordWriter(ckpt_fp)
self._ckpt.set_sync(True)
def _init_ckpt_if_necessary(self):
if self._ckpt is None:
self._setup_ckpt()
def _wait_for_control(self):
"""Wait for control of the checkpoint stream: must be run in the child."""
total_wait_time = Amount(0, Time.SECONDS)
with open(self.ckpt_file(), 'r') as fp:
fp.seek(self._ckpt_head)
rr = ThriftRecordReader(fp, RunnerCkpt)
while total_wait_time < self.MAXIMUM_CONTROL_WAIT:
ckpt_tail = os.path.getsize(self.ckpt_file())
if ckpt_tail == self._ckpt_head:
self._platform.clock().sleep(self.CONTROL_WAIT_CHECK_INTERVAL.as_(Time.SECONDS))
total_wait_time += self.CONTROL_WAIT_CHECK_INTERVAL
continue
checkpoint = rr.try_read()
if checkpoint:
if not checkpoint.process_status:
raise self.CheckpointError('No process status in checkpoint!')
if (checkpoint.process_status.process != self.name() or
checkpoint.process_status.state != ProcessState.FORKED or
checkpoint.process_status.fork_time != self._fork_time or
checkpoint.process_status.coordinator_pid != self._pid):
self._log('Losing control of the checkpoint stream:')
self._log(' fork_time [%s] vs self._fork_time [%s]' % (
checkpoint.process_status.fork_time, self._fork_time))
self._log(' coordinator_pid [%s] vs self._pid [%s]' % (
checkpoint.process_status.coordinator_pid, self._pid))
raise self.CheckpointError('Lost control of the checkpoint stream!')
self._log('Taking control of the checkpoint stream at record: %s' %
checkpoint.process_status)
self._seq = checkpoint.process_status.seq + 1
return True
raise self.CheckpointError('Timed out waiting for checkpoint stream!')
def _prepare_fork(self):
user, current_user = self._getpwuid()
if self._user:
if user != current_user and os.geteuid() != 0:
raise self.PermissionError('Must be root to run processes as other users!')
uid, gid = user.pw_uid, user.pw_gid
self._fork_time = self._platform.clock().time()
self._setup_ckpt()
self._stdout = safe_open(self._pathspec.with_filename('stdout').getpath('process_logdir'), "a")
self._stderr = safe_open(self._pathspec.with_filename('stderr').getpath('process_logdir'), "a")
os.chown(self._stdout.name, user.pw_uid, user.pw_gid)
os.chown(self._stderr.name, user.pw_uid, user.pw_gid)
def _finalize_fork(self):
self._write_initial_update()
self._ckpt.close()
self._ckpt = None
def start(self):
"""
This is the main call point from the runner, and forks a co-ordinator process to run the
target process (i.e. self.cmdline())
The parent returns immediately and populates information about the pid of the co-ordinator.
The child (co-ordinator) will launch the target process in a subprocess.
"""
self._prepare_fork() # calls _setup_ckpt which can raise CheckpointError
# calls _getpwuid which can raise:
# UnknownUserError
# PermissionError
self._pid = self._platform.fork()
if self._pid == 0:
self._pid = self._platform.getpid()
self._wait_for_control() # can raise CheckpointError
try:
self.execute()
finally:
self._ckpt.close()
self.finish()
else:
self._finalize_fork() # can raise CheckpointError
def execute(self):
raise NotImplementedError
def finish(self):
pass
class RealPlatform(Platform):
IGNORE_SIGNALS = (signal.SIGINT,)
def __init__(self, fork=os.fork):
self._fork = fork
def fork(self):
pid = self._fork()
if pid == 0:
self._sanitize()
return pid
def _sanitize(self):
for sig in self.IGNORE_SIGNALS:
signal.signal(sig, signal.SIG_IGN)
def getpid(self):
return os.getpid()
def clock(self):
return time
class Process(ProcessBase):
"""
Encapsulate a running process for a task.
"""
RCFILE = '.thermos_profile'
FD_CLOEXEC = True
def __init__(self, *args, **kw):
"""
See ProcessBase.__init__
Takes additional arguments:
fork: the fork function to use [default: os.fork]
chroot: whether or not to chroot into the sandbox [default: False]
"""
fork = kw.pop('fork', os.fork)
self._use_chroot = bool(kw.pop('chroot', False))
self._rc = None
kw['platform'] = RealPlatform(fork=fork)
ProcessBase.__init__(self, *args, **kw)
if self._use_chroot and self._sandbox is None:
raise self.UnspecifiedSandbox('If using chroot, must specify sandbox!')
def _chroot(self):
"""chdir and chroot to the sandbox directory."""
os.chdir(self._sandbox)
os.chroot(self._sandbox)
def _setuid(self):
"""Drop privileges to the user supplied in Process creation (if necessary.)"""
user, current_user = self._getpwuid()
if user.pw_uid == current_user.pw_uid:
return
uid, gid = user.pw_uid, user.pw_gid
username = user.pw_name
group_ids = [group.gr_gid for group in grp.getgrall() if username in group.gr_mem]
os.setgroups(group_ids)
os.setgid(gid)
os.setuid(uid)
def execute(self):
"""Perform final initialization and launch target process commandline in a subprocess."""
if not self._stderr:
raise RuntimeError('self._stderr not set up!')
if not self._stdout:
raise RuntimeError('self._stdout not set up!')
user, _ = self._getpwuid()
username, homedir = user.pw_name, user.pw_dir
# TODO(wickman) reconsider setsid now that we're invoking in a subshell
os.setsid()
if self._use_chroot:
self._chroot()
self._setuid()
# start process
start_time = self._platform.clock().time()
if not self._sandbox:
sandbox = os.getcwd()
else:
sandbox = self._sandbox if not self._use_chroot else '/'
thermos_profile = os.path.join(sandbox, self.RCFILE)
env = {
'HOME': homedir if self._use_chroot else sandbox,
'LOGNAME': username,
'USER': username,
'PATH': os.environ['PATH']
}
if os.path.exists(thermos_profile):
env.update(BASH_ENV=thermos_profile)
self._popen = subprocess.Popen(["/bin/bash", "-c", self.cmdline()],
stderr=self._stderr,
stdout=self._stdout,
close_fds=self.FD_CLOEXEC,
cwd=sandbox,
env=env)
self._write_process_update(state=ProcessState.RUNNING,
pid=self._popen.pid,
start_time=start_time)
# wait for job to finish
rc = self._popen.wait()
# indicate that we have finished/failed
if rc < 0:
state = ProcessState.KILLED
elif rc == 0:
state = ProcessState.SUCCESS
else:
state = ProcessState.FAILED
self._write_process_update(state=state,
return_code=rc,
stop_time=self._platform.clock().time())
self._rc = rc
def finish(self):
self._log('Coordinator exiting.')
sys.exit(0)
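# A minimal sketch of a test double for the Platform interface defined above.
# The names FakeClock and FakePlatform are illustrative and not part of
# Thermos; fork() simply pretends to be the parent by returning a fixed pid,
# so ProcessBase bookkeeping can be exercised without spawning a coordinator.
class FakeClock(object):
  def __init__(self, now=0.0):
    self._now = now
  def time(self):
    return self._now
  def sleep(self, seconds):
    self._now += seconds
class FakePlatform(Platform):
  def __init__(self, pid=12345):
    self._pid = pid
    self._clock = FakeClock()
  def clock(self):
    return self._clock
  def fork(self):
    return self._pid  # always "the parent"; no child process is created
  def getpid(self):
    return self._pid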
| 0.0094 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
In addition to the original paper's code, changes have been made
to support MultiDiscrete action spaces. The behaviour_policy_logits,
target_policy_logits and actions parameters of the entry point
multi_from_logits accept lists of tensors instead of just
tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from ray.rllib.models.action_dist import Categorical
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
VTraceFromLogitsReturns = collections.namedtuple("VTraceFromLogitsReturns", [
"vs", "pg_advantages", "log_rhos", "behaviour_action_log_probs",
"target_action_log_probs"
])
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def log_probs_from_logits_and_actions(policy_logits,
actions,
dist_class=Categorical):
return multi_log_probs_from_logits_and_actions([policy_logits], [actions],
dist_class)[0]
def multi_log_probs_from_logits_and_actions(policy_logits, actions,
dist_class):
"""Computes action log-probs from policy logits and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing a softmax policy.
actions: A list with length of ACTION_SPACE of
tensors of shapes
[T, B, ...],
...,
[T, B, ...]
with actions.
Returns:
A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B],
...,
[T, B]
corresponding to the sampling log probability
of the chosen action w.r.t. the policy.
"""
log_probs = []
for i in range(len(policy_logits)):
p_shape = tf.shape(policy_logits[i])
a_shape = tf.shape(actions[i])
policy_logits_flat = tf.reshape(policy_logits[i],
tf.concat([[-1], p_shape[2:]], axis=0))
actions_flat = tf.reshape(actions[i],
tf.concat([[-1], a_shape[2:]], axis=0))
log_probs.append(
tf.reshape(
dist_class(policy_logits_flat).logp(actions_flat),
a_shape[:2]))
return log_probs
def from_logits(behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
dist_class=Categorical,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
name="vtrace_from_logits"):
"""multi_from_logits wrapper used only for tests"""
res = multi_from_logits(
[behaviour_policy_logits], [target_policy_logits], [actions],
discounts,
rewards,
values,
bootstrap_value,
dist_class,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
name=name)
return VTraceFromLogitsReturns(
vs=res.vs,
pg_advantages=res.pg_advantages,
log_rhos=res.log_rhos,
behaviour_action_log_probs=tf.squeeze(
res.behaviour_action_log_probs, axis=0),
target_action_log_probs=tf.squeeze(
res.target_action_log_probs, axis=0),
)
def multi_from_logits(behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
dist_class,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
name="vtrace_from_logits"):
r"""V-trace for softmax policies.
Calculates V-trace actor critic targets for softmax polices as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
Target policy refers to the policy we are interested in improving and
behaviour policy refers to the policy that generated the given
rewards and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
behaviour_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing the softmax behaviour
policy.
target_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing the softmax target
policy.
actions: A list with length of ACTION_SPACE of
tensors of shapes
[T, B, ...],
...,
[T, B, ...]
with actions sampled from the behaviour policy.
discounts: A float32 tensor of shape [T, B] with the discount encountered
when following the behaviour policy.
rewards: A float32 tensor of shape [T, B] with the rewards generated by
following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function estimates
wrt. the target policy.
bootstrap_value: A float32 of shape [B] with the value function estimate at
time T.
dist_class: action distribution class for the logits.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
name: The name scope that all V-trace operations will be created in.
Returns:
A `VTraceFromLogitsReturns` namedtuple with the following fields:
vs: A float32 tensor of shape [T, B]. Can be used as target to train a
baseline (V(x_t) - vs_t)^2.
      pg_advantages: A float32 tensor of shape [T, B]. Can be used as an
estimate of the advantage in the calculation of policy gradients.
log_rhos: A float32 tensor of shape [T, B] containing the log importance
sampling weights (log rhos).
behaviour_action_log_probs: A float32 tensor of shape [T, B] containing
behaviour policy action log probabilities (log \mu(a_t)).
target_action_log_probs: A float32 tensor of shape [T, B] containing
        target policy action log probabilities (log \pi(a_t)).
"""
for i in range(len(behaviour_policy_logits)):
behaviour_policy_logits[i] = tf.convert_to_tensor(
behaviour_policy_logits[i], dtype=tf.float32)
target_policy_logits[i] = tf.convert_to_tensor(
target_policy_logits[i], dtype=tf.float32)
# Make sure tensor ranks are as expected.
    # The rest will be checked by multi_log_probs_from_logits_and_actions.
behaviour_policy_logits[i].shape.assert_has_rank(3)
target_policy_logits[i].shape.assert_has_rank(3)
with tf.name_scope(
name,
values=[
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value
]):
target_action_log_probs = multi_log_probs_from_logits_and_actions(
target_policy_logits, actions, dist_class)
behaviour_action_log_probs = multi_log_probs_from_logits_and_actions(
behaviour_policy_logits, actions, dist_class)
log_rhos = get_log_rhos(target_action_log_probs,
behaviour_action_log_probs)
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behaviour_action_log_probs=behaviour_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict())
def from_importance_weights(log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
name="vtrace_from_importance_weights"):
r"""V-trace from log importance weights.
Calculates V-trace actor critic targets as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size. This code
also supports the case where all tensors have the same number of additional
dimensions, e.g., `rewards` is [T, B, C], `values` is [T, B, C],
`bootstrap_value` is [B, C].
Args:
log_rhos: A float32 tensor of shape [T, B] representing the
log importance sampling weights, i.e.
log(target_policy(a) / behaviour_policy(a)). V-trace performs operations
on rhos in log-space for numerical stability.
discounts: A float32 tensor of shape [T, B] with discounts encountered when
following the behaviour policy.
rewards: A float32 tensor of shape [T, B] containing rewards generated by
following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function estimates
wrt. the target policy.
bootstrap_value: A float32 of shape [B] with the value function estimate at
time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper. If None, no clipping is applied.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). If
None, no clipping is applied.
name: The name scope that all V-trace operations will be created in.
Returns:
A VTraceReturns namedtuple (vs, pg_advantages) where:
vs: A float32 tensor of shape [T, B]. Can be used as target to
train a baseline (V(x_t) - vs_t)^2.
pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
advantage in the calculation of policy gradients.
"""
log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
values = tf.convert_to_tensor(values, dtype=tf.float32)
bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
if clip_rho_threshold is not None:
clip_rho_threshold = tf.convert_to_tensor(
clip_rho_threshold, dtype=tf.float32)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold = tf.convert_to_tensor(
clip_pg_rho_threshold, dtype=tf.float32)
# Make sure tensor ranks are consistent.
rho_rank = log_rhos.shape.ndims # Usually 2.
values.shape.assert_has_rank(rho_rank)
bootstrap_value.shape.assert_has_rank(rho_rank - 1)
discounts.shape.assert_has_rank(rho_rank)
rewards.shape.assert_has_rank(rho_rank)
if clip_rho_threshold is not None:
clip_rho_threshold.shape.assert_has_rank(0)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold.shape.assert_has_rank(0)
with tf.name_scope(
name,
values=[log_rhos, discounts, rewards, values, bootstrap_value]):
rhos = tf.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = tf.minimum(
clip_rho_threshold, rhos, name="clipped_rhos")
tf.summary.histogram("clipped_rhos_1000", tf.minimum(1000.0, rhos))
tf.summary.scalar(
"num_of_clipped_rhos",
tf.reduce_sum(
tf.cast(
tf.equal(clipped_rhos, clip_rho_threshold), tf.int32)))
tf.summary.scalar("size_of_clipped_rhos", tf.size(clipped_rhos))
else:
clipped_rhos = rhos
cs = tf.minimum(1.0, rhos, name="cs")
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = tf.concat(
[values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
deltas = clipped_rhos * (
rewards + discounts * values_t_plus_1 - values)
# All sequences are reversed, computation starts from the back.
sequences = (
tf.reverse(discounts, axis=[0]),
tf.reverse(cs, axis=[0]),
tf.reverse(deltas, axis=[0]),
)
# V-trace vs are calculated through a scan from the back to the
# beginning of the given trajectory.
def scanfunc(acc, sequence_item):
discount_t, c_t, delta_t = sequence_item
return delta_t + discount_t * c_t * acc
initial_values = tf.zeros_like(bootstrap_value)
vs_minus_v_xs = tf.scan(
fn=scanfunc,
elems=sequences,
initializer=initial_values,
parallel_iterations=1,
back_prop=False,
name="scan")
# Reverse the results back to original order.
vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name="vs_minus_v_xs")
# Add V(x_s) to get v_s.
vs = tf.add(vs_minus_v_xs, values, name="vs")
# Advantage for policy gradient.
vs_t_plus_1 = tf.concat(
[vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = tf.minimum(
clip_pg_rho_threshold, rhos, name="clipped_pg_rhos")
else:
clipped_pg_rhos = rhos
pg_advantages = (
clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(
vs=tf.stop_gradient(vs),
pg_advantages=tf.stop_gradient(pg_advantages))
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
"""With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace."""
t = tf.stack(target_action_log_probs)
b = tf.stack(behaviour_action_log_probs)
log_rhos = tf.reduce_sum(t - b, axis=0)
return log_rhos
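# Minimal usage sketch, not part of the RLlib module above. It assumes a
# TF 1.x graph-mode session, since from_importance_weights() relies on
# tf.name_scope(name, values=...) and tf.scan(back_prop=False). Shapes follow
# the docstrings: T=3 timesteps, B=2 batch entries, and log_rhos of zero
# (i.e. on-policy data), for which v-trace reduces to the usual n-step
# bootstrapped return.
if __name__ == "__main__" and tf is not None:
  import numpy as np
  T, B = 3, 2
  log_rhos = np.zeros([T, B], dtype=np.float32)
  discounts = np.full([T, B], 0.99, dtype=np.float32)
  rewards = np.ones([T, B], dtype=np.float32)
  values = np.zeros([T, B], dtype=np.float32)
  bootstrap_value = np.zeros([B], dtype=np.float32)
  returns = from_importance_weights(
      log_rhos=log_rhos,
      discounts=discounts,
      rewards=rewards,
      values=values,
      bootstrap_value=bootstrap_value)
  with tf.Session() as sess:
    vs, pg_advantages = sess.run([returns.vs, returns.pg_advantages])
    print(vs)              # v-trace targets for the baseline
    print(pg_advantages)   # advantages for the policy-gradient term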
| 0 |
"""Support for monitoring a Smappee energy sensor."""
from datetime import timedelta
import logging
from homeassistant.const import ENERGY_KILO_WATT_HOUR, POWER_WATT
from homeassistant.helpers.entity import Entity
from . import DATA_SMAPPEE
_LOGGER = logging.getLogger(__name__)
SENSOR_PREFIX = 'Smappee'
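# Each SENSOR_TYPES entry is [friendly name, icon, source type ('local',
# 'remote' or 'water'), unit of measurement, Smappee data key];
# SmappeeSensor.__init__ unpacks the list in that order.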
SENSOR_TYPES = {
'solar':
['Solar', 'mdi:white-balance-sunny', 'local', POWER_WATT, 'solar'],
'active_power':
['Active Power', 'mdi:power-plug', 'local', POWER_WATT,
'active_power'],
'current':
['Current', 'mdi:gauge', 'local', 'A', 'current'],
'voltage':
['Voltage', 'mdi:gauge', 'local', 'V', 'voltage'],
'active_cosfi':
['Power Factor', 'mdi:gauge', 'local', '%', 'active_cosfi'],
'alwayson_today':
['Always On Today', 'mdi:gauge', 'remote', ENERGY_KILO_WATT_HOUR,
'alwaysOn'],
'solar_today':
['Solar Today', 'mdi:white-balance-sunny', 'remote',
ENERGY_KILO_WATT_HOUR, 'solar'],
'power_today':
['Power Today', 'mdi:power-plug', 'remote', ENERGY_KILO_WATT_HOUR,
'consumption'],
'water_sensor_1':
['Water Sensor 1', 'mdi:water', 'water', 'm3', 'value1'],
'water_sensor_2':
['Water Sensor 2', 'mdi:water', 'water', 'm3', 'value2'],
'water_sensor_temperature':
['Water Sensor Temperature', 'mdi:temperature-celsius',
'water', '°', 'temperature'],
'water_sensor_humidity':
['Water Sensor Humidity', 'mdi:water-percent', 'water',
'%', 'humidity'],
'water_sensor_battery':
['Water Sensor Battery', 'mdi:battery', 'water', '%', 'battery'],
}
SCAN_INTERVAL = timedelta(seconds=30)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Smappee sensor."""
smappee = hass.data[DATA_SMAPPEE]
dev = []
if smappee.is_remote_active:
for location_id in smappee.locations.keys():
for sensor in SENSOR_TYPES:
if 'remote' in SENSOR_TYPES[sensor]:
dev.append(SmappeeSensor(smappee, location_id,
sensor,
SENSOR_TYPES[sensor]))
elif 'water' in SENSOR_TYPES[sensor]:
for items in smappee.info[location_id].get('sensors'):
dev.append(SmappeeSensor(
smappee,
location_id,
'{}:{}'.format(sensor, items.get('id')),
SENSOR_TYPES[sensor]))
if smappee.is_local_active:
for location_id in smappee.locations.keys():
for sensor in SENSOR_TYPES:
if 'local' in SENSOR_TYPES[sensor]:
if smappee.is_remote_active:
dev.append(SmappeeSensor(smappee, location_id, sensor,
SENSOR_TYPES[sensor]))
else:
dev.append(SmappeeSensor(smappee, None, sensor,
SENSOR_TYPES[sensor]))
add_entities(dev, True)
class SmappeeSensor(Entity):
"""Implementation of a Smappee sensor."""
def __init__(self, smappee, location_id, sensor, attributes):
"""Initialize the Smappee sensor."""
self._smappee = smappee
self._location_id = location_id
self._attributes = attributes
self._sensor = sensor
self.data = None
self._state = None
self._name = self._attributes[0]
self._icon = self._attributes[1]
self._type = self._attributes[2]
self._unit_of_measurement = self._attributes[3]
self._smappe_name = self._attributes[4]
@property
def name(self):
"""Return the name of the sensor."""
if self._location_id:
location_name = self._smappee.locations[self._location_id]
else:
location_name = 'Local'
return "{} {} {}".format(SENSOR_PREFIX, location_name, self._name)
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self._location_id:
attr['Location Id'] = self._location_id
attr['Location Name'] = self._smappee.locations[self._location_id]
return attr
def update(self):
"""Get the latest data from Smappee and update the state."""
self._smappee.update()
if self._sensor in ['alwayson_today', 'solar_today', 'power_today']:
data = self._smappee.consumption[self._location_id]
if data:
consumption = data.get('consumptions')[-1]
_LOGGER.debug("%s %s", self._sensor, consumption)
value = consumption.get(self._smappe_name)
self._state = round(value / 1000, 2)
elif self._sensor == 'active_cosfi':
cosfi = self._smappee.active_cosfi()
_LOGGER.debug("%s %s", self._sensor, cosfi)
if cosfi:
self._state = round(cosfi, 2)
elif self._sensor == 'current':
current = self._smappee.active_current()
_LOGGER.debug("%s %s", self._sensor, current)
if current:
self._state = round(current, 2)
elif self._sensor == 'voltage':
voltage = self._smappee.active_voltage()
_LOGGER.debug("%s %s", self._sensor, voltage)
if voltage:
self._state = round(voltage, 3)
elif self._sensor == 'active_power':
data = self._smappee.instantaneous
_LOGGER.debug("%s %s", self._sensor, data)
if data:
value1 = [float(i['value']) for i in data
if i['key'].endswith('phase0ActivePower')]
value2 = [float(i['value']) for i in data
if i['key'].endswith('phase1ActivePower')]
value3 = [float(i['value']) for i in data
if i['key'].endswith('phase2ActivePower')]
active_power = sum(value1 + value2 + value3) / 1000
self._state = round(active_power, 2)
elif self._sensor == 'solar':
data = self._smappee.instantaneous
_LOGGER.debug("%s %s", self._sensor, data)
if data:
value1 = [float(i['value']) for i in data
if i['key'].endswith('phase3ActivePower')]
value2 = [float(i['value']) for i in data
if i['key'].endswith('phase4ActivePower')]
value3 = [float(i['value']) for i in data
if i['key'].endswith('phase5ActivePower')]
power = sum(value1 + value2 + value3) / 1000
self._state = round(power, 2)
elif self._type == 'water':
sensor_name, sensor_id = self._sensor.split(":")
data = self._smappee.sensor_consumption[self._location_id]\
.get(int(sensor_id))
if data:
tempdata = data.get('records')
if tempdata:
consumption = tempdata[-1]
_LOGGER.debug("%s (%s) %s",
sensor_name, sensor_id, consumption)
value = consumption.get(self._smappe_name)
self._state = value
| 0 |
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class GorillavidResolver(UrlResolver):
name = "gorillavid"
domains = ["gorillavid.in", "gorillavid.com"]
pattern = '(?://|\.)(gorillavid\.(?:in|com))/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
response = self.net.http_GET(web_url, headers=headers)
html = response.content
sources = helpers.scrape_sources(html)
if not sources:
data = helpers.get_hidden(html)
headers['Cookie'] = response.get_headers(as_dict=True).get('Set-Cookie', '')
html = self.net.http_POST(response.get_url(), headers=headers, form_data=data).content
sources = helpers.scrape_sources(html)
return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_url(self, host, media_id):
return 'http://gorillavid.in/%s' % (media_id)
| 0.002687 |
_const_codes = [
'POP_TOP','ROT_TWO','ROT_THREE','ROT_FOUR','DUP_TOP',
'BUILD_LIST','BUILD_MAP','BUILD_TUPLE',
'LOAD_CONST','RETURN_VALUE','STORE_SUBSCR'
]
_expr_codes = _const_codes + [
'UNARY_POSITIVE','UNARY_NEGATIVE','UNARY_NOT',
'UNARY_INVERT','BINARY_POWER','BINARY_MULTIPLY',
'BINARY_DIVIDE','BINARY_FLOOR_DIVIDE','BINARY_TRUE_DIVIDE',
'BINARY_MODULO','BINARY_ADD','BINARY_SUBTRACT',
'BINARY_LSHIFT','BINARY_RSHIFT','BINARY_AND','BINARY_XOR',
'BINARY_OR',
]
def _get_opcodes(codeobj):
"""_get_opcodes(codeobj) -> [opcodes]
Extract the actual opcodes as a list from a code object
>>> c = compile("[1 + 2, (1,2)]", "", "eval")
>>> _get_opcodes(c)
[100, 100, 23, 100, 100, 102, 103, 83]
"""
import dis
i = 0
opcodes = []
s = codeobj.co_code
while i < len(s):
code = ord(s[i])
opcodes.append(code)
if code >= dis.HAVE_ARGUMENT:
i += 3
else:
i += 1
return opcodes
def test_expr(expr, allowed_codes):
"""test_expr(expr) -> codeobj
Test that the expression contains only the listed opcodes.
If the expression is valid and contains only allowed codes,
return the compiled code object. Otherwise raise a ValueError
"""
import dis
allowed_codes = map(dis.opmap.__getitem__, allowed_codes)
try:
c = compile(expr, "", "eval")
except:
raise ValueError, "%s is not a valid expression" % expr
codes = _get_opcodes(c)
for code in codes:
if code not in allowed_codes:
raise ValueError, "opcode %s not allowed" % dis.opname[code]
return c
def const_eval(expr):
"""const_eval(expression) -> value
Safe Python constant evaluation
Evaluates a string that contains an expression describing
a Python constant. Strings that are not valid Python expressions
or that contain other code besides the constant raise ValueError.
>>> const_eval("10")
10
>>> const_eval("[1,2, (3,4), {'foo':'bar'}]")
[1, 2, (3, 4), {'foo': 'bar'}]
>>> const_eval("1+2")
Traceback (most recent call last):
...
ValueError: opcode BINARY_ADD not allowed
"""
c = test_expr(expr, _const_codes)
return eval(c)
def expr_eval(expr):
"""expr_eval(expression) -> value
Safe Python expression evaluation
Evaluates a string that contains an expression that only
uses Python constants. This can be used to e.g. evaluate
a numerical expression from an untrusted source.
>>> expr_eval("1+2")
3
>>> expr_eval("[1,2]*2")
[1, 2, 1, 2]
>>> expr_eval("__import__('sys').modules")
Traceback (most recent call last):
...
ValueError: opcode LOAD_NAME not allowed
"""
c = test_expr(expr, _expr_codes)
return eval(c)
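# Illustrative extension (not part of the original opcode sets): allowing bare
# name lookups on top of the plain-expression opcodes, so that a fixed mapping
# of variables can be referenced while everything else stays locked down.
_name_expr_codes = _expr_codes + ['LOAD_NAME']
def name_expr_eval(expr, names):
    """name_expr_eval(expression, names) -> value
    Safe evaluation of an expression over the supplied names only; builtins
    are stripped from the evaluation environment.
    >>> name_expr_eval("x*2 + 1", {'x': 20})
    41
    """
    c = test_expr(expr, _name_expr_codes)
    return eval(c, {'__builtins__': {}}, dict(names))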
| 0.009197 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.magentoerpconnect.unit.import_synchronizer import (
import_record)
from .common import mock_api, SetUpMagentoSynchronized
from .data_base import magento_base_responses
class TestPartnerCategory(SetUpMagentoSynchronized):
def test_import_partner_category(self):
""" Import of a partner category """
backend_id = self.backend_id
with mock_api(magento_base_responses):
import_record(self.session, 'magento.res.partner.category',
backend_id, 2)
binding_model = self.env['magento.res.partner.category']
category = binding_model.search([('backend_id', '=', backend_id),
('magento_id', '=', '2')])
self.assertEqual(len(category), 1)
self.assertEqual(category.name, 'Wholesale')
self.assertEqual(category.tax_class_id, 3)
def test_import_existing_partner_category(self):
""" Bind of an existing category with same name"""
binding_model = self.env['magento.res.partner.category']
category_model = self.env['res.partner.category']
existing_category = category_model.create({'name': 'Wholesale'})
backend_id = self.backend_id
with mock_api(magento_base_responses):
import_record(self.session, 'magento.res.partner.category',
backend_id, 2)
category = binding_model.search([('backend_id', '=', backend_id),
('magento_id', '=', '2')])
self.assertEqual(len(category), 1)
self.assertEqual(category.openerp_id, existing_category)
self.assertEqual(category.name, 'Wholesale')
self.assertEqual(category.tax_class_id, 3)
| 0 |
from pip._vendor import pytoml
from pip._internal.build_env import BuildEnvironment
from pip._internal.download import PipSession
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
def make_project(tmpdir, requires=[], backend=None):
buildsys = {'requires': requires}
if backend:
buildsys['build-backend'] = backend
data = pytoml.dumps({'build-system': buildsys})
tmpdir.join('pyproject.toml').write(data)
return tmpdir
def test_backend(tmpdir, data):
"""Can we call a requirement's backend successfully?"""
project = make_project(tmpdir, backend="dummy_backend")
req = InstallRequirement(None, None, source_dir=project)
req.load_pyproject_toml()
env = BuildEnvironment()
finder = PackageFinder([data.backends], [], session=PipSession())
env.install_requirements(finder, ["dummy_backend"], "Installing")
assert not env.missing_requirements(["dummy_backend"])
assert hasattr(req.pep517_backend, 'build_wheel')
with env:
assert req.pep517_backend.build_wheel("dir") == "Backend called"
| 0 |
## \file
## \ingroup tutorial_pyroot
## Example of function called when a mouse event occurs in a pad.
## When moving the mouse in the canvas, a second canvas shows the
## projection along X of the bin corresponding to the Y position
## of the mouse. The resulting histogram is fitted with a gaussian.
## A "dynamic" line shows the current bin position in Y.
## This more elaborate example can be used as a starting point
## for developing more powerful interactive applications that exploit CINT
## as a development engine.
##
## Note that a class is used to hold on to the canvas that display
## the selected slice.
##
## \macro_image
## \macro_code
##
## \author Rene Brun, Johann Cohen-Tanugi, Wim Lavrijsen
import sys
from ROOT import gRandom, gPad, gROOT, gVirtualX
from ROOT import kTRUE, kRed
from ROOT import TCanvas, TH2, TH2F, Double
class DynamicExec:
def __init__( self ):
self._cX = None
self._cY = None
self._old = None
def __call__( self ):
h = gPad.GetSelected();
if not h:
return
if not isinstance( h, TH2 ):
return
gPad.GetCanvas().FeedbackMode( kTRUE )
# erase old position and draw a line at current position
px = gPad.GetEventX()
py = gPad.GetEventY()
uxmin, uxmax = gPad.GetUxmin(), gPad.GetUxmax()
uymin, uymax = gPad.GetUymin(), gPad.GetUymax()
pxmin, pxmax = gPad.XtoAbsPixel( uxmin ), gPad.XtoAbsPixel( uxmax )
pymin, pymax = gPad.YtoAbsPixel( uymin ), gPad.YtoAbsPixel( uymax )
if self._old != None:
gVirtualX.DrawLine( pxmin, self._old[1], pxmax, self._old[1] )
gVirtualX.DrawLine( self._old[0], pymin, self._old[0], pymax )
gVirtualX.DrawLine( pxmin, py, pxmax, py )
gVirtualX.DrawLine( px, pymin, px, pymax )
self._old = px, py
upx = gPad.AbsPixeltoX( px )
x = gPad.PadtoX( upx )
upy = gPad.AbsPixeltoY( py )
y = gPad.PadtoY( upy )
padsav = gPad
# create or set the display canvases
if not self._cX:
self._cX = TCanvas( 'c2', 'Projection Canvas in X', 730, 10, 700, 500 )
else:
self._DestroyPrimitive( 'X' )
if not self._cY:
self._cY = TCanvas( 'c3', 'Projection Canvas in Y', 10, 550, 700, 500 )
else:
self._DestroyPrimitive( 'Y' )
self.DrawSlice( h, y, 'Y' )
self.DrawSlice( h, x, 'X' )
padsav.cd()
def _DestroyPrimitive( self, xy ):
proj = getattr( self, '_c'+xy ).GetPrimitive( 'Projection '+xy )
if proj:
proj.IsA().Destructor( proj )
def DrawSlice( self, histo, value, xy ):
yx = xy == 'X' and 'Y' or 'X'
# draw slice corresponding to mouse position
canvas = getattr( self, '_c'+xy )
canvas.SetGrid()
canvas.cd()
bin = getattr( histo, 'Get%saxis' % xy )().FindBin( value )
hp = getattr( histo, 'Projection' + yx )( '', bin, bin )
hp.SetFillColor( 38 )
hp.SetName( 'Projection ' + xy )
hp.SetTitle( xy + 'Projection of bin=%d' % bin )
hp.Fit( 'gaus', 'ql' )
hp.GetFunction( 'gaus' ).SetLineColor( kRed )
hp.GetFunction( 'gaus' ).SetLineWidth( 6 )
canvas.Update()
if __name__ == '__main__':
# create a new canvas.
c1 = TCanvas('c1', 'Dynamic Slice Example', 10, 10, 700, 500 )
c1.SetFillColor( 42 )
c1.SetFrameFillColor( 33 )
# create a 2-d histogram, fill and draw it
hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 )
hpxpy.SetStats( 0 )
x, y = Double( 0.1 ), Double( 0.101 )
for i in xrange( 50000 ):
gRandom.Rannor( x, y )
hpxpy.Fill( x, y )
hpxpy.Draw( 'COL' )
# Add a TExec object to the canvas (explicit use of __main__ is for IPython)
import __main__
__main__.slicer = DynamicExec()
c1.AddExec( 'dynamic', 'TPython::Exec( "slicer()" );' )
c1.Update()
| 0.051989 |
import re
def vipercharts_create_integers(input_dict):
intStr = input_dict['intStr']
intList = []
for i in re.findall(r'\w+', intStr):
try:
intList.append(int(i))
except:
pass
if input_dict['sort'].lower() == "true":
intList.sort()
return {'intList':intList}
def vipercharts_sum_integers(input_dict):
intList = input_dict['intList']
return {'sum':sum(intList)}
def vipercharts_pre_filter_integers(input_dict):
return input_dict
def vipercharts_post_filter_integers(postdata,input_dict,output_dict):
intListOut = postdata['intListOut']
intList = []
for i in intListOut:
try:
intList.append(int(i))
except:
pass
return {'intList': intList}
def vipercharts_pre_display_summation(input_dict):
return {}
# Prepare curve data
def vipercharts_prepareCurveData(input_dict): #, subtype
import math
nPoints=4
performance = input_dict['predictions']#chartdata
subtype = input_dict['subtype']
kenmax = 0.5
ratemax = 0.5
for curve in performance:
n = len(curve['actual'])
negs = curve['actual'].count(0)
poss = curve['actual'].count(1)
if poss == 0 or negs == 0:
print "Class Error, zero poss or zero negs, only one class or other type error."
return []
try:
ranks = curve['rank']
except:
ranks = range(n+1)[1:] # ranks from 1
paralel =[]
for i in range(n):
paralel.append([curve['actual'][i], float(curve['predicted'][i])])
if (subtype == '-score'):
ROCseries = [[0,0, '-Inf']]; PRseries = [[0,1, '-Inf']]; LIFTseries = [[0,0, '-Inf']]
ROChull = [[0,0,'-Inf']]; COSTseries = [[0,0,'-Inf']]; RATEseries = []; KENseries = [[0,0]]; KENup=[[0,1]]; KENdown=[[0,0]]
_oldrate = 0
_oldloss = 0
AUC = 0
AUPR = 0
ranked = sorted(paralel, key = lambda pair:pair[1], reverse=True)
print "ranked:"
print ranked
k = 0
tp = 0; fp = 0; tp_old = 0; fp_old = 0; n1 = 0; concordant_pairs = 0; discordant_pairs = 0;
while k < len(ranked):
addedconc = 0; addeddisc = 0;
threshold = ranked[k][1];
group = [x[0] for x in ranked if x[1] >= threshold]
tp = group.count(1)
fp = group.count(0)
#next k is len(group).
ties = len(group) - k
n1 += ties * (ties-1)/2
concordant_pairs += tp_old * (fp - fp_old)
discordant_pairs += fp_old * (tp - tp_old)
ROCpoint = [fp*1.0/negs,tp*1.0/poss, threshold]
ROCseries.append(ROCpoint)
AUC += (ROCpoint[1] + ROCseries[-2][1]) * (ROCpoint[0] - ROCseries[-2][0]) * 0.5
PRseries.append([tp*1.0/poss, tp*1.0/(tp+fp), threshold])
AUPR += (PRseries[-1][1] + PRseries[-2][1]) * (PRseries[-1][0] - PRseries[-2][0]) * 0.5
LIFTseries.append([len(group)*1.0/n, tp*1.0/poss, threshold])
#Convex hull and lower envelope:
while len(ROChull)>=2 and (ROChull[-1][0]==ROCpoint[0] or (ROChull[-2][0]!=ROChull[-1][0] and (ROChull[-1][1]-ROChull[-2][1])/(ROChull[-1][0]-ROChull[-2][0]) <= (ROCpoint[1]-ROChull[-1][1])/(ROCpoint[0]-ROChull[-1][0]))):
ROChull.pop()
COSTseries.pop()
ROChull.append(ROCpoint)
if(ROCpoint[0] != ROChull[-2][0]):
slope = (ROCpoint[1] - ROChull[-2][1]) / (ROCpoint[0] - ROChull[-2][0])
intercept = ROCpoint[1] - slope * ROCpoint[0]
COSTseries.append([1 / (slope + 1), (1 - intercept) / (1 + slope), threshold])
else:
if len(COSTseries) == 0:
COSTseries.append([0,0,threshold])
else:
COSTseries[0][2] = threshold
COSTend = 1 - ROCpoint[1]
#Rate driven curve:
                #The rate-driven curve is a list of intervals; each interval is sampled at nPoints points on the appropriate parabola.
RATEinterval = []
pi0 = poss * 1.0 / n
pi1 = 1 - pi0
_newrate = pi1*ROCpoint[0]+pi0*ROCpoint[1]
_newloss = 2*(_newrate*(pi0-_newrate) + pi1*ROCpoint[0])
RATEinterval.append([_oldrate, _oldloss, threshold, performance.index(curve)+1])
for i in range(1, nPoints):
alpha = i * 1.0/nPoints
rate = _oldrate + alpha * (_newrate - _oldrate)
loss = 2 * (rate * (pi0 - rate) + pi1 * (ROCseries[-2][0] + alpha * (ROCpoint[0] - ROCseries[-2][0])))
RATEinterval.append([rate, loss, 0])
RATEinterval.append([_newrate, _newloss, 0])
RATEseries.append(RATEinterval)
if _newloss > ratemax:
ratemax = _newloss
m = 0.5*(pi0+pi1*(ROCseries[-2][0]-ROCpoint[0])/(_newrate-_oldrate))
if m<_newrate and m>_oldrate:
mvalue=2*(m*(pi0-m)+pi1*((_newrate-m)*ROCseries[-2][0] + (m-_oldrate)*ROCpoint[0])/(_newrate - _oldrate))
if mvalue > ratemax:
ratemax = mvalue
#Kendall curve:
if _newrate <= pi0:
KENseries.append([_newrate, 2*pi1*ROCpoint[0], threshold])
else:
if _oldrate < pi0:
KENseries.append([pi0,(2*pi1*ROCpoint[0]-KENseries[-1][1])*(pi0-KENseries[-1][0])/(_newrate - KENseries[-1][0])+(KENseries[-1][1]), ''])
KENseries.append([_newrate, 2*pi0*(1-ROCpoint[1]), threshold])
if KENseries[-1][1] > kenmax:
kenmax = KENseries[-1][1]
_oldrate = _newrate
_oldloss = _newloss
                k = len(group)  # jump past all examples tied at this threshold
tp_old = tp
fp_old = fp
else:
            ROCseries = [[0,0,0]]; PRseries = [[0,1,0]]; LIFTseries = [[0,0,0]]# x: y: rank:
            ROChull = [[0,0,0]]; COSTseries = [[0,0,0]]; RATEseries = []; KENseries = [[0,0]]
            AUC = 0; AUPR = 0; COSTend = 0; n1 = 0; concordant_pairs = 0; discordant_pairs = 0
ranked = sorted(paralel, key=lambda pair:pair[1])
print ranked
k = 0
while k < len(ranked):
tp = 0; fp = 0;
threshold = ranked[k][1];
group = [x[0] for x in ranked if x[1] <= threshold]
print group
                tp = group.count(1)
                fp = group.count(0)
ROCpoint = [fp*1.0/negs,tp*1.0/poss, threshold]
ROCseries.append([fp*1.0/negs, tp*1.0/poss, int(threshold)])
PRseries.append([tp*1.0/poss, tp*1.0/(tp+fp), int(threshold)])
LIFTseries.append([len(group)*1.0/n, tp*1.0/poss, int(threshold)])
while len(ROChull)>=2 and (ROChull[-1][0]==ROCpoint[0] or (ROChull[-2][0]!=ROChull[-1][0] and (ROChull[-1][1]-ROChull[-2][1])/(ROChull[-1][0]-ROChull[-2][0]) <= (ROCpoint[1]-ROChull[-1][1])/(ROCpoint[0]-ROChull[-1][0]))):
ROChull.pop()
COSTseries.pop()
ROChull.append(ROCpoint)
if(ROCpoint[0]!=ROChull[-2][0]):
slope=(ROCpoint[1]-ROChull[-2][1])/(ROCpoint[0]-ROChull[-2][0])
intercept=ROCpoint[1]-slope*ROCpoint[0]
COSTseries.append([1/(1+slope), (1-intercept)/(1+slope)])
else:
COSTseries.append([0.0, ROCpoint[0]])
                k = len(group)  # jump past all examples tied at this threshold
if COSTseries[-1][0]<1:
#append final point with max threshold
COSTseries.append([1, COSTend, ranked[-1][1]])
curve['ROCpoints'] = ROCseries
curve['PRpoints'] = PRseries
curve['LIFTpoints'] = LIFTseries
curve['ROChull'] = ROChull
curve['COSTpoints'] = COSTseries
curve['RATEintervals'] = RATEseries
curve['KENpoints'] = KENseries
curve['AUC'] = AUC
curve['Gini'] = 2 * AUC - 1
n0=n*(n-1)/2
curve['KENtau'] = (concordant_pairs - discordant_pairs) / math.sqrt((n0 - n1) * (n0 - (negs*(negs-1) + poss*(poss-1))/2))
curve['AUPR'] = AUPR
AUCH = 0
for i in range(1, len(ROChull)):
AUCH += (ROChull[i][1] + ROChull[i-1][1]) * (ROChull[i][0] - ROChull[i-1][0]) * 0.5
curve['AUCH'] = AUCH
performance[0]['KENmax'] = kenmax
performance[0]['RATEmax'] = ratemax
output_dict = {}
output_dict['performance'] = performance
return output_dict
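# A minimal usage sketch for the curve-preparation widget above. The shape of
# 'predictions' is inferred from the parsing code (parallel 'actual' 0/1 labels
# and 'predicted' scores per curve); the classifier name and values below are
# made up purely for illustration.
def _example_prepare_curve_data():
    example_input = {
        'subtype': '-score',
        'predictions': [{
            'name': 'example classifier',
            'actual': [1, 0, 1, 1, 0],
            'predicted': [0.9, 0.8, 0.7, 0.4, 0.2],
        }],
    }
    out = vipercharts_prepareCurveData(example_input)
    curve = out['performance'][0]
    print 'AUC: %s AUPR: %s' % (curve['AUC'], curve['AUPR'])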
def vipercharts_compare_class_ActualPredicted_orange(input_dict):
actual = input_dict['actual']
predicted = input_dict['predicted']
target = input_dict['target']
out_dict = {'actual':[], 'predicted':[], 'name':input_dict['name']}
for i, ex in enumerate(actual):
# set actual
if target == ex.getclass().value:
out_dict['actual'].append(1)
else:
out_dict['actual'].append(0)
#set predicted
if predicted[i].getclass().value == target:
out_dict['predicted'].append(1)
else:
out_dict['predicted'].append(0)
output_dict = {}
output_dict['apv'] = out_dict
return output_dict
def class_from_odt(input_dict):
output_dict = {}
output_dict['target'] = []
return output_dict
def class_from_odt_selection(postdata, input_dict, output_dict):
print postdata['selected']
output_dict['target'] = postdata['selected'][0] #input_dict['data'].domain.class_var.values[postdata['selected']]
return output_dict
# Scatter charts
def vipercharts_pr_space(input_dict):
return {}
def vipercharts_roc_space(input_dict):
return {}
# Curve charts
def vipercharts_roc_curve(input_dict):
return {}
def vipercharts_roc_hull(input_dict):
return {}
def vipercharts_pr_curve(input_dict):
return {}
def vipercharts_lift_curve(input_dict):
return {}
def vipercharts_cost_curve(input_dict):
return {}
def vipercharts_kendall_curve(input_dict):
return {}
def vipercharts_rate_curve(input_dict):
return {}
# Column charts
def vipercharts_column_chart(input_dict):
return {}
def vipercharts_eval_bar_chart(input_dict):
return {}
# Related results table
def vipercharts_related_table(input_dict):
return {} | 0.041741 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def client_query_w_array_params():
# [START bigquery_query_params_arrays]
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
query = """
SELECT name, sum(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_2013`
WHERE gender = @gender
AND state IN UNNEST(@states)
GROUP BY name
ORDER BY count DESC
LIMIT 10;
"""
job_config = bigquery.QueryJobConfig(
query_parameters=[
bigquery.ScalarQueryParameter("gender", "STRING", "M"),
bigquery.ArrayQueryParameter("states", "STRING", ["WA", "WI", "WV", "WY"]),
]
)
query_job = client.query(query, job_config=job_config) # Make an API request.
for row in query_job:
print("{}: \t{}".format(row.name, row.count))
# [END bigquery_query_params_arrays]
| 0.001354 |
"""
Setup script adapted from Datadesk's softhyphen project:
https://github.com/datadesk/django-softhyphen/blob/master/setup.py
Which says:
Tricks lifted from Django's own setup.py and django_debug_toolbar.
Still not sure why the templates install with this particular config
and not with some of the others I tried.
"""
from setuptools import setup, find_packages
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
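    For example (illustrative, on a POSIX-style path):
        fullsplit('shibboleth/templates/base.html')
        returns ['shibboleth', 'templates', 'base.html']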
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
django_dir = 'shibboleth'
for dirpath, dirnames, filenames in os.walk(django_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
setup(
name = "django-shibboleth-remoteuser",
version='0.5',
long_description = read('README.md'),
author = 'Ted Lawless',
author_email = 'tlawless@brown.edu',
url = 'https://github.com/Brown-University-Library/django-shibboleth-remoteuser',
include_package_data = True,
packages=packages,
cmdclass = cmdclasses,
data_files=data_files,
)
| 0.009211 |
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_ethernet
short_description: Manage physical Ethernet interfaces in EOS
description:
- The eos_ethernet module manages the interface configuration for
physical Ethernet interfaces on EOS nodes.
version_added: 1.0.0
category: Interfaces
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Does not support stateful resource configuration.
options:
name:
description:
- The unique interface identifier name. The interface name must use
the full interface name (no abbreviated names). For example,
interfaces should be specified as Ethernet1 not Et1
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
enable:
description:
- Configures the administrative state for the interface. Setting
        the value to true will administratively enable the interface and
setting the value to false will administratively disable the
interface. The EOS default value for enable is true
required: false
default: true
choices: ['True', 'False']
aliases: []
version_added: 1.0.0
description:
description:
      - Configures a one line ASCII description for the interface. The EOS
default value for description is None
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
sflow:
description:
      - Configures the administrative state of running sflow on the named
Ethernet interface. If this value is true, then sflow is enabled
on the interface and if this value is false, then sflow is disabled
on this interface. The EOS default value for sflow is true
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
flowcontrol_send:
description:
- Configures the flowcontrol send value for the named Ethernet
interface in EOS. If the value is configured true, then
        flowcontrol send is enabled (on). If the value is configured false
then flowcontrol send is disabled (off).
required: false
default: null
choices: ['True', 'False']
aliases: []
version_added: 1.0.0
flowcontrol_receive:
description:
- Configures the flowcontrol receive value for the named Ethernet
interface in EOS. If the value is configured true, then
        flowcontrol receive is enabled (on). If the value is configured false
then flowcontrol receive is disabled (off).
required: false
default: null
choices: ['True', 'False']
aliases: []
version_added: 1.0.0
"""
EXAMPLES = """
- name: Ensure that Ethernet1/1 is administratively enabled
eos_ethernet: name=Ethernet1/1 enable=yes
- name: Enable flowcontrol send and receive on Ethernet10
eos_ethernet: name=Ethernet10 flowcontrol_send=yes flowcontrol_receive=yes
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
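        For illustration, with this module's argument_spec and stateful=False,
        the returned dict would look like (values depend on the playbook task):
        {'name': 'Ethernet1', 'enable': True, 'description': None,
        'sflow': None, 'flowcontrol_send': None, 'flowcontrol_receive': None}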
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
                self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns the interface properties for the specified name
"""
name = module.attributes['name']
result = module.node.api('interfaces').get(name)
if not result:
module.fail('Unknown interface: %s' % name)
instance = dict(name=name)
instance['enable'] = not result['shutdown']
desc = '' if not result['description'] else result['description']
instance['description'] = desc
instance['sflow'] = result['sflow']
instance['flowcontrol_send'] = result['flowcontrol_send'] == 'on'
instance['flowcontrol_receive'] = result['flowcontrol_receive'] == 'on'
return instance
def set_description(module):
""" Configures the description attribute for the interface
"""
value = module.attributes['description']
name = module.attributes['name']
value = None if value == '' else value
module.log('Invoked set_description for eos_ethernet[%s] '
'with value %s' % (name, value))
module.node.api('interfaces').set_description(name, value)
def set_enable(module):
""" Configures the enable attribute for the interface
"""
value = not module.attributes['enable']
name = module.attributes['name']
module.log('Invoked set_enable for eos_ethernet[%s] '
'with value %s' % (name, value))
module.node.api('interfaces').set_shutdown(name, value)
def set_sflow(module):
""" Configures the sflow attribute for the interface
"""
value = module.attributes['sflow']
name = module.attributes['name']
module.log('Invoked set_sflow for eos_ethernet[%s] '
'with value %s' % (name, value))
module.node.api('interfaces').set_sflow(name, value)
def set_flowcontrol_send(module):
""" Configures the flowcontrol send attribute for the interface
"""
value = module.attributes['flowcontrol_send']
value = 'on' if value else 'off'
name = module.attributes['name']
module.log('Invoked set_flowcontrol_send for eos_ethernet[%s] '
'with value %s' % (name, value))
module.node.api('interfaces').set_flowcontrol_send(name, value)
def set_flowcontrol_receive(module):
""" Configures the flowcontrol receive attribute for the interface
"""
value = module.attributes['flowcontrol_receive']
value = 'on' if value else 'off'
name = module.attributes['name']
module.log('Invoked set_flowcontrol_receive for eos_ethernet[%s] '
'with value %s' % (name, value))
module.node.api('interfaces').set_flowcontrol_receive(name, value)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
enable=dict(type='bool', default=True),
description=dict(),
sflow=dict(type='bool'),
flowcontrol_send=dict(type='bool'),
flowcontrol_receive=dict(type='bool')
)
module = EosAnsibleModule(argument_spec=argument_spec,
stateful=False,
supports_check_mode=True)
module.flush(True)
main() | 0.000894 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces quote character (') with a multi-byte combo %bf%27 together with
generic comment at the end (to make it work)
Notes:
* Useful for bypassing magic_quotes/addslashes feature
Reference:
* http://shiflett.org/blog/2006/jan/addslashes-versus-mysql-real-escape-string
>>> tamper("1' AND 1=1")
'1%bf%27 AND 1=1-- '
"""
retVal = payload
if payload:
found = False
retVal = ""
for i in xrange(len(payload)):
if payload[i] == '\'' and not found:
retVal += "%bf%27"
found = True
else:
retVal += payload[i]
continue
if found:
_ = re.sub("(?i)\s*(AND|OR)[\s(]+'[^']+'\s*(=|LIKE)\s*'.*", "", retVal)
if _ != retVal:
retVal = _
retVal += "-- "
return retVal
| 0.006751 |
# -*- coding: utf-8 -*-
import os
import time
from collections import defaultdict
from operator import itemgetter
import gevent
from gevent.queue import Queue
from gevent.event import Event
from tomcrypt import ecc
from tomcrypt.hash import sha256
from tomcrypt.cipher import aes
from .log import log
from .identity import SwitchID
from . import packet
from .line import Line
from .channel import Channel, DurableChannel
from .exceptions import *
class RemoteSwitch(gevent.Greenlet):
def __init__(self, switch_id, dht):
super(RemoteSwitch, self).__init__()
self.id = switch_id
self.local_id = dht.me
self.transmit = dht.transmit
self.dht = dht
self.line = None
self.line_time = 0
self.channels = {}
self.paths = defaultdict(lambda: 0)
self.packetq = Queue()
self.openq = Queue()
def _run(self):
self.running = True
#TODO: This might be a bad way to green-sublet
gevent.spawn(self.open_handler)
while self.running:
wrapper, payload, address = self.packetq.get()
self.recv(wrapper, payload, address)
gevent.sleep(0)
def open_handler(self):
while self.running:
open_tuple = self.openq.get()
self.handle_open(open_tuple)
gevent.sleep(0)
def _ecdh(self, remote_line, remote_ecc):
secret = self._ecc.shared_secret(ecc.Key(remote_ecc))
log.debug('ECDH: %s' % secret.encode('hex'))
#safe to remove this; keep _ecc_pub for open retransmits
del self._ecc
self.line.complete(remote_line, secret)
def path_hint(self, paths):
for path in paths:
t = path.get('type')
if t == 'ipv4':
ip = path.get('ip')
port = path.get('port')
pri = path.get('priority', 0)
self.paths[(ip, port)] += pri
def best_path(self):
        ranked = sorted(self.paths.iteritems(), key=itemgetter(1), reverse=True)  # highest priority / most recently confirmed first
if ranked:
return ranked[0][0]
else:
return None
def all_paths(self):
#more terrible ipv4 stopgap stuff
paths_array = []
for path, pri in self.paths.iteritems():
if pri > 0:
valid_path = {'type': 'ipv4'}
valid_path['ip'] = path[0]
valid_path['port'] = path[1]
paths_array.append(valid_path)
log.debug('PATHS ARRAY: {}'.format(paths_array))
return paths_array
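    # Illustrative only -- the path dicts handled above are ipv4 entries such as
    # {'type': 'ipv4', 'ip': '10.0.0.5', 'port': 42424, 'priority': 1}; after
    # remote.path_hint([...]) with one such entry (values made up), best_path()
    # returns the ('10.0.0.5', 42424) tuple that _send() passes to transmit().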
def confirm_path(self, address):
self.paths[address] = time.time() * 1000
def new_line(self):
"""Creates or replaces a secure line to the remote switch"""
if self.line:
log.debug('Invalidating previous line: %s' % self.line.id)
#unregister Line dangerously
del self.dht.linemap[self.line.id]
del self.line
self.line = Line()
self.dht.linemap[self.line.id] = self
"""
In order to separate the concerns, you have to mix them up first,
right? The "at" timestamp in an "open" packet signifies the time
that the local ECC key was generated. That local ECC key only
needs to be kept around if we haven't received a remote "open"
packet. I'll try to make this connection clearer in the next
big refactor.
"""
self._ecc = ecc.Key(256)
self._ecc_pub = self._ecc.public \
.as_string(format='der', ansi=True)
self._open = packet.create_open(
self.id.hash_name,
self.line.id,
self.local_id.pub_key_der)
self._send_open()
def send(self, data, body='', timeout=5):
"""Take a Channel packet, wrap it in a line, and send
TODO: implement timeout to wait for line
"""
if not self.line:
l = gevent.spawn(self.new_line)
l.join(timeout)
if l.successful():
pass
else:
log.debug('line timeout')
return
if self.line.is_complete:
self._send(self.line.send(data, body))
else:
gevent.sleep(1)
if self.line.is_complete:
self._send(self.line.send(data, body))
else:
log.debug('Brutally dropping packets until line is up.')
def _send(self, data):
"""Transmit packet on best network path"""
address = self.best_path()
log.debug('Sending %s to %s' % (len(data), address))
self.transmit(data, address)
def recv(self, wrapper, payload, address):
self.confirm_path(address)
if not self.line.is_complete:
log.debug('Quick restart, remote line still open?')
log.debug('Hashname: %s' % self.id.hash_name)
return
iv = wrapper['iv'].decode('hex')
data, body = self.line.recv(iv, payload)
c = data.get('c')
if c is None:
return
candidate = self.channels.get(c)
if candidate is None:
t = data.get('type')
if not isinstance(t, (str, unicode)):
return
if t[:1] != '_':
ch = Channel(self, t, c)
self.dht.channel_handler(self, ch, data, body)
elif 'seq' in data.keys():
ch = DurableChannel(self, t, c)
else:
#TODO: get remote-initiated channel handler from user
ch = Channel(self, t, c)
self.channels[c] = ch
else:
candidate.inq.put((data, body))
def _send_open(self):
iv = os.urandom(16)
aes_key = sha256(self._ecc_pub)
enc_body = aes(aes_key.digest(), iv).encrypt(self._open)
aes_key.update(self.line.id.decode('hex'))
sig = self.local_id.sign(sha256(enc_body).digest())
enc_sig = aes(aes_key.digest(), iv).encrypt(sig)
o = self.id.encrypt(self._ecc_pub)
data = packet.wrap_open(o, iv, enc_sig, enc_body)
self._send(data)
log.debug('Open to: %s' % self.id.hash_name)
log.debug('Line: %s to %s' % (self.line.id, self.line.rid))
def handle_open(self, open_tuple):
"""Deal with incoming open from this remote switch"""
# getting quick and dirty again for a bit
#sender_ecc, line_id, at, address = open_tuple
self.confirm_path(open_tuple[3])
log.debug('Open from: %s' % self.id.hash_name)
recv_at = open_tuple[2]
        while not self.openq.empty():
            # keep the single queued open with the latest timestamp
            cand_tuple = self.openq.get()
            self.confirm_path(cand_tuple[3])
            if cand_tuple[2] > recv_at:
                open_tuple = cand_tuple
                recv_at = cand_tuple[2]
        sender_ecc, line_id, at, address = open_tuple
if self.line:
#We're expecting this or we might need to invalidate?
log.debug('Line: %s from %s' % (self.line.id, self.line.rid))
log.debug('At: %i' % (at))
if self.line and self.line_time == 0:
#we've been waiting for our first open
self.line_time = at
if self.line_time < at:
self.new_line()
self._ecdh(line_id, sender_ecc)
return
if self.line.is_complete:
#remote didn't get our response open
self._send_open()
else:
self._ecdh(line_id, sender_ecc)
def open_channel(self, ctype, initial_data=None, timeout=10):
ch = Channel(self, ctype)
self.channels[ch.c] = ch
ch.start()
if initial_data is not None:
d, b = initial_data
d['type'] = ctype
ch.send(d, b)
#TODO: "wait for first response" logic is all over the place
return ch
| 0.001381 |
import os, sys
import datetime
import iris
import iris.unit as unit
import numpy as np
from iris.coord_categorisation import add_categorised_coord
diag = 'avg.5216'
cube_name_explicit='stratiform_rainfall_rate'
cube_name_param='precipitation_flux'
pp_file_path='/projects/cascade/pwille/moose_retrievals/'
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ]
#experiment_ids = [ 'dklwu', 'dklzq', 'dklyu', 'dkmbq', 'dkbhu', 'djznu', 'dkhgu', 'djzns' ]
#experiment_ids = ['djzns', 'dkbhu', 'djznu', 'dkhgu' ]
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq']
# Load global LAM
dtmindt = datetime.datetime(2011,8,19,0,0,0)
dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
fg = '%sdjzn/djznw/%s.pp' % (pp_file_path, diag)
glob_load = iris.load_cube(fg, ('%s' % cube_name_param) & time_constraint)
## Get time points from global LAM to use as time constraint when loading other runs
time_list = glob_load.coord('time').points
glob_tc = iris.Constraint(time=time_list)
del glob_load
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
#fu = '%s%s/%s/%s.pp' % (pp_file_path, expmin1, experiment_id, diag)
flsm = '%s%s/%s/30.pp' % (pp_file_path, expmin1, experiment_id)
fdm = '%s%s/%s/%s_rainfall_hourly_mean.pp' % (pp_file_path, expmin1, experiment_id, diag)
print experiment_id
sys.stdout.flush()
# Load diurnal mean cube
try:
diurnal_mean_cube = iris.load_cube(fdm, ('%s' % cube_name_explicit) & glob_tc)
except iris.exceptions.ConstraintMismatchError:
diurnal_mean_cube = iris.load_cube(fdm, ('%s' % cube_name_param) & glob_tc)
add_categorised_coord(diurnal_mean_cube, 'hour', 'time',lambda coord, x: coord.units.num2date(x).hour)
# Load land/sea mask
lsm = iris.load_cube(flsm, ('land_binary_mask' ))
print lsm
sys.stdout.flush()
# For Sea and Land, mask area and calculate mean of each hour for sea/land and SAVE as numpy array
for s in ([0,1]):
        nancube = np.where(lsm.data==s, diurnal_mean_cube.data, np.NaN)
maskedcube = np.ma.masked_array(nancube,np.isnan(nancube))
total_rainfall = np.mean(maskedcube.reshape(maskedcube.shape[0], (maskedcube.shape[1]*maskedcube.shape[2])), axis=1)
trnp =[total_rainfall.data, diurnal_mean_cube.coord('hour').points+0.5]
if s == 0:
# Areas of ocean
print total_rainfall
np.save('%s%s/%s/%s_sea_rainfall_diurnal_np' % (pp_file_path, expmin1, experiment_id, diag), trnp)
#iris.save(total_rainfall, '%s%s/%s/%s_seamask_diurnal_total.pp' % (pp_file_path, expmin1, experiment_id, diag))
if s == 1:
# Areas of land
np.save('%s%s/%s/%s_land_rainfall_diurnal_np' % (pp_file_path, expmin1, experiment_id, diag), trnp)
#iris.save(total_rainfall, '%s%s/%s/%s_landmask_diurnal_total.pp' % (pp_file_path, expmin1, experiment_id, diag))
del lsm
| 0.023426 |
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test Update message"""
import unittest
from yabgp.common.constants import HDR_LEN
from yabgp.message.update import Update
from yabgp.common.exception import UpdateMessageError
class TestUpdate(unittest.TestCase):
def test_parse_prefix_list(self):
prefix_hex = b'\x13\xb8\x9d\xe0\x18E\xb3\xdd\x18E\xb3\xdc\x18\xd1f\xb2\x16Bpd\x18\xd06\xc2'
nlri = ['184.157.224.0/19', '69.179.221.0/24', '69.179.220.0/24',
'209.102.178.0/24', '66.112.100.0/22', '208.54.194.0/24']
self.assertEqual(nlri, Update.parse_prefix_list(prefix_hex))
def test_parse_prefix_mask_larger_than_32(self):
prefix_hex = b'\x21\xb8\x9d\xe0\x18E\xb3\xdd\x18E\xb3\xdc\x18\xd1f\xb2\x16Bpd\x18\xd06\xc2'
self.assertRaises(UpdateMessageError, Update.parse_prefix_list, prefix_hex)
def test_construct_prefix_v4(self):
nlri = ['184.157.224.1/32', '32.65.243.12/30', '89.232.254.0/23', '69.179.221.0/24',
'61.172.0.0/16', '202.223.128.0/17', '156.152.0.0/15', '15.0.0.0/8',
'209.102.178.0/24', '66.112.100.0/22', '208.54.194.0/24']
nlri_hex = Update.construct_prefix_v4(nlri)
self.assertEqual(nlri, Update.parse_prefix_list(nlri_hex))
def test_parse_attributes_ipv4(self):
attr_hex = b'@\x01\x01\x00@\x02\x08\x02\x03\x00\x01\x00\x02\x00\x03@\x03\x04\xac\x10\x01\x0e\x80\x04\x04' \
b'\x00\x00\x00\x00@\x05\x04\x00\x00\x00d\x80\t\x04\xac\x10\x01\x0e\x80\n\x08\x02\x02\x02\x02dddd'
attributes = {1: 0,
2: [(2, [1, 2, 3])],
3: '172.16.1.14',
4: 0,
5: 100,
9: '172.16.1.14',
10: ['2.2.2.2', '100.100.100.100']}
self.assertEqual(attributes, Update.parse_attributes(attr_hex, False))
self.assertRaises(UpdateMessageError, Update.parse_attributes, attr_hex, True)
def test_construct_attributes_ipv4(self):
attr = {
1: 2,
2: [(2, [701, 71])],
3: '219.158.1.204',
5: 100,
6: b'',
7: (71, '16.96.243.103'),
8: ['4837:701', '4837:2100'],
9: '219.158.1.204',
10: ['219.158.1.209', '0.0.0.30']}
attr_hex = Update.construct_attributes(attr, asn4=True)
self.assertEqual(attr, Update.parse_attributes(attr_hex, asn4=True))
def test_parse_and_construct_ipv4_unicast_2byteas(self):
# 2 bytes asn
msg_hex = b'\x00\x00\x00\x28\x40\x01\x01\x02\x40\x02\x0a\x02\x01\x00\x1e\x01\x02\x00\x0a\x00\x14\x40\x03\x04' \
b'\x0a\x00\x00\x09\x80\x04\x04\x00\x00\x00\x00\xc0\x07\x06\x00\x1e\x0a\x00\x00\x09\x15\xac\x10\x00'
update = Update.parse([None, False, msg_hex])
attributes = {1: 2, 2: [(2, [30]), (1, [10, 20])], 3: '10.0.0.9', 4: 0, 7: (30, '10.0.0.9')}
self.assertEqual(attributes, update['attr'])
self.assertEqual([], update['withdraw'])
self.assertEqual(['172.16.0.0/21'], update['nlri'])
self.assertEqual(msg_hex, Update.construct(msg_dict=update, asn4=False)[HDR_LEN:])
def test_parse_ipv4_4byteas(self):
# 4 bytes asn
msg_hex = b'\x00\x00\x00\x30\x40\x01\x01\x02\x40\x02\x10\x02\x01\x00\x00\x00\x1e\x01\x02\x00\x00\x00\x0a\x00' \
b'\x00\x00\x14\x40\x03\x04\x0a\x00\x00\x09\x80\x04\x04\x00\x00\x00\x00\xc0\x07\x08\x00\x00\x00' \
b'\x1e\x0a\x00\x00\x09\x15\xac\x10\x00'
update = Update.parse([None, True, msg_hex])
attributes = {1: 2, 2: [(2, [30]), (1, [10, 20])], 3: '10.0.0.9', 4: 0, 7: (30, '10.0.0.9')}
self.assertEqual(attributes, update['attr'])
self.assertEqual([], update['withdraw'])
self.assertEqual(['172.16.0.0/21'], update['nlri'])
if __name__ == '__main__':
unittest.main()
| 0.003325 |
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
# This file exists for backward compatibility with old code that refers to
# Crypto.Hash.RIPEMD
"""Deprecated alias for `Crypto.Hash.RIPEMD160`"""
from Crypto.Hash.RIPEMD160 import new, block_size, digest_size
| 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import time
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import gc
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
# A key for use in the input_alternatives dict indicating the default input.
# This is the input that will be expected when a serving request does not
# specify a specific signature.
# The default input alternative specifies placeholders that the input_fn
# requires to be fed (in the typical case, a single placeholder for a
# serialized tf.Example).
DEFAULT_INPUT_ALTERNATIVE_KEY = 'default_input_alternative'
# A key for use in the input_alternatives dict indicating the features input.
# The features inputs alternative specifies the feature Tensors provided as
# input to the model_fn, i.e. the outputs of the input_fn.
FEATURES_INPUT_ALTERNATIVE_KEY = 'features_input_alternative'
# A key for use in the output_alternatives dict indicating the default output.
# This is the output that will be provided when a serving request does not
# specify a specific signature.
# In a single-headed model, the single output is automatically the default.
# In a multi-headed model, the name of the desired default head should be
# provided to get_output_alternatives.
DEFAULT_OUTPUT_ALTERNATIVE_KEY = 'default_output_alternative'
def build_standardized_signature_def(
input_tensors, output_tensors, problem_type):
"""Build a SignatureDef using problem type and input and output Tensors.
Note that this delegates the actual creation of the signatures to methods in
//third_party/tensorflow/python/saved_model/signature_def_utils.py, which may
assign names to the input and output tensors (depending on the problem type)
that are standardized in the context of SavedModel.
Args:
input_tensors: a dict of string key to `Tensor`
output_tensors: a dict of string key to `Tensor`
problem_type: an instance of constants.ProblemType, specifying
classification, regression, etc.
Returns:
A SignatureDef using SavedModel standard keys where possible.
Raises:
ValueError: if input_tensors or output_tensors is None or empty.
"""
if not input_tensors:
raise ValueError('input_tensors must be provided.')
if not output_tensors:
raise ValueError('output_tensors must be provided.')
# Per-method signature_def functions will standardize the keys if possible
if _is_classification_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
if not (classes or scores):
(_, classes), = output_tensors.items()
return signature_def_utils.classification_signature_def(
examples, classes, scores)
elif _is_regression_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
(_, predictions), = output_tensors.items()
return signature_def_utils.regression_signature_def(examples, predictions)
else:
return signature_def_utils.predict_signature_def(
input_tensors, output_tensors)
def _is_classification_problem(problem_type, input_tensors, output_tensors):
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
return ((problem_type == constants.ProblemType.CLASSIFICATION or
problem_type == constants.ProblemType.LOGISTIC_REGRESSION)
and len(input_tensors) == 1
and (classes or scores or len(output_tensors) == 1))
def _is_regression_problem(problem_type, input_tensors, output_tensors):
return (problem_type == constants.ProblemType.LINEAR_REGRESSION
and len(input_tensors) == 1
and len(output_tensors) == 1)
def get_input_alternatives(input_ops):
"""Obtain all input alternatives using the input_fn output and heuristics."""
input_alternatives = {}
if isinstance(input_ops, input_fn_utils.InputFnOps):
features, unused_labels, default_inputs = input_ops
input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY] = default_inputs
else:
features, unused_labels = input_ops
if not features:
raise ValueError('Features must be defined.')
# Add the "features" input_signature in any case.
# Note defensive copy because model_fns alter the features dict.
input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY] = (
copy.copy(features))
return input_alternatives, features
def get_output_alternatives(
model_fn_ops,
default_output_alternative_key=DEFAULT_OUTPUT_ALTERNATIVE_KEY):
"""Obtain all output alternatives using the model_fn output and heuristics."""
output_alternatives = model_fn_ops.output_alternatives
# Identify the default outputs, creating them if needed.
if (output_alternatives
and default_output_alternative_key not in output_alternatives):
raise ValueError('default_output_alternative_key not in '
'output_alternatives: %s' % default_output_alternative_key)
if (output_alternatives
and default_output_alternative_key in output_alternatives):
# If a default head is provided, use it.
actual_default_output_alternative_key = default_output_alternative_key
return output_alternatives, actual_default_output_alternative_key
if output_alternatives and len(output_alternatives) == 1:
# If there is only one head, use it as the default.
(actual_default_output_alternative_key, _), = output_alternatives.items()
return output_alternatives, actual_default_output_alternative_key
# Lacking provided output alternatives, the best we can do is to
# interpret the model as single-headed of unknown type.
default_problem_type = constants.ProblemType.UNSPECIFIED
default_outputs = model_fn_ops.predictions
actual_default_output_alternative_key = DEFAULT_OUTPUT_ALTERNATIVE_KEY
output_alternatives = {actual_default_output_alternative_key:
(default_problem_type, default_outputs)}
return output_alternatives, actual_default_output_alternative_key
def build_all_signature_defs(input_alternatives, output_alternatives,
actual_default_output_alternative_key):
"""Build `SignatureDef`s from all pairs of input and output alternatives."""
signature_def_map = {
('%s:%s' % (input_key, output_key or 'None')):
build_standardized_signature_def(
inputs, outputs, problem_type)
for input_key, inputs in input_alternatives.items()
for output_key, (problem_type, outputs)
in output_alternatives.items()}
# Add the default SignatureDef
default_inputs = input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY]
if not default_inputs:
default_inputs = input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY]
# default outputs are guaranteed to exist above
(default_problem_type, default_outputs) = (
output_alternatives[actual_default_output_alternative_key])
signature_def_map[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
build_standardized_signature_def(
default_inputs, default_outputs, default_problem_type))
return signature_def_map
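# For illustration, the map keys built above take the form
# '<input_key>:<output_key>', e.g.
# 'default_input_alternative:default_output_alternative', plus the SavedModel
# default serving signature key added at the end.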
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of milliseconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
"""
export_timestamp = int(time.time() * 1e3)
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(export_timestamp)))
return export_dir
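# For illustration (path made up): with export_dir_base='/tmp/my_model', the
# helper above returns something like b'/tmp/my_model/1488842817342', i.e. the
# base directory joined with a 13-digit millisecond timestamp.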
def garbage_collect_exports(export_dir_base, exports_to_keep):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
exports_to_keep: the number of recent exports to retain.
"""
if exports_to_keep is None:
return
keep_filter = gc.largest_export_versions(exports_to_keep)
delete_filter = gc.negation(keep_filter)
# Export dir must not end with / or it will break the re match below.
if export_dir_base.endswith('/'):
export_dir_base = export_dir_base[:-1]
# create a simple parser that pulls the export_version from the directory.
def parser(path):
match = re.match('^' + export_dir_base + '/(\\d{13})$', path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
for p in delete_filter(gc.get_paths(export_dir_base, parser=parser)):
gfile.DeleteRecursively(p.path)
def make_export_strategy(export_input_fn,
default_output_alternative_key='default',
assets_extra=None,
export_as_text=False,
exports_to_keep=None):
"""Create an ExportStrategy for use with Experiment."""
def export_fn(estimator, export_dir_base):
"""Exports the given Estimator as a SavedModel."""
export_result = estimator.export_savedmodel(
export_dir_base,
export_input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
export_as_text=export_as_text,
exports_to_keep=exports_to_keep)
garbage_collect_exports(export_dir_base, exports_to_keep)
return export_result
return export_strategy.ExportStrategy('Servo', export_fn)
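# A usage sketch (names such as `serving_input_fn` and `my_estimator` are
# placeholders): build a strategy that keeps only the five most recent exports
# and hand it to the training harness (e.g. a tf.contrib.learn Experiment via
# its export_strategies argument).
#
#   strategy = make_export_strategy(serving_input_fn, exports_to_keep=5)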
| 0.005865 |
# -*- coding: utf-8 -*-
__doc__ = """
The manager module provides a selected classes to
handle websocket's execution.
Initially the rationale was to:
- Externalize the way the CherryPy server had been setup
as its websocket management was too tightly coupled with
the plugin implementation.
- Offer a management that could be used by other
server or client implementations.
- Move away from the threaded model to the event-based
model by relying on `select` or `epoll` (when available).
A simple usage for handling websocket clients:
.. code-block:: python
from ws4py.client import WebSocketBaseClient
from ws4py.manager import WebSocketManager
m = WebSocketManager()
class EchoClient(WebSocketBaseClient):
def handshake_ok(self):
m.add(self) # register the client once the handshake is done
def received_message(self, msg):
print str(msg)
m.start()
client = EchoClient('ws://localhost:9000/ws')
client.connect()
m.join() # blocks forever
Managers are not compulsory but hopefully will help your
workflow. For clients, you can still rely on threaded, gevent or
tornado based implementations of course.
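You may also pick the polling implementation explicitly rather than relying
on the automatic choice, for instance (illustrative):
.. code-block:: python
    from ws4py.manager import WebSocketManager, SelectPoller
    m = WebSocketManager(poller=SelectPoller(timeout=0.5))
    m.start()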
"""
import logging
import select
import threading
import time
from ws4py import format_addresses
from ws4py.compat import py3k
logger = logging.getLogger('ws4py')
class SelectPoller(object):
def __init__(self, timeout=0.1):
"""
A socket poller that uses the `select`
        implementation to determine which
file descriptors have data available to read.
It is available on all platforms.
"""
self._fds = []
self.timeout = timeout
def release(self):
"""
Cleanup resources.
"""
self._fds = []
def register(self, fd):
"""
Register a new file descriptor to be
part of the select polling next time around.
"""
if fd not in self._fds:
self._fds.append(fd)
def unregister(self, fd):
"""
Unregister the given file descriptor.
"""
if fd in self._fds:
self._fds.remove(fd)
def poll(self):
"""
Polls once and returns a list of
ready-to-be-read file descriptors.
"""
if not self._fds:
time.sleep(self.timeout)
return []
try:
r, w, x = select.select(self._fds, [], [], self.timeout)
except IOError as e:
return []
return r
class EPollPoller(object):
def __init__(self, timeout=0.1):
"""
An epoll poller that uses the ``epoll``
        implementation to determine which
file descriptors have data available to read.
Available on Unix flavors mostly.
"""
self.poller = select.epoll()
self.timeout = timeout
def release(self):
"""
Cleanup resources.
"""
self.poller.close()
def register(self, fd):
"""
Register a new file descriptor to be
part of the select polling next time around.
"""
try:
self.poller.register(fd, select.EPOLLIN | select.EPOLLPRI)
except IOError:
pass
def unregister(self, fd):
"""
Unregister the given file descriptor.
"""
self.poller.unregister(fd)
def poll(self):
"""
Polls once and yields each ready-to-be-read
file-descriptor
"""
try:
events = self.poller.poll(timeout=self.timeout)
except IOError:
events = []
for fd, event in events:
            if event & (select.EPOLLIN | select.EPOLLPRI):
yield fd
class KQueuePoller(object):
    def __init__(self, timeout=0.1):
        """
        A poller that uses the BSD ``kqueue``
        implementation to determine which
        file descriptors have data available to read.
        Available on BSD flavors and Mac OS X.
        """
        self.kqueue = select.kqueue()
        self.timeout = timeout
        self._events = {}
    def release(self):
        """
        Cleanup resources.
        """
        self.kqueue.close()
    def register(self, fd):
        """
        Register a new file descriptor to be
        part of the kqueue polling next time around.
        """
        if fd in self._events:
            return
        event = select.kevent(fd, filter=select.KQ_FILTER_READ,
                              flags=select.KQ_EV_ADD)
        self._events[fd] = event
        self.kqueue.control([event], 0)
    def unregister(self, fd):
        """
        Unregister the given file descriptor.
        """
        if self._events.pop(fd, None) is not None:
            event = select.kevent(fd, filter=select.KQ_FILTER_READ,
                                  flags=select.KQ_EV_DELETE)
            self.kqueue.control([event], 0)
    def poll(self):
        """
        Polls once and yields each ready-to-be-read
        file-descriptor
        """
        try:
            events = self.kqueue.control(None, 128, self.timeout)
        except (IOError, OSError):
            events = []
        for event in events:
            yield event.ident
class WebSocketManager(threading.Thread):
def __init__(self, poller=None):
"""
An event-based websocket manager. By event-based, we mean
that the websockets will be called when their
sockets have data to be read from.
The manager itself runs in its own thread as not to
be the blocking mainloop of your application.
The poller's implementation is automatically chosen
with ``epoll`` if available else ``select`` unless you
provide your own ``poller``.
"""
threading.Thread.__init__(self)
self.name="WebSocketManager"
self.lock = threading.Lock()
self.websockets = {}
self.running = False
if poller:
self.poller = poller
else:
if hasattr(select, "epoll"):
self.poller = EPollPoller()
logger.info("Using epoll")
else:
self.poller = SelectPoller()
logger.info("Using select as epoll is not available")
def __len__(self):
return len(self.websockets)
def __iter__(self):
if py3k:
return iter(self.websockets.values())
else:
return self.websockets.itervalues()
def __contains__(self, ws):
fd = ws.sock.fileno()
# just in case the file descriptor was reused
# we actually check the instance (well, this might
# also have been reused...)
return self.websockets.get(fd) is ws
def add(self, websocket):
"""
Manage a new websocket.
First calls its :meth:`opened() <ws4py.websocket.WebSocket.opened>`
method and register its socket against the poller
for reading events.
"""
if websocket in self:
return
logger.info("Managing websocket %s" % format_addresses(websocket))
websocket.opened()
with self.lock:
fd = websocket.sock.fileno()
self.websockets[fd] = websocket
self.poller.register(fd)
def remove(self, websocket):
"""
Remove the given ``websocket`` from the manager.
This does not call its :meth:`closed() <ws4py.websocket.WebSocket.closed>`
method as it's out-of-band by your application
or from within the manager's run loop.
"""
if websocket not in self:
return
logger.info("Removing websocket %s" % format_addresses(websocket))
with self.lock:
fd = websocket.sock.fileno()
self.websockets.pop(fd, None)
self.poller.unregister(fd)
def stop(self):
"""
Mark the manager as terminated and
releases its resources.
"""
self.running = False
with self.lock:
self.websockets.clear()
self.poller.release()
def run(self):
"""
Manager's mainloop executed from within a thread.
Constantly poll for read events and, when available,
call related websockets' `once` method to
read and process the incoming data.
If the :meth:`once() <ws4py.websocket.WebSocket.once>`
method returns a `False` value, its :meth:`terminate() <ws4py.websocket.WebSocket.terminate>`
method is also applied to properly close
the websocket and its socket is unregistered from the poller.
Note that websocket shouldn't take long to process
their data or they will block the remaining
websockets with data to be handled. As for what long means,
it's up to your requirements.
"""
self.running = True
while self.running:
with self.lock:
polled = self.poller.poll()
if not self.running:
break
for fd in polled:
if not self.running:
break
ws = self.websockets.get(fd)
if ws and not ws.terminated:
                    # I don't know what kind of errors might spew out of here, but they
                    # probably shouldn't crash the entire server; treat a failure as if
                    # once() had returned None.
                    try:
                        x = ws.once()
                    except Exception as e:
                        x = None
                        logger.error("Terminating websocket %s due to exception: %s in once method" % (format_addresses(ws), repr(e)))
if not x:
with self.lock:
fd = ws.sock.fileno()
self.websockets.pop(fd, None)
self.poller.unregister(fd)
if not ws.terminated:
logger.info("Terminating websocket %s" % format_addresses(ws))
ws.terminate()
def close_all(self, code=1001, message='Server is shutting down'):
"""
Execute the :meth:`close() <ws4py.websocket.WebSocket.close>`
method of each registered websockets to initiate the closing handshake.
It doesn't wait for the handshake to complete properly.
"""
with self.lock:
logger.info("Closing all websockets with [%d] '%s'" % (code, message))
for ws in iter(self):
ws.close(code=code, reason=message)
def broadcast(self, message, binary=False):
"""
Broadcasts the given message to all registered
websockets, at the time of the call.
Broadcast may fail on a given registered peer
but this is silent as it's not the method's
purpose to handle websocket's failures.
"""
with self.lock:
websockets = self.websockets.copy()
if py3k:
ws_iter = iter(websockets.values())
else:
ws_iter = websockets.itervalues()
for ws in ws_iter:
if not ws.terminated:
try:
ws.send(message, binary)
except:
pass
| 0.001533 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from six.moves.urllib import parse
from keystone.common import controller as common_controller
from keystone.common import dependency
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LE
from keystone.openstack.common import versionutils
from keystone import token
from keystone.token import provider
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@dependency.requires('catalog_api', 'resource_api')
class V2TokenDataHelper(object):
"""Creates V2 token data."""
def v3_to_v2_token(self, token_id, v3_token_data):
token_data = {}
# Build v2 token
v3_token = v3_token_data['token']
token = {}
token['id'] = token_id
token['expires'] = v3_token.get('expires_at')
token['issued_at'] = v3_token.get('issued_at')
token['audit_ids'] = v3_token.get('audit_ids')
if 'project' in v3_token:
# v3 token_data does not contain all tenant attributes
tenant = self.resource_api.get_project(
v3_token['project']['id'])
token['tenant'] = common_controller.V2Controller.filter_domain_id(
tenant)
token_data['token'] = token
# Build v2 user
v3_user = v3_token['user']
user = common_controller.V2Controller.v3_to_v2_user(v3_user)
# Set user roles
user['roles'] = []
role_ids = []
for role in v3_token.get('roles', []):
# Filter role id since it's not included in v2 token response
role_ids.append(role.pop('id'))
user['roles'].append(role)
user['roles_links'] = []
token_data['user'] = user
# Get and build v2 service catalog
token_data['serviceCatalog'] = []
if 'tenant' in token:
catalog_ref = self.catalog_api.get_catalog(
user['id'], token['tenant']['id'])
if catalog_ref:
token_data['serviceCatalog'] = self.format_catalog(catalog_ref)
# Build v2 metadata
metadata = {}
metadata['roles'] = role_ids
# Setting is_admin to keep consistency in v2 response
metadata['is_admin'] = 0
token_data['metadata'] = metadata
return {'access': token_data}
@classmethod
def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
trust_ref=None):
audit_info = None
user_ref = token_ref['user']
metadata_ref = token_ref['metadata']
if roles_ref is None:
roles_ref = []
expires = token_ref.get('expires', provider.default_expire_time())
if expires is not None:
if not isinstance(expires, six.text_type):
expires = timeutils.isotime(expires)
token_data = token_ref.get('token_data')
if token_data:
token_audit = token_data.get(
'access', token_data).get('token', {}).get('audit_ids')
audit_info = token_audit
if audit_info is None:
audit_info = provider.audit_info(token_ref.get('parent_audit_id'))
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
'issued_at': timeutils.strtime(),
'audit_ids': audit_info
},
'user': {'id': user_ref['id'],
'name': user_ref['name'],
'username': user_ref['name'],
'roles': roles_ref,
'roles_links': metadata_ref.get('roles_links',
[])
}
}
}
if 'bind' in token_ref:
o['access']['token']['bind'] = token_ref['bind']
if 'tenant' in token_ref and token_ref['tenant']:
token_ref['tenant']['enabled'] = True
o['access']['token']['tenant'] = token_ref['tenant']
if catalog_ref is not None:
o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
catalog_ref)
if metadata_ref:
if 'is_admin' in metadata_ref:
o['access']['metadata'] = {'is_admin':
metadata_ref['is_admin']}
else:
o['access']['metadata'] = {'is_admin': 0}
if 'roles' in metadata_ref:
o['access']['metadata']['roles'] = metadata_ref['roles']
if CONF.trust.enabled and trust_ref:
o['access']['trust'] = {'trustee_user_id':
trust_ref['trustee_user_id'],
'id': trust_ref['id'],
'trustor_user_id':
trust_ref['trustor_user_id'],
'impersonation':
trust_ref['impersonation']
}
return o
@classmethod
def format_catalog(cls, catalog_ref):
"""Munge catalogs from internal to output format
Internal catalogs look like::
          {$REGION: {
              $SERVICE: {
                  $key1: $value1,
                  ...
              }
          }}
The legacy api wants them to look like::
[{'name': $SERVICE[name],
'type': $SERVICE,
'endpoints': [{
'tenantId': $tenant_id,
...
'region': $REGION,
}],
'endpoints_links': [],
}]
"""
if not catalog_ref:
return []
services = {}
for region, region_ref in six.iteritems(catalog_ref):
for service, service_ref in six.iteritems(region_ref):
new_service_ref = services.get(service, {})
new_service_ref['name'] = service_ref.pop('name')
new_service_ref['type'] = service
new_service_ref['endpoints_links'] = []
service_ref['region'] = region
endpoints_ref = new_service_ref.get('endpoints', [])
endpoints_ref.append(service_ref)
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
return services.values()
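# Illustrative sketch (not part of Keystone): a standalone demonstration of
# the shape change performed by V2TokenDataHelper.format_catalog above — the
# internal {region: {service_type: endpoint}} mapping becomes the legacy list
# of services, each carrying its endpoints with the region folded in. The
# sample region and endpoint values are made up.
def _format_catalog_sketch():
    internal = {
        'RegionOne': {
            'identity': {'name': 'keystone',
                         'publicURL': 'http://example.com:5000/v2.0'},
        },
    }
    services = {}
    for region, region_ref in internal.items():
        for service_type, endpoint in region_ref.items():
            entry = services.get(service_type, {})
            entry['name'] = endpoint.pop('name')
            entry['type'] = service_type
            entry['endpoints_links'] = []
            endpoint['region'] = region
            entry.setdefault('endpoints', []).append(endpoint)
            services[service_type] = entry
    return list(services.values())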
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
'identity_api', 'resource_api', 'role_api', 'trust_api')
class V3TokenDataHelper(object):
"""Token data helper."""
def __init__(self):
# Keep __init__ around to ensure dependency injection works.
super(V3TokenDataHelper, self).__init__()
def _get_filtered_domain(self, domain_id):
domain_ref = self.resource_api.get_domain(domain_id)
return {'id': domain_ref['id'], 'name': domain_ref['name']}
def _get_filtered_project(self, project_id):
project_ref = self.resource_api.get_project(project_id)
filtered_project = {
'id': project_ref['id'],
'name': project_ref['name']}
filtered_project['domain'] = self._get_filtered_domain(
project_ref['domain_id'])
return filtered_project
def _populate_scope(self, token_data, domain_id, project_id):
if 'domain' in token_data or 'project' in token_data:
# scope already exist, no need to populate it again
return
if domain_id:
token_data['domain'] = self._get_filtered_domain(domain_id)
if project_id:
token_data['project'] = self._get_filtered_project(project_id)
def _get_roles_for_user(self, user_id, domain_id, project_id):
roles = []
if domain_id:
roles = self.assignment_api.get_roles_for_user_and_domain(
user_id, domain_id)
if project_id:
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, project_id)
return [self.role_api.get_role(role_id) for role_id in roles]
def _populate_roles_for_groups(self, group_ids,
project_id=None, domain_id=None,
user_id=None):
def _check_roles(roles, user_id, project_id, domain_id):
# User was granted roles so simply exit this function.
if roles:
return
if project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': project_id}
elif domain_id:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': domain_id}
# Since no roles were found a user is not authorized to
# perform any operations. Raise an exception with
# appropriate error message.
raise exception.Unauthorized(msg)
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
_check_roles(roles, user_id, project_id, domain_id)
return roles
def _populate_user(self, token_data, user_id, trust):
if 'user' in token_data:
# no need to repopulate user if it already exists
return
user_ref = self.identity_api.get_user(user_id)
if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
trustor_user_ref = (self.identity_api.get_user(
trust['trustor_user_id']))
try:
self.identity_api.assert_user_enabled(trust['trustor_user_id'])
except AssertionError:
raise exception.Forbidden(_('Trustor is disabled.'))
if trust['impersonation']:
user_ref = trustor_user_ref
token_data['OS-TRUST:trust'] = (
{
'id': trust['id'],
'trustor_user': {'id': trust['trustor_user_id']},
'trustee_user': {'id': trust['trustee_user_id']},
'impersonation': trust['impersonation']
})
filtered_user = {
'id': user_ref['id'],
'name': user_ref['name'],
'domain': self._get_filtered_domain(user_ref['domain_id'])}
token_data['user'] = filtered_user
def _populate_oauth_section(self, token_data, access_token):
if access_token:
access_token_id = access_token['id']
consumer_id = access_token['consumer_id']
token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
'consumer_id': consumer_id})
def _populate_roles(self, token_data, user_id, domain_id, project_id,
trust, access_token):
if 'roles' in token_data:
# no need to repopulate roles
return
if access_token:
filtered_roles = []
authed_role_ids = jsonutils.loads(access_token['role_ids'])
all_roles = self.role_api.list_roles()
for role in all_roles:
for authed_role in authed_role_ids:
if authed_role == role['id']:
filtered_roles.append({'id': role['id'],
'name': role['name']})
token_data['roles'] = filtered_roles
return
if CONF.trust.enabled and trust:
token_user_id = trust['trustor_user_id']
token_project_id = trust['project_id']
# trusts do not support domains yet
token_domain_id = None
else:
token_user_id = user_id
token_project_id = project_id
token_domain_id = domain_id
if token_domain_id or token_project_id:
roles = self._get_roles_for_user(token_user_id,
token_domain_id,
token_project_id)
filtered_roles = []
if CONF.trust.enabled and trust:
for trust_role in trust['roles']:
match_roles = [x for x in roles
if x['id'] == trust_role['id']]
if match_roles:
filtered_roles.append(match_roles[0])
else:
raise exception.Forbidden(
_('Trustee has no delegated roles.'))
else:
for role in roles:
filtered_roles.append({'id': role['id'],
'name': role['name']})
# user has no project or domain roles, therefore access denied
if not filtered_roles:
if token_project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': token_project_id}
else:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': token_domain_id}
LOG.debug(msg)
raise exception.Unauthorized(msg)
token_data['roles'] = filtered_roles
def _populate_service_catalog(self, token_data, user_id,
domain_id, project_id, trust):
if 'catalog' in token_data:
# no need to repopulate service catalog
return
if CONF.trust.enabled and trust:
user_id = trust['trustor_user_id']
if project_id or domain_id:
service_catalog = self.catalog_api.get_v3_catalog(
user_id, project_id)
# TODO(ayoung): Enforce Endpoints for trust
token_data['catalog'] = service_catalog
def _populate_service_providers(self, token_data):
if 'service_providers' in token_data:
return
service_providers = self.federation_api.get_enabled_service_providers()
if service_providers:
token_data['service_providers'] = service_providers
def _populate_token_dates(self, token_data, expires=None, trust=None,
issued_at=None):
if not expires:
expires = provider.default_expire_time()
if not isinstance(expires, six.string_types):
expires = timeutils.isotime(expires, subsecond=True)
token_data['expires_at'] = expires
token_data['issued_at'] = (issued_at or
timeutils.isotime(subsecond=True))
def _populate_audit_info(self, token_data, audit_info=None):
if audit_info is None or isinstance(audit_info, six.string_types):
token_data['audit_ids'] = provider.audit_info(audit_info)
elif isinstance(audit_info, list):
token_data['audit_ids'] = audit_info
else:
msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
{'data': audit_info, 'type': type(audit_info)})
LOG.error(msg)
raise exception.UnexpectedError(msg)
def get_token_data(self, user_id, method_names, extras=None,
domain_id=None, project_id=None, expires=None,
trust=None, token=None, include_catalog=True,
bind=None, access_token=None, issued_at=None,
audit_info=None):
if extras is None:
extras = {}
if extras:
versionutils.deprecated(
what='passing token data with "extras"',
as_of=versionutils.deprecated.KILO,
in_favor_of='well-defined APIs')
token_data = {'methods': method_names,
'extras': extras}
# We've probably already written these to the token
if token:
for x in ('roles', 'user', 'catalog', 'project', 'domain'):
if x in token:
token_data[x] = token[x]
if CONF.trust.enabled and trust:
if user_id != trust['trustee_user_id']:
raise exception.Forbidden(_('User is not a trustee.'))
if bind:
token_data['bind'] = bind
self._populate_scope(token_data, domain_id, project_id)
self._populate_user(token_data, user_id, trust)
self._populate_roles(token_data, user_id, domain_id, project_id, trust,
access_token)
self._populate_audit_info(token_data, audit_info)
if include_catalog:
self._populate_service_catalog(token_data, user_id, domain_id,
project_id, trust)
self._populate_service_providers(token_data)
self._populate_token_dates(token_data, expires=expires, trust=trust,
issued_at=issued_at)
self._populate_oauth_section(token_data, access_token)
return {'token': token_data}
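# Illustrative sketch (not part of Keystone): a standalone approximation of
# what _populate_token_dates above produces — an 'issued_at'/'expires_at' pair
# in ISO 8601 form. Plain datetime is used here as a stand-in for
# oslo_utils.timeutils, and the one-hour expiry is an assumed value; Keystone
# takes the real window from provider.default_expire_time().
def _token_dates_sketch(expiry_seconds=3600):
    import datetime
    issued_at = datetime.datetime.utcnow()
    expires = issued_at + datetime.timedelta(seconds=expiry_seconds)
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    return {'issued_at': issued_at.strftime(fmt),
            'expires_at': expires.strftime(fmt)}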
@dependency.requires('catalog_api', 'identity_api', 'oauth_api',
'resource_api', 'role_api', 'trust_api')
class BaseProvider(provider.Provider):
def __init__(self, *args, **kwargs):
super(BaseProvider, self).__init__(*args, **kwargs)
self.v3_token_data_helper = V3TokenDataHelper()
self.v2_token_data_helper = V2TokenDataHelper()
def get_token_version(self, token_data):
if token_data and isinstance(token_data, dict):
if 'token_version' in token_data:
if token_data['token_version'] in token.provider.VERSIONS:
return token_data['token_version']
# FIXME(morganfainberg): deprecate the following logic in future
# revisions. It is better to just specify the token_version in
# the token_data itself. This way we can support future versions
# that might have the same fields.
if 'access' in token_data:
return token.provider.V2
if 'token' in token_data and 'methods' in token_data['token']:
return token.provider.V3
raise exception.UnsupportedTokenVersionException()
def issue_v2_token(self, token_ref, roles_ref=None,
catalog_ref=None):
metadata_ref = token_ref['metadata']
trust_ref = None
if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
token_data = self.v2_token_data_helper.format_token(
token_ref, roles_ref, catalog_ref, trust_ref)
token_id = self._get_token_id(token_data)
token_data['access']['token']['id'] = token_id
return token_id, token_data
def _is_mapped_token(self, auth_context):
return (federation.IDENTITY_PROVIDER in auth_context and
federation.PROTOCOL in auth_context)
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
# for V2, trust is stashed in metadata_ref
if (CONF.trust.enabled and not trust and metadata_ref and
'trust_id' in metadata_ref):
trust = self.trust_api.get_trust(metadata_ref['trust_id'])
token_ref = None
if auth_context and self._is_mapped_token(auth_context):
token_ref = self._handle_mapped_tokens(
auth_context, project_id, domain_id)
access_token = None
if 'oauth1' in method_names:
access_token_id = auth_context['access_token_id']
access_token = self.oauth_api.get_access_token(access_token_id)
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
auth_context.get('extras') if auth_context else None,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
trust=trust,
bind=auth_context.get('bind') if auth_context else None,
token=token_ref,
include_catalog=include_catalog,
access_token=access_token,
audit_info=parent_audit_id)
token_id = self._get_token_id(token_data)
return token_id, token_data
def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
def get_federated_domain():
return (CONF.federation.federated_domain_name or
federation.FEDERATED_DOMAIN_KEYWORD)
federated_domain = get_federated_domain()
user_id = auth_context['user_id']
group_ids = auth_context['group_ids']
idp = auth_context[federation.IDENTITY_PROVIDER]
protocol = auth_context[federation.PROTOCOL]
token_data = {
'user': {
'id': user_id,
'name': parse.unquote(user_id),
federation.FEDERATION: {
'identity_provider': {'id': idp},
'protocol': {'id': protocol}
},
'domain': {
'id': federated_domain,
'name': federated_domain
}
}
}
if project_id or domain_id:
roles = self.v3_token_data_helper._populate_roles_for_groups(
group_ids, project_id, domain_id, user_id)
token_data.update({'roles': roles})
else:
token_data['user'][federation.FEDERATION].update({
'groups': [{'id': x} for x in group_ids]
})
return token_data
def _verify_token_ref(self, token_ref):
"""Verify and return the given token_ref."""
if not token_ref:
raise exception.Unauthorized()
return token_ref
def _assert_is_not_federation_token(self, token_ref):
"""Make sure we aren't using v2 auth on a federation token."""
token_data = token_ref.get('token_data')
if (token_data and self.get_token_version(token_data) ==
token.provider.V3):
if 'OS-FEDERATION' in token_data['token']['user']:
msg = _('Attempting to use OS-FEDERATION token with V2 '
'Identity Service, use V3 Authentication')
raise exception.Unauthorized(msg)
def _assert_default_domain(self, token_ref):
"""Make sure we are operating on default domain only."""
if (token_ref.get('token_data') and
self.get_token_version(token_ref.get('token_data')) ==
token.provider.V3):
# this is a V3 token
msg = _('Non-default domain is not supported')
            # user in a non-default domain is prohibited
if (token_ref['token_data']['token']['user']['domain']['id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
# domain scoping is prohibited
if token_ref['token_data']['token'].get('domain'):
raise exception.Unauthorized(
_('Domain scoped token is not supported'))
# project in non-default domain is prohibited
if token_ref['token_data']['token'].get('project'):
project = token_ref['token_data']['token']['project']
project_domain_id = project['domain']['id']
# scoped to project in non-default domain is prohibited
if project_domain_id != CONF.identity.default_domain_id:
raise exception.Unauthorized(msg)
# if token is scoped to trust, both trustor and trustee must
# be in the default domain. Furthermore, the delegated project
# must also be in the default domain
metadata_ref = token_ref['metadata']
if CONF.trust.enabled and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if (trustee_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if (trustor_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
project_ref = self.resource_api.get_project(
trust_ref['project_id'])
if (project_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
def validate_v2_token(self, token_ref):
try:
self._assert_is_not_federation_token(token_ref)
self._assert_default_domain(token_ref)
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
            # Let's go with the cached token strategy. Since the token
            # management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
token_data = token_ref.get('token_data')
if (not token_data or
self.get_token_version(token_data) !=
token.provider.V2):
# token is created by old v2 logic
metadata_ref = token_ref['metadata']
roles_ref = []
for role_id in metadata_ref.get('roles', []):
roles_ref.append(self.role_api.get_role(role_id))
# Get a service catalog if possible
# This is needed for on-behalf-of requests
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
token_ref['user']['id'],
token_ref['tenant']['id'])
trust_ref = None
if CONF.trust.enabled and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(
metadata_ref['trust_id'])
token_data = self.v2_token_data_helper.format_token(
token_ref, roles_ref, catalog_ref, trust_ref)
trust_id = token_data['access'].get('trust', {}).get('id')
if trust_id:
# token trust validation
self.trust_api.get_trust(trust_id)
return token_data
except exception.ValidationError as e:
LOG.exception(_LE('Failed to validate token'))
raise exception.TokenNotFound(e)
def validate_v3_token(self, token_ref):
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
        # Let's go with the cached token strategy. Since the token
        # management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
trust_id = token_ref.get('trust_id')
if trust_id:
# token trust validation
self.trust_api.get_trust(trust_id)
token_data = token_ref.get('token_data')
if not token_data or 'token' not in token_data:
# token ref is created by V2 API
project_id = None
project_ref = token_ref.get('tenant')
if project_ref:
project_id = project_ref['id']
issued_at = token_ref['token_data']['access']['token']['issued_at']
audit = token_ref['token_data']['access']['token'].get('audit_ids')
token_data = self.v3_token_data_helper.get_token_data(
token_ref['user']['id'],
['password', 'token'],
project_id=project_id,
bind=token_ref.get('bind'),
expires=token_ref['expires'],
issued_at=issued_at,
audit_info=audit)
return token_data
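# Illustrative sketch (not part of Keystone): a standalone demonstration of
# the structural check BaseProvider.get_token_version applies above — a v2
# payload is keyed by 'access', a v3 payload by 'token' with a 'methods'
# list. The sample payloads are made up.
def _token_version_sketch():
    def guess_version(token_data):
        if 'access' in token_data:
            return 'v2.0'
        if 'token' in token_data and 'methods' in token_data['token']:
            return 'v3.0'
        raise ValueError('unrecognized token payload')
    v2_payload = {'access': {'token': {'id': 'abc'}}}
    v3_payload = {'token': {'methods': ['password'], 'user': {'id': 'u1'}}}
    return guess_version(v2_payload), guess_version(v3_payload)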
| 0 |
import json
from collections import OrderedDict
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.bears.LocalBear import LocalBear
from coalib.results.Diff import Diff
from coalib.results.Result import Result
class JSONFormatBear(LocalBear):
try:
DecodeError = json.decoder.JSONDecodeError
except AttributeError:
DecodeError = ValueError
LANGUAGES = {"JSON"}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
def run(self, filename, file,
json_sort: bool=False,
tab_width: int=SpacingHelper.DEFAULT_TAB_WIDTH,
escape_unicode: bool=False):
"""
Raises issues for any deviations from the pretty-printed JSON.
:param json_sort: Whether or not keys should be sorted.
:param tab_width: Number of spaces to indent.
:param escape_unicode: Whether or not to escape unicode values using
ASCII.
"""
try:
json_content = json.loads(''.join(file),
object_pairs_hook=OrderedDict)
except self.DecodeError as err:
yield Result.from_values(
self,
"This file does not contain parsable JSON. " + repr(str(err)),
file=filename)
return
corrected = json.dumps(json_content,
sort_keys=json_sort,
indent=tab_width,
ensure_ascii=not escape_unicode
).splitlines(True)
# Because of a bug in several python versions we have to correct
# whitespace here.
corrected = tuple(line.rstrip(" \n") + "\n" for line in corrected)
diff = Diff.from_string_arrays(file, corrected)
if len(diff) > 0:
yield Result(self,
"This file can be reformatted by sorting keys and "
"following indentation.",
affected_code=tuple(d.range(filename)
for d in diff.split_diff()),
diffs={filename: diff})
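# Illustrative sketch (not part of coala): a standalone demonstration of the
# correction the bear above proposes — re-serialise the parsed document with
# sorted keys and a fixed indent, then strip the trailing whitespace some
# Python versions leave after separators. The sample document is made up.
def _json_reformat_sketch():
    import json
    from collections import OrderedDict
    raw = '{"b": 1,   "a": {"c": 2}}'
    parsed = json.loads(raw, object_pairs_hook=OrderedDict)
    pretty = json.dumps(parsed, sort_keys=True, indent=4).splitlines(True)
    return tuple(line.rstrip(" \n") + "\n" for line in pretty)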
| 0.002605 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cacheglobal_binding(base_resource):
""" Binding class showing the resources that can be bound to cacheglobal_binding.
"""
def __init__(self) :
self.cacheglobal_cachepolicy_binding = []
@property
def cacheglobal_cachepolicy_bindings(self) :
ur"""cachepolicy that can be bound to cacheglobal.
"""
try :
return self._cacheglobal_cachepolicy_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cacheglobal_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cacheglobal_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(self, service) :
ur""" Use this API to fetch a cacheglobal_binding resource .
"""
try :
obj = cacheglobal_binding()
response = obj.get_resource(service)
return response
except Exception as e:
raise e
class cacheglobal_binding_response(base_response) :
def __init__(self, length=1) :
self.cacheglobal_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cacheglobal_binding = [cacheglobal_binding() for _ in range(length)]
| 0.029537 |
import os
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-linkcheck',
version='0.6.4',
description="A Django app that will analyze and report on links in any "
"model that you register with it.",
long_description=read('README.rst'),
author='Andy Baker',
author_email='andy@andybak.net',
license='BSD',
url='http://github.com/andybak/django-linkcheck/',
packages=[
'linkcheck',
'linkcheck.management',
'linkcheck.management.commands',
'linkcheck.tests',
'linkcheck.tests.sampleapp',
],
package_data={
'linkcheck': [
'templates/linkcheck/*.html',
'templates/linkcheck/*.xhtml',
'tests/media/*',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
)
| 0.001706 |
from scipy.spatial.distance import cdist
from collections import defaultdict, Counter
import utils as u
import numpy as np
def count_categories(raw_categories):
"""Count the venues categories given by `raw_categories` and returns:
* sub_count: {sub category index: number of venues}
* top_count: {sub category index: total number of venues in the same top
category}
* sub_cat_to_top: {sub category index: corresponding top category index}
."""
sub_count = defaultdict(int)
top_cats = range(0, 9*int(1e5), int(1e5))
sub_cat_to_top = {sub: top for top in top_cats
for sub in range(top, top+200)}
sub_count.update(Counter(raw_categories))
top_count = defaultdict(int)
for top_cat in top_cats:
sub_cats = range(top_cat, top_cat+200)
total = sum([sub_count[sub_cat] for sub_cat in sub_cats])
for sub_cat in sub_cats:
top_count[sub_cat] = total - sub_count[sub_cat]
return sub_count, top_count, sub_cat_to_top
def NDCG(gold_cat, results, sub_cat_to_top, rank):
"""Compute the Normalized Discounted Cumulative Gain at rank K of
`results`, a ranked list of sub categories, given that we were trying to
retrieve `gold_cat` among `cats_count`."""
coeff = np.log2(np.arange(2, rank+2))
@u.memodict
def relevance(result_cat):
"""Compute R, the relevance of `result_cat` with respect to `query_cat`
and returns 2**R - 1"""
# if result in brand(query) return 1 where brand returns brand id in
# matching city
if gold_cat == result_cat:
return 1.0
if sub_cat_to_top[gold_cat] == sub_cat_to_top[result_cat]:
return 0.3195079
return 0.0
return np.sum(np.array(map(relevance, results)) / coeff)
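# Illustrative sketch (not part of the original module): a standalone
# demonstration of the discounted-cumulative-gain computation used by NDCG
# above — relevances are divided by log2(rank + 1) and the sum is normalised
# by the best achievable ordering. The relevance values are made up.
def _ndcg_sketch(relevances=(1.0, 0.0, 0.3195079, 1.0), rank=4):
    import numpy as np
    rel = np.asarray(relevances[:rank], dtype=float)
    coeff = np.log2(np.arange(2, rank + 2))
    dcg = np.sum(rel / coeff)
    ideal = np.sum(np.sort(rel)[::-1] / coeff)
    return dcg / ideal if ideal > 0 else 0.0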
def evaluate_by_NDCG(left, right, all_categories, mat, fake=False):
"""Query all venues in `left` to and return their DCG score when
matching them in `right` using the distance defined by `mat`. If `fake`,
return score of random ordering."""
k = int(left['knn'])
cats_count = count_categories(all_categories[right['city']])
sub_count, top_count, sub_cat_to_top = cats_count
coeff = np.log2(np.arange(2, k+2))
@u.memodict
def perfect_score(sub_cat):
"""Compute the maximum score if categories are ordered optimally with
respect to `sub_cat`."""
different_cat = max(0, k - sub_count[sub_cat] - top_count[sub_cat])
# 2**.4-1 = 0.3195079107728942
scores = np.array(sub_count[sub_cat]*[1.0, ] +
top_count[sub_cat]*[0.3195079, ] +
different_cat*[0.0, ])
return np.sum(scores[:k] / coeff)
res = []
metric = 'euclidean'
if mat is not None:
metric = 'mahalanobis'
mat = np.linalg.inv(mat)
if fake:
dst = np.random.randn(left['features'].shape[0],
right['features'].shape[0])
else:
dst = cdist(left['features'], right['features'], metric, VI=mat)
for venue_order, listed in enumerate(np.argsort(dst, axis=1)):
query_cat = all_categories[left['city']][venue_order]
results_cat = all_categories[right['city']][listed[:k]]
res.append(NDCG(query_cat, results_cat, sub_cat_to_top, k) /
perfect_score(query_cat))
return np.array(res)
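# Illustrative sketch (not part of the original module): a standalone
# demonstration of the metric choice made in evaluate_by_NDCG above — with a
# learned matrix the distance is Mahalanobis with VI set to that matrix's
# inverse, otherwise plain Euclidean. The random features are made up; with
# the identity matrix the two metrics coincide.
def _distance_sketch(n_left=5, n_right=7, n_features=3, seed=0):
    import numpy as np
    from scipy.spatial.distance import cdist
    rng = np.random.RandomState(seed)
    left = rng.randn(n_left, n_features)
    right = rng.randn(n_right, n_features)
    mat = np.eye(n_features)  # stand-in for a learned metric matrix
    mahalanobis = cdist(left, right, 'mahalanobis', VI=np.linalg.inv(mat))
    euclidean = cdist(left, right, 'euclidean')
    return np.allclose(mahalanobis, euclidean)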
| 0 |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
import boto3
from streamalert.shared.helpers.dynamodb import ignore_conditional_failure
from streamalert.shared.importer import import_folders
from streamalert.shared.logger import get_logger
from streamalert.shared.rule import Rule
LOGGER = get_logger(__name__)
class RuleTable:
"""Provides convenience methods for accessing and modifying the rules table."""
DEFAULT_STAGING_HOURS = 48
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def __init__(self, table_name, *rule_import_paths):
"""Load the given table to be used for rule information updates
Args:
table_name (str): The name of the DynamoDB table from which to load
rule info
rule_import_paths (str): Variable number of paths to import rules
from. Useful for using this as a standalone class. Items for this
                can be omitted if instantiated from a caller that has already
loaded the rules files.
"""
self._table = boto3.resource('dynamodb').Table(table_name)
import_folders(*rule_import_paths)
self._remote_rule_info = None
def __str__(self, verbose=False):
"""Return a human-readable respresentation of the table's data"""
if not self.remote_rule_names:
return 'Rule table is empty'
pad_size = max([len(rule) for rule in list(self.remote_rule_info.keys())]) + 4
output = ['{rule:<{pad}}Staged?'.format(rule='Rule', pad=pad_size+5)]
for index, rule in enumerate(sorted(self.remote_rule_info.keys()), start=1):
output.append(
'{index:>3d}: {rule: <{pad}}{staged}'.format(
index=index,
rule=rule,
pad=pad_size,
staged=self.remote_rule_info[rule]['Staged']
)
)
# Append additional information if verbose is enabled
if verbose:
details_pad_size = max([len(prop)
for prop in list(self.remote_rule_info[rule].keys())]) + 4
output.extend(
'{prefix:>{left_pad}}{property: <{internal_pad}}{value}'.format(
prefix='- ',
left_pad=7,
property='{}:'.format(prop),
internal_pad=details_pad_size,
value=self.remote_rule_info[rule][prop]
)
for prop in sorted(self.remote_rule_info[rule].keys())
if prop != 'Staged'
)
return '\n'.join(output)
def _add_new_rules(self, skip_staging=False):
"""Add any new local rules (renamed rules included) to the remote database"""
# If the table is empty, no rules have been added yet
# Add them all as unstaged to avoid demoting rules from production status
# Also, allow the user to bypass staging with the skip_staging flag
skip_staging = skip_staging or (len(self.remote_rule_names) == 0)
with self._table.batch_writer() as batch:
for rule_name in self.local_not_remote:
LOGGER.debug('Adding rule \'%s\' (skip staging=%s)', rule_name, skip_staging)
batch.put_item(self._dynamo_record(rule_name, skip_staging))
def _del_old_rules(self):
"""Delete any rules that exist in the rule database but not locally"""
with self._table.batch_writer() as batch:
for rule_name in self.remote_not_local:
LOGGER.debug('Deleting rule \'%s\'', rule_name)
batch.delete_item(Key={'RuleName': rule_name})
@classmethod
def _cast_value(cls, key, value):
"""Cast certain values into their respective object types
Args:
key (str): Name of key that this value corresponds to
value : Object to be cast, could be various types
Returns:
object: Variant type object in the expected type
"""
# Handle date casting from string to datetime object
if key in {'StagedAt', 'StagedUntil'}:
return datetime.strptime(value, cls.DATETIME_FORMAT)
return value
def _load_remote_state(self):
"""Return the state of all rules stored in the database
Returns:
dict: key = rule name, value = dictionary of staging information
Example:
{
'example_rule_name':
{
'Staged': True
'StagedAt': datetime.datetime object,
'StagedUntil': datetime.datetime object
}
}
"""
paginator = self._table.meta.client.get_paginator('scan')
page_iterator = paginator.paginate(TableName=self.name, ConsistentRead=True)
return {
item['RuleName']: {
key: self._cast_value(key, value)
for key, value in item.items()
if key != 'RuleName'
}
for page in page_iterator
for item in page['Items']
}
@staticmethod
def _default_dynamo_kwargs(rule_name):
return {
'Key': {'RuleName': rule_name},
'ConditionExpression': 'attribute_exists(RuleName)'
}
@staticmethod
def _dynamo_record(rule_name, skip_staging=False):
"""Generate a DynamoDB record with this rule information
Args:
rule_name (string): Name of rule for this record
skip_staging (bool): [optional] Argument that dictates if this rule
should skip the staging phase.
An initial deployment of rule info will skip the staging state
as to avoid taking rules out of production unexpectedly. This
argument can also be used to during the deploy process to
immediately put new rules into production.
"""
item = {
'RuleName': rule_name,
'Staged': not skip_staging
}
# We may want to skip staging if the database is empty (ie: newly created)
# or if the user is manually bypassing staging for this rule
if skip_staging:
return item
staged_at, staged_until = RuleTable._staged_window()
item.update({
'StagedAt': staged_at,
'StagedUntil': staged_until
})
return item
@staticmethod
def _staged_window():
"""Get staging window to be used for this rule
Returns:
tuple: staging start datetime, staging end datetime
"""
staged_at = datetime.utcnow()
staged_until = staged_at + timedelta(hours=RuleTable.DEFAULT_STAGING_HOURS)
return (
staged_at.strftime(RuleTable.DATETIME_FORMAT),
staged_until.strftime(RuleTable.DATETIME_FORMAT)
)
def update_local_cache(self):
"""Force the local cache of remote rule info to be updated"""
self._remote_rule_info = self._load_remote_state()
def rule_info(self, rule_name):
"""Get the rule info from the table information
Returns:
dict: Rule information for the specified rule from the DynamoDB rule table
"""
return self.remote_rule_info.get(rule_name)
@ignore_conditional_failure
def toggle_staged_state(self, rule_name, stage):
"""Mark the specified rule as staged=True or staged=False
Args:
rule_name (string): The name of the rule being staged
stage (bool): True if this rule should be staged and False if
this rule should be promoted out of staging.
"""
if rule_name not in self.remote_rule_info:
LOGGER.error(
'Staging status for rule \'%s\' cannot be set to %s; rule does not exist',
rule_name, stage
)
return
if self.remote_rule_info[rule_name]['Staged'] and stage:
LOGGER.info(
'Rule \'%s\' is already staged and will have its staging window updated',
rule_name
)
LOGGER.debug('Toggling staged state for rule \'%s\' to: %s', rule_name, stage)
update_expressions = ['set Staged = :staged']
expression_attributes = [':staged']
expression_values = [stage]
        # If staging, add some additional staging context to the expression
if stage:
update_expressions.extend(['StagedAt = :staged_at', 'StagedUntil = :staged_until'])
expression_attributes.extend([':staged_at', ':staged_until'])
expression_values.extend(self._staged_window())
args = {
'UpdateExpression': ','.join(update_expressions),
'ExpressionAttributeValues': dict(list(zip(expression_attributes, expression_values)))
}
args.update(self._default_dynamo_kwargs(rule_name))
self._table.update_item(**args)
def update(self, skip_staging=False):
"""Update the database with new local rules and remove deleted ones from remote"""
self._add_new_rules(skip_staging)
self._del_old_rules()
# Refresh the cached remote rule info state
self.update_local_cache()
@property
def local_not_remote(self):
"""Rules that exist locally but not within the remote database"""
return self.local_rule_names.difference(self.remote_rule_names)
@property
def local_rule_names(self):
"""Names of locally loaded rules"""
return set(Rule.rule_names())
@property
def name(self):
"""Name of the DynamoDB table used to store alerts."""
return self._table.table_name
@property
def remote_rule_info(self):
"""All rule info from the remote database. Returns cache if it exists"""
if not self._remote_rule_info:
self.update_local_cache()
return self._remote_rule_info
@property
def remote_rule_names(self):
"""Rule names from the remote database. Returns cache if it exists"""
return set(self.remote_rule_info)
@property
def remote_not_local(self):
"""Rules that exist in the remote database but not locally"""
return self.remote_rule_names.difference(self.local_rule_names)
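# Illustrative sketch (not part of StreamAlert): a standalone demonstration
# of the staging-window arithmetic used by RuleTable._staged_window above —
# staging starts now (UTC) and ends a fixed number of hours later, both
# serialised with DATETIME_FORMAT so DynamoDB stores plain strings. The
# 48-hour default mirrors DEFAULT_STAGING_HOURS.
def _staging_window_sketch(hours=48):
    from datetime import datetime, timedelta
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    staged_at = datetime.utcnow()
    staged_until = staged_at + timedelta(hours=hours)
    return staged_at.strftime(fmt), staged_until.strftime(fmt)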
| 0.001801 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"]
}
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
"significantly the converter cannot detect this reliably, so you"
"need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset within the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
    # Iterate over each line
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are processed in order, which makes
      # the indexing adjustments cumulative for changes that alter the
      # string length.
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
    This is necessary mainly because ast.ListComp reports the location of the
    next token after the '[' that opens the list comprehension.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
        # Reverse the preceding text and use a regular expression to search
        # for an opening '[' separated from col only by whitespace.
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
    # Most other nodes return proper locations (the `with` statement notably
    # does not), but it is not possible to use that in an argument.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
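# Illustrative sketch (not part of the original script): a standalone
# demonstration of the dotted-name extraction _get_attribute_full_path
# performs above — walk Attribute nodes down to the root Name and join the
# pieces in reverse. The sample source string is made up.
def _attribute_path_sketch(source="tf.train.SummaryWriter(logdir)"):
  import ast
  call = ast.parse(source, mode="eval").body  # an ast.Call node
  items = []
  curr = call.func
  while isinstance(curr, ast.Attribute):
    items.append(curr.attr)
    curr = curr.value
  if isinstance(curr, ast.Name):
    items.append(curr.id)
  return ".".join(reversed(items))  # -> "tf.train.SummaryWriter"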
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory, copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files. If you have custom code in other languages,
you will need to manually upgrade those.
Args:
root_directory: Directory to walk and process.
      output_root_directory: Directory to use as the base for the output tree.
      copy_other_files: Whether to copy non-Python files to the output tree.
    Returns:
      A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
    # Collect the list of files to process up front (so we correctly handle the
    # case where the user puts the output directory inside the input directory)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
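# A minimal sketch of programmatic use (with hypothetical paths), as opposed to the
# CLI entry point below:
#   upgrader = TensorFlowCodeUpgrader()
#   count, report, errors = upgrader.process_tree("old_code", "new_code", copy_other_files=True)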
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
      action="store_true",
      default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
  files_processed = 0
  errors = []
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
| 0.005906 |
# Copyright (C) 2010 Aldo Cortesi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import Queue, threading
should_exit = False
class DummyReply:
"""
A reply object that does nothing. Useful when we need an object to seem
like it has a channel, and during testing.
"""
def __init__(self):
self.acked = False
def __call__(self, msg=False):
self.acked = True
class Reply:
"""
Messages sent through a channel are decorated with a "reply" attribute.
This object is used to respond to the message through the return
channel.
"""
def __init__(self, obj):
self.obj = obj
self.q = Queue.Queue()
self.acked = False
def __call__(self, msg=None):
if not self.acked:
self.acked = True
if msg is None:
self.q.put(self.obj)
else:
self.q.put(msg)
class Channel:
def __init__(self, q):
self.q = q
def ask(self, m):
"""
Decorate a message with a reply attribute, and send it to the
        master, then wait for a response.
"""
m.reply = Reply(m)
self.q.put(m)
while not should_exit:
try:
# The timeout is here so we can handle a should_exit event.
g = m.reply.q.get(timeout=0.5)
except Queue.Empty: # pragma: nocover
continue
return g
def tell(self, m):
"""
Decorate a message with a dummy reply attribute, send it to the
master, then return immediately.
"""
m.reply = DummyReply()
self.q.put(m)
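# Minimal usage sketch (master_queue and SomeMessage are hypothetical names):
#   channel = Channel(master_queue)
#   reply = channel.ask(SomeMessage())   # blocks until the master replies
#   channel.tell(SomeMessage())          # fire-and-forget; a DummyReply is attached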
class Slave(threading.Thread):
"""
Slaves get a channel end-point through which they can send messages to
the master.
"""
def __init__(self, channel, server):
self.channel, self.server = channel, server
self.server.set_channel(channel)
threading.Thread.__init__(self)
def run(self):
self.server.serve_forever()
class Master:
"""
Masters get and respond to messages from slaves.
"""
def __init__(self, server):
"""
server may be None if no server is needed.
"""
self.server = server
self.masterq = Queue.Queue()
def tick(self, q):
changed = False
try:
            # Drain the queue until Queue.Empty is raised. Handling every
            # pending message in one pass avoids waiting out the get()
            # timeout between requests when several are already queued.
while True:
# Small timeout to prevent pegging the CPU
msg = q.get(timeout=0.01)
self.handle(msg)
changed = True
except Queue.Empty:
pass
return changed
def run(self):
global should_exit
should_exit = False
self.server.start_slave(Slave, Channel(self.masterq))
while not should_exit:
self.tick(self.masterq)
self.shutdown()
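    # Dispatch on the message's class name: a message of class Foo is routed to
    # self.handle_foo(msg) if such a method exists; otherwise it is acked as-is.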
def handle(self, msg):
c = "handle_" + msg.__class__.__name__.lower()
m = getattr(self, c, None)
if m:
m(msg)
else:
msg.reply()
def shutdown(self):
global should_exit
if not should_exit:
should_exit = True
if self.server:
self.server.shutdown()
| 0.000488 |
import gc
import pandas as pd
import numpy as np
import os
import arboretum
import lightgbm as lgb
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
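# Sample-averaged F1 score at one or more probability thresholds: predictions are
# scattered into a sparse (order x product) matrix and compared row by row against
# the matrix of true baskets.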
def fscore(true_value_matrix, prediction, order_index, product_index, rows, cols, threshold=[0.5]):
prediction_value_matrix = coo_matrix((prediction, (order_index, product_index)), shape=(rows, cols), dtype=np.float32)
# prediction_value_matrix.eliminate_zeros()
return list(map(lambda x: f1_score(true_value_matrix, prediction_value_matrix > x, average='samples'), threshold))
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_test = order_train.loc[order_train.eval_set == "test", ['order_id', 'product_id']]
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
print(order_train.columns)
###########################
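    # Per-product reorder probability: for each (product, user) pair, flag whether the
    # user ever reordered the product, then average that flag over the product's users.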
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
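    # Product-level statistics over prior orders: order and reorder counts, mean
    # add-to-cart position, reorder ratio, and the reorder probability computed above.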
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
    # droplevel keeps the positional order ('sum', 'size', 'mean'); columns.levels
    # is sorted and could mislabel the aggregates before the rename below.
    prod_stat.columns = prod_stat.columns.droplevel(0)
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
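    # User-level statistics from prior orders: number of orders and summary
    # statistics of the days between orders.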
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
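    # Per-product user counts (all users vs. users who reordered), plus relative
    # cart-position features derived from each order's size.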
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'}) \
.rename(columns={'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
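    # User x product interaction features: per-pair order counts, first/last order
    # numbers, cart-position and timing summaries, and the number of reorders.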
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered': ['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean',
'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
############### train
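    # Assemble the training frame: successive merges pull in product metadata, order
    # context, user/department/aisle aggregates, and the user x product features.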
print(order_train.shape)
order_train = pd.merge(order_train, products, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, orders, on='order_id')
print(order_train.shape)
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
print(order_train.shape)
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
print(order_train.shape)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
print(order_train.shape)
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
##############
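    # Assemble the test frame with the same sequence of merges as the training frame.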
order_test = pd.merge(order_test, products, on='product_id')
order_test = pd.merge(order_test, orders, on='order_id')
order_test = pd.merge(order_test, user_dep_stat, on=['user_id', 'department_id'])
order_test = pd.merge(order_test, user_aisle_stat, on=['user_id', 'aisle_id'])
order_test = pd.merge(order_test, prod_usr, on='product_id')
order_test = pd.merge(order_test, prod_usr_reordered, on='product_id', how='left')
    order_test.prod_users_unq_reordered.fillna(0, inplace=True)
order_test = pd.merge(order_test, data, on=['product_id', 'user_id'])
order_test['aisle_reordered_ratio'] = order_test.aisle_reordered / order_test.user_orders
order_test['dep_reordered_ratio'] = order_test.dep_reordered / order_test.user_orders
order_test = pd.merge(order_test, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
order_test = pd.merge(order_test, product_embeddings, on=['product_id'])
print('data is joined')
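    # Feature list fed to LightGBM: the 32 integer-named embedding columns are added,
    # and the categorical id columns are appended last so their positions are known.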
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
features.extend(embedings)
categories = ['product_id', 'aisle_id', 'department_id']
    # categorical id columns are appended last; pass their positional indices as a list
    cat_features = [len(features) + i for i in range(len(categories))]
features.extend(categories)
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features]
labels = order_train[['reordered']].values.astype(np.float32).flatten()
data_val = order_test[features]
assert data.shape[0] == 8474661
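    # Train a LightGBM binary classifier on the assembled features; the categorical
    # id columns are identified by the positional indices computed above.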
lgb_train = lgb.Dataset(data, labels, categorical_feature=cat_features)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'binary_logloss', 'auc'},
'num_leaves': 256,
'min_sum_hessian_in_leaf': 20,
'max_depth': 12,
'learning_rate': 0.05,
'feature_fraction': 0.6,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 3,
'verbose': 1
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=380)
prediction = gbm.predict(data_val)
# prediction = model.predict(data_val)
orders = order_test.order_id.values
products = order_test.product_id.values
result = pd.DataFrame({'product_id': products, 'order_id': orders, 'prediction': prediction})
result.to_pickle('data/prediction_lgbm.pkl')
| 0.005962 |