# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for low-level eager execution primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python import keras
class Tests(test.TestCase):
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MatMulCorrectResponse(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
a_100_by_784 = random_ops.random_uniform((100, 784))
b_100_by_784 = random_ops.random_uniform((100, 784))
ctx = context.context()
ctx.ensure_initialized()
self.assertAllClose(
math_ops.matmul(a_2_by_2, b_2_by_2),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
b_2_by_2, "transpose_a", False, "transpose_b", False))
self.assertAllClose(
math_ops.matmul(a_100_by_784, b_100_by_784, transpose_b=True),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_100_by_784,
b_100_by_784, "transpose_a", False, "transpose_b", True))
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableMatMulCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
x = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
False, "transpose_b", False)
y = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, a_2_by_2,
"transpose_a", False, "transpose_b", False)
self.assertAllEqual(x, y)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_TapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
tape.watch(a_2_by_2)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
a_2_by_2, "transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [a_2_by_2])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
tape.watch(m)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m,
"transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [m])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
math_ops.add_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"AddN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "AddN", None, None,
[a_2_by_2, b_2_by_2])
z2 = math_ops.add_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1, [a_2_by_2])[0]
dz2_dy = tape.gradient(z2, [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNCorrectResponse(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
array_ops.identity_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"IdentityN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNTapeWrite(self):
ctx = context.context()
ctx.ensure_initialized()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "IdentityN", None, None,
[a_2_by_2, b_2_by_2])
z2 = array_ops.identity_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1[0], [a_2_by_2])[0]
dz2_dy = tape.gradient(z2[0], [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_InvalidInputs(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
ctx = context.context()
ctx.ensure_initialized()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
ctx_handle = ctx._handle # pylint: disable=protected-access
# Not enough base params
with self.assertRaisesRegexp(ValueError,
"at least 5 items in the input tuple"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Identity")
# Not enough inputs
with self.assertRaisesRegexp(ValueError,
"Expected to be at least 6, was 5"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx_handle,
"Identity", None, [])
# Bad type
with self.assertRaisesRegexp(TypeError, "expected a string for op_name"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
ctx_handle, None, [], a_2_by_2)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastPathExecute_InvalidAttributes(self):
split_dim = constant_op.constant(0, dtype=dtypes.int32)
value = constant_op.constant([0, 1, 2, 3], dtype=dtypes.float32)
ctx = context.context()
ctx.ensure_initialized()
ctx_handle = ctx._handle
with self.assertRaises(core._FallbackException):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Split", None, None, split_dim,
value, "num_split", -1)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testInvalidNumOutputs(self):
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of -1 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of 0 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=0)
def testIsFunction(self):
ctx = context.context()
self.assertFalse(ctx.has_function("not_a_function"))
@def_function.function
def f():
return 1.
self.assertTrue(ctx.has_function(f.get_concrete_function().name))
def testEagerExecute_InvalidType(self):
# Test case for GitHub issue 26879.
value = keras.layers.Input((128, 128, 1), dtype="float32")
with self.assertRaisesRegexp(TypeError,
"Expected list for 'values' argument"):
_ = array_ops.stack(value, axis=1)
def testGraphResourceVariableRaisesFallback(self):
with ops.Graph().as_default():
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
ctx = context.context()
ctx.ensure_initialized()
with self.assertRaises(core._FallbackException):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", None, None, m, m,
"transpose_a", False,
"transpose_b", False)
if __name__ == "__main__":
test.main()
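# A minimal sketch of the fast-path calling convention exercised by the tests
# above (the tensor names are placeholders, not additional test code): the
# first arguments are the context handle, the device name and the op name,
# followed by two slots these tests pass as None, then the op inputs, and
# finally flat attribute name/value pairs.
#
#   ctx = context.context()
#   ctx.ensure_initialized()
#   product = pywrap_tensorflow.TFE_Py_FastPathExecute(
#       ctx._handle, ctx.device_name, "MatMul", None, None,
#       some_tensor, some_tensor, "transpose_a", False, "transpose_b", False)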
#!/usr/bin/env python3
"""
Convert $1, a markdown file, to an HTML file at `/tmp/vim/<parent>/<basename>.html`.
"""
# Standard Library
from os.path import realpath, basename, isfile, isdir, dirname, abspath
import os
import shutil
import subprocess
import sys
import re
import logging
from logging import Logger
from typing import Dict, Iterable, Optional, Set, Text, List
from subprocess import DEVNULL, PIPE, run
from meta import MATHJAX_CONFS, LongOpt, CodeStyle, FromFmt, Ext, ToFmt
from utils import validate_executables, require_exists, vimdir_path, ensure_exists
# initialise logging with a sane configuration
logging.basicConfig(level=logging.WARN,
format="%(levelname)s:%(asctime)s %(message)s")
log: Logger = logging.getLogger()
validate_executables(["pandoc"])
# NOTE this will try to include a lua filter from `./task-list.lua`.
class PandocCmd:
def __init__(
self,
input_file: Text,
stylesheet: Text = vimdir_path("css", "styles.css"),
javascript: Text = vimdir_path("js", "script.js"),
from_fmt: FromFmt = FromFmt.MARKDOWN,
to_fmt: ToFmt = ToFmt.HTML5,
exts: List[Ext] = [
Ext.ASCII_IDENTIFIERS,
Ext.AUTOLINK_BARE_URIS,
Ext.COMPACT_DEFINITION_LISTS,
Ext.FENCED_DIVS,
Ext.GFM_AUTO_IDENTIFIERS,
Ext.INTRAWORD_UNDERSCORES,
Ext.MARKDOWN_ATTRIBUTE,
Ext.MMD_HEADER_IDENTIFIERS,
Ext.MMD_LINK_ATTRIBUTES,
Ext.SMART,
Ext.TEX_MATH_DOLLARS,
Ext.TEX_MATH_DOUBLE_BACKSLASH,
Ext.TEX_MATH_SINGLE_BACKSLASH,
],
no_exts: List[Ext] = [Ext.ALL_SYMBOLS_ESCAPABLE, Ext.ESCAPED_LINE_BREAKS],
long_opts: Dict[LongOpt, Optional[Text]] = {
LongOpt.ATX_HEADERS: None,
LongOpt.REFERENCE_LOCATION: "document",
LongOpt.SECTION_DIVS: None,
LongOpt.EMAIL_OBFUSCATION: "javascript",
},
code_style: CodeStyle = CodeStyle.TANGO,
mathjax_version: Text = "2.7.5",
mathjax_conf: Text = "TeX-AMS_HTML",
width: int = 80,
toc_depth: int = 3,
) -> None:
log.debug("initalising a PandocCmd object")
self.exts: List[Ext] = []
self.no_exts: List[Ext] = []
self.long_opts: Dict[LongOpt, Optional[Text]] = dict()
self.set_opts(long_opts).set_exts(exts).set_no_exts(no_exts).set_input_file(
input_file
).set_opt(LongOpt.STANDALONE).set_to_fmt(to_fmt).set_from_fmt(
from_fmt
).set_mathjax(
mathjax_version, mathjax_conf
).set_width(
width
).set_stylesheet(
stylesheet
).set_code_style(
code_style
).set_javascript(
javascript
)
if toc_depth:
self.set_toc_depth(toc_depth).set_opt(LongOpt.TOC)
lua_filter: str = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'task-list.lua')
if isfile(lua_filter):
self.set_opt(LongOpt.LUA_FILTER, lua_filter)
else:
log.error(f'failed to find lua filter at {lua_filter}')
def set_from_fmt(self, fmt: FromFmt) -> object:
assert fmt in FromFmt, f"from format '{fmt}' invalid"
self.from_fmt: FromFmt = fmt
return self
def set_opt(self, key: LongOpt, val: Optional[Text] = None) -> object:
self.long_opts[key] = val
return self
def set_opts(self, pairs: Dict[LongOpt, Optional[Text]]) -> object:
for (k, v) in pairs.items():
self.set_opt(k, v)
return self
def set_to_fmt(self, fmt: ToFmt) -> object:
self.to_fmt = fmt
return self
def set_input_file(self, file_path: Text) -> object:
require_exists(file_path)
self.input_file = file_path
return self
def set_width(self, n: int) -> object:
assert n and n >= 0, f"invalid value {str(n)}"
return self.set_opt(LongOpt.COLUMNS, str(n))
def set_stylesheet(self, css_path: Text) -> object:
require_exists(css_path)
return self.set_opt(LongOpt.CSS, css_path)
def set_javascript(self, js_path: Text) -> object:
require_exists(js_path)
self.javascript = js_path
return self
def set_toc_depth(self, n: int) -> object:
assert n and n >= 0, f"invalid value {n}"
return self.set_opt(LongOpt.TOC_DEPTH, str(n))
def set_code_style(self, style: CodeStyle) -> object:
return self.set_opt(LongOpt.HIGHLIGHT_STYLE, style._name_.lower())
def set_mathjax(self, version: Text, cfg: Text) -> object:
assert cfg and cfg in MATHJAX_CONFS, f"unrecognised MathJax config {cfg}"
assert (
version
and len(version) >= 3
and version[0] == "2"
and version[1] == "."
and str.isdigit(version[2])
), f"unrecognised MathJax version {version}"
return self.set_opt(
LongOpt.MATHJAX,
f"https://cdnjs.cloudflare.com/ajax/libs/mathjax/{version}/MathJax.js?config={cfg}",
)
def set_exts(self, exts: Iterable[Ext]) -> object:
for ext in exts:
self.set_ext(ext)
return self
def set_ext(self, ext: Ext) -> object:
self.exts.append(ext)
return self
def set_no_ext(self, ext: Ext) -> object:
self.no_exts.append(ext)
return self
def set_no_exts(self, exts: Iterable[Ext]) -> object:
for ext in exts:
self.set_no_ext(ext)
return self
@property
def args(self) -> List[Text]:
arguments = ["pandoc", "--from"]
from_fmt = self.from_fmt._name_.lower()
if len(self.exts) > 0:
for ext in self.exts:
from_fmt += f"+{ext._name_.lower()}"
if len(self.no_exts) > 0:
for ext in self.no_exts:
from_fmt += f"-{ext._name_.lower()}"
arguments.append(from_fmt)
arguments.extend(["--to", self.to_fmt._name_.lower()])
for opt in self.long_opts.keys():
maybe_val: Optional[Text] = self.long_opts[opt]
opt_name: Text = opt._name_.lower().replace("_", "-")
if maybe_val:
arguments.append(f"--{opt_name}={maybe_val}")
else:
arguments.append(f"--{opt_name}")
log.debug(f"args: {arguments}")
return arguments
def before(self, text: Text) -> Text:
"""Fix badly formatted markdown where heading marker `#` is not
followed by space.
NOTE: This method is pure.
:param text: input text before transformations
:return: output text after transformations
"""
log.debug("preprocessing [before hook]")
return re.sub(r"(#+)([A-Z])", r"\1 \2", text, flags=re.MULTILINE)
def after(self, text: Text) -> Text:
"""Transform relative links and references, Inject JavaScript.
NOTE: This method is pure.
Match on either src or href e.g.:
`src="./script.js"` and `href="styles.css"`
skip over whitespace e.g.:
`src=" address/file.png"`
match if relative e.g.:
`./`
or match if not an external link with a protocol e.g.:
`https://stackoverflow.com`
or match if not a valid directory reference e.g.:
`/srv/log/log.txt` and `~/file.txt`
:param text: input text
:return: output after transformations
"""
pat = re.compile(r'(href|src)="\s*(\./|(?![a-z]{2,10}://|~|\#|/))')
d: Text = dirname(self.input_file)
with open(self.javascript) as f:
log.debug("postprocessing [after hook]")
return re.sub(pat, f'\\1="{d}/', text).replace(
"</body>", f"<script>{f.read()}</script></body>"
)
def execute(self) -> Text:
log.debug("executing")
with open(self.input_file, encoding="utf-8") as input:
return self.after(
run(
self.args,
encoding="utf-8",
input=self.before(input.read()),
stderr=DEVNULL,
stdout=PIPE,
).stdout
)
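# Illustrative behaviour of the pre/post hooks (the inputs below are made-up
# examples, not fixtures shipped with this script): `before` inserts the
# missing space after heading markers, and `after` rewrites relative href/src
# attributes against the input file's directory, leaves absolute URLs alone,
# and inlines the configured JavaScript before the closing </body> tag.
#
#   before("##Summary")               -> "## Summary"
#   # with input_file "/notes/post.md":
#   #   src="./img/fig.png"           -> src="/notes/img/fig.png"
#   #   href="https://example.com"       (unchanged)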
if __name__ == "__main__":
INPUT_FILE = sys.argv[1]
log.debug(f"input file: {INPUT_FILE}")
OUTPUT_FILE: Text = os.path.join(
"/tmp/vim",
basename(dirname(INPUT_FILE)),
re.sub(
r"^(.*)\.(?:r?md|m(?:ark)?down)$",
r"\1.html",
basename(INPUT_FILE),
flags=re.IGNORECASE | re.MULTILINE,
),
)
log.debug(f"output file: {OUTPUT_FILE}")
ensure_exists(OUTPUT_FILE)
with open(OUTPUT_FILE, "w", encoding="utf-8") as output:
cmd = PandocCmd(INPUT_FILE)
output.write(cmd.execute())
print(f"Cmd: {' '.join(cmd.args)}")
print(f'Output: {output.name}')
# vim:foldmethod=manual:
# coding=utf-8
from __future__ import absolute_import, unicode_literals, division
import pathtools.patterns
import termcolor
import logging
import shutil
import time
import re
import os
class TimedSet(set):
__slots__ = ["updated"]
def __init__(self, updated):
self.updated = updated
super(TimedSet, self).__init__()
class Handler(object):
def __init__(self, patterns, ignore_patterns=None, suffixes=[""]):
"""Creates a new handler. `patterns` and `ignore_patterns` are lists of
glob patterns that determine what files this handler operates on.
`suffixes` is a list of extensions that will be added to an input file
to produce output files (the files will be produced in the output
folder, but will respect the input folder structure)."""
super(Handler, self).__init__()
self.patterns = [os.path.normcase(pattern) for pattern in patterns]
self.ignore_patterns = None
if ignore_patterns:
self.ignore_patterns = [os.path.normcase(pattern) for
pattern in ignore_patterns]
self.suffixes = suffixes
self.failures = {}
def handles(self, src, path):
"""Must return a list of files that this handler will produce after
successfully processing `path`. If the current handler does not operate
on `path`, None should be returned. If instead `path` should not
produce output by itself (but should be compiled nonetheless), then an
empty list should be returned. This function will be called every time
the file identified by `path` changes (only the modification time is
taken into account). `src` is provided for convenience; it allows direct
access to the file's contents."""
if not pathtools.patterns.match_path(path, self.patterns,
self.ignore_patterns):
return None
return self._outputs(src, path)
def deleted(self, src, path):
"""Called whenever `path` is deleted from the source folder `src`."""
def changed(self, src, path, dest):
"""Called whenever `path` is changed in the source folder `src`. `dest`
is the output folder. The default implementation calls `build` after
determining that the input file is newer than any of the outputs, or
any of the outputs does not exist."""
try:
mtime = os.path.getmtime(os.path.join(src, path))
self._build(src, path, dest, mtime)
except EnvironmentError as e:
logging.error("{0} is inaccessible: {1}".format(
termcolor.colored(path, "yellow", attrs=["bold"]),
e.args[0]
))
def _outputs(self, src, path):
return [path + suffix for suffix in self.suffixes]
def _build(self, src, path, dest, mtime):
"""Calls `build` after testing that at least one output file (as
returned by `_outputs()`) does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `path` until this method is called with a larger mtime.
"""
input_path = os.path.join(src, path)
output_paths = [os.path.join(dest, output) for output in
self._outputs(src, path)]
if path in self.failures and mtime <= self.failures[path]:
# the input file was not modified since the last recorded failure
# as such, assume that the task will fail again and skip it
return
for output in output_paths:
try:
if \
os.path.exists(output) and \
mtime <= os.path.getmtime(output):
# output file exists and is up to date; no need to trigger a
# rebuild for this file
continue
except EnvironmentError:
# usually happens when the output file has been deleted in
# between the call to exists and the call to getmtime
pass
start = time.time()
try:
self.build(input_path, output_paths)
except Exception as e:
if isinstance(e, EnvironmentError):
# non-zero return code in sub-process; only show message
logging.error("{0} failed after {1:.2f}s: {2}".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start, e.args[0]
))
else:
# probably a bug in the handler; show full trace
logging.exception("{0} failed after {1:.2f}s".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start
))
self.failures[path] = start
else:
logging.info("{0} completed in {1:.2f}s".format(
termcolor.colored(path, "green", attrs=["bold"]),
time.time() - start
))
self.failures.pop(path, None)
break
def build(self, input_path, output_paths):
"""Should be extended by subclasses to actually do stuff. By default
this will copy `input` over every file in the `outputs` list."""
for output in output_paths:
shutil.copy(input_path, output)
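# A minimal concrete handler, purely illustrative (the class name, pattern and
# suffix are assumptions, not part of this module): it watches ".txt" sources
# and writes an upper-cased copy to each declared output path.
class UppercaseHandler(Handler):
    def __init__(self):
        super(UppercaseHandler, self).__init__(["*.txt"], suffixes=[".up"])

    def build(self, input_path, output_paths):
        with open(input_path) as src:
            data = src.read().upper()
        for output in output_paths:
            with open(output, "w") as dst:
                dst.write(data)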
_base_path = re.compile(r"///.*?<base\s+path=[\"\'](.*)[\"\']\s*/>", re.I)
class TreeHandler(Handler):
def __init__(self, patterns, ignore_patterns=None, suffixes=[""],
line_regex=_base_path):
"""Creates a new tree handler. `line_regex` is a regex that determines
whether a file is self-sufficient or must be included in one (or more)
files."""
super(TreeHandler, self).__init__(patterns, ignore_patterns, suffixes)
self.line_regex = re.compile(line_regex)
self.parents = {} # maps a filename to a list of direct parents
self.children = {} # maps a filename to a list of direct children
def rebuild_references(self, src, path, reject=None):
"""Updates `parents` and `children` to be in sync with the changes to
`src` if any."""
if reject is None:
reject = set()
reject.add(path)
try:
filename = os.path.join(src, path)
mtime = os.path.getmtime(filename)
contents = open(filename)
except EnvironmentError:
raise ValueError("Unable to open '{0}'".format(path))
if \
path in self.parents and \
self.parents[path].updated == mtime:
# cache hit; no need to update
return
# drop existing references
if path in self.parents:
self.deleted(src, path)
# build a list of parents
parents = TimedSet(mtime)
current = os.path.dirname(path)
for line in contents:
match = self.line_regex.search(line)
if match:
parent = match.group(1)
relative = os.path.normpath(os.path.join(current, parent))
if relative.startswith(".."):
raise ValueError("Parent reference '{0}' outside of "
"watched folder in '{1}'".format(parent,
path))
parent = os.path.normcase(relative)
if parent in reject:
raise ValueError("Circular reference to '{0}' "
"detected in '{1}'".format(parent,
path))
parents.add(parent)
for parent in parents:
# recursively build references for all parents; this will
# usually be a cache hit and no-op
self.rebuild_references(src, parent, reject)
self.parents[path] = parents
for parent in parents:
# add this node to each of its parents' children
if parent not in self.children:
self.children[parent] = set()
self.children[parent].add(path)
def handles(self, src, path):
if \
not pathtools.patterns.match_path(path, self.patterns,
self.ignore_patterns) and \
path not in self.children:
# allow both files that match the pattern as well as explicitly
# defined parent files
return None
# rebuild references
try:
start = time.time()
self.rebuild_references(src, path)
except ValueError as e:
# there was an error processing this file
logging.error("{0} failed after {1:.2f}s: {1}".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start, e.args[0]
))
return None
# only files that don't have any parent produce output via this handler
if self.parents[path]:
return []
else:
return self._outputs(src, path)
def deleted(self, src, path):
"""Update the reference tree when a handled file is deleted."""
if self.parents[path] is not None:
for parent in self.parents[path]:
self.children[parent].remove(path)
if not self.children[parent]:
del self.children[parent]
del self.parents[path]
def changed(self, src, path, dest):
"""If `path` does not have any parents, it is built. Otherwise, it will
attempt to build every parent of `path` (or their parents). Output file
modification times are taken into account to prevent unnecessary
builds."""
modified = {path: self.parents[path].updated}
while True:
for path in modified:
if self.parents[path]:
mtime = modified.pop(path)
for parent in self.parents[path]:
modified[parent] = max(mtime,
self.parents[parent].updated)
break
else:
break
for path in modified:
self._build(src, path, dest, modified[path])
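# Illustrative parent reference (the file names below are assumptions): a file
# that is only ever included by another declares its parent with a comment
# line matched by `_base_path`, e.g. a "widgets/button.js" containing
#
#   /// <base path="../app.js"/>
#
# is treated as a child of "app.js"; changes to it trigger a rebuild of the
# parent rather than producing output of its own.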
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
import pipes
import re
import sys
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from manila import exception
from manila.i18n import _
from manila.share.drivers.ganesha import utils as ganesha_utils
from manila import utils
LOG = log.getLogger(__name__)
IWIDTH = 4
def _conf2json(conf):
"""Convert Ganesha config to JSON."""
# tokenize config string
token_list = [io.StringIO()]
state = {
'in_quote': False,
'in_comment': False,
'escape': False,
}
cbk = []
for char in conf:
if state['in_quote']:
if not state['escape']:
if char == '"':
state['in_quote'] = False
cbk.append(lambda: token_list.append(io.StringIO()))
elif char == '\\':
cbk.append(lambda: state.update({'escape': True}))
else:
if char == "#":
state['in_comment'] = True
if state['in_comment']:
if char == "\n":
state['in_comment'] = False
else:
if char == '"':
token_list.append(io.StringIO())
state['in_quote'] = True
state['escape'] = False
if not state['in_comment']:
token_list[-1].write(char)
while cbk:
cbk.pop(0)()
if state['in_quote']:
raise RuntimeError("Unterminated quoted string")
# jsonify tokens
js_token_list = ["{"]
for tok in token_list:
tok = tok.getvalue()
if tok[0] == '"':
js_token_list.append(tok)
continue
for pat, s in [
# add omitted "=" signs to block openings
(r'([^=\s])\s*{', '\\1={'),
# delete trailing semicolons in blocks
(r';\s*}', '}'),
# add omitted semicolons after blocks
(r'}\s*([^}\s])', '};\\1'),
# separate syntactically significant characters
(r'([;{}=])', ' \\1 ')]:
tok = re.sub(pat, s, tok)
# map tokens to JSON equivalents
for word in tok.split():
if word == "=":
word = ":"
elif word == ";":
word = ','
elif (word in ['{', '}'] or
re.search(r'\A-?[1-9]\d*(\.\d+)?\Z', word)):
pass
else:
word = jsonutils.dumps(word)
js_token_list.append(word)
js_token_list.append("}")
# group quoted strings
token_grp_list = []
for tok in js_token_list:
if tok[0] == '"':
if not (token_grp_list and isinstance(token_grp_list[-1], list)):
token_grp_list.append([])
token_grp_list[-1].append(tok)
else:
token_grp_list.append(tok)
# process quoted string groups by joining them
js_token_list2 = []
for x in token_grp_list:
if isinstance(x, list):
x = ''.join(['"'] + [tok[1:-1] for tok in x] + ['"'])
js_token_list2.append(x)
return ''.join(js_token_list2)
def _dump_to_conf(confdict, out=sys.stdout, indent=0):
"""Output confdict in Ganesha config format."""
if isinstance(confdict, dict):
for k, v in confdict.items():
if v is None:
continue
if isinstance(v, dict):
out.write(' ' * (indent * IWIDTH) + k + ' ')
out.write("{\n")
_dump_to_conf(v, out, indent + 1)
out.write(' ' * (indent * IWIDTH) + '}')
elif isinstance(v, list):
for item in v:
out.write(' ' * (indent * IWIDTH) + k + ' ')
out.write("{\n")
_dump_to_conf(item, out, indent + 1)
out.write(' ' * (indent * IWIDTH) + '}\n')
# The 'CLIENTS' Ganesha string option is an exception in that its
# string value can't be enclosed within quotes, as can be done for
# other string options in a valid Ganesha conf file.
elif k.upper() == 'CLIENTS':
out.write(' ' * (indent * IWIDTH) + k + ' = ' + v + ';')
else:
out.write(' ' * (indent * IWIDTH) + k + ' ')
out.write('= ')
_dump_to_conf(v, out, indent)
out.write(';')
out.write('\n')
else:
dj = jsonutils.dumps(confdict)
out.write(dj)
def parseconf(conf):
"""Parse Ganesha config.
Both native format and JSON are supported.
Convert config to a (nested) dictionary.
"""
def list_to_dict(src_list):
# Convert a list of key-value pairs stored as tuples to a dict.
# For tuples with identical keys, preserve all the values in a
# list. e.g., argument [('k', 'v1'), ('k', 'v2')] to function
# returns {'k': ['v1', 'v2']}.
dst_dict = {}
for i in src_list:
if isinstance(i, tuple):
k, v = i
if isinstance(v, list):
v = list_to_dict(v)
if k in dst_dict:
dst_dict[k] = [dst_dict[k]]
dst_dict[k].append(v)
else:
dst_dict[k] = v
return dst_dict
try:
# allow config to be specified in JSON --
# for the sake of people who might find the Ganesha config format foreign.
d = jsonutils.loads(conf)
except ValueError:
# Customize JSON decoder to convert Ganesha config to a list
# of key-value pairs stored as tuples. This allows multiple
# occurrences of a config block to be later converted to a
# dict key-value pair, with block name being the key and a
# list of block contents being the value.
li = jsonutils.loads(_conf2json(conf), object_pairs_hook=lambda x: x)
d = list_to_dict(li)
return d
def mkconf(confdict):
"""Create Ganesha config string from confdict."""
s = io.StringIO()
_dump_to_conf(confdict, s)
return s.getvalue()
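# Illustrative round trip (the export block below is a made-up minimal sample,
# not a template shipped with this module): parseconf() accepts both JSON and
# the native Ganesha syntax and yields a nested dict, which mkconf() turns
# back into Ganesha-format text.
#
#   sample = '''
#   EXPORT {
#       Export_Id = 101;
#       Path = "/share";
#   }
#   '''
#   conf = parseconf(sample)   # {'EXPORT': {'Export_Id': 101, 'Path': '/share'}}
#   text = mkconf(conf)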
rados = None
def setup_rados():
global rados
if not rados:
try:
rados = importutils.import_module('rados')
except ImportError:
raise exception.ShareBackendException(
_("python-rados is not installed"))
class GaneshaManager(object):
"""Ganesha instrumentation class."""
def __init__(self, execute, tag, **kwargs):
self.confrx = re.compile(r'\.conf\Z')
self.ganesha_config_path = kwargs['ganesha_config_path']
self.tag = tag
def _execute(*args, **kwargs):
msg = kwargs.pop('message', args[0])
makelog = kwargs.pop('makelog', True)
try:
return execute(*args, **kwargs)
except exception.ProcessExecutionError as e:
if makelog:
LOG.error(
("Error while executing management command on "
"Ganesha node %(tag)s: %(msg)s."),
{'tag': tag, 'msg': msg})
raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
cmd=e.cmd)
self.execute = _execute
self.ganesha_service = kwargs['ganesha_service_name']
self.ganesha_export_dir = kwargs['ganesha_export_dir']
self.execute('mkdir', '-p', self.ganesha_export_dir)
self.ganesha_rados_store_enable = kwargs.get(
'ganesha_rados_store_enable')
if self.ganesha_rados_store_enable:
setup_rados()
self.ganesha_rados_store_pool_name = (
kwargs['ganesha_rados_store_pool_name'])
self.ganesha_rados_export_counter = (
kwargs['ganesha_rados_export_counter'])
self.ganesha_rados_export_index = (
kwargs['ganesha_rados_export_index'])
self.rados_client = kwargs['rados_client']
try:
self._get_rados_object(self.ganesha_rados_export_counter)
except rados.ObjectNotFound:
self._put_rados_object(self.ganesha_rados_export_counter,
str(1000))
else:
self.ganesha_db_path = kwargs['ganesha_db_path']
self.execute('mkdir', '-p', os.path.dirname(self.ganesha_db_path))
# Here we make sure that an SQLite database with the
# required schema exists at self.ganesha_db_path.
# The following command gets us there -- provided the file
# does not yet exist (otherwise it just fails). However,
# we don't care about this condition, we just execute the
# command unconditionally (ignoring failure). Instead we
# directly query the db right after, to check its validity.
self.execute(
"sqlite3", self.ganesha_db_path,
'create table ganesha(key varchar(20) primary key, '
'value int); insert into ganesha values("exportid", '
'100);', run_as_root=False, check_exit_code=False)
self.get_export_id(bump=False)
def _getpath(self, name):
"""Get the path of config file for name."""
return os.path.join(self.ganesha_export_dir, name + ".conf")
@staticmethod
def _get_export_rados_object_name(name):
return 'ganesha-export-' + name
def _write_tmp_conf_file(self, path, data):
"""Write data to tmp conf file."""
dirpath, fname = os.path.dirname(path), os.path.basename(path)
tmpf = self.execute('mktemp', '-p', dirpath, "-t",
fname + ".XXXXXX")[0][:-1]
self.execute(
'sh', '-c',
'echo %s > %s' % (pipes.quote(data), pipes.quote(tmpf)),
message='writing ' + tmpf)
return tmpf
def _write_conf_file(self, name, data):
"""Write data to config file for name atomically."""
path = self._getpath(name)
tmpf = self._write_tmp_conf_file(path, data)
try:
self.execute('mv', tmpf, path)
except exception.ProcessExecutionError as e:
LOG.error('mv temp file ({0}) to {1} failed.'.format(tmpf, path))
self.execute('rm', tmpf)
raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
cmd=e.cmd)
return path
def _mkindex(self):
"""Generate the index file for current exports."""
@utils.synchronized("ganesha-index-" + self.tag, external=True)
def _mkindex():
files = filter(lambda f: self.confrx.search(f) and
f != "INDEX.conf",
self.execute('ls', self.ganesha_export_dir,
run_as_root=False)[0].split("\n"))
index = "".join(map(lambda f: "%include " + os.path.join(
self.ganesha_export_dir, f) + "\n", files))
self._write_conf_file("INDEX", index)
_mkindex()
def _read_export_rados_object(self, name):
return parseconf(self._get_rados_object(
self._get_export_rados_object_name(name)))
def _read_export_file(self, name):
return parseconf(self.execute("cat", self._getpath(name),
message='reading export ' + name)[0])
def _read_export(self, name):
"""Return the dict of the export identified by name."""
if self.ganesha_rados_store_enable:
return self._read_export_rados_object(name)
else:
return self._read_export_file(name)
def _check_export_rados_object_exists(self, name):
try:
self._get_rados_object(
self._get_export_rados_object_name(name))
return True
except rados.ObjectNotFound:
return False
def _check_file_exists(self, path):
try:
self.execute('test', '-f', path, makelog=False,
run_as_root=False)
return True
except exception.GaneshaCommandFailure as e:
if e.exit_code == 1:
return False
else:
raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
cmd=e.cmd)
def _check_export_file_exists(self, name):
return self._check_file_exists(self._getpath(name))
def check_export_exists(self, name):
"""Check whether export exists."""
if self.ganesha_rados_store_enable:
return self._check_export_rados_object_exists(name)
else:
return self._check_export_file_exists(name)
def _write_export_rados_object(self, name, data):
"""Write confdict to the export RADOS object of name."""
self._put_rados_object(self._get_export_rados_object_name(name),
data)
# temp export config file required for DBus calls
return self._write_tmp_conf_file(self._getpath(name), data)
def _write_export(self, name, confdict):
"""Write confdict to the export file or RADOS object of name."""
for k, v in ganesha_utils.walk(confdict):
# values in the export block template that need to be
# filled in by Manila are pre-fixed by '@'
if isinstance(v, str) and v[0] == '@':
msg = _("Incomplete export block: value %(val)s of attribute "
"%(key)s is a stub.") % {'key': k, 'val': v}
raise exception.InvalidParameterValue(err=msg)
if self.ganesha_rados_store_enable:
return self._write_export_rados_object(name, mkconf(confdict))
else:
return self._write_conf_file(name, mkconf(confdict))
def _rm_file(self, path):
self.execute("rm", "-f", path)
def _rm_export_file(self, name):
"""Remove export file of name."""
self._rm_file(self._getpath(name))
def _rm_export_rados_object(self, name):
"""Remove export object of name."""
self._delete_rados_object(self._get_export_rados_object_name(name))
def _dbus_send_ganesha(self, method, *args, **kwargs):
"""Send a message to Ganesha via dbus."""
service = kwargs.pop("service", "exportmgr")
self.execute("dbus-send", "--print-reply", "--system",
"--dest=org.ganesha.nfsd", "/org/ganesha/nfsd/ExportMgr",
"org.ganesha.nfsd.%s.%s" % (service, method), *args,
message='dbus call %s.%s' % (service, method), **kwargs)
def _remove_export_dbus(self, xid):
"""Remove an export from Ganesha runtime with given export id."""
self._dbus_send_ganesha("RemoveExport", "uint16:%d" % xid)
def _add_rados_object_url_to_index(self, name):
"""Add an export RADOS object's URL to the RADOS URL index."""
# TODO(rraja): Ensure that the export index object's update is atomic,
# e.g., retry object update until the object version between the 'get'
# and 'put' operations remains the same.
index_data = self._get_rados_object(self.ganesha_rados_export_index)
want_url = "%url rados://{0}/{1}".format(
self.ganesha_rados_store_pool_name,
self._get_export_rados_object_name(name))
if index_data:
self._put_rados_object(
self.ganesha_rados_export_index,
'\n'.join([index_data, want_url])
)
else:
self._put_rados_object(self.ganesha_rados_export_index, want_url)
def _remove_rados_object_url_from_index(self, name):
"""Remove an export RADOS object's URL from the RADOS URL index."""
# TODO(rraja): Ensure that the export index object's update is atomic,
# e.g., retry object update until the object version between the 'get'
# and 'put' operations remains the same.
index_data = self._get_rados_object(self.ganesha_rados_export_index)
if not index_data:
return
unwanted_url = "%url rados://{0}/{1}".format(
self.ganesha_rados_store_pool_name,
self._get_export_rados_object_name(name))
rados_urls = index_data.split('\n')
new_rados_urls = [url for url in rados_urls if url != unwanted_url]
self._put_rados_object(self.ganesha_rados_export_index,
'\n'.join(new_rados_urls))
def add_export(self, name, confdict):
"""Add an export to Ganesha specified by confdict."""
xid = confdict["EXPORT"]["Export_Id"]
undos = []
_mkindex_called = False
try:
path = self._write_export(name, confdict)
if self.ganesha_rados_store_enable:
undos.append(lambda: self._rm_export_rados_object(name))
undos.append(lambda: self._rm_file(path))
else:
undos.append(lambda: self._rm_export_file(name))
self._dbus_send_ganesha("AddExport", "string:" + path,
"string:EXPORT(Export_Id=%d)" % xid)
undos.append(lambda: self._remove_export_dbus(xid))
if self.ganesha_rados_store_enable:
# Clean up temp export file used for the DBus call
self._rm_file(path)
self._add_rados_object_url_to_index(name)
else:
_mkindex_called = True
self._mkindex()
except exception.ProcessExecutionError as e:
for u in undos:
u()
if not self.ganesha_rados_store_enable and not _mkindex_called:
self._mkindex()
raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
cmd=e.cmd)
def update_export(self, name, confdict):
"""Update an export to Ganesha specified by confdict."""
xid = confdict["EXPORT"]["Export_Id"]
old_confdict = self._read_export(name)
path = self._write_export(name, confdict)
try:
self._dbus_send_ganesha("UpdateExport", "string:" + path,
"string:EXPORT(Export_Id=%d)" % xid)
except exception.ProcessExecutionError as e:
# Revert the export update.
self._write_export(name, old_confdict)
raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
cmd=e.cmd)
finally:
if self.ganesha_rados_store_enable:
# Clean up temp export file used for the DBus update call
self._rm_file(path)
def remove_export(self, name):
"""Remove an export from Ganesha."""
try:
confdict = self._read_export(name)
self._remove_export_dbus(confdict["EXPORT"]["Export_Id"])
finally:
if self.ganesha_rados_store_enable:
self._delete_rados_object(
self._get_export_rados_object_name(name))
self._remove_rados_object_url_from_index(name)
else:
self._rm_export_file(name)
self._mkindex()
def _get_rados_object(self, object_name):
"""Synchronously read data from Ceph RADOS object as a text string.
:param pool_name: name of the pool
:type pool_name: str
:param object_name: name of the object
:type object_name: str
:returns: tuple of object data and version
"""
pool_name = self.ganesha_rados_store_pool_name
ioctx = self.rados_client.open_ioctx(pool_name)
osd_max_write_size = self.rados_client.conf_get('osd_max_write_size')
max_size = int(osd_max_write_size) * 1024 * 1024
try:
bytes_read = ioctx.read(object_name, max_size)
if ((len(bytes_read) == max_size) and
(ioctx.read(object_name, 1, offset=max_size))):
LOG.warning("Size of object {0} exceeds '{1}' bytes "
"read".format(object_name, max_size))
finally:
ioctx.close()
bytes_read_decoded = bytes_read.decode('utf-8')
return bytes_read_decoded
def _put_rados_object(self, object_name, data):
"""Synchronously write data as a byte string in a Ceph RADOS object.
:param pool_name: name of the pool
:type pool_name: str
:param object_name: name of the object
:type object_name: str
:param data: data to write
:type data: bytes
"""
pool_name = self.ganesha_rados_store_pool_name
encoded_data = data.encode('utf-8')
ioctx = self.rados_client.open_ioctx(pool_name)
max_size = int(
self.rados_client.conf_get('osd_max_write_size')) * 1024 * 1024
if len(encoded_data) > max_size:
msg = ("Data to be written to object '{0}' exceeds "
"{1} bytes".format(object_name, max_size))
LOG.error(msg)
raise exception.ShareBackendException(msg)
try:
with rados.WriteOpCtx() as wop:
wop.write_full(encoded_data)
ioctx.operate_write_op(wop, object_name)
except rados.OSError as e:
LOG.error(e)
raise e
finally:
ioctx.close()
def _delete_rados_object(self, object_name):
pool_name = self.ganesha_rados_store_pool_name
ioctx = self.rados_client.open_ioctx(pool_name)
try:
ioctx.remove_object(object_name)
except rados.ObjectNotFound:
LOG.warning("Object '{0}' was already removed".format(object_name))
finally:
ioctx.close()
def get_export_id(self, bump=True):
"""Get a new export id."""
# XXX overflowing the export id (16 bit unsigned integer)
# is not handled
if self.ganesha_rados_store_enable:
# TODO(rraja): Ensure that the export counter object's update is
# atomic, e.g., retry object update until the object version
# between the 'get' and 'put' operations remains the same.
export_id = int(
self._get_rados_object(self.ganesha_rados_export_counter))
if not bump:
return export_id
export_id += 1
self._put_rados_object(self.ganesha_rados_export_counter,
str(export_id))
return export_id
else:
if bump:
bumpcode = 'update ganesha set value = value + 1;'
else:
bumpcode = ''
out = self.execute(
"sqlite3", self.ganesha_db_path,
bumpcode + 'select * from ganesha where key = "exportid";',
run_as_root=False)[0]
match = re.search(r'\Aexportid\|(\d+)$', out)
if not match:
LOG.error("Invalid export database on "
"Ganesha node %(tag)s: %(db)s.",
{'tag': self.tag, 'db': self.ganesha_db_path})
raise exception.InvalidSqliteDB()
return int(match.groups()[0])
def restart_service(self):
"""Restart the Ganesha service."""
self.execute("service", self.ganesha_service, "restart")
def reset_exports(self):
"""Delete all export files."""
self.execute('sh', '-c',
'rm -f %s/*.conf' % pipes.quote(self.ganesha_export_dir))
self._mkindex()
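# Illustrative add_export() call (the names and values below are assumptions):
# the confdict must carry an EXPORT block whose Export_Id is the id later used
# for the RemoveExport/UpdateExport DBus calls.
#
#   manager.add_export("share-42", {
#       "EXPORT": {
#           "Export_Id": 101,
#           "Path": "/shares/share-42",
#           "Pseudo": "/shares/share-42",
#       },
#   })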
"""Basal Area Simulation"""
#pylint: disable=no-member
# these functions could be improved: the increment is not autoregressive,
# so it can be calculated as a vector operation over the other arrays,
# and the actual values are then just the cumulative sums
# TODO: change initial age to data age
# TODO: change present density to density_at_data
import logging
import numpy as np
from pygypsy import basal_area_increment as incr
LOGGER = logging.getLogger(__name__)
# TODO: make this consistent with the other functions; it will need an option
# for making the species proportion constant rather than exponentially increasing with the factor
def sim_basal_area_aw(initial_age, site_index, density_at_bh_age,
basal_area_at_bh_age, sdf_aw, correction_factor, densities,
use_correction_factor_future=False,
stop_at_initial_age=True,
fix_proportion_and_density_to_initial_age=False,
species_proportion_at_bh_age=None, present_density=None,
force_use_densities=False):
'''Simulate basal area forward in time for White Aspen
It creates the trajectory of basal area from bhage up to the inventory year
given a correction factor that is being optimized
:param float initial_age: Clock that uses the oldest species as a reference
to become the stand age
:param float site_index: site index of species Aw
:param float basal_area_at_bh_age: basal area of Aw at breast height age
:param float SDF_Aw0: Stand Density Factor of species Aw
:param float density_at_bh_age: initial density of species Aw at breast height age
:param float correction_factor: correction factor that guarantees that trajectory
passes through data obtained with inventory
:param bool use_correction_factor_future: switch that determines whether the correction
factor will be used for the future years
:param bool stop_at_initial_age: switch that determines whether simulation
will stop at the date of the inventory or
will continue for the length of the densities object
:param bool fix_proportion_and_density_to_initial_age: if true, uses the
proportion and density from the initial age for all years of the
simulation. this is provided for consistency with a previous version which
did this implicitly when estimating the basal area factors
:param bool force_use_densities: ignore other parameters and just use the densities object for calculating basal
area increment
'''
max_age = initial_age if stop_at_initial_age else len(densities)
basal_area_aw_arr = np.zeros(max_age)
basal_area_temp = basal_area_at_bh_age
for i, spec_comp_dict in enumerate(densities[0: max_age]):
sc_factor = correction_factor \
if i < initial_age or use_correction_factor_future \
else 1
if force_use_densities:
years_from_bhage = spec_comp_dict['bhage_Aw']
spec_proportion = spec_comp_dict['SC_Aw']
present_density = spec_comp_dict['N_bh_AwT']
else:
years_from_bhage = spec_comp_dict['bhage_Aw']
# TODO: revise to match the funcs of the other species
spec_proportion = densities[initial_age]['SC_Aw'] \
if fix_proportion_and_density_to_initial_age \
else spec_comp_dict['SC_Aw']
present_density = densities[initial_age]['N_bh_AwT'] \
if fix_proportion_and_density_to_initial_age \
else spec_comp_dict['N_bh_AwT']
if density_at_bh_age > 0:
if years_from_bhage > 0:
spec_proportion = spec_proportion * sc_factor
basal_area_increment = incr.increment_basal_area_aw(
spec_proportion, site_index, present_density,
density_at_bh_age, years_from_bhage, basal_area_temp
)
basal_area_temp = basal_area_temp + basal_area_increment
new_basal_area = basal_area_temp
if new_basal_area < 0:
new_basal_area = 0
else:
new_basal_area = 0
else:
basal_area_temp = 0
new_basal_area = 0
basal_area_aw_arr[i] = new_basal_area
return basal_area_aw_arr
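# Illustrative shape of one `densities` element as consumed above and by the
# functions below (the numbers are made up): each entry carries, per species,
# the years since breast height age ('bhage_*'), the species proportion
# ('SC_*'), and the density in that year ('N_bh_*T'), e.g.
#
#   {'bhage_Aw': 12, 'SC_Aw': 0.6, 'N_bh_AwT': 1800.0,
#    'bhage_Sw': 10, 'SC_Sw': 0.3, 'N_bh_SwT': 600.0}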
def sim_basal_area_sb(initial_age, site_index, density_at_bh_age,
basal_area_at_bh_age, correction_factor, densities,
use_correction_factor_future=False,
stop_at_initial_age=True,
fix_proportion_and_density_to_initial_age=False,
species_proportion_at_bh_age=None, present_density=None,
force_use_densities=False):
'''Simulate basal area forward in time for Black Spruce
It creates the trajectory of basal area from bhage up to the inventory year
given a correction factor that is being optimized
:param float initial_age: Clock that uses the oldest species as a reference to become
the stand age
:param float initial_age_sb: species specific age counted independently
:param float site_index: site index of species Sb
:param float basal_area_at_bh_age: basal area of Sb at breast height age
:param float density_at_bh_age: initial density of species Sb at breast height age
:param float correction_factor: correction factor that guarantees that trajectory
passes through data obtained with inventory
:param bool use_correction_factor_future: switch that determines whether the correction
factor will be used for the future years
:param bool stop_at_initial_age: switch that determines whether simulation
will stop at the date of the inventory or
will continue for the length of the densities object
:param bool fix_proportion_and_density_to_initial_age: if true, uses the
proportion and density from the initial age for all years of the
simulation. this is provided for consistency with a previous version which
did this implicitly when estimating the basal area factors
:param bool force_use_densities: ignore other parameters and just use the densities object for calculating basal
area increment
'''
max_age = initial_age if stop_at_initial_age else len(densities)
basal_area_arr = np.zeros(max_age)
basal_area_temp = basal_area_at_bh_age
for i, spec_comp_dict in enumerate(densities[0: max_age]):
sc_factor = correction_factor \
if i < initial_age or use_correction_factor_future \
else 1
if force_use_densities:
years_from_bhage = spec_comp_dict['bhage_Aw']
spec_proportion = spec_comp_dict['SC_Aw']
present_density = spec_comp_dict['N_bh_AwT']
else:
years_from_bhage = spec_comp_dict['bhage_Sb']
if i==0:
spec_proportion = species_proportion_at_bh_age
elif i < initial_age:
pass
else:
spec_proportion = spec_comp_dict['SC_Sw']
present_density = present_density \
if i < initial_age or fix_proportion_and_density_to_initial_age \
else spec_comp_dict['N_bh_SbT']
if density_at_bh_age > 0:
if years_from_bhage > 0:
spec_proportion = spec_proportion * sc_factor
basal_area_increment = incr.increment_basal_area_sb(
spec_proportion, site_index, present_density,
density_at_bh_age, years_from_bhage, basal_area_temp
)
basal_area_temp = basal_area_temp + basal_area_increment
new_basal_area = basal_area_temp
if new_basal_area < 0:
new_basal_area = 0
else:
new_basal_area = 0
else:
basal_area_temp = 0
new_basal_area = 0
basal_area_arr[i] = new_basal_area
return basal_area_arr
def sim_basal_area_sw(initial_age, site_index, density_at_bh_age, sdf_aw,
sdf_pl, sdf_sb, basal_area_at_bh_age, correction_factor,
densities, use_correction_factor_future=False,
stop_at_initial_age=True,
fix_proportion_and_density_to_initial_age=False,
species_proportion_at_bh_age=None, present_density=None,
force_use_densities=False):
'''Simulate basal area forward in time for White Spruce
It creates the trajectory of basal area from bhage up to the inventory year
given a correction factor that is being optimized
:param float initial_age: Clock that uses the oldest species as a reference to
become the stand age
:param float site_index: site index of species Sw
:param float basal_area_at_bh_age: basal area of Sw at breast height age
:param float density_at_bh_age: initial density of species Sw at breast height age
:param float sdf_pl: Stand Density Factor of species Pl
:param float sdf_aw: Stand Density Factor of species Aw
:param float sdf_sb: Stand Density Factor of species Sb
:param float correction_factor: correction factor that guarantees that trajectory
passes through data obtained with inventory
:param bool use_correction_factor_future: switch that determines whether the correction
factor will be used for the future years
:param bool stop_at_initial_age: switch that determines whether simulation
will stop at the date of the inventory or
will continue for the length of the densities object
:param bool fix_proportion_and_density_to_initial_age: if true, uses the
proportion and density from the initial age for all years of the
simulation. this is provided for consistency with a previous version which
did this implicitly when estimating the basal area factors
:param bool force_use_densities: ignore other parameters and just use the densities object for calculating basal
area increment
'''
max_age = initial_age if stop_at_initial_age else len(densities)
basal_area_arr = np.zeros(max_age)
basal_area_temp = basal_area_at_bh_age
# some of the conditional assignments in this loop could be simplified by
# initializing them outside the loop and assigning them when the simulation
# passes the initial age. this is not done because we are striving to reach
# a place where these are more configurable and thus easier to test
for i, spec_comp_dict in enumerate(densities[0: max_age]):
sc_factor = correction_factor \
if i < initial_age or use_correction_factor_future\
else 1
if force_use_densities:
years_from_bhage = spec_comp_dict['bhage_Aw']
spec_proportion = spec_comp_dict['SC_Aw']
present_density = spec_comp_dict['N_bh_AwT']
else:
years_from_bhage = spec_comp_dict['bhage_Sw']
# the first time in this for loop, spec proportion must be assigned
if i==0:
spec_proportion = species_proportion_at_bh_age
# until the year of the data is reached we then do not reassign it, and the
# value of spec_proportion * sc_factor increases exponentially
elif i < initial_age:
pass
# future values use the estimated species proportion and it is constant
else:
spec_proportion = spec_comp_dict['SC_Sw']
present_density = present_density \
if i < initial_age or fix_proportion_and_density_to_initial_age \
else spec_comp_dict['N_bh_SwT']
if density_at_bh_age > 0:
if years_from_bhage > 0:
spec_proportion = spec_proportion * sc_factor
basal_area_increment = incr.increment_basal_area_sw(
spec_proportion, site_index, present_density,
density_at_bh_age, years_from_bhage, sdf_aw, sdf_pl,
sdf_sb, basal_area_temp
)
basal_area_temp = basal_area_temp + basal_area_increment
new_basal_area = basal_area_temp
if new_basal_area < 0:
new_basal_area = 0
else:
new_basal_area = 0
else:
basal_area_temp = 0
new_basal_area = 0
basal_area_arr[i] = new_basal_area
return basal_area_arr
def sim_basal_area_pl(initial_age, site_index, density_at_bh_age, sdf_aw,
sdf_sw, sdf_sb, basal_area_at_bh_age, correction_factor,
densities, use_correction_factor_future=False,
stop_at_initial_age=True,
fix_proportion_and_density_to_initial_age=False,
species_proportion_at_bh_age=None, present_density=None,
force_use_densities=False):
'''Simulate basal area forward in time for Lodgepole Pine
:param float initial_age: Clock that uses the oldest species as a reference to
become the stand age
:param float site_index: site index of species Pl
:param float basal_area_at_bh_age: basal area of Pl at breast height age
:param float density_at_bh_age: initial density of species Pl at breast height age
:param float sdf_sw: Stand Density Factor of species Sw
:param float sdf_aw: Stand Density Factor of species Aw
:param float sdf_sb: Stand Density Factor of species Sb
:param float correction_factor: correction factor that guarantees that trajectory
passes through data obtained with inventory
:param bool use_correction_factor_future: switch that determines whether the correction
factor will be used for the future years
:param bool stop_at_initial_age: switch that determines whether simulation
will stop at the date of the inventory or
will continue for the length of the densities object
:param bool fix_proportion_and_density_to_initial_age: if true, uses the
proportion and density from the initial age for all years of the
simulation. this is provided for consistency with a previous version which
did this implicitly when estimating the basal area factors
:param bool force_use_densities: ignore other parameters and just use the densities object for calculating basal
area increment
'''
max_age = initial_age if stop_at_initial_age else len(densities)
basal_area_arr = np.zeros(max_age)
basal_area_temp = basal_area_at_bh_age
for i, spec_comp_dict in enumerate(densities[0: max_age]):
sc_factor = correction_factor \
if i < initial_age or use_correction_factor_future \
else 1
if force_use_densities:
years_from_bhage = spec_comp_dict['bhage_Aw']
spec_proportion = spec_comp_dict['SC_Aw']
present_density = spec_comp_dict['N_bh_AwT']
else:
years_from_bhage = spec_comp_dict['bhage_Pl']
# the first time in this for loop, spec proportion must be assigned
if i==0:
spec_proportion = species_proportion_at_bh_age
# until the year of the data is reached we then do not reassign it, and the
# value of spec_proportion * sc_factor increases exponentially
elif i < initial_age:
pass
# future values use the estimated species proportion and it is constant
else:
spec_proportion = spec_comp_dict['SC_Pl']
present_density = present_density \
if i < initial_age or fix_proportion_and_density_to_initial_age \
else spec_comp_dict['N_bh_PlT']
if density_at_bh_age > 0:
if years_from_bhage > 0:
# factor empirically determined to work better when multiplied
# with whole increment
basal_area_increment = sc_factor \
* incr.increment_basal_area_pl(
spec_proportion, site_index,
present_density, density_at_bh_age,
years_from_bhage, sdf_aw, sdf_sw,
sdf_sb, basal_area_temp
)
basal_area_temp = basal_area_temp + basal_area_increment
new_basal_area = basal_area_temp
if new_basal_area < 0:
new_basal_area = 0
else:
new_basal_area = 0
else:
basal_area_temp = 0
new_basal_area = 0
basal_area_arr[i] = new_basal_area
return basal_area_arr
'''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import argparse
import unittest
import sys
import time
import statistics
import collections
import threading
import random
import tempfile
import shutil
import logging
import cProfile
from hypergolix.service import _hgx_server
from hypergolix.app import app_core
from hypergolix.config import Config
from hypergolix import HGXLink
from hypergolix.utils import Aengel
from hypergolix.objproxy import ProxyBase
# ###############################################
# Fixtures
# ###############################################
def make_fixtures(debug, hgx_root_1, hgx_root_2):
''' Makes fixtures for the test.
hgx_root_# is the root app directory, used by config. It contains
the cache directory.
'''
server_port = 6022
aengel = Aengel()
with Config(hgx_root_1) as config:
config.set_remote('127.0.0.1', server_port, False)
config.ipc_port = 6023
with Config(hgx_root_2) as config:
config.set_remote('127.0.0.1', server_port, False)
config.ipc_port = 6024
hgxserver = _hgx_server(
host = '127.0.0.1',
port = server_port,
cache_dir = None,
debug = debug,
traceur = False,
aengel = aengel,
)
# localhost:6023, no tls
hgxraz = app_core(
user_id = None,
password = 'hello world',
startup_logger = None,
aengel = aengel,
_scrypt_hardness = 1024,
hgx_root = hgx_root_1,
enable_logs = False
)
# localhost:6024, no tls
hgxdes = app_core(
user_id = None,
password = 'hello world',
startup_logger = None,
aengel = aengel,
_scrypt_hardness = 1024,
hgx_root = hgx_root_2,
enable_logs = False
)
return hgxserver, hgxraz, hgxdes, aengel
# ###############################################
# Testing
# ###############################################
def make_tests(iterations, debug, raz, des, aengel):
# inner_profiler = cProfile.Profile()
# outer_profiler = cProfile.Profile()
timer = collections.deque([0,0], maxlen=2)
# Declare API
request_api = bytes(64) + b'\x01'
response_api = bytes(64) + b'\x02'
# Create an object collection
requests_outgoing = collections.deque(maxlen=10)
requests_incoming = collections.deque(maxlen=10)
responses_incoming = collections.deque(maxlen=10)
responses_outgoing = collections.deque(maxlen=10)
# This all handles round-trip responsing.
roundtrip_flag = threading.Event()
roundtrip_check = collections.deque()
def rt_notifier(obj):
timer.appendleft(time.monotonic())
roundtrip_flag.set()
roundtrip_check.append(obj._proxy_3141592)
def rt_waiter(timeout=1.5):
result = roundtrip_flag.wait(timeout)
roundtrip_flag.clear()
return result
def rt_checker(msg):
return roundtrip_check.pop() == msg
class DemoReplicatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.timer = timer
cls.razlink = HGXLink(
ipc_port = 6023,
debug = debug,
aengel = aengel
)
cls.deslink = HGXLink(
ipc_port = 6024,
debug = debug,
aengel = aengel
)
cls.raz = cls.razlink.whoami
cls.des = cls.deslink.whoami
def setUp(self):
self.assertNotEqual(self.raz, self.des)
self.razlink.get_new_token_threadsafe()
self.deslink.get_new_token_threadsafe()
# All requests go from Raz -> Des
def make_request(self, msg):
obj = self.razlink.new_threadsafe(
cls = ProxyBase,
state = msg,
dynamic = True,
api_id = request_api
)
time.sleep(.1)
obj.hgx_share_threadsafe(self.des)
return obj
# All responses go from Des -> Raz
def request_handler(self, obj):
# print('Receiving request.')
# Just to prevent GC
requests_incoming.appendleft(obj)
reply = self.deslink.new_threadsafe(
cls = ProxyBase,
state = obj,
dynamic = True,
api_id = response_api
)
def state_mirror(source_obj):
# print('Mirroring state.')
reply.hgx_state = source_obj
# inner_profiler.enable()
try:
reply.hgx_push_threadsafe()
finally:
# inner_profiler.disable()
pass
obj.hgx_register_callback_threadsafe(state_mirror)
reply.hgx_share_threadsafe(recipient=self.raz)
# Just to prevent GC
responses_outgoing.appendleft(reply)
# All requests go from Raz -> Des. All responses go from Des -> Raz.
def response_handler(self, obj):
# print('Receiving response.')
obj.hgx_register_callback_threadsafe(rt_notifier)
# Just to prevent GC
responses_incoming.appendleft(obj)
def test_app(self):
''' Yknow, pretend to make an app and shit. Basically, an
automated version of echo-101/demo-4.py
'''
self.razlink.register_share_handler_threadsafe(
response_api,
ProxyBase,
self.response_handler
)
self.deslink.register_share_handler_threadsafe(
request_api,
ProxyBase,
self.request_handler
)
msg = b'hello'
obj = self.make_request(msg)
time.sleep(1.5)
times = []
logging.getLogger('').critical(
'########## Handshakes complete! Starting tests. ##########'
)
for ii in range(iterations):
with self.subTest(i=ii):
msg = ''.join([chr(random.randint(0,255)) for i in range(0,25)])
msg = msg.encode('utf-8')
# Prep the object with an update
obj.hgx_state = msg
# Zero out the timer and enable the profiler
# inner_profiler.enable()
try:
self.timer.extendleft([0,0,time.monotonic()])
# Call an update
obj.hgx_push_threadsafe()
# Wait for response
success = rt_waiter()
# Stop the timer
times.append(self.timer[0] - self.timer[1])
finally:
# Stop the profiler
# inner_profiler.disable()
pass
# Check for success
self.assertTrue(success)
self.assertTrue(rt_checker(msg))
# Max update frequencies can cause problems yo
time.sleep(.1)
print('---------------------------------------------------')
print('Max time: ', max(times))
print('Min time: ', min(times))
print('Mean time:', statistics.mean(times))
print('Med time: ', statistics.median(times))
print('\n')
# print('---------------------------------------------------')
# print('Cropped profile')
# inner_profiler.print_stats('cumulative')
# print('\n')
return DemoReplicatorTest
# ###############################################
# Operations
# ###############################################
def ingest_args():
''' Register argparser and parse (and return) args.
'''
parser = argparse.ArgumentParser(description='Hypergolix trashtest.')
parser.add_argument(
'hgxroot',
action = 'store',
default = None,
nargs = 2,
help = 'Specify the root directories for the two configurations.'
)
parser.add_argument(
'--iters',
action = 'store',
default = 10,
type = int,
help = 'How many iterations to run?',
)
parser.add_argument(
'--debug',
action = 'store_true',
help = 'Set debug mode. Automatically sets verbosity to debug.'
)
parser.add_argument(
'--logdir',
action = 'store',
default = None,
type = str,
help = 'Log to a specified directory, relative to current path.',
)
parser.add_argument(
'--verbosity',
action = 'store',
default = 'warning',
type = str,
help = 'Specify the logging level. '
'"debug" -> most verbose, '
'"info" -> somewhat verbose, '
'"warning" -> default python verbosity, '
'"error" -> quiet.',
)
parser.add_argument(
'--traceur',
action = 'store',
default = None,
type = float,
help = 'Set traceur mode, using the passed float as a stack tracing '
'interval for deadlock detection. Must be a positive number, '
'or it will be ignored.'
)
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
return args
def configure_unified_logger(args):
''' If we want to run a single unified logger for the entire test,
set that up here. Note that these logs will be in addition to those
created naturally during hypergolix operation (which will live
within each app's respective hgx_root)
'''
from hypergolix import logutils
if args.logdir:
logutils.autoconfig(
tofile = True,
logdirname = args.logdir,
loglevel = args.verbosity
)
else:
logutils.autoconfig(
tofile = False,
loglevel = args.verbosity
)
if __name__ == '__main__':
args = ingest_args()
# Dammit unittest using argparse
sys.argv[1:] = args.unittest_args
# Configure logs first so any fixturing errors are handled.
configure_unified_logger(args)
def do_test(cache_dir_a, cache_dir_b):
# Okay, let's set up the tests
server, raz, des, aengel = make_fixtures(
args.debug,
cache_dir_a,
cache_dir_b
)
apptest = make_tests(args.iters, args.debug, raz, des, aengel)
# And finally, run them
suite = unittest.TestSuite()
suite.addTest(apptest('test_app'))
unittest.TextTestRunner().run(suite)
logging.getLogger('').critical(
'########## Test suite complete; closing down. ##########'
)
raz[1].wait_close_safe()
des[1].wait_close_safe()
# We're going to copy all of the vectors into a temporary directory, so we
# don't accidentally mutate them.
with tempfile.TemporaryDirectory() as temp_root:
cache_dir_a = temp_root + '/temp_a'
cache_dir_b = temp_root + '/temp_b'
shutil.copytree(args.hgxroot[0], cache_dir_a)
shutil.copytree(args.hgxroot[1], cache_dir_b)
        # Clip to a minimum stack-tracing interval of 0.1 s
if args.traceur is not None:
trace_interval = max([args.traceur, 0.1])
print('Running with trace.')
from hypergolix.utils import TraceLogger
with TraceLogger(trace_interval):
do_test(cache_dir_a, cache_dir_b)
else:
do_test(cache_dir_a, cache_dir_b)
# Wait a moment before cleanup so everything can exit
time.sleep(1)
|
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
#
# Convert I2C host format to SVG
import logging as log
from collections import namedtuple
import i2csvg.i2csvg_data as sdata
# validating version of int(x, 0)
# returns int value, error flag
# if error flag is True value will be zero
def check_int(x, err_prefix):
if isinstance(x, int):
return x, False
if len(x) == 0:
log.error(err_prefix + " Zero length string")
return 0, True
if x[0] == '0' and len(x) > 2:
if x[1] in 'bB':
validch = '01'
elif x[1] in 'oO':
validch = '01234567'
elif x[1] in 'xX':
validch = '0123456789abcdefABCDEF'
else:
            log.error(err_prefix +
                      ": int must start with a digit, 0b, 0B, 0o, 0O, 0x or 0X")
return 0, True
for c in x[2:]:
if not c in validch:
log.error(err_prefix + ": Bad character " + c + " in " + x)
return 0, True
else:
if not x.isdecimal():
log.error(err_prefix + ": Number not valid int " + x)
            return 0, True
return int(x, 0), False
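# Examples of the (value, error) pairs returned above:
#   check_int('0x1f', 'FIFO value')  -> (31, False)
#   check_int(7, 'FIFO value')       -> (7, False)   # ints pass straight through
#   check_int('0xZG', 'FIFO value')  -> (0, True)    # bad hex digit, error logged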
def check_single(line, char, fullline, err):
''' Check for character char in input line
Return True if there is one (or more)
    Propagate err, or force err to True if there is more than one char
'''
res = False
if char in line:
res = True
if line.count(char) > 1:
log.warning('Multiple ' + char + ' in line ' + fullline)
err = True
return res, err
I2cOp = namedtuple('I2cOp',
'read rcont start stop nackok mvalue adr size fbyte tag')
def check_and_size(iic, line):
''' Check I2C Op for validity and return size in bits
'''
err = False
if iic.start and iic.read:
log.error('Start and Read found in ' + line)
err = True
if iic.rcont and not iic.read:
log.error('RCont without Read found in ' + line)
err = True
# documentation says R+C and P is not permitted, but I think it
# is needed for protocols where the last read data is ACKed?
# (these don't match I2C or SMBus spec but exist in the wild)
size = 0
if iic.start:
size += 1
if iic.stop:
size += 1
if iic.read:
# multi is D0, D1, ..., Dn-1 so 3 byte/acks and a 1 bit squiggle
# regular read is one byte/ack per
size += 9 * 3 + 1 if iic.mvalue else 9 * iic.fbyte
else:
# write data is one byte/ack
size += 9
# rcont, nackok just affect the polarity of the final ack bit
# adr just affects how the write data is drawn
return size, err
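# Worked size examples, following the rules above:
#   start flag + one written byte:          1 + 9      = 10 bit-widths of drawing
#   read of 2 bytes (fbyte == 2, no start): 9 * 2      = 18
#   multi-byte read (mvalue == True):       9 * 3 + 1  = 28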
def parse_i2c_fifodata(line):
''' Parse input line of I2C FDATA fifo and convert to internal type
Line is usually 0x + the hex value written to the register
But could be binary (0b), octal (0o) or decimal
'''
fifodata, err = check_int(line, 'FIFO value')
# bit values here must match the register definition!
ress = (fifodata & 0x0100) != 0
resp = (fifodata & 0x0200) != 0
resr = (fifodata & 0x0400) != 0
resc = (fifodata & 0x0800) != 0
resn = (fifodata & 0x1000) != 0
resb = fifodata & 0xff
resm = False # only used in descriptive case
resa = False # only used in descriptive case
tmpr = I2cOp(resr, resc, ress, resp, resn, resm, resa, 0, resb, None)
size, serr = check_and_size(tmpr, line)
if serr:
err = True
return I2cOp(resr, resc, ress, resp, resn, resm, resa, size, resb,
None), err
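# Example decode (bit layout as masked above: 0x0100 start, 0x0200 stop, 0x0400 read,
# 0x0800 rcont, 0x1000 nackok, low byte = data):
#   parse_i2c_fifodata('0x0157')
#   -> (I2cOp(read=False, rcont=False, start=True, stop=False, nackok=False,
#             mvalue=False, adr=False, size=10, fbyte=0x57, tag=None), False)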
def parse_i2c_code(line):
    ''' Parse input line in the flag-coded format and convert to internal type
Line is coded with flags and an 8-bit data value
S - Start flag, P - stop flag,
R - read flag, C - continue read flag, N - NackOk flag
followed by the data byte
Special cases:
M - indicates multiple bytes instead of data byte
A - followed by 0 or 1 address/direction or 2 address/data
Data value in quotes is a tag
'''
resr, resc, ress, resp, resn = False, False, False, False, False
resm, resa, resb = False, False, 0
err = False
firstval = 0
for i in line:
if i.isdigit() or i == "'":
break
firstval += 1
# Will only check the flags section, so no concern about hex digits
ress, err = check_single(line[:firstval], 'S', line, err)
resp, err = check_single(line[:firstval], 'P', line, err)
resr, err = check_single(line[:firstval], 'R', line, err)
resc, err = check_single(line[:firstval], 'C', line, err)
resn, err = check_single(line[:firstval], 'N', line, err)
# these two are formally part of the value but parse like flags
resm, err = check_single(line[:firstval], 'M', line, err)
resa, err = check_single(line[:firstval], 'A', line, err)
if firstval == len(line):
if not resm:
err = True
log.error('No value found in ' + line)
rest = None
else:
if resm:
err = True
log.error('Found M and value in ' + line)
rest = None
resb = 0
elif line[firstval] == "'":
rest = line[firstval + 1:-1]
resb = 0
else:
rest = None
resb, verr = check_int(line[firstval:],
'Value in ' + line + ' ' + str(firstval))
if verr:
err = True
if resb < 0 or resb > 255 or (resa and resb > 2):
log.error('Value out of range in ' + line)
resb = 0
err = True
tmpr = I2cOp(resr, resc, ress, resp, resn, resm, resa, 0, resb, rest)
size, serr = check_and_size(tmpr, line)
if serr:
err = True
return I2cOp(resr, resc, ress, resp, resn, resm, resa, size, resb,
rest), err
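# Example decode of the flag-coded form (start flag followed by one written byte):
#   parse_i2c_code('S 0x51')
#   -> (I2cOp(read=False, rcont=False, start=True, stop=False, nackok=False,
#             mvalue=False, adr=False, size=10, fbyte=0x51, tag=None), False)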
def parse_file(infile, fifodata=False, prefix=None):
''' Parse a file of I2C data
fifodata indicates if the data is a dump from writes to FDATA fifo
prefix is a prefix on valid lines and will be stripped
lines without the prefix are ignored
Returns list of I2cOps or str (for titles)
'''
transaction = []
errors = 0
firstline = True
for line in infile:
if prefix:
if not line.startswith(prefix):
continue
line = line[len(prefix):]
if len(line) == 0 or line.isspace() or line[0] == '#':
continue
line = line.lstrip().rstrip()
if line[0] == 'T':
transaction.append(line[1:].lstrip())
continue
schar = ','
if fifodata and not ',' in line:
            # fifodata could also be whitespace separated
schar = None
for sline in line.split(sep=schar):
if fifodata:
t, err = parse_i2c_fifodata(sline)
else:
t, err = parse_i2c_code(sline)
if err:
errors += 1
else:
transaction.append(t)
if errors > 0:
log.error('Found ' + str(errors) + ' errors in input')
return transaction
def output_debug(outfile, t, term):
for tr in t:
outfile.write(str(tr) + term)
def text_element(tr, term, titles):
if isinstance(tr, str):
if titles:
return 'T ' + tr + term
return ''
flags = 'S' if tr.start else '.'
flags += 'P' if tr.stop else '.'
flags += 'R' if tr.read else '.'
flags += 'C' if tr.rcont else '.'
flags += 'N' if tr.nackok else '.'
# mvalue and adr are only for drawing, but can propagate in value
if tr.adr:
val = 'A' + str(tr.fbyte)
else:
if tr.tag:
val = "'" + tr.tag + "'"
else:
val = 'M' if tr.mvalue else hex(tr.fbyte)
return flags + ' ' + val + term
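# Example: a start + write of 0x51 renders as 'S.... 0x51' followed by term
# (flag order is S P R C N, with '.' where a flag is clear).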
def output_text(outfile, transactions, term, titles=True):
for tr in transactions:
text = text_element(tr, term, titles)
if text:
outfile.write(text)
# use will place a defined group at the given x,y
def svg_use(item, x, y):
return ' <use href="#' + item + '" x="' + str(x) + \
'" y="' + str(y) + '" />\n'
# a byte write is a byte of data from the host and an ack from the device
def svg_wrbyte(xpos, ypos, nok, label):
rtext = svg_use('hbyte', xpos, ypos)
rtext += ' <text x="' + str(xpos + (sdata.bytew / 2))
rtext += '" y="' + str(ypos + sdata.txty) + '">\n'
rtext += label
rtext += '</text>\n'
xpos += sdata.bytew
if nok:
rtext += svg_use('norackd', xpos, ypos)
else:
rtext += svg_use('ackd', xpos, ypos)
xpos += sdata.bitw
return rtext, xpos
# a byte read is a byte of data from the device and an ack/nack from the host
def svg_rdbyte(xpos, ypos, ack, label):
rtext = svg_use('dbyte', xpos, ypos)
rtext += ' <text x="' + str(xpos + (sdata.bytew / 2))
rtext += '" y="' + str(ypos + sdata.txty) + '">\n'
rtext += label
rtext += '</text>\n'
xpos += sdata.bytew
rtext += svg_use(ack, xpos, ypos)
xpos += sdata.bitw
return rtext, xpos
def svg_element(tr, xpos, ypos):
etext = ''
if tr.start:
etext += svg_use('start', xpos, ypos)
xpos += sdata.bitw
if tr.read and not tr.mvalue:
for n in range(0, 1 if tr.tag else tr.fbyte):
acktype = 'ackh' if (n < tr.fbyte - 1) or tr.rcont else 'nackh'
t, xpos = svg_rdbyte(xpos, ypos, acktype,
tr.tag if tr.tag else 'D' + str(n + 1))
etext += t
if xpos > sdata.wrap and (n < tr.fbyte - 1):
xpos = sdata.cindent
ypos += sdata.linesep
elif tr.read and tr.mvalue:
# need space to draw three byte+ack and a break squiggle
if (xpos + (sdata.bytew + sdata.bitw) * 3 + sdata.bitw) > sdata.wrap:
xpos = sdata.cindent
ypos += sdata.linesep
t, xpos = svg_rdbyte(xpos, ypos, 'ackh', 'Data1')
etext += t
t, xpos = svg_rdbyte(xpos, ypos, 'ackh', 'Data2')
etext += t
etext += svg_use('skip', xpos, ypos)
xpos += sdata.bitw
t, xpos = svg_rdbyte(xpos, ypos, 'nackh', 'DataN')
etext += t
elif tr.adr:
etext += svg_use('adr' + str(tr.fbyte), xpos, ypos)
xpos += sdata.bytew
etext += svg_use('ackd', xpos, ypos)
xpos += sdata.bitw
elif tr.mvalue:
# need space to draw three byte+ack and a break squiggle
if (xpos + (sdata.bytew + sdata.bitw) * 3 + sdata.bitw) > sdata.wrap:
xpos = sdata.cindent
ypos += sdata.linesep
t, xpos = svg_wrbyte(xpos, ypos, tr.nackok, 'Data1')
etext += t
t, xpos = svg_wrbyte(xpos, ypos, tr.nackok, 'Data2')
etext += t
etext += svg_use('skip', xpos, ypos)
xpos += sdata.bitw
t, xpos = svg_wrbyte(xpos, ypos, tr.nackok, 'DataN')
etext += t
elif tr.start: # and not tr.adr by position in elif
etext += svg_use('adr' + str(tr.fbyte & 1), xpos, ypos)
etext += ' <text x="' + str(xpos + 115)
etext += '" y="' + str(ypos + sdata.txty) + '">' + hex(tr.fbyte >> 1)
etext += '</text>\n'
xpos += sdata.bytew
etext += svg_use('ackd', xpos, ypos)
xpos += sdata.bitw
else:
t, xpos = svg_wrbyte(xpos, ypos, tr.nackok,
tr.tag if tr.tag else hex(tr.fbyte))
etext += t
if tr.stop:
etext += svg_use('pstop', xpos, ypos)
xpos += sdata.bitw
return etext, xpos, ypos
# since they are referenced by href name the style and defs only
# go in the first svg in a file
first_svg = True
def out_svg(outfile, svg, ypos, svgtext):
global first_svg
outfile.write('<svg\n' + sdata.svgtag_consts)
outfile.write('viewBox="0 0 ' + str(sdata.svgw) + ' ' +
str(ypos + sdata.linesep + 8) + '">\n')
if (first_svg):
outfile.write(sdata.svgstyle + sdata.svg_defs)
first_svg = False
outfile.write(svg)
if svgtext:
outfile.write('<text x="10" y="' + str(ypos + sdata.linesep + 3))
outfile.write('" class="tt">' + svgtext[:-2] + '</text>\n')
outfile.write('</svg>\n')
def output_svg(outfile, transactions, title):
xpos = 0
ypos = 0
svg = ''
svgtext = ''
for tr in transactions:
if isinstance(tr, str):
if svg:
out_svg(outfile, svg, ypos, svgtext)
if title:
outfile.write('<h2>' + tr + '</h2>\n')
xpos = 0
ypos = 0
svg = ''
svgtext = ''
continue
if xpos > sdata.wrap:
xpos = sdata.cindent
ypos += sdata.linesep
trsvg, xpos, ypos = svg_element(tr, xpos, ypos)
svgtext += text_element(tr, ', ', False)
svg += trsvg
out_svg(outfile, svg, ypos, svgtext)
|
|
from __future__ import absolute_import, unicode_literals
import warnings
from functools import wraps
from itertools import count
from django.db import connection
try:
from django.db import connections, router
except ImportError: # pre-Django 1.2
connections = router = None # noqa
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from celery.utils.timeutils import maybe_timedelta
from .db import commit_on_success, get_queryset, rollback_unless_managed
from .utils import now
class TxIsolationWarning(UserWarning):
pass
def transaction_retry(max_retries=1):
"""Decorator for methods doing database operations.
If the database operation fails, it will retry the operation
at most ``max_retries`` times.
"""
def _outer(fun):
@wraps(fun)
def _inner(*args, **kwargs):
_max_retries = kwargs.pop('exception_retry_count', max_retries)
for retries in count(0):
try:
return fun(*args, **kwargs)
except Exception: # pragma: no cover
# Depending on the database backend used we can experience
# various exceptions. E.g. psycopg2 raises an exception
# if some operation breaks the transaction, so saving
# the task result won't be possible until we rollback
# the transaction.
if retries >= _max_retries:
raise
try:
rollback_unless_managed()
except Exception:
pass
return _inner
return _outer
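# Illustrative use (this mirrors how the managers below apply the decorator):
#
#   @transaction_retry(max_retries=2)
#   def store_result(self, *args, **kwargs):
#       ...  # ORM work that may be retried after a rollback on failure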
def update_model_with_dict(obj, fields):
    for attr_name, attr_value in fields.items():
        setattr(obj, attr_name, attr_value)
    obj.save()
    return obj
class ExtendedQuerySet(QuerySet):
def update_or_create(self, **kwargs):
obj, created = self.get_or_create(**kwargs)
if not created:
fields = dict(kwargs.pop('defaults', {}))
fields.update(kwargs)
update_model_with_dict(obj, fields)
return obj, created
class ExtendedManager(models.Manager):
def get_queryset(self):
return ExtendedQuerySet(self.model)
get_query_set = get_queryset # Pre django 1.6
def update_or_create(self, **kwargs):
return get_queryset(self).update_or_create(**kwargs)
def connection_for_write(self):
if connections:
return connections[router.db_for_write(self.model)]
return connection
def connection_for_read(self):
if connections:
return connections[self.db]
return connection
def current_engine(self):
try:
return settings.DATABASES[self.db]['ENGINE']
except AttributeError:
return settings.DATABASE_ENGINE
class ResultManager(ExtendedManager):
def get_all_expired(self, expires):
"""Get all expired task results."""
return self.filter(date_done__lt=now() - maybe_timedelta(expires))
def delete_expired(self, expires):
"""Delete all expired taskset results."""
meta = self.model._meta
with commit_on_success():
self.get_all_expired(expires).update(hidden=True)
cursor = self.connection_for_write().cursor()
cursor.execute(
'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta),
(True, ),
)
class PeriodicTaskManager(ExtendedManager):
def enabled(self):
return self.filter(enabled=True)
class TaskManager(ResultManager):
"""Manager for :class:`celery.models.Task` models."""
_last_id = None
def get_task(self, task_id):
"""Get task meta for task by ``task_id``.
:keyword exception_retry_count: How many times to retry by
transaction rollback on exception. This could theoretically
happen in a race condition if another worker is trying to
create the same task. The default is to retry once.
"""
try:
return self.get(task_id=task_id)
except self.model.DoesNotExist:
if self._last_id == task_id:
self.warn_if_repeatable_read()
self._last_id = task_id
return self.model(task_id=task_id)
@transaction_retry(max_retries=2)
def store_result(self, task_id, result, status,
traceback=None, children=None):
"""Store the result and status of a task.
:param task_id: task id
:param result: The return value of the task, or an exception
instance raised by the task.
:param status: Task status. See
:meth:`celery.result.AsyncResult.get_status` for a list of
possible status values.
:keyword traceback: The traceback at the point of exception (if the
task failed).
:keyword children: List of serialized results of subtasks
of this task.
:keyword exception_retry_count: How many times to retry by
transaction rollback on exception. This could theoretically
happen in a race condition if another worker is trying to
create the same task. The default is to retry twice.
"""
return self.update_or_create(task_id=task_id,
defaults={'status': status,
'result': result,
'traceback': traceback,
'meta': {'children': children}})
def warn_if_repeatable_read(self):
if 'mysql' in self.current_engine().lower():
cursor = self.connection_for_read().cursor()
if cursor.execute('SELECT @@tx_isolation'):
isolation = cursor.fetchone()[0]
if isolation == 'REPEATABLE-READ':
warnings.warn(TxIsolationWarning(
'Polling results with transaction isolation level '
'repeatable-read within the same transaction '
'may give outdated results. Be sure to commit the '
'transaction for each poll iteration.'))
class TaskSetManager(ResultManager):
"""Manager for :class:`celery.models.TaskSet` models."""
def restore_taskset(self, taskset_id):
"""Get the async result instance by taskset id."""
try:
return self.get(taskset_id=taskset_id)
except self.model.DoesNotExist:
pass
def delete_taskset(self, taskset_id):
"""Delete a saved taskset result."""
s = self.restore_taskset(taskset_id)
if s:
s.delete()
@transaction_retry(max_retries=2)
def store_result(self, taskset_id, result):
"""Store the async result instance of a taskset.
:param taskset_id: task set id
:param result: The return value of the taskset
"""
return self.update_or_create(taskset_id=taskset_id,
defaults={'result': result})
class TaskStateManager(ExtendedManager):
def active(self):
return self.filter(hidden=False)
def expired(self, states, expires, nowfun=now):
return self.filter(state__in=states,
tstamp__lte=nowfun() - maybe_timedelta(expires))
def expire_by_states(self, states, expires):
if expires is not None:
return self.expired(states, expires).update(hidden=True)
def purge(self):
with commit_on_success():
self.model.objects.filter(hidden=True).delete()
|
|
# -*- coding: utf-8 -*-
from classytags.arguments import Argument, MultiValueArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from classytags.parser import Parser
from cms.models import Page
from cms.plugin_rendering import render_plugins, render_placeholder
from cms.plugins.utils import get_plugins
from cms.utils import get_language_from_request
from cms.utils.moderator import get_cmsplugin_queryset, get_page_queryset
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from itertools import chain
import operator
import re
register = template.Library()
def get_site_id(site):
if site:
if isinstance(site, Site):
site_id = site.id
elif isinstance(site, int):
site_id = site
else:
site_id = settings.SITE_ID
else:
site_id = settings.SITE_ID
return site_id
def has_permission(page, request):
return page.has_change_permission(request)
register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
page_key = _clean_key(page_key)
return name+'__page_lookup:'+page_key+'_site:'+str(site_id)+'_lang:'+str(lang)
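# Example key for a numeric pk lookup (values are illustrative only):
#   _get_cache_key('page_url', 42, 'en', 1) -> 'page_url__page_lookup:42_site:1_lang:en'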
def _get_page_by_untyped_arg(page_lookup, request, site_id):
"""
The `page_lookup` argument can be of any of the following types:
- Integer: interpreted as `pk` of the desired page
- String: interpreted as `reverse_id` of the desired page
- `dict`: a dictionary containing keyword arguments to find the desired page
(for instance: `{'pk': 1}`)
- `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
- `None`: the current page will be used
"""
if page_lookup is None:
return request.current_page
if isinstance(page_lookup, Page):
return page_lookup
if isinstance(page_lookup, basestring):
page_lookup = {'reverse_id': page_lookup}
elif isinstance(page_lookup, (int, long)):
page_lookup = {'pk': page_lookup}
elif not isinstance(page_lookup, dict):
raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
page_lookup.update({'site': site_id})
try:
return get_page_queryset(request).get(**page_lookup)
except Page.DoesNotExist:
site = Site.objects.get_current()
subject = _('Page not found on %(domain)s') % {'domain':site.domain}
body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
"The URL of the request was: http://%(host)s%(path)s") \
% {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
if settings.DEBUG:
raise Page.DoesNotExist(body)
else:
mail_managers(subject, body, fail_silently=True)
return None
class PageUrl(InclusionTag):
template = 'cms/content.html'
name = 'page_url'
options = Options(
Argument('page_lookup'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, context, page_lookup, lang, site):
site_id = get_site_id(site)
request = context.get('request', False)
if not request:
return {'content': ''}
if request.current_page == "dummy":
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
cache_key = _get_cache_key('page_url', page_lookup, lang, site_id)+'_type:absolute_url'
url = cache.get(cache_key)
if not url:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
url = page.get_absolute_url(language=lang)
cache.set(cache_key, url, settings.CMS_CONTENT_CACHE_DURATION)
if url:
return {'content': url}
return {'content': ''}
register.tag(PageUrl)
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
placeholder_cache = getattr(current_page, '_tmp_placeholders_cache', {})
if page.pk in placeholder_cache:
return placeholder_cache[page.pk].get(name, None)
placeholder_cache[page.pk] = {}
placeholders = page.placeholders.all()
for placeholder in placeholders:
placeholder_cache[page.pk][placeholder.slot] = placeholder
current_page._tmp_placeholders_cache = placeholder_cache
return placeholder_cache[page.pk].get(name, None)
def get_placeholder_content(context, request, current_page, name, inherit):
pages = [current_page]
if inherit:
pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
for page in pages:
placeholder = _get_placeholder(current_page, page, context, name)
if placeholder is None:
continue
if not get_plugins(request, placeholder):
continue
if hasattr(request, 'placeholder_media'):
request.placeholder_media = reduce(operator.add, [request.placeholder_media, placeholder.get_media(request, context)])
#request.placeholder_media += placeholder.get_media(request, context)
content = render_placeholder(placeholder, context, name)
if content:
return content
placeholder = _get_placeholder(current_page, current_page, context, name)
return render_placeholder(placeholder, context, name)
class PlaceholderParser(Parser):
def parse_blocks(self):
for bit in getattr(self.kwargs['extra_bits'], 'value', self.kwargs['extra_bits']):
if getattr(bit, 'value', bit.var.value) == 'or':
return super(PlaceholderParser, self).parse_blocks()
return
class PlaceholderOptions(Options):
def get_parser_class(self):
return PlaceholderParser
class Placeholder(Tag):
"""
This template node is used to output page content and
is also used in the admin to dynamically generate input fields.
eg: {% placeholder "placeholder_name" %}
{% placeholder "sidebar" inherit %}
{% placeholder "footer" inherit or %}
<a href="/about/">About us</a>
{% endplaceholder %}
Keyword arguments:
name -- the name of the placeholder
width -- additional width attribute (integer) which gets added to the plugin context
        (deprecated, use `{% with 320 as width %}{% placeholder "foo" %}{% endwith %}`)
inherit -- optional argument which if given will result in inheriting
the content of the placeholder with the same name on parent pages
or -- optional argument which if given will make the template tag a block
tag whose content is shown if the placeholder is empty
"""
name = 'placeholder'
options = PlaceholderOptions(
Argument('name', resolve=False),
MultiValueArgument('extra_bits', required=False, resolve=False),
blocks=[
('endplaceholder', 'nodelist'),
]
)
def render_tag(self, context, name, extra_bits, nodelist=None):
width = None
inherit = False
for bit in extra_bits:
if bit == 'inherit':
inherit = True
elif bit.isdigit():
width = int(bit)
import warnings
warnings.warn(
"The width parameter for the placeholder tag is deprecated.",
DeprecationWarning
)
if not 'request' in context:
return ''
request = context['request']
if width:
context.update({'width': width})
page = request.current_page
if not page or page == 'dummy':
return ''
content = get_placeholder_content(context, request, page, name, inherit)
if not content and nodelist:
return nodelist.render(context)
return content
def get_name(self):
return self.kwargs['name'].var.value.strip('"').strip("'")
register.tag(Placeholder)
class PageAttribute(Tag):
"""
This template node is used to output attribute from a page such
as its title or slug.
Synopsis
{% page_attribute "field-name" %}
{% page_attribute "field-name" page_lookup %}
Example
{# Output current page's page_title attribute: #}
{% page_attribute "page_title" %}
{# Output page_title attribute of the page with reverse_id "the_page": #}
{% page_attribute "page_title" "the_page" %}
{# Output slug attribute of the page with pk 10: #}
{% page_attribute "slug" 10 %}
Keyword arguments:
field-name -- the name of the field to output. Use one of:
- title
- menu_title
- page_title
- slug
- meta_description
- meta_keywords
- text_excerpt
page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
"""
name = 'page_attribute'
options = Options(
Argument('name', resolve=False),
Argument('page_lookup', required=False, default=None)
)
valid_attributes = [
"title",
"slug",
"meta_description",
"meta_keywords",
"page_title",
"menu_title",
"text_excerpt",
]
def render_tag(self, context, name, page_lookup):
if not 'request' in context:
return ''
name = name.lower()
request = context['request']
lang = get_language_from_request(request)
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
if page == "dummy":
return ''
if page and name in self.valid_attributes:
f = getattr(page, "get_%s" % name)
return f(language=lang, fallback=True)
return ''
register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
template = 'admin/filter.html'
name = 'clean_admin_list_filter'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title(), 'choices' : unique_choices}
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
site=None, cache_result=True):
"""
Shows the content of a page with a placeholder name and given lookup
arguments in the given language.
This is useful if you want to have some more or less static content that is
shared among many pages, such as a footer.
See _get_page_by_untyped_arg() for detailed information on the allowed types
and their interpretation for the page_lookup argument.
"""
request = context.get('request', False)
site_id = get_site_id(site)
if not request:
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
content = None
if cache_result:
cache_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)+'_placeholder:'+placeholder_name
content = cache.get(cache_key)
if not content:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if not page:
return {'content': ''}
placeholder = page.placeholders.get(slot=placeholder_name)
baseqs = get_cmsplugin_queryset(request)
plugins = baseqs.filter(
placeholder=placeholder,
language=lang,
placeholder__slot__iexact=placeholder_name,
parent__isnull=True
).order_by('position').select_related()
c = render_plugins(plugins, context, placeholder)
content = "".join(c)
if cache_result:
cache.set(cache_key, content, settings.CMS_CONTENT_CACHE_DURATION)
if content:
return {'content': mark_safe(content)}
return {'content': ''}
class ShowPlaceholderById(InclusionTag):
template = 'cms/content.html'
name = 'show_placeholder_by_id'
options = Options(
Argument('placeholder_name'),
Argument('reverse_id'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, *args, **kwargs):
return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))
def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
return {
'context': context,
'placeholder_name': placeholder_name,
'page_lookup': reverse_id,
'lang': lang,
'site': site
}
register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
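# Template usage sketch (arguments follow the Options above; "footer" and
# "footer_page" are hypothetical placeholder slot / reverse_id values):
#   {% show_placeholder_by_id "footer" "footer_page" %}
#   {% show_placeholder "footer" "footer_page" "en" %}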
class ShowUncachedPlaceholderById(ShowPlaceholderById):
name = 'show_uncached_placeholder_by_id'
def get_kwargs(self, *args, **kwargs):
kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
        kwargs['cache_result'] = False
return kwargs
register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class PluginsMedia(Tag):
"""
This template node is used to output media for plugins.
eg: {% plugins_media %}
You can also pass the object a page_lookup arg if you want to output media tags for a specific
page other than the current page.
eg: {% plugins_media "gallery" %}
"""
name = 'plugins_media'
options = Options(
Argument('page_lookup', required=False, default=None),
)
def render_tag(self, context, page_lookup):
if not 'request' in context:
return ''
request = context['request']
from cms.plugins.utils import get_plugins_media
plugins_media = None
if page_lookup:
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
plugins_media = get_plugins_media(request, context, page)
else:
page = request.current_page
if page == "dummy":
return ''
# make sure the plugin cache is filled
plugins_media = get_plugins_media(request, context, request._current_page_cache)
if plugins_media:
return plugins_media.render()
else:
return u''
def __repr__(self):
return "<PluginsMediaNode Node: %s>" % getattr(self, 'name', '')
register.tag(PluginsMedia)
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-area120-tables documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-area120-tables"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-area120-tables",
"github_user": "googleapis",
"github_repo": "python-area120-tables",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-area120-tables-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-area120-tables.tex",
"google-area120-tables Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-area120-tables",
"google-area120-tables Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-area120-tables",
"google-area120-tables Documentation",
author,
"google-area120-tables",
"google-area120-tables Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
#SBaaS
from .stage03_quantification_measuredData_io import stage03_quantification_measuredData_io
from .stage03_quantification_simulation_query import stage03_quantification_simulation_query
from SBaaS_quantification.stage01_quantification_averages_query import stage01_quantification_averages_query
from SBaaS_physiology.stage01_physiology_rates_query import stage01_physiology_rates_query
from SBaaS_MFA.stage02_isotopomer_fittedNetFluxes_query import stage02_isotopomer_fittedNetFluxes_query
from SBaaS_models.models_COBRA_dependencies import models_COBRA_dependencies
import copy
from math import sqrt
# Dependencies from thermodynamics
from thermodynamics.thermodynamics_metabolomicsData import thermodynamics_metabolomicsData
class stage03_quantification_measuredData_execute(stage03_quantification_measuredData_io,
stage03_quantification_simulation_query,
stage01_quantification_averages_query,
stage01_physiology_rates_query,
stage02_isotopomer_fittedNetFluxes_query):
def execute_makeMetabolomicsData_intracellular(self,experiment_id_I,data_I=[],compartment_id_I='c'):
        '''Get the curated metabolomics data from data_stage01_quantification_averagesMIGeo'''
# get rows:
met_id_conv_dict = {'Hexose_Pool_fru_glc-D':['glc-D','fru-D'],
'Pool_2pg_3pg':['2pg','3pg'],
'23dpg':['13dpg']};
cobradependencies = models_COBRA_dependencies();
data_O = [];
if data_I:
data = data_I;
else:
data = [];
data = self.get_rows_experimentID_dataStage01AveragesMIgeo(experiment_id_I);
for d in data:
if d['component_group_name'] in list(met_id_conv_dict.keys()):
met2conv = d['component_group_name'];
for met_conv in met_id_conv_dict[met2conv]:
row_tmp = copy.copy(d)
row_tmp['component_group_name'] = met_conv;
data_O.append(row_tmp);
else:
data_O.append(d);
for d in data_O:
d['met_id']=cobradependencies.format_metid(d['component_group_name'],compartment_id_I);
d['measured']=True;
d['concentration_var']=d['calculated_concentration_var'];
d['concentration_lb']=d['calculated_concentration_lb'];
d['concentration_ub']=d['calculated_concentration_ub'];
d['concentration']=d['calculated_concentration_average'];
d['concentration_units']=d['calculated_concentration_units'];
d['comment_']=None;
#add data to the DB
self.add_dataStage03QuantificationMetabolomicsData(data_O);
#self.session.commit();
def execute_makeFluxomicsData(self,IDsQuantification2SimulationIDsIsotopomer_I = {},
criteria_I = 'flux_lb/flux_ub',
flip_rxn_direction_I=[]):
        '''Collect estimated flux data from data_stage02_isotopomer_fittedNetFluxes for thermodynamic simulation
INPUT:
IDsQuantification2SimulationIDsIsotopomer_I = {'simulation_id':{'experiment_id':..., (quant id)
'sample_name_abbreviation':..., (quant id)
'model_id':..., (quant id)
'time_point':..., (quant id)
'flux_units':..., (isotopomer id)
'simulation_dateAndTime':..., (isotopomer id)
},
...}
criteria_I = string, if 'flux_lb/flux_ub', the lower/upper bounds will be used
if 'flux_mean/flux_stdev', the lower/upper bounds will be replaced by mean +/- stdev
INPUT not yet implemented:
flip_rxn_direction_I = list of reaction_ids to flip the direction of flux
'''
data_O = [];
for simulation_id in list(IDsQuantification2SimulationIDsIsotopomer_I.keys()):
# get the fittedNetFluxes
fittedNetFluxes = [];
fittedNetFluxes = self.get_rows_simulationIDAndSimulationDateAndTimeAndFluxUnits_dataStage02IsotopomerfittedNetFluxes(simulation_id,
IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['simulation_dateAndTime'],
IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['flux_units']);
if fittedNetFluxes:
for d in fittedNetFluxes:
# change the direction
if d['rxn_id'] in flip_rxn_direction_I:
rate_tmp,rate_lb_tmp,rate_ub_tmp = d['flux'],d['flux_lb'],d['flux_ub'];
#TODO:
#d['flux_lb'] = -max([abs(x) for x in [rate_lb_tmp,rate_ub_tmp]]);
#d['flux_ub'] = -min([abs(x) for x in [rate_lb_tmp,rate_ub_tmp]]);
if criteria_I == 'flux_mean/flux_stdev':
d['flux_lb']=d['flux']-d['flux_stdev']
d['flux_ub']=d['flux']+d['flux_stdev']
tmp = {'experiment_id':IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['experiment_id'],
'model_id':IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['model_id'],
'sample_name_abbreviation':IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['sample_name_abbreviation'],
'time_point':IDsQuantification2SimulationIDsIsotopomer_I[simulation_id]['time_point'],
'rxn_id':d['rxn_id'],
'flux_average':d['flux'],
'flux_stdev':d['flux_stdev'],
'flux_lb':d['flux_lb'],
'flux_ub':d['flux_ub'],
'flux_units':d['flux_units'],
'used_':d['used_'],
'comment_':d['comment_']}
data_O.append(tmp);
# add data to the database
self.add_dataStage03QuantificationMeasuredFluxes(data_O);
def execute_addMeasuredFluxes(self,experiment_id_I, ko_list={}, flux_dict={}, model_ids_I=[], sample_name_abbreviations_I=[],time_points_I=[]):
'''Add flux data for physiological simulation'''
#Input:
#flux_dict = {};
#flux_dict['iJO1366'] = {};
#flux_dict['iJO1366'] = {};
#flux_dict['iJO1366']['sna'] = {};
#flux_dict['iJO1366']['sna']['tp'] = {};
#flux_dict['iJO1366']['sna']['tp']['Ec_biomass_iJO1366_WT_53p95M'] = {'ave':None,'stdev':None,'units':'mmol*gDCW-1*hr-1','lb':0.704*0.9,'ub':0.704*1.1};
#flux_dict['iJO1366']['sna']['tp']['EX_ac_LPAREN_e_RPAREN_'] = {'ave':None,'stdev':None,'units':'mmol*gDCW-1*hr-1','lb':2.13*0.9,'ub':2.13*1.1};
#flux_dict['iJO1366']['sna']['tp']['EX_o2_LPAREN_e_RPAREN__reverse'] = {'ave':None,'units':'mmol*gDCW-1*hr-1','stdev':None,'lb':0,'ub':16};
#flux_dict['iJO1366']['sna']['tp']['EX_glc_LPAREN_e_RPAREN_'] = {'ave':None,'stdev':None,'units':'mmol*gDCW-1*hr-1','lb':-7.4*1.1,'ub':-7.4*0.9};
data_O = [];
# get the model ids:
if model_ids_I:
model_ids = model_ids_I;
else:
model_ids = [];
model_ids = self.get_modelID_experimentID_dataStage03QuantificationSimulation(experiment_id_I);
for model_id in model_ids:
# get sample names and sample name abbreviations
if sample_name_abbreviations_I:
sample_name_abbreviations = sample_name_abbreviations_I;
else:
sample_name_abbreviations = [];
sample_name_abbreviations = self.get_sampleNameAbbreviations_experimentIDAndModelID_dataStage03QuantificationSimulation(experiment_id_I,model_id);
for sna_cnt,sna in enumerate(sample_name_abbreviations):
print('Adding experimental fluxes for sample name abbreviation ' + sna);
if time_points_I:
time_points = time_points_I;
else:
time_points = [];
time_points = self.get_timePoints_experimentIDAndModelIDAndSampleNameAbbreviation_dataStage03QuantificationSimulation(experiment_id_I,model_id,sna)
for tp in time_points:
if flux_dict:
for k,v in flux_dict[model_id][sna][tp].items():
# record the data
data_tmp = {'experiment_id':experiment_id_I,
'model_id':model_id,
'sample_name_abbreviation':sna,
'time_point':tp,
'rxn_id':k,
'flux_average':v['ave'],
'flux_stdev':v['stdev'],
'flux_lb':v['lb'],
'flux_ub':v['ub'],
'flux_units':v['units'],
'used_':True,
'comment_':None}
data_O.append(data_tmp);
##add data to the database
#row = [];
#row = data_stage03_quantification_measuredFluxes(
# experiment_id_I,
# model_id,
# sna,
# tp,
# k,
# v['ave'],
# v['stdev'],
# v['lb'],
# v['ub'],
# v['units'],
# True,
# None);
#self.session.add(row);
if ko_list:
for k in ko_list[model_id][sna][tp]:
# record the data
data_tmp = {'experiment_id':experiment_id_I,
'model_id':model_id,
'sample_name_abbreviation':sna,
'time_point':tp,
'rxn_id':k,
'flux_average':0.0,
'flux_stdev':0.0,
'flux_lb':0.0,
'flux_ub':0.0,
'flux_units':'mmol*gDCW-1*hr-1',
'used_':True,
'comment_':None}
data_O.append(data_tmp);
##add data to the database
#row = [];
#row = data_stage03_quantification_measuredFluxes(
# experiment_id_I,
# model_id,
# sna,
# tp,
# k,
# 0.0,
# 0.0,
# 0.0,
# 0.0,
# 'mmol*gDCW-1*hr-1',
# True,
# None);
#self.session.add(row);
#add data to the database:
self.add_dataStage03QuantificationMeasuredFluxes(data_O);
#self.session.commit();
def execute_makeMeasuredFluxes(self,experiment_id_I, metID2RxnID_I = {}, sample_name_abbreviations_I = [], met_ids_I = [],
correct_EX_glc_LPAREN_e_RPAREN_I = True):
        '''Collect flux data from data_stage01_physiology_ratesAverages for physiological simulation
INPUT:
metID2RxnID_I = e.g. {'glc-D':{'model_id':'140407_iDM2014','rxn_id':'EX_glc_LPAREN_e_RPAREN_'},
'ac':{'model_id':'140407_iDM2014','rxn_id':'EX_ac_LPAREN_e_RPAREN_'},
'succ':{'model_id':'140407_iDM2014','rxn_id':'EX_succ_LPAREN_e_RPAREN_'},
'lac-L':{'model_id':'140407_iDM2014','rxn_id':'EX_lac_DASH_L_LPAREN_e_RPAREN_'},
'biomass':{'model_id':'140407_iDM2014','rxn_id':'Ec_biomass_iJO1366_WT_53p95M'}};
correct_EX_glc_LPAREN_e_RPAREN_I = boolean, if True, the direction of glucose input will be reversed
'''
data_O = [];
# get sample names and sample name abbreviations
if sample_name_abbreviations_I:
sample_name_abbreviations = sample_name_abbreviations_I;
else:
sample_name_abbreviations = [];
sample_name_abbreviations = self.get_sampleNameAbbreviations_experimentID_dataStage03QuantificationSimulation(experiment_id_I);
for sna in sample_name_abbreviations:
print('Collecting experimental fluxes for sample name abbreviation ' + sna);
# get met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.get_metID_experimentIDAndSampleNameAbbreviation_dataStage01PhysiologyRatesAverages(experiment_id_I,sna);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Collecting experimental fluxes for metabolite ' + met);
# check for the conversion:
if not met in metID2RxnID_I.keys(): continue;
# get rateData
slope_average, intercept_average, rate_average, rate_lb, rate_ub, rate_units, rate_var = None,None,None,None,None,None,None;
slope_average, intercept_average, rate_average, rate_lb, rate_ub, rate_units, rate_var = self.get_rateData_experimentIDAndSampleNameAbbreviationAndMetID_dataStage01PhysiologyRatesAverages(experiment_id_I,sna,met);
rate_stdev = sqrt(rate_var);
model_id = metID2RxnID_I[met]['model_id'];
rxn_id = metID2RxnID_I[met]['rxn_id'];
# correct for glucose uptake
if rxn_id == 'EX_glc_LPAREN_e_RPAREN_' and correct_EX_glc_LPAREN_e_RPAREN_I:
rate_lb_tmp,rate_ub_tmp = rate_lb,rate_ub;
rate_lb = min([abs(x) for x in [rate_lb_tmp,rate_ub_tmp]]);
rate_ub = max([abs(x) for x in [rate_lb_tmp,rate_ub_tmp]]);
rate_average = abs(rate_average);
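                    # Worked example (hypothetical numbers): measured uptake bounds of
                    # rate_lb = -8.1 and rate_ub = -6.7 become rate_lb = 6.7 and
                    # rate_ub = 8.1 after taking absolute values, so the glucose
                    # exchange flux is reported as a positive uptake rate.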
# record the data
data_tmp = {'experiment_id':experiment_id_I,
'model_id':model_id,
'sample_name_abbreviation':sna,
'rxn_id':rxn_id,
'flux_average':rate_average,
'flux_stdev':rate_stdev,
'flux_lb':rate_lb,
'flux_ub':rate_ub,
'flux_units':rate_units,
'used_':True,
'comment_':None}
data_O.append(data_tmp);
##add data to the database
#row = [];
#row = data_stage03_quantification_measuredFluxes(
# experiment_id_I,
# model_id,
# sna,
# rxn_id,
# rate_average,
# rate_stdev,
# rate_lb,
# rate_ub,
# rate_units,
# True,
# None);
#self.session.add(row);
#add data to the database:
self.add_dataStage03QuantificationMeasuredFluxes(data_O);
#self.session.commit();
def execute_testMeasuredFluxes(self,experiment_id_I, models_I, ko_list_I={}, flux_dict_I={}, model_ids_I=[], sample_name_abbreviations_I=[],time_points_I=[],
adjustment_1_I=True,adjustment_2_I=True,diagnose_I=False,
update_measuredFluxes_I=False):
        '''Test each model constrained to the measured fluxes'''
cobradependencies = models_COBRA_dependencies();
diagnose_variables_O = {};
flux_dict_O = [];
test_O = [];
# get the model ids:
if model_ids_I:
model_ids = model_ids_I;
else:
model_ids = [];
model_ids = self.get_modelID_experimentID_dataStage03QuantificationSimulation(experiment_id_I);
for model_id in model_ids:
diagnose_variables_O[model_id] = {};
cobra_model_base = models_I[model_id];
print('testing model ' + model_id);
# get sample names and sample name abbreviations
if sample_name_abbreviations_I:
sample_name_abbreviations = sample_name_abbreviations_I;
else:
sample_name_abbreviations = [];
sample_name_abbreviations = self.get_sampleNameAbbreviations_experimentIDAndModelID_dataStage03QuantificationSimulation(experiment_id_I,model_id);
for sna_cnt,sna in enumerate(sample_name_abbreviations):
diagnose_variables_O[model_id][sna] = {};
print('testing sample_name_abbreviation ' + sna);
# get the time_points
if time_points_I:
time_points = time_points_I;
else:
time_points = [];
time_points = self.get_timePoints_experimentIDAndModelIDAndSampleNameAbbreviation_dataStage03QuantificationSimulation(experiment_id_I,model_id,sna)
for tp in time_points:
diagnose_variables_O[model_id][sna][tp] = {'bad_lbub_1':None,'bad_lbub_2':None};
print('testing time_point ' + tp);
# get the flux data
if flux_dict_I:
flux_dict = flux_dict_I
else:
flux_dict = {};
flux_dict = self.get_fluxDict_experimentIDAndModelIDAndSampleNameAbbreviationsAndTimePoint_dataStage03QuantificationMeasuredFluxes(experiment_id_I,model_id,sna,tp);
# get the ko list
if ko_list_I:
ko_list = ko_list_I;
else:
ko_list = [];
# copy the cobra_model
cobra_model = cobra_model_base.copy();
# check each flux bounds
if diagnose_I:
# record the variables
summary_O = cobradependencies.diagnose_modelLBAndUB(cobra_model,ko_list,flux_dict,
adjustment_1_I=adjustment_1_I,adjustment_2_I=adjustment_2_I)
diagnose_variables_O[model_id][sna][tp]=summary_O;
diagnose_variables_O[model_id][sna][tp]['flux_dict']=flux_dict;
for rxn_id,d in list(flux_dict.items()):
#if rxn_id in summary_O['bad_lbub_1'] or rxn_id in summary_O['bad_lbub_2']:
# comment_ = 'adjusted';
#else:
# comment_ = None;
tmp = {'experiment_id':experiment_id_I,
'model_id':model_id,
'sample_name_abbreviation':sna,
'time_point':tp,
'rxn_id':rxn_id,
'flux_average':d['flux'],
'flux_stdev':d['stdev'],
'flux_lb':d['lb'],
'flux_ub':d['ub'],
'flux_units':d['units'],
'used_':d['used_'],
'comment_':d['comment_']}
flux_dict_O.append(tmp);
else:
# test and constrain each model
test = False;
test = cobradependencies.test_model(cobra_model_I=cobra_model,ko_list=ko_list,flux_dict=flux_dict,description=None);
test_O.append(test);
if diagnose_I and update_measuredFluxes_I:
#update measuredFluxes
self.update_unique_dataStage03QuantificationMeasuredFluxes(flux_dict_O);
return diagnose_variables_O;
elif diagnose_I:
return diagnose_variables_O;
else:
return test_O;
|
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers < 64 are surely valid
Latin5_BulgarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = { \
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = { \
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
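# Minimal usage sketch: in the upstream (legacy) chardet package these model dicts
# are typically wrapped in a single-byte charset prober, roughly as follows
# (the module and class names refer to that package and are assumptions here):
# from .sbcharsetprober import SingleByteCharSetProber
# latin5_prober = SingleByteCharSetProber(Latin5BulgarianModel)
# win1251_prober = SingleByteCharSetProber(Win1251BulgarianModel)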
|
|
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Link
from hubcheck.pageobjects.basepageelement import TextReadOnly
class GroupsMenu1(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(GroupsMenu1,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.messages = Link(self,{'base':'messages'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.wishlist = Link(self,{'base':'wishlist'})
self.calendar = Link(self,{'base':'calendar'})
self._menu_items = ['overview','members','wiki',
'resources','messages','discussion',
'blog','wishlist','calendar']
        # update the component's locators with this object's overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu1_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'messages' : "css=.group-messages-tab",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'wishlist' : "css=.group-wishlist-tab",
'calendar' : "css=.group-calendar-tab",
}
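# Hypothetical usage sketch ('page' stands in for whatever page object owns this
# widget; it is not defined in this module):
# menu = GroupsMenu1(page)
# if 'members' in menu.get_menu_items() and not menu.is_menu_item_protected('members'):
#     menu.goto_menu_item('members')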
class GroupsMenu2(BasePageWidget):
"""
Groups Menu for nees.org
Adds datasharing and announcements links
"""
def __init__(self, owner, locatordict={}):
super(GroupsMenu2,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.messages = Link(self,{'base':'messages'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.wishlist = Link(self,{'base':'wishlist'})
self.datasharing = Link(self,{'base':'datasharing'})
self.calendar = Link(self,{'base':'calendar'})
self.announcements = Link(self,{'base':'announcements'})
self._menu_items = ['overview','members','wiki',
'resources','messages','discussion',
'blog','wishlist','calendar',
'datasharing','announcements']
        # update the component's locators with this object's overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu2_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'messages' : "css=.group-messages-tab",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'wishlist' : "css=.group-wishlist-tab",
'datasharing' : "css=.group-datasharing-tab",
'calendar' : "css=.group-calendar-tab",
'announcements' : "css=.group-announcements-tab",
}
class GroupsMenu3(BasePageWidget):
"""
Groups Menu for hub version 1.1.5
Adds projects, announcements, collections
"""
def __init__(self, owner, locatordict={}):
super(GroupsMenu3,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.nblogs = TextReadOnly(self,{'base':'nblogs'})
self.wishlist = Link(self,{'base':'wishlist'})
self.usage = Link(self,{'base':'usage'})
self.projects = Link(self,{'base':'projects'})
self.nprojects = TextReadOnly(self,{'base':'nprojects'})
self.calendar = Link(self,{'base':'calendar'})
self.ncalendars = TextReadOnly(self,{'base':'ncalendars'})
self.announcements = Link(self,{'base':'announcements'})
self.collections = Link(self,{'base':'collections'})
self._menu_items = ['overview','members',
'wiki','resources','discussion',
'blog','wishlist','usage','projects',
'calendar','announcements','collections']
        # update the component's locators with this object's overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu3_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'nblogs' : "css=.group-blog-tab .count",
'wishlist' : "css=.group-wishlist-tab",
'usage' : "css=.group-usage-tab",
'projects' : "css=.group-projects-tab",
'nprojects' : "css=.group-projects-tab .count",
'calendar' : "css=.group-calendar-tab",
'ncalendars' : "css=.group-calendar-tab .count",
'announcements' : "css=.group-announcements-tab",
'collections' : "css=.group-collections-tab",
}
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Borewit
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Microsoft WAVE/RIFF audio file/stream information and tags."""
import sys
import struct
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen._riff import RiffFile, InvalidChunk
from mutagen._iff import error as IffError
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
convert_error,
endswith,
loadfile,
reraise,
)
__all__ = ["WAVE", "Open", "delete"]
class error(IffError):
"""WAVE stream parsing errors."""
class _WaveFile(RiffFile):
"""Representation of a RIFF/WAVE file"""
def __init__(self, fileobj):
RiffFile.__init__(self, fileobj)
if self.file_type != u'WAVE':
raise error("Expected RIFF/WAVE.")
# Normalize ID3v2-tag-chunk to lowercase
if u'ID3' in self:
self[u'ID3'].id = u'id3'
class WaveStreamInfo(StreamInfo):
"""WaveStreamInfo()
Microsoft WAVE file information.
    Information is parsed from the 'fmt' & 'data' chunks of the RIFF/WAVE file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0.0
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
SIZE = 16
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
wave_file = _WaveFile(fileobj)
try:
format_chunk = wave_file[u'fmt']
except KeyError as e:
raise error(str(e))
data = format_chunk.read()
if len(data) < 16:
raise InvalidChunk()
# RIFF: http://soundfile.sapp.org/doc/WaveFormat/
# Python struct.unpack:
# https://docs.python.org/2/library/struct.html#byte-order-size-and-alignment
info = struct.unpack('<hhLLhh', data[:self.SIZE])
self.audio_format, self.channels, self.sample_rate, byte_rate, \
block_align, self.bits_per_sample = info
self.bitrate = self.channels * block_align * self.sample_rate
# Calculate duration
self._number_of_samples = 0
if block_align > 0:
try:
data_chunk = wave_file[u'data']
self._number_of_samples = data_chunk.data_size / block_align
except KeyError:
pass
if self.sample_rate > 0:
self.length = self._number_of_samples / self.sample_rate
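    # Worked example of the 'fmt' layout parsed above (the values describe a typical
    # 16-bit stereo 44.1 kHz PCM file and are illustrative, not from a real file):
    # struct.unpack('<hhLLhh', data[:16]) -> (1, 2, 44100, 176400, 4, 16)
    # i.e. audio_format=1 (PCM), channels=2, sample_rate=44100,
    # byte_rate=176400, block_align=4, bits_per_sample=16.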
def pprint(self):
return u"%d channel RIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _WaveID3(ID3):
"""A Wave file with ID3v2 tags"""
def _pre_load_header(self, fileobj):
try:
fileobj.seek(_WaveFile(fileobj)[u'id3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the Wave/RIFF file"""
fileobj = filething.fileobj
wave_file = _WaveFile(fileobj)
if u'id3' not in wave_file:
wave_file.insert_chunk(u'id3')
chunk = wave_file[u'id3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
chunk.resize(len(data))
chunk.write(data)
def delete(self, filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
delete(filething)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
try:
_WaveFile(filething.fileobj).delete_chunk(u'id3')
except KeyError:
pass
class WAVE(FileType):
"""WAVE(filething)
A Waveform Audio File Format
(WAVE, or more commonly known as WAV due to its filename extension)
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`WaveStreamInfo`)
"""
_mimes = ["audio/wav", "audio/wave"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"RIFF") + (header[8:12] == b'WAVE')
+ endswith(filename, b".wav") + endswith(filename, b".wave"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _WaveID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
self.info = WaveStreamInfo(fileobj)
fileobj.seek(0, 0)
try:
self.tags = _WaveID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
Open = WAVE
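# Minimal usage sketch (the file name is a placeholder):
# from mutagen.wave import WAVE
# audio = WAVE("example.wav")
# print(audio.info.pprint())          # channel count, bitrate, sample rate, length
# if audio.tags is not None:
#     print(list(audio.tags.keys()))  # ID3 frame ids, if any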
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import interconnect_pb2
from google3.cloud.graphite.mmv2.services.google.compute import interconnect_pb2_grpc
from typing import List
class Interconnect(object):
def __init__(
self,
description: str = None,
self_link: str = None,
id: int = None,
name: str = None,
location: str = None,
link_type: str = None,
requested_link_count: int = None,
interconnect_type: str = None,
admin_enabled: bool = None,
noc_contact_email: str = None,
customer_name: str = None,
operational_status: str = None,
provisioned_link_count: int = None,
interconnect_attachments: list = None,
peer_ip_address: str = None,
google_ip_address: str = None,
google_reference_id: str = None,
expected_outages: list = None,
circuit_infos: list = None,
state: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.description = description
self.name = name
self.location = location
self.link_type = link_type
self.requested_link_count = requested_link_count
self.interconnect_type = interconnect_type
self.admin_enabled = admin_enabled
self.noc_contact_email = noc_contact_email
self.customer_name = customer_name
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = interconnect_pb2_grpc.ComputeInterconnectServiceStub(channel.Channel())
request = interconnect_pb2.ApplyComputeInterconnectRequest()
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
if InterconnectLinkTypeEnum.to_proto(self.link_type):
request.resource.link_type = InterconnectLinkTypeEnum.to_proto(
self.link_type
)
if Primitive.to_proto(self.requested_link_count):
request.resource.requested_link_count = Primitive.to_proto(
self.requested_link_count
)
if InterconnectInterconnectTypeEnum.to_proto(self.interconnect_type):
request.resource.interconnect_type = InterconnectInterconnectTypeEnum.to_proto(
self.interconnect_type
)
if Primitive.to_proto(self.admin_enabled):
request.resource.admin_enabled = Primitive.to_proto(self.admin_enabled)
if Primitive.to_proto(self.noc_contact_email):
request.resource.noc_contact_email = Primitive.to_proto(
self.noc_contact_email
)
if Primitive.to_proto(self.customer_name):
request.resource.customer_name = Primitive.to_proto(self.customer_name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyComputeInterconnect(request)
self.description = Primitive.from_proto(response.description)
self.self_link = Primitive.from_proto(response.self_link)
self.id = Primitive.from_proto(response.id)
self.name = Primitive.from_proto(response.name)
self.location = Primitive.from_proto(response.location)
self.link_type = InterconnectLinkTypeEnum.from_proto(response.link_type)
self.requested_link_count = Primitive.from_proto(response.requested_link_count)
self.interconnect_type = InterconnectInterconnectTypeEnum.from_proto(
response.interconnect_type
)
self.admin_enabled = Primitive.from_proto(response.admin_enabled)
self.noc_contact_email = Primitive.from_proto(response.noc_contact_email)
self.customer_name = Primitive.from_proto(response.customer_name)
self.operational_status = InterconnectOperationalStatusEnum.from_proto(
response.operational_status
)
self.provisioned_link_count = Primitive.from_proto(
response.provisioned_link_count
)
self.interconnect_attachments = Primitive.from_proto(
response.interconnect_attachments
)
self.peer_ip_address = Primitive.from_proto(response.peer_ip_address)
self.google_ip_address = Primitive.from_proto(response.google_ip_address)
self.google_reference_id = Primitive.from_proto(response.google_reference_id)
self.expected_outages = InterconnectExpectedOutagesArray.from_proto(
response.expected_outages
)
self.circuit_infos = InterconnectCircuitInfosArray.from_proto(
response.circuit_infos
)
self.state = InterconnectStateEnum.from_proto(response.state)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = interconnect_pb2_grpc.ComputeInterconnectServiceStub(channel.Channel())
request = interconnect_pb2.DeleteComputeInterconnectRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
if InterconnectLinkTypeEnum.to_proto(self.link_type):
request.resource.link_type = InterconnectLinkTypeEnum.to_proto(
self.link_type
)
if Primitive.to_proto(self.requested_link_count):
request.resource.requested_link_count = Primitive.to_proto(
self.requested_link_count
)
if InterconnectInterconnectTypeEnum.to_proto(self.interconnect_type):
request.resource.interconnect_type = InterconnectInterconnectTypeEnum.to_proto(
self.interconnect_type
)
if Primitive.to_proto(self.admin_enabled):
request.resource.admin_enabled = Primitive.to_proto(self.admin_enabled)
if Primitive.to_proto(self.noc_contact_email):
request.resource.noc_contact_email = Primitive.to_proto(
self.noc_contact_email
)
if Primitive.to_proto(self.customer_name):
request.resource.customer_name = Primitive.to_proto(self.customer_name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteComputeInterconnect(request)
@classmethod
def list(self, project, service_account_file=""):
stub = interconnect_pb2_grpc.ComputeInterconnectServiceStub(channel.Channel())
request = interconnect_pb2.ListComputeInterconnectRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListComputeInterconnect(request).items
def to_proto(self):
resource = interconnect_pb2.ComputeInterconnect()
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
if InterconnectLinkTypeEnum.to_proto(self.link_type):
resource.link_type = InterconnectLinkTypeEnum.to_proto(self.link_type)
if Primitive.to_proto(self.requested_link_count):
resource.requested_link_count = Primitive.to_proto(
self.requested_link_count
)
if InterconnectInterconnectTypeEnum.to_proto(self.interconnect_type):
resource.interconnect_type = InterconnectInterconnectTypeEnum.to_proto(
self.interconnect_type
)
if Primitive.to_proto(self.admin_enabled):
resource.admin_enabled = Primitive.to_proto(self.admin_enabled)
if Primitive.to_proto(self.noc_contact_email):
resource.noc_contact_email = Primitive.to_proto(self.noc_contact_email)
if Primitive.to_proto(self.customer_name):
resource.customer_name = Primitive.to_proto(self.customer_name)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class InterconnectExpectedOutages(object):
def __init__(
self,
name: str = None,
description: str = None,
source: str = None,
state: str = None,
issue_type: str = None,
affected_circuits: list = None,
start_time: int = None,
end_time: int = None,
):
self.name = name
self.description = description
self.source = source
self.state = state
self.issue_type = issue_type
self.affected_circuits = affected_circuits
self.start_time = start_time
self.end_time = end_time
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = interconnect_pb2.ComputeInterconnectExpectedOutages()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if InterconnectExpectedOutagesSourceEnum.to_proto(resource.source):
res.source = InterconnectExpectedOutagesSourceEnum.to_proto(resource.source)
if InterconnectExpectedOutagesStateEnum.to_proto(resource.state):
res.state = InterconnectExpectedOutagesStateEnum.to_proto(resource.state)
if InterconnectExpectedOutagesIssueTypeEnum.to_proto(resource.issue_type):
res.issue_type = InterconnectExpectedOutagesIssueTypeEnum.to_proto(
resource.issue_type
)
if Primitive.to_proto(resource.affected_circuits):
res.affected_circuits.extend(Primitive.to_proto(resource.affected_circuits))
if Primitive.to_proto(resource.start_time):
res.start_time = Primitive.to_proto(resource.start_time)
if Primitive.to_proto(resource.end_time):
res.end_time = Primitive.to_proto(resource.end_time)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InterconnectExpectedOutages(
name=Primitive.from_proto(resource.name),
description=Primitive.from_proto(resource.description),
source=InterconnectExpectedOutagesSourceEnum.from_proto(resource.source),
state=InterconnectExpectedOutagesStateEnum.from_proto(resource.state),
issue_type=InterconnectExpectedOutagesIssueTypeEnum.from_proto(
resource.issue_type
),
affected_circuits=Primitive.from_proto(resource.affected_circuits),
start_time=Primitive.from_proto(resource.start_time),
end_time=Primitive.from_proto(resource.end_time),
)
class InterconnectExpectedOutagesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InterconnectExpectedOutages.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InterconnectExpectedOutages.from_proto(i) for i in resources]
class InterconnectCircuitInfos(object):
def __init__(
self,
google_circuit_id: str = None,
google_demarc_id: str = None,
customer_demarc_id: str = None,
):
self.google_circuit_id = google_circuit_id
self.google_demarc_id = google_demarc_id
self.customer_demarc_id = customer_demarc_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = interconnect_pb2.ComputeInterconnectCircuitInfos()
if Primitive.to_proto(resource.google_circuit_id):
res.google_circuit_id = Primitive.to_proto(resource.google_circuit_id)
if Primitive.to_proto(resource.google_demarc_id):
res.google_demarc_id = Primitive.to_proto(resource.google_demarc_id)
if Primitive.to_proto(resource.customer_demarc_id):
res.customer_demarc_id = Primitive.to_proto(resource.customer_demarc_id)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InterconnectCircuitInfos(
google_circuit_id=Primitive.from_proto(resource.google_circuit_id),
google_demarc_id=Primitive.from_proto(resource.google_demarc_id),
customer_demarc_id=Primitive.from_proto(resource.customer_demarc_id),
)
class InterconnectCircuitInfosArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InterconnectCircuitInfos.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InterconnectCircuitInfos.from_proto(i) for i in resources]
class InterconnectLinkTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectLinkTypeEnum.Value(
"ComputeInterconnectLinkTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectLinkTypeEnum.Name(resource)[
len("ComputeInterconnectLinkTypeEnum") :
]
class InterconnectInterconnectTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectInterconnectTypeEnum.Value(
"ComputeInterconnectInterconnectTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectInterconnectTypeEnum.Name(resource)[
len("ComputeInterconnectInterconnectTypeEnum") :
]
class InterconnectOperationalStatusEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectOperationalStatusEnum.Value(
"ComputeInterconnectOperationalStatusEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectOperationalStatusEnum.Name(resource)[
len("ComputeInterconnectOperationalStatusEnum") :
]
class InterconnectExpectedOutagesSourceEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesSourceEnum.Value(
"ComputeInterconnectExpectedOutagesSourceEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesSourceEnum.Name(
resource
)[len("ComputeInterconnectExpectedOutagesSourceEnum") :]
class InterconnectExpectedOutagesStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesStateEnum.Value(
"ComputeInterconnectExpectedOutagesStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesStateEnum.Name(
resource
)[len("ComputeInterconnectExpectedOutagesStateEnum") :]
class InterconnectExpectedOutagesIssueTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesIssueTypeEnum.Value(
"ComputeInterconnectExpectedOutagesIssueTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectExpectedOutagesIssueTypeEnum.Name(
resource
)[len("ComputeInterconnectExpectedOutagesIssueTypeEnum") :]
class InterconnectStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectStateEnum.Value(
"ComputeInterconnectStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return interconnect_pb2.ComputeInterconnectStateEnum.Name(resource)[
len("ComputeInterconnectStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
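# Hypothetical usage sketch (all resource values are placeholders; a valid project
# and service account file are required in practice):
# ic = Interconnect(
#     name="my-interconnect",
#     location="my-interconnect-location",
#     link_type="LINK_TYPE_ETHERNET_10G_LR",
#     requested_link_count=1,
#     interconnect_type="DEDICATED",
#     project="my-project",
#     service_account_file="/path/to/sa.json",
# )
# ic.apply()  # creates or updates the resource and refreshes fields from the response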
|
|
from collections import OrderedDict
import datetime
import operator
from django.contrib.admin.utils import (lookup_field, lookup_needs_distinct,
label_for_field)
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Q, Sum
from django.forms.forms import pretty_name
from django.utils.datastructures import SortedDict
from django.utils.text import capfirst
import floppyforms as forms
from zenaida.templatetags.zenaida import format_money
import six
from brambling.filters import FloppyFilterSet, AttendeeFilterSet, OrderFilterSet
from brambling.models import Attendee, Order
__all__ = ('comma_separated_manager', 'ModelTable',
'AttendeeTable')
TABLE_COLUMN_FIELD = 'columns'
SEARCH_FIELD = 'search'
class Echo(object):
"""
An object that implements just the write method of the file-like
interface.
See https://docs.djangoproject.com/en/dev/howto/outputting-csv/#streaming-csv-files
"""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
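# Sketch of the streaming-CSV pattern Echo is meant for (it follows the Django docs
# linked above; 'rows' is a placeholder iterable of row tuples):
# import csv
# from django.http import StreamingHttpResponse
# writer = csv.writer(Echo())
# response = StreamingHttpResponse((writer.writerow(row) for row in rows),
#                                  content_type="text/csv")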
def comma_separated_manager(attr_name):
"""
    Returns a function which looks up an M2M manager attribute on an object
    and returns its members as a comma-separated string.
"""
def inner(self, obj):
manager = getattr(obj, attr_name)
return ", ".join([unicode(x) for x in manager.all()])
inner.short_description = pretty_name(attr_name)
return inner
class Cell(object):
def __init__(self, field, value):
self.field = field
self.value = value
def __unicode__(self):
"""
Return a comma-separated list if value is an iterable. Otherwise, return
the value as a string.
"""
if hasattr(self.value, '__iter__'):
return u", ".join([unicode(x) for x in self.value])
return unicode(self.value)
def __repr__(self):
return u"{}: {}".format(self.field, self.value)
def is_boolean(self):
return isinstance(self.value, bool)
class Row(object):
def __init__(self, data, obj=None):
self.data = OrderedDict(data)
self.obj = obj
def __getitem__(self, key):
if isinstance(key, int):
return Cell(*self.data.items()[key])
return Cell(key, self.data[key])
def __iter__(self):
for key, value in self.data.items():
yield Cell(key, value)
def __len__(self):
return len(self.data)
class ModelTable(object):
"""
A class that builds a customizable table representation of a model
queryset. This representation is searchable, filterable, and has a
customizable selection of fields. If data is not required, it will
not be queried.
The class takes three optional arguments on instantiation:
1. Queryset
2. Data
3. A form prefix
"""
list_display = ()
default_fields = None
search_fields = ()
fieldsets = None
#: A dictionary mapping field names to the overriding labels
#: to be used for rendering this table.
label_overrides = {}
filterset_class = FloppyFilterSet
model = None
def __init__(self, queryset=None, data=None, form_prefix=None):
# Simple assignment:
self.queryset = queryset
self.data = data
self.form_prefix = form_prefix
self.filterset = self.get_filterset()
# More complex properties:
self.is_bound = data is not None
def __iter__(self):
fields = self.get_fields()
object_list = self.get_queryset(fields)
for obj in object_list:
yield Row(((field, self.get_field_val(obj, field))
for field in fields),
obj=obj)
def __len__(self):
fields = self.get_fields()
object_list = self.get_queryset(fields)
return object_list.count()
def __nonzero__(self):
# Prevents infinite recursion from calling __len__ - label_for_field
# gets called during __len__, and checks the boolean value of the
# table.
return True
def _label(self, field):
"""
Returns a pretty name for the given field. First check is the
label_overrides dict. Remaining checks follow the django admin's
pattern (including, for example, short_description support.)
"""
if field in self.label_overrides:
return self.label_overrides[field]
try:
return label_for_field(field, self.model, self)
except AttributeError:
# Trust that it exists, for now.
return pretty_name(field)
def header_row(self):
return Row((field, self._label(field))
for field in self.get_fields())
def get_filterset_kwargs(self):
return {
'data': self.data,
'prefix': self.form_prefix,
}
def get_filterset(self):
return self.filterset_class(**self.get_filterset_kwargs())
def get_list_display(self):
if self.fieldsets is not None:
list_display = ()
for name, fields in self.fieldsets:
list_display += fields
return list_display
return self.list_display
def get_default_fields(self):
return self.default_fields
def get_fields(self):
"""
Returns a tuple of fields that are included in the table.
"""
valid = self.is_bound and self.column_form.is_valid()
if valid:
cleaned_data = self.column_form.cleaned_data
# Include fields which are marked True in the form:
fields = list(cleaned_data.get(TABLE_COLUMN_FIELD, ()))
# Only return a list of fields if it isn't empty:
if fields:
return fields
return self.column_form.fields[TABLE_COLUMN_FIELD].initial
def get_base_queryset(self):
if self.queryset is None:
return self.model._default_manager.all()
if self.queryset.model is not self.model:
raise ImproperlyConfigured("QuerySet model must be the same as ModelTable model.")
return self.queryset.all()
def _add_data(self, queryset, fields):
"""
Add data to the queryset based on the selected fields.
For now, data required by filters should always be added.
"""
use_distinct = False
return queryset, use_distinct
def _search(self, queryset):
# Originally from django.contrib.admin.options
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.search_fields
search_term = self.data.get(SEARCH_FIELD, '') if self.data else ''
opts = self.model._meta
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
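    # Examples of the admin-style search prefixes handled above (field names are
    # illustrative): '^surname' -> 'surname__istartswith', '=email' -> 'email__iexact',
    # '@notes' -> 'notes__search', 'surname' -> 'surname__icontains'.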
def get_queryset(self, fields):
queryset = self.get_base_queryset()
queryset, use_distinct = self._add_data(queryset, fields)
queryset, ud = self._search(queryset)
if use_distinct or ud:
queryset = queryset.distinct()
# HACK to work around filterset qs caching
if hasattr(self.filterset, '_qs'):
del self.filterset._qs
self.filterset.queryset = queryset
return self.filterset.qs
def get_field_val(self, obj, key):
"""
Follows the same rules as ModelAdmin dynamic lookups:
1. Model field
2. Callable
3. Method on table
4. Method on model
5. Other attribute on model
Returns a value which will be passed to the template.
"""
# Compare:
# * django.contrib.admin.utils:display_for_field
# * django.contrib.admin.utils:display_for_value
field, attr, value = lookup_field(key, obj, self)
if field is not None:
if field.flatchoices:
# EMPTY_CHANGELIST_VALUE is "(None)"
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
return value
@property
def empty_filter_form(self):
if not hasattr(self, '_empty_filter_form'):
filterset = self.filterset
fields = SortedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(filterset.filters)])
fields[filterset.order_by_field] = filterset.ordering_field
Form = type(str('%sForm' % filterset.__class__.__name__),
(filterset._meta.form,), fields)
initial = dict(((name, None) for name in Form.base_fields))
self._empty_filter_form = Form(prefix=filterset.form_prefix, initial=initial)
return self._empty_filter_form
@property
def filter_form(self):
return self.filterset.form
def get_column_form_class(self):
return forms.Form
@property
def column_form(self):
"""
Returns a form with a single multiple-choice field listing the available columns,
bound to self.data if it is not None.
"""
if not hasattr(self, '_column_form'):
list_display = self.get_list_display()
default_fields = self.get_default_fields()
choices = [(field, capfirst(self._label(field))) for field in list_display]
if default_fields is None:
initial = list_display
else:
initial = default_fields
field = forms.MultipleChoiceField(
choices=choices,
initial=initial,
widget=forms.CheckboxSelectMultiple,
required=False,
)
# Workaround for https://github.com/gregmuellegger/django-floppyforms/issues/145
field.hidden_widget = forms.MultipleHiddenInput
fields = {
TABLE_COLUMN_FIELD: field,
}
Form = type(str('{}Form'.format(self.__class__.__name__)), (self.get_column_form_class(),), fields)
self._column_form = Form(self.data, prefix=self.form_prefix)
return self._column_form
class CustomDataTable(ModelTable):
def get_list_display(self):
list_display = super(CustomDataTable, self).get_list_display()
return list_display + self.get_custom_fields()
def get_default_fields(self):
if self.default_fields is None:
return None
return self.default_fields + self.get_custom_fields()
def get_custom_fields(self):
if not hasattr(self, 'custom_fields'):
self.custom_fields = self._get_custom_fields()
for field in self.custom_fields:
if field.key not in self.label_overrides:
self.label_overrides[field.key] = field.name
return tuple(field.key for field in self.custom_fields)
def _get_custom_fields(self):
raise NotImplementedError
def get_field_val(self, obj, key):
if key.startswith('custom_'):
if not hasattr(obj, '_custom_data'):
raw_data = {
entry.form_field_id: entry.get_value()
for entry in obj.custom_data.all()
}
obj._custom_data = {
field.key: raw_data[field.pk]
for field in self.custom_fields
if field.pk in raw_data
}
return obj._custom_data.get(key, '')
return super(CustomDataTable, self).get_field_val(obj, key)
class AttendeeTable(CustomDataTable):
fieldsets = (
('Identification',
('pk', 'get_full_name', 'given_name', 'surname', 'middle_name')),
('Status',
('cart_items', 'purchased_items', 'refunded_items')),
('Contact',
('email', 'phone')),
('Housing',
('housing_status', 'housing_nights', 'housing_preferences',
'environment_avoid', 'environment_cause', 'person_prefer',
'person_avoid', 'other_needs')),
('Order',
('order_code', 'order_placed_by', 'order_balance')),
('Miscellaneous',
('liability_waiver', 'photo_consent')),
)
label_overrides = {
'pk': 'ID',
'get_full_name': 'Name',
'housing_nights': 'Housing nights',
'housing_preferences': 'Housing environment preference',
'environment_avoid': 'Housing Environment Avoid',
'environment_cause': 'Attendee May Cause/Do',
'person_prefer': 'Housing People Preference',
'person_avoid': 'Housing People Avoid',
'other_needs': 'Other Housing Needs',
'order__code': 'Order Code',
'order_placed_by': 'Order Placed By',
'liability_waiver': 'Liability Waiver Signed',
'photo_consent': 'Consent to be Photographed',
}
search_fields = ('given_name', 'middle_name', 'surname', 'order__code',
'email', 'order__email', 'order__person__email')
filterset_class = AttendeeFilterSet
model = Attendee
def __init__(self, event, *args, **kwargs):
self.event = event
super(AttendeeTable, self).__init__(*args, **kwargs)
def get_filterset_kwargs(self):
kwargs = super(AttendeeTable, self).get_filterset_kwargs()
kwargs['event'] = self.event
return kwargs
def _get_custom_fields(self):
from brambling.models import CustomForm, CustomFormField
return CustomFormField.objects.filter(
form__event=self.event,
form__form_type=CustomForm.ATTENDEE
).order_by('index')
def _add_data(self, queryset, fields):
use_distinct = False
for field in fields:
if field.startswith('custom_'):
queryset = queryset.prefetch_related('custom_data')
elif field == 'housing_nights':
queryset = queryset.prefetch_related('nights')
elif field == 'housing_preferences':
queryset = queryset.prefetch_related('housing_prefer')
elif field == 'environment_avoid':
queryset = queryset.prefetch_related('ef_avoid')
elif field == 'environment_cause':
queryset = queryset.prefetch_related('ef_cause')
elif field == 'order_code':
queryset = queryset.select_related('order')
elif field == 'order_placed_by':
queryset = queryset.select_related('order__person')
elif field == 'order_balance':
queryset = queryset.annotate(
order_balance=Sum('order__transactions__amount')
)
elif field == 'cart_items':
queryset = queryset.extra(select={
'cart_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.attendee_id = brambling_attendee.id AND
brambling_boughtitem.status IN ('reserved', 'unpaid')
""",
})
elif field == 'purchased_items':
queryset = queryset.extra(select={
'purchased_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.attendee_id = brambling_attendee.id AND
brambling_boughtitem.status = 'bought'
""",
})
elif field == 'refunded_items':
queryset = queryset.extra(select={
'refunded_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.attendee_id = brambling_attendee.id AND
brambling_boughtitem.status = 'refunded'
""",
})
return queryset, use_distinct
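# Note (added for clarity): the cart/purchased/refunded item counts above are computed as
# correlated COUNT(*) subqueries via .extra() rather than as annotations, presumably because
# combining aggregations over multiple reverse relations in a single Django queryset can
# inflate the results through the extra joins. OrderTable._add_data below uses the same pattern.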
# Methods to be used as fields
def order_code(self, obj):
return obj.order.code
def order_placed_by(self, obj):
person = obj.order.person
if person:
return "{} ({})".format(person.get_full_name(), person.email)
return obj.order.email
def order_balance(self, obj):
return format_money(obj.order_balance or 0, self.event.currency)
housing_nights = comma_separated_manager("nights")
housing_preferences = comma_separated_manager("housing_prefer")
environment_avoid = comma_separated_manager("ef_avoid")
environment_cause = comma_separated_manager("ef_cause")
class OrderTable(CustomDataTable):
fieldsets = (
(None,
('code', 'person', 'balance', 'cart_items',
'purchased_items', 'refunded_items')),
)
survey_fieldsets = (
('Survey',
('heard_through', 'heard_through_other',
'send_flyers', 'send_flyers_full_address')),
)
housing_fieldsets = (
('Housing',
('providing_housing', 'contact_name', 'contact_email',
'contact_phone', 'hosting_full_address', 'public_transit_access',
'ef_present', 'ef_avoid', 'person_prefer', 'person_avoid',
'housing_categories')),
)
label_overrides = {
'heard_through_other': 'heard through (other)',
'send_flyers_full_address': 'flyers address',
}
search_fields = (
'code', 'email', 'person__given_name', 'person__middle_name',
'person__surname', 'person__email', 'attendees__given_name',
'attendees__middle_name', 'attendees__surname',
)
filterset_class = OrderFilterSet
model = Order
def __init__(self, event, *args, **kwargs):
self.event = event
super(OrderTable, self).__init__(*args, **kwargs)
def get_list_display(self):
fieldsets = self.fieldsets
if self.event.collect_survey_data:
fieldsets += self.survey_fieldsets
if self.event.collect_housing_data:
housing_fields = self.housing_fieldsets[0][1]
for date in self.event.get_housing_dates():
housing_fields += (
"hosting_spaces_{}".format(date.strftime("%Y%m%d")),
"hosting_max_{}".format(date.strftime("%Y%m%d")),
)
fieldsets += (
('Housing', housing_fields),
)
fieldsets += (
('Custom Fields',
self.get_custom_fields()),
)
list_display = ()
for name, fields in fieldsets:
list_display += fields
return list_display
def _get_custom_fields(self):
from brambling.models import CustomForm, CustomFormField
return CustomFormField.objects.filter(
form__event=self.event,
form__form_type=CustomForm.ORDER
).order_by('index')
def _label(self, field):
date_str = None
if field.startswith('hosting_max'):
date_str = field[-8:]
format_str = "hosting {month}/{day}/{year} max"
elif field.startswith('hosting_spaces'):
date_str = field[-8:]
format_str = "hosting {month}/{day}/{year} spaces"
if date_str:
return format_str.format(
year=date_str[0:4],
month=date_str[4:6],
day=date_str[6:8]
)
return super(OrderTable, self)._label(field)
def get_field_val(self, obj, key):
date_str = None
from brambling.models import HousingSlot
if key.startswith('hosting_max'):
date_str = key[-8:]
field = "spaces_max"
elif key.startswith('hosting_spaces'):
date_str = key[-8:]
field = "spaces"
if date_str:
if obj.get_eventhousing():
hosting_date = datetime.datetime.strptime(date_str, "%Y%m%d").date()
try:
slot = HousingSlot.objects.get(eventhousing__order=obj, date=hosting_date)
except HousingSlot.DoesNotExist:
pass
else:
return getattr(slot, field, '')
return ''
return super(OrderTable, self).get_field_val(obj, key)
def _add_data(self, queryset, fields):
use_distinct = False
for field in fields:
if field.startswith('custom_'):
queryset = queryset.prefetch_related('custom_data')
elif field == 'ef_present':
queryset = queryset.prefetch_related('eventhousing__ef_present')
elif field == 'ef_avoid':
queryset = queryset.prefetch_related('eventhousing__ef_avoid')
elif field == 'housing_categories':
queryset = queryset.prefetch_related('eventhousing__housing_categories')
elif field == 'person':
queryset = queryset.select_related('person')
elif field == 'balance':
queryset = queryset.annotate(
balance=Sum('transactions__amount'),
)
elif field == 'cart_items':
queryset = queryset.extra(select={
'cart_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.order_id = brambling_order.id AND
brambling_boughtitem.status IN ('reserved', 'unpaid')
""",
})
elif field == 'purchased_items':
queryset = queryset.extra(select={
'purchased_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.order_id = brambling_order.id AND
brambling_boughtitem.status = 'bought'
""",
})
elif field == 'refunded_items':
queryset = queryset.extra(select={
'refunded_items': """
SELECT COUNT(*) FROM brambling_boughtitem WHERE
brambling_boughtitem.order_id = brambling_order.id AND
brambling_boughtitem.status = 'refunded'
""",
})
return queryset, use_distinct
def send_flyers_full_address(self, obj):
if obj.send_flyers:
return u", ".join((
obj.send_flyers_address,
obj.send_flyers_address_2,
obj.send_flyers_city,
obj.send_flyers_state_or_province,
obj.send_flyers_zip,
unicode(obj.send_flyers_country),
))
return ''
def hosting_full_address(self, obj):
eventhousing = obj.get_eventhousing()
if eventhousing:
return u", ".join((
eventhousing.address,
eventhousing.address_2,
eventhousing.city,
eventhousing.state_or_province,
eventhousing.zip_code,
unicode(eventhousing.country),
))
return ''
def get_eventhousing_attr(self, obj, name):
eventhousing = obj.get_eventhousing()
if eventhousing:
return getattr(eventhousing, name)
return ''
def contact_name(self, obj):
return self.get_eventhousing_attr(obj, 'contact_name')
contact_name.short_description = 'hosting contact name'
def contact_email(self, obj):
return self.get_eventhousing_attr(obj, 'contact_email')
contact_email.short_description = 'hosting contact email'
def contact_phone(self, obj):
return self.get_eventhousing_attr(obj, 'contact_phone')
contact_phone.short_description = 'hosting contact phone'
def public_transit_access(self, obj):
return self.get_eventhousing_attr(obj, 'public_transit_access')
public_transit_access.short_description = 'hosting public transit access'
def person_prefer(self, obj):
return self.get_eventhousing_attr(obj, 'person_prefer')
person_prefer.short_description = 'hosting people preference'
def person_avoid(self, obj):
return self.get_eventhousing_attr(obj, 'person_avoid')
person_avoid.short_description = 'hosting people avoid'
def get_eventhousing_csm(self, obj, name):
eventhousing = obj.get_eventhousing()
if eventhousing:
return comma_separated_manager(name)(self, eventhousing)
return ''
def ef_present(self, obj):
return self.get_eventhousing_csm(obj, 'ef_present')
ef_present.short_description = 'hosting environmental factors'
def ef_avoid(self, obj):
return self.get_eventhousing_csm(obj, 'ef_avoid')
ef_avoid.short_description = 'hosting environmental avoided'
def housing_categories(self, obj):
return self.get_eventhousing_csm(obj, 'housing_categories')
housing_categories.short_description = 'hosting home categories'
def balance(self, obj):
return format_money(obj.balance or 0, self.event.currency)
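# Illustrative sketch (added; not part of the original module): how a view or export routine
# might consume one of these tables. It assumes ModelTable.__init__ (defined earlier, not shown
# here) accepts the querystring data as its `data` argument, and relies on `_label` and
# `capfirst` exactly as they are used above; the real views in this project may differ.
def iter_attendee_rows(event, data=None):
    """Yield a header row, then one row per attendee, restricted to the selected columns."""
    table = AttendeeTable(event, data=data)
    fields = table.get_fields()
    yield [capfirst(table._label(field)) for field in fields]
    for obj in table.get_queryset(fields):
        yield [table.get_field_val(obj, field) for field in fields]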
"""
Python variables relevant to the Cas9 mutants described by Klienstiver et al. (2015), Nature, doi:10.1038/nature14592
Contains the variables:
PI_domain: string, WT PI domain (AA 1099-1368) from UniProt (http://www.uniprot.org/uniprot/Q99ZW2) + AAs 1097, 1098
PI_sec_structure: dict with format 'type_#' : (start, end), secondary structure within PI domain from UniProt
aa_groups: dict, single letter codes of 20 amino acids and their groups, which are "hydrophobic", "acidic", "basic",
"polar" and "specialcase". Note that the classifications of methionine (M), tryptophan (W) and tyrosine
(Y) are ambiguous
mutants_kleinstiver: list of dictionaries with keys "pam", "backbone" and "mutations". Each list entry is a
different cas9 mutant from Kleinstiver et al. Generated from the CSVs using kleinstiver_csv_to_dict.py.
"""
PI_domain = ('KTEVQTGGFSKESILPKRNSDKLIARKKDWDPKKYGGFDSPTVAYSVLVVAKVEKGKSKKLKSVKELLGITIMERSSFEKNPIDFLEAKGYKEVKKDLIIKLPKY'
'SLFELENGRKRMLASAGELQKGNELALPSKYVNFLYLASHYEKLKGSPEDNEQKQLFVEQHKHYLDEIIEQISEFSKRVILADANLDKVLSAYNKHRDKPIREQA'
'ENIIHLFTLTNLGAPAAFKYFDTTIDRKRYTSTKEVLDATLIHQSITGLYETRIDLSQLGGD')
PI_sec_structure = {
'helix_1': (981,1000),
'helix_2': (1002,1004),
'helix_3': (1005,1008),
'beta_strand_1': (1009,1011),
'helix_4': (1018,1021),
'helix_5': (1031,1040),
'turn_1': (1041,1043),
'helix_6': (1044,1046),
'beta_strand_2': (1048,1051),
'beta_strand_3': (1057,1059),
'beta_strand_4': (1062,1065),
'turn_2': (1067,1069),
'beta_strand_5': (1072,1075),
'turn_3': (1076,1078),
'helix_7': (1079,1087),
'beta_strand_6': (1093,1096),
'beta_strand_7': (1115,1117),
'beta_strand_8': (1120,1123),
'helix_8': (1128,1131),
'beta_strand_9': (1139,1151),
'turn_4': (1152,1155),
'beta_strand_10': (1156,1167),
'turn_5': (1168,1170),
'helix_9': (1171,1176),
'helix_10': (1178,1185),
'helix_11': (1192,1194),
'beta_strand_11': (1196,1198),
'beta_strand_12': (1203,1205),
'beta_strand_13': (1207,1209),
'beta_strand_14': (1211,1218),
'beta_strand_15': (1220,1222),
'helix_12': (1230,1240),
'helix_13': (1252,1261),
'turn_6': (1262,1264),
'helix_14': (1265,1279),
'helix_15': (1284,1296),
'turn_7': (1297,1299),
'helix_16': (1302,1312),
'turn_8': (1313,1316),
'beta_strand_16': (1317,1320),
'beta_strand_17': (1324,1328),
'beta_strand_18': (1329,1331),
'helix_17': (1341,1344),
'beta_strand_19': (1345,1350),
'beta_strand_20': (1352,1354),
'beta_strand_21': (1356,1361),
'helix_18': (1362,1365),
'none': 0
}
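# Illustrative helper (added for clarity; not in the original module): resolve the
# secondary-structure element containing a given residue index, mirroring the
# 'sec_structure' values used in mutants_kleinstiver below.
def sec_structure_at(aa_idx):
    """Return the PI_sec_structure key whose (start, end) range contains aa_idx, or 'none'."""
    for name, span in PI_sec_structure.items():
        if isinstance(span, tuple) and span[0] <= aa_idx <= span[1]:
            return name
    return 'none'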
aa_group = {
'R': 'basic',
'K': 'basic',
'H': 'basic',
'D': 'acidic',
'E': 'acidic',
'Q': 'polar',
'N': 'polar',
'S': 'polar',
'T': 'polar',
'A': 'hydrophobic',
'V': 'hydrophobic',
'I': 'hydrophobic',
'L': 'hydrophobic',
'F': 'hydrophobic',
'W': 'hydrophobic',
'Y': 'hydrophobic',
'M': 'hydrophobic',
'P': 'special',
'G': 'special',
'C': 'special'
}
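# Illustrative sketch (added) of how each entry in the 'mutations' lists below is composed;
# the original records were generated by kleinstiver_csv_to_dict.py, which is not shown here.
# Assumes PI_domain[0] corresponds to residue 1097, per the module docstring, and uses
# sec_structure_at() defined above.
def make_mutation_record(aa_idx, aa_mut):
    """Build a mutation dict like those in mutants_kleinstiver for residue aa_idx mutated to aa_mut."""
    aa_init = PI_domain[aa_idx - 1097]  # wild-type residue at this position
    return {
        'aa_idx': aa_idx,
        'sec_structure': sec_structure_at(aa_idx),
        'aa_init': aa_init,
        'aa_mut': aa_mut,
        'aa_group_init': aa_group[aa_init],
        'aa_group_mut': aa_group[aa_mut],
    }
# For example, make_mutation_record(1135, 'Y') should reproduce the D1135Y entry that
# appears in several of the variants below.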
mutants_kleinstiver = [
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'I',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1154, 'sec_structure': 'turn_4', 'aa_init': 'S', 'aa_mut': 'F',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1222, 'sec_structure': 'beta_strand_15', 'aa_init': 'K', 'aa_mut': 'R',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'}]},
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1300, 'sec_structure': 'none', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'WT',
'mutations': [{'aa_idx': 1330, 'sec_structure': 'beta_strand_18', 'aa_init': 'T', 'aa_mut': 'P',
'aa_group_init': 'polar', 'aa_group_mut': 'special'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1317, 'sec_structure': 'beta_strand_16', 'aa_init': 'N', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1324, 'sec_structure': 'beta_strand_17', 'aa_init': 'F', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1344, 'sec_structure': 'helix_17', 'aa_init': 'D', 'aa_mut': 'E',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1179, 'sec_structure': 'helix_10', 'aa_init': 'I', 'aa_mut': 'M',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1146, 'sec_structure': 'beta_strand_9', 'aa_init': 'V', 'aa_mut': 'E',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'E',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1230, 'sec_structure': 'helix_12', 'aa_init': 'S', 'aa_mut': 'F',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1322, 'sec_structure': 'none', 'aa_init': 'A', 'aa_mut': 'S',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1111, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1134, 'sec_structure': 'none', 'aa_init': 'F', 'aa_mut': 'T',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1190, 'sec_structure': 'none', 'aa_init': 'V', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1252, 'sec_structure': 'helix_13', 'aa_init': 'N', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1192, 'sec_structure': 'helix_11', 'aa_init': 'K', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1303, 'sec_structure': 'helix_16', 'aa_init': 'R', 'aa_mut': 'C',
'aa_group_init': 'basic', 'aa_group_mut': 'special'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1151, 'sec_structure': 'beta_strand_9', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1152, 'sec_structure': 'turn_4', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1329, 'sec_structure': 'beta_strand_18', 'aa_init': 'T', 'aa_mut': 'A',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1148, 'sec_structure': 'beta_strand_9', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1283, 'sec_structure': 'none', 'aa_init': 'A', 'aa_mut': 'T',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1249, 'sec_structure': 'none', 'aa_init': 'P', 'aa_mut': 'L',
'aa_group_init': 'special', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1263, 'sec_structure': 'turn_6', 'aa_init': 'K', 'aa_mut': 'R',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1297, 'sec_structure': 'turn_7', 'aa_init': 'H', 'aa_mut': 'Y',
'aa_group_init': 'basic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1125, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1143, 'sec_structure': 'beta_strand_9', 'aa_init': 'V', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1104, 'sec_structure': 'none', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1303, 'sec_structure': 'helix_16', 'aa_init': 'R', 'aa_mut': 'H',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1366, 'sec_structure': 'none', 'aa_init': 'G', 'aa_mut': 'V',
'aa_group_init': 'special', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1299, 'sec_structure': 'turn_7', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1155, 'sec_structure': 'turn_4', 'aa_init': 'K', 'aa_mut': 'R',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1364, 'sec_structure': 'helix_18', 'aa_init': 'Q', 'aa_mut': 'L',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1113, 'sec_structure': 'none', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1177, 'sec_structure': 'none', 'aa_init': 'N', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1150, 'sec_structure': 'beta_strand_9', 'aa_init': 'E', 'aa_mut': 'K',
'aa_group_init': 'acidic', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1097, 'sec_structure': 'none', 'aa_init': 'K', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1141, 'sec_structure': 'beta_strand_9', 'aa_init': 'Y', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1173, 'sec_structure': 'helix_9', 'aa_init': 'S', 'aa_mut': 'F',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1162, 'sec_structure': 'beta_strand_10', 'aa_init': 'E', 'aa_mut': 'D',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1283, 'sec_structure': 'none', 'aa_init': 'A', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1168, 'sec_structure': 'turn_5', 'aa_init': 'I', 'aa_mut': 'N',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'E',
'aa_group_init': 'special', 'aa_group_mut': 'acidic'},
{'aa_idx': 1329, 'sec_structure': 'beta_strand_18', 'aa_init': 'T', 'aa_mut': 'A',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1332, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGA', 'backbone': 'R1335Q',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1148, 'sec_structure': 'beta_strand_9', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1317, 'sec_structure': 'beta_strand_16', 'aa_init': 'N', 'aa_mut': 'D',
'aa_group_init': 'polar', 'aa_group_mut': 'acidic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1257, 'sec_structure': 'helix_13', 'aa_init': 'L', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1100, 'sec_structure': 'none', 'aa_init': 'V', 'aa_mut': 'M',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1110, 'sec_structure': 'none', 'aa_init': 'I', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1156, 'sec_structure': 'beta_strand_10', 'aa_init': 'K', 'aa_mut': 'R',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1219, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1250, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'D',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1256, 'sec_structure': 'helix_13', 'aa_init': 'Q', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1287, 'sec_structure': 'helix_15', 'aa_init': 'L', 'aa_mut': 'R',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1177, 'sec_structure': 'none', 'aa_init': 'N', 'aa_mut': 'E',
'aa_group_init': 'polar', 'aa_group_mut': 'acidic'},
{'aa_idx': 1250, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'K',
'aa_group_init': 'acidic', 'aa_group_mut': 'basic'},
{'aa_idx': 1261, 'sec_structure': 'helix_13', 'aa_init': 'Q', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1148, 'sec_structure': 'beta_strand_9', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1175, 'sec_structure': 'helix_9', 'aa_init': 'E', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1219, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1352, 'sec_structure': 'beta_strand_20', 'aa_init': 'I', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1195, 'sec_structure': 'none', 'aa_init': 'I', 'aa_mut': 'L',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1149, 'sec_structure': 'beta_strand_9', 'aa_init': 'V', 'aa_mut': 'D',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1303, 'sec_structure': 'helix_16', 'aa_init': 'R', 'aa_mut': 'C',
'aa_group_init': 'basic', 'aa_group_mut': 'special'},
{'aa_idx': 1317, 'sec_structure': 'beta_strand_16', 'aa_init': 'N', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1119, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1138, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'I',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1219, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1190, 'sec_structure': 'none', 'aa_init': 'V', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1301, 'sec_structure': 'none', 'aa_init': 'P', 'aa_mut': 'T',
'aa_group_init': 'special', 'aa_group_mut': 'polar'},
{'aa_idx': 1309, 'sec_structure': 'helix_16', 'aa_init': 'I', 'aa_mut': 'T',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1350, 'sec_structure': 'beta_strand_19', 'aa_init': 'Q', 'aa_mut': 'H',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1285, 'sec_structure': 'helix_15', 'aa_init': 'A', 'aa_mut': 'G',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'special'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1195, 'sec_structure': 'none', 'aa_init': 'I', 'aa_mut': 'L',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1202, 'sec_structure': 'none', 'aa_init': 'S', 'aa_mut': 'N',
'aa_group_init': 'polar', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1250, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'D',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1251, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1138, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1363, 'sec_structure': 'helix_18', 'aa_init': 'S', 'aa_mut': 'M',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1292, 'sec_structure': 'helix_15', 'aa_init': 'S', 'aa_mut': 'I',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1100, 'sec_structure': 'none', 'aa_init': 'V', 'aa_mut': 'E',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1111, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'H',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'basic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'H',
'aa_group_init': 'acidic', 'aa_group_mut': 'basic'},
{'aa_idx': 1296, 'sec_structure': 'helix_15', 'aa_init': 'K', 'aa_mut': 'M',
'aa_group_init': 'basic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1366, 'sec_structure': 'none', 'aa_init': 'G', 'aa_mut': 'E',
'aa_group_init': 'special', 'aa_group_mut': 'acidic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1255, 'sec_structure': 'helix_13', 'aa_init': 'K', 'aa_mut': 'M',
'aa_group_init': 'basic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1104, 'sec_structure': 'none', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1157, 'sec_structure': 'beta_strand_10', 'aa_init': 'L', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1219, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1242, 'sec_structure': 'none', 'aa_init': 'Y', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1330, 'sec_structure': 'beta_strand_18', 'aa_init': 'T', 'aa_mut': 'M',
'aa_group_init': 'polar', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1262, 'sec_structure': 'turn_6', 'aa_init': 'H', 'aa_mut': 'P',
'aa_group_init': 'basic', 'aa_group_mut': 'special'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1226, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1229, 'sec_structure': 'none', 'aa_init': 'P', 'aa_mut': 'I',
'aa_group_init': 'special', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1330, 'sec_structure': 'beta_strand_18', 'aa_init': 'T', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1219, 'sec_structure': 'none', 'aa_init': 'E', 'aa_mut': 'V',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1179, 'sec_structure': 'helix_10', 'aa_init': 'I', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1285, 'sec_structure': 'helix_15', 'aa_init': 'A', 'aa_mut': 'P',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'special'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'L',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1161, 'sec_structure': 'beta_strand_10', 'aa_init': 'K', 'aa_mut': 'I',
'aa_group_init': 'basic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1155, 'sec_structure': 'turn_4', 'aa_init': 'K', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1191, 'sec_structure': 'none', 'aa_init': 'K', 'aa_mut': 'I',
'aa_group_init': 'basic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1194, 'sec_structure': 'helix_11', 'aa_init': 'L', 'aa_mut': 'F',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'Q',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1339, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'P',
'aa_group_init': 'polar', 'aa_group_mut': 'special'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335E+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1144, 'sec_structure': 'beta_strand_9', 'aa_init': 'L', 'aa_mut': 'P',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'special'},
{'aa_idx': 1208, 'sec_structure': 'beta_strand_13', 'aa_init': 'N', 'aa_mut': 'S',
'aa_group_init': 'polar', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'E',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1191, 'sec_structure': 'none', 'aa_init': 'K', 'aa_mut': 'N',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1303, 'sec_structure': 'helix_16', 'aa_init': 'R', 'aa_mut': 'H',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1195, 'sec_structure': 'none', 'aa_init': 'I', 'aa_mut': 'M',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1207, 'sec_structure': 'beta_strand_13', 'aa_init': 'E', 'aa_mut': 'K',
'aa_group_init': 'acidic', 'aa_group_mut': 'basic'},
{'aa_idx': 1317, 'sec_structure': 'beta_strand_16', 'aa_init': 'N', 'aa_mut': 'K',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1350, 'sec_structure': 'beta_strand_19', 'aa_init': 'Q', 'aa_mut': 'H',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1134, 'sec_structure': 'none', 'aa_init': 'F', 'aa_mut': 'L',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'Y',
'aa_group_init': 'acidic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1192, 'sec_structure': 'helix_11', 'aa_init': 'K', 'aa_mut': 'R',
'aa_group_init': 'basic', 'aa_group_mut': 'basic'},
{'aa_idx': 1207, 'sec_structure': 'beta_strand_13', 'aa_init': 'E', 'aa_mut': 'K',
'aa_group_init': 'acidic', 'aa_group_mut': 'basic'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'},
{'aa_idx': 1342, 'sec_structure': 'helix_17', 'aa_init': 'V', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1111, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'H',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'basic'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1197, 'sec_structure': 'beta_strand_11', 'aa_init': 'K', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1285, 'sec_structure': 'helix_15', 'aa_init': 'A', 'aa_mut': 'S',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1114, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'G',
'aa_group_init': 'basic', 'aa_group_mut': 'special'},
{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1162, 'sec_structure': 'beta_strand_10', 'aa_init': 'E', 'aa_mut': 'D',
'aa_group_init': 'acidic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1195, 'sec_structure': 'none', 'aa_init': 'I', 'aa_mut': 'M',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1309, 'sec_structure': 'helix_16', 'aa_init': 'I', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1135, 'sec_structure': 'none', 'aa_init': 'D', 'aa_mut': 'N',
'aa_group_init': 'acidic', 'aa_group_mut': 'polar'},
{'aa_idx': 1190, 'sec_structure': 'none', 'aa_init': 'V', 'aa_mut': 'I',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1285, 'sec_structure': 'helix_15', 'aa_init': 'A', 'aa_mut': 'V',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'hydrophobic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]},
{'pam': 'NGC', 'backbone': 'R1335T+T1337R',
'mutations': [{'aa_idx': 1111, 'sec_structure': 'none', 'aa_init': 'L', 'aa_mut': 'H',
'aa_group_init': 'hydrophobic', 'aa_group_mut': 'basic'},
{'aa_idx': 1218, 'sec_structure': 'beta_strand_14', 'aa_init': 'G', 'aa_mut': 'R',
'aa_group_init': 'special', 'aa_group_mut': 'basic'},
{'aa_idx': 1297, 'sec_structure': 'turn_7', 'aa_init': 'H', 'aa_mut': 'D',
'aa_group_init': 'basic', 'aa_group_mut': 'acidic'},
{'aa_idx': 1335, 'sec_structure': 'none', 'aa_init': 'R', 'aa_mut': 'T',
'aa_group_init': 'basic', 'aa_group_mut': 'polar'},
{'aa_idx': 1337, 'sec_structure': 'none', 'aa_init': 'T', 'aa_mut': 'R',
'aa_group_init': 'polar', 'aa_group_mut': 'basic'}]}]
|
|
"""
The ActivityPlugin is the most powerful plugin for tracking changes of
individual entities. If you use ActivityPlugin you probably don't need the
TransactionChanges or TransactionMeta plugins.
You can initialize the ActivityPlugin by adding it to the versioning manager.
::
activity_plugin = ActivityPlugin()
make_versioned(plugins=[activity_plugin])
ActivityPlugin uses a single database table for tracking activities. This table
follows the data structure in the `activity stream specification`_, but it comes
with a nice twist:
============== =========== =============
Column         Type        Description
============== =========== =============
id             BigInteger  The primary key of the activity
verb           Unicode     Verb defines the action of the activity
data           JSON        Additional data for the activity in JSON format
transaction_id BigInteger  The transaction this activity was associated
                           with
object_id      BigInteger  The primary key of the object. Object can be
                           any entity which has an integer as primary key.
object_type    Unicode     The type of the object (class name as string)
object_tx_id   BigInteger  The last transaction_id associated with the
                           object. This is used for efficiently fetching
                           the object version associated with this
                           activity.
target_id      BigInteger  The primary key of the target. Target can be
                           any entity which has an integer as primary key.
target_type    Unicode     The type of the target (class name as string)
target_tx_id   BigInteger  The last transaction_id associated with the
                           target.
============== =========== =============
Each Activity has relationships to actor, object and target, but it also holds
information about the associated transaction and about the last transactions
associated with the target and object. This allows each activity to also have
object_version and target_version relationships for introspecting what those
objects and targets were at a given point in time. All these relationship
properties use `generic relationships`_ of the SQLAlchemy-Utils package.
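For example, given a persisted activity instance you can introspect these
relationship properties directly::
    activity.actor           # the user of the associated transaction
    activity.object_version  # the object as it was when the activity was created
    activity.target_version  # the target as it was when the activity was created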
Limitations
^^^^^^^^^^^
Currently all changes to parent models must be flushed or committed before
creating activities. This is due to the fact that there are still no dependency
processors for generic relationships. So when you create activities and assign
objects / targets to them, please remember to flush the session before
creating an activity::
article = Article(name=u'Some article')
session.add(article)
session.flush() # <- IMPORTANT!
first_activity = Activity(verb=u'create', object=article)
session.add(first_activity)
session.commit()
Targets and objects of a given activity must have an integer primary key
column id.
Create activities
^^^^^^^^^^^^^^^^^
Once your models have been configured you can get the Activity model from the
ActivityPlugin class with the activity_cls property::
Activity = activity_plugin.activity_cls
Now let's say we have models called Article and Category. Each Article has one
Category. Activities should be created along with the changes you make to
these models. ::
article = Article(name=u'Some article')
session.add(article)
session.flush()
first_activity = Activity(verb=u'create', object=article)
session.add(first_activity)
session.commit()
The current transaction gets automatically assigned to the activity object::
first_activity.transaction # Transaction object
Update activities
^^^^^^^^^^^^^^^^^
The object property of the Activity object holds the current object and the
object_version holds the object version at the time when the activity was
created. ::
article.name = u'Some article updated!'
session.flush()
second_activity = Activity(verb=u'update', object=article)
session.add(second_activity)
session.commit()
second_activity.object.name # u'Some article updated!'
first_activity.object.name # u'Some article updated!'
first_activity.object_version.name # u'Some article'
Delete activities
^^^^^^^^^^^^^^^^^
The version properties are especially useful for delete activities. Once the
activity is fetched from the database the object is no longer available
(since it has been deleted); hence the only way we can show some information
about the object the user deleted is by accessing the object_version property.
::
session.delete(article)
session.flush()
third_activity = Activity(verb=u'delete', object=article)
session.add(third_activity)
session.commit()
third_activity.object_version.name # u'Some article updated!'
Local version histories using targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The target property of the Activity model offers a way of tracking changes of
a given related object. In the example below we create a new activity when
adding a category for an article and then mark the article as the target of
this activity.
::
    category = Category(name=u'First category', article=article)
    session.add(category)
session.flush()
activity = Activity(
verb=u'create',
object=category,
target=article
)
session.add(activity)
session.commit()
Now if we wanted to find all the changes that affected a given article, we
could do so by searching through all the activities where either the object or
the target is the given article.
::
import sqlalchemy as sa
activities = session.query(Activity).filter(
sa.or_(
Activity.object == article,
Activity.target == article
)
)
.. _activity stream specification:
http://www.activitystrea.ms
.. _generic relationships:
https://sqlalchemy-utils.readthedocs.io/en/latest/generic_relationship.html
"""
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils import JSONType, generic_relationship
from .base import Plugin
from ..factory import ModelFactory
from ..utils import version_class, version_obj
class ActivityBase(object):
id = sa.Column(
sa.BigInteger,
sa.schema.Sequence('activity_id_seq'),
primary_key=True,
autoincrement=True
)
verb = sa.Column(sa.Unicode(255))
@hybrid_property
def actor(self):
return self.transaction.user
class ActivityFactory(ModelFactory):
model_name = 'Activity'
def create_class(self, manager):
"""
Create Activity class.
"""
class Activity(
manager.declarative_base,
ActivityBase
):
__tablename__ = 'activity'
manager = self
transaction_id = sa.Column(
sa.BigInteger,
index=True,
nullable=False
)
data = sa.Column(JSONType)
object_type = sa.Column(sa.String(255))
object_id = sa.Column(sa.BigInteger)
object_tx_id = sa.Column(sa.BigInteger)
target_type = sa.Column(sa.String(255))
target_id = sa.Column(sa.BigInteger)
target_tx_id = sa.Column(sa.BigInteger)
def _calculate_tx_id(self, obj):
session = sa.orm.object_session(self)
if obj:
object_version = version_obj(session, obj)
if object_version:
return object_version.transaction_id
version_cls = version_class(obj.__class__)
return session.query(
sa.func.max(version_cls.transaction_id)
).filter(
version_cls.id == obj.id
).scalar()
def calculate_object_tx_id(self):
self.object_tx_id = self._calculate_tx_id(self.object)
def calculate_target_tx_id(self):
self.target_tx_id = self._calculate_tx_id(self.target)
object = generic_relationship(
object_type, object_id
)
@hybrid_property
def object_version_type(self):
return self.object_type + 'Version'
@object_version_type.expression
def object_version_type(cls):
return sa.func.concat(cls.object_type, 'Version')
object_version = generic_relationship(
object_version_type, (object_id, object_tx_id)
)
target = generic_relationship(
target_type, target_id
)
@hybrid_property
def target_version_type(self):
return self.target_type + 'Version'
@target_version_type.expression
def target_version_type(cls):
return sa.func.concat(cls.target_type, 'Version')
target_version = generic_relationship(
target_version_type, (target_id, target_tx_id)
)
Activity.transaction = sa.orm.relationship(
manager.transaction_cls,
backref=sa.orm.backref(
'activities',
),
primaryjoin=(
'%s.id == Activity.transaction_id' %
manager.transaction_cls.__name__
),
foreign_keys=[Activity.transaction_id]
)
return Activity
class ActivityPlugin(Plugin):
def after_build_models(self, manager):
self.activity_cls = ActivityFactory()(manager)
manager.activity_cls = self.activity_cls
def is_session_modified(self, session):
"""
        Return True if the session contains an activity object and should
        therefore be considered modified.
:param session: SQLAlchemy session object
"""
return any(isinstance(obj, self.activity_cls) for obj in session)
def before_flush(self, uow, session):
for obj in session:
if isinstance(obj, self.activity_cls):
obj.transaction = uow.current_transaction
obj.calculate_target_tx_id()
obj.calculate_object_tx_id()
def after_version_class_built(self, parent_cls, version_cls):
pass
|
|
import os
import re
from dnload.common import file_is_ascii_text
from dnload.common import is_listing
from dnload.common import is_verbose
from dnload.common import listify
from dnload.common import locate
from dnload.common import run_command
from dnload.platform_var import PlatformVar
########################################
# Linker ###############################
########################################
class Linker:
"""Linker used to link object files."""
def __init__(self, op):
"""Constructor."""
self.__command = op
self.__command_basename = os.path.basename(self.__command)
self.__library_directories = []
self.__libraries = []
self.__linker_flags = []
self.__linker_flags_extra = []
self.__linker_script = []
self.__rpath_directories = []
def addExtraFlags(self, op):
"""Add extra flags to use when linking."""
if is_listing(op):
for ii in op:
self.addExtraFlags(ii)
return
if not (op in self.__linker_flags_extra):
self.__linker_flags_extra += [op]
def command_basename_startswith(self, op):
"""Check if command basename starts with given string."""
return self.__command_basename.startswith(op)
def generate_linker_flags(self):
"""Generate linker command for given mode."""
self.__linker_flags = []
if self.is_gcc():
self.__linker_flags += ["-nostartfiles", "-nostdlib", "-Xlinker", "--strip-all"]
elif self.is_clang():
self.__linker_flags += ["-nostdlib", "-Xlinker", "--strip-all"]
elif self.is_ld():
dynamic_linker = str(PlatformVar("interp"))
if dynamic_linker.startswith("\"") and dynamic_linker.endswith("\""):
dynamic_linker = dynamic_linker[1:-1]
else:
raise RuntimeError("dynamic liner definition '%s' should be quoted" % (dynamic_linker))
self.__linker_flags += ["-nostdlib", "--strip-all", "--dynamic-linker=%s" % (dynamic_linker)]
else:
raise RuntimeError("compilation not supported with compiler '%s'" % (op))
def get_command(self):
"""Accessor."""
return self.__command
def get_command_basename(self):
"""Accessor."""
return self.__command_basename
def get_library_list(self):
"""Generate link library list libraries."""
ret = []
prefix = "-l"
if self.is_msvc():
prefix = "/l"
for ii in self.__libraries:
ret += [prefix + ii]
return ret
def get_library_directory_list(self):
"""Set link directory listing."""
ret = []
prefix = "-L"
rpath_prefix = ["-Xlinker"]
if self.is_msvc():
prefix = "/L"
for ii in self.__library_directories:
ret += [prefix + ii]
if self.is_ld():
ret += ["-rpath-link", ":".join(self.__library_directories)]
rpath_prefix = []
for ii in self.__rpath_directories:
ret += rpath_prefix + ["-rpath=%s" % (ii)]
return ret
def get_library_name(self, op):
"""Get actual name of library."""
if op.startswith("/"):
return op
# Check if the library is specified verbatim. If yes, no need to expand.
if re.match(r'lib.+\.so(\..*)?', op):
return op
libname = "lib%s.so" % (op)
# Search in library directories only.
for ii in self.__library_directories:
current_libname = locate(ii, libname)
if not current_libname:
continue
# Check if the supposed shared library is a linker script.
if file_is_ascii_text(current_libname):
ret = read_linker_script_library_name(current_libname)
if ret:
if is_verbose():
print("'%s' is a linker script, actual library name: '%s'" % (libname, ret))
return ret
# Stop at first match.
break
return libname
def get_linker_flags(self):
"""Accessor."""
return self.__linker_flags
def generate_linker_script(self, dst, modify_start=False):
"""Get linker script from linker, improve it, write improved linker script to given file."""
(so, se) = run_command([self.__command, "--verbose"] + self.__linker_flags_extra)
if 0 < len(se) and is_verbose():
print(se)
# Linker script is the block of code between lines of multiple '=':s.
match = re.match(r'.*\n=+\s*\n(.*)\n=+\s*\n.*', so, re.DOTALL)
if not match:
raise RuntimeError("could not extract script from linker output")
ld_script = match.group(1)
# Remove unwanted symbol definitions one at a time.
unwanted_symbols = ["__bss_end__", "__bss_start__", "__end__", "__bss_start", "_bss_end__", "_edata", "_end"]
for ii in unwanted_symbols:
            ld_script = re.sub(r'\n([ \f\r\t\v]+)(%s)(\s*=[^\n]+)\n' % (ii), r'\n\1/*\2\3*/\n', ld_script, flags=re.MULTILINE)
        ld_script = re.sub(r'SEGMENT_START\s*\(\s*(\S+)\s*,\s*\d*x?\d+\s*\)', r'SEGMENT_START(\1, %s)' % (str(PlatformVar("entry"))), ld_script, flags=re.MULTILINE)
        if modify_start:
            ld_script = re.sub(r'(SEGMENT_START.*\S)\s*\+\s*SIZEOF_HEADERS\s*;', r'\1;', ld_script, flags=re.MULTILINE)
fd = open(dst, "w")
fd.write(ld_script)
fd.close()
if is_verbose():
print("Wrote linker script '%s'." % (dst))
return ld_script
def is_clang(self):
"""Tells if the linker is considered to be clang."""
if self.command_basename_startswith("clang"):
return True
return False
def is_gcc(self):
"""Tells if the linker is considered to be gcc."""
if self.command_basename_startswith("g++") or self.command_basename_startswith("gcc"):
return True
return False
def is_ld(self):
"""Tells if the linker is considered to be ld."""
if self.command_basename_startswith("ld"):
return True
return False
def is_msvc(self):
"""Tells if the linker is considered to be ld."""
if self.command_basename_startswith("cl."):
return True
return False
def link(self, src, dst, extra_args=[]):
"""Link a file."""
cmd = [self.__command, src, "-o", dst] + self.__linker_flags + self.get_library_directory_list() + self.get_library_list() + extra_args + self.__linker_script + self.__linker_flags_extra
(so, se) = run_command(cmd)
if 0 < len(se) and is_verbose():
print(se)
return so
def link_binary(self, objcopy, src, dst):
"""Link a binary file with no bells and whistles."""
ld_target = dst
cmd = [self.__command, "--entry=" + str(PlatformVar("entry"))] + listify(src) + self.__linker_script + self.__linker_flags_extra
# Use objcopy if it was given.
if objcopy:
(dst_base, dst_ext) = os.path.splitext(dst)
dst_bin = dst_base + ".out"
objcopy_cmd = [objcopy, "--output-target=binary", dst_bin, dst]
ld_target = dst_bin
# Otherwise link directly into binary.
else:
cmd += ["--oformat=binary"]
cmd += ["-o", ld_target]
# Run linker command.
(so, se) = run_command(cmd)
if 0 < len(se) and is_verbose():
print(se)
        # Only run objcopy command if it was required.
if objcopy:
(so_add, se) = run_command(objcopy_cmd)
if 0 < len(se) and is_verbose():
print(se)
so += so_add
return so
def set_libraries(self, lst):
"""Set libraries to link."""
self.__libraries = lst
    def set_library_directories(self, lst):
        """Set library directories, keeping only directories that exist."""
        self.__library_directories = []
for ii in lst:
if os.path.isdir(ii):
self.__library_directories += [ii]
def set_linker_script(self, op):
"""Use given linker script."""
self.__linker_script = ["-T", op]
def set_rpath_directories(self, lst):
"""Set rpath option."""
self.__rpath_directories = []
for ii in lst:
self.__rpath_directories += [ii]
########################################
# Functions ############################
########################################
def read_linker_script_library_name(op):
"""Read the actual library name from a linker script file."""
fd = open(op, "r")
contents = fd.read()
fd.close()
match = re.search(r'GROUP\s*\(\s*(\S+)\s+', contents, re.MULTILINE)
if match:
return os.path.basename(match.group(1))
match = re.search(r'INPUT\(\s*(\S+)(\s*-l(\S+))?\)', contents, re.MULTILINE)
if match:
return os.path.basename(match.group(1))
return None
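if __name__ == "__main__":
    # Minimal usage sketch (not part of dnload itself); the linker command and
    # library settings below are hypothetical examples.
    linker = Linker("g++")
    linker.generate_linker_flags()
    linker.set_library_directories(["/usr/lib"])
    linker.set_libraries(["c"])
    print(linker.get_linker_flags())
    # linker.link("program.o", "program") would then run the actual link command.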
|
|
import os
import unittest
import utils
import time
import string
import json
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(CURRENT_DIR, "fixtures", "debian", "kafka-connect")
KAFKA_READY = "bash -c 'cub kafka-ready {brokers} 40 -z $KAFKA_ZOOKEEPER_CONNECT && echo PASS || echo FAIL'"
CONNECT_HEALTH_CHECK = "bash -c 'dub wait {host} {port} 30 && curl -X GET --fail --silent {host}:{port}/connectors && echo PASS || echo FAIL'"
ZK_READY = "bash -c 'cub zk-ready {servers} 40 && echo PASS || echo FAIL'"
SR_READY = "bash -c 'cub sr-ready {host} {port} 20 && echo PASS || echo FAIL'"
TOPIC_CREATE = "bash -c ' kafka-topics --create --topic {name} --partitions 1 --replication-factor 1 --if-not-exists --zookeeper $KAFKA_ZOOKEEPER_CONNECT && echo PASS || echo FAIL' "
FILE_SOURCE_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" \
--data '{"name": "%s", "config": {"connector.class":"org.apache.kafka.connect.file.FileStreamSourceConnector", "tasks.max":"1", "topic":"%s", "file": "%s"}}' \
http://%s:%s/connectors
"""
FILE_SINK_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" \
--data '{"name": "%s", "config": {"connector.class":"org.apache.kafka.connect.file.FileStreamSinkConnector", "tasks.max":"1", "topics":"%s", "file": "%s"}}' \
http://%s:%s/connectors
"""
JDBC_SOURCE_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" --data '{ "name": "%s", "config": { "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector", "tasks.max": 1, "connection.url": "%s", "mode": "incrementing", "incrementing.column.name": "id", "timestamp.column.name": "modified", "topic.prefix": "%s", "poll.interval.ms": 1000 } }' \
http://%s:%s/connectors
"""
JDBC_SINK_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" --data '{ "name": "%s", "config": { "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector", "tasks.max": 1, "connection.url": "%s", "topics": "%s", "auto.create": "true"}}' \
http://%s:%s/connectors
"""
ES_SINK_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" --data '{ "name": "%s", "config": { "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector", "tasks.max": 1, "connection.url": "%s", "topics": "%s", "key.ignore": "true", "type.name": "kafka-connect"}}' \
http://%s:%s/connectors
"""
ACTIVEMQ_SOURCE_CONNECTOR_CREATE = """
curl -X POST -H "Content-Type: application/json" --data '{ "name": "%s", "config": { "connector.class": "io.confluent.connect.jms.JmsSourceConnector", "tasks.max": 1, "activemq.url": "%s", "jms.destination.name": "testing", "kafka.topic": "%s", "confluent.topic.bootstrap.servers": "%s"}}' \
http://%s:%s/connectors
"""
CONNECTOR_STATUS = "curl -s -X GET http://{host}:{port}/connectors/{name}/status"
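# For example (hypothetical values), a file source connector request is built with
#   FILE_SOURCE_CONNECTOR_CREATE % ("my-source", "my-topic", "/tmp/test/in.txt", "localhost", 28082)
# which POSTs the connector config to http://localhost:28082/connectors; the other
# templates above are filled in analogously.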
class ConfigTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cluster = utils.TestCluster("config-test", FIXTURES_DIR, "distributed-config.yml")
cls.cluster.start()
assert "PASS" in cls.cluster.run_command_on_service("zookeeper", ZK_READY.format(servers="localhost:2181"))
assert "PASS" in cls.cluster.run_command_on_service("kafka", KAFKA_READY.format(brokers=1))
assert "PASS" in cls.cluster.run_command_on_service("schema-registry", SR_READY.format(host="schema-registry", port="8081"))
@classmethod
def tearDownClass(cls):
cls.cluster.shutdown()
@classmethod
def is_connect_healthy_for_service(cls, service):
output = cls.cluster.run_command_on_service(service, CONNECT_HEALTH_CHECK.format(host="localhost", port=8082))
assert "PASS" in output
def test_required_config_failure(self):
self.assertTrue("CONNECT_BOOTSTRAP_SERVERS is required." in self.cluster.service_logs("failing-config", stopped=True))
self.assertTrue("CONNECT_GROUP_ID is required." in self.cluster.service_logs("failing-config-group-id", stopped=True))
self.assertTrue("CONNECT_CONFIG_STORAGE_TOPIC is required." in self.cluster.service_logs("failing-config-config-topic", stopped=True))
self.assertTrue("CONNECT_OFFSET_STORAGE_TOPIC is required." in self.cluster.service_logs("failing-config-offset-topic", stopped=True))
self.assertTrue("CONNECT_STATUS_STORAGE_TOPIC is required." in self.cluster.service_logs("failing-config-status-topic", stopped=True))
self.assertTrue("CONNECT_KEY_CONVERTER is required." in self.cluster.service_logs("failing-config-key-converter", stopped=True))
self.assertTrue("CONNECT_VALUE_CONVERTER is required." in self.cluster.service_logs("failing-config-value-converter", stopped=True))
self.assertTrue("CONNECT_REST_ADVERTISED_HOST_NAME is required." in self.cluster.service_logs("failing-config-rest-adv-host-name", stopped=True))
def test_default_config(self):
self.is_connect_healthy_for_service("default-config")
props = self.cluster.run_command_on_service("default-config", "bash -c 'cat /etc/kafka-connect/kafka-connect.properties | sort'")
expected = """
bootstrap.servers=kafka:9092
config.storage.topic=default.config
group.id=default
internal.key.converter.schemas.enable=false
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter.schemas.enable=false
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter=org.apache.kafka.connect.json.JsonConverter
offset.storage.topic=default.offsets
rest.advertised.host.name=default-config
rest.port=8082
status.storage.topic=default.status
value.converter=org.apache.kafka.connect.json.JsonConverter
zookeeper.connect=zookeeper:2181/defaultconfig
"""
self.assertEquals(props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
def test_default_config_avro(self):
self.is_connect_healthy_for_service("default-config-avro")
props = self.cluster.run_command_on_service("default-config-avro", "bash -c 'cat /etc/kafka-connect/kafka-connect.properties | sort'")
expected = """
bootstrap.servers=kafka:9092
config.storage.topic=default.config
group.id=default
internal.key.converter.schemas.enable=false
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter.schemas.enable=false
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter.schema.registry.url=http://schema-registry:8081
key.converter=io.confluent.connect.avro.AvroConverter
offset.storage.topic=default.offsets
rest.advertised.host.name=default-config
rest.port=8082
status.storage.topic=default.status
value.converter.schema.registry.url=http://schema-registry:8081
value.converter=io.confluent.connect.avro.AvroConverter
zookeeper.connect=zookeeper:2181/defaultconfig
"""
self.assertEquals(props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
def test_default_logging_config(self):
self.is_connect_healthy_for_service("default-config")
log4j_props = self.cluster.run_command_on_service("default-config", "bash -c 'cat /etc/kafka/connect-log4j.properties | sort'")
expected_log4j_props = """
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.rootLogger=INFO, stdout
"""
self.assertEquals(log4j_props.translate(None, string.whitespace), expected_log4j_props.translate(None, string.whitespace))
def create_connector(name, create_command, host, port):
utils.run_docker_command(
image="confluentinc/cp-kafka-connect",
command=create_command,
host_config={'NetworkMode': 'host'})
status = None
for i in xrange(25):
source_logs = utils.run_docker_command(
image="confluentinc/cp-kafka-connect",
command=CONNECTOR_STATUS.format(host=host, port=port, name=name),
host_config={'NetworkMode': 'host'})
connector = json.loads(source_logs)
# Retry if you see errors, connect might still be creating the connector.
if "error_code" in connector:
time.sleep(1)
else:
status = connector["connector"]["state"]
if status == "FAILED":
return status
elif status == "RUNNING":
return status
elif status == "UNASSIGNED":
time.sleep(1)
return status
def create_file_source_test_data(host_dir, file, num_records):
volumes = []
volumes.append("%s:/tmp/test" % host_dir)
print "VOLUMES : ", volumes
utils.run_docker_command(
image="confluentinc/cp-kafka-connect",
command="bash -c 'rm -rf /tmp/test/*.txt && seq {count} > /tmp/test/{name}'".format(count=num_records, name=file),
host_config={'NetworkMode': 'host', 'Binds': volumes})
def wait_and_get_sink_output(host_dir, file, expected_num_records):
    # Poll the output of the file sink and wait until the expected number of records appears in the file.
volumes = []
volumes.append("%s/:/tmp/test" % host_dir)
for i in xrange(60):
sink_record_count = utils.run_docker_command(
image="confluentinc/cp-kafka-connect",
command="bash -c '[ -e /tmp/test/%s ] && (wc -l /tmp/test/%s | cut -d\" \" -f1) || echo -1'" % (file, file),
host_config={'NetworkMode': 'host', 'Binds': volumes})
        # The bash command returns -1 if the file is not found; otherwise it returns the number of lines in the file.
if int(sink_record_count.strip()) == expected_num_records:
break
time.sleep(10)
return int(sink_record_count.strip())
class SingleNodeDistributedTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
        # Copy fixture files to the test machine.
cls.machine.ssh("mkdir -p /tmp/kafka-connect-single-node-test/jars")
local_jars_dir = os.path.join(FIXTURES_DIR, "jars")
cls.machine.scp_to_machine(local_jars_dir, "/tmp/kafka-connect-single-node-test")
cls.machine.ssh("mkdir -p /tmp/kafka-connect-single-node-test/sql")
local_sql_dir = os.path.join(FIXTURES_DIR, "sql")
cls.machine.scp_to_machine(local_sql_dir, "/tmp/kafka-connect-single-node-test")
cls.machine.ssh("mkdir -p /tmp/kafka-connect-single-node-test/scripts")
local_scripts_dir = os.path.join(FIXTURES_DIR, "scripts")
cls.machine.scp_to_machine(local_scripts_dir, "/tmp/kafka-connect-single-node-test")
cls.cluster = utils.TestCluster("distributed-single-node", FIXTURES_DIR, "distributed-single-node.yml")
cls.cluster.start()
# assert "PASS" in cls.cluster.run_command_on_service("zookeeper-bridge", ZK_READY.format(servers="localhost:2181"))
assert "PASS" in cls.cluster.run_command_on_service("zookeeper-host", ZK_READY.format(servers="localhost:32181"))
# assert "PASS" in cls.cluster.run_command_on_service("kafka-bridge", KAFKA_READY.format(brokers=1))
assert "PASS" in cls.cluster.run_command_on_service("kafka-host", KAFKA_READY.format(brokers=1))
assert "PASS" in cls.cluster.run_command_on_service("schema-registry-host", SR_READY.format(host="localhost", port="8081"))
@classmethod
def tearDownClass(cls):
cls.machine.ssh("sudo rm -rf /tmp/kafka-connect-single-node-test")
cls.cluster.shutdown()
@classmethod
def is_connect_healthy_for_service(cls, service, port):
assert "PASS" in cls.cluster.run_command_on_service(service, CONNECT_HEALTH_CHECK.format(host="localhost", port=port))
def create_topics(self, kafka_service, internal_topic_prefix, data_topic):
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".config"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".status"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".offsets"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=data_topic))
def test_file_connector_on_host_network(self):
data_topic = "one-node-file-test"
file_source_input_file = "source.test.txt"
file_sink_output_file = "sink.test.txt"
source_connector_name = "one-node-source-test"
sink_connector_name = "one-node-sink-test"
worker_host = "localhost"
worker_port = 28082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default", data_topic)
# Test from within the container
self.is_connect_healthy_for_service("connect-host-json", 28082)
# Create a file
record_count = 10000
create_file_source_test_data("/tmp/kafka-connect-single-node-test", file_source_input_file, record_count)
file_source_create_cmd = FILE_SOURCE_CONNECTOR_CREATE % (source_connector_name, data_topic, "/tmp/test/%s" % file_source_input_file, worker_host, worker_port)
source_status = create_connector(source_connector_name, file_source_create_cmd, worker_host, worker_port)
self.assertEquals(source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % (sink_connector_name, data_topic, "/tmp/test/%s" % file_sink_output_file, worker_host, worker_port)
sink_status = create_connector(sink_connector_name, file_sink_create_cmd, worker_host, worker_port)
self.assertEquals(sink_status, "RUNNING")
sink_op = wait_and_get_sink_output("/tmp/kafka-connect-single-node-test", file_sink_output_file, record_count)
self.assertEquals(sink_op, record_count)
def test_file_connector_on_host_network_with_avro(self):
data_topic = "one-node-avro-test"
file_source_input_file = "source.avro.test.txt"
file_sink_output_file = "sink.avro.test.txt"
source_connector_name = "one-node-source-test"
sink_connector_name = "one-node-sink-test"
worker_host = "localhost"
worker_port = 38082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default.avro", data_topic)
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro", 38082)
# Create a file
record_count = 10000
create_file_source_test_data("/tmp/kafka-connect-single-node-test", file_source_input_file, record_count)
file_source_create_cmd = FILE_SOURCE_CONNECTOR_CREATE % (source_connector_name, data_topic, "/tmp/test/%s" % file_source_input_file, worker_host, worker_port)
source_status = create_connector(source_connector_name, file_source_create_cmd, worker_host, worker_port)
self.assertEquals(source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % (sink_connector_name, data_topic, "/tmp/test/%s" % file_sink_output_file, worker_host, worker_port)
sink_status = create_connector(sink_connector_name, file_sink_create_cmd, worker_host, worker_port)
self.assertEquals(sink_status, "RUNNING")
sink_op = wait_and_get_sink_output("/tmp/kafka-connect-single-node-test", file_sink_output_file, record_count)
self.assertEquals(sink_op, record_count)
def test_jdbc_source_connector_on_host_network(self):
jdbc_topic_prefix = "one-node-jdbc-source-"
data_topic = "%stest" % jdbc_topic_prefix
file_sink_output_file = "file.sink.jdbcsource.test.txt"
source_connector_name = "one-node-jdbc-source-test"
sink_connector_name = "one-node-file-sink-test"
worker_host = "localhost"
worker_port = 28082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default", data_topic)
assert "PASS" in self.cluster.run_command_on_service("mysql-host", "bash -c 'mysql -u root -pconfluent < /tmp/sql/mysql-test.sql && echo PASS'")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-json", 28082)
jdbc_source_create_cmd = JDBC_SOURCE_CONNECTOR_CREATE % (
source_connector_name,
"jdbc:mysql://127.0.0.1:3306/connect_test?user=root&password=confluent",
jdbc_topic_prefix,
worker_host,
worker_port)
jdbc_source_status = create_connector(source_connector_name, jdbc_source_create_cmd, worker_host, worker_port)
self.assertEquals(jdbc_source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % (
sink_connector_name,
data_topic,
"/tmp/test/%s" % file_sink_output_file,
worker_host,
worker_port)
sink_status = create_connector(sink_connector_name, file_sink_create_cmd, worker_host, worker_port)
self.assertEquals(sink_status, "RUNNING")
record_count = 10
sink_op = wait_and_get_sink_output("/tmp/kafka-connect-single-node-test", file_sink_output_file, record_count)
self.assertEquals(sink_op, 10)
def test_jdbc_source_connector_on_host_network_with_avro(self):
jdbc_topic_prefix = "one-node-jdbc-source-avro-"
data_topic = "%stest" % jdbc_topic_prefix
file_sink_output_file = "file.sink.jdbcsource.avro.test.txt"
source_connector_name = "one-node-jdbc-source-test"
sink_connector_name = "one-node-file-sink-test"
worker_host = "localhost"
worker_port = 38082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default.avro", data_topic)
assert "PASS" in self.cluster.run_command_on_service("mysql-host", "bash -c 'mysql -u root -pconfluent < /tmp/sql/mysql-test.sql && echo PASS'")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro", 38082)
jdbc_source_create_cmd = JDBC_SOURCE_CONNECTOR_CREATE % (
source_connector_name,
"jdbc:mysql://127.0.0.1:3306/connect_test?user=root&password=confluent",
jdbc_topic_prefix,
worker_host,
worker_port)
jdbc_source_status = create_connector(source_connector_name, jdbc_source_create_cmd, worker_host, worker_port)
self.assertEquals(jdbc_source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % (
sink_connector_name,
data_topic,
"/tmp/test/%s" % file_sink_output_file,
worker_host,
worker_port)
sink_status = create_connector(sink_connector_name, file_sink_create_cmd, worker_host, worker_port)
self.assertEquals(sink_status, "RUNNING")
record_count = 10
sink_op = wait_and_get_sink_output("/tmp/kafka-connect-single-node-test", file_sink_output_file, record_count)
self.assertEquals(sink_op, 10)
def test_jdbc_sink_connector_on_host_network_with_avro(self):
topic = "test_jdbc_sink_avro"
sink_connector_name = "one-node-jdbc-sink-test"
worker_host = "localhost"
worker_port = 38082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default.avro", topic)
# Create the database.
assert "PASS" in self.cluster.run_command_on_service("mysql-host", "bash -c 'mysql -u root -pconfluent < /tmp/sql/mysql-test.sql && echo PASS'")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro", 38082)
assert "PASS" in self.cluster.run_command_on_service("connect-host-avro", 'bash -c "TOPIC=%s sh /tmp/test/scripts/produce-data-avro.sh"' % topic)
jdbc_sink_create_cmd = JDBC_SINK_CONNECTOR_CREATE % (
sink_connector_name,
"jdbc:mysql://127.0.0.1:3306/connect_test?user=root&password=confluent",
topic,
worker_host,
worker_port)
jdbc_sink_status = create_connector(sink_connector_name, jdbc_sink_create_cmd, worker_host, worker_port)
self.assertEquals(jdbc_sink_status, "RUNNING")
assert "PASS" in self.cluster.run_command_on_service("mysql-host", """ bash -c "mysql --user=root --password=confluent --silent -e 'show databases;' | grep connect_test && echo PASS || echo FAIL" """)
tmp = ""
for i in xrange(25):
if "PASS" in self.cluster.run_command_on_service("mysql-host", """ bash -c "mysql --user=root --password=confluent --silent --database=connect_test -e 'show tables;' | grep %s && echo PASS || echo FAIL" """ % topic):
tmp = self.cluster.run_command_on_service("mysql-host", """ bash -c "mysql --user=root --password=confluent --silent --database=connect_test -e 'select COUNT(*) FROM %s ;' " """ % topic)
if "10000" in tmp:
break
time.sleep(1.0)
assert "10000" in tmp
def test_es_sink_connector_on_host_network_with_avro(self):
topic = "test_es_sink_avro"
sink_connector_name = "one-node-es-sink-test"
worker_host = "localhost"
worker_port = 38082
self.create_topics("kafka-host", "default.avro", topic)
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro", 38082)
assert "PASS" in self.cluster.run_command_on_service("connect-host-avro", 'bash -c "TOPIC=%s sh /tmp/test/scripts/produce-data-avro.sh"' % topic)
es_sink_create_cmd = ES_SINK_CONNECTOR_CREATE % (
sink_connector_name,
"http://localhost:9200",
topic,
worker_host,
worker_port)
es_sink_status = create_connector(sink_connector_name, es_sink_create_cmd, worker_host, worker_port)
self.assertEquals(es_sink_status, "RUNNING")
tmp = ""
for i in xrange(25):
index_exists_cmd = 'bash -c "curl -s -f -XHEAD http://localhost:9200/%s && echo PASS || echo FAIL"' % topic
if "PASS" in self.cluster.run_command_on_service("elasticsearch-host", index_exists_cmd):
doc_count = """ bash -c "curl -s -f http://localhost:9200/_cat/count/%s | cut -d' ' -f3" """ % topic
tmp = self.cluster.run_command_on_service("elasticsearch-host", doc_count)
if "10000" in tmp:
break
time.sleep(1.0)
assert "10000" in tmp
def test_activemq_source_connector_on_host_network_with_avro(self):
activemq_topic_prefix = "one-node-activemq-source-avro-"
data_topic = "%stest" % activemq_topic_prefix
source_connector_name = "one-node-activemq-source-test"
sink_connector_name = "one-node-activemq-file-sink-test"
worker_host = "localhost"
worker_port = 38082
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-host", "default.avro", data_topic)
assert "PASS" in self.cluster.run_command_on_service("activemq-host", "bash -c 'bin/activemq producer --message MyMessage --messageCount 1000 --destination queue://TEST' | grep 'Produced: 1000 messages' && echo PASS || echo FAIL")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro", 38082)
activemq_source_create_cmd = ACTIVEMQ_SOURCE_CONNECTOR_CREATE % (
source_connector_name,
"tcp://127.0.0.1:61616",
data_topic,
"localhost:9092",
worker_host,
worker_port)
activemq_source_status = create_connector(source_connector_name, activemq_source_create_cmd, worker_host, worker_port)
self.assertEquals(activemq_source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % (
sink_connector_name,
data_topic,
"/tmp/test/%s" % file_sink_output_file,
worker_host,
worker_port)
sink_status = create_connector(sink_connector_name, file_sink_create_cmd, worker_host, worker_port)
self.assertEquals(sink_status, "RUNNING")
record_count = 1000
sink_op = wait_and_get_sink_output("/tmp/kafka-connect-single-node-test", file_sink_output_file, record_count)
self.assertEquals(sink_op, 1000)
class ClusterHostNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
        # Copy jar files to the test machine.
cls.machine.ssh("mkdir -p /tmp/kafka-connect-host-cluster-test/jars")
local_jars_dir = os.path.join(FIXTURES_DIR, "jars")
cls.machine.scp_to_machine(local_jars_dir, "/tmp/kafka-connect-host-cluster-test")
cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-host-plain.yml")
cls.cluster.start()
assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="localhost:22181,localhost:32181,localhost:42181"))
assert "PASS" in cls.cluster.run_command_on_service("kafka-1", KAFKA_READY.format(brokers=3))
@classmethod
def tearDownClass(cls):
cls.machine.ssh("sudo rm -rf /tmp/kafka-connect-host-cluster-test")
cls.cluster.shutdown()
def create_topics(self, kafka_service, internal_topic_prefix, data_topic):
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".config"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".status"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=internal_topic_prefix + ".offsets"))
assert "PASS" in self.cluster.run_command_on_service(kafka_service, TOPIC_CREATE.format(name=data_topic))
def test_cluster_running(self):
self.assertTrue(self.cluster.is_running())
@classmethod
def is_connect_healthy_for_service(cls, service, port):
assert "PASS" in cls.cluster.run_command_on_service(service, CONNECT_HEALTH_CHECK.format(host="localhost", port=port))
def test_file_connector(self):
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-1", "default", "cluster-host-file-test")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-1", 28082)
self.is_connect_healthy_for_service("connect-host-2", 38082)
self.is_connect_healthy_for_service("connect-host-3", 48082)
# Create a file
record_count = 10000
create_file_source_test_data("/tmp/connect-cluster-host-file-test", "source.test.txt", record_count)
file_source_create_cmd = FILE_SOURCE_CONNECTOR_CREATE % ("cluster-host-source-test", "cluster-host-file-test", "/tmp/test/source.test.txt", "localhost", "28082")
source_status = create_connector("cluster-host-source-test", file_source_create_cmd, "localhost", "28082")
self.assertEquals(source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % ("cluster-host-sink-test", "cluster-host-file-test", "/tmp/test/sink.test.txt", "localhost", "38082")
sink_status = create_connector("cluster-host-sink-test", file_sink_create_cmd, "localhost", "38082")
self.assertEquals(sink_status, "RUNNING")
sink_op = wait_and_get_sink_output("/tmp/connect-cluster-host-file-test", "sink.test.txt", record_count)
self.assertEquals(sink_op, record_count)
def test_file_connector_with_avro(self):
        # Creating topics upfront makes the tests go a lot faster (I suspect this is because consumers don't waste time with rebalances)
self.create_topics("kafka-1", "default.avro", "cluster-host-avro-file-test")
# Test from within the container
self.is_connect_healthy_for_service("connect-host-avro-1", 28083)
self.is_connect_healthy_for_service("connect-host-avro-2", 38083)
self.is_connect_healthy_for_service("connect-host-avro-3", 48083)
# Create a file
record_count = 10000
create_file_source_test_data("/tmp/connect-cluster-host-file-test", "source.avro.test.txt", record_count)
file_source_create_cmd = FILE_SOURCE_CONNECTOR_CREATE % ("cluster-host-source-test", "cluster-host-avro-file-test", "/tmp/test/source.avro.test.txt", "localhost", "28083")
source_status = create_connector("cluster-host-source-test", file_source_create_cmd, "localhost", "28083")
self.assertEquals(source_status, "RUNNING")
file_sink_create_cmd = FILE_SINK_CONNECTOR_CREATE % ("cluster-host-sink-test", "cluster-host-avro-file-test", "/tmp/test/sink.avro.test.txt", "localhost", "38083")
sink_status = create_connector("cluster-host-sink-test", file_sink_create_cmd, "localhost", "38083")
self.assertEquals(sink_status, "RUNNING")
sink_op = wait_and_get_sink_output("/tmp/connect-cluster-host-file-test", "sink.avro.test.txt", record_count)
self.assertEquals(sink_op, record_count)
|
|
"""
While a Controlfile is being read in, MetaServices allow for substitutions to
be made. All of the code is here instead of living in controlfile.py so you
don't have to scroll past the Controlfile class.
"""
from enum import Enum
from random import randint
import logging
module_logger = logging.getLogger('control.substitution') # pylint: disable=invalid-name
class Kind(Enum):
"""Enum for things that don't fall into the type system"""
none = 0
singular = 1
list = 2
dict = 3
DEFAULT_KIND_MAPPING = {
Kind.singular: str,
Kind.list: list,
Kind.dict: dict,
}
def _get_default_of_kind(val):
return DEFAULT_KIND_MAPPING[_determine_kind(val)]()
def _pick_most_generic(left, right):
return DEFAULT_KIND_MAPPING[
sorted([
_determine_kind(left),
_determine_kind(right)
], key=lambda x: x.value)[-1]
]()
# Make sure to call the constructor so you get a new object of that type
# instead of something else
def _determine_kind(config_option):
if isinstance(config_option, dict):
return Kind.dict
elif isinstance(config_option, list):
return Kind.list
elif config_option is None:
return Kind.none
return Kind.singular
def _build_values_for_key(k, op, x, y): # pylint: disable=invalid-name
default = _pick_most_generic(x.get(k, None), y.get(k, None))
return operations[
(
_determine_kind(x.get(k, default)),
_determine_kind(y.get(k, default)),
op
)
](x.get(k, default), y.get(k, default))
operations = {
# pylint: disable=unnecessary-lambda
    # aliases into the workings of this dict
'suffix': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'suffix')](x, y),
'prefix': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'prefix')](x, y),
'union': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'union')](x, y),
'replace': lambda x, y: y if y else x,
# Union ops
(Kind.singular, Kind.singular, 'union'): lambda x, y: [i for i in [x, y] if i] if x != y else ([x] if x else []),
(Kind.singular, Kind.list, 'union'): lambda x, yy: [x] + [y for y in yy if y != x],
(Kind.singular, Kind.dict, 'union'): lambda x, y: {
k: _build_values_for_key(k, 'union', {'shared': [x]}, y) for k in y.keys() | {'shared'}
} if x else {k: (v if isinstance(v, list) else [v]) for k, v in y.items()},
(Kind.list, Kind.singular, 'union'): lambda xx, y: xx + [y] if y not in xx else xx,
(Kind.list, Kind.list, 'union'): lambda xx, yy: xx + [y for y in yy if y not in xx],
(Kind.list, Kind.dict, 'union'): lambda xx, y: {
k: _build_values_for_key(k, 'union', {'shared': xx}, y) for k in y.keys() | {'shared'}
} if xx else y,
(Kind.dict, Kind.singular, 'union'): lambda x, y: {
k: _build_values_for_key(k, 'union', x, {'shared': [y]}) for k in x.keys() | {'shared'}
},
(Kind.dict, Kind.list, 'union'): lambda x, yy: {
k: _build_values_for_key(k, 'union', x, {'shared': yy}) for k in x.keys() | {'shared'}
} if yy else x,
(Kind.dict, Kind.dict, 'union'): lambda x, y: {
k: _build_values_for_key(k, 'union', x, y) for k in x.keys() | y.keys()
},
# Suffix Ops
(Kind.singular, Kind.singular, 'suffix'): '{0}{1}'.format,
(Kind.singular, Kind.list, 'suffix'): lambda x, y: [x] + y,
(Kind.list, Kind.singular, 'suffix'): lambda x, y: x + [y],
(Kind.list, Kind.list, 'suffix'): lambda x, y: x + y,
(Kind.list, Kind.dict, 'suffix'): lambda x, y: {
k: _build_values_for_key(k, 'suffix', {'shared': x}, y) for k in y.keys() | {'shared'}
},
(Kind.singular, Kind.dict, 'suffix'): lambda x, y: {
k: _build_values_for_key(k, 'suffix', {'shared': x}, y) for k in y.keys() | {'shared'}
},
(Kind.dict, Kind.singular, 'suffix'): lambda x, y: {
k: _build_values_for_key(k, 'suffix', x, {'shared': y}) for k in x.keys() | {'shared'}
},
(Kind.dict, Kind.list, 'suffix'): lambda x, y: {
k: _build_values_for_key(k, 'suffix', x, {'shared': y}) for k in x.keys() | {'shared'}
},
(Kind.dict, Kind.dict, 'suffix'): lambda x, y: {
k: _build_values_for_key(k, 'suffix', x, y) for k in x.keys() | y.keys()
},
# Prefix Ops
(Kind.singular, Kind.singular, 'prefix'): '{1}{0}'.format,
(Kind.singular, Kind.list, 'prefix'): lambda x, y: y + [x],
(Kind.singular, Kind.dict, 'prefix'): lambda x, y: {
k: _build_values_for_key(k, 'prefix', {'shared': x}, y) for k in y.keys() | {'shared'}
},
(Kind.list, Kind.singular, 'prefix'): lambda x, y: [y] + x,
(Kind.list, Kind.list, 'prefix'): lambda x, y: y + x,
(Kind.list, Kind.dict, 'prefix'): lambda x, y: {
k: _build_values_for_key(k, 'prefix', {'shared': x}, y) for k in y.keys() | {'shared'}
},
(Kind.dict, Kind.singular, 'prefix'): lambda x, y: {
k: _build_values_for_key(k, 'prefix', x, {'shared': y}) for k in x.keys() | {'shared'}
},
(Kind.dict, Kind.list, 'prefix'): lambda x, y: {
k: _build_values_for_key(k, 'prefix', x, {'shared': y}) for k in x.keys() | {'shared'}
},
(Kind.dict, Kind.dict, 'prefix'): lambda x, y: {
k: _build_values_for_key(k, 'prefix', x, y) for k in x.keys() | y.keys()
},
}
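# Illustrative dispatch examples (hypothetical values) for the table above:
#   operations['suffix']('/srv', ['data', 'logs'])  -> ['/srv', 'data', 'logs']
#   operations['union']([1, 2], [2, 3])             -> [1, 2, 3]
#   operations['replace']('old', 'new')             -> 'new'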
def normalize_service(service, opers, variables):
"""
    Take a service and a dict of operations and apply the transforms to the
    service, substituting variables along the way.
    Allowed args:
    - service: must be a service object that was created beforehand
    - opers: a dict of options that define transforms to a service.
      The format must conform to a Controlfile metaservice options
      definition
    - variables: a dict of variables that can be substituted into the
      service's values
    Returns: a tuple of the service name and the service with all the
    transforms applied and all the variables substituted in.
"""
# We check that the Controlfile only specifies operations we support,
# that way we aren't trusting a random user to accidentally get a
# random string eval'd.
for key, op, val in (
(key, op, val)
for key, ops in opers.items()
for op, val in ops.items() if (op in operations and
key in service.all_options)):
module_logger.log(11, "service '%s' %sing %s with '%s'.",
service.service, op, key, val)
try:
replacement = operations[op](service[key], val)
except KeyError as e:
module_logger.debug(e)
module_logger.log(11, "service '%s' missing key '%s'",
service.service, key)
module_logger.log(11, service.__dict__)
replacement = operations[op](_get_default_of_kind(val), val)
finally:
service[key] = replacement
for key in service.keys():
try:
module_logger.debug('now at %s, passing in %i vars', key, len(variables))
service[key] = _substitute_vars(service[key], variables)
except KeyError:
continue
return service['service'], service
# used exclusively by visit_every_leaf, but defined outside it so it's only compiled once
substitute_vars_decision_dict = {
# dict, list, str
(True, False, False): lambda d, vd: {k: _substitute_vars(v, vd) for k, v in d.items()},
(False, True, False): lambda d, vd: [x.format(**_merge_dicts(
vd,
{'RANDOM': str(randint(0, 10000))}
)) for x in d],
(False, False, True): lambda d, vd: d.format(**_merge_dicts(
vd,
{'RANDOM': str(randint(0, 10000))}
)),
(False, False, False): lambda d, vd: d
}
def _merge_dicts(*args):
"""
Before python 3.5 you can't do foo(**dict_one, **dict_two)
so, this function exists.
"""
if len(args) < 1:
return {}
ret = args[0].copy()
for d in args[1:]:
ret.update(d)
return ret
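# _merge_dicts example: _merge_dicts({'a': 1}, {'a': 2, 'b': 3}) -> {'a': 2, 'b': 3}; later dicts win.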
def _substitute_vars(d, var_dict): # pylint: disable=invalid-name
"""
    Visit every leaf and substitute any variables that are found. This function
    is named poorly; it sounds like it should generically visit every leaf and
    allow a function to be applied to each one. It does not. I have no need for
    that right now. If I find a need, this will probably be the place it goes.
Arguments:
- d does not necessarily need to be a dict
- var_dict should be a dictionary of variables that can be kwargs'd into
format
"""
# DEBUGGING
module_logger.debug('now at %s', str(d))
# DEBUGGING
return substitute_vars_decision_dict[(
isinstance(d, dict),
isinstance(d, list),
isinstance(d, str)
)](d, var_dict)
def satisfy_nested_options(outer, inner):
"""
Merge two Controlfile options segments for nested Controlfiles.
- Merges appends by having "{{layer_two}}{{layer_one}}"
- Merges option additions with layer_one.push(layer_two)
"""
merged = {}
for key in outer.keys() | inner.keys():
val = {}
for op in outer.get(key, {}).keys() | inner.get(key, {}).keys():
default_value = _pick_most_generic(inner.get(key, {}).get(op, None),
outer.get(key, {}).get(op, None))
joined = operations[op](inner.get(key, {}).get(op, default_value),
outer.get(key, {}).get(op, default_value))
if joined:
val[op] = joined
merged[key] = val
return merged
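# Example (hypothetical option segments): with
#   outer = {'name': {'prefix': 'outer.'}}
#   inner = {'name': {'prefix': 'inner.'}}
# satisfy_nested_options(outer, inner) returns {'name': {'prefix': 'outer.inner.'}},
# i.e. the combined prefix places the outer segment's value before the inner one's.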
|
|
#!/usr/bin/env python
#ripp'ed and adapted from: https://github.com/spesmilo/electrum/blob/master/lib/mnemonic.py
import os
import math
import unicodedata
import binascii #conversion between hex, int, and binary. Also for the crc32 thing
import random as cryptorandom
import zlib
import Keccak, ed25519#in this library
from electrum import print_error
net_version = '12'#network byte:12 for mainnet, 35 for testnet
# http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
CJK_INTERVALS = [
(0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
(0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
(0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
(0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
(0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
(0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
(0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
(0x3190, 0x319F , 'Kanbun'),
(0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
(0x2F00, 0x2FDF, 'CJK Radicals'),
(0x31C0, 0x31EF, 'CJK Strokes'),
(0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
(0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
(0x3100, 0x312F, 'Bopomofo'),
(0x31A0, 0x31BF, 'Bopomofo Extended'),
(0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
(0x3040, 0x309F, 'Hiragana'),
(0x30A0, 0x30FF, 'Katakana'),
(0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
(0x1B000, 0x1B0FF, 'Kana Supplement'),
(0xAC00, 0xD7AF, 'Hangul Syllables'),
(0x1100, 0x11FF, 'Hangul Jamo'),
(0xA960, 0xA97F, 'Hangul Jamo Extended A'),
(0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
(0x3130, 0x318F, 'Hangul Compatibility Jamo'),
(0xA4D0, 0xA4FF, 'Lisu'),
(0x16F00, 0x16F9F, 'Miao'),
(0xA000, 0xA48F, 'Yi Syllables'),
(0xA490, 0xA4CF, 'Yi Radicals'),
]
def is_CJK(c):
n = ord(c)
for imin,imax,name in CJK_INTERVALS:
if n>=imin and n<=imax: return True
return False
filenames = {
'en':'english.txt',
'es':'spanish.txt',
'ja':'japanese.txt',
'pt':'portuguese.txt',
'zh':'chinese_simplified.txt'
}
class Mnemonic(object):
# Seed derivation no longer follows BIP39
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
def __init__(self, lang=None):
lang = lang or 'en'
print_error('language', lang)
filename = filenames.get(lang[0:2], 'english.txt')
path = os.path.join(os.path.dirname(__file__), 'wordlist', filename)
s = open(path,'r').read().strip()
s = unicodedata.normalize('NFKD', s.decode('utf8'))
lines = s.split('\n')
self.wordlist = []
for line in lines:
line = line.split('#')[0]
line = line.strip(' \r')
assert ' ' not in line
if line:
self.wordlist.append(line)
print_error("wordlist has %d words"%len(self.wordlist))
#end of Class Mnemonic------------------------------
def cn_fast_hash(s):#Keccak-256 hashing
k = Keccak.Keccak()
return k.Keccak((len(s) * 4, s), 1088, 512, 32 * 8, False).lower()
#r = bitrate = 1088, c = capacity, n = output length in bits
def mn_swap_endian_4byte(st):
#this is from moneromoo's code
#lifted from https://github.com/monero-project/mininero/blob/master/mnemonic.py
r = st[6:8]+st[4:6]+st[2:4]+st[0:2]
return r
def hexToInt(h):
    s = binascii.unhexlify(h)  # hex string to bytes
    bb = len(h) * 4  # total number of bits: each hex character encodes 4 bits
    return sum(2**i * ed25519.bit(s, i) for i in range(0, bb))  # interpret bytes as a little-endian integer
def intToHex(i):
return binascii.hexlify(ed25519.encodeint(i)) #hexlify does bytes to hex
l = 2**252 + 27742317777372353535851937790883648493
def sc_reduce_key(a):
return intToHex(hexToInt(a) % l)
def prvviewkeyfrmhexseed(sk):
a = hexToInt(cn_fast_hash(sc_reduce_key(sk))) % l
return intToHex(a)
def public_key(sk):
#returns point encoded to binary .. sk is just an int..
return ed25519.encodepoint(ed25519.scalarmultbase(sk)) #pub key is not just x coord..
def publicFromSecret(sk):
#returns pubkey in hex, same as scalarmultBase
return binascii.hexlify(public_key(hexToInt(sk)))
def reverseBytes(a):  # input is a hex string; reverses its byte order (endianness)
b = [a[i:i+2] for i in range(0, len(a)-1, 2)]
return ''.join(b[::-1])
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
def b58encode(v):
a = [reverseBytes(v[i:i+16]) for i in range(0, len(v)-16, 16)]
rr = -2*((len(v) /2 )% 16)
res = ''
for b in a:
bb = hexToInt(b)
result = ''
while bb >= __b58base:
div, mod = divmod(bb, __b58base)
result = __b58chars[mod] + result
bb = div
result = __b58chars[bb] + result
res += result
result = ''
if rr < 0:
bf = hexToInt(reverseBytes(v[rr:])) #since we only reversed the ones in the array..
result = ''
while bf >= __b58base:
div, mod = divmod(bf, __b58base)
result = __b58chars[mod] + result
bf = div
result = __b58chars[bf] + result
res += result
return res
def addr_frmpubkeys(spendP, viewP, network='mainnet'):
net_version = '35' if network=='testnet' else '12'
buf = net_version + spendP + viewP#networkbyte+spendpubkey+viewpubkey
h = cn_fast_hash(buf)##Keccak-256 hashing
buf = buf + h[0:8]#first 4 bytes from above appended to 'buf'
return b58encode(buf)#Base58-encoding
def addrfrmseedhex(seedhex):#accepts Hex seed and returns public address
privviewkey = prvviewkeyfrmhexseed(seedhex)
privspendkey = sc_reduce_key(seedhex)
pubspendkey = publicFromSecret(privspendkey)
pubviewkey = publicFromSecret(privviewkey)
return addr_frmpubkeys(pubspendkey, pubviewkey)
def mnemonic_encode(i):
    # encode an integer as space-separated wordlist words (base len(wordlist))
    wordlist = Mnemonic().wordlist
    n = len(wordlist)
    words = []
    while i:
        x = i % n
        i = i / n
        words.append(wordlist[x])
    return ' '.join(words)
def mn_decode(wlist):
# lifted from https://github.com/monero-project/mininero/blob/master/mnemonic.py
out = ''
words = Mnemonic().wordlist
n = len(words)
for i in range(len(wlist) / 3): # note 24 / 3 = 8... 12 / 3 = 4..
word1, word2, word3 = wlist[3 * i:3 * i + 3]
w1 = words.index(word1)
w2 = words.index(word2)
w3 = words.index(word3)
x = w1 + n * ((n + w2 - w1) % n) + n * n * ((n + w3 - w2) % n) # as an int
b = '%08x' % x # this is big endian!
out += mn_swap_endian_4byte(b)
return out
def recoverSK(seed):
mn2 = seed.split(" ") # make array
if len(mn2) > 13:
mn2 = mn2[:24]
sk = mn_decode(mn2)
else:
mn2 = mn2[:12]
sk = cn_fast_hash(mn_decode(mn2))
return sk
def electrumChecksum(seedinit):
#lifted from https://github.com/monero-project/mininero/blob/master/mininero.py
wl = seedinit.split(" ") # make an array
if len(wl) > 13:
wl = wl[:24]
else:
wl = wl[:12]
upl = 3 # prefix length
wl2 = ''
for a in wl:
wl2 += a[:upl]
z = ((zlib.crc32(wl2) & 0xffffffff) ^ 0xffffffff) >> 0
z2 = ((z ^ 0xffffffff) >> 0) % len(wl)
return wl[z2]
def integratedaddrgen(spendpubkey, viewpubkey, pymtidhex, network='mainnet'):
net_version = '36' if network=='testnet' else '13'
buf = net_version + spendpubkey + viewpubkey + pymtidhex#networkbyte+spendpubkey+viewpubkey_pymtID
h = cn_fast_hash(buf)##Keccak-256 hashing
buf2 = buf + h[0:8]#first 4 bytes from above appended to 'buf'
#super strange how simple b58encode doesn't yield a replicable address
return b58encode(buf2[:144])+b58encode(buf2[143:])#Base58-encoding
def addrfrmseedphrase(seedphrase, network='mainnet'):
seedhex = recoverSK(seedphrase)
addy = addrfrmseedhex(seedhex)
return addy
def monerorandseedhex():#nicked from mininero.PaperWallet.skGen
return intToHex(8 * (cryptorandom.getrandbits(64 * 8)) % l)
def randpaymentidhex():
return intToHex(cryptorandom.getrandbits(64))[:16]
def mn_encode( message ):
out = []
words = Mnemonic().wordlist
n = len(words)
for i in range(0, len(message), 8):
message = message[0:i] + mn_swap_endian_4byte(message[i:i+8]) + message[i+8:]
for i in range(len(message)/8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x//n) + w1)%n
w3 = (((x//n)//n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return ' '.join(out)
def monerorandseedphrase():
randseed = monerorandseedhex()
seedmnemonicraw = mn_encode(randseed)
seedmnemonic = seedmnemonicraw + ' ' + electrumChecksum(seedmnemonicraw)
return seedmnemonic
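# Illustrative sketch (not part of the original module): ties the helpers above
# together -- generate a random 25-word seed phrase, verify its checksum word,
# and derive the corresponding standard public address. Python 2 only; assumes
# the wordlist files and the bundled Keccak/ed25519 modules are available.
def _example_wallet_roundtrip():
    phrase = monerorandseedphrase()            # 24 seed words + 1 checksum word
    words = phrase.split(' ')
    assert electrumChecksum(' '.join(words[:24])) == words[24]
    return addrfrmseedphrase(phrase)           # base58-encoded mainnet address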
|
|
import json
import atexit
import hashlib
import os
import re
import shutil
import subprocess
import shlex
import tempfile
import types
from contextlib import contextmanager
from insights.config import CommandSpec
from insights.config.static import get_config
from insights.parsers.uname import rhel_release_map
from insights.core import context as ctx
from insights.util import fs
_data_spec_config = get_config()
REPO_PATH = "./repository"
class TempMaker(object):
def __init__(self, cleanup=True, path='/tmp'):
self.cleanup = cleanup
self.path = tempfile.mkdtemp(suffix='_insights_archive', dir=path)
self.temps = set()
def get_temp(self):
path = tempfile.mkdtemp(dir=self.path)
self.temps.add(path)
return path
def close(self):
if self.cleanup:
fs.remove(self.path)
__tmp_maker = None
def __cleanup():
if __tmp_maker:
__tmp_maker.close()
def get_temp_dir():
global __tmp_maker
if not __tmp_maker:
__tmp_maker = TempMaker()
atexit.register(__cleanup)
return __tmp_maker.get_temp()
class BaseArchive(object):
root_prefix = "base_archives"
def __init__(self, name, validate=True):
self.name = name
root_prefix = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
REPO_PATH,
self.root_prefix))
if not os.path.exists(root_prefix):
os.makedirs(root_prefix)
self.root_dir = os.path.join(root_prefix, name)
if validate:
self.validate()
def validate(self):
if not self.name:
raise ValueError("Name not valid")
if not os.path.exists(self.root_dir):
raise IOError("{0} {1} can not be found".format(self.__class__.__name__, self.name))
def files(self, absolute=False):
prefix_len = 0 if absolute else len(self.root_dir) + 1
for dirpath, _, filenames in os.walk(self.root_dir):
for filename in filenames:
yield os.path.join(dirpath[prefix_len:], filename)
def strip_slash(path):
path = path.strip()
if path.startswith("/"):
path = path.lstrip("/")
return path
class TempOverlay(BaseArchive):
def __init__(self, files):
self.name = "TempOverlay"
self.cached_files = files if files else []
self.root_dir = get_temp_dir()
for i, t in enumerate(self.cached_files):
path, content = t
path = strip_slash(path)
self.cached_files[i] = (path, content)
self.add_file(path, content)
def add_file(self, path, content):
if not isinstance(path, types.StringTypes):
raise ValueError("Invalid path type: {0}".format(type(path)))
# We don't allow absolute paths
if content:
full_path = os.path.join(self.root_dir, path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(full_path, "w") as f:
f.write(content.encode("utf-8") + "\n")
def files(self, absolute=False):
for path, content in self.cached_files:
if content:
yield os.path.join(self.root_dir, path) if absolute else path
class Overlay(BaseArchive):
root_prefix = "overlays"
class Transform(object):
def __init__(self, path, force_create=True):
self.path = path
self.ops = []
self.paths = []
self.force_create = force_create
def resolve_paths(self, archive):
specs = _data_spec_config.get_spec_list(self.path)
if not specs:
specs = _data_spec_config.get_meta_spec_list(self.path)
if not specs:
raise ValueError("Invalid symbolic name: [%s]" % self.path)
for archive_file in archive.files():
for spec in specs:
if spec.matches(archive_file, suffix="$"):
self.paths.append(archive_file)
if not self.paths:
if self.force_create:
primary_value = specs[0]
if isinstance(primary_value, CommandSpec):
new_path = os.path.join(archive.root_dir, "insights_commands", primary_value.get_path())
else:
new_path = os.path.join(archive.root_dir, primary_value.get_path())
self.paths.append(new_path)
else:
raise ValueError("Symbolic name not found: {0}".format(self.path))
def replace(self, content):
def _replace(path):
parent_dir = os.path.dirname(path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
with open(path, 'w') as fp:
fp.write(content.encode("utf-8") + "\n")
self.ops.append(_replace)
return self
def append(self, content):
def _append(path):
with open(path, 'a') as fp:
fp.write(content.encode("utf-8") + "\n")
self.ops.append(_append)
return self
def sub(self, pattern, replacement):
pattern = make_regexp(pattern)
def _sub(path):
with file_replacer(path) as (fp, tf):
for line in fp:
tf.write(pattern.sub(replacement, line))
self.ops.append(_sub)
return self
def grep(self, pattern, keep=True):
pattern = make_regexp(pattern)
def _grep(path):
with file_replacer(path) as (fp, tf):
for line in fp:
                    if bool(pattern.match(line)) == keep:  # keep matches when keep=True, drop them when keep=False
                        tf.write(line)
self.ops.append(_grep)
return self
def exclude(self, pattern):
return self.grep(pattern, keep=False)
def execute(self, root_dir):
for op in self.ops:
for path in self.paths:
op(os.path.join(root_dir, path))
self.paths = []
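# Illustrative sketch (not part of the original module): Transform methods return
# self, so several edits to one symbolic name can be chained and are applied in a
# single pass by TestArchive.create_dir_structure(). The symbolic name "messages"
# is only an example and must exist in the data spec config.
def _example_transform_chain():
    return (Transform("messages")
            .grep("error")                  # keep only lines matching "error"
            .sub("secret", "[redacted]"))   # then rewrite matches in place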
class TestArchive(BaseArchive):
root_prefix = "test_archives"
def __init__(self, name, base_archive="rhel7", overlays=None, transforms=None, machine_id=None, removals=None, compression="gz", hostname=None, **kwargs):
super(TestArchive, self).__init__(name, validate=False)
if not os.path.exists(self.root_dir):
os.mkdir(self.root_dir)
self.base_archive = (
base_archive if isinstance(base_archive, BaseArchive) else BaseArchive(base_archive))
self.overlays = [Overlay(o) for o in overlays] if overlays else []
self.transforms = transforms if transforms else []
self.machine_id = machine_id if machine_id else self.generate_machine_id()
self.machine_id_override = bool(machine_id)
self.removals = removals if removals else []
self.compression = compression
self.hostname = hostname if hostname else self.machine_id
self.extra = kwargs
def create_dir_structure(self):
self.copy_archive(self.base_archive)
for spec in self.removals:
if isinstance(spec, str):
spec = _data_spec_config.get_spec_list(spec)[0]
for f in filter(spec.matches, self.files()):
path_to_unlink = os.path.join(self.root_dir, f)
if os.path.exists(path_to_unlink):
os.unlink(path_to_unlink)
for o in self.overlays:
self.copy_archive(o)
for t in self.transforms:
t.resolve_paths(self)
t.execute(self.root_dir)
self.apply_metadata()
def apply_plugin(self, plugin):
self.transform(*plugin.get("transforms", []))
self.overlay(*plugin.get("overlays", []))
def apply_metadata(self):
machine_id_path = os.path.join(self.root_dir, "etc/redhat-access-insights")
if not os.path.exists(machine_id_path):
os.makedirs(machine_id_path)
with open(os.path.join(machine_id_path, "machine-id"), "w") as f:
f.write(self.machine_id if self.machine_id_override else self.generate_machine_id())
with open(os.path.join(self.root_dir, "branch_info"), "w") as f:
f.write(self.generate_branch_info())
@contextmanager
def file_content(self, name):
with open(os.path.join(self.root_dir, name), "r") as fd:
yield fd
def transform(self, *t):
for transform in t:
self.transforms.append(transform)
return self
def overlay(self, *o):
for overlay in o:
self.overlays.append(overlay)
return self
def get_context(self):
return ctx.Context(hostname=self.hostname,
machine_id=self.machine_id,
**self.extra)
def generate_machine_id(self):
h = hashlib.sha224(self.name)
h.update(self.base_archive.name)
for overlay in self.overlays:
h.update(overlay.name)
for transform in self.transforms:
h.update(transform.path)
return "TEST-" + h.hexdigest()
def generate_branch_info(self):
return '{"remote_branch": "-1", "remote_leaf": "-1"}'
def copy_archive(self, archive):
for fname in archive.files():
src = os.path.join(archive.root_dir, fname)
dst = os.path.join(self.root_dir, fname)
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
shutil.copyfile(src, dst)
def export_options(self):
if self.compression == "gz":
return "czf", "tar.gz"
elif self.compression == "xz":
return "cJf", "tar.xz"
else:
return "cf", "tar"
def export(self, dest=".", nested_root=True):
options, ext = self.export_options()
if nested_root:
dir_root = os.path.join(self.root_dir, "..")
root_name = self.name
else:
dir_root = self.root_dir
root_name = "."
subprocess.check_call(
shlex.split("tar {0} {1}.{2} -C {3} {4}".format(
options, os.path.join(dest, self.name),
ext, dir_root, root_name)))
return self.output_path(dest)
def output_path(self, dest):
return os.path.join(dest, "{0}.{1}".format(
self.name,
self.export_options()[1]))
def clean(self):
fs.remove(self.root_dir)
def build(self, dest, force=False):
if force:
self.clean()
elif os.path.exists(self.output_path(dest)):
return self.output_path(dest)
self.create_dir_structure()
return self.export(dest)
def apply_input_data(self, input_data):
self._apply_input_data_removals(input_data)
self._apply_input_data_content(input_data)
if input_data.hostname != "hostname.example.com":
self.transforms.append(Transform("hostname").replace(input_data.hostname))
self.machine_id = input_data.machine_id
self.machine_id_override = True
if input_data.release != "default-release":
self.transforms.append(Transform("redhat-release").replace(input_data.release))
if (input_data.version != ["-1", "-1"] and
not [t.path for t in self.transforms if t.path == "uname"]):
rhel_version = ".".join(input_data.version)
for kernel, rhel in rhel_release_map.iteritems():
if rhel_version == rhel:
nvr_regex = " \d*\.\d*\.\d*-\d*"
self.transforms.append(Transform("uname").sub(nvr_regex, " " + kernel))
def _apply_input_data_removals(self, input_data):
for symbolic_name in input_data.to_remove:
self.removals.extend(_data_spec_config.get_spec_list(symbolic_name))
def _apply_input_data_content(self, input_data):
for record in input_data.records:
target = record["target"]
content = record["context"].content
path = record["context"].path
if path and "BOGUS" not in path:
# Presence of path means it's a pattern file, which means it
# probably won't exist in the current archive
self.overlays.append(TempOverlay([(path, content)]))
else:
self.transforms.append(Transform(target).replace(content))
class MultiArchive(TestArchive):
def __init__(self, name, archives=None, machine_id=None, display_name=None, **kwargs):
super(MultiArchive, self).__init__(name, machine_id=machine_id, **kwargs)
self.archives = archives if archives else []
self.metadata = None
self.display_name = display_name
self.extra_metadata = {}
def copy_archive(self, archive):
pass
def apply_metadata(self):
pass
def add_metadata(self, md):
self.extra_metadata.update(md)
return self
def build(self, dest, force=True):
if force:
self.clean()
elif os.path.exists(self.output_path(dest)):
return self.output_path(dest)
if not self.metadata:
self.metadata = self.build_metadata(self.archives)
self.transforms.append(self.metadata)
self.create_dir_structure()
for sub_archive in self.archives:
sub_archive.compression = None
sub_archive_path = sub_archive.build(dest, force=force)
shutil.move(sub_archive_path, self.root_dir)
return self.export(dest, nested_root=False)
def build_metadata(self, sub_archives):
first_ctx = sub_archives[0].get_context()
product = first_ctx.product()
parent = get_parent(sub_archives)
systems = []
for system in sub_archives:
sys_ctx = system.get_context()
systems.append({
"product": product.name,
"display_name": sys_ctx.hostname,
"system_id": sys_ctx.machine_id,
"type": sys_ctx.product().role,
"links": build_links(system, sub_archives, parent)
})
metadata = {
"product": product.name,
"display_name": self.display_name if self.display_name else parent.hostname,
"rhel_version": first_ctx.release,
"system_id": self.machine_id if self.machine_id else first_ctx.machine_id,
"systems": systems
}
metadata.update(self.extra_metadata)
return Transform("metadata.json").replace(json.dumps(metadata))
def apply_input_data(self, sub_archives):
self.metadata = self.build_metadata(sub_archives)
for i, input_data in enumerate(sub_archives):
sub_archive = TestArchive("sub-archive-%d" % i, compression=None)
sub_archive.apply_input_data(input_data)
self.archives.append(sub_archive)
def build_links(target_archive, all_archives, parent):
links = []
is_parent = target_archive == parent
if is_parent:
for archive in all_archives:
ctx = archive.get_context()
links.append({
"system_id": ctx.machine_id,
"type": ctx.product().role
})
else:
ctx = parent.get_context()
links.append({
"system_id": ctx.machine_id,
"type": ctx.product().role
})
return links
def get_parent(sub_archives):
for a in sub_archives:
if a.get_context().product().is_parent():
return a
@contextmanager
def file_replacer(path):
with open(path, 'r') as fp:
with tempfile.NamedTemporaryFile(delete=False) as tf:
yield fp, tf
tf.flush()
temp_name = tf.name
shutil.move(temp_name, path)
def make_regexp(pattern):
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
return pattern
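# Illustrative sketch (not part of the original module): build a compressed test
# archive from the "rhel7" base archive with one transform applied. Assumes a
# base archive named "rhel7" exists under REPO_PATH/base_archives and that the
# "hostname" symbolic name (also used by apply_input_data above) is defined in
# the spec config.
def _example_build_archive():
    archive = TestArchive("example",
                          base_archive="rhel7",
                          transforms=[Transform("hostname").replace("host.example.com")])
    return archive.build("/tmp", force=True)  # path to the exported tar.gz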
|
|
"""Bridges between the `asyncio` module and Tornado IOLoop.
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
it possible to combine the two libraries on the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple
loops.
.. note::
Tornado requires the `~asyncio.BaseEventLoop.add_reader` family of methods,
so it is not compatible with the `~asyncio.ProactorEventLoop` on Windows.
Use the `~asyncio.SelectorEventLoop` instead.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import tornado.concurrent
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop
from tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.asyncio_loop.stop()
def call_at(self, when, callback, *args, **kwargs):
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()), self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
# TODO: this is racy; we need a lock to ensure that the
# loop isn't closed during call_soon_threadsafe.
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage::
from tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
"""
def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False, **kwargs)
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage::
from tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
"""
def initialize(self, **kwargs):
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
"""
tf = tornado.concurrent.Future()
tornado.concurrent.chain_future(asyncio_future, tf)
return tf
def to_asyncio_future(tornado_future):
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
"""
tornado_future = convert_yielded(tornado_future)
af = asyncio.Future()
tornado.concurrent.chain_future(tornado_future, af)
return af
if hasattr(convert_yielded, 'register'):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
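# Illustrative sketch (not part of the original module): bridge a Tornado future
# into asyncio. install() makes the asyncio event loop back IOLoop.current(), as
# the module docstring recommends, and to_asyncio_future() yields an asyncio
# Future that resolves when the Tornado one does. Assumes Python 3.4+ with the
# stdlib asyncio module.
def _example_bridge():
    AsyncIOMainLoop().install()
    tf = tornado.concurrent.Future()
    af = to_asyncio_future(tf)
    tf.set_result(42)
    return asyncio.get_event_loop().run_until_complete(af)  # -> 42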
|
|
#!/usr/bin/env python
#########################################################################################
#
# Vertebral Disks Detection
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Karun Raju, Julien Cohen-Adad
# Modified: 2014-06-14
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# check if needed Python libraries are already installed or not
import os
import sys
import time
import getopt
import commands
import math
import scipy
import scipy.signal
import scipy.fftpack
import scipy.io        # for loadmat() used below
import scipy.ndimage   # for gaussian_filter1d() used below
try:
import nibabel
except ImportError:
print '--- nibabel not installed! Exit program. ---'
sys.exit(2)
try:
import numpy as np
except ImportError:
print '--- numpy not installed! Exit program. ---'
sys.exit(2)
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
import sct_utils as sct
fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
#=======================================================================================================================
# class definition
#=======================================================================================================================
class param_class:
def __init__(self):
# PATH AND FILE NAME FOR ANATOMICAL IMAGE
self.debug = 0
self.input_anat = ''
self.contrast = ''
self.mean_distance_mat = ''
self.output_path = ''
# Spinal Cord labeling Parameters
self.input_centerline = '' # optional
self.shift_AP = 17 # shift the centerline on the spine in mm default : 17 mm
self.size_AP = 6 # mean around the centerline in the anterior-posterior direction in mm
self.size_RL = 5 # mean around the centerline in the right-left direction in mm
# =======================================================
# OTHER PARAMETERS
self.verbose = 0 # display text
self.plot_graph = 0
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
print '\n\n\n==================================================='
print ' Running: sct_labeling'
print '===================================================\n'
# Initialization
start_time = time.time()
param = param_class()
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:],'hi:c:l:m:a:s:r:o:g:v:')
except getopt.GetoptError as err:
print str(err)
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-i'):
param.input_anat = arg
elif opt in ('-c'):
param.contrast = arg
elif opt in ('-l'):
param.input_centerline = arg
elif opt in ('-m'):
param.mean_distance_mat = arg
elif opt in ('-a'):
param.shift_AP = int(arg)
elif opt in ('-s'):
param.size_AP = int(arg)
elif opt in ('-r'):
param.size_RL = int(arg)
elif opt in ('-o'):
param.output_path = arg
elif opt in ('-g'):
param.plot_graph = int(arg)
elif opt in ('-v'):
param.verbose = int(arg)
# Display usage if a mandatory argument is not provided
if param.input_anat == '' or param.contrast=='' or param.input_centerline=='' or param.mean_distance_mat=='':
        print '\n \n Not all mandatory arguments were provided \n \n'
usage()
# Extract path, file and extension
input_path, file_data, ext_data = sct.extract_fname(param.input_anat)
if param.output_path=='': param.output_path = os.getcwd() + '/'
print 'Input File:',param.input_anat
print 'Center_line file:',param.input_centerline
print 'Contrast:',param.contrast
print 'Mat File:',param.mean_distance_mat,'\n'
# check existence of input files
sct.check_file_exist(param.input_anat)
sct.check_file_exist(param.input_centerline)
sct.check_file_exist(param.mean_distance_mat)
verbose = param.verbose
#==================================================
# Reorientation of the data if needed
#==================================================
command = 'fslhd ' + param.input_anat
result = commands.getoutput(command)
orientation = result[result.find('qform_xorient')+15] + result[result.find('qform_yorient')+15] + result[result.find('qform_zorient')+15]
reorient = 0
if orientation!='ASR':
sct.printv('\nReorient input volume to AP SI RL orientation...',param.verbose)
sct.run(sct.fsloutput + 'fslswapdim ' + param.input_anat + ' AP SI RL ' + input_path + 'tmp.anat_orient')
sct.run(sct.fsloutput + 'fslswapdim ' + param.input_centerline + ' AP SI RL ' + input_path + 'tmp.centerline_orient')
param.input_anat = input_path + 'tmp.anat_orient.nii'
param.input_centerline = input_path + 'tmp.centerline_orient.nii'
reorient = 1
if param.plot_graph:
import pylab as pl
#==================================================
# Loading Images
#==================================================
sct.printv('\nLoading Images...',verbose)
anat_file = nibabel.load(param.input_anat)
anat = anat_file.get_data()
hdr = anat_file.get_header()
dims = hdr['dim']
scales = hdr['pixdim']
centerline_file = nibabel.load(param.input_centerline)
centerline = centerline_file.get_data()
shift_AP = param.shift_AP*scales[1]
size_AP = param.size_AP*scales[1]
size_RL = param.size_RL*scales[3]
np.uint16(anat)
#==================================================
# Calculation of the profile intensity
#==================================================
sct.printv('\nCalculation of the profile intensity...',verbose)
#find coordinates of the centerline
X,Y,Z = np.where(centerline>0)
    #reorder x, y, z so that y is in increasing order
j = np.argsort(Y)
y = Y[j]
x = X[j]
z = Z[j]
#eliminating repetitions in y
index_double = []
for i in range(len(y)-1):
if y[i]==y[i+1]:
index_double.append(i)
mask = np.ones(len(y), dtype=bool)
mask[index_double] = False
y = y[mask]
x = x[mask]
z = z[mask]
    #shift the centerline toward the spine by shift_AP
x1 = np.round(x-shift_AP/scales[1])
#build intensity profile along the centerline
I = np.zeros((len(y),1))
for index in range(len(y)):
lim_plus = index + 5
lim_minus = index - 5
if lim_minus<0: lim_minus = 0
if lim_plus>=len(x1): lim_plus = len(x1) - 1
        # normal vector of the plane orthogonal to the centerline, i.e. the tangent vector to the centerline
Vx = x1[lim_plus] - x1[lim_minus]
Vz = z[lim_plus] - z[lim_minus]
Vy = y[lim_plus] - y[lim_minus]
d = Vx*x1[index] + Vy*y[index] + Vz*z[index]
#Averaging
for i_slice_RL in range(2*np.int(round(size_RL/scales[3]))):
for i_slice_AP in range(2*np.int(round(size_AP/scales[1]))):
result = (d - Vx*(x1[index] + i_slice_AP - size_AP - 1) - Vz*z[index])/Vy
if result > anat.shape[1]: result = anat.shape[1]
I[index] = I[index] + anat[np.int(round(x1[index]+i_slice_AP - size_AP - 1)),np.int(np.floor(result)),np.int(round(z[index] + i_slice_RL - size_RL - 1))]
start_centerline_y = y[0]
X = np.where(I==0)
mask2 = np.ones((len(y),1), dtype=bool)
mask2[X,0] = False
I = I[mask2]
if param.plot_graph:
pl.plot(I)
pl.xlabel('direction superior-inferior')
pl.ylabel('intensity')
pl.title('Intensity profile along the shifted spinal cord centerline')
pl.show()
#==================================================
# Detrending Intensity
#==================================================
sct.printv('\nDetrending Intensity...',verbose)
frequency = scipy.fftpack.fftfreq(len(I), d=1)
spectrum = np.abs(scipy.fftpack.fft(I, n=None, axis=-1, overwrite_x=False))
#Using iir filter for detrending
Wn = np.amax(frequency)/10
N = 5 #Order of the filter
b, a = scipy.signal.iirfilter(N, Wn, rp=None, rs=None, btype='low', analog=False, ftype='bessel', output='ba')
I_fit = scipy.signal.filtfilt(b, a, I, axis=-1, padtype='constant', padlen=None)
if param.plot_graph:
pl.plot(I)
pl.plot(I_fit)
pl.show()
I_detrend = np.zeros((len(I),1))
I_detrend[:,0] = I - I_fit
if param.contrast == 'T1':
I_detrend = I_detrend/(np.amax(I_detrend))
else:
I_detrend = I_detrend/abs((np.amin(I_detrend)))
if param.plot_graph:
pl.plot(I_detrend[:,0])
pl.xlabel('direction superior-inferior')
pl.ylabel('intensity')
pl.title('Intensity profile along the shifted spinal cord centerline after detrending and basic normalization')
pl.show()
    info = input('\nIs the most rostral vertebra C1 or C2? Enter 1 for yes, 0 otherwise: ')
    if info==0:
        level_start = input('Enter the level of the most rostral vertebra in the field of view: ')
else:
level_start = 2
#==================================================
# Preparing the Pattern and loading the template
#==================================================
mean_distance_dict = scipy.io.loadmat(param.mean_distance_mat)
mean_distance = (mean_distance_dict.values()[2]).T
C1C2_distance = mean_distance[0:2]
#Creating pattern
space = np.linspace(-5/scales[2], 5/scales[2], round(11/scales[2]), endpoint=True)
pattern = (np.sinc((space*scales[2])/15))**(20)
if param.contrast == 'T1':
mean_distance = mean_distance[level_start-1:len(mean_distance)]
xmax_pattern = np.argmax(pattern)
else:
mean_distance = mean_distance[level_start+1:len(mean_distance)]
xmax_pattern = np.argmin(pattern) # position of the peak in the pattern
    pixend = len(pattern) - xmax_pattern #number of pixels after the peak in the pattern
#==================================================
# step 1 : Find the First Peak
#==================================================
sct.printv('\nFinding the First Peak...',verbose)
pattern1 = np.concatenate((pattern,np.zeros(len(I_detrend[:,0])-len(pattern))))
#correlation between the pattern and the intensity profile
corr_all = scipy.signal.correlate(I_detrend[:,0],pattern1)
#finding the maxima of the correlation
loc_corr = np.arange(-np.round((len(corr_all)/2)),np.round(len(corr_all)/2)+2)
index_fp = []
for i in range(len(corr_all)):
if corr_all[i]>0.1:
if i==0:
if corr_all[i]<corr_all[i+1]:
index_fp.append(i)
elif i==(len(corr_all)-1):
if corr_all[i]<corr_all[i-1]:
index_fp.append(i)
else:
if corr_all[i]<corr_all[i+1]:
index_fp.append(i)
elif corr_all[i]<corr_all[i-1]:
index_fp.append(i)
else:
index_fp.append(i)
mask_fp = np.ones(len(corr_all), dtype=bool)
mask_fp[index_fp] = False
value = corr_all[mask_fp]
loc_corr = loc_corr[mask_fp]
loc_corr = loc_corr - I_detrend.shape[0]
if param.contrast == 'T1':
loc_first_peak = xmax_pattern - loc_corr[np.amax(np.where(value>1))]
Mcorr1 = value[np.amax(np.where(value>1))]
#building the pattern that has to be added at each iteration in step 2
if xmax_pattern<loc_first_peak:
template_truncated = np.concatenate((np.zeros((loc_first_peak-xmax_pattern)),pattern))
else:
template_truncated = pattern[(xmax_pattern-loc_first_peak-1):]
xend = np.amax(np.where(template_truncated>0.02))
pixend = xend - loc_first_peak
parameter = 0.15
else:
loc_first_peak = xmax_pattern - loc_corr[np.amax(np.where(value>0.6))]
Mcorr1 = value[np.amax(np.where(value>0.6))]
#building the pattern that has to be added at each iteration in step 2
if loc_first_peak>=0:
template_truncated = pattern[(loc_first_peak+1):]
else:
template_truncated = np.concatenate((np.zeros(abs(loc_first_peak)),pattern))
xend = len(template_truncated)
parameter = 0.05
# smoothing the intensity curve----
I_detrend[:,0] = scipy.ndimage.filters.gaussian_filter1d(I_detrend[:,0],10)
if param.plot_graph:
pl.plot(template_truncated)
pl.plot(I_detrend)
pl.title('Detection of First Peak')
pl.xlabel('direction anterior-posterior (mm)')
pl.ylabel('intensity')
pl.show()
loc_peak_I = np.arange(len(I_detrend[:,0]))
index_p = []
for i in range(len(I_detrend[:,0])):
if I_detrend[i]>parameter:
if i==0:
if I_detrend[i,0]<I_detrend[i+1,0]:
index_p.append(i)
elif i==(len(I_detrend[:,0])-1):
if I_detrend[i,0]<I_detrend[i-1,0]:
index_p.append(i)
else:
if I_detrend[i,0]<I_detrend[i+1,0]:
index_p.append(i)
elif I_detrend[i,0]<I_detrend[i-1,0]:
index_p.append(i)
else:
index_p.append(i)
mask_p = np.ones(len(I_detrend[:,0]), dtype=bool)
mask_p[index_p] = False
value_I = I_detrend[mask_p]
loc_peak_I = loc_peak_I[mask_p]
index_m = []
for i in range(len(loc_peak_I)-1):
if i==0:
if loc_peak_I[i+1]-loc_peak_I[i]<round(10/scales[1]):
index_m.append(i)
else:
if (loc_peak_I[i+1]-loc_peak_I[i])<round(10/scales[1]):
index_m.append(i)
elif (loc_peak_I[i]-loc_peak_I[i-1])<round(10/scales[1]):
index_m.append(i)
mask_I = np.ones(len(value_I), dtype=bool)
mask_I[index_m] = False
if param.contrast == 'T1':
value_I = value_I[mask_I]
else:
value_I = -value_I[mask_I]
loc_peak_I = loc_peak_I[mask_I]
#fitting the roughly found maxima with a smoothing spline
from scipy.interpolate import UnivariateSpline
fit = UnivariateSpline(loc_peak_I,value_I)
P = fit(np.arange(len(I_detrend)))
for i in range(len(I_detrend)):
if P[i]>0.1:
I_detrend[i,0] = I_detrend[i,0]/P[i]
if param.plot_graph:
pl.xlim(0,len(I_detrend)-1)
pl.plot(loc_peak_I,value_I)
pl.plot(I_detrend)
pl.plot(P,color='y')
pl.title('Setting values of peaks at one by fitting a smoothing spline')
pl.xlabel('direction superior-inferior (mm)')
pl.ylabel('normalized intensity')
pl.show(block=False)
#=====================================================================================
# step 2 : Cross correlation between the adjusted template and the intensity profile.
# Local moving of template's peak from the first peak already found
#=====================================================================================
    #On each iteration, a peak is located at the most likely position and then locally adjusted.
#The position of the next peak is calculated from previous positions
sct.printv('\nFinding Cross correlation between the adjusted template and the intensity profile...',verbose)
mean_distance_new = mean_distance
mean_ratio = np.zeros(len(mean_distance))
L = np.round(1.2*max(mean_distance)) - np.round(0.8*min(mean_distance))
corr_peak = np.zeros((L,len(mean_distance))) # corr_peak = np.nan #for T2
#loop on each peak
for i_peak in range(len(mean_distance)):
scale_min = np.round(0.80*mean_distance_new[i_peak]) - xmax_pattern - pixend
if scale_min<0:
scale_min = 0
scale_max = np.round(1.2*mean_distance_new[i_peak]) - xmax_pattern - pixend
scale_peak = np.arange(scale_min,scale_max+1)
for i_scale in range(len(scale_peak)):
template_resize_peak = np.concatenate([template_truncated,np.zeros(scale_peak[i_scale]),pattern])
if len(I_detrend[:,0])>len(template_resize_peak):
template_resize_peak1 = np.concatenate((template_resize_peak,np.zeros(len(I_detrend[:,0])-len(template_resize_peak))))
#cross correlation
corr_template = scipy.signal.correlate(I_detrend[:,0],template_resize_peak)
if len(I_detrend[:,0])>len(template_resize_peak):
val = np.dot(I_detrend[:,0],template_resize_peak1.T)
else:
I_detrend_2 = np.concatenate((I_detrend[:,0],np.zeros(len(template_resize_peak)-len(I_detrend[:,0]))))
val = np.dot(I_detrend_2,template_resize_peak.T)
corr_peak[i_scale,i_peak] = val
if param.plot_graph:
pl.xlim(0,len(I_detrend[:,0]))
pl.plot(I_detrend[:,0])
pl.plot(template_resize_peak)
pl.show(block=False)
pl.plot(corr_peak[:,i_peak],marker='+',linestyle='None',color='r')
pl.title('correlation value against the displacement of the peak (px)')
pl.show(block=False)
max_peak = np.amax(corr_peak[:,i_peak])
index_scale_peak = np.where(corr_peak[:,i_peak]==max_peak)
good_scale_peak = scale_peak[index_scale_peak][0]
Mcorr = Mcorr1
Mcorr = np.resize(Mcorr,i_peak+2)
Mcorr[i_peak+1] = np.amax(corr_peak[:,0:(i_peak+1)])
flag = 0
#If the correlation coefficient is too low, put the peak at the mean position
if i_peak>0:
if (Mcorr[i_peak+1]-Mcorr[i_peak])<0.4*np.mean(Mcorr[1:i_peak+2]-Mcorr[0:i_peak+1]):
test = i_peak
template_resize_peak = np.concatenate((template_truncated,np.zeros(round(mean_distance[i_peak])-xmax_pattern-pixend),pattern))
good_scale_peak = np.round(mean_distance[i_peak]) - xmax_pattern - pixend
flag = 1
if i_peak==0:
if (Mcorr[i_peak+1] - Mcorr[i_peak])<0.4*Mcorr[0]:
template_resize_peak = np.concatenate((template_truncated,np.zeros(round(mean_distance[i_peak])-xmax_pattern-pixend),pattern))
good_scale_peak = round(mean_distance[i_peak]) - xmax_pattern - pixend
flag = 1
if flag==0:
template_resize_peak=np.concatenate((template_truncated,np.zeros(good_scale_peak),pattern))
        #update mean_distance by an adjustment ratio
mean_distance_new[i_peak] = good_scale_peak + xmax_pattern + pixend
mean_ratio[i_peak] = np.mean(mean_distance_new[:,0:i_peak]/mean_distance[:,0:i_peak])
template_truncated = template_resize_peak
if param.plot_graph:
pl.plot(I_detrend[:,0])
pl.plot(template_truncated)
pl.xlim(0,(len(I_detrend[:,0])-1))
pl.show()
#finding the maxima of the adjusted template
minpeakvalue = 0.5
loc_disk = np.arange(len(template_truncated))
index_disk = []
for i in range(len(template_truncated)):
if template_truncated[i]>=minpeakvalue:
if i==0:
if template_truncated[i]<template_truncated[i+1]:
index_disk.append(i)
elif i==(len(template_truncated)-1):
if template_truncated[i]<template_truncated[i-1]:
index_disk.append(i)
else:
if template_truncated[i]<template_truncated[i+1]:
index_disk.append(i)
elif template_truncated[i]<template_truncated[i-1]:
index_disk.append(i)
else:
index_disk.append(i)
mask_disk = np.ones(len(template_truncated), dtype=bool)
mask_disk[index_disk] = False
loc_disk = loc_disk[mask_disk]
X1 = np.where(loc_disk > I_detrend.shape[0])
mask_disk1 = np.ones(len(loc_disk), dtype=bool)
mask_disk1[X1] = False
loc_disk = loc_disk[mask_disk1]
loc_disk = loc_disk + start_centerline_y - 1
#=====================================================================
# Step 3: Building the labeled centerline and surface
#=====================================================================
sct.printv('\nBuilding the labeled centerline and surface... ',verbose)
#orthogonal projection of the position of disk centers on the spinal cord center line
for i in range(len(loc_disk)):
#find which index of y matches with the disk
Index = np.array(np.where(y==loc_disk[i])).T
lim_plus = Index + 5
lim_minus = Index - 5
if lim_minus<1: lim_minus=1
if lim_plus>len(x): lim_plus=len(x)
#tangent vector to the centerline
Vx = x[lim_plus] - x[lim_minus]
Vz = z[lim_plus] - z[lim_minus]
Vy = y[lim_plus] - y[lim_minus]
d = Vx*x[Index] + Vy*y[Index] + Vz*z[Index]
intersection = np.ones(len(x))
for j in range(len(x)):
intersection[j] = np.abs((Vx*x[j]+Vy*y[j]+Vz*z[j]-d))
min_intersection = np.amin(intersection)
index_intersection = np.array(np.where(min_intersection==intersection)).T
loc_disk[i] = y[index_intersection[0]]
center_disk = centerline
for i in range(len(loc_disk)-1):
tmp = center_disk[:,loc_disk[i]:loc_disk[i+1],:]
tmp[np.where(tmp==1)] = i + level_start
center_disk[:,loc_disk[i]:loc_disk[i+1],:] = tmp
center_disk[np.where(center_disk==1)] = 0
#add C1 and C2
if level_start==2:
center_disk[x[0],(int(round(loc_disk[0] - C1C2_distance[1]))-1):loc_disk[0],z[0]] = 2
center_disk[x[0],(int(round(loc_disk[0] - C1C2_distance[0] - C1C2_distance[1]))-1):(round(loc_disk[0] - C1C2_distance[1])-1),z[0]] = 1
xc,yc,zc = np.where(center_disk>0)
# Write NIFTI volumes
hdr.set_data_dtype('uint8') # set imagetype to uint8
sct.printv('\nWrite NIFTI volumes...',verbose)
img = nibabel.Nifti1Image(center_disk, None, hdr)
if reorient:
file_name = param.output_path + 'tmp_centerline.nii.gz'
else:
file_name = param.output_path + param.contrast + '_centerline.nii.gz'
nibabel.save(img,file_name)
sct.printv(('.. File created:' + file_name),verbose)
if reorient:
        # map single-letter orientation codes to the axis arguments expected by fslswapdim
        axis_map = {'A': 'AP', 'P': 'PA', 'S': 'SI', 'I': 'IS', 'R': 'RL', 'L': 'LR'}
        a = axis_map[orientation[0]]
        b = axis_map[orientation[1]]
        c = axis_map[orientation[2]]
sct.run(sct.fsloutput + 'fslswapdim ' + param.output_path + 'tmp_centerline.nii.gz' + ' ' + a + ' ' + b + ' ' + c + ' ' + param.output_path + param.contrast + '_centerline.nii.gz')
# display elapsed time
elapsed_time = time.time() - start_time
print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s'
#=======================================================================================================================
# usage
#=======================================================================================================================
def usage():
print '\n' \
''+os.path.basename(__file__)+'\n' \
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' \
'USAGE: \n' \
' '+os.path.basename(__file__)+' -i <filename> -c <contrast> -l <centerline_binary_image> -m <mean_distance.mat> \n' \
'\n'\
'MANDATORY ARGUMENTS\n' \
' -i input_file \n' \
' -c contrast \n' \
' -l Centerline binary Image \n' \
' -m mean_distance.mat \n' \
'\n'\
'OPTIONAL ARGUMENTS\n' \
' -o Specify Output path.\n' \
' -a shift_AP in mm. Default value is 17mm \n' \
' -s size_AP in mm. Default value is 6mm \n' \
' -r size_RL in mm. Default value is 5mm \n' \
' -v {0,1} Set verbose=1 for printing text. Default value is 0 \n' \
' -g {0,1} Set value to 1 for plotting graphs. Default value is 0 \n' \
' -h help. Show this message.\n' \
'\n'\
'EXAMPLE:\n' \
' '+os.path.basename(__file__)+' -i t1.nii -c T1 -l segmentation_centerline_binary.nii -m mean_distance.mat\n'
sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# call main function
main()
|
|
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the LVM driver module."""
import os
import ddt
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import timeutils
from manila.common import constants as const
from manila import context
from manila import exception
from manila.share import configuration
from manila.share.drivers import lvm
from manila import test
from manila.tests.db import fakes as db_fakes
from manila.tests import fake_utils
from manila.tests.share.drivers import test_generic
CONF = cfg.CONF
def fake_share(**kwargs):
share = {
'id': 'fakeid',
'name': 'fakename',
'size': 1,
'share_proto': 'NFS',
'export_location': '127.0.0.1:/mnt/nfs/volume-00002',
}
share.update(kwargs)
return db_fakes.FakeModel(share)
def fake_snapshot(**kwargs):
snapshot = {
'id': 'fakesnapshotid',
'share_name': 'fakename',
'share_id': 'fakeid',
'name': 'fakesnapshotname',
'share_proto': 'NFS',
'export_location': '127.0.0.1:/mnt/nfs/volume-00002',
'share': {
'id': 'fakeid',
'name': 'fakename',
'size': 1,
'share_proto': 'NFS',
},
}
snapshot.update(kwargs)
return db_fakes.FakeModel(snapshot)
def fake_access(**kwargs):
access = {
'id': 'fakeaccid',
'access_type': 'ip',
'access_to': '10.0.0.2',
'access_level': 'rw',
'state': 'active',
}
access.update(kwargs)
return db_fakes.FakeModel(access)
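# Illustrative sketch (not part of the original tests): the fake_* factories above
# return db_fakes.FakeModel objects whose defaults can be overridden per test,
# while unspecified fields keep the values defined above.
def _example_fake_models():
    return fake_share(share_proto='CIFS', size=2), fake_access(access_level='ro')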
@ddt.ddt
class LVMShareDriverTestCase(test.TestCase):
"""Tests LVMShareDriver."""
def setUp(self):
super(LVMShareDriverTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._context = context.get_admin_context()
CONF.set_default('lvm_share_volume_group', 'fakevg')
CONF.set_default('lvm_share_export_ips', ['10.0.0.1', '10.0.0.2'])
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('reserved_share_percentage', 50)
self._helper_cifs = mock.Mock()
self._helper_nfs = mock.Mock()
self.fake_conf = configuration.Configuration(None)
self._db = mock.Mock()
self._os = lvm.os = mock.Mock()
self._os.path.join = os.path.join
self._driver = lvm.LVMShareDriver(self._db,
configuration=self.fake_conf)
self._driver._helpers = {
'CIFS': self._helper_cifs,
'NFS': self._helper_nfs,
}
self.share = fake_share()
self.access = fake_access()
self.snapshot = fake_snapshot()
self.server = {
'public_addresses': self.fake_conf.lvm_share_export_ips,
'instance_id': 'LVM',
'lock_name': 'manila_lvm',
}
# Used only to test compatibility with share manager
self.share_server = "fake_share_server"
def tearDown(self):
super(LVMShareDriverTestCase, self).tearDown()
fake_utils.fake_execute_set_repliers([])
fake_utils.fake_execute_clear_log()
def test_do_setup(self):
CONF.set_default('lvm_share_helpers', ['NFS=fakenfs'])
lvm.importutils = mock.Mock()
lvm.importutils.import_class.return_value = self._helper_nfs
self._driver.do_setup(self._context)
lvm.importutils.import_class.assert_has_calls([
mock.call('fakenfs')
])
def test_check_for_setup_error(self):
def exec_runner(*ignore_args, **ignore_kwargs):
return '\n fake1\n fakevg\n fake2\n', ''
expected_exec = ['vgs --noheadings -o name']
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
self._driver.check_for_setup_error()
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_check_for_setup_error_no_vg(self):
def exec_runner(*ignore_args, **ignore_kwargs):
return '\n fake0\n fake1\n fake2\n', ''
fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name',
exec_runner)])
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_deprecated_export_ip(self):
def exec_runner(*ignore_args, **ignore_kwargs):
return '\n fake1\n fakevg\n fake2\n', ''
fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name',
exec_runner)])
CONF.set_default('lvm_share_export_ip', CONF.lvm_share_export_ips[0])
CONF.set_default('lvm_share_export_ips', None)
self.assertIsNone(self._driver.check_for_setup_error())
def test_check_for_setup_error_no_export_ips(self):
def exec_runner(*ignore_args, **ignore_kwargs):
return '\n fake1\n fakevg\n fake2\n', ''
fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name',
exec_runner)])
CONF.set_default('lvm_share_export_ips', None)
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_both_export_ip_and_ips(self):
def exec_runner(*ignore_args, **ignore_kwargs):
return '\n fake1\n fakevg\n fake2\n', ''
fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name',
exec_runner)])
CONF.set_default('lvm_share_export_ip', CONF.lvm_share_export_ips[0])
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_local_path_normal(self):
share = fake_share(name='fake_sharename')
CONF.set_default('lvm_share_volume_group', 'fake_vg')
ret = self._driver._get_local_path(share)
self.assertEqual('/dev/mapper/fake_vg-fake_sharename', ret)
def test_local_path_escapes(self):
share = fake_share(name='fake-sharename')
CONF.set_default('lvm_share_volume_group', 'fake-vg')
ret = self._driver._get_local_path(share)
self.assertEqual('/dev/mapper/fake--vg-fake--sharename', ret)
def test_create_share(self):
CONF.set_default('lvm_share_mirrors', 0)
self._driver._mount_device = mock.Mock()
ret = self._driver.create_share(self._context, self.share,
self.share_server)
self._driver._mount_device.assert_called_with(
self.share, '/dev/mapper/fakevg-fakename')
expected_exec = [
'lvcreate -L 1G -n fakename fakevg',
'mkfs.ext4 /dev/mapper/fakevg-fakename',
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
self.assertEqual(self._helper_nfs.create_exports.return_value, ret)
def test_create_share_from_snapshot(self):
CONF.set_default('lvm_share_mirrors', 0)
self._driver._mount_device = mock.Mock()
snapshot_instance = {
'snapshot_id': 'fakesnapshotid',
'name': 'fakename'
}
mount_share = '/dev/mapper/fakevg-fakename'
mount_snapshot = '/dev/mapper/fakevg-fakename'
self._helper_nfs.create_export.return_value = 'fakelocation'
self._driver.create_share_from_snapshot(self._context,
self.share,
snapshot_instance,
self.share_server)
self._driver._mount_device.assert_called_with(self.share,
mount_snapshot)
expected_exec = [
'lvcreate -L 1G -n fakename fakevg',
'mkfs.ext4 /dev/mapper/fakevg-fakename',
'tune2fs -U random %s' % mount_share,
("dd count=0 if=%s of=%s iflag=direct oflag=direct" %
(mount_snapshot, mount_share)),
("dd if=%s of=%s count=1024 bs=1M iflag=direct oflag=direct" %
(mount_snapshot, mount_share)),
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_create_share_mirrors(self):
share = fake_share(size='2048')
CONF.set_default('lvm_share_mirrors', 2)
self._driver._mount_device = mock.Mock()
ret = self._driver.create_share(self._context, share,
self.share_server)
self._driver._mount_device.assert_called_with(
share, '/dev/mapper/fakevg-fakename')
expected_exec = [
'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2',
'mkfs.ext4 /dev/mapper/fakevg-fakename',
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
self.assertEqual(self._helper_nfs.create_exports.return_value, ret)
def test_deallocate_container(self):
expected_exec = ['lvremove -f fakevg/fakename']
self._driver._deallocate_container(self.share['name'])
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_deallocate_container_error(self):
def _fake_exec(*args, **kwargs):
raise exception.ProcessExecutionError(stderr="error")
self.mock_object(self._driver, '_try_execute', _fake_exec)
self.assertRaises(exception.ProcessExecutionError,
self._driver._deallocate_container,
self.share['name'])
def test_deallocate_container_not_found_error(self):
def _fake_exec(*args, **kwargs):
raise exception.ProcessExecutionError(stderr="not found")
self.mock_object(self._driver, '_try_execute', _fake_exec)
self._driver._deallocate_container(self.share['name'])
@mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock())
def test_get_share_stats(self):
with mock.patch.object(self._driver, '_stats', mock.Mock) as stats:
self.assertEqual(stats, self._driver.get_share_stats())
self.assertFalse(self._driver._update_share_stats.called)
@mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock())
def test_get_share_stats_refresh(self):
with mock.patch.object(self._driver, '_stats', mock.Mock) as stats:
self.assertEqual(stats,
self._driver.get_share_stats(refresh=True))
self._driver._update_share_stats.assert_called_once_with()
def test__unmount_device_is_busy_error(self):
def exec_runner(*ignore_args, **ignore_kwargs):
raise exception.ProcessExecutionError(stderr='device is busy')
self._os.path.exists.return_value = True
mount_path = self._get_mount_path(self.share)
expected_exec = [
"umount -f %s" % (mount_path),
]
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
self.assertRaises(exception.ShareBusyException,
self._driver._unmount_device,
self.share)
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test__unmount_device_error(self):
def exec_runner(*ignore_args, **ignore_kwargs):
raise exception.ProcessExecutionError(stderr='fake error')
mount_path = self._get_mount_path(self.share)
self._os.path.exists.return_value = True
cmd = "umount -f %s" % (mount_path)
fake_utils.fake_execute_set_repliers([(cmd, exec_runner)])
self.assertRaises(processutils.ProcessExecutionError,
self._driver._unmount_device,
self.share)
self._os.path.exists.assert_called_with(mount_path)
def test__unmount_device_rmdir_error(self):
def exec_runner(*ignore_args, **ignore_kwargs):
raise exception.ProcessExecutionError(stderr='fake error')
mount_path = self._get_mount_path(self.share)
self._os.path.exists.return_value = True
cmd = "rmdir %s" % (mount_path)
fake_utils.fake_execute_set_repliers([(cmd, exec_runner)])
self.assertRaises(processutils.ProcessExecutionError,
self._driver._unmount_device,
self.share)
self._os.path.exists.assert_called_with(mount_path)
def test_create_snapshot(self):
self._driver.create_snapshot(self._context, self.snapshot,
self.share_server)
mount_path = self._get_mount_path(self.snapshot)
expected_exec = [
("lvcreate -L 1G --name fakesnapshotname --snapshot "
"%s/fakename" % (CONF.lvm_share_volume_group,)),
"tune2fs -U random /dev/mapper/fakevg-%s" % self.snapshot['name'],
"mkdir -p " + mount_path,
"mount /dev/mapper/fakevg-fakesnapshotname " + mount_path,
"chmod 777 " + mount_path,
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_ensure_share(self):
device_name = '/dev/mapper/fakevg-fakename'
with mock.patch.object(self._driver,
'_mount_device',
mock.Mock(return_value='fake_location')):
self._driver.ensure_share(self._context, self.share,
self.share_server)
self._driver._mount_device.assert_called_with(self.share,
device_name)
self._helper_nfs.create_exports.assert_called_once_with(
self.server, self.share['name'], recreate=True)
def test_delete_share(self):
mount_path = self._get_mount_path(self.share)
self._helper_nfs.remove_export(mount_path, self.share['name'])
self._driver._delete_share(self._context, self.share)
def test_delete_snapshot(self):
mount_path = self._get_mount_path(self.snapshot)
expected_exec = [
'umount -f %s' % mount_path,
'rmdir %s' % mount_path,
'lvremove -f fakevg/fakesnapshotname',
]
self._driver.delete_snapshot(self._context, self.snapshot,
self.share_server)
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
def test_delete_share_invalid_share(self):
self._driver._get_helper = mock.Mock(
side_effect=exception.InvalidShare(reason='fake'))
self._driver.delete_share(self._context, self.share, self.share_server)
def test_delete_share_process_execution_error(self):
self.mock_object(
self._helper_nfs,
'remove_export',
mock.Mock(side_effect=exception.ProcessExecutionError))
self._driver._delete_share(self._context, self.share)
self._helper_nfs.remove_exports.assert_called_once_with(
self.server,
self.share['name'])
@ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
def test_update_access(self, access_level):
access_rules = [test_generic.get_fake_access_rule(
'1.1.1.1', access_level), ]
add_rules = [test_generic.get_fake_access_rule(
'2.2.2.2', access_level), ]
delete_rules = [test_generic.get_fake_access_rule(
'3.3.3.3', access_level), ]
self._driver.update_access(self._context, self.share, access_rules,
add_rules=add_rules,
delete_rules=delete_rules,
share_server=self.server)
(self._driver._helpers[self.share['share_proto']].
update_access.assert_called_once_with(
self.server, self.share['name'],
access_rules, add_rules=add_rules, delete_rules=delete_rules))
@ddt.data(('1001::1001/129', None, False), ('1.1.1.256', None, False),
('1001::1001', None, [6]), ('1.1.1.0', None, [4]),
(None, ['1001::1001', '1.1.1.0'], [6, 4]),
(None, ['1001::1001'], [6]), (None, ['1.1.1.0'], [4]),
(None, ['1001::1001/129', '1.1.1.0'], False))
@ddt.unpack
def test_get_configured_ip_versions(
self, configured_ip, configured_ips, configured_ip_version):
CONF.set_default('lvm_share_export_ip', configured_ip)
CONF.set_default('lvm_share_export_ips', configured_ips)
if configured_ip_version:
self.assertEqual(configured_ip_version,
self._driver.get_configured_ip_versions())
else:
self.assertRaises(exception.InvalidInput,
self._driver.get_configured_ip_versions)
def test_mount_device(self):
mount_path = self._get_mount_path(self.share)
ret = self._driver._mount_device(self.share, 'fakedevice')
expected_exec = [
"mkdir -p %s" % (mount_path,),
"mount fakedevice %s" % (mount_path,),
"chmod 777 %s" % (mount_path,),
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
self.assertEqual(mount_path, ret)
def test_mount_device_already(self):
def exec_runner(*args, **kwargs):
if 'mount' in args and '-l' not in args:
raise exception.ProcessExecutionError()
else:
return 'fakedevice', ''
self.mock_object(self._driver, '_execute', exec_runner)
mount_path = self._get_mount_path(self.share)
ret = self._driver._mount_device(self.share, 'fakedevice')
self.assertEqual(mount_path, ret)
def test_mount_device_error(self):
def exec_runner(*args, **kwargs):
if 'mount' in args and '-l' not in args:
raise exception.ProcessExecutionError()
else:
return 'fake', ''
self.mock_object(self._driver, '_execute', exec_runner)
self.assertRaises(exception.ProcessExecutionError,
self._driver._mount_device, self.share, 'fakedevice')
def test_get_helper(self):
share_cifs = fake_share(share_proto='CIFS')
share_nfs = fake_share(share_proto='NFS')
share_fake = fake_share(share_proto='FAKE')
self.assertEqual(self._driver._get_helper(share_cifs),
self._helper_cifs)
self.assertEqual(self._driver._get_helper(share_nfs),
self._helper_nfs)
self.assertRaises(exception.InvalidShare, self._driver._get_helper,
share_fake)
def _get_mount_path(self, share):
return os.path.join(CONF.lvm_share_export_root, share['name'])
def test__unmount_device(self):
mount_path = self._get_mount_path(self.share)
self._os.path.exists.return_value = True
self.mock_object(self._driver, '_execute')
self._driver._unmount_device(self.share)
self._driver._execute.assert_any_call('umount', '-f', mount_path,
run_as_root=True)
self._driver._execute.assert_any_call('rmdir', mount_path,
run_as_root=True)
self._os.path.exists.assert_called_with(mount_path)
def test_extend_share(self):
local_path = self._driver._get_local_path(self.share)
self.mock_object(self._driver, '_extend_container')
self.mock_object(self._driver, '_execute')
self._driver.extend_share(self.share, 3)
self._driver._extend_container.assert_called_once_with(self.share,
local_path, 3)
self._driver._execute.assert_called_once_with('resize2fs', local_path,
run_as_root=True)
def test_ssh_exec_as_root(self):
command = ['fake_command']
self.mock_object(self._driver, '_execute')
self._driver._ssh_exec_as_root('fake_server', command)
self._driver._execute.assert_called_once_with('fake_command',
check_exit_code=True)
def test_ssh_exec_as_root_with_sudo(self):
command = ['sudo', 'fake_command']
self.mock_object(self._driver, '_execute')
self._driver._ssh_exec_as_root('fake_server', command)
self._driver._execute.assert_called_once_with(
'fake_command', run_as_root=True, check_exit_code=True)
def test_extend_container(self):
self.mock_object(self._driver, '_try_execute')
self._driver._extend_container(self.share, 'device_name', 3)
self._driver._try_execute.assert_called_once_with(
'lvextend',
'-L',
'3G',
'-n',
'device_name',
run_as_root=True)
def test_get_share_server_pools(self):
expected_result = [{
'pool_name': 'lvm-single-pool',
'total_capacity_gb': 33,
'free_capacity_gb': 22,
'reserved_percentage': 0,
}, ]
self.mock_object(
self._driver,
'_execute',
mock.Mock(return_value=("VSize 33g VFree 22g", None)))
self.assertEqual(expected_result,
self._driver.get_share_server_pools())
self._driver._execute.assert_called_once_with(
'vgs', 'fakevg', '--rows', '--units', 'g', run_as_root=True)
def test_copy_volume_error(self):
def _fake_exec(*args, **kwargs):
if 'count=0' in args:
raise exception.ProcessExecutionError()
self.mock_object(self._driver, '_execute',
mock.Mock(side_effect=_fake_exec))
self._driver._copy_volume('src', 'dest', 1)
self._driver._execute.assert_any_call('dd', 'count=0', 'if=src',
'of=dest', 'iflag=direct',
'oflag=direct', run_as_root=True)
self._driver._execute.assert_any_call('dd', 'if=src', 'of=dest',
'count=1024', 'bs=1M',
run_as_root=True)
@ddt.data(('1.1.1.1', 4), ('1001::1001', 6))
@ddt.unpack
def test_update_share_stats(self, configured_ip, version):
CONF.set_default('lvm_share_export_ip', configured_ip)
self.mock_object(self._driver, 'get_share_server_pools',
mock.Mock(return_value='test-pool'))
self._driver._update_share_stats()
self.assertEqual('LVM', self._driver._stats['share_backend_name'])
self.assertEqual('NFS_CIFS', self._driver._stats['storage_protocol'])
self.assertEqual(50, self._driver._stats['reserved_percentage'])
self.assertTrue(self._driver._stats['snapshot_support'])
self.assertEqual('LVMShareDriver', self._driver._stats['driver_name'])
self.assertEqual('test-pool', self._driver._stats['pools'])
self.assertEqual(version == 4, self._driver._stats['ipv4_support'])
self.assertEqual(version == 6, self._driver._stats['ipv6_support'])
def test_revert_to_snapshot(self):
mock_update_access = self.mock_object(self._helper_nfs,
'update_access')
self._driver.revert_to_snapshot(self._context, self.snapshot,
[], [], self.share_server)
snap_lv = "%s/fakesnapshotname" % (CONF.lvm_share_volume_group)
share_lv = "%s/fakename" % (CONF.lvm_share_volume_group)
share_mount_path = self._get_mount_path(self.snapshot['share'])
snapshot_mount_path = self._get_mount_path(self.snapshot)
expected_exec = [
('umount -f %s' % snapshot_mount_path),
("rmdir %s" % snapshot_mount_path),
("umount -f %s" % share_mount_path),
("rmdir %s" % share_mount_path),
("lvconvert --merge %s" % snap_lv),
("lvcreate -L 1G --name fakesnapshotname --snapshot %s" %
share_lv),
('tune2fs -U random /dev/mapper/%s-fakesnapshotname' %
CONF.lvm_share_volume_group),
("mkdir -p %s" % share_mount_path),
("mount /dev/mapper/%s-fakename %s" %
(CONF.lvm_share_volume_group, share_mount_path)),
("chmod 777 %s" % share_mount_path),
("mkdir -p %s" % snapshot_mount_path),
("mount /dev/mapper/fakevg-fakesnapshotname "
"%s" % snapshot_mount_path),
("chmod 777 %s" % snapshot_mount_path),
]
self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
self.assertEqual(4, mock_update_access.call_count)
def test_snapshot_update_access(self):
access_rules = [{
'access_type': 'ip',
'access_to': '1.1.1.1',
'access_level': 'ro',
}]
add_rules = [{
'access_type': 'ip',
'access_to': '2.2.2.2',
'access_level': 'ro',
}]
delete_rules = [{
'access_type': 'ip',
'access_to': '3.3.3.3',
'access_level': 'ro',
}]
self._driver.snapshot_update_access(self._context, self.snapshot,
access_rules, add_rules,
delete_rules)
(self._driver._helpers[self.snapshot['share']['share_proto']].
update_access.assert_called_once_with(
self.server, self.snapshot['name'],
access_rules, add_rules=add_rules, delete_rules=delete_rules))
@mock.patch.object(timeutils, 'utcnow', mock.Mock(
return_value='fake_date'))
def test_update_share_usage_size(self):
mount_path = self._get_mount_path(self.share)
self._os.path.exists.return_value = True
self.mock_object(
self._driver,
'_execute',
mock.Mock(return_value=(
"Mounted on Used "
+ mount_path + " 1G", None)))
update_shares = self._driver.update_share_usage_size(
self._context, [self.share, ])
self._os.path.exists.assert_called_with(mount_path)
self.assertEqual(
[{'id': 'fakeid', 'used_size': '1',
'gathered_at': 'fake_date'}],
update_shares)
self._driver._execute.assert_called_once_with(
'df', '-l', '--output=target,used',
'--block-size=g')
@mock.patch.object(timeutils, 'utcnow', mock.Mock(
return_value='fake_date'))
def test_update_share_usage_size_multiple_share(self):
share1 = fake_share(id='fakeid_get_fail', name='get_fail')
share2 = fake_share(id='fakeid_success', name='get_success')
share3 = fake_share(id='fakeid_not_exist', name='get_not_exist')
mount_path2 = self._get_mount_path(share2)
mount_path3 = self._get_mount_path(share3)
self._os.path.exists.side_effect = [True, True, False]
self.mock_object(
self._driver,
'_execute',
mock.Mock(return_value=(
"Mounted on Used "
+ mount_path2 + " 1G", None)))
update_shares = self._driver.update_share_usage_size(
self._context, [share1, share2, share3])
self._os.path.exists.assert_called_with(mount_path3)
self.assertEqual(
[{'gathered_at': 'fake_date',
'id': 'fakeid_success', 'used_size': '1'}],
update_shares)
self._driver._execute.assert_called_with(
'df', '-l', '--output=target,used',
'--block-size=g')
def test_update_share_usage_size_fail(self):
def _fake_exec(*args, **kwargs):
raise exception.ProcessExecutionError(stderr="error")
self.mock_object(self._driver, '_execute', _fake_exec)
self.assertRaises(exception.ProcessExecutionError,
self._driver.update_share_usage_size,
self._context,
[self.share])
def test_get_backend_info(self):
backend_info = self._driver.get_backend_info(self._context)
self.assertEqual(
{'export_ips': ','.join(self.server['public_addresses']),
'db_version': mock.ANY},
backend_info)
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from website.models import NodeLog
from website.project.model import Auth
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from tests.utils import assert_logs, assert_not_logs
class NodeCRUDTestCase(ApiTestCase):
def setUp(self):
super(NodeCRUDTestCase, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.title = 'Cool Project'
self.new_title = 'Super Cool Project'
self.description = 'A Properly Cool Project'
self.new_description = 'An even cooler project'
self.category = 'data'
self.new_category = 'project'
self.public_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=True,
creator=self.user)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=False,
creator=self.user)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345')
def make_node_payload(node, attributes):
return {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
class TestContributorDetail(NodeCRUDTestCase):
def setUp(self):
super(TestContributorDetail, self).setUp()
self.public_url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.public_project, self.user._id)
self.private_url_base = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.private_project._id, '{}')
self.private_url = self.private_url_base.format(self.user._id)
def test_get_public_contributor_detail(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.public_project._id, self.user._id))
def test_get_private_node_contributor_detail_contributor_auth(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project, self.user._id))
def test_get_private_node_contributor_detail_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_get_private_node_contributor_detail_not_logged_in(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_get_private_node_non_contributor_detail_contributor_auth(self):
res = self.app.get(self.private_url_base.format(self.user_two._id), auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_private_node_invalid_user_detail_contributor_auth(self):
res = self.app.get(self.private_url_base.format('invalid'), auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_unregistered_contributor_detail_show_up_as_name_associated_with_project(self):
project = ProjectFactory(creator=self.user, public=True)
project.add_unregistered_contributor('Robert Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
unregistered_contributor = project.contributors[1]
url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, project._id, unregistered_contributor._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
assert_equal(res.json['data']['attributes'].get('unregistered_contributor'), 'Robert Jackson')
project_two = ProjectFactory(creator=self.user, public=True)
project_two.add_unregistered_contributor('Bob Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, project_two._id, unregistered_contributor._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
assert_equal(res.json['data']['attributes'].get('unregistered_contributor'), 'Bob Jackson')
def test_detail_includes_index(self):
res = self.app.get(self.public_url)
data = res.json['data']
assert_in('index', data['attributes'].keys())
assert_equal(data['attributes']['index'], 0)
other_contributor = AuthUserFactory()
self.public_project.add_contributor(other_contributor, auth=Auth(self.user), save=True)
other_contributor_detail = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.public_project, other_contributor._id)
res = self.app.get(other_contributor_detail)
assert_equal(res.json['data']['attributes']['index'], 1)
class TestNodeContributorOrdering(ApiTestCase):
def setUp(self):
super(TestNodeContributorOrdering, self).setUp()
self.contributors = [AuthUserFactory() for number in range(1, 10)]
self.user_one = AuthUserFactory()
self.project = ProjectFactory(creator=self.user_one)
for contributor in self.contributors:
self.project.add_contributor(
contributor,
permissions=[permissions.READ, permissions.WRITE],
visible=True,
save=True
)
self.contributors.insert(0, self.user_one)
self.base_contributor_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.project._id)
self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_one._id)
self.contributor_urls = ['/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, contributor._id)
for contributor in self.contributors]
self.last_position = len(self.contributors) - 1
@staticmethod
def _get_contributor_user_id(contributor):
return contributor['embeds']['users']['data']['id']
def test_initial_order(self):
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
found_contributors = False
for i in range(0, len(self.contributors)):
assert_equal(self.contributors[i]._id, self._get_contributor_user_id(contributor_list[i]))
assert_equal(i, contributor_list[i]['attributes']['index'])
found_contributors = True
assert_true(found_contributors, "Did not compare any contributors.")
@assert_logs(NodeLog.CONTRIB_REORDERED, 'project')
def test_move_top_contributor_down_one_and_also_log(self):
contributor_to_move = self.contributors[0]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_contributor = self.contributors[1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[1]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
def test_move_second_contributor_up_one_to_top(self):
contributor_to_move = self.contributors[1]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_first_contributor = self.contributors[0]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[1]), former_first_contributor._id)
def test_move_top_contributor_down_to_bottom(self):
contributor_to_move = self.contributors[0]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_contributor = self.contributors[1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': self.last_position
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[self.last_position]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
def test_move_bottom_contributor_up_to_top(self):
contributor_to_move = self.contributors[self.last_position]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_to_last_contributor = self.contributors[self.last_position - 1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
assert_equal(
self._get_contributor_user_id(contributor_list[self.last_position]),
former_second_to_last_contributor._id
)
def test_move_second_to_last_contributor_down_past_bottom(self):
contributor_to_move = self.contributors[self.last_position - 1]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_last_contributor = self.contributors[self.last_position]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': self.last_position + 10
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[self.last_position]), contributor_to_move)
assert_equal(
self._get_contributor_user_id(contributor_list[self.last_position - 1]),
former_last_contributor._id
)
def test_move_top_contributor_down_to_second_to_last_position_with_negative_numbers(self):
contributor_to_move = self.contributors[0]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_contributor = self.contributors[1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': -1
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=self.user_one.auth)
assert_equal(res_patch.status_code, 200)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[self.last_position - 1]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
def test_write_contributor_fails_to_move_top_contributor_down_one(self):
contributor_to_move = self.contributors[0]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_contributor = self.contributors[1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = self.app.patch_json_api(url, data, auth=former_second_contributor.auth, expect_errors=True)
assert_equal(res_patch.status_code, 403)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[1]), former_second_contributor._id)
def test_non_authenticated_fails_to_move_top_contributor_down_one(self):
contributor_to_move = self.contributors[0]._id
contributor_id = '{}-{}'.format(self.project._id, contributor_to_move)
former_second_contributor = self.contributors[1]
url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = self.app.patch_json_api(url, data, expect_errors=True)
assert_equal(res_patch.status_code, 401)
self.project.reload()
res = self.app.get('/{}nodes/{}/contributors/'.format(API_BASE, self.project._id), auth=self.user_one.auth)
assert_equal(res.status_code, 200)
contributor_list = res.json['data']
assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
assert_equal(self._get_contributor_user_id(contributor_list[1]), former_second_contributor._id)
class TestNodeContributorUpdate(ApiTestCase):
def setUp(self):
super(TestNodeContributorUpdate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
self.url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
def test_node_update_invalid_data(self):
res = self.app.put_json_api(self.url_creator, "Incorrect data", auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
res = self.app.put_json_api(self.url_creator, ["Incorrect data"], auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_change_contributor_no_id(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_change_contributor_correct_id(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
def test_change_contributor_incorrect_id(self):
data = {
'data': {
'id': '12345',
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_change_contributor_no_type(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_change_contributor_incorrect_type(self):
data = {
'data': {
'id': self.user_two._id,
'type': 'Wrong type.',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
@assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -3)
@assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -2)
@assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_change_contributor_permissions(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.ADMIN)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE, permissions.ADMIN])
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.WRITE)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.READ)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ])
@assert_logs(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, 'project', -2)
@assert_logs(NodeLog.MADE_CONTRIBUTOR_VISIBLE, 'project')
def test_change_contributor_bibliographic(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['bibliographic'], False)
self.project.reload()
assert_false(self.project.get_visible(self.user_two))
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['bibliographic'], True)
self.project.reload()
assert_true(self.project.get_visible(self.user_two))
@assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -2)
@assert_logs(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, 'project')
def test_change_contributor_permission_and_bibliographic(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.READ)
assert_equal(attributes['bibliographic'], False)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ])
assert_false(self.project.get_visible(self.user_two))
@assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_not_change_contributor(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': None,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.WRITE)
assert_equal(attributes['bibliographic'], True)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
assert_true(self.project.get_visible(self.user_two))
def test_invalid_change_inputs_contributor(self):
contrib_id = '{}-{}'.format(self.project._id, self.user_two._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': 'invalid',
'bibliographic': 'invalid'
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
assert_true(self.project.get_visible(self.user_two))
@assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_change_admin_self_with_other_admin(self):
self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
contrib_id = '{}-{}'.format(self.project._id, self.user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['permission'], permissions.WRITE)
self.project.reload()
assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE])
def test_change_admin_self_without_other_admin(self):
contrib_id = '{}-{}'.format(self.project._id, self.user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.project.reload()
assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE, permissions.ADMIN])
def test_remove_all_bibliographic_statuses_contributors(self):
self.project.set_visible(self.user_two, False, save=True)
contrib_id = '{}-{}'.format(self.project._id, self.user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.project.reload()
assert_true(self.project.get_visible(self.user))
def test_change_contributor_non_admin_auth(self):
data = {
'data': {
'id': self.user_two._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = self.app.put_json_api(self.url_contributor, data, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
assert_true(self.project.get_visible(self.user_two))
def test_change_contributor_not_logged_in(self):
data = {
'data': {
'id': self.user_two._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = self.app.put_json_api(self.url_contributor, data, expect_errors=True)
assert_equal(res.status_code, 401)
self.project.reload()
assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
assert_true(self.project.get_visible(self.user_two))
class TestNodeContributorPartialUpdate(ApiTestCase):
def setUp(self):
super(TestNodeContributorPartialUpdate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
self.url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
def test_patch_bibliographic_only(self):
creator_id = '{}-{}'.format(self.project._id, self.user._id)
data = {
'data': {
'id': creator_id,
'type': 'contributors',
'attributes': {
'bibliographic': False,
}
}
}
res = self.app.patch_json_api(self.url_creator, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE, permissions.ADMIN])
assert_false(self.project.get_visible(self.user))
def test_patch_permission_only(self):
user_three = AuthUserFactory()
self.project.add_contributor(user_three, permissions=[permissions.READ, permissions.WRITE], visible=False, save=True)
url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, user_three._id)
contributor_id = '{}-{}'.format(self.project._id, user_three._id)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
}
}
}
res = self.app.patch_json_api(url_contributor, data, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.get_permissions(user_three), [permissions.READ])
assert_false(self.project.get_visible(user_three))
class TestNodeContributorDelete(ApiTestCase):
def setUp(self):
super(TestNodeContributorDelete, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.user_three = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
self.url_user = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
self.url_user_two = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
self.url_user_three = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_three._id)
@assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_contributor_admin(self):
res = self.app.delete(self.url_user_two, auth=self.user.auth)
assert_equal(res.status_code, 204)
self.project.reload()
assert_not_in(self.user_two, self.project.contributors)
def test_remove_contributor_non_admin_is_forbidden(self):
self.project.add_contributor(self.user_three, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
res = self.app.delete(self.url_user_three, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_in(self.user_three, self.project.contributors)
@assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_non_admin(self):
self.project.add_contributor(self.user_three, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
res = self.app.delete(self.url_user_three, auth=self.user_three.auth)
assert_equal(res.status_code, 204)
self.project.reload()
assert_not_in(self.user_three, self.project.contributors)
def test_remove_contributor_non_contributor(self):
res = self.app.delete(self.url_user_two, auth=self.user_three.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_in(self.user_two, self.project.contributors)
def test_remove_contributor_not_logged_in(self):
res = self.app.delete(self.url_user_two, expect_errors=True)
assert_equal(res.status_code, 401)
self.project.reload()
assert_in(self.user_two, self.project.contributors)
def test_remove_non_contributor_admin(self):
assert_not_in(self.user_three, self.project.contributors)
res = self.app.delete(self.url_user_three, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
self.project.reload()
assert_not_in(self.user_three, self.project.contributors)
def test_remove_non_existing_user_admin(self):
url_user_fake = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, 'fake')
res = self.app.delete(url_user_fake, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_contributor_not_unique_admin(self):
self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
res = self.app.delete(self.url_user, auth=self.user.auth)
assert_equal(res.status_code, 204)
self.project.reload()
assert_not_in(self.user, self.project.contributors)
@assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_can_remove_self_as_contributor_not_unique_admin(self):
self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
res = self.app.delete(self.url_user_two, auth=self.user_two.auth)
assert_equal(res.status_code, 204)
self.project.reload()
assert_not_in(self.user_two, self.project.contributors)
def test_remove_self_contributor_unique_admin(self):
res = self.app.delete(self.url_user, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.project.reload()
assert_in(self.user, self.project.contributors)
def test_can_not_remove_only_bibliographic_contributor(self):
self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
self.project.set_visible(self.user_two, False, save=True)
res = self.app.delete(self.url_user, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.project.reload()
assert_in(self.user, self.project.contributors)
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, ColoredInsaneAsylums
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# DETAILS:
# File Name: accession.py
# Description: This file contains source code for the core functionality of the
# archival accessioning workflow.
#
# Creator: Nitin Verma (nitin dot verma at utexas dot edu)
# Update: Milind Siddhanti (milindsiddhanti at utexas dot edu)
#
# IMPORT NEEDED MODULES
import argparse
import csv
import sys
import os
import glob
import shutil
from time import time, localtime, strftime
import metadatautilspkg.globalvars as globalvars
import metadatautilspkg.errorcodes as errorcodes
from metadatautilspkg.metadatautils import *
from metadatautilspkg.dbfunctions import *
from metadatautilspkg.premis import *
from metadatautilspkg.adminmetadatautils import *
def main():
argParser = defineCommandLineOptions()
parseCommandLineArgs(argParser, sys.argv[1:])
print_info("Extension: {}".format(globalvars.ext))
if globalvars.move == True:
print_info("'move' option selected\nCAUTION: Files will be moved rather than copied")
print_info("quiet mode: ", globalvars.quietMode)
# POPULATE LIST OF SOURCE-DESTINATION PAIRS
if globalvars.batchMode == True: # Batch mode. Read and validate CSV file.
# Read CSV file contents into globalvars.transferList.
try:
# Open the CSV file in read-only mode.
csvFileHandle = open (globalvars.csvFile, "r")
except IOError as ioErrorCsvRead:
print_error(ioErrorCsvRead)
print_error(errorcodes.ERROR_CANNOT_OPEN_CSV_FILE["message"])
exit(errorcodes.ERROR_CANNOT_OPEN_CSV_FILE["code"])
# CSV file successfully opened.
csvReader = csv.reader(csvFileHandle) # Create an iterable object from the
# CSV file using csv.reader().
# Extract the first row to check if it is a header.
firstRow = next(csvReader, None)
print_info("Checking the header row. Header: {}".format(firstRow))
if firstRow == None or isHeaderValid(firstRow) == False: # This also serves as a check for an empty CSV file
print_error(errorcodes.ERROR_INVALID_HEADER_ROW["message"])
exit(errorcodes.ERROR_INVALID_HEADER_ROW["code"])
# Extract Arrange info from header row
numArrangementInfoCols = 0
arrangementInfoTags = {}
for col in firstRow:
if col.startswith(globalvars.ARRANGEMENT_INFO_MARKER):
numArrangementInfoCols += 1
arrangementInfoTags[numArrangementInfoCols] = col.split(':')[-1] + globalvars.ARRANGEMENT_INFO_LABEL_SUFFIX
globalvars.minNumCols += numArrangementInfoCols
globalvars.errorList.append(firstRow + ["Comments"])
        # This for loop reads and checks the format (i.e., presence of at least two
# columns per row) of the CSV file, and populates 'globalvars.transferList' which will
# be used for the actual file transfers.
#
# FORMAT RULES/ASSUMPTIONS for the CSV file:
# 1. The FIRST column specifies SOURCE path
# 2. The SECOND column specifies DESTINATION path
# 3. The remaining columns must be named like "arrange:<Arrange Info Field/Tag>",
        #       e.g., "arrange:series", "ead:sub-series", etc.
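        # To illustrate the rules above, a batch CSV might look like the
        # following (hypothetical paths and tags; the exact header names must
        # be whatever isHeaderValid() accepts):
        #
        #   <source header>,<destination header>,arrange:series,arrange:sub-series
        #   /data/ingest/batch01,/archive/collection01,Correspondence,Incoming
        #   /data/ingest/batch02,/archive/collection01,Photographs,Portraits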
rowNum = 1
for row in csvReader:
if len(row) < globalvars.minNumCols: # Check if the row has AT LEAST globalvars.minNumCols elements.
print_error("Row number {} in {} is not a valid input. This row will not be processed.".format(rowNum, globalvars.csvFile))
emptyStrings = ["" for i in range(0, globalvars.minNumCols - len(row) - 1)] # To align the error message to be under "Comments"
globalvars.errorList.append(row + emptyStrings + ["Not a valid input"])
else:
globalvars.transferList.append(row)
rowNum += 1
csvFileHandle.close() # Close the CSV file as it will not be needed
# from this point on.
print_info("Number of directories to transfer: {}".format(len(globalvars.transferList)))
# READ-IN THE LABEL DICTIONARY
globalvars.labels = readLabelDictionary()
print_info("The following labels will be used for labeling metadata items in the database records:")
#for key in globalvars.labels:
#print_info(key, ":", globalvars.labels[key])
print_info(globalvars.labels)
# READ-IN THE CONTROLLED VOCABULARY
globalvars.vocab = readControlledVocabulary()
# CREATE DATABASE CONNECTION
dbParams = init_db() # TODO: there needs to be a check to determine if the
# database connection was successful or not.
globalvars.dbHandle = dbParams["handle"]
globalvars.dbCollection = dbParams["collection_name"]
# PROCESS ALL TRANSFERS
for row in globalvars.transferList:
src = row[0]
dst = row[1]
arrangementInfo = {}
for arrangementId in range(1, numArrangementInfoCols + 1):
arrangementInfo[arrangementInfoTags[arrangementId]] = row[arrangementId + 1]
print_info("Arrangement Info Data: {}".format(arrangementInfo))
# Check if the source directory exists
if os.path.isdir(src) != True: # Source directory doesn't exist.
# Add row to globalvars.errorList, and skip to next
# row
print_info("The source directory '{}' does not exist. Skipping to next transfer.".format(src))
globalvars.errorList.append(row + ["Source does not exist"])
continue
transferStatus = transferFiles(src, dst, arrangementInfo)
if transferStatus['status'] != True:
# Something bad happened during this particular transfer.
# Add this row to the list globalvars.errorList to keep a record of it.
# Also append diagnostic information about why the transfer was not
# successful.
#row.append(transferStatus['comment'])
globalvars.errorList.append(row + [transferStatus['comment']])
# WRITE ALL ROWS THAT COULD NOT BE PROCESSED TO A CSV FILE
if len(globalvars.errorList) > 1: # Because at least the header row will always be there!
errorsCSVFileName = ("transfer_errors_" + strftime("%Y-%m-%d_%H%M%S", localtime(time())) + ".csv")
try:
errorsCSVFileHandle = open(errorsCSVFileName, 'w')
except IOError as ioErrorCsvWrite:
print_error(ioErrorCsvWrite)
print_error(errorcodes.ERROR_CANNOT_WRITE_CSV_FILE["message"])
exit (errorcodes.ERROR_CANNOT_WRITE_CSV_FILE["code"])
csvWriter = csv.writer(errorsCSVFileHandle, delimiter=',', quotechar='"', lineterminator='\n')
for row in globalvars.errorList:
csvWriter.writerow(row)
errorsCSVFileHandle.close()
print_error("Not all transfers were successful. A record of rows for which errors were encountered has been written to the following file: {}".format(errorsCSVFileName))
def defineCommandLineOptions():
#PARSE AND VALIDATE COMMAND-LINE OPTIONS
argParser = argparse.ArgumentParser(description="Migrate Files for Preservation")
argParser.add_argument('-e', '--extension', nargs=1, default='*', help='Specify file EXTENSION for files that need to be migrated.')
#argParser.add_argument('srcDstPair', nargs='*', metavar='SRC DST', help='Migrate files from SRC to DST. DST will be created if it does not exist. These arguments will be ignored if the -f option is specified.')
argParser.add_argument('-f', '--file', nargs=1, default=False, metavar='CSVPATH', help='CSVPATH is the path to the CSV file to be used with the -f option.')
argParser.add_argument('-q', '--quiet', action='store_true', help='Enable this option to suppress all logging, except critical error messages.')
argParser.add_argument('-m', '--move', action='store_true', help='Enable this option to move the files instead of copying them.')
return argParser
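# Example invocations (hypothetical file names, shown for illustration only):
#
#   python accession.py -e tif -f transfers.csv      # batch mode, *.tif files only
#   python accession.py -q -m -f transfers.csv       # quiet mode, move instead of copy
#
# The positional SRC DST form handled in parseCommandLineArgs() below relies on
# the 'srcDstPair' argument, which is commented out in defineCommandLineOptions().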
def parseCommandLineArgs(argParser, args):
parsedArgs = argParser.parse_args(args)
if len(args) == 0:
print_error(errorcodes.ERROR_INVALID_ARGUMENT_STRING["message"])
argParser.print_help()
exit(errorcodes.ERROR_INVALID_ARGUMENT_STRING["code"])
globalvars.ext = parsedArgs.extension[0]
globalvars.quietMode = parsedArgs.quiet
globalvars.move = parsedArgs.move
if parsedArgs.file:
globalvars.batchMode = True
globalvars.csvFile = parsedArgs.file[0]
else:
globalvars.batchMode = False
        if len(parsedArgs.srcDstPair) == 2:
src = parsedArgs.srcDstPair[0]
dst = parsedArgs.srcDstPair[1]
globalvars.transferList.append([src, dst])
else:
print_error(errorcodes.ERROR_INVALID_ARGUMENT_STRING["message"])
argParser.print_help()
exit(errorcodes.ERROR_INVALID_ARGUMENT_STRING["code"])
def transferFiles(src, dst, arrangementInfo):
"""transferFiles(): Carries out the actual transfer of files.
Arguments:
[1] Source - path to source directory;
[2] Destination - path to destination directory.
Returns:
True:
False:
"""
returnData = {} # This dict will be returned to the caller. The 'status'
# element of this dict would be a binary value (True, or
# False) indicating success or failure, and the 'comment'
# element would be a string specifying "Success" in case
# the transfers were successful, OR a string describing
# what went wrong.
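    # For example (illustrative values only), the returned dict may look like:
    #   {'status': True, 'comment': 'Success. 12 out of 12 files transferred'}
    # or
    #   {'status': False, 'comment': "No files found with extension 'tif'!"}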
# Convert the source and destination paths to absolute paths.
    # While this is not important as far as the file
    # movement is concerned (i.e., via the shutil functions),
    # it is important from the metadata point of view.
src = os.path.abspath(src)
dst = os.path.abspath(dst)
srcDirectory = src
dstDirectory = dst
# Check if the destination directory exists.
# Create it if it doesn't exist.
if os.path.isdir(dstDirectory) != True: # Destination directory doesn't exist
try:
os.makedirs(dst) # This will create all the intermediate
# directories required.
except os.error as osError:
print_error(osError)
            globalvars.errorList.append([src, dst, str(osError)])
print_error(errorcodes.ERROR_CANNOT_CREATE_DESTINATION_DIRECTORY["message"].format(dst))
exit(errorcodes.ERROR_CANNOT_CREATE_DESTINATION_DIRECTORY["code"])
        prevHighestSerialNo = 0  # Start from zero so that the first file gets
                                 # serial number 1, since this destination
                                 # directory has just been created.
else:
prevHighestSerialNo = getHighestSerialNo(srcDirectory)
print_info("Previous highest file serial number: {}".format(prevHighestSerialNo))
try:
# Create a list of files with the given extension within the src
# directory.
fileList = sorted(glob.glob(os.path.join(src, "*." + globalvars.ext)))
totalNumFiles = len(fileList)
numFilesTransferred = 0 # Keeps track of number of files successfully
# transferred in the current run.
if totalNumFiles == 0: # That no file with the extension globalvars.ext was
# found is an 'anomalous' condition and should
# be treated as an unsuccessful transfer just
# to caution the user. This cautioning will be
# very helpful in cases of large batch files
returnData['status'] = False
print_error("No files found with extension '{}'!".format(globalvars.ext))
returnData['comment'] = "No files found with extension '{}'!".format(globalvars.ext)
return returnData
currentSerialNo = prevHighestSerialNo + 1
# Loop over all files with the extension globalvars.ext
for fileName in fileList[prevHighestSerialNo:]:
srcFileName = os.path.basename(fileName)
srcFileExt = srcFileName.split('.')[-1]
# Initialize a metadata record object
recordParams = {}
recordParams["fileName"] = fileName
recordParams["fileSize"] = os.path.getsize(fileName)
recordParams["fmtName"] = getFileFormatName(srcFileName)
recordParams["fmtVer"] = getFileFormatVersion(srcFileName)
recordParams[globalvars.ARRANGEMENT_INFO_LABEL] = arrangementInfo
metadataRecord = initMetadataRecord(recordParams)
# Extract the unique id from the just-initialized record
uniqueId = metadataRecord["_id"]
idAssignmentEvent = createIDAssignmentEvent(uniqueId)
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(idAssignmentEvent)
# Create the unique destination file path using the dst (destination
# directory), and the uniqueId generated using ObjectId()
dstFilePrelimPath = os.path.join(dst, uniqueId, srcFileName)
dstFileUniquePath = os.path.join(dst, uniqueId, uniqueId + "." + srcFileExt)
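            # For example, a source file "scan001.tif" that is assigned the
            # (hypothetical) ObjectId "5a1b2c3d4e5f607080901011" ends up at:
            #   <dst>/5a1b2c3d4e5f607080901011/5a1b2c3d4e5f607080901011.tif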
dstFileName = os.path.basename(dstFileUniquePath)
# Calculate the checksum for the source file. This will be used
# later to verify the contents of the file once it has been copied
# or moved to the destination directory
srcChecksum = getFileChecksum(fileName)
msgDigestCalcEvent = createMsgDigestCalcEvent(srcChecksum, globalvars.CHECKSUM_ALGO)
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(msgDigestCalcEvent)
# Record the checksum, and the checksum algorithm in the 'object' entity
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.obj_entity.name][globalvars.labels.obj_chars.name][globalvars.labels.obj_fixity.name][globalvars.labels.obj_msgdgst_algo.name] = globalvars.CHECKSUM_ALGO
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.obj_entity.name][globalvars.labels.obj_chars.name][globalvars.labels.obj_fixity.name][globalvars.labels.obj_msgdgst.name] = srcChecksum
# To be conservative about the transfers, this script implements the move operation as:
# 1. COPY the file from source to destination.
# 2. Compare the checksum of the copied file to that of the original.
# 3. DELETE the copied file in case the checksums do not match.
# 4. DELETE the original file in case the checksums match.
path, nameFile = os.path.split(dstFilePrelimPath)
print_info("{} '{}' from '{}' to '{}'".format("Moving" if globalvars.move == True else "Copying", os.path.basename(fileName), src, path))
# create folder with the unique_id generated. The folder structure for all the files to be copied is
# dst/uniqueid/uniqueid.ext
if os.path.isdir(path) != True: # Destination directory doesn't exist
try:
os.makedirs(path) # This will create all the intermediate
# directories required.
shutil.copy(fileName, dstFilePrelimPath)
except os.error as osError:
print_error(osError)
                    globalvars.errorList.append([src, dst, str(osError)])
print_error(errorcodes.ERROR_CANNOT_CREATE_DESTINATION_DIRECTORY["message"].format(path))
exit(errorcodes.ERROR_CANNOT_CREATE_DESTINATION_DIRECTORY["code"])
if globalvars.move == True:
eventType = "migration"
else:
eventType = "replication"
fileCopyEvent = createFileCopyEvent(eventType, fileName, dstFilePrelimPath)
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(fileCopyEvent)
# Rename the destination file
os.rename(dstFilePrelimPath, dstFileUniquePath)
filenameChangeEvent = createFilenameChangeEvent(dstFilePrelimPath, dstFileUniquePath)
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(filenameChangeEvent)
# Calculate the checksum for the file once copied to the destination.
dstChecksum = getFileChecksum(dstFileUniquePath)
# Compare the checksums of the source and destination files to
# verify the success of the transfer. If checksums do not match,
# it means that something went wrong during the transfer. In the
# case of such a mismatch, we remove the destination file, and the corresponding
# DB record.
if dstChecksum != srcChecksum:
print_error("Checksum mismatch for '{}', and '{}'".format(fileName, dstFileUniquePath))
# Remove the destination file
try:
os.remove(dstFileUniquePath)
except os.error as ExceptionFileRemoval:
print_error(ExceptionFileRemoval)
print_error(errorcodes.ERROR_CANNOT_REMOVE_FILE["message"])
exit(errorcodes.ERROR_CANNOT_REMOVE_FILE["code"])
# Remove entry from DB if present
deleteRecordFromDB(uniqueId)
returnData['status'] = False
returnData['comment'] = "Checksum mismatch for '{}', and '{}'. Aborted transfers for remaining files in directory.".format(fileName, dstFileUniquePath)
return returnData # Something went wrong, return False
else:
fixityCheckEvent = createFixityCheckEvent(True, dstChecksum)
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(fixityCheckEvent)
metadataRecord = updateSerialNumber(metadataRecord, currentSerialNo)
accessionEvent = createAccessionEvent()
metadataRecord[globalvars.labels.pres_entity.name][globalvars.labels.evt_parent_entity.name].append(accessionEvent)
# Insert the record into the DB before removing the original file (in the case of a move).
dbRetValue = insertRecordInDB(metadataRecord)
if dbRetValue != uniqueId:
print_error("DB Insert operation not successful. Unique ID returned by DB does not match the one provided by the script. Exiting.")
returnData['status'] = False
returnData['comment'] = "DB Insert operation not successful."
return(returnData)
if globalvars.move == True:
try:
os.remove(fileName)
except os.error as ExceptionFileRemoval:
print_error("Cannot remove file '{}' from source '{}' after the move. Only a copy was made to the destination.".format(srcFileName, srcDirectory))
print_error(ExceptionFileRemoval)
print_error(errorcodes.ERROR_CANNOT_REMOVE_FILE["message"])
exit(errorcodes.ERROR_CANNOT_REMOVE_FILE["code"])
# Increment the file serial number for the next transfer
# and the corresponding DB record
currentSerialNo += 1
numFilesTransferred += 1
except Exception as shutilException: # Catching top-level exception to simplify the code.
print_error(shutilException)
print_error("Cannot complete transfer for '{}', and '{}'".format(src, dst))
returnData['status'] = False
commentString = "Error: " + str(shutilException)
returnData['comment'] = commentString
return returnData # Something went wrong, return False
returnData['status'] = True
commentString = "Success. {} out of {} files transferred".format(numFilesTransferred, totalNumFiles)
returnData['comment'] = commentString
return returnData # Transfers were successfully completed, return True
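# A minimal, standalone sketch (defined here for illustration only; nothing in this
# script calls it) of the conservative copy-verify-delete pattern described in the
# comments above. It assumes SHA-256 as the checksum algorithm, whereas the script
# itself uses whatever globalvars.CHECKSUM_ALGO specifies.
def _copyVerifyDeleteSketch(srcPath, dstPath, move=False, chunkSize=1024 * 1024):
    import hashlib
    import os
    import shutil

    def digest(path):
        h = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunkSize), b''):
                h.update(chunk)
        return h.hexdigest()

    srcChecksum = digest(srcPath)
    shutil.copy2(srcPath, dstPath)        # 1. COPY the file to the destination
    if digest(dstPath) != srcChecksum:    # 2. compare the checksums
        os.remove(dstPath)                # 3. mismatch: remove the bad copy
        return False
    if move:
        os.remove(srcPath)                # 4. match: remove the original on a move
    return True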
if __name__ == "__main__":
main()
|
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DependentPhoneNumberList(ListResource):
def __init__(self, version, account_sid, address_sid):
"""
Initialize the DependentPhoneNumberList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param address_sid: The sid
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
"""
super(DependentPhoneNumberList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'address_sid': address_sid,
}
self._uri = '/Accounts/{account_sid}/Addresses/{address_sid}/DependentPhoneNumbers.json'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams DependentPhoneNumberInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists DependentPhoneNumberInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: list that will contain up to limit results
:rtype: list[twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of DependentPhoneNumberInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DependentPhoneNumberInstance
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberPage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return DependentPhoneNumberPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DependentPhoneNumberList>'
class DependentPhoneNumberPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the DependentPhoneNumberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param address_sid: The sid
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberPage
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberPage
"""
super(DependentPhoneNumberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of DependentPhoneNumberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
"""
return DependentPhoneNumberInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
address_sid=self._solution['address_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DependentPhoneNumberPage>'
class DependentPhoneNumberInstance(InstanceResource):
def __init__(self, version, payload, account_sid, address_sid):
"""
Initialize the DependentPhoneNumberInstance
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
"""
super(DependentPhoneNumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload['friendly_name'],
'phone_number': payload['phone_number'],
'lata': payload['lata'],
'rate_center': payload['rate_center'],
'latitude': deserialize.decimal(payload['latitude']),
'longitude': deserialize.decimal(payload['longitude']),
'region': payload['region'],
'postal_code': payload['postal_code'],
'iso_country': payload['iso_country'],
'address_requirements': payload['address_requirements'],
'capabilities': payload['capabilities'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'address_sid': address_sid,
}
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def phone_number(self):
"""
:returns: The phone_number
:rtype: unicode
"""
return self._properties['phone_number']
@property
def lata(self):
"""
:returns: The lata
:rtype: unicode
"""
return self._properties['lata']
@property
def rate_center(self):
"""
:returns: The rate_center
:rtype: unicode
"""
return self._properties['rate_center']
@property
def latitude(self):
"""
:returns: The latitude
:rtype: unicode
"""
return self._properties['latitude']
@property
def longitude(self):
"""
:returns: The longitude
:rtype: unicode
"""
return self._properties['longitude']
@property
def region(self):
"""
:returns: The region
:rtype: unicode
"""
return self._properties['region']
@property
def postal_code(self):
"""
:returns: The postal_code
:rtype: unicode
"""
return self._properties['postal_code']
@property
def iso_country(self):
"""
:returns: The iso_country
:rtype: unicode
"""
return self._properties['iso_country']
@property
def address_requirements(self):
"""
:returns: The address_requirements
:rtype: unicode
"""
return self._properties['address_requirements']
@property
def capabilities(self):
"""
:returns: The capabilities
:rtype: unicode
"""
return self._properties['capabilities']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DependentPhoneNumberInstance>'
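# A minimal usage sketch (kept as a comment so this module stays import-safe).
# Assumptions: the standard twilio Client is available, credentials come from the
# environment, and the address SID below is a placeholder.
#
#     import os
#     from twilio.rest import Client
#
#     client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])
#     for number in client.addresses('ADXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                         .dependent_phone_numbers.list(limit=20):
#         print(number.phone_number, number.iso_country)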
|
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `diag_plus_low_rank_affine.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.diag_plus_low_rank_affine import DiagPlusLowRankAffine
from distrax._src.bijectors.tanh import Tanh
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class DiagPlusLowRankAffineTest(parameterized.TestCase):
def test_jacobian_is_constant_property(self):
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
def test_properties(self):
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=2. * jnp.ones((4, 2)),
v_matrix=3. * jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
np.testing.assert_allclose(bij.diag, np.ones(4), atol=1e-6)
np.testing.assert_allclose(bij.u_matrix, np.full((4, 2), 2.), atol=1e-6)
np.testing.assert_allclose(bij.v_matrix, np.full((4, 2), 3.), atol=1e-6)
np.testing.assert_allclose(
bij.matrix, np.eye(4) + np.full((4, 4), 12.), atol=1e-6)
np.testing.assert_allclose(bij.bias, np.zeros((4,)), atol=1e-6)
@parameterized.named_parameters(
('diag is 0d', {'diag': np.ones(()),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros((4,))}),
('u_matrix is 1d', {'diag': np.ones((4,)),
'u_matrix': np.ones((4,)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros((4,))}),
('v_matrix is 1d', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4,)),
'bias': np.zeros((4,))}),
('bias is 0d', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros(())}),
('diag has wrong dim', {'diag': np.ones((3,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros((4,))}),
('u_matrix has wrong dim', {'diag': np.ones((4,)),
'u_matrix': np.ones((3, 2)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros((4,))}),
('v_matrix has wrong dim', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((3, 2)),
'bias': np.zeros((4,))}),
('bias has wrong dim', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2)),
'bias': np.zeros((3,))}),
)
def test_raises_with_invalid_parameters(self, params):
with self.assertRaises(ValueError):
DiagPlusLowRankAffine(**params)
@chex.all_variants
@parameterized.parameters(
((5,), (5,), (5,), (5,), (5,)),
((5,), (), (), (), ()),
((), (5,), (), (), ()),
((), (), (5,), (), ()),
((), (), (), (5,), ()),
((), (), (), (), (5,)),
)
def test_batched_parameters(self, diag_batch_shape, u_matrix_batch_shape,
v_matrix_batch_shape, bias_batch_shape,
input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), diag_batch_shape + (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), u_matrix_batch_shape + (4, 1))
v_matrix = jax.random.uniform(next(prng), v_matrix_batch_shape + (4, 1))
bias = jax.random.normal(next(prng), bias_batch_shape + (4,))
bij = DiagPlusLowRankAffine(diag, u_matrix, v_matrix, bias)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
z, logdet_inv = self.variant(bij.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_shapes(
diag_batch_shape, u_matrix_batch_shape, v_matrix_batch_shape,
bias_batch_shape, input_batch_shape)
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
diag = jnp.broadcast_to(diag, output_batch_shape + (4,)).reshape((-1, 4))
u_matrix = jnp.broadcast_to(
u_matrix, output_batch_shape + (4, 1)).reshape((-1, 4, 1))
v_matrix = jnp.broadcast_to(
v_matrix, output_batch_shape + (4, 1)).reshape((-1, 4, 1))
bias = jnp.broadcast_to(bias, output_batch_shape + (4,)).reshape((-1, 4))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bij = DiagPlusLowRankAffine(diag[i], u_matrix[i], v_matrix[i], bias[i])
this_y, this_logdet_fwd = self.variant(bij.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bij.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-6)
np.testing.assert_allclose(this_z, z[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_identity_initialization(self, batch_shape, param_shape):
bij = DiagPlusLowRankAffine(
diag=jnp.ones(param_shape + (4,)),
u_matrix=jnp.zeros(param_shape + (4, 1)),
v_matrix=jnp.zeros(param_shape + (4, 1)),
bias=jnp.zeros(param_shape + (4,)))
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bij.forward_and_log_det)(x)
np.testing.assert_array_equal(y, x)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)}
)
def test_inverse_methods(self, batch_shape, param_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), param_shape + (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), param_shape + (4, 1))
v_matrix = jax.random.uniform(next(prng), param_shape + (4, 1))
bias = jax.random.normal(next(prng), param_shape + (4,))
bij = DiagPlusLowRankAffine(diag, u_matrix, v_matrix, bias)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
def test_forward_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), (4, 1))
v_matrix = jax.random.uniform(next(prng), (4, 1))
bias = jax.random.normal(next(prng), (4,))
bij = DiagPlusLowRankAffine(diag, u_matrix, v_matrix, bias)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bij.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
@chex.all_variants
def test_inverse_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), (4, 1))
v_matrix = jax.random.uniform(next(prng), (4, 1))
bias = jax.random.normal(next(prng), (4,))
bij = DiagPlusLowRankAffine(diag, u_matrix, v_matrix, bias)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bij.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
def test_raises_on_invalid_input_shape(self):
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.subTest(fn=fn):
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
x = np.zeros((4,))
f(x, bij)
def test_same_as_itself(self):
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
self.assertTrue(bij.same_as(bij))
def test_not_same_as_others(self):
bij = DiagPlusLowRankAffine(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
other = DiagPlusLowRankAffine(
diag=2. * jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)),
bias=jnp.zeros((4,)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
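# A minimal standalone sketch (not part of the test suite) of the affine map this
# bijector implements, y = (diag(d) + U V^T) x + b, using the same 4-dimensional
# shapes as the tests above; the function name is introduced here for illustration.
def _manual_forward_example():
  diag = jnp.array([1., 2., 3., 4.])
  u_matrix = jnp.ones((4, 2))
  v_matrix = jnp.ones((4, 2))
  bias = jnp.array([0.5, 0., -0.5, 1.])
  bij = DiagPlusLowRankAffine(diag, u_matrix, v_matrix, bias)
  x = jnp.array([1., -1., 2., 0.])
  expected = (jnp.diag(diag) + u_matrix @ v_matrix.T) @ x + bias
  np.testing.assert_allclose(bij.forward(x), expected, atol=1e-6)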
if __name__ == '__main__':
absltest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context as oslo_context
from oslo_utils import reflection
from oslo_utils import timeutils
from senlin.common import context as senlin_context
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common import schema
from senlin.common import utils
from senlin.db import api as db_api
from senlin.engine import environment
CHECK_RESULTS = (
CHECK_OK, CHECK_ERROR,
) = (
'OK', 'ERROR',
)
class Policy(object):
'''Base class for policies.'''
PROFILE_TYPE = 'ANY'
KEYS = (
TYPE, VERSION, DESCRIPTION, PROPERTIES,
) = (
'type', 'version', 'description', 'properties',
)
spec_schema = {
TYPE: schema.String(
_('Name of the policy type.'),
required=True,
),
VERSION: schema.String(
_('Version number of the policy type.'),
required=True,
),
DESCRIPTION: schema.String(
_('A text description of policy.'),
default='',
),
PROPERTIES: schema.Map(
_('Properties for the policy.'),
required=True,
)
}
properties_schema = {}
def __new__(cls, name, spec, **kwargs):
"""Create a new policy of the appropriate class.
:param name: The name for the policy.
:param spec: A dictionary containing the spec for the policy.
:param kwargs: Keyword arguments for policy creation.
:returns: An instance of a specific sub-class of Policy.
"""
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
if cls != Policy:
PolicyClass = cls
else:
PolicyClass = environment.global_env().get_policy(type_str)
return super(Policy, cls).__new__(PolicyClass)
def __init__(self, name, spec, **kwargs):
"""Initialize a policy instance.
:param name: The name for the policy.
:param spec: A dictionary containing the detailed policy spec.
:param kwargs: Keyword arguments for initializing the policy.
:returns: An instance of a specific sub-class of Policy.
"""
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
self.name = name
self.spec = spec
self.id = kwargs.get('id', None)
self.type = kwargs.get('type', type_str)
self.user = kwargs.get('user')
self.project = kwargs.get('project')
self.domain = kwargs.get('domain')
self.data = kwargs.get('data', {})
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.spec_data = schema.Spec(self.spec_schema, spec)
self.properties = schema.Spec(self.properties_schema,
self.spec.get(self.PROPERTIES, {}))
self.singleton = True
@classmethod
def _from_db_record(cls, record):
'''Construct a policy object from a database record.'''
kwargs = {
'id': record.id,
'type': record.type,
'user': record.user,
'project': record.project,
'domain': record.domain,
'created_at': record.created_at,
'updated_at': record.updated_at,
'data': record.data,
}
return cls(record.name, record.spec, **kwargs)
@classmethod
def load(cls, context, policy_id=None, db_policy=None, project_safe=True):
"""Retrieve and reconstruct a policy object from DB.
:param context: DB context for object retrieval.
:param policy_id: Optional parameter specifying the ID of policy.
:param db_policy: Optional parameter referencing a policy DB object.
:param project_safe: Optional parameter specifying whether only
policies belonging to the context.project will be
loaded.
:returns: An object of the proper policy class.
"""
if db_policy is None:
db_policy = db_api.policy_get(context, policy_id,
project_safe=project_safe)
if db_policy is None:
raise exception.PolicyNotFound(policy=policy_id)
return cls._from_db_record(db_policy)
@classmethod
def load_all(cls, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""Retrieve all policies from database."""
records = db_api.policy_get_all(context, limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
for record in records:
yield cls._from_db_record(record)
@classmethod
def delete(cls, context, policy_id):
db_api.policy_delete(context, policy_id)
def store(self, context):
'''Store the policy object into database table.'''
timestamp = timeutils.utcnow()
values = {
'name': self.name,
'type': self.type,
'user': self.user,
'project': self.project,
'domain': self.domain,
'spec': self.spec,
'data': self.data,
}
if self.id is not None:
self.updated_at = timestamp
values['updated_at'] = timestamp
db_api.policy_update(context, self.id, values)
else:
self.created_at = timestamp
values['created_at'] = timestamp
policy = db_api.policy_create(context, values)
self.id = policy.id
return self.id
def validate(self):
'''Validate the schema and the data provided.'''
self.spec_data.validate()
self.properties.validate()
@classmethod
def get_schema(cls):
return dict((name, dict(schema))
for name, schema in cls.properties_schema.items())
def _build_policy_data(self, data):
clsname = reflection.get_class_name(self, fully_qualified=False)
version = self.VERSION
result = {
clsname: {
'version': version,
'data': data,
}
}
return result
def _extract_policy_data(self, policy_data):
clsname = reflection.get_class_name(self, fully_qualified=False)
if clsname not in policy_data:
return None
data = policy_data.get(clsname)
if 'version' not in data or data['version'] != self.VERSION:
return None
return data.get('data', None)
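    # For a hypothetical subclass named ScalingPolicy with VERSION = '1.0',
    # _build_policy_data({'foo': 1}) would return
    # {'ScalingPolicy': {'version': '1.0', 'data': {'foo': 1}}}, and passing that
    # dict back through _extract_policy_data() would return {'foo': 1}.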
def attach(self, cluster):
'''Method to be invoked before policy is attached to a cluster.
:param cluster: the cluster to which the policy is being attached.
:returns: (True, message) if the operation is successful, or (False,
error) otherwise.
'''
if self.PROFILE_TYPE == ['ANY']:
return True, None
profile = cluster.rt['profile']
if profile.type not in self.PROFILE_TYPE:
error = _('Policy not applicable on profile type: '
'%s') % profile.type
return False, error
return True, None
def detach(self, cluster):
'''Method to be invoked before policy is detached from a cluster.'''
return True, None
def need_check(self, target, action):
if getattr(self, 'TARGET', None) is None:
return True
if (target, action.action) in self.TARGET:
return True
else:
return False
def pre_op(self, cluster_id, action):
'''A method that will be invoked before an action execution.'''
return
def post_op(self, cluster_id, action):
'''A method that will be invoked after an action execution.'''
return
def to_dict(self):
pb_dict = {
'id': self.id,
'name': self.name,
'type': self.type,
'user': self.user,
'project': self.project,
'domain': self.domain,
'spec': self.spec,
'created_at': utils.format_time(self.created_at),
'updated_at': utils.format_time(self.updated_at),
'data': self.data,
}
return pb_dict
def _build_conn_params(self, cluster):
"""Build trust-based connection parameters.
:param cluster: the cluster for which the trust will be checked.
"""
service_creds = senlin_context.get_service_context()
params = {
'username': service_creds.get('username'),
'password': service_creds.get('password'),
'auth_url': service_creds.get('auth_url'),
'user_domain_name': service_creds.get('user_domain_name')
}
cred = db_api.cred_get(oslo_context.get_current(),
cluster.user, cluster.project)
if cred is None:
raise exception.TrustNotFound(trustor=cluster.user)
params['trust_id'] = cred.cred['openstack']['trust']
return params
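# A minimal, self-contained sketch (illustration only, not senlin code) of the
# dispatch pattern used in Policy.__new__ above: the base class resolves a concrete
# subclass from a registry keyed by "<type>-<version>". The names below are
# hypothetical and the function is never called by this module.
def _policy_dispatch_example():
    class BasePolicy(object):
        registry = {}

        def __new__(cls, name, spec, **kwargs):
            key = '-'.join([spec['type'], spec['version']])
            impl = cls if cls is not BasePolicy else cls.registry[key]
            return super(BasePolicy, cls).__new__(impl)

        def __init__(self, name, spec, **kwargs):
            self.name = name
            self.spec = spec

    class ScalingPolicy(BasePolicy):
        pass

    BasePolicy.registry['senlin.policy.scaling-1.0'] = ScalingPolicy
    policy = BasePolicy('p1', {'type': 'senlin.policy.scaling', 'version': '1.0'})
    assert isinstance(policy, ScalingPolicy)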
|
|
'''Works with Tesira models using the Tesira Text Protocol (TTP)'''
# TODO:
# - For Matrix Mixer blocks, only cross-point muting is done.
TELNET_TCPPORT = 23
param_Disabled = Parameter({'schema': {'type': 'boolean'}})
param_IPAddress = Parameter({'title': 'IP address', 'schema': {'type': 'string'}})
# TODO REMOVE DEFAULT_DEVICE = 1
param_InputBlocks = Parameter({'title': 'Input blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 1},
'inputNames': {'type': 'string', 'desc': 'Comma separated list of simple labels starting at input #1; use "ignore" to ignore an input', 'order': 2}}}}})
param_LevelBlocks = Parameter({'title': 'Level blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 1},
'names': {'type': 'string', 'desc': 'Comma separated list of simple labels starting at #1; use "ignore" to ignore', 'order': 2}}}}})
param_MuteBlocks = Parameter({'title': 'Mute blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 1},
'names': {'type': 'string', 'desc': 'Comma separated list of simple labels starting at #1; use "ignore" to ignore', 'order': 2}}}}})
param_SourceSelectBlocks = Parameter({'title': 'Source-Select blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 3},
'sourceCount': {'type': 'integer', 'desc': 'The number of sources being routed', 'order': 4}}}}})
param_MeterBlocks = Parameter({'title': 'Meter blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'type': {'type': 'string', 'enum': ['Peak', 'RMS', 'Presence'], 'order': 1},
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 2},
'names': {'type': 'string', 'desc': 'Comma separated list of simple labels starting at #1; use "ignore" to ignore', 'order': 3}}}}})
param_MatrixMixerBlocks = Parameter({'title': 'Matrix Mixer blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'label': {'type': 'string', 'order': 1},
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 4},
'inputNames': {'type': 'string', 'desc': 'Comma separated list of simple labels', 'order': 5},
'outputNames': {'type': 'string', 'desc': 'Comma separated list of simple labels', 'order': 6}}}}})
param_StandardMixerBlocks = Parameter({'title': 'Standard Mixer blocks', 'schema': {'type': 'array', 'items': {'type': 'object', 'properties': {
'label': {'type': 'string', 'order': 1},
'instance': {'type': 'string', 'desc': 'Instance ID or tag', 'order': 4},
'inputNames': {'type': 'string', 'desc': 'Comma separated list of simple labels', 'order': 5},
'outputNames': {'type': 'string', 'desc': 'Comma separated list of simple labels', 'order': 6},
'ignoreCrossPoints': {'type': 'boolean', 'desc': 'Ignore cross-point states to reduce number of controls', 'order': 7}
}}}})
# <main ---
def main():
if param_Disabled:
console.warn('Disabled! nothing to do')
return
if is_blank(param_IPAddress):
console.warn('No IP address set; nothing to do')
return
dest = '%s:%s' % (param_IPAddress, TELNET_TCPPORT)
console.info('Will connect to [%s]' % dest)
tcp.setDest(dest)
# --- main>
# <protocol ---
def parseResp(rawResp, onSuccess):
# e.g: [+OK "value":-64.697762]
resp = rawResp.strip()
if resp == '+OK':
onSuccess(None)
elif '-ERR' in resp:
console.warn('Got bad resp: %s' % resp)
return
else:
# any successful resp has its callback called
valuePos = resp.find('"value":')
if valuePos > 0: onSuccess(resp[valuePos+8:])
else: console.warn('no value in resp; was [%s]' % resp)
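# Illustrative TTP exchanges (assumed examples, matching the request formats used below):
#   send: 'Mixer1 get crosspointLevelState 1 1\n'   recv: '+OK "value":true'
#   send: 'Level1 set level 1 -20\n'                recv: '+OK'
#   send: 'Level1 get level 1\n'                    recv: '+OK "value":-20.000000'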
INPUTGAIN_SCHEMA = {'type': 'integer', 'desc': '0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66'}
@after_main
def bindInputs():
for info in param_InputBlocks or []:
for inputNum, inputName in enumerate(info['inputNames'].split(',')):
if inputName == 'ignore':
continue
initNumberValue('Input', 'gain', inputName, info['instance'], inputNum+1, isInteger=True)
@after_main
def bindLevels():
for info in param_LevelBlocks or []:
levelInstance = info['instance']
for num, name in enumerate(info['names'].split(',')):
initNumberValue('Level', 'level', name, levelInstance, num+1)
initBoolValue('Level Muting', 'mute', name, levelInstance, num+1)
@after_main
def bindMutes():
for info in param_MuteBlocks or []:
instance = info['instance']
names = (info['names'] or '').strip()
if len(names) > 0:
for num, name in enumerate([x.strip() for x in names.split(',')]):
initBoolValue('Mute', 'mute', name, instance, num+1)
else:
initBoolValue('Mute', 'mute', 'All', instance, 1)
@after_main
def bindMatrixMixers():
for info in param_MatrixMixerBlocks or []:
instance = info['instance']
label = info['label']
for inputNum, inputName in enumerate(info['inputNames'].split(',')):
inputName = inputName.strip()
for outputNum, outputName in enumerate(info['outputNames'].split(',')):
outputName = outputName.strip()
initBoolValue('Crosspoint State', 'crosspointLevelState',
'%s - %s - %s' % (label, inputName.strip(), outputName),
instance, inputNum+1, index2=outputNum+1)
initNumberValue('Crosspoint Level', 'crosspointLevel',
inputName,
instance, inputNum+1, index2=outputNum+1,
group='"%s %s"' % (label, outputName))
@after_main
def bindStandardMixers():
for info in param_StandardMixerBlocks or []:
instance = info['instance']
if not info.get('ignoreCrossPoints'): # skip cross-points
for inputNum, inputName in enumerate(info['inputNames'].split(',')):
inputName = inputName.strip()
for outputNum, outputName in enumerate(info['outputNames'].split(',')):
outputName = outputName.strip()
initBoolValue('Crosspoint State', 'crosspoint',
inputName,
instance, inputNum+1, index2=outputNum+1,
group='"%s %s"' % (info['label'], outputName))
# output levels
for outputNum, outputName in enumerate(info['outputNames'].split(',')):
outputName = outputName.strip()
initNumberValue('Output Level', 'outputLevel',
outputName,
instance, outputNum+1,
group='"%s %s"' % (info['label'], outputName))
initBoolValue('Output Mute', 'outputMute',
outputName,
instance, outputNum+1,
group='"%s %s"' % (info['label'], outputName))
# TODO: also expose input levels
def initBoolValue(controlType, cmd, label, inst, index1, index2=None, group=None):
if index2 == None:
name = '%s %s %s' % (inst, index1, controlType)
else:
# name collision will occur if dealing with more than 10 in a list
# so add in a forced delimiter 'x', e.g. '1 11' is same as '11 1' but not '1x11'
delimiter = ' ' if index1 < 10 and index2 < 10 else ' x '
name = '%s %s%s%s %s' % (inst, index1, delimiter, index2, controlType)
title = '"%s" (#%s)' % (label, index1)
if group == None:
group = inst
schema = {'type': 'boolean'}
signal = Event(name, {'title': title, 'group': group, 'order': next_seq(), 'schema': schema})
# some cmds take in index1 and index2
index = index1 if index2 == None else '%s %s' % (index1, index2)
# e.g. Mixer1 get crosspointLevelState 1 1
getter = Action('Get ' + name, lambda arg: tcp_request('%s get %s %s\n' % (inst, cmd, index),
lambda resp: parseResp(resp, lambda arg: signal.emit(arg == '1' or arg == 'true'))),
{'title': 'Get', 'group': group, 'order': next_seq()})
setter = Action(name, lambda arg: tcp_request('%s set %s %s %s\n' % (inst, cmd, index, '1' if arg == True else '0'),
lambda resp: parseResp(resp,
lambda result: signal.emit(arg))), # NOTE: uses the original 'arg' here
{'title': title, 'group': group, 'order': next_seq(), 'schema': schema})
Timer(lambda: getter.call(), random(120,150), random(5,10))
# and some convenience derivatives
toggle = Action(name + " Toggle", lambda arg: setter.call(not signal.getArg()), {'title': 'Toggle', 'group': group, 'order': next_seq()})
inverted = Event(name + " Inverted", {'title': '(inverted)', 'group': group, 'order': next_seq(), 'schema': schema})
signal.addEmitHandler(lambda arg: inverted.emit(not arg))
def initNumberValue(controlType, cmd, label, inst, index1, isInteger=False, index2=None, group=None):
if index2 == None:
name = '%s %s %s' % (inst, index1, controlType)
else:
# name collision will occur if dealing with more than 10 in a list
# so add in a forced delimiter 'x', e.g. '1 11' is same as '11 1' but not '1x11'
delimiter = ' ' if index1 < 10 and index2 < 10 else ' x '
name = '%s %s%s%s %s' % (inst, index1, delimiter, index2, controlType)
title = '%s ("%s")' % (name, label)
if group == None:
group = '%s %s' % (controlType, inst)
schema = {'type': 'integer' if isInteger else 'number'}
signal = Event(name, {'title': title, 'group': group, 'order': next_seq(), 'schema': schema})
# some cmds take in index1 and index2
index = index1 if index2 == None else '%s %s' % (index1, index2)
getter = Action('Get ' + name, lambda arg: tcp_request('%s get %s %s\n' % (inst, cmd, index),
lambda resp: parseResp(resp, lambda arg: signal.emit(int(float(arg)) if isInteger else float(arg)))),
{'title': 'Get', 'group': group, 'order': next_seq()})
setter = Action(name, lambda arg: tcp_request('%s set %s %s %s\n' % (inst, cmd, index, arg),
lambda resp: parseResp(resp,
lambda result: signal.emit(arg))), # NOTE: uses the original 'arg' here
{'title': title, 'group': group, 'order': next_seq(), 'schema': schema})
Timer(lambda: getter.call(), random(120,150), random(5,10))
@after_main
def bindSourceSelects():
for info in param_SourceSelectBlocks or []:
initSourceSelect(info['instance'], info['sourceCount'])
def initSourceSelect(inst, sourceCount):
name = inst
title = inst
group = inst
signal = Event(name, {'title': title, 'group': group, 'order': next_seq(), 'schema': {'type': 'integer'}})
getter = Action('Get ' + name, lambda arg: tcp_request('%s get sourceSelection\n' % (inst),
lambda resp: parseResp(resp, lambda result: signal.emit(int(result)))),
{'title': 'Get', 'group': group, 'order': next_seq()})
# safe to use title here remembering within brackets is extraneous
setter = Action(name, lambda arg: tcp_request('%s set sourceSelection %s\n' % (inst, int(arg)),
lambda resp: parseResp(resp,
lambda result: signal.emit(int(arg)))), # NOTE: uses the original 'arg' here
{'title': title, 'group': group, 'schema': {'type': 'integer'}})
for i in range(1, sourceCount+1):
bindSourceItem(inst, i, setter, signal)
Timer(lambda: getter.call(), random(120,150), random(5,10))
def bindSourceItem(inst, i, setter, signal):
name = '%s %s Selected' % (inst, i)
title = 'Source %s' % i
group = inst
selectedSignal = Event(name, {'title': title, 'group': inst, 'order': next_seq(), 'schema': {'type': 'boolean'}})
signal.addEmitHandler(lambda arg: selectedSignal.emitIfDifferent(arg == i))
def handler(arg):
if arg == None: # toggle if no arg is given
setter.call(0 if selectedSignal.getArg() else i)
else: # set the state
setter.call(i if arg == True else 0)
togglerOrSetter = Action(name, handler, {'title': title, 'group': inst, 'order': next_seq()})
@after_main
def bindMeterBlocks():
for info in param_MeterBlocks or []:
meterType = info['type']
meterInstance = info['instance']
for num, name in enumerate(info['names'].split(',')):
initMeters(meterType, name, meterInstance, num+1)
def initMeters(meterType, label, inst, index):
name = '%s %s' % (inst, index)
title = '"%s"' % label
if meterType == 'Presence':
cmd = 'present'
schema = {'type': 'boolean'}
else:
cmd = 'level'
schema = {'type': 'number'}
group = inst
signal = Event(name, {'title': title, 'group': group, 'order': next_seq(), 'schema': schema})
def handleResult(result):
if meterType == 'Presence':
signal.emitIfDifferent(result=='true')
else:
signal.emit(float(result))
def poll():
tcp_request('%s get %s %s\n' % (inst, cmd, index),
lambda resp: parseResp(resp, handleResult))
# start meters much later to avoid being overwhelmed with feedback
Timer(poll, 0.2, random(30,45))
# only requests *if ready*
def tcp_request(req, onResp):
if receivedTelnetOptions:
tcp.request(req, onResp)
# --- protocol>
# <tcp ---
# taken from Tesira help file
receivedTelnetOptions = False
def tcp_connected():
console.info('tcp_connected')
global receivedTelnetOptions
receivedTelnetOptions = False
tcp.clearQueue()
def tcp_received(data):
log(3, 'tcp_recv [%s] -- [%s]' % (data, data.encode('hex')))
for c in data:
handleByte(c)
telnetBuffer = list()
recvBuffer = list()
def handleByte(c):
if len(telnetBuffer) > 0:
# goes into a TELNET frame
telnetBuffer.append(c)
if len(telnetBuffer) == 3:
frame = ''.join(telnetBuffer)
del telnetBuffer[:]
telnet_frame_received(frame)
elif c == '\xff':
# start of TELNET FRAME
telnetBuffer.append(c)
elif c in ['\r', '\n']:
# end of a NORMAL msg
msg = ''.join(recvBuffer).strip()
del recvBuffer[:]
if len(msg) > 0:
queue.handle(msg)
else:
# put all other characters into NORMAL msg
recvBuffer.append(c)
if len(recvBuffer) > 1024:
console.warn('buffer too big; dropped; was "%s"' % ''.join(recvBuffer))
del recvBuffer[:]
def telnet_frame_received(data):
log(2, 'telnet_recv [%s]' % (data.encode('hex')))
# reject all telnet options
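# (Telnet negotiation bytes per RFC 854: IAC=0xFF, WILL=0xFB, WONT=0xFC, DO=0xFD,
#  DONT=0xFE; each negotiation frame is three bytes: IAC, verb, option.)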
if data[0] == '\xFF':
if data[1] == '\xFB': # WILL
tcp.send('\xFF\xFE%s' % data[2]) # send DON'T
elif data[1] == '\xFD': # DO
tcp.send('\xFF\xFC%s' % data[2]) # send WON'T
def msg_received(data):
log(2, 'msg_recv [%s]' % (data.strip()))
lastReceive[0] = system_clock()
if 'Welcome to the Tesira Text Protocol Server...' in data:
global receivedTelnetOptions
receivedTelnetOptions = True
def tcp_sent(data):
log(3, 'tcp_sent [%s] -- [%s]' % (data, data.encode('hex')))
def tcp_disconnected():
console.warn('tcp_disconnected')
global receivedTelnetOptions
receivedTelnetOptions = False
def tcp_timeout():
console.warn('tcp_timeout; dropping (if connected)')
tcp.drop()
def protocolTimeout():
console.log('protocol timeout; flushing buffer; dropping connection (if connected)')
queue.clearQueue()
del recvBuffer[:]
del telnetBuffer[:]
global receivedTelnetOptions
receivedTelnetOptions = False
tcp.drop()
tcp = TCP(connected=tcp_connected, received=tcp_received, sent=tcp_sent, disconnected=tcp_disconnected, timeout=tcp_timeout,
receiveDelimiters='', sendDelimiters='')
queue = request_queue(timeout=protocolTimeout, received=msg_received)
# --- tcp>
# <logging ---
local_event_LogLevel = LocalEvent({'group': 'Debug', 'order': 10000+next_seq(), 'schema': {'type': 'integer'}})
def warn(level, msg):
if local_event_LogLevel.getArg() >= level:
console.warn((' ' * level) + msg)
def log(level, msg):
if local_event_LogLevel.getArg() >= level:
console.log((' ' * level) + msg)
# --->
# <status and error reporting ---
local_event_LastCommsErrorTimestamp = LocalEvent({'title': 'Last Comms Error Timestamp', 'group': 'Status', 'order': 99999+next_seq(), 'schema': {'type': 'string'}})
# for comms drop-out
lastReceive = [0]
# roughly, the last contact
local_event_LastContactDetect = LocalEvent({'group': 'Status', 'order': 99999+next_seq(), 'title': 'Last contact detect', 'schema': {'type': 'string'}})
# node status
local_event_Status = LocalEvent({'group': 'Status', 'order': 99999+next_seq(), 'schema': {'type': 'object', 'properties': {
'level': {'type': 'integer', 'order': 1},
'message': {'type': 'string', 'order': 2}}}})
def statusCheck():
diff = (system_clock() - lastReceive[0])/1000.0 # (in secs)
now = date_now()
if diff > status_check_interval+15:
previousContactValue = local_event_LastContactDetect.getArg()
if previousContactValue == None:
message = 'Always been missing.'
else:
previousContact = date_parse(previousContactValue)
roughDiff = (now.getMillis() - previousContact.getMillis())/1000/60
if roughDiff < 60:
message = 'Missing for approx. %s mins' % roughDiff
elif roughDiff < (60*24):
message = 'Missing since %s' % previousContact.toString('h:mm:ss a')
else:
message = 'Missing since %s' % previousContact.toString('h:mm:ss a, E d-MMM')
local_event_Status.emit({'level': 2, 'message': message})
else:
# update contact info
local_event_LastContactDetect.emit(str(now))
# TODO: check internal device status if possible
local_event_LastContactDetect.emit(str(now))
local_event_Status.emit({'level': 0, 'message': 'OK'})
status_check_interval = 75
status_timer = Timer(statusCheck, status_check_interval)
# --->
# <convenience methods ---
def getOrDefault(value, default):
return default if value == None or is_blank(value) else value
from java.util import Random
_rand = Random()
# returns a random number between an interval
def random(fromm, to):
return fromm + _rand.nextDouble()*(to - fromm)
# --->
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ivi
# Parameter Values
CurrentLimitBehavior = set(['regulate', 'trip'])
RangeType = set(['current', 'voltage'])
OutputState = set(['constant_voltage', 'constant_current', 'over_voltage',
'over_current', 'unregulated'])
MeasurementType = set(['current', 'voltage'])
def get_range(range_list, offset, val):
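    """Return the key of the entry in range_list with the smallest magnitude at
    position `offset` that still covers `val`, or None if no entry is large enough.
    For example (hypothetical range table):
    get_range({'P8V': (8.0, 5.0), 'P20V': (20.0, 2.5)}, 0, 12.0) -> 'P20V'
    """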
l = list()
for i in range_list:
l.append((i, abs(range_list[i][offset])))
l.sort(key=lambda x: x[1], reverse=True)
k = None
for i in range(len(l)):
if l[i][1] >= val:
k = l[i][0]
return k
class Base(ivi.IviContainer):
"Base IVI methods for all DC power supplies"
def __init__(self, *args, **kwargs):
# needed for _init_outputs calls from other __init__ methods
self._output_count = 1
super(Base, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Base'
ivi.add_group_capability(self, cls+grp)
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
self._output_name = list()
self._output_count = 1
self._output_spec = [
{
'range': {
'P0V': (0, 0)
},
'ovp_max': 0,
'voltage_max': 0,
'current_max': 0
}
]
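        # Concrete drivers are expected to replace _output_spec; a hypothetical
        # single-output supply might use something like:
        # [{'range': {'P8V': (9.0, 20.0), 'P20V': (21.0, 10.0)},
        #   'ovp_max': 22.0, 'voltage_max': 20.0, 'current_max': 10.0}]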
self._add_property('outputs[].current_limit',
self._get_output_current_limit,
self._set_output_current_limit,
None,
ivi.Doc("""
Specifies the output current limit. The units are Amps.
The value of the Current Limit Behavior attribute determines the behavior
of the power supply when the output current is equal to or greater than
the value of this attribute.
""", cls, grp, '4.2.1'))
self._add_property('outputs[].current_limit_behavior',
self._get_output_current_limit_behavior,
self._set_output_current_limit_behavior,
None,
ivi.Doc("""
Specifies the behavior of the power supply when the output current is
equal to or greater than the value of the Current Limit attribute.
Values
* 'trip' - The power supply disables the output when the output current is
equal to or greater than the value of the Current Limit attribute.
* 'regulate' - The power supply restricts the output voltage such that the
output current is not greater than the value of the Current Limit
attribute.
""", cls, grp, '4.2.2'))
self._add_property('outputs[].enabled',
self._get_output_enabled,
self._set_output_enabled,
None,
ivi.Doc("""
If true, the signal the power supply produces appears at the output
connector. If false, the signal the power supply produces does not appear
at the output connector.
""", cls, grp, '4.2.3'))
self._add_property('outputs[].ovp_enabled',
self._get_output_ovp_enabled,
self._set_output_ovp_enabled,
None,
ivi.Doc("""
Specifies whether the power supply provides over-voltage protection. If
this attribute is set to True, the power supply disables the output when
the output voltage is greater than or equal to the value of the OVP
Limit attribute.
""", cls, grp, '4.2.4'))
self._add_property('outputs[].ovp_limit',
self._get_output_ovp_limit,
self._set_output_ovp_limit,
None,
ivi.Doc("""
Specifies the voltage the power supply allows. The units are Volts.
If the OVP Enabled attribute is set to True, the power supply disables the
output when the output voltage is greater than or equal to the value of
this attribute.
If the OVP Enabled is set to False, this attribute does not affect the
behavior of the instrument.
""", cls, grp, '4.2.5'))
self._add_property('outputs[].voltage_level',
self._get_output_voltage_level,
self._set_output_voltage_level,
None,
ivi.Doc("""
Specifies the voltage level the DC power supply attempts to generate. The
units are Volts.
""", cls, grp, '4.2.6'))
self._add_property('outputs[].name',
self._get_output_name,
None,
None,
ivi.Doc("""
This attribute returns the repeated capability identifier defined by
specific driver for the output channel that corresponds to the index that
the user specifies. If the driver defines a qualified Output Channel name,
this property returns the qualified name.
If the value that the user passes for the Index parameter is less than
zero or greater than the value of the Output Channel Count, the attribute
raises a SelectorRangeException.
""", cls, grp, '4.2.9'))
self._add_method('outputs[].configure_current_limit',
self._output_configure_current_limit,
ivi.Doc("""
This function configures the current limit. It specifies the output
current limit value and the behavior of the power supply when the output
current is greater than or equal to that value.
See the definition of the Current Limit Behavior attribute for defined
values for the behavior parameter.
""", cls, grp, '4.3.1'))
self._add_method('outputs[].configure_range',
self._output_configure_range,
ivi.Doc("""
Configures the power supply's output range on an output. One parameter
specifies whether to configure the voltage or current range, and the other
parameter is the value to which to set the range.
Setting a voltage range can invalidate a previously configured current
range. Setting a current range can invalidate a previously configured
voltage range.
""", cls, grp, '4.3.3'))
self._add_method('outputs[].configure_ovp',
self._output_configure_ovp,
ivi.Doc("""
Configures the over-voltage protection. It specifies the over-voltage
limit and the behavior of the power supply when the output voltage is
greater than or equal to that value.
When the Enabled parameter is False, the Limit parameter does not affect
the instrument's behavior, and the driver does not set the OVP Limit
attribute.
""", cls, grp, '4.3.4'))
self._add_method('outputs[].query_current_limit_max',
self._output_query_current_limit_max,
ivi.Doc("""
This function returns the maximum programmable current limit that the
power supply accepts for a particular voltage level on an output.
""", cls, grp, '4.3.7'))
self._add_method('outputs[].query_voltage_level_max',
self._output_query_voltage_level_max,
ivi.Doc("""
This function returns the maximum programmable voltage level that the
power supply accepts for a particular current limit on an output.
""", cls, grp, '4.3.8'))
self._add_method('outputs[].query_output_state',
self._output_query_output_state,
ivi.Doc("""
This function returns whether the power supply is in a particular output
state.
A constant voltage condition occurs when the output voltage is equal to
the value of the Voltage Level attribute and the current is less than or
equal to the value of the Current Limit attribute.
A constant current condition occurs when the output current is equal to
the value of the Current Limit attribute and the Current Limit Behavior
attribute is set to the Current Regulate defined value.
An unregulated condition occurs when the output voltage is less than the
value of the Voltage Level attribute and the current is less than the
value of the Current Limit attribute.
An over-voltage condition occurs when the output voltage is equal to or
greater than the value of the OVP Limit attribute and the OVP Enabled
attribute is set to True.
An over-current condition occurs when the output current is equal to or
greater than the value of the Current Limit attribute and the Current
Limit Behavior attribute is set to the Current Trip defined value.
When either an over-voltage condition or an over-current condition
occurs, the power supply's output protection disables the output. If the
power supply is in an over-voltage or over-current state, it does not
produce power until the output protection is reset. The Reset Output
Protection function resets the output protection. Once the output
protection is reset, the power supply resumes generating a power signal.
Values for output_state:
* 'constant_voltage'
* 'constant_current'
* 'over_voltage'
* 'over_current'
* 'unregulated'
""", cls, grp, '4.3.9'))
self._add_method('outputs[].reset_output_protection',
self._output_reset_output_protection,
ivi.Doc("""
This function resets the power supply output protection after an
over-voltage or over-current condition occurs.
An over-voltage condition occurs when the output voltage is equal to or
greater than the value of the OVP Limit attribute and the OVP Enabled
attribute is set to True.
An over-current condition occurs when the output current is equal to or
greater than the value of the Current Limit attribute and the Current
Limit Behavior attribute is set to Current Trip.
When either an over-voltage condition or an over-current condition
occurs, the output protection of the power supply disables the output.
Once the output protection is reset, the power supply resumes generating
a power signal.
Use the Query Output State function to determine if the power supply is in
an over-voltage or over-current state.
""", cls, grp, '4.3.10'))
self._init_outputs()
def _init_outputs(self):
try:
super(Base, self)._init_outputs()
except AttributeError:
pass
self._output_name = list()
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
for i in range(self._output_count):
self._output_name.append("output%d" % (i+1))
self._output_current_limit.append(0)
self._output_current_limit_behavior.append('regulate')
self._output_enabled.append(False)
self._output_ovp_enabled.append(True)
self._output_ovp_limit.append(0)
self._output_voltage_level.append(0)
self.outputs._set_list(self._output_name)
def _get_output_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit[index]
def _set_output_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
self._output_current_limit[index] = value
def _get_output_current_limit_behavior(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit_behavior[index]
def _set_output_current_limit_behavior(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in CurrentLimitBehavior:
raise ivi.ValueNotSupportedException()
self._output_current_limit_behavior[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_enabled[index] = value
def _get_output_ovp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_enabled[index]
def _set_output_ovp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_ovp_enabled[index] = value
def _get_output_ovp_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_limit[index]
def _set_output_ovp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_spec[index]['ovp_max']:
raise ivi.OutOfRangeException()
self._output_ovp_limit[index] = value
def _get_output_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_voltage_level[index]
def _set_output_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
self._output_voltage_level[index] = value
def _get_output_name(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_name[index]
def _output_configure_current_limit(self, index, behavior, limit):
self._set_output_current_limit_behavior(index, behavior)
self._set_output_current_limit(index, limit)
def _output_configure_range(self, index, range_type, range_val):
index = ivi.get_index(self._output_name, index)
if range_type not in RangeType:
raise ivi.ValueNotSupportedException()
if range_type == 'voltage':
t = 0
elif range_type == 'current':
t = 1
k = dcpwr.get_range(self._output_range[index], t, range_val)
if k < 0:
raise ivi.OutOfRangeException()
self._output_spec[index]['voltage_max'] = self._output_range[index][k][0]
self._output_spec[index]['current_max'] = self._output_range[index][k][1]
def _output_configure_ovp(self, index, enabled, limit):
if enabled:
self._set_output_ovp_limit(index, limit)
self._set_output_ovp_enabled(index, enabled)
def _output_query_current_limit_max(self, index, voltage_level):
index = ivi.get_index(self._output_name, index)
if voltage_level < 0 or voltage_level > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['current_max']
def _output_query_voltage_level_max(self, index, current_limit):
index = ivi.get_index(self._output_name, index)
if current_limit < 0 or current_limit > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['voltage_max']
def _output_query_output_state(self, index, state):
index = ivi.get_index(self._output_name, index)
if state not in OutputState:
raise ivi.ValueNotSupportedException()
return False
def _output_reset_output_protection(self, index):
pass
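# Usage sketch (illustrative only, not part of the driver): a client script
# driving the outputs[] collection that Base registers above. "supply" stands
# for any concrete IviDCPwr driver instance; the 'trip' behavior value and the
# 'over_voltage' state string follow the IviDCPwr conventions used in this
# module and are assumptions as far as any specific instrument is concerned.
def _example_base_output_usage(supply):
    out = supply.outputs[0]
    out.voltage_level = 5.0                    # volts
    out.configure_current_limit('trip', 1.0)   # disable output above 1 A
    out.configure_ovp(True, 5.5)               # enable OVP with a 5.5 V limit
    out.enabled = True
    # After a fault, check the state and clear the protection latch.
    if out.query_output_state('over_voltage'):
        out.reset_output_protection()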
class Trigger(ivi.IviContainer):
"Extension IVI methods for power supplies supporting trigger based output changes"
def __init__(self, *args, **kwargs):
super(Trigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Trigger'
ivi.add_group_capability(self, cls+grp)
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
self._add_property('outputs[].trigger_source',
self._get_output_trigger_source,
self._set_output_trigger_source,
None,
ivi.Doc("""
Specifies the trigger source. After an Initiate call, the power supply
waits for a trigger event from the source specified with this attribute.
After a trigger event occurs, the power supply changes the voltage level
to the value of the Triggered Voltage Level attribute and the current
limit to the value of the Triggered Current Limit attribute.
""", cls, grp, '5.2.1'))
self._add_property('outputs[].triggered_current_limit',
self._get_output_triggered_current_limit,
self._set_output_triggered_current_limit,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the current limit after
a trigger event occurs. The units are Amps.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the current limit to the value of this
attribute.
After a trigger occurs, the value of the Current Limit attribute reflects
the new value to which the current limit has been set.
""", cls, grp, '5.2.2'))
self._add_property('outputs[].triggered_voltage_level',
self._get_output_triggered_voltage_level,
self._set_output_triggered_voltage_level,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the voltage level
after a trigger event occurs. The units are Volts.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the voltage level to the value of this
attribute.
After a trigger occurs, the value of the Voltage Level attribute reflects
the new value to which the voltage level has been set.
""", cls, grp, '5.2.3'))
self._add_method('trigger.abort',
self._trigger_abort,
ivi.Doc("""
If the power supply is currently waiting for a trigger to change the
output signal, this function returns the power supply to the ignore
triggers state.
If the power supply is not waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.1'))
self._add_method('trigger.initiate',
self._trigger_initiate,
ivi.Doc("""
If the power supply is not currently waiting for a trigger, this function
causes the power supply to wait for a trigger.
If the power supply is already waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.5'))
def _init_outputs(self):
try:
super(Trigger, self)._init_outputs()
except AttributeError:
pass
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
for i in range(self._output_count):
self._output_trigger_source.append('')
self._output_triggered_current_limit.append(0)
self._output_triggered_voltage_level.append(0)
def _get_output_trigger_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_trigger_source[index]
def _set_output_trigger_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value)
self._output_trigger_source[index] = value
def _get_output_triggered_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_current_limit[index]
def _set_output_triggered_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_triggered_current_limit[index] = value
def _get_output_triggered_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_voltage_level[index]
def _set_output_triggered_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_triggered_voltage_level[index] = value
def _trigger_abort(self):
pass
def _trigger_initiate(self):
pass
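# Usage sketch (illustrative only): the triggered-update flow documented above.
# The trigger source string is instrument specific; 'bus' is just an example.
def _example_triggered_update(supply):
    out = supply.outputs[0]
    out.trigger_source = 'bus'
    out.triggered_voltage_level = 12.0   # applied when the trigger fires
    out.triggered_current_limit = 2.0    # applied when the trigger fires
    supply.trigger.initiate()            # start waiting for the trigger event
    # ... the configured trigger event now updates the output ...
    supply.trigger.abort()               # or stop waiting without a trigger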
class SoftwareTrigger(ivi.IviContainer):
"Extension IVI methods for power supplies supporting software triggering"
def __init__(self, *args, **kwargs):
super(SoftwareTrigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'SoftwareTrigger'
ivi.add_group_capability(self, cls+grp)
self._add_method('send_software_trigger',
self._send_software_trigger,
ivi.Doc("""
This function sends a software-generated trigger to the instrument. It is
only applicable for instruments using interfaces or protocols which
support an explicit trigger function. For example, with GPIB this function
could send a group execute trigger to the instrument. Other
implementations might send a ``*TRG`` command.
Since instruments interpret a software-generated trigger in a wide variety
of ways, the precise response of the instrument to this trigger is not
defined. Note that SCPI details a possible implementation.
This function should not use resources which are potentially shared by
other devices (for example, the VXI trigger lines). Use of such shared
resources may have undesirable effects on other devices.
This function should not check the instrument status. Typically, the
end-user calls this function only in a sequence of calls to other
low-level driver functions. The sequence performs one operation. The
end-user uses the low-level functions to optimize one or more aspects of
interaction with the instrument. To check the instrument status, call the
appropriate error query function at the conclusion of the sequence.
The trigger source attribute must accept Software Trigger as a valid
setting for this function to work. If the trigger source is not set to
Software Trigger, this function does nothing and returns the error Trigger
Not Software.
""", cls, grp, '6.2.1', 'send_software_trigger'))
def _send_software_trigger(self):
pass
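# Usage sketch (illustrative only): pairing the SoftwareTrigger extension with
# the Trigger extension above. Using 'software' as the trigger source value is
# an assumption; the valid source strings are defined by the concrete driver.
def _example_software_trigger(supply):
    supply.outputs[0].trigger_source = 'software'
    supply.trigger.initiate()
    supply.send_software_trigger()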
class Measurement(ivi.IviContainer):
"Extension IVI methods for power supplies supporting measurement of the output signal"
def __init__(self, *args, **kwargs):
super(Measurement, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Measurement'
ivi.add_group_capability(self, cls+grp)
self._add_method('outputs[].measure',
self._output_measure,
ivi.Doc("""
Takes a measurement on the output signal and returns the measured value.
Values for measurement_type:
* 'voltage'
* 'current'
""", cls, grp, '7.2.1'))
def _output_measure(self, index, type):
index = ivi.get_index(self._output_name, index)
if type not in MeasurementType:
raise ivi.ValueNotSupportedException()
return 0
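# Usage sketch (illustrative only): the Measurement extension registered above
# takes one of the documented measurement type strings and returns a number.
def _example_measurements(supply):
    volts = supply.outputs[0].measure('voltage')
    amps = supply.outputs[0].measure('current')
    return volts, amps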
|
|
"""Admin API."""
from django import http
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from django_filters import rest_framework as dj_filters
from drf_spectacular.utils import extend_schema, extend_schema_view
from rest_framework import filters, renderers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from rest_framework.response import Response
from modoboa.core import models as core_models
from modoboa.core import sms_backends
from modoboa.lib import renderers as lib_renderers
from modoboa.lib import viewsets as lib_viewsets
from ... import lib, models
from . import serializers
@extend_schema_view(
retrieve=extend_schema(
description="Retrieve a particular domain",
summary="Retrieve a particular domain"
),
list=extend_schema(
description="Retrieve a list of domains",
summary="Retrieve a list of domains"
),
create=extend_schema(
description="Create a new domain",
summary="Create a new domain"
)
)
class DomainViewSet(lib_viewsets.RevisionModelMixin, viewsets.ModelViewSet):
"""Domain viewset."""
permission_classes = [IsAuthenticated, DjangoModelPermissions, ]
serializer_class = serializers.DomainSerializer
def get_queryset(self):
"""Filter queryset based on current user."""
return models.Domain.objects.get_for_admin(self.request.user)
def perform_destroy(self, instance):
"""Add custom args to delete call."""
instance.delete(self.request.user)
class DomainAliasFilterSet(dj_filters.FilterSet):
"""Custom FilterSet for DomainAlias."""
domain = dj_filters.CharFilter(field_name="target__name")
class Meta:
model = models.DomainAlias
fields = ["domain"]
class DomainAliasViewSet(lib_viewsets.RevisionModelMixin,
lib_viewsets.ExpandableModelViewSet):
"""ViewSet for DomainAlias."""
filter_backends = (dj_filters.DjangoFilterBackend, )
filterset_class = DomainAliasFilterSet
permission_classes = [IsAuthenticated, DjangoModelPermissions, ]
renderer_classes = (renderers.JSONRenderer, lib_renderers.CSVRenderer)
serializer_expanded_fields = ["target"]
serializer_class = serializers.DomainAliasSerializer
def get_queryset(self):
"""Filter queryset based on current user."""
return models.DomainAlias.objects.get_for_admin(self.request.user)
def get_renderer_context(self):
context = super().get_renderer_context()
context["headers"] = ["name", "target__name", "enabled"]
return context
class AccountViewSet(lib_viewsets.RevisionModelMixin, viewsets.ModelViewSet):
"""ViewSet for User/Mailbox."""
filter_backends = (filters.SearchFilter, )
permission_classes = [IsAuthenticated, DjangoModelPermissions, ]
search_fields = ("^first_name", "^last_name", "^email")
def get_serializer_class(self):
"""Return a serializer."""
action_dict = {
"list": serializers.AccountSerializer,
"retrieve": serializers.AccountSerializer,
"password": serializers.AccountPasswordSerializer,
"reset_password": serializers.ResetPasswordSerializer,
}
return action_dict.get(
self.action, serializers.WritableAccountSerializer)
def get_queryset(self):
"""Filter queryset based on current user."""
user = self.request.user
ids = user.objectaccess_set \
.filter(content_type=ContentType.objects.get_for_model(user)) \
.values_list("object_id", flat=True)
queryset = core_models.User.objects.filter(pk__in=ids)
domain = self.request.query_params.get("domain")
if domain:
queryset = queryset.filter(mailbox__domain__name=domain)
return queryset
@action(methods=["put"], detail=True)
def password(self, request, pk=None):
"""Change account password."""
try:
user = core_models.User.objects.get(pk=pk)
except core_models.User.DoesNotExist:
raise http.Http404
serializer = self.get_serializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response()
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False)
def exists(self, request):
"""Check if account exists.
Requires a valid email address as argument. Example:
GET /exists/?email=user@test.com
"""
email = request.GET.get("email")
if not email:
raise ParseError("email not provided")
if not core_models.User.objects.filter(email=email).exists():
data = {"exists": False}
else:
data = {"exists": True}
serializer = serializers.AccountExistsSerializer(data)
return Response(serializer.data)
@action(methods=["post"], detail=False)
def reset_password(self, request):
"""Reset account password and send a new one by SMS."""
sms_password_recovery = (
request.localconfig.parameters
.get_value("sms_password_recovery", app="core")
)
if not sms_password_recovery:
return Response(status=404)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = core_models.User.objects.filter(
email=serializer.validated_data["email"]).first()
if not user or not user.phone_number:
return Response(status=404)
backend = sms_backends.get_active_backend(
request.localconfig.parameters)
if not backend:
return Response(status=404)
password = lib.make_password()
content = _("Here is your new Modoboa password: {}").format(
password)
if not backend.send(content, [str(user.phone_number)]):
body = {"status": "ko"}
else:
# SMS was sent, now we can set the new password.
body = {"status": "ok"}
user.set_password(password)
user.save(update_fields=["password"])
return Response(body)
class AliasViewSet(lib_viewsets.RevisionModelMixin, viewsets.ModelViewSet):
"""
create:
Create a new alias instance.
"""
permission_classes = [IsAuthenticated, DjangoModelPermissions, ]
serializer_class = serializers.AliasSerializer
def get_queryset(self):
"""Filter queryset based on current user."""
user = self.request.user
ids = (
user.objectaccess_set.filter(
content_type=ContentType.objects.get_for_model(models.Alias))
.values_list("object_id", flat=True)
)
queryset = models.Alias.objects.filter(pk__in=ids)
domain = self.request.query_params.get("domain")
if domain:
queryset = queryset.filter(domain__name=domain)
return queryset
class SenderAddressViewSet(lib_viewsets.RevisionModelMixin,
viewsets.ModelViewSet):
"""View set for SenderAddress model."""
permission_classes = [IsAuthenticated, DjangoModelPermissions, ]
serializer_class = serializers.SenderAddressSerializer
def get_queryset(self):
"""Filter queryset based on current user."""
user = self.request.user
mb_ids = (
user.objectaccess_set.filter(
content_type=ContentType.objects.get_for_model(models.Mailbox))
.values_list("object_id", flat=True)
)
return models.SenderAddress.objects.filter(mailbox__pk__in=mb_ids)
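# Wiring sketch (not part of this module): these viewsets are typically exposed
# through a DRF router in a urls.py. The URL prefixes below are assumptions,
# not necessarily the project's actual routes; basenames are needed because the
# viewsets define get_queryset() instead of a queryset attribute.
#
#     from rest_framework import routers
#
#     router = routers.SimpleRouter()
#     router.register(r"domains", DomainViewSet, basename="domain")
#     router.register(r"domainaliases", DomainAliasViewSet, basename="domainalias")
#     router.register(r"accounts", AccountViewSet, basename="account")
#     router.register(r"aliases", AliasViewSet, basename="alias")
#     router.register(r"senderaddresses", SenderAddressViewSet, basename="senderaddress")
#     urlpatterns = router.urls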
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_policy import policy
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from wsme import types as wtypes
from magnum.api.controllers.v1 import node as api_node
from magnum.common import utils
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestNodeObject(base.TestCase):
def test_node_init(self):
node_dict = apiutils.node_post_data()
del node_dict['image_id']
node = api_node.Node(**node_dict)
self.assertEqual(wtypes.Unset, node.image_id)
class TestListNode(api_base.FunctionalTest):
def test_empty(self):
response = self.get_json('/nodes')
self.assertEqual([], response['nodes'])
def _assert_node_fields(self, node):
node_fields = ['type', 'image_id', 'ironic_node_id']
for field in node_fields:
self.assertIn(field, node)
def test_one(self):
node = obj_utils.create_test_node(self.context)
response = self.get_json('/nodes')
self.assertEqual(node.uuid, response['nodes'][0]["uuid"])
self._assert_node_fields(response['nodes'][0])
def test_get_one(self):
node = obj_utils.create_test_node(self.context)
response = self.get_json('/nodes/%s' % node['uuid'])
self.assertEqual(node.uuid, response['uuid'])
self._assert_node_fields(response)
def test_get_all_with_pagination_marker(self):
node_list = []
for id_ in range(4):
node = obj_utils.create_test_node(self.context, id=id_,
uuid=utils.generate_uuid())
node_list.append(node.uuid)
response = self.get_json('/nodes?limit=3&marker=%s' % node_list[2])
self.assertEqual(1, len(response['nodes']))
self.assertEqual(node_list[-1], response['nodes'][0]['uuid'])
def test_detail(self):
node = obj_utils.create_test_node(self.context)
response = self.get_json('/nodes/detail')
self.assertEqual(node.uuid, response['nodes'][0]["uuid"])
self._assert_node_fields(response['nodes'][0])
def test_detail_with_pagination_marker(self):
node_list = []
for id_ in range(4):
node = obj_utils.create_test_node(self.context, id=id_,
uuid=utils.generate_uuid())
node_list.append(node.uuid)
response = self.get_json('/nodes/detail?limit=3&marker=%s'
% node_list[2])
self.assertEqual(1, len(response['nodes']))
self.assertEqual(node_list[-1], response['nodes'][0]['uuid'])
self._assert_node_fields(response['nodes'][0])
def test_detail_against_single(self):
node = obj_utils.create_test_node(self.context)
response = self.get_json('/nodes/%s/detail' % node['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
node_list = []
for id_ in range(5):
node = obj_utils.create_test_node(self.context, id=id_,
uuid=utils.generate_uuid())
node_list.append(node.uuid)
response = self.get_json('/nodes')
self.assertEqual(len(node_list), len(response['nodes']))
uuids = [s['uuid'] for s in response['nodes']]
self.assertEqual(sorted(node_list), sorted(uuids))
def test_links(self):
uuid = utils.generate_uuid()
obj_utils.create_test_node(self.context, id=1, uuid=uuid)
response = self.get_json('/nodes/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_node(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/nodes/?limit=3')
self.assertEqual(3, len(response['nodes']))
next_marker = response['nodes'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_node(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/nodes')
self.assertEqual(3, len(response['nodes']))
next_marker = response['nodes'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
self.node = obj_utils.create_test_node(self.context, image_id='Fedora')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
new_image = 'Ubuntu'
response = self.get_json('/nodes/%s' % self.node.uuid)
self.assertNotEqual(new_image, response['image_id'])
response = self.patch_json('/nodes/%s' % self.node.uuid,
[{'path': '/image_id', 'value': new_image,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/nodes/%s' % self.node.uuid)
self.assertEqual(new_image, response['image_id'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
def test_replace_non_existent_node(self):
response = self.patch_json('/nodes/%s' % utils.generate_uuid(),
[{'path': '/image_id', 'value': 'Ubuntu',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_add_non_existent_property(self):
response = self.patch_json(
'/nodes/%s' % self.node.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_ok(self):
response = self.get_json('/nodes/%s' % self.node.uuid)
self.assertIsNotNone(response['image_id'])
response = self.patch_json('/nodes/%s' % self.node.uuid,
[{'path': '/image_id', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/nodes/%s' % self.node.uuid)
self.assertIsNone(response['image_id'])
def test_remove_uuid(self):
response = self.patch_json('/nodes/%s' % self.node.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_non_existent_property(self):
response = self.patch_json(
'/nodes/%s' % self.node.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestPost(api_base.FunctionalTest):
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_node(self, mock_utcnow):
node_dict = apiutils.node_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/nodes', node_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/nodes/%s' % node_dict['uuid']
self.assertEqual(expected_location,
urlparse.urlparse(response.location).path)
self.assertEqual(node_dict['uuid'], response.json['uuid'])
self.assertNotIn('updated_at', response.json.keys())
return_created_at = timeutils.parse_isotime(
response.json['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
def test_create_node_set_project_id_and_user_id(self):
with mock.patch.object(self.dbapi, 'create_node',
wraps=self.dbapi.create_node) as cc_mock:
node_dict = apiutils.node_post_data()
self.post_json('/nodes', node_dict)
cc_mock.assert_called_once_with(mock.ANY)
self.assertEqual(self.context.project_id,
cc_mock.call_args[0][0]['project_id'])
self.assertEqual(self.context.user_id,
cc_mock.call_args[0][0]['user_id'])
def test_create_node_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_node',
wraps=self.dbapi.create_node) as cn_mock:
node_dict = apiutils.node_post_data(image_id='Ubuntu')
response = self.post_json('/nodes', node_dict)
self.assertEqual(node_dict['image_id'], response.json['image_id'])
cn_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cn_mock.call_args[0][0])
def test_create_node_generate_uuid(self):
node_dict = apiutils.node_post_data()
del node_dict['uuid']
response = self.post_json('/nodes', node_dict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
self.assertEqual(node_dict['image_id'],
response.json['image_id'])
self.assertTrue(utils.is_uuid_like(response.json['uuid']))
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
self.node = obj_utils.create_test_node(self.context, image_id='Fedora')
def test_delete_node(self):
self.delete('/nodes/%s' % self.node.uuid)
response = self.get_json('/nodes/%s' % self.node.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_node_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/nodes/%s' % uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestNodePolicyEnforcement(api_base.FunctionalTest):
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({rule: "project:non_fake"})
exc = self.assertRaises(policy.PolicyNotAuthorized,
func, *arg, **kwarg)
self.assertTrue(exc.message.startswith(rule))
self.assertTrue(exc.message.endswith("disallowed by policy"))
def test_policy_disallow_get_all(self):
self._common_policy_check(
"node:get_all", self.get_json, '/nodes')
def test_policy_disallow_get_one(self):
self._common_policy_check(
"node:get", self.get_json, '/nodes/111-222-333')
def test_policy_disallow_detail(self):
self._common_policy_check(
"node:detail", self.get_json, '/nodes/111-222-333/detail')
def test_policy_disallow_update(self):
node = obj_utils.create_test_node(self.context,
type='type_A',
uuid="333-444-5555")
self._common_policy_check(
"node:update", self.patch_json,
'/nodes/%s' % node.uuid,
[{'path': '/type', 'value': "new_type", 'op': 'replace'}])
def test_policy_disallow_create(self):
bdict = apiutils.node_post_data(name='node_example_A')
self._common_policy_check(
"node:create", self.post_json, '/nodes', bdict)
def test_policy_disallow_delete(self):
node = obj_utils.create_test_node(self.context,
uuid='137-246-789')
self._common_policy_check(
"node:delete", self.delete,
'/nodes/%s' % node.uuid)
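# Note on the payloads used by patch_json() above: each request body is a JSON
# Patch (RFC 6902) document, i.e. a list of operations shaped like
# {'op': 'add' | 'remove' | 'replace', 'path': '/<field>', 'value': ...},
# with 'value' omitted for 'remove'. A minimal replace request looks like:
#
#     patch = [{'path': '/image_id', 'value': 'Ubuntu', 'op': 'replace'}]
#     response = self.patch_json('/nodes/%s' % node_uuid, patch)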
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
from __future__ import unicode_literals
__author__ = 'robinson@google.com (Will Robinson)'
import sys
if sys.version > '3':
import copyreg
def copy_reg_pickle(type, function):
return copyreg.pickle(type,function)
else:
import copy_reg
def copy_reg_pickle(type, function):
return copy_reg.pickle(type,function)
import struct
import weakref
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import enum_type_wrapper
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import wire_format
from google.protobuf.internal import utils
from google.protobuf.internal.utils import SimIO, bytestr_to_string, \
iteritems, range
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(bases, descriptor, dictionary):
_AddClassAttributesForNestedExtensions(descriptor, dictionary)
_AddSlots(descriptor, dictionary)
return bases
def InitMessage(descriptor, cls):
cls._decoders_by_tag = {}
cls._extensions_by_name = {}
cls._extensions_by_number = {}
if (descriptor.has_options and
descriptor.GetOptions().message_set_wire_format):
cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
decoder.MessageSetItemDecoder(cls._extensions_by_number))
# Attach stuff to each FieldDescriptor for quick lookup later on.
for field in descriptor.fields:
_AttachFieldHelpers(cls, field)
_AddEnumValues(descriptor, cls)
_AddInitMethod(descriptor, cls)
_AddPropertiesForFields(descriptor, cls)
_AddPropertiesForExtensions(descriptor, cls)
_AddStaticMethods(cls)
_AddMessageMethods(descriptor, cls)
_AddPrivateHelperMethods(cls)
copy_reg_pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
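# Illustrative aside (toy code, not part of protobuf): NewMessage/InitMessage
# above are the two halves of a metaclass -- one shapes the class dictionary
# before the class exists, the other decorates the finished class. A stripped
# down equivalent of that division of labor:
#
#     class InjectingMeta(type):
#         def __new__(mcs, name, bases, dictionary):
#             dictionary['__slots__'] = ('_fields',)   # like _AddSlots()
#             return super(InjectingMeta, mcs).__new__(mcs, name, bases, dictionary)
#         def __init__(cls, name, bases, dictionary):
#             # like _AddMessageMethods(): inject behavior into the new class
#             cls.ListFields = lambda self: sorted(self._fields)
#             super(InjectingMeta, cls).__init__(name, bases, dictionary)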
# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly.
#
# I opted not to make any of these methods on the metaclass, to make it more
# clear that I'm not really using any state there and to keep clients from
# thinking that they have direct access to these construction helpers.
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
# TODO(robinson): Escape Python keywords (e.g., yield), and test this support.
# nnorwitz makes my day by writing:
# """
# FYI. See the keyword module in the stdlib. This could be as simple as:
#
# if keyword.iskeyword(proto_field_name):
# return proto_field_name + "_"
# return proto_field_name
# """
# Kenton says: The above is a BAD IDEA. People rely on being able to use
# getattr() and setattr() to reflectively manipulate field values. If we
# rename the properties, then every such user has to also make sure to apply
# the same transformation. Note that currently if you name a field "yield",
# you can still access it just fine using getattr/setattr -- it's not even
# that cumbersome to do so.
# TODO(kenton): Remove this method entirely if/when everyone agrees with my
# position.
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_unknown_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__']
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
is_packed = (field_descriptor.has_options and
field_descriptor.GetOptions().packed)
if _IsMessageSetExtension(field_descriptor):
field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
sizer = encoder.MessageSetItemSizer(field_descriptor.number)
else:
field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
field_descriptor._encoder = field_encoder
field_descriptor._sizer = sizer
field_descriptor._default_constructor = _DefaultValueConstructorForField(
field_descriptor)
def AddDecoder(wiretype, is_packed):
tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
cls._decoders_by_tag[tag_bytes] = (
type_checkers.TYPE_TO_DECODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed,
field_descriptor, field_descriptor._default_constructor))
AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
False)
if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
# To support wire compatibility of adding packed = true, add a decoder for
# packed values regardless of the field's options.
AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in iteritems(extension_dict):
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exports a class-level object that can be used to name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# We can't look at _concrete_class yet since it might not have
# been set. (Depends on order in which we initialize the classes).
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# _concrete_class may not yet be initialized.
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return field.default_value
return MakeScalarDefault
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
fields = message_descriptor.fields
def init(self, **kwargs):
self._cached_byte_size = 0
self._cached_byte_size_dirty = len(kwargs) > 0
self._fields = {}
# _unknown_fields is () when empty for efficiency, and will be turned into
# a list if fields are added.
self._unknown_fields = ()
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
for field_name, field_value in iteritems(kwargs):
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(message_descriptor.name, field_name))
if field.label == _FieldDescriptor.LABEL_REPEATED:
copy = field._default_constructor(self)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite
for val in field_value:
copy.add().MergeFrom(val)
else: # Scalar
copy.extend(field_value)
self._fields[field] = copy
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
copy = field._default_constructor(self)
copy.MergeFrom(field_value)
self._fields[field] = copy
else:
setattr(self, field_name, field_value)
init.__module__ = None
init.__doc__ = None
cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
for field in descriptor.fields:
_AddPropertiesForField(field, cls)
if descriptor.is_extendable:
# _ExtensionDict is just an adaptor with no state so we allocate a new one
# every time it is accessed.
cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
"""Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
# Catch it if we add other types that we should
# handle specially here.
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls)
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls)
else:
_AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
default_value = field.default_value
valid_values = set()
def getter(self):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
type_checker.CheckValue(new_value)
self._fields[field] = new_value
# Check _cached_byte_size_dirty inline to improve performance, since scalar
# setters are called frequently.
if not self._cached_byte_size_dirty:
self._Modified()
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
# Add a property to encapsulate the getter/setter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
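# Aside (toy sketch, not protobuf code): the machinery above is plain dynamic
# property injection -- build getter/setter closures around the descriptor,
# then setattr(cls, name, property(...)). Reduced to its core:
#
#     def _add_checked_property(cls, name, check):
#         key = '_' + name
#         def getter(self):
#             return getattr(self, key, None)
#         def setter(self, value):
#             check(value)                 # analogous to type_checker.CheckValue()
#             setattr(self, key, value)
#         setattr(cls, name, property(getter, setter))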
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
"""Adds a public property for a nonrepeated, composite protocol message field.
A composite field is a "group" or "message" field.
Clients can use this property to get the value of the field, but cannot
assign to the property directly.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
# TODO(robinson): Remove duplication with similar method
# for non-repeated scalars.
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
# TODO(komarek): Can anyone explain to me why we cache the message_type this
# way, instead of referring to field.message_type inside of getter(self)?
# What if someone sets message_type later on (which makes for simpler
# dynamic proto descriptor and class creation code).
message_type = field.message_type
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = message_type._concrete_class() # use field.message_type?
field_value._SetListener(self._listener_for_children)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to composite field '
'"%s" in protocol message object.' % proto_field_name)
# Add a property to encapsulate the getter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in iteritems(extension_dict):
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
def _AddStaticMethods(cls):
# TODO(robinson): This probably needs to be thread-safe(?)
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
_AttachFieldHelpers(cls, extension_handle)
# Try to insert our extension, failing if an extension with the same number
# already exists.
actual_handle = cls._extensions_by_number.setdefault(
extension_handle.number, extension_handle)
if actual_handle is not extension_handle:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" with '
'field number %d.' %
(extension_handle.full_name, actual_handle.full_name,
cls.DESCRIPTOR.full_name, extension_handle.number))
cls._extensions_by_name[extension_handle.full_name] = extension_handle
handle = extension_handle # avoid line wrapping
if _IsMessageSetExtension(handle):
# MessageSet extension. Also register under type name.
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(s):
message = cls()
message.MergeFromString(s)
return message
cls.FromString = staticmethod(FromString)
def _IsPresent(item):
"""Given a (FieldDescriptor, value) tuple from _fields, return true if the
value should be included in the list returned by ListFields()."""
if item[0].label == _FieldDescriptor.LABEL_REPEATED:
return bool(item[1])
elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
return item[1]._is_present_in_parent
else:
return True
def _AddListFieldsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ListFields(self):
all_fields = [item for item in iteritems(self._fields) if _IsPresent(item)]
all_fields.sort(key = lambda item: item[0].number)
return all_fields
cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
singular_fields = {}
for field in message_descriptor.fields:
if field.label != _FieldDescriptor.LABEL_REPEATED:
singular_fields[field.name] = field
def HasField(self, field_name):
try:
field = singular_fields[field_name]
except KeyError:
raise ValueError(
'Protocol message has no singular "%s" field.' % field_name)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(field)
return value is not None and value._is_present_in_parent
else:
return field in self._fields
cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field in self._fields:
# Note: If the field is a sub-message, its listener will still point
# at us. That's fine, because the worst that can happen is that it
# will call _Modified() and invalidate our byte size. Big deal.
del self._fields[field]
# Always call _Modified() -- even if nothing was changed, this is
# a mutating method, and thus calling it should cause the field to become
# present in the parent message.
self._Modified()
cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def ClearExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
# Similar to ClearField(), above.
if extension_handle in self._fields:
del self._fields[extension_handle]
self._Modified()
cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def Clear(self):
# Clear fields.
self._fields = {}
self._unknown_fields = ()
self._Modified()
cls.Clear = Clear
def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name)
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __eq__(self, other):
if (not isinstance(other, message_mod.Message) or
other.DESCRIPTOR != self.DESCRIPTOR):
return False
if self is other:
return True
if not self.ListFields() == other.ListFields():
return False
# Sort unknown fields because their order shouldn't affect equality test.
unknown_fields = list(self._unknown_fields)
unknown_fields.sort()
other_unknown_fields = list(other._unknown_fields)
other_unknown_fields.sort()
return unknown_fields == other_unknown_fields
cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return bytestr_to_string(text_format.MessageToString(self))
cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __unicode__(self):
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
"""Helper for _AddMessageMethods()."""
def SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
for tag_bytes, value_bytes in self._unknown_fields:
size += len(tag_bytes) + len(value_bytes)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
# Check if the message has all of its required fields set.
errors = []
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message %s is missing required fields: %s' % (
self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = SimIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
for tag_bytes, value_bytes in self._unknown_fields:
write_bytes(tag_bytes)
write_bytes(value_bytes)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def MergeFromString(self, serialized):
length = len(serialized)
try:
if self._InternalParse(serialized, 0, length) != length:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise message_mod.DecodeError('Unexpected end-group tag.')
except IndexError:
raise message_mod.DecodeError('Truncated message.')
except struct.error as e:
raise message_mod.DecodeError(e)
return length # Return this for legacy reasons.
cls.MergeFromString = MergeFromString
local_ReadTag = decoder.ReadTag
local_SkipField = decoder.SkipField
decoders_by_tag = cls._decoders_by_tag
def InternalParse(self, buffer, pos, end):
self._Modified()
field_dict = self._fields
unknown_field_list = self._unknown_fields
while pos != end:
(tag_bytes, new_pos) = local_ReadTag(buffer, pos)
field_decoder = decoders_by_tag.get(tag_bytes)
if field_decoder is None:
value_start_pos = new_pos
new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
if new_pos == -1:
return pos
if not unknown_field_list:
unknown_field_list = self._unknown_fields = []
unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos]))
pos = new_pos
else:
pos = field_decoder(buffer, new_pos, end, self, field_dict)
return pos
cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in iteritems(self._fields):
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [ prefix + error for error in sub_errors ]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [ prefix + error for error in sub_errors ]
return errors
cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
assert msg is not self
self._Modified()
fields = self._fields
for field, value in iteritems(msg._fields):
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
elif field.cpp_type == CPPTYPE_MESSAGE:
if value._is_present_in_parent:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
else:
self._fields[field] = value
if msg._unknown_fields:
if not self._unknown_fields:
self._unknown_fields = []
self._unknown_fields.extend(msg._unknown_fields)
cls.MergeFrom = MergeFrom
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddClearMethod(message_descriptor, cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddSetListenerMethod(cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
def _AddPrivateHelperMethods(cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
cls._Modified = Modified
cls.SetInParent = Modified
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
# This listener establishes a back reference from a child (contained) object
# to its parent (containing) object. We make this a weak reference to avoid
# creating cyclic garbage when the client finishes with the 'parent' object
# in the tree.
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
# As an optimization, we also indicate directly on the listener whether
# or not the parent message is dirty. This way we can avoid traversing
# up the tree in the common case.
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
# Propagate the signal to our parents iff this is the first field set.
self._parent_message_weakref._Modified()
except ReferenceError:
# We can get here if a client has kept a reference to a child object,
# and is now setting a field on it, but the child's parent has been
# garbage-collected. This is not an error.
pass
# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
# Get rid of non-extension fields.
my_fields = [ field for field in my_fields if field.is_extension ]
other_fields = [ field for field in other_fields if field.is_extension ]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
# Note that this is only meaningful for non-repeated, scalar extension
# fields. Note also that we may have to call _Modified() when we do
successfully set a field this way, to set any necessary "has" bits in the
# ancestors of the extended message.
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
# It's slightly wasteful to lookup the type checker each time,
# but we expect this to be a vanishingly uncommon case anyway.
type_checker = type_checkers.GetTypeChecker(
extension_handle.cpp_type, extension_handle.type)
type_checker.CheckValue(value)
self._extended_message._fields[extension_handle] = value
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
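# Hedged usage sketch (not part of the original module): how generated code is
# expected to exercise this Extensions dict. The message class `MyMessage` and
# the extension handle `my_ext` below are hypothetical names for illustration.
#
#     msg = MyMessage()
#     msg.Extensions[my_ext] = 42          # scalar extension: goes through __setitem__
#     assert msg.Extensions[my_ext] == 42  # __getitem__ returns the stored value
#     handle = msg.Extensions._FindExtensionByName('package.my_ext')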
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations(object):
"""ExpressRouteCircuitAuthorizationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitAuthorization"
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
authorization_parameters, # type: "_models.ExpressRouteCircuitAuthorization"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitAuthorization"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
authorization_parameters, # type: "_models.ExpressRouteCircuitAuthorization"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitAuthorization"]
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AuthorizationListResult"]
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
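# Hedged usage sketch (illustration only, not generated code): this operation
# group is normally reached through the service client rather than being
# constructed directly. The credential object and resource names below are
# assumptions.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     ops = client.express_route_circuit_authorizations
#     auth = ops.get("my-rg", "my-circuit", "my-auth")
#     for item in ops.list("my-rg", "my-circuit"):
#         print(item.name)
#     ops.begin_delete("my-rg", "my-circuit", "my-auth").result()  # wait for the LRO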
|
|
# -*- coding: utf-8 -*-
# See LICENSE file for copyright and license details
''' Test 'datatype' module. '''
import unittest
from misery import (
misc,
ast,
datatype,
)
class TestMarkOutDatatypes(unittest.TestCase):
def test_simple_func_decl(self):
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
)
]
)
expected_output = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
)
]
)
real_output = datatype.mark_out_datatypes(input_ast)
misc.assert_equal(self, expected_output, real_output)
def test_copy(self):
input_ast = ast.Module()
real_output = datatype.mark_out_datatypes(input_ast)
# change original data
input_ast.decl_list.append('hi')
self.assertEquals(len(real_output.decl_list), 0)
def test_simple_integer_var_decl(self):
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=ast.Number(666),
),
],
)
]
)
def get_expected_output():
expected_output = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=ast.Number(666),
datatype=datatype.SimpleDataType('Int'),
),
],
)
]
)
expected_start_func = expected_output.decl_list[0]
expected_start_func.constants = {
'const_0': ast.Number(value=666),
}
expected_start_func.vars = {
'testVar': datatype.SimpleDataType('Int'),
}
var_decl = expected_start_func.body[0]
var_decl.rvalue_expr.binded_var_name = 'const_0'
return expected_output
expected_output = get_expected_output()
real_output = datatype.mark_out_datatypes(input_ast)
misc.assert_equal(self, expected_output, real_output)
def test_integer_var_decl_with_plus_integer(self):
int_data_type = datatype.SimpleDataType('Int')
std_ident_list = {
'plusInt': ast.FuncSignature(
return_type=datatype.SimpleDataType('Int'),
param_list=[
ast.Param(name='a', datatype=int_data_type),
ast.Param(name='b', datatype=int_data_type),
],
),
}
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=ast.FuncCall(
expr=ast.Ident('plusInt'),
arg_list=[
ast.Number(1),
ast.Number(2),
],
),
),
],
)
]
)
input_ast.ident_list = std_ident_list
def get_expected_output():
expected_output = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=ast.FuncCall(
expr=ast.Ident('plusInt'),
arg_list=[
ast.Number(1),
ast.Number(2),
],
),
datatype=int_data_type,
),
],
),
]
)
expected_start_func = expected_output.decl_list[0]
expected_start_func.constants = {
'const_0': ast.Number(value=1),
'const_1': ast.Number(value=2),
}
expected_start_func.tmp_vars = {
'tmp_0': int_data_type,
}
expected_start_func.vars = {
'testVar': int_data_type,
}
var_decl = expected_start_func.body[0]
var_decl.rvalue_expr.binded_var_name = 'tmp_0'
arg_list = var_decl.rvalue_expr.arg_list
arg_list[0].binded_var_name = 'const_0'
arg_list[1].binded_var_name = 'const_1'
expected_output.ident_list = std_ident_list
return expected_output
expected_output = get_expected_output()
real_output = datatype.mark_out_datatypes(input_ast)
misc.assert_equal(self, expected_output, real_output)
def test_bad_func_error(self):
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=ast.FuncCall(
expr=ast.Ident('badFuncName'),
),
),
],
)
]
)
input_ast.ident_list = {}
self.assertRaisesRegexp(
Exception,
'no func: \'badFuncName\'',
datatype.mark_out_datatypes,
input_ast,
)
def test_bad_expr_type_error(self):
class BadExprClass(object):
pass
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
ast.VarDecl(
name='testVar',
expr=BadExprClass(),
),
],
)
]
)
self.assertRaisesRegexp(
Exception,
'Bad type:.*BadExprClass',
datatype.mark_out_datatypes,
input_ast,
)
def test_bad_stmt_type_error(self):
class BadStmtClass(object):
pass
input_ast = ast.Module(
decl_list=[
ast.FuncDecl(
name='start',
signature=ast.FuncSignature(),
body=[
BadStmtClass(),
],
)
]
)
self.assertRaisesRegexp(
Exception,
'Bad type:.*BadStmtClass',
datatype.mark_out_datatypes,
input_ast,
)
def test_bad_decl_type_error(self):
class BadDeclClass(object):
pass
input_ast = ast.Module(
decl_list=[
BadDeclClass(),
]
)
self.assertRaisesRegexp(
Exception,
'Bad type:.*BadDeclClass',
datatype.mark_out_datatypes,
input_ast,
)
# vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
# Contains code derived from Python tarfile.extractall.
#
# Copyright (C) 2002 Lars Gustabel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Credits: Gustavo Niemeyer, Niels Gustabel, Richard Townsend.
from __future__ import print_function, absolute_import
import os
import copy
import logging
from collections import defaultdict
from contextlib import closing
# This import adds support for multistream BZ2 files
# This is a patched tarfile using a Python2 backport for bz2file from Python3
# Because of http://bugs.python.org/issue20781
from extractcode.tarfile_patch import tarfile
from commoncode.paths import resolve
from extractcode import ExtractError
logger = logging.getLogger('extractcode')
# logging.basicConfig(level=logging.DEBUG)
"""
Low level support for tar-based archive extraction using Python built-in tar
support.
"""
def list_entries(location):
"""
Yield entries from the archive file at location.
"""
raise NotImplementedError()
def extract(location, target_dir):
"""
Extract all files from the tar archive file at `location` in the
`target_dir`. Plain tars and tars compressed with gzip and bzip2 are
supported transparently. Other compressions such as xz or lzma are handled
in two steps. Return a list of warning messages. Raise exceptions on errors.
Skip special files. Contains code derived from Python tarfile.extractall.
Copyright (C) 2002 Lars Gustabel <lars@gustaebel.de>
All rights reserved.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Credits: Gustavo Niemeyer, Niels Gustabel, Richard Townsend.
"""
assert location
assert target_dir
warnings = defaultdict(list)
# track directories to fixup modification times at the end
directories = []
with closing(tarfile.open(location)) as tar:
tar.errorlevel = 1
names = set()
for tinfo in tar.getmembers():
is_special = not any((tinfo.isfile(), tinfo.isdir(), tinfo.islnk(), tinfo.issym()))
if is_special:
# FIXME: we should not report a warning?
warnings[tinfo.name].append('Skipping special file.')
continue
# hardlinks and symlinks are treated as regular files
if tinfo.islnk() or tinfo.issym():
if tinfo.issym():
# Always search the entire archive.
linkname = '/'.join(filter(None,
(os.path.dirname(tinfo.name), tinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link
# is just a reference to an already archived file.
linkname = tinfo.linkname
limit = tinfo
realfile = tar._getmember(linkname, tarinfo=limit, normalize=True)
if realfile is None:
warnings[tinfo.name].append('Skipping broken link to: %(linkname)r' % locals())
continue
if not (realfile.isfile() or realfile.isdir()):
warnings[tinfo.name].append('Skipping link to special file: %(linkname)r' % locals())
continue
if realfile.islnk() or realfile.issym():
# FIXME: Check tarbomb
warnings[tinfo.name].append('Skipping multi-level link to: %(linkname)r' % locals())
continue
# replace the tarinfo with the linked-to file info
# but keep the link name
lname = tinfo.name
tinfo = copy.copy(realfile)
tinfo.name = lname
# FIXME: we skip duplicates, this can happen and will fail if the
# location is read-only, we should instead rename duplicates
# using extractcode.new_name
if tinfo.name.lower() in names:
    warnings[tinfo.name].append('Skipping duplicate file name.')
    continue
names.add(tinfo.name.lower())
tinfo = copy.copy(tinfo)
# ensure we do stay always under the target dir
tinfo.name = resolve(tinfo.name)
# Extract all files with a safe mode
# FIXME: use the current user mask
tinfo.mode = 0700
# keep a list of dirs to fix mtime once they are all created
if tinfo.isdir():
directories.append(tinfo)
try:
tar.extract(tinfo, target_dir)
except Exception, e:
# FIXME: we must keep the traceback for diagnostics
raise ExtractError()
# Set correct mtime on directories, starting from the bottom of the tree
def dir_sorter(a, b):
return cmp(a.name, b.name)
for tinfo in sorted(directories, cmp=dir_sorter, reverse=True):
dir_loc = os.path.join(target_dir, tinfo.name)
try:
# NOTE: this may not work at all on Windows
tar.utime(tinfo, dir_loc)
except Exception, e:
warnings[tinfo.name].append(str(e))
# collect warnings
warning_messages = []
for pathname, messages in warnings.items():
msg = pathname + ': ' + '\n'.join(messages).replace(target_dir, '.')
if msg not in warning_messages:
warning_messages.append(msg)
return warning_messages
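# Hedged usage sketch (illustration only): the archive and target paths below
# are hypothetical.
#
#     warning_messages = extract('/tmp/archive.tar.gz', '/tmp/extracted')
#     for message in warning_messages:
#         print(message)   # e.g. './some/path: Skipping special file.'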
|
|
from packaging.version import Version
import pickle
import random
import warnings
import mxnet as mx
import numpy as np
import pytest
from mxnet.gluon import Trainer
from mxnet.gluon.data import Dataset, DataLoader
from mxnet.gluon.nn import HybridSequential, Dense
import mlflow
import mlflow.gluon
from mlflow.tracking.client import MlflowClient
from mlflow.gluon._autolog import __MLflowGluonCallback
from mlflow.utils.autologging_utils import BatchMetricsLogger
from unittest.mock import patch
from tests.gluon.utils import is_mxnet_older_than_1_6_0, get_estimator
if Version(mx.__version__) >= Version("2.0.0"):
array_module = mx.np
else:
array_module = mx.nd
class LogsDataset(Dataset):
def __init__(self):
self.len = 1000
def __getitem__(self, idx):
return (
array_module.array(np.random.rand(1, 32)),
array_module.full(1, random.randint(0, 10), dtype="float32"),
)
def __len__(self):
return self.len
def get_train_prefix():
# training prefix was renamed to `training` in mxnet 1.6.0:
# https://github.com/apache/incubator-mxnet/pull/17048
return "train" if is_mxnet_older_than_1_6_0() else "training"
def get_gluon_random_data_run(log_models=True):
mlflow.gluon.autolog(log_models)
with mlflow.start_run() as run:
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
validation = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(),
"adam",
optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07},
)
est = get_estimator(model, trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3, val_data=validation)
client = mlflow.tracking.MlflowClient()
return client.get_run(run.info.run_id)
@pytest.fixture
def gluon_random_data_run(log_models=True):
return get_gluon_random_data_run(log_models)
@pytest.mark.large
def test_gluon_autolog_logs_expected_data(gluon_random_data_run):
data = gluon_random_data_run.data
train_prefix = get_train_prefix()
assert "{} accuracy".format(train_prefix) in data.metrics
assert "validation accuracy" in data.metrics
# In mxnet >= 1.6.0, `Estimator` monitors `loss` only when `train_metrics` is specified.
#
# estimator.Estimator(loss=SomeLoss()) # monitors `loss`
# estimator.Estimator(loss=SomeLoss(), train_metrics=SomeMetric()) # doesn't monitor `loss`
if is_mxnet_older_than_1_6_0():
assert "{} softmaxcrossentropyloss".format(train_prefix) in data.metrics
assert "validation softmaxcrossentropyloss" in data.metrics
assert "optimizer_name" in data.params
assert data.params["optimizer_name"] == "Adam"
assert "epsilon" in data.params
assert data.params["epsilon"] == "1e-07"
@pytest.mark.large
def test_gluon_autolog_batch_metrics_logger_logs_expected_metrics():
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
run = get_gluon_random_data_run()
patched_metrics_data = dict(patched_metrics_data)
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
assert original_metrics[metric_name] == patched_metrics_data[metric_name]
train_prefix = get_train_prefix()
assert "{} accuracy".format(train_prefix) in original_metrics
assert "{} accuracy".format(train_prefix) in patched_metrics_data
@pytest.mark.large
def test_gluon_autolog_model_can_load_from_artifact(gluon_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(gluon_random_data_run.info.run_id)
artifacts = list(map(lambda x: x.path, artifacts))
assert "model" in artifacts
ctx = mx.cpu()
model = mlflow.gluon.load_model("runs:/" + gluon_random_data_run.info.run_id + "/model", ctx)
model(array_module.array(np.random.rand(1000, 1, 32)))
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_gluon_autolog_log_models_configuration(log_models):
random_data_run = get_gluon_random_data_run(log_models)
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(random_data_run.info.run_id)
artifacts = list(map(lambda x: x.path, artifacts))
assert ("model" in artifacts) == log_models
@pytest.mark.large
def test_autolog_ends_auto_created_run():
mlflow.gluon.autolog()
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(), "adam", optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07}
)
est = get_estimator(model, trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3)
assert mlflow.active_run() is None
@pytest.mark.large
def test_autolog_persists_manually_created_run():
mlflow.gluon.autolog()
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
with mlflow.start_run() as run:
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(),
"adam",
optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07},
)
est = get_estimator(model, trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3)
assert mlflow.active_run().info.run_id == run.info.run_id
def test_callback_is_callable():
cb = __MLflowGluonCallback(log_models=True, metrics_logger=BatchMetricsLogger(run_id="1234"))
pickle.dumps(cb)
@pytest.mark.large
def test_autolog_registering_model():
registered_model_name = "test_autolog_registered_model"
mlflow.gluon.autolog(registered_model_name=registered_model_name)
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(), "adam", optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07}
)
est = get_estimator(model, trainer)
with mlflow.start_run(), warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3)
registered_model = MlflowClient().get_registered_model(registered_model_name)
assert registered_model.name == registered_model_name
|
|
from __future__ import absolute_import
from contextlib import contextmanager
import os
import sys
import re
import textwrap
import site
import scripttest
import virtualenv
from tests.lib.path import Path, curdir, u
DATA_DIR = Path(__file__).folder.folder.join("data").abspath
SRC_DIR = Path(__file__).abspath.folder.folder.folder
pyversion = sys.version[:3]
pyversion_tuple = sys.version_info
def path_to_url(path):
"""
Convert a path to URI. The path will be made absolute and
will not have quoted path parts.
(adapted from pip.util)
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join(filepath)
if drive:
return 'file:///' + drive + url
return 'file://' + url
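# Illustrative examples (not part of the original module):
#     path_to_url('/tmp/data')  -> 'file:///tmp/data'  on POSIX
#     path_to_url('C:\\data')   -> 'file:///C:/data'   on Windows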
class TestData(object):
"""
Represents a bundle of pre-created test data.
This copies a pristine set of test data into a root location that is
designed to be test specific. The reason for this is when running the tests
concurrently errors can be generated because the related tooling uses
the directory as a work space. This leads to two concurrent processes
trampling over each other. This class gets around that by copying all
data into a directory and operating on the copied data.
"""
def __init__(self, root, source=None):
self.source = source or DATA_DIR
self.root = Path(root).abspath
@classmethod
def copy(cls, root):
obj = cls(root)
obj.reset()
return obj
def reset(self):
self.root.rmtree()
self.source.copytree(self.root)
@property
def packages(self):
return self.root.join("packages")
@property
def packages2(self):
return self.root.join("packages2")
@property
def packages3(self):
return self.root.join("packages3")
@property
def src(self):
return self.root.join("src")
@property
def indexes(self):
return self.root.join("indexes")
@property
def reqfiles(self):
return self.root.join("reqfiles")
@property
def find_links(self):
return path_to_url(self.packages)
@property
def find_links2(self):
return path_to_url(self.packages2)
@property
def find_links3(self):
return path_to_url(self.packages3)
def index_url(self, index="simple"):
return path_to_url(self.root.join("indexes", index))
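# Hedged usage sketch (illustration only): a test fixture would typically copy
# the pristine data into a per-test root and hand out URLs from the copy. The
# `tmpdir` name below is an assumption.
#
#     data = TestData.copy(Path(str(tmpdir)) / 'data')
#     find_links = data.find_links        # file:// URL to the copied packages dir
#     index = data.index_url('simple')    # file:// URL to a local package index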
class TestFailure(AssertionError):
"""
An "assertion" failed during testing.
"""
pass
class TestPipResult(object):
def __init__(self, impl, verbose=False):
self._impl = impl
if verbose:
print(self.stdout)
if self.stderr:
print('======= stderr ========')
print(self.stderr)
print('=======================')
def __getattr__(self, attr):
return getattr(self._impl, attr)
if sys.platform == 'win32':
@property
def stdout(self):
return self._impl.stdout.replace('\r\n', '\n')
@property
def stderr(self):
return self._impl.stderr.replace('\r\n', '\n')
def __str__(self):
return str(self._impl).replace('\r\n', '\n')
else:
# Python doesn't automatically forward __str__ through __getattr__
def __str__(self):
return str(self._impl)
def assert_installed(self, pkg_name, editable=True, with_files=[],
without_files=[], without_egg_link=False,
use_user_site=False, sub_dir=False):
e = self.test_env
if editable:
pkg_dir = e.venv / 'src' / pkg_name.lower()
# If package was installed in a sub directory
if sub_dir:
pkg_dir = pkg_dir / sub_dir
else:
without_egg_link = True
pkg_dir = e.site_packages / pkg_name
if use_user_site:
egg_link_path = e.user_site / pkg_name + '.egg-link'
else:
egg_link_path = e.site_packages / pkg_name + '.egg-link'
if without_egg_link:
if egg_link_path in self.files_created:
raise TestFailure(
'unexpected egg link file created: %r\n%s' %
(egg_link_path, self)
)
else:
if egg_link_path not in self.files_created:
raise TestFailure(
'expected egg link file missing: %r\n%s' %
(egg_link_path, self)
)
egg_link_file = self.files_created[egg_link_path]
# FIXME: I don't understand why there's a trailing . here
if not (egg_link_file.bytes.endswith('\n.') and
egg_link_file.bytes[:-2].endswith(pkg_dir)):
raise TestFailure(textwrap.dedent(u('''\
Incorrect egg_link file %r
Expected ending: %r
------- Actual contents -------
%s
-------------------------------''' % (
egg_link_file,
pkg_dir + '\n.',
repr(egg_link_file.bytes))
)))
if use_user_site:
pth_file = e.user_site / 'easy-install.pth'
else:
pth_file = e.site_packages / 'easy-install.pth'
if (pth_file in self.files_updated) == without_egg_link:
raise TestFailure('%r unexpectedly %supdated by install' % (
pth_file, (not without_egg_link and 'not ' or '')))
if (pkg_dir in self.files_created) == (curdir in without_files):
raise TestFailure(textwrap.dedent('''\
expected package directory %r %sto be created
actually created:
%s
''') % (
pkg_dir,
(curdir in without_files and 'not ' or ''),
sorted(self.files_created.keys())))
for f in with_files:
if not (pkg_dir / f).normpath in self.files_created:
raise TestFailure(
'Package directory %r missing expected content %r' %
(pkg_dir, f)
)
for f in without_files:
if (pkg_dir / f).normpath in self.files_created:
raise TestFailure(
'Package directory %r has unexpected content %r' %
(pkg_dir, f)
)
class PipTestEnvironment(scripttest.TestFileEnvironment):
"""
A specialized TestFileEnvironment for testing pip
"""
#
# Attribute naming convention
# ---------------------------
#
# Instances of this class have many attributes representing paths
# in the filesystem. To keep things straight, absolute paths have
# a name of the form xxxx_path and relative paths have a name that
# does not end in '_path'.
exe = sys.platform == 'win32' and '.exe' or ''
verbose = False
def __init__(self, base_path, *args, **kwargs):
# Make our base_path a test.lib.path.Path object
base_path = Path(base_path)
# Store paths related to the virtual environment
_virtualenv = kwargs.pop("virtualenv")
path_locations = virtualenv.path_locations(_virtualenv)
# Make sure we have test.lib.path.Path objects
venv, lib, include, bin = map(Path, path_locations)
# workaround for https://github.com/pypa/virtualenv/issues/306
if hasattr(sys, "pypy_version_info"):
lib = os.path.join(venv, 'lib-python', pyversion)
self.venv_path = venv
self.lib_path = lib
self.include_path = include
self.bin_path = bin
if hasattr(sys, "pypy_version_info"):
self.site_packages_path = self.venv_path.join("site-packages")
else:
self.site_packages_path = self.lib_path.join("site-packages")
self.user_base_path = self.venv_path.join("user")
self.user_bin_path = self.user_base_path.join(
self.bin_path - self.venv_path
)
self.user_site_path = self.venv_path.join(
"user",
site.USER_SITE[len(site.USER_BASE) + 1:],
)
# Create a Directory to use as a scratch pad
self.scratch_path = base_path.join("scratch").mkdir()
# Set our default working directory
kwargs.setdefault("cwd", self.scratch_path)
# Setup our environment
environ = kwargs.get("environ")
if environ is None:
environ = os.environ.copy()
environ["PATH"] = Path.pathsep.join(
[self.bin_path] + [environ.get("PATH", [])],
)
environ["PYTHONUSERBASE"] = self.user_base_path
# Writing bytecode can mess up updated file detection
environ["PYTHONDONTWRITEBYTECODE"] = "1"
kwargs["environ"] = environ
# Call the TestFileEnvironment __init__
super(PipTestEnvironment, self).__init__(base_path, *args, **kwargs)
# Expand our absolute path directories into relative
for name in ["base", "venv", "lib", "include", "bin", "site_packages",
"user_base", "user_site", "user_bin", "scratch"]:
real_name = "%s_path" % name
setattr(self, name, getattr(self, real_name) - self.base_path)
# Make sure temp_path is a Path object
self.temp_path = Path(self.temp_path)
# Ensure the tmp dir exists, things break horribly if it doesn't
self.temp_path.mkdir()
# create easy-install.pth in user_site, so we always have it updated
# instead of created
self.user_site_path.makedirs()
self.user_site_path.join("easy-install.pth").touch()
def _ignore_file(self, fn):
if fn.endswith('__pycache__') or fn.endswith(".pyc"):
result = True
else:
result = super(PipTestEnvironment, self)._ignore_file(fn)
return result
def run(self, *args, **kw):
if self.verbose:
print('>> running %s %s' % (args, kw))
cwd = kw.pop('cwd', None)
run_from = kw.pop('run_from', None)
assert not cwd or not run_from, "Don't use run_from; it's going away"
cwd = cwd or run_from or self.cwd
return TestPipResult(
super(PipTestEnvironment, self).run(cwd=cwd, *args, **kw),
verbose=self.verbose,
)
def pip(self, *args, **kwargs):
# On old versions of Python, urllib3/requests will raise a warning
# about the lack of an SSLContext. Expect it when running commands
# that will touch the outside world.
if (pyversion_tuple < (2, 7, 9) and
args and args[0] in ('search', 'install', 'download')):
kwargs['expect_stderr'] = True
return self.run("pip", *args, **kwargs)
def pip_install_local(self, *args, **kwargs):
return self.pip(
"install", "--no-index",
"--find-links", path_to_url(os.path.join(DATA_DIR, "packages")),
*args, **kwargs
)
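# Hedged usage sketch (illustration only): inside a test, a `script` fixture of
# this type wraps pip invocations; the package name below is hypothetical.
#
#     result = script.pip('install', '--no-index', 'somepkg')
#     result.assert_installed('somepkg', editable=False)
#     local = script.pip_install_local('somepkg')  # resolves against DATA_DIR packages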
# FIXME ScriptTest does something similar, but only within a single
# ProcResult; this generalizes it so states can be compared across
# multiple commands. Maybe should be rolled into ScriptTest?
def diff_states(start, end, ignore=None):
"""
Differences two "filesystem states" as represented by dictionaries
of FoundFile and FoundDir objects.
Returns a dictionary with following keys:
``deleted``
Dictionary of files/directories found only in the start state.
``created``
Dictionary of files/directories found only in the end state.
``updated``
Dictionary of files whose size has changed (FIXME not entirely
reliable, but comparing contents is not possible because
FoundFile.bytes is lazy, and comparing mtime doesn't help if
we want to know if a file has been returned to its earlier
state).
Ignores mtime and other file attributes; only presence/absence and
size are considered.
"""
ignore = ignore or []
def prefix_match(path, prefix):
if path == prefix:
return True
prefix = prefix.rstrip(os.path.sep) + os.path.sep
return path.startswith(prefix)
start_keys = set([k for k in start.keys()
if not any([prefix_match(k, i) for i in ignore])])
end_keys = set([k for k in end.keys()
if not any([prefix_match(k, i) for i in ignore])])
deleted = dict([(k, start[k]) for k in start_keys.difference(end_keys)])
created = dict([(k, end[k]) for k in end_keys.difference(start_keys)])
updated = {}
for k in start_keys.intersection(end_keys):
if (start[k].size != end[k].size):
updated[k] = end[k]
return dict(deleted=deleted, created=created, updated=updated)
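# Hedged usage sketch (illustration only): comparing the filesystem state
# captured before and after a pip run.
#
#     diff = diff_states(result.files_before, result.files_after, ignore=['tmp'])
#     assert diff['deleted'] == {}
#     created_paths = sorted(diff['created'].keys())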
def assert_all_changes(start_state, end_state, expected_changes):
"""
Fails if anything changed that isn't listed in the
expected_changes.
start_state is either a dict mapping paths to
scripttest.[FoundFile|FoundDir] objects or a TestPipResult whose
files_before we'll test. end_state is either a similar dict or a
TestPipResult whose files_after we'll test.
Note: listing a directory means anything below
that directory can be expected to have changed.
"""
__tracebackhide__ = True
start_files = start_state
end_files = end_state
if isinstance(start_state, TestPipResult):
start_files = start_state.files_before
if isinstance(end_state, TestPipResult):
end_files = end_state.files_after
diff = diff_states(start_files, end_files, ignore=expected_changes)
if list(diff.values()) != [{}, {}, {}]:
raise TestFailure('Unexpected changes:\n' + '\n'.join(
[k + ': ' + ', '.join(v.keys()) for k, v in diff.items()]))
# Don't throw away this potentially useful information
return diff
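# Illustrative usage (hypothetical result objects and paths): after an
# uninstall, only the entries listed in expected_changes may differ between
# the install-time and uninstall-time states.
#
#   assert_all_changes(
#       install_result,        # TestPipResult captured by the install
#       uninstall_result,      # TestPipResult captured by the uninstall
#       [script.venv / 'build', 'cache'],
#   )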
def _create_test_package_with_subdirectory(script, subdirectory):
script.scratch_path.join("version_pkg").mkdir()
version_pkg_path = script.scratch_path / 'version_pkg'
version_pkg_path.join("version_pkg.py").write(textwrap.dedent("""
def main():
print('0.1')
"""))
version_pkg_path.join("setup.py").write(
textwrap.dedent("""
from setuptools import setup, find_packages
setup(name='version_pkg',
version='0.1',
packages=find_packages(),
py_modules=['version_pkg'],
entry_points=dict(console_scripts=['version_pkg=version_pkg:main']))
"""))
subdirectory_path = version_pkg_path.join(subdirectory)
subdirectory_path.mkdir()
subdirectory_path.join('version_subpkg.py').write(textwrap.dedent("""
def main():
print('0.1')
"""))
subdirectory_path.join('setup.py').write(
textwrap.dedent("""
from setuptools import setup, find_packages
setup(name='version_subpkg',
version='0.1',
packages=find_packages(),
py_modules=['version_subpkg'],
entry_points=dict(console_scripts=['version_pkg=version_subpkg:main']))
"""))
script.run('git', 'init', cwd=version_pkg_path)
script.run('git', 'add', '.', cwd=version_pkg_path)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'initial version', cwd=version_pkg_path
)
return version_pkg_path
def _create_test_package(script, name='version_pkg', vcs='git'):
script.scratch_path.join(name).mkdir()
version_pkg_path = script.scratch_path / name
version_pkg_path.join("%s.py" % name).write(textwrap.dedent("""
def main():
print('0.1')
"""))
version_pkg_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup, find_packages
setup(
name='{name}',
version='0.1',
packages=find_packages(),
py_modules=['{name}'],
entry_points=dict(console_scripts=['{name}={name}:main'])
)
""".format(name=name)))
if vcs == 'git':
script.run('git', 'init', cwd=version_pkg_path)
script.run('git', 'add', '.', cwd=version_pkg_path)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'initial version', cwd=version_pkg_path,
)
elif vcs == 'hg':
script.run('hg', 'init', cwd=version_pkg_path)
script.run('hg', 'add', '.', cwd=version_pkg_path)
script.run(
'hg', 'commit', '-q',
'--user', 'pip <pypa-dev@googlegroups.com>',
'-m', 'initial version', cwd=version_pkg_path,
)
elif vcs == 'svn':
repo_url = _create_svn_repo(script, version_pkg_path)
script.run(
'svn', 'checkout', repo_url, 'pip-test-package',
cwd=script.scratch_path
)
checkout_path = script.scratch_path / 'pip-test-package'
# svn internally stores windows drives as uppercase; we'll match that.
checkout_path = checkout_path.replace('c:', 'C:')
version_pkg_path = checkout_path
elif vcs == 'bazaar':
script.run('bzr', 'init', cwd=version_pkg_path)
script.run('bzr', 'add', '.', cwd=version_pkg_path)
script.run(
'bzr', 'whoami', 'pip <pypa-dev@googlegroups.com>',
cwd=version_pkg_path)
script.run(
'bzr', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-m', 'initial version', cwd=version_pkg_path,
)
else:
raise ValueError('Unknown vcs: %r' % vcs)
return version_pkg_path
def _create_svn_repo(script, version_pkg_path):
repo_url = path_to_url(
script.scratch_path / 'pip-test-package-repo' / 'trunk')
script.run(
'svnadmin', 'create', 'pip-test-package-repo',
cwd=script.scratch_path
)
script.run(
'svn', 'import', version_pkg_path, repo_url,
'-m', 'Initial import of pip-test-package',
cwd=script.scratch_path
)
return repo_url
def _change_test_package_version(script, version_pkg_path):
version_pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
def main():
print("some different version")'''))
script.run(
'git', 'clean', '-qfdx',
cwd=version_pkg_path,
expect_stderr=True,
)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'messed version',
cwd=version_pkg_path,
expect_stderr=True,
)
def assert_raises_regexp(exception, reg, run, *args, **kwargs):
"""Like assertRaisesRegexp in unittest"""
__tracebackhide__ = True
try:
run(*args, **kwargs)
assert False, "%s should have been thrown" % exception
except exception:
e = sys.exc_info()[1]
p = re.compile(reg)
assert p.search(str(e)), str(e)
@contextmanager
def requirements_file(contents, tmpdir):
"""Return a Path to a requirements file of given contents.
As long as the context manager is open, the requirements file will exist.
:param tmpdir: A Path to the folder in which to create the file
"""
path = tmpdir / 'reqs.txt'
path.write(contents)
yield path
path.remove()
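# Illustrative sketch (contents and names are placeholders): the requirements
# file only exists while the context manager is open.
#
#   with requirements_file('INITools==0.2\nsimplejson\n', tmpdir) as reqs_path:
#       script.pip('install', '-r', reqs_path)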
|
|
from lpoly import LPoly, Poly
from sympy.polys.monomialtools import (
monomial_mul,
monomial_div,
monomial_lcm,
monomial_lex_key as O_lex,
monomial_grlex_key as O_grlex,
monomial_grevlex_key as O_grevlex,
)
from sympy.utilities import any, all
def S_poly(tp1,tp2):
"""expv1,p1 = tp1 with expv1 = p1.leading_expv(), p1 monic;
similarly for tp2.
Compute LCM(LM(p1),LM(p2))/LM(p1)*p1 - LCM(LM(p1),LM(p2))/LM(p2)*p2
Throw LPolyOverflowError if bits_exp is too small for the result.
"""
expv1,p1 = tp1
expv2,p2 = tp2
lp = p1.lp
lcm12 = monomial_lcm(expv1,expv2)
m1 = monomial_div(lcm12,expv1)
m2 = monomial_div(lcm12,expv2)
    # TODO: optimize
res = Poly(lp)
res.iadd_m_mul_q(p1,(m1,1))
res.iadd_m_mul_q(p2,(m2,-1))
return res
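# Worked example (illustrative, exponent vectors written as tuples): if p1 has
# leading monomial x**2*y (expv (2, 1)) and p2 has leading monomial x*y**2
# (expv (1, 2)), then lcm12 = (2, 2), m1 = (0, 1) and m2 = (1, 0), so the
# S-polynomial is y*p1 - x*p2, which cancels the leading terms of both.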
def groebner_basis(f, verbose=0):
"""An improved version of Buchberger's algorithm as presented in
T. Becker, V.Weispfenning 'Groebner basis' (1993) Springer, page 232;
see also buchberger_improved in toy_buchberger.py in Sage
input: f sequence of polynomial
output: Groebner basis of the ideal generated by f
"""
order = f[0].lp.order
def select(P):
# select the pair with minimum LCM
pr = min(P, key = lambda(i,j): order(lcm_expv(f[i][0],f[j][0])))
return pr
def normal(g, H):
"""
        compute the remainder h of the division of g by the polynomials
        indexed by H; if the remainder is zero, return None;
        otherwise, if h is not already in f, add it to f and return
        (leading expv of h, index of h in f)
"""
h = g.mod1([f[i] for i in H])
# FIXME
if not h or h == zero:
return None
else:
hk = tuple(h.keys())
# add h to SP, return (expv,pi)
if not hk in fd:
fd[hk] = len(f)
hexpv = h.leading_expv()
f.append((hexpv,h/h[hexpv]))
return hexpv, fd[hk]
return f[fd[hk]][0], fd[hk]
def update(G,CP,h):
"""update G using the set of critical pairs CP and h = (expv,pi)
see [BW] page 230
"""
hexpv, hp = f[h]
#print 'DB10',hp
# filter new pairs (h,g), g in G
C = G.copy()
D = set()
while C:
# select a pair (h,g) by popping an element from C
g = C.pop()
gexpv = f[g][0]
LCMhg = lcm_expv(hexpv, gexpv)
def lcm_divides(p):
expv = lcm_expv(hexpv, f[p][0])
# LCM(LM(h), LM(p)) divides LCM(LM(h),LM(g))
return monomial_div(LCMhg,expv)
# HT(h) and HT(g) disjoint: hexpv + gexpv == LCMhg
if monomial_mul(hexpv,gexpv) == LCMhg or (\
not any( lcm_divides(f) for f in C ) and \
not any( lcm_divides(pr[1]) for pr in D )):
D.add((h,g))
E = set()
while D:
# select h,g from D
h,g = D.pop()
gexpv = f[g][0]
LCMhg = lcm_expv(hexpv, gexpv)
if not monomial_mul(hexpv,gexpv) == LCMhg:
E.add((h,g))
# filter old pairs
B_new = set()
while CP:
# select g1,g2 from CP
g1,g2 = CP.pop()
g1expv = f[g1][0]
g2expv = f[g2][0]
LCM12 = lcm_expv(g1expv,g2expv)
# if HT(h) does not divide lcm(HT(g1),HT(g2))
if not monomial_div(LCM12, hexpv) or \
lcm_expv(g1expv,hexpv) == LCM12 or \
lcm_expv(g2expv,hexpv) == LCM12:
B_new.add((g1,g2))
B_new |= E
# filter polynomials
G_new = set()
while G:
g = G.pop()
if not monomial_div(f[g][0], hexpv):
G_new.add(g)
G_new.add(h)
return G_new,B_new
# end of update ################################
if not f:
return None
lp = f[0].lp
zero = Poly(lp)
# lcm_expv(expv1,expv2) computes the expv for the lcm
# of the monomials with expv1,expv2; the results are cached
lcm_expv0 = monomial_lcm
d_lcm_expv = {}
def lcm_expv(expv1,expv2):
if not (expv1,expv2) in d_lcm_expv:
d_lcm_expv[(expv1,expv2)] = lcm_expv0(expv1,expv2)
return d_lcm_expv[(expv1,expv2)]
# replace f with a list of (p.leading_expv(),p), where p is monic
# and all polynomials have different sets of monomials.
# In this way, p is identified by pk = tuple(p.keys())
# p is not hashable, so that one cannot use a built-in set of (expv,p)
# To implement a set of polynomials SP use a dictionary fd
# add p to SP:
# f.append((expv,p)); fd[pk] = len(f)
# ip is the number associated to p
# expv,p = f[ip]
# reduce the list of initial polynomials; see [BW] page 203
#print 'DB0',f
f1 = f[:]
while 1:
f = f1[:]
f1 = []
for i in range(len(f)):
p = f[i]
_, r = p.division(f[:i])
if r != 0:
f1.append(r)
    # when f does not change anymore, no two elements have the same LT,
    # so the elements of f are guaranteed to have pairwise different
    # sets of monomials
if f == f1:
break
#print 'DB1',f
# convert f in a list of pairs (expv,p) where expv is the encoded
# tuple of exponents of the LT of p and p is a monic polynomial
f1 = []
for h in f:
if h:
expv = h.leading_expv()
f1.append((expv,h/h[expv]))
f = f1
    # sort the initial polynomials according to the monomial ordering,
    # so that f[i] < f[j] whenever i > j
order = f[0][1].lp.order
f.sort(key=lambda t: order(t[0]), reverse=True)
#print 'DB2',[t[1] for t in f]
# f list of pairs (expv,p)
fd = {} # ip = fd[tuple(p.keys())]; (expv,p) = f[ip]
F = set() # set of indices of polynomials
G = set() # set of indices of intermediate would-be Groebner basis
CP = set() # set of pairs of indices of critical pairs
for i, h in enumerate(f):
fd[tuple(h[1].keys())] = i
F.add(i)
#####################################
# algorithm GROEBNERNEWS2 in [BW] page 232
while F:
# select p with minimum expv
m = min([f[x] for x in F],key=lambda f: order(f[0]))[1]
h = fd[tuple(m.keys())]
F.remove(h)
#print 'DB3 CP=',CP
#print 'DB3 G', G
G,CP = update(G,CP,h)
# count the number of critical pairs which reduce to zero
reductions_to_zero = 0
while CP:
g1,g2 = select(CP)
CP.remove((g1,g2))
h = S_poly(f[g1],f[g2])
# normal(h,G) appends h to f if h
h = normal(h,G)
if h:
G, CP = update(G,CP,h[1])
else:
reductions_to_zero += 1
######################################
# now G is a Groebner basis; reduce it
Gr = set()
for g in G:
h = normal(f[g][1], G - set([g]))
if h:
Gr.add(h[1])
# replace ip with (expv,p)
Gr = [f[g] for g in Gr]
# order according to the monomial ordering
Gr.sort(reverse=True)
# replace (expv,p) with p
Gr = [ x[1] for x in Gr]
if verbose:
print 'reductions_to_zero=',reductions_to_zero
return Gr
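# Illustrative note (not part of the original module): given polynomials
# p1, p2, ... built over the same LPoly ring via the lpoly module above,
#   G = groebner_basis([p1, p2])
# returns the reduced Groebner basis of the generated ideal as a list of
# monic Poly objects, sorted in decreasing monomial order.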
|
|
import copy
import re
import django
from django import forms
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms.utils import flatatt
from django.forms.widgets import FileInput
from django.template.loader import render_to_string
from django.utils import formats, six
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import filter, map
import warnings
class ImageInput(FileInput):
"""
Widget providing a input element for file uploads based on the
Django ``FileInput`` element. It hides the actual browser-specific
input element and shows the available image for images that have
been previously uploaded. Selecting the image will open the file
dialog and allow for selecting a new or replacing image file.
"""
template_name = 'partials/image_input_widget.html'
attrs = {'accept': 'image/*'}
def render(self, name, value, attrs=None, renderer=None):
"""
Render the ``input`` field based on the defined ``template_name``. The
        image URL is taken from *value* and is provided to the template as the
        ``image_url`` context variable, relative to ``MEDIA_URL``. Further
        attributes for the ``input`` element are provided in ``input_attrs`` and
contain parameters specified in *attrs* and *name*.
If *value* contains no valid image URL an empty string will be provided
in the context.
"""
extra_attrs = {
'type': self.input_type,
'name': name,
}
if django.VERSION < (1, 11):
final_attrs = self.build_attrs(attrs, **extra_attrs)
else:
final_attrs = self.build_attrs(attrs, extra_attrs=extra_attrs)
if not value or isinstance(value, InMemoryUploadedFile):
# can't display images that aren't stored
image_url = ''
else:
image_url = final_attrs['value'] = value
return render_to_string(self.template_name, {
'input_attrs': flatatt(final_attrs),
'image_url': image_url,
'image_id': "%s-image" % final_attrs['id'],
})
class WYSIWYGTextArea(forms.Textarea):
def __init__(self, *args, **kwargs):
kwargs.setdefault('attrs', {})
kwargs['attrs'].setdefault('class', '')
kwargs['attrs']['class'] += ' wysiwyg'
super(WYSIWYGTextArea, self).__init__(*args, **kwargs)
def datetime_format_to_js_date_format(format):
"""
Convert a Python datetime format to a date format suitable for use with
the JS date picker we use.
"""
format = format.split()[0]
return datetime_format_to_js_datetime_format(format)
def datetime_format_to_js_time_format(format):
"""
Convert a Python datetime format to a time format suitable for use with the
JS time picker we use.
"""
try:
format = format.split()[1]
except IndexError:
pass
converted = format
replacements = {
'%H': 'hh',
'%I': 'HH',
'%M': 'ii',
'%S': 'ss',
}
for search, replace in replacements.items():
converted = converted.replace(search, replace)
return converted.strip()
def datetime_format_to_js_datetime_format(format):
"""
Convert a Python datetime format to a time format suitable for use with
the datetime picker we use, http://www.malot.fr/bootstrap-datetimepicker/.
"""
converted = format
replacements = {
'%Y': 'yyyy',
'%y': 'yy',
'%m': 'mm',
'%d': 'dd',
'%H': 'hh',
'%I': 'HH',
'%M': 'ii',
'%S': 'ss',
}
for search, replace in replacements.items():
converted = converted.replace(search, replace)
return converted.strip()
def datetime_format_to_js_input_mask(format):
# taken from
# http://stackoverflow.com/questions/15175142/how-can-i-do-multiple-substitutions-using-regex-in-python # noqa
def multiple_replace(dict, text):
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
replacements = {
'%Y': 'y',
'%y': '99',
'%m': 'm',
'%d': 'd',
'%H': 'h',
'%I': 'h',
'%M': 's',
'%S': 's',
}
return multiple_replace(replacements, format).strip()
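# Illustrative examples (formats chosen arbitrarily) of the converters above:
#   datetime_format_to_js_datetime_format('%Y-%m-%d %H:%M')  # -> 'yyyy-mm-dd hh:ii'
#   datetime_format_to_js_time_format('%Y-%m-%d %H:%M')      # -> 'hh:ii'
#   datetime_format_to_js_input_mask('%Y-%m-%d')             # -> 'y-m-d'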
class DateTimeWidgetMixin(object):
def get_format(self):
return self.format or formats.get_format(self.format_key)[0]
def gett_attrs(self, attrs, format):
if not attrs:
attrs = {}
attrs['data-inputmask'] = u"'mask': '{mask}'".format(
mask=datetime_format_to_js_input_mask(format))
return attrs
class TimePickerInput(DateTimeWidgetMixin, forms.TimeInput):
"""
A widget that passes the date format to the JS date picker in a data
attribute.
"""
format_key = 'TIME_INPUT_FORMATS'
def render(self, name, value, attrs=None, renderer=None):
format = self.get_format()
input = super(TimePickerInput, self).render(
name, value, self.gett_attrs(attrs, format))
attrs = {
'data-oscarWidget': 'time',
'data-timeFormat': datetime_format_to_js_time_format(format),
}
div = format_html(u'<div class="input-group date"{}>', flatatt(attrs))
return mark_safe(u'<div class="form-inline">'
u' {div}'
u' {input}'
u' <span class="input-group-addon">'
u' <i class="icon-time glyphicon-time"></i>'
u' </span>'
u' </div>'
u'</div>'
.format(div=div, input=input))
class DatePickerInput(DateTimeWidgetMixin, forms.DateInput):
"""
A widget that passes the date format to the JS date picker in a data
attribute.
"""
format_key = 'DATE_INPUT_FORMATS'
def render(self, name, value, attrs=None, renderer=None):
format = self.get_format()
input = super(DatePickerInput, self).render(
name, value, self.gett_attrs(attrs, format))
attrs = {
'data-oscarWidget': 'date',
'data-dateFormat': datetime_format_to_js_date_format(format),
}
div = format_html(u'<div class="input-group date"{}>', flatatt(attrs))
return mark_safe(u'<div class="form-inline">'
u' {div}'
u' {input}'
u' <span class="input-group-addon">'
u' <i class="icon-calendar glyphicon-calendar"></i>'
u' </span>'
u' </div>'
u'</div>'
.format(div=div, input=input))
class DateTimePickerInput(DateTimeWidgetMixin, forms.DateTimeInput):
"""
A widget that passes the datetime format to the JS datetime picker in a
data attribute.
It also removes seconds by default. However this only works with widgets
without localize=True.
For localized widgets refer to
https://docs.djangoproject.com/en/1.6/topics/i18n/formatting/#creating-custom-format-files # noqa
instead to override the format.
"""
format_key = 'DATETIME_INPUT_FORMATS'
def __init__(self, *args, **kwargs):
include_seconds = kwargs.pop('include_seconds', False)
super(DateTimePickerInput, self).__init__(*args, **kwargs)
if not include_seconds and self.format:
self.format = re.sub(':?%S', '', self.format)
def render(self, name, value, attrs=None, renderer=None):
format = self.get_format()
input = super(DateTimePickerInput, self).render(
name, value, self.gett_attrs(attrs, format))
attrs = {
'data-oscarWidget': 'datetime',
'data-datetimeFormat': datetime_format_to_js_datetime_format(format),
}
div = format_html(u'<div class="input-group date"{}>', flatatt(attrs))
return mark_safe(u'<div class="form-inline">'
u' {div}'
u' {input}'
u' <span class="input-group-addon">'
u' <i class="icon-calendar glyphicon-calendar"></i>'
u' </span>'
u' </div>'
u'</div>'
.format(div=div, input=input))
class AdvancedSelect(forms.Select):
"""
Customised Select widget that allows a list of disabled values to be passed
to the constructor. Django's default Select widget doesn't allow this so
we have to override the render_option method and add a section that checks
for whether the widget is disabled.
"""
def __init__(self, attrs=None, choices=(), **kwargs):
if 'disabled_values' in kwargs:
message = "Passing disabled_values as kwarg to AdvancedSelect is deprecated " \
"and will be removed in the next major version of django-oscar"
warnings.warn(message, DeprecationWarning, stacklevel=2)
disabled_values = kwargs.pop('disabled_values', ())
self.disabled_values = set(force_text(v) for v in disabled_values)
super(AdvancedSelect, self).__init__(attrs, choices, **kwargs)
def render_option(self, selected_choices, option_value, option_label):
# TODO remove this when Django 1.8 support is dropped
option_value = force_text(option_value)
# In the next version, remove checking the option_value against self.disabled_values
# and just rely on looking at the disabled attribute
option_attrs = getattr(option_label, 'attrs', None) or {}
        # Also check if the object just has a disabled property, a shortcut for disabling the option
if getattr(option_label, 'disabled', False) or option_value in self.disabled_values:
option_attrs['disabled'] = 'disabled'
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html(u'<option value="{0}"{1}{2}>{3}</option>',
option_value,
selected_html,
flatatt(option_attrs),
force_text(option_label))
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
option = super(AdvancedSelect, self).create_option(name, value, label, selected, index, subindex, attrs)
if force_text(value) in self.disabled_values:
option['attrs']['disabled'] = True
return option
class RemoteSelect(forms.Widget):
"""
Somewhat reusable widget that allows AJAX lookups in combination with
select2.
Requires setting the URL of a lookup view either as class attribute or when
constructing
"""
is_multiple = False
lookup_url = None
template_name = None
def __init__(self, *args, **kwargs):
if 'lookup_url' in kwargs:
self.lookup_url = kwargs.pop('lookup_url')
if self.lookup_url is None:
raise ValueError(
"RemoteSelect requires a lookup ULR")
super(RemoteSelect, self).__init__(*args, **kwargs)
def format_value(self, value):
return six.text_type(value or '')
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return value
else:
return six.text_type(value)
def render(self, name, value, attrs=None, renderer=None):
attrs = {} if attrs is None else copy.copy(attrs)
attrs.update({
'type': 'hidden',
'name': name,
'data-ajax-url': self.lookup_url,
'data-multiple': 'multiple' if self.is_multiple else '',
'value': self.format_value(value),
'data-required': 'required' if self.is_required else '',
})
return mark_safe(u'<input %s>' % flatatt(attrs))
class MultipleRemoteSelect(RemoteSelect):
is_multiple = True
def format_value(self, value):
if value:
return ','.join(map(six.text_type, filter(bool, value)))
else:
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return []
else:
return list(filter(bool, value.split(',')))
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.krmapihosting import krm_api_host_pb2
from google3.cloud.graphite.mmv2.services.google.krmapihosting import (
krm_api_host_pb2_grpc,
)
from typing import List
class KrmApiHost(object):
def __init__(
self,
name: str = None,
labels: dict = None,
bundles_config: dict = None,
use_private_endpoint: bool = None,
gke_resource_link: str = None,
state: str = None,
management_config: dict = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.labels = labels
self.bundles_config = bundles_config
self.use_private_endpoint = use_private_endpoint
self.management_config = management_config
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
channel.Channel()
)
request = krm_api_host_pb2.ApplyKrmapihostingAlphaKrmApiHostRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
KrmApiHostBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if KrmApiHostManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
KrmApiHostManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyKrmapihostingAlphaKrmApiHost(request)
self.name = Primitive.from_proto(response.name)
self.labels = Primitive.from_proto(response.labels)
self.bundles_config = KrmApiHostBundlesConfig.from_proto(
response.bundles_config
)
self.use_private_endpoint = Primitive.from_proto(response.use_private_endpoint)
self.gke_resource_link = Primitive.from_proto(response.gke_resource_link)
self.state = KrmApiHostStateEnum.from_proto(response.state)
self.management_config = KrmApiHostManagementConfig.from_proto(
response.management_config
)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
channel.Channel()
)
request = krm_api_host_pb2.DeleteKrmapihostingAlphaKrmApiHostRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
KrmApiHostBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if KrmApiHostManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
KrmApiHostManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteKrmapihostingAlphaKrmApiHost(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
channel.Channel()
)
request = krm_api_host_pb2.ListKrmapihostingAlphaKrmApiHostRequest()
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListKrmapihostingAlphaKrmApiHost(request).items
def to_proto(self):
resource = krm_api_host_pb2.KrmapihostingAlphaKrmApiHost()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
resource.bundles_config.CopyFrom(
KrmApiHostBundlesConfig.to_proto(self.bundles_config)
)
else:
resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if KrmApiHostManagementConfig.to_proto(self.management_config):
resource.management_config.CopyFrom(
KrmApiHostManagementConfig.to_proto(self.management_config)
)
else:
resource.ClearField("management_config")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class KrmApiHostBundlesConfig(object):
def __init__(self, config_controller_config: dict = None):
self.config_controller_config = config_controller_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = krm_api_host_pb2.KrmapihostingAlphaKrmApiHostBundlesConfig()
if KrmApiHostBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
):
res.config_controller_config.CopyFrom(
KrmApiHostBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
)
)
else:
res.ClearField("config_controller_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return KrmApiHostBundlesConfig(
config_controller_config=KrmApiHostBundlesConfigConfigControllerConfig.from_proto(
resource.config_controller_config
),
)
class KrmApiHostBundlesConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KrmApiHostBundlesConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KrmApiHostBundlesConfig.from_proto(i) for i in resources]
class KrmApiHostBundlesConfigConfigControllerConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
krm_api_host_pb2.KrmapihostingAlphaKrmApiHostBundlesConfigConfigControllerConfig()
)
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return KrmApiHostBundlesConfigConfigControllerConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class KrmApiHostBundlesConfigConfigControllerConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
KrmApiHostBundlesConfigConfigControllerConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
KrmApiHostBundlesConfigConfigControllerConfig.from_proto(i)
for i in resources
]
class KrmApiHostManagementConfig(object):
def __init__(self, standard_management_config: dict = None):
self.standard_management_config = standard_management_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = krm_api_host_pb2.KrmapihostingAlphaKrmApiHostManagementConfig()
if KrmApiHostManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
):
res.standard_management_config.CopyFrom(
KrmApiHostManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
)
)
else:
res.ClearField("standard_management_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return KrmApiHostManagementConfig(
standard_management_config=KrmApiHostManagementConfigStandardManagementConfig.from_proto(
resource.standard_management_config
),
)
class KrmApiHostManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KrmApiHostManagementConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KrmApiHostManagementConfig.from_proto(i) for i in resources]
class KrmApiHostManagementConfigStandardManagementConfig(object):
def __init__(
self,
network: str = None,
master_ipv4_cidr_block: str = None,
man_block: str = None,
cluster_cidr_block: str = None,
services_cidr_block: str = None,
cluster_named_range: str = None,
services_named_range: str = None,
):
self.network = network
self.master_ipv4_cidr_block = master_ipv4_cidr_block
self.man_block = man_block
self.cluster_cidr_block = cluster_cidr_block
self.services_cidr_block = services_cidr_block
self.cluster_named_range = cluster_named_range
self.services_named_range = services_named_range
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
krm_api_host_pb2.KrmapihostingAlphaKrmApiHostManagementConfigStandardManagementConfig()
)
if Primitive.to_proto(resource.network):
res.network = Primitive.to_proto(resource.network)
if Primitive.to_proto(resource.master_ipv4_cidr_block):
res.master_ipv4_cidr_block = Primitive.to_proto(
resource.master_ipv4_cidr_block
)
if Primitive.to_proto(resource.man_block):
res.man_block = Primitive.to_proto(resource.man_block)
if Primitive.to_proto(resource.cluster_cidr_block):
res.cluster_cidr_block = Primitive.to_proto(resource.cluster_cidr_block)
if Primitive.to_proto(resource.services_cidr_block):
res.services_cidr_block = Primitive.to_proto(resource.services_cidr_block)
if Primitive.to_proto(resource.cluster_named_range):
res.cluster_named_range = Primitive.to_proto(resource.cluster_named_range)
if Primitive.to_proto(resource.services_named_range):
res.services_named_range = Primitive.to_proto(resource.services_named_range)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return KrmApiHostManagementConfigStandardManagementConfig(
network=Primitive.from_proto(resource.network),
master_ipv4_cidr_block=Primitive.from_proto(
resource.master_ipv4_cidr_block
),
man_block=Primitive.from_proto(resource.man_block),
cluster_cidr_block=Primitive.from_proto(resource.cluster_cidr_block),
services_cidr_block=Primitive.from_proto(resource.services_cidr_block),
cluster_named_range=Primitive.from_proto(resource.cluster_named_range),
services_named_range=Primitive.from_proto(resource.services_named_range),
)
class KrmApiHostManagementConfigStandardManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
KrmApiHostManagementConfigStandardManagementConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
KrmApiHostManagementConfigStandardManagementConfig.from_proto(i)
for i in resources
]
class KrmApiHostStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return krm_api_host_pb2.KrmapihostingAlphaKrmApiHostStateEnum.Value(
"KrmapihostingAlphaKrmApiHostStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return krm_api_host_pb2.KrmapihostingAlphaKrmApiHostStateEnum.Name(resource)[
len("KrmapihostingAlphaKrmApiHostStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
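# Illustrative sketch (all values are placeholders): applying a Config
# Controller instance through this client. Note that the nested configs are
# passed as the wrapper objects defined above, which to_proto expects.
#
#   host = KrmApiHost(
#       name='main',
#       bundles_config=KrmApiHostBundlesConfig(
#           config_controller_config=KrmApiHostBundlesConfigConfigControllerConfig(
#               enabled=True)),
#       project='my-project',
#       location='us-central1',
#       service_account_file='/path/to/service-account.json',
#   )
#   host.apply()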
|
|
"""
Consul KV Endpoint Access
"""
from consulate.api import base
from consulate import utils
class KV(base.Endpoint):
"""The :py:class:`consul.api.KV` class implements a :py:class:`dict` like
interface for working with the Key/Value service. Simply use items on the
:py:class:`consulate.Session` like you would with a :py:class:`dict` to
:py:meth:`get <consulate.api.KV.get>`,
:py:meth:`set <consulate.api.KV.set>`, or
:py:meth:`delete <consulate.api.KV.delete>` values in the key/value store.
Additionally, :py:class:`KV <consulate.api.KV>` acts as an
:py:meth:`iterator <consulate.api.KV.__iter__>`, providing methods to
iterate over :py:meth:`keys <consulate.api.KV.keys>`,
:py:meth:`values <consulate.api.KV.values>`,
:py:meth:`keys and values <consulate.api.KV.iteritems>`, etc.
Should you need access to get or set the flag value, the
:py:meth:`get_record <consulate.api.KV.get_record>`,
:py:meth:`set_record <consulate.api.KV.set_record>`,
and :py:meth:`records <consulate.api.KV.records>` provide a way to access
the additional fields exposed by the KV service.
"""
def __contains__(self, item):
"""Return True if there is a value set in the Key/Value service for the
given key.
:param str item: The key to check for
:rtype: bool
"""
item = item.lstrip('/')
return self._get_no_response_body([item])
def __delitem__(self, item):
"""Delete an item from the Key/Value service
:param str item: The key name
"""
self._delete_item(item)
def __getitem__(self, item):
"""Get a value from the Key/Value service, returning it fully
decoded if possible.
:param str item: The item name
:rtype: mixed
:raises: KeyError
"""
value = self._get_item(item)
if not value:
raise KeyError('Key not found ({0})'.format(item))
return value.get('Value')
def __iter__(self):
"""Iterate over all the keys in the Key/Value service
:rtype: iterator
"""
for key in self.keys():
yield key
def __len__(self):
"""Return the number if items in the Key/Value service
:return: int
"""
return len(self._get_all_items())
def __setitem__(self, item, value):
"""Set a value in the Key/Value service, using the CAS mechanism
to ensure that the set is atomic. If the value passed in is not a
string, an attempt will be made to JSON encode the value prior to
setting it.
:param str item: The key to set
:param mixed value: The value to set
:raises: KeyError
"""
self._set_item(item, value)
def acquire_lock(self, item, session):
"""Use Consul for locking by specifying the item/key to lock with
and a session value for removing the lock.
:param str item: The item in the Consul KV database
:param str session: The session value for the lock
:return: bool
"""
return self._put_response_body([item], {'acquire': session})
def delete(self, item, recurse=False):
"""Delete an item from the Key/Value service
:param str item: The item key
:param bool recurse: Remove keys prefixed with the item pattern
:raises: KeyError
"""
return self._delete_item(item, recurse)
def get(self, item, default=None, raw=False):
"""Get a value from the Key/Value service, returning it fully
decoded if possible.
:param str item: The item key
:rtype: mixed
:raises: KeyError
"""
response = self._get_item(item, raw)
if isinstance(response, dict):
return response.get('Value', default)
return response or default
def get_record(self, item):
"""Get the full record from the Key/Value service, returning
all fields including the flag.
:param str item: The item key
:rtype: dict
:raises: KeyError
"""
return self._get_item(item)
def find(self, prefix, separator=None):
"""Find all keys with the specified prefix, returning a dict of
matches.
*Example:*
.. code:: python
>>> consul.kv.find('b')
{'baz': 'qux', 'bar': 'baz'}
:param str prefix: The prefix to search with
:rtype: dict
"""
query_params = {'recurse': None}
if separator:
query_params['keys'] = prefix
query_params['separator'] = separator
response = self._get_list([prefix.lstrip('/')], query_params)
if separator:
results = response
else:
results = {}
for row in response:
results[row['Key']] = row['Value']
return results
def items(self):
"""Return a dict of all of the key/value pairs in the Key/Value service
*Example:*
.. code:: python
>>> consul.kv.items()
{'foo': 'bar', 'bar': 'baz', 'quz': True, 'corgie': 'dog'}
:rtype: dict
"""
return [{item['Key']: item['Value']} for item in self._get_all_items()]
def iteritems(self):
"""Iterate over the dict of key/value pairs in the Key/Value service
*Example:*
.. code:: python
>>> for key, value in consul.kv.iteritems():
... print(key, value)
...
(u'bar', 'baz')
(u'foo', 'bar')
(u'quz', True)
:rtype: iterator
"""
for item in self._get_all_items():
yield item['Key'], item['Value']
def keys(self):
"""Return a list of all of the keys in the Key/Value service
*Example:*
.. code:: python
>>> consul.kv.keys()
[u'bar', u'foo', u'quz']
:rtype: list
"""
return sorted([row['Key'] for row in self._get_all_items()])
def records(self):
"""Return a list of tuples for all of the records in the Key/Value
service
*Example:*
.. code:: python
>>> consul.kv.records()
[(u'bar', 0, 'baz'),
(u'corgie', 128, 'dog'),
(u'foo', 0, 'bar'),
(u'quz', 0, True)]
:rtype: list of (Key, Flags, Value)
"""
return [(item['Key'], item['Flags'], item['Value'])
for item in self._get_all_items()]
def release_lock(self, item, session):
"""Release an existing lock from the Consul KV database.
:param str item: The item in the Consul KV database
:param str session: The session value for the lock
:return: bool
"""
return self._put_response_body([item], {'release': session})
def set(self, item, value):
"""Set a value in the Key/Value service, using the CAS mechanism
to ensure that the set is atomic. If the value passed in is not a
string, an attempt will be made to JSON encode the value prior to
setting it.
:param str item: The key to set
:param mixed value: The value to set
:raises: KeyError
"""
return self.__setitem__(item, value)
def set_record(self, item, flags=0, value=None, replace=True):
"""Set a full record, including the item flag
:param str item: The key to set
:param mixed value: The value to set
        :param bool replace: If True, an existing value will be overwritten
"""
self._set_item(item, value, flags, replace)
def values(self):
"""Return a list of all of the values in the Key/Value service
*Example:*
.. code:: python
>>> consul.kv.values()
[True, 'bar', 'baz']
:rtype: list
"""
return [row['Value'] for row in self._get_all_items()]
def _delete_item(self, item, recurse=False):
"""Remove an item from the Consul database
:param str item:
:param recurse:
:return:
"""
query_params = {'recurse': True} if recurse else {}
return self._adapter.delete(self._build_uri([item], query_params))
def _get_all_items(self):
"""Internal method to return a list of all items in the Key/Value
service
:rtype: list
"""
return self._get_list([''], {'recurse': None})
def _get_item(self, item, raw=False):
"""Internal method to get the full item record from the Key/Value
service
:param str item: The item to get
:param bool raw: Return only the raw body
:rtype: mixed
"""
item = item.lstrip('/')
query_params = {'raw': True} if raw else {}
response = self._adapter.get(self._build_uri([item], query_params))
if response.status_code == 200:
return response.body
return None
def _get_modify_index(self, item, value, replace):
"""Get the modify index of the specified item. If replace is False
and an item is found, return ``None``. If the existing value
and the passed in value match, return ``None``. If no item exists in
the KV database, return ``0``, otherwise return the ``ModifyIndex``.
:param str item: The item to get the index for
:param str value: The item to evaluate for equality
:param bool replace: Should the item be replaced
:rtype: int|None
"""
response = self._adapter.get(self._build_uri([item]))
index = 0
if response.status_code == 200:
index = response.body.get('ModifyIndex')
rvalue = response.body.get('Value')
if rvalue == value:
return None
if not replace:
return None
return index
@staticmethod
def _prepare_value(value):
"""Prepare the value passed in and ensure that it is properly encoded
:param mixed value: The value to prepare
:rtype: bytes
"""
if not utils.is_string(value) or isinstance(value, bytes):
return value
try:
if utils.PYTHON3:
return value.encode('utf-8')
elif isinstance(value, unicode):
return value.encode('utf-8')
except UnicodeDecodeError:
return value
return value
def _set_item(self, item, value, flags=None, replace=True):
"""Internal method for setting a key/value pair with flags in the
Key/Value service
:param str item: The key to set
:param mixed value: The value to set
:param int flags: User defined flags to set
:param bool replace: Overwrite existing values
:raises: KeyError
"""
value = self._prepare_value(value)
if value and item.endswith('/'):
item = item.rstrip('/')
index = self._get_modify_index(item, value, replace)
if index is None:
return True
query_params = {'cas': index}
if flags is not None:
query_params['flags'] = flags
response = self._adapter.put(self._build_uri([item], query_params),
value)
if not response.status_code == 200 or not response.body:
raise KeyError(
'Error setting "{0}" ({1})'.format(item, response.status_code))
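# Illustrative sketch (hypothetical keys; assumes a consulate.Session whose
# ``kv`` attribute is an instance of this class): dict-style access to the
# key/value store.
#
#   session = consulate.Session()
#   session.kv['release'] = '1.2.3'      # __setitem__, CAS-checked set
#   value = session.kv.get('release')    # -> '1.2.3'
#   'release' in session.kv              # -> True
#   del session.kv['release']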
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Question(object):
QUESTION_XML_TEMPLATE = """<Question><QuestionIdentifier>%s</QuestionIdentifier>%s<IsRequired>%s</IsRequired>%s%s</Question>"""
DISPLAY_NAME_XML_TEMPLATE = """<DisplayName>%s</DisplayName>"""
def __init__(self, identifier, content, answer_spec, is_required=False, display_name=None): #amount=0.0, currency_code='USD'):
self.identifier = identifier
self.content = content
self.answer_spec = answer_spec
self.is_required = is_required
self.display_name = display_name
def get_as_params(self, label='Question', identifier=None):
if identifier is None:
raise ValueError("identifier (QuestionIdentifier) is required per MTurk spec.")
return { label : self.get_as_xml() }
def get_as_xml(self):
# add the display name if required
display_name_xml = ''
if self.display_name:
display_name_xml = self.DISPLAY_NAME_XML_TEMPLATE %(self.display_name)
ret = Question.QUESTION_XML_TEMPLATE % (self.identifier,
display_name_xml,
str(self.is_required).lower(),
self.content.get_as_xml(),
self.answer_spec.get_as_xml())
return ret
class ExternalQuestion(object):
EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
EXTERNAL_QUESTION_XML_TEMPLATE = """<ExternalQuestion xmlns="%s"><ExternalURL>%s</ExternalURL><FrameHeight>%s</FrameHeight></ExternalQuestion>"""
def __init__(self, external_url, frame_height):
self.external_url = external_url
self.frame_height = frame_height
def get_as_params(self, label='ExternalQuestion'):
return { label : self.get_as_xml() }
def get_as_xml(self):
ret = ExternalQuestion.EXTERNAL_QUESTION_XML_TEMPLATE % (ExternalQuestion.EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION,
self.external_url,
self.frame_height)
return ret
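# Illustrative sketch (URL and height are placeholders): an ExternalQuestion
# simply wraps the hosted form's URL and iframe height in the schema-qualified
# XML element above.
#
#   eq = ExternalQuestion('https://example.com/hit-form', 400)
#   xml = eq.get_as_xml()
#   params = eq.get_as_params()   # {'ExternalQuestion': xml}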
class OrderedContent(object):
def __init__(self):
self.items = []
def append(self, field, value):
"Expects field type and value"
self.items.append((field, value))
def get_binary_xml(self, field, value):
return """
<Binary>
<MimeType>
<Type>%s</Type>
<SubType>%s</SubType>
</MimeType>
<DataURL>%s</DataURL>
<AltText>%s</AltText>
</Binary>""" % (value['binary_type'],
value['binary_subtype'],
value['binary'],
value['binary_alttext'])
def get_application_xml(self, field, value):
raise NotImplementedError("Application question content is not yet supported.")
def get_as_xml(self):
default_handler = lambda f,v: '<%s>%s</%s>' % (f,v,f)
bulleted_list_handler = lambda _,list: '<List>%s</List>' % ''.join([('<ListItem>%s</ListItem>' % item) for item in list])
formatted_content_handler = lambda _,content: "<FormattedContent><![CDATA[%s]]></FormattedContent>" % content
application_handler = self.get_application_xml
binary_handler = self.get_binary_xml
children = ''
for (field,value) in self.items:
handler = default_handler
if field == 'List':
handler = bulleted_list_handler
elif field == 'Application':
handler = application_handler
elif field == 'Binary':
handler = binary_handler
elif field == 'FormattedContent':
handler = formatted_content_handler
children = children + handler(field, value)
return children
class Overview(object):
OVERVIEW_XML_TEMPLATE = """<Overview>%s</Overview>"""
def __init__(self):
self.ordered_content = OrderedContent()
def append(self, field, value):
self.ordered_content.append(field,value)
def get_as_params(self, label='Overview'):
return { label : self.get_as_xml() }
def get_as_xml(self):
ret = Overview.OVERVIEW_XML_TEMPLATE % (self.ordered_content.get_as_xml())
return ret
class QuestionForm(object):
QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
QUESTIONFORM_XML_TEMPLATE = """<QuestionForm xmlns="%s">%s</QuestionForm>""" # % (ns, questions_xml)
def __init__(self, questions=None, overview=None):
if questions is None or type(questions) is not list:
raise ValueError("Must pass a list of Question instances to QuestionForm constructor")
else:
self.questions = questions
self.overview = overview
def get_as_xml(self):
        overview_xml = ''
        if self.overview:
overview_xml = self.overview.get_as_xml()
questions_xml = "".join([q.get_as_xml() for q in self.questions])
qf_xml = overview_xml + questions_xml
return QuestionForm.QUESTIONFORM_XML_TEMPLATE % (QuestionForm.QUESTIONFORM_SCHEMA_LOCATION, qf_xml)
#def startElement(self, name, attrs, connection):
# return None
#
#def endElement(self, name, value, connection):
#
# #if name == 'Amount':
# # self.amount = float(value)
# #elif name == 'CurrencyCode':
# # self.currency_code = value
# #elif name == 'FormattedPrice':
# # self.formatted_price = value
#
# pass # What's this method for? I don't get it.
class QuestionContent(object):
QUESTIONCONTENT_XML_TEMPLATE = """<QuestionContent>%s</QuestionContent>"""
def __init__(self):
self.ordered_content = OrderedContent()
def append(self, field, value):
self.ordered_content.append(field,value)
def get_as_xml(self):
ret = QuestionContent.QUESTIONCONTENT_XML_TEMPLATE % (self.ordered_content.get_as_xml())
return ret
class AnswerSpecification(object):
ANSWERSPECIFICATION_XML_TEMPLATE = """<AnswerSpecification>%s</AnswerSpecification>"""
def __init__(self, spec):
self.spec = spec
def get_as_xml(self):
values = () # TODO
return AnswerSpecification.ANSWERSPECIFICATION_XML_TEMPLATE % self.spec.get_as_xml()
class FreeTextAnswer(object):
FREETEXTANSWER_XML_TEMPLATE = """<FreeTextAnswer>%s%s</FreeTextAnswer>""" # (constraints, default)
FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE = """<Constraints>%s%s%s</Constraints>""" # (is_numeric_xml, length_xml, regex_xml)
FREETEXTANSWER_LENGTH_XML_TEMPLATE = """<Length %s %s />""" # (min_length_attr, max_length_attr)
FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE = """<IsNumeric %s %s />""" # (min_value_attr, max_value_attr)
FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE = """<DefaultText>%s</DefaultText>""" # (default)
def __init__(self, default=None, min_length=None, max_length=None, is_numeric=False, min_value=None, max_value=None, format_regex=None):
self.default = default
self.min_length = min_length
self.max_length = max_length
self.is_numeric = is_numeric
self.min_value = min_value
self.max_value = max_value
self.format_regex = format_regex
def get_as_xml(self):
is_numeric_xml = ""
if self.is_numeric:
min_value_attr = ""
max_value_attr = ""
if self.min_value:
min_value_attr = """minValue="%d" """ % self.min_value
if self.max_value:
max_value_attr = """maxValue="%d" """ % self.max_value
is_numeric_xml = FreeTextAnswer.FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE % (min_value_attr, max_value_attr)
length_xml = ""
if self.min_length or self.max_length:
min_length_attr = ""
max_length_attr = ""
            if self.min_length:
                min_length_attr = """minLength="%d" """ % self.min_length
            if self.max_length:
                max_length_attr = """maxLength="%d" """ % self.max_length
length_xml = FreeTextAnswer.FREETEXTANSWER_LENGTH_XML_TEMPLATE % (min_length_attr, max_length_attr)
regex_xml = ""
if self.format_regex:
format_regex_attribs = '''regex="%s"''' %self.format_regex['regex']
error_text = self.format_regex.get('error_text', None)
if error_text:
format_regex_attribs += ' errorText="%s"' %error_text
flags = self.format_regex.get('flags', None)
if flags:
format_regex_attribs += ' flags="%s"' %flags
regex_xml = """<AnswerFormatRegex %s/>""" %format_regex_attribs
constraints_xml = ""
if is_numeric_xml or length_xml or regex_xml:
constraints_xml = FreeTextAnswer.FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE % (is_numeric_xml, length_xml, regex_xml)
default_xml = ""
if self.default is not None:
default_xml = FreeTextAnswer.FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE % self.default
return FreeTextAnswer.FREETEXTANSWER_XML_TEMPLATE % (constraints_xml, default_xml)
class FileUploadAnswer(object):
FILEUPLOADANSWER_XML_TEMLPATE = """<FileUploadAnswer><MinFileSizeInBytes>%d</MinFileSizeInBytes><MaxFileSizeInBytes>%d</MaxFileSizeInBytes></FileUploadAnswer>""" # (min, max)
DEFAULT_MIN_SIZE = 1024 # 1K (completely arbitrary!)
DEFAULT_MAX_SIZE = 5 * 1024 * 1024 # 5MB (completely arbitrary!)
def __init__(self, min=None, max=None):
self.min = min
self.max = max
if self.min is None:
self.min = FileUploadAnswer.DEFAULT_MIN_SIZE
if self.max is None:
self.max = FileUploadAnswer.DEFAULT_MAX_SIZE
def get_as_xml(self):
return FileUploadAnswer.FILEUPLOADANSWER_XML_TEMLPATE % (self.min, self.max)
class SelectionAnswer(object):
"""
A class to generate SelectionAnswer XML data structures.
Does not yet implement Binary selection options.
"""
SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>""" # % (count_xml, style_xml, selections_xml)
SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml)
SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type)
STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style)
MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>""" # count
MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>""" # count
ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection'
def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
if style is not None:
if style in SelectionAnswer.ACCEPTED_STYLES:
self.style_suggestion = style
else:
raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))
else:
self.style_suggestion = None
if selections is None:
raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples")
else:
self.selections = selections
self.min_selections = min
self.max_selections = max
assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
#assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections
self.type = type
self.other = other
def get_as_xml(self):
xml = ""
if self.type == 'text':
TYPE_TAG = "Text"
elif self.type == 'binary':
TYPE_TAG = "Binary"
else:
raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))
# build list of <Selection> elements
selections_xml = ""
for tpl in self.selections:
value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG)
selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml)
selections_xml += selection_xml
if self.other:
# add OtherSelection element as xml if available
if hasattr(self.other, 'get_as_xml'):
assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer'
selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
else:
selections_xml += "<OtherSelection />"
if self.style_suggestion is not None:
style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion
else:
style_xml = ""
if self.style_suggestion != 'radiobutton':
count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE %self.min_selections
count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE %self.max_selections
else:
count_xml = ""
ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml)
# return XML
return ret
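# Illustrative sketch (labels and identifiers are placeholders): composing a
# one-question form from the classes above. It relies on the Overview being
# optional in QuestionForm.get_as_xml().
#
#   qc = QuestionContent()
#   qc.append('Title', 'Pick a colour')
#   answer = SelectionAnswer(style='radiobutton',
#                            selections=[('Red', 'red'), ('Blue', 'blue')])
#   question = Question(identifier='colour', content=qc,
#                       answer_spec=AnswerSpecification(answer),
#                       is_required=True)
#   form_xml = QuestionForm(questions=[question]).get_as_xml()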
|
|
class GameData:
"""Information about Skyrim."""
NEW_CHAR_LEVEL_INFO = {
"Breton": {
"Illusion": 20,
"Conjuration": 25,
"Destruction": 15,
"Restoration": 20,
"Alteration": 20,
"Enchanting": 15,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 15,
"Archery": 15,
"Light Armor": 15,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 20,
"Alchemy": 20,
},
"Nord": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 15,
"Restoration": 15,
"Alteration": 15,
"Enchanting": 15,
"Smithing": 20,
"Heavy Armor": 15,
"Block": 20,
"Two-handed": 25,
"One-handed": 20,
"Archery": 15,
"Light Armor": 20,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 20,
"Alchemy": 15,
},
"Imperial": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 20,
"Restoration": 25,
"Alteration": 15,
"Enchanting": 20,
"Smithing": 15,
"Heavy Armor": 20,
"Block": 20,
"Two-handed": 15,
"One-handed": 20,
"Archery": 15,
"Light Armor": 15,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 15,
"Alchemy": 15,
},
"Redguard": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 20,
"Restoration": 15,
"Alteration": 20,
"Enchanting": 15,
"Smithing": 20,
"Heavy Armor": 15,
"Block": 20,
"Two-handed": 15,
"One-handed": 25,
"Archery": 20,
"Light Armor": 15,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 15,
"Alchemy": 15,
},
"Altmer": {
"Illusion": 25,
"Conjuration": 20,
"Destruction": 20,
"Restoration": 20,
"Alteration": 20,
"Enchanting": 20,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 15,
"Archery": 15,
"Light Armor": 15,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 15,
"Alchemy": 15,
},
"Bosmer": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 15,
"Restoration": 15,
"Alteration": 15,
"Enchanting": 15,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 15,
"Archery": 25,
"Light Armor": 20,
"Sneak": 20,
"Lockpicking": 20,
"Pickpocket": 20,
"Speech": 15,
"Alchemy": 20,
},
"Dunmer": {
"Illusion": 20,
"Conjuration": 15,
"Destruction": 25,
"Restoration": 15,
"Alteration": 20,
"Enchanting": 15,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 15,
"Archery": 15,
"Light Armor": 20,
"Sneak": 20,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 15,
"Alchemy": 20,
},
"Orc": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 15,
"Restoration": 15,
"Alteration": 15,
"Enchanting": 20,
"Smithing": 20,
"Heavy Armor": 25,
"Block": 20,
"Two-handed": 20,
"One-handed": 20,
"Archery": 15,
"Light Armor": 15,
"Sneak": 15,
"Lockpicking": 15,
"Pickpocket": 15,
"Speech": 15,
"Alchemy": 15,
},
"Argonian": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 15,
"Restoration": 20,
"Alteration": 20,
"Enchanting": 15,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 15,
"Archery": 15,
"Light Armor": 20,
"Sneak": 20,
"Lockpicking": 25,
"Pickpocket": 20,
"Speech": 15,
"Alchemy": 15,
},
"Khajiit": {
"Illusion": 15,
"Conjuration": 15,
"Destruction": 15,
"Restoration": 15,
"Alteration": 15,
"Enchanting": 15,
"Smithing": 15,
"Heavy Armor": 15,
"Block": 15,
"Two-handed": 15,
"One-handed": 20,
"Archery": 20,
"Light Armor": 15,
"Sneak": 25,
"Lockpicking": 20,
"Pickpocket": 20,
"Speech": 15,
"Alchemy": 20,
}
}
RACE_NAMES = ("Breton", "Nord", "Imperial", "Redguard",
"Altmer", "Bosmer", "Dunmer", "Orc",
"Argonian", "Khajiit")
RACE_TYPES = ("HUMAN", "MER", "BEAST")
SKILL_NAMES = ("Illusion", "Conjuration", "Destruction",
"Restoration", "Alteration", "Enchanting",
"Smithing", "Heavy Armor", "Block",
"Two-handed", "One-handed", "Archery",
"Light Armor", "Sneak", "Lockpicking",
"Pickpocket", "Speech", "Alchemy")
SKILL_TYPES = ("MAGIC", "COMBAT", "STEALTH")
PLAY_STYLES = {
"Crafty Merchant": ("Speech", "Alchemy", "Smithing", "Enchanting"),
"Criminal": ("Sneak", "Lockpicking", "Pickpocket")
}
class ValidationException(Exception):
"""Exception with 'problem list'.
Attributes:
message (str): error message
problems (list): list of all errors
"""
def __init__(self, message, problems=None):
super(ValidationException, self).__init__(message)
self.__problems = problems
def get_problems(self):
return self.__problems
class InputValidator:
"""Check if given input is valid (= could be Skyrim game data)."""
@staticmethod
def are_valid_skills(skills):
for skill in skills:
if skill not in GameData.SKILL_NAMES:
return False
return True
@staticmethod
def is_valid_char_level(level):
try:
level = int(level)
except ValueError:
return False
        return 0 < level < 300  # arbitrary upper bound; kept above 252, the level at which every perk can be unlocked
@staticmethod
def is_valid_level_combination(now, goal):
try:
now = int(now)
goal = int(goal)
except ValueError:
return False
return now < goal
@staticmethod
def is_valid_race(race):
return race in GameData.RACE_NAMES
@staticmethod
def is_valid_selection(selection):
return isinstance(selection, list) and len(selection) > 0
@staticmethod
def is_valid_skill_dictionary(dictionary):
if not isinstance(dictionary, dict) or len(dictionary) == 0:
return False
return InputValidator.are_valid_skills(dictionary)
@staticmethod
def is_valid_skill_level(level):
try:
level = int(level)
except ValueError:
return False
return 15 <= level <= 100
class InputCollector:
"""Collect valid user input.
Validation is handled by the validator passed to the constructor.
Attributes:
validator: any validator class/object.
"""
def __init__(self, validator=InputValidator):
self.__validator = validator
self.__goal = None
self.__now = None
self.__race = None
self.__selected_skills = None
self.__skill_levels = None
self.__template = None
def get_char_levels(self):
return self.__now, self.__goal
def get_race(self):
return self.__race
def get_selected_skills(self):
return self.__selected_skills
def get_skill_levels(self):
if self.__skill_levels is None:
self.__set_default_skill_levels()
return self.__skill_levels
def get_template(self):
temp = self.__template
if temp is None:
return ()
if temp in GameData.PLAY_STYLES:
return GameData.PLAY_STYLES[temp]
return [k for k, v in GameData.NEW_CHAR_LEVEL_INFO[temp].items() if
v > 15]
def has_template(self):
return self.__template is not None
def set_char_levels(self, goal, now=1):
valid_goal = self.__validator.is_valid_char_level(goal)
valid_now = now == 1 or self.__validator.is_valid_char_level(now)
if valid_goal and valid_now:
goal = int(goal)
now = int(now)
if self.__validator.is_valid_level_combination(goal=goal, now=now):
self.__goal = goal
self.__now = now
else:
raise ValidationException(
"Your goal level must be higher than your current level.",
["Goal", "Now"])
elif valid_goal:
raise ValidationException("Please enter a valid character level.",
["Now"])
elif valid_now:
raise ValidationException("Please enter a valid goal level.",
["Goal"])
else:
raise ValidationException("Please enter valid levels.",
["Goal", "Now"])
def set_race(self, race):
if self.__validator.is_valid_race(race):
self.__race = race
else:
raise ValidationException("Please select a race.")
def set_selected_skills(self, skills):
if self.__validator.is_valid_selection(skills):
if self.__validator.are_valid_skills(skills):
self.__selected_skills = skills
else:
raise ValidationException("Those skills are invalid.")
else:
raise ValidationException("You need to select at least one skill.")
def set_skill_levels(self, skill_levels):
if not self.__validator.is_valid_skill_dictionary(skill_levels):
raise ValidationException("Something went wrong.")
invalid_skills = []
for s in skill_levels:
if not self.__validator.is_valid_skill_level(skill_levels[s]):
invalid_skills.append(s)
if not invalid_skills:
self.__skill_levels = {skill: int(skill_levels[skill]) for skill in
skill_levels}
else:
raise ValidationException(
"Skill levels can range from 15 to 100.", invalid_skills)
def set_template(self, template):
self.__template = template # no validation!
def __set_default_skill_levels(self):
default_levels = GameData.NEW_CHAR_LEVEL_INFO[self.__race]
self.__skill_levels = {skill: default_levels[skill] for skill in
self.__selected_skills}
class OutputFormatter:
    """Return formatted strings that can be printed or written to a file."""
@staticmethod
def reformat(data):
text = " | Levels | | times \n"
text += " Skill name | current | goal | train | legendary \n"
text += "----------------+---------+------+-------+-----------\n"
for skill in sorted(data):
if data[skill]["Times Leveled"] != 0:
text += " {:<14} | ".format(skill)
text += "{:>7} | ".format(str(data[skill]["Start Level"]))
text += "{:>4} | ".format(str(data[skill]["Final Level"]))
text += "{:>4}x | ".format(str(data[skill]["Times Leveled"]))
text += "{:>8}x\n".format(str(data[skill]["Times Legendary"]))
return text
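# --- Illustrative usage (not part of the original module) --------------------
# A minimal, hedged sketch of how the classes above fit together. The skill
# dictionary passed to OutputFormatter.reformat() is normally produced by a
# levelling calculator that is not shown here, so the numbers below are made
# up purely for demonstration.
if __name__ == "__main__":
    collector = InputCollector()
    collector.set_race("Breton")
    collector.set_char_levels(goal=20, now=5)
    collector.set_selected_skills(["Illusion", "Alchemy", "Sneak"])
    # Defaults are looked up in GameData.NEW_CHAR_LEVEL_INFO["Breton"]
    print(collector.get_skill_levels())
    sample_result = {  # hypothetical calculator output
        "Illusion": {"Start Level": 20, "Final Level": 55,
                     "Times Leveled": 35, "Times Legendary": 0},
        "Sneak": {"Start Level": 15, "Final Level": 100,
                  "Times Leveled": 85, "Times Legendary": 1},
    }
    print(OutputFormatter.reformat(sample_result))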
|
|
# -*- coding: utf-8 -*-
"""
httpbin.helpers
~~~~~~~~~~~~~~~
This module provides helper functions for httpbin.
"""
import json
import base64
from hashlib import md5
from werkzeug.http import parse_authorization_header
from flask import request, make_response
from six.moves.urllib.parse import urlparse, urlunparse
from .structures import CaseInsensitiveDict
ASCII_ART = """
-=[ teapot ]=-
_...._
.' _ _ `.
| ."` ^ `". _,
\_;`"---"`|//
| ;/
\_ _/
`\"\"\"`
"""
REDIRECT_LOCATION = '/redirect/1'
ENV_HEADERS = (
'X-Varnish',
'X-Request-Start',
'X-Heroku-Queue-Depth',
'X-Real-Ip',
'X-Forwarded-Proto',
'X-Forwarded-Protocol',
'X-Forwarded-Ssl',
'X-Heroku-Queue-Wait-Time',
'X-Forwarded-For',
'X-Heroku-Dynos-In-Use',
'X-Forwarded-For',
'X-Forwarded-Protocol',
'X-Forwarded-Port',
'Runscope-Service'
)
ROBOT_TXT = """User-agent: *
Disallow: /deny
"""
ANGRY_ASCII = """
.-''''''-.
.' _ _ '.
/ O O \\
: :
| |
: __ :
\ .-"` `"-. /
'. .'
'-......-'
YOU SHOULDN'T BE HERE
"""
def json_safe(string, content_type='application/octet-stream'):
"""Returns JSON-safe version of `string`.
    If `string` is a Unicode string or valid UTF-8 bytes, it is returned
    unmodified, as it can safely be encoded to a JSON string.
    If `string` contains raw/binary data, it is Base64-encoded, formatted and
    returned according to the "data" URL scheme (RFC 2397). Since JSON is not
    suitable for binary data, some additional encoding is necessary; the "data"
    URL scheme was chosen for its simplicity.
"""
try:
string = string.decode('utf-8')
_encoded = json.dumps(string)
return string
except (ValueError, TypeError):
return b''.join([
b'data:',
content_type.encode('utf-8'),
b';base64,',
base64.b64encode(string)
]).decode('utf-8')
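def _demo_json_safe():
    """Hedged illustration, not part of the original module: UTF-8 text passes
    through json_safe() unchanged, while raw binary data is wrapped in an
    RFC 2397 "data" URL."""
    assert json_safe(b"hello") == "hello"
    assert json_safe(b"\x89PNG\r\n", "image/png") == "data:image/png;base64,iVBORw0K"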
def get_files():
"""Returns files dict from request context."""
files = dict()
for k, v in request.files.items():
content_type = request.files[k].content_type or 'application/octet-stream'
val = json_safe(v.read(), content_type)
if files.get(k):
if not isinstance(files[k], list):
files[k] = [files[k]]
files[k].append(val)
else:
files[k] = val
return files
def get_headers(hide_env=True):
"""Returns headers dict from request context."""
headers = dict(request.headers.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_HEADERS:
try:
del headers[key]
except KeyError:
pass
return CaseInsensitiveDict(headers.items())
def semiflatten(multi):
    """Convert a MultiDict into a regular dict. If there is more than one value
    for a key, the result will have a list of values for the key. Otherwise it
    will have the plain value."""
if multi:
result = multi.to_dict(flat=False)
for k, v in result.items():
if len(v) == 1:
result[k] = v[0]
return result
else:
return multi
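def _demo_semiflatten():
    """Hedged illustration, not part of the original module: semiflatten() keeps
    single values plain and only uses lists when a key repeats. It relies on
    werkzeug's MultiDict, which Flask's request.args/request.form are built on."""
    from werkzeug.datastructures import MultiDict
    multi = MultiDict([('a', '1'), ('b', '2'), ('b', '3')])
    return semiflatten(multi)  # {'a': '1', 'b': ['2', '3']}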
def get_url(request):
"""
Since we might be hosted behind a proxy, we need to check the
X-Forwarded-Proto, X-Forwarded-Protocol, or X-Forwarded-SSL headers
to find out what protocol was used to access us.
"""
protocol = request.headers.get('X-Forwarded-Proto') or request.headers.get('X-Forwarded-Protocol')
if protocol is None and request.headers.get('X-Forwarded-Ssl') == 'on':
protocol = 'https'
if protocol is None:
return request.url
url = list(urlparse(request.url))
url[0] = protocol
return urlunparse(url)
def get_dict(*keys, **extras):
"""Returns request dict of given keys."""
_keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')
assert all(map(_keys.__contains__, keys))
    data = request.data
    form = semiflatten(request.form)
try:
_json = json.loads(data.decode('utf-8'))
except (ValueError, TypeError):
_json = None
d = dict(
url=get_url(request),
args=semiflatten(request.args),
form=form,
data=json_safe(data),
origin=request.headers.get('X-Forwarded-For', request.remote_addr),
headers=get_headers(),
files=get_files(),
json=_json
)
out_d = dict()
for key in keys:
out_d[key] = d.get(key)
out_d.update(extras)
return out_d
def status_code(code):
"""Returns response object of given status code."""
redirect = dict(headers=dict(location=REDIRECT_LOCATION))
code_map = {
301: redirect,
302: redirect,
303: redirect,
304: dict(data=''),
305: redirect,
307: redirect,
401: dict(headers={'WWW-Authenticate': 'Basic realm="Fake Realm"'}),
402: dict(
data='Fuck you, pay me!',
headers={
'x-more-info': 'http://vimeo.com/22053820'
}
),
407: dict(headers={'Proxy-Authenticate': 'Basic realm="Fake Realm"'}),
418: dict( # I'm a teapot!
data=ASCII_ART,
headers={
'x-more-info': 'http://tools.ietf.org/html/rfc2324'
}
),
}
r = make_response()
r.status_code = code
if code in code_map:
m = code_map[code]
if 'data' in m:
r.data = m['data']
if 'headers' in m:
r.headers = m['headers']
return r
def check_basic_auth(user, passwd):
"""Checks user authentication using HTTP Basic Auth."""
auth = request.authorization
return auth and auth.username == user and auth.password == passwd
# Digest auth helpers
# qop is a quality of protection
def H(data):
return md5(data).hexdigest()
def HA1(realm, username, password):
"""Create HA1 hash by realm, username, password
HA1 = md5(A1) = MD5(username:realm:password)
"""
if not realm:
realm = u''
return H(b":".join([username.encode('utf-8'),
realm.encode('utf-8'),
password.encode('utf-8')]))
def HA2(credentails, request):
"""Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
    If the qop directive's value is "auth-int", then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
"""
if credentails.get("qop") == "auth" or credentails.get('qop') is None:
return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]))
elif credentails.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
        return H(b":".join([request['method'].encode('utf-8'),
                            request['uri'].encode('utf-8'),
                            H(request['body']).encode('utf-8')]))
    raise ValueError("unsupported qop value: %r" % credentails.get('qop'))
def response(credentails, password, request):
"""Compile digest auth response
    If the qop directive's value is "auth" or "auth-int", then compute the response as follows:
    RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
Else if the qop directive is unspecified, then compute the response as follows:
RESPONSE = MD5(HA1:nonce:HA2)
Arguments:
    - `credentails`: credentials dict
- `password`: request user password
- `request`: request dict
"""
response = None
HA1_value = HA1(
credentails.get('realm'),
credentails.get('username'),
password
)
HA2_value = HA2(credentails, request)
if credentails.get('qop') is None:
response = H(b":".join([
HA1_value.encode('utf-8'),
credentails.get('nonce', '').encode('utf-8'),
HA2_value.encode('utf-8')
]))
elif credentails.get('qop') == 'auth' or credentails.get('qop') == 'auth-int':
for k in 'nonce', 'nc', 'cnonce', 'qop':
if k not in credentails:
raise ValueError("%s required for response H" % k)
response = H(b":".join([HA1_value.encode('utf-8'),
credentails.get('nonce').encode('utf-8'),
credentails.get('nc').encode('utf-8'),
credentails.get('cnonce').encode('utf-8'),
credentails.get('qop').encode('utf-8'),
HA2_value.encode('utf-8')]))
else:
        raise ValueError("unsupported qop value: %r" % credentails.get('qop'))
return response
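def _demo_digest_response():
    """Hedged sketch, not part of the original module: computing the digest
    "response" value for a client that sent no qop directive, in which case
    RESPONSE = MD5(HA1:nonce:HA2). Every value below is made up; the variable
    name follows the module's spelling of `credentails`."""
    credentails = {
        'username': 'me',
        'realm': 'Fake Realm',
        'nonce': 'abc123',
        'qop': None,
    }
    fake_request = {'method': 'GET', 'uri': '/digest-auth/auth/me/passwd'}
    return response(credentails, 'passwd', fake_request)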
def check_digest_auth(user, passwd):
"""Check user authentication using HTTP Digest auth"""
if request.headers.get('Authorization'):
credentails = parse_authorization_header(request.headers.get('Authorization'))
if not credentails:
return
response_hash = response(credentails, passwd, dict(uri=request.script_root + request.path,
body=request.data,
method=request.method))
if credentails.get('response') == response_hash:
return True
return False
def secure_cookie():
"""Return true if cookie should have secure attribute"""
return request.environ['wsgi.url_scheme'] == 'https'
def __parse_request_range(range_header_text):
""" Return a tuple describing the byte range requested in a GET request
If the range is open ended on the left or right side, then a value of None
will be set.
RFC7233: http://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7233.html#header.range
Examples:
Range : bytes=1024-
Range : bytes=10-20
Range : bytes=-999
"""
left = None
right = None
if not range_header_text:
return left, right
range_header_text = range_header_text.strip()
if not range_header_text.startswith('bytes'):
return left, right
components = range_header_text.split("=")
if len(components) != 2:
return left, right
components = components[1].split("-")
    try:
        right = int(components[1])
    except (IndexError, ValueError):
        pass
    try:
        left = int(components[0])
    except (IndexError, ValueError):
        pass
return left, right
def get_request_range(request_headers, upper_bound):
first_byte_pos, last_byte_pos = __parse_request_range(request_headers['range'])
if first_byte_pos is None and last_byte_pos is None:
# Request full range
first_byte_pos = 0
last_byte_pos = upper_bound - 1
elif first_byte_pos is None:
# Request the last X bytes
first_byte_pos = max(0, upper_bound - last_byte_pos)
last_byte_pos = upper_bound - 1
elif last_byte_pos is None:
# Request the last X bytes
last_byte_pos = upper_bound - 1
return first_byte_pos, last_byte_pos
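# Hedged illustration of the Range-header helpers above (not part of the
# original module); upper_bound is the total size of the resource in bytes.
if __name__ == "__main__":
    print(__parse_request_range("bytes=10-20"))              # (10, 20)
    print(get_request_range({'range': 'bytes=-500'}, 1024))  # (524, 1023)
    print(get_request_range({'range': None}, 1024))          # (0, 1023)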
|
|
from __future__ import unicode_literals
import base64
import binascii
import hashlib
from django.dispatch import receiver
from django.conf import settings
from django.test.signals import setting_changed
from django.utils import importlib
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import (
pbkdf2, constant_time_compare, get_random_string)
from django.utils.module_loading import import_by_path
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
HASHERS = None # lazily loaded from PASSWORD_HASHERS
PREFERRED_HASHER = None # defaults to first item in PASSWORD_HASHERS
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
global HASHERS, PREFERRED_HASHER
HASHERS = None
PREFERRED_HASHER = None
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
must_update = hasher.algorithm != preferred.algorithm
if not must_update:
must_update = preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
    Turn a plain-text password into a hash for database storage.
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
def load_hashers(password_hashers=None):
global HASHERS
global PREFERRED_HASHER
hashers = []
if not password_hashers:
password_hashers = settings.PASSWORD_HASHERS
for backend in password_hashers:
hasher = import_by_path(backend)()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % backend)
hashers.append(hasher)
HASHERS = dict([(hasher.algorithm, hasher) for hasher in hashers])
PREFERRED_HASHER = hashers[0]
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
if PREFERRED_HASHER is None:
load_hashers()
return PREFERRED_HASHER
else:
if HASHERS is None:
load_hashers()
if algorithm not in HASHERS:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
return HASHERS[algorithm]
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
"""
    Returns the given hash, with only the first ``show`` characters shown. The
    rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
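def _demo_mask_hash():
    """Illustrative only, not part of Django: mask_hash() keeps the first
    ``show`` characters and masks the remainder for display purposes."""
    return mask_hash("0123456789abcdef", show=6)  # '012345**********'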
class BasePasswordHasher(object):
"""
    Abstract base class for password hashers.
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
name = mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ascii
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError()
def encode(self, password, salt):
"""
        Creates an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError()
def safe_summary(self, encoded):
"""
        Returns a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError()
def must_update(self, encoded):
return False
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
    Secure password hashing using the PBKDF2 algorithm (recommended).
    Configured to use PBKDF2 + HMAC + SHA256 with 12000 iterations.
    The result is a 32 byte derived key, stored base64-encoded. Iterations may
    be changed safely, but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 12000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
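def _pbkdf2_encode_sketch(password, salt, iterations=12000):
    """Hedged, dependency-free sketch, not part of Django: reproduces the
    encoded format built by PBKDF2PasswordHasher.encode(), i.e.
    "pbkdf2_sha256$<iterations>$<salt>$<base64 hash>". It assumes
    django.utils.crypto.pbkdf2 behaves like hashlib.pbkdf2_hmac with SHA-256
    and the digest's default key length."""
    raw = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'),
                              salt.encode('utf-8'), iterations)
    return "pbkdf2_sha256$%d$%s$%s" % (
        iterations, salt, base64.b64encode(raw).decode('ascii').strip())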
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
    Secure password hashing using the bcrypt algorithm (recommended).
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Need to reevaluate the force_bytes call once bcrypt is supported on
# Python 3
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
# Ensure that our data is a bytestring
data = force_bytes(data)
# force_bytes() necessary for py-bcrypt compatibility
hashpw = force_bytes(bcrypt.hashpw(password, data))
return constant_time_compare(data, hashpw)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return SortedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
    Secure password hashing using the bcrypt algorithm.
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
    This hasher does not first hash the password, which means it is subject to
    bcrypt's 72 character password truncation; most use cases should prefer
    BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
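def _demo_md5_hasher():
    """Hedged illustration, not part of Django: a salted-MD5 round trip. No
    Django settings are needed because encode()/verify() only rely on hashlib
    and constant_time_compare()."""
    hasher = MD5PasswordHasher()
    encoded = hasher.encode("s3cret", "ab3Xk9")  # "md5$ab3Xk9$<32 hex chars>"
    return hasher.verify("s3cret", encoded)      # True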
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return SortedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return SortedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import functools
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
IO,
Iterable,
AnyStr,
Dict,
List,
Tuple,
TYPE_CHECKING,
)
try:
from urllib.parse import urlparse, quote, unquote # pylint: disable=unused-import
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import quote, unquote # type: ignore
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.async_paging import AsyncItemPaged
from .._serialize import get_api_version
from .._shared.base_client_async import AsyncStorageAccountHostsMixin
from .._shared.request_handlers import add_metadata_headers, serialize_iso
from .._shared.response_handlers import (
return_response_headers,
process_storage_error,
return_headers_and_deserialized,
)
from .._deserialize import deserialize_queue_properties, deserialize_queue_creation
from .._generated.aio import AzureQueueStorage
from .._generated.models import SignedIdentifier
from .._generated.models import QueueMessage as GenQueueMessage
from .._models import QueueMessage, AccessPolicy
from ._models import MessagesPaged
from .._shared.policies_async import ExponentialRetry
from .._queue_client import QueueClient as QueueClientBase
if TYPE_CHECKING:
from datetime import datetime
from azure.core.pipeline.policies import HTTPPolicy
from .._models import QueueSasPermissions, QueueProperties
class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase):
"""A client to interact with a specific Queue.
:param str account_url:
The URL to the storage account. In order to create a client given the full URI to the queue,
use the :func:`from_queue_url` classmethod.
:param queue_name: The name of the queue.
:type queue_name: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
        an instance of an AzureSasCredential from azure.core.credentials, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
:keyword str api_version:
The Storage API version to use for requests. Default value is the most recent service version that is
compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword message_encode_policy: The encoding policy to use on outgoing messages.
Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`,
:class:`BinaryBase64EncodePolicy` or `None`.
:keyword message_decode_policy: The decoding policy to use on incoming messages.
Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`,
:class:`BinaryBase64DecodePolicy` or `None`.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client]
:end-before: [END async_create_queue_client]
:language: python
:dedent: 16
:caption: Create the queue client with url and credential.
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client_from_connection_string]
:end-before: [END async_create_queue_client_from_connection_string]
:language: python
:dedent: 8
:caption: Create the queue client with a connection string.
"""
def __init__(
self,
account_url, # type: str
queue_name, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
loop = kwargs.pop('loop', None)
super(QueueClient, self).__init__(
account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs
)
self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop) # type: ignore
self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access
self._loop = loop
@distributed_trace_async
async def create_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Creates a new queue in the storage account.
If a queue with the same name already exists, the operation fails with
a `ResourceExistsError`.
:keyword dict(str,str) metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: None or the result of cls(response)
:rtype: None
:raises: StorageErrorException
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_create_queue]
:end-before: [END async_create_queue]
:language: python
:dedent: 12
:caption: Create a queue.
"""
metadata = kwargs.pop('metadata', None)
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.create( # type: ignore
metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs
)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def delete_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes the specified queue and any messages it contains.
When a queue is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The queue is later removed from
the Queue service during garbage collection.
Note that deleting a queue is likely to take at least 40 seconds to complete.
If an operation is attempted against the queue while it was being deleted,
an :class:`HttpResponseError` will be thrown.
:keyword int timeout:
The server timeout, expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_delete_queue]
:end-before: [END async_delete_queue]
:language: python
:dedent: 16
:caption: Delete a queue.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.queue.delete(timeout=timeout, **kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_properties(self, **kwargs):
# type: (Optional[Any]) -> QueueProperties
"""Returns all user-defined metadata for the specified queue.
The data returned does not include the queue's list of messages.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: User-defined metadata for the queue.
:rtype: ~azure.storage.queue.QueueProperties
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_get_queue_properties]
:end-before: [END async_get_queue_properties]
:language: python
:dedent: 16
:caption: Get the properties on the queue.
"""
timeout = kwargs.pop('timeout', None)
try:
response = await self._client.queue.get_properties(
timeout=timeout, cls=deserialize_queue_properties, **kwargs
)
except HttpResponseError as error:
process_storage_error(error)
response.name = self.queue_name
return response # type: ignore
@distributed_trace_async
async def set_queue_metadata(self, metadata=None, **kwargs):
# type: (Optional[Dict[str, Any]], Optional[Any]) -> None
"""Sets user-defined metadata on the specified queue.
Metadata is associated with the queue as name-value pairs.
:param metadata:
A dict containing name-value pairs to associate with the
queue as metadata.
:type metadata: dict(str, str)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_queue_metadata]
:end-before: [END async_set_queue_metadata]
:language: python
:dedent: 16
:caption: Set metadata on the queue.
"""
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.set_metadata( # type: ignore
timeout=timeout, headers=headers, cls=return_response_headers, **kwargs
)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_access_policy(self, **kwargs):
# type: (Optional[Any]) -> Dict[str, Any]
"""Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict(str, ~azure.storage.queue.AccessPolicy)
"""
timeout = kwargs.pop('timeout', None)
try:
_, identifiers = await self._client.queue.get_access_policy(
timeout=timeout, cls=return_headers_and_deserialized, **kwargs
)
except HttpResponseError as error:
process_storage_error(error)
return {s.id: s.access_policy or AccessPolicy() for s in identifiers}
@distributed_trace_async
async def set_queue_access_policy(self, signed_identifiers, **kwargs):
# type: (Dict[str, AccessPolicy], Optional[Any]) -> None
"""Sets stored access policies for the queue that may be used with Shared
Access Signatures.
When you set permissions for a queue, the existing permissions are replaced.
To update the queue's permissions, call :func:`~get_queue_access_policy` to fetch
all access policies associated with the queue, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a queue, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`HttpResponseError` until the access policy becomes active.
:param signed_identifiers:
SignedIdentifier access policies to associate with the queue.
This may contain up to 5 elements. An empty dict
will clear the access policies set on the service.
:type signed_identifiers: dict(str, ~azure.storage.queue.AccessPolicy)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_access_policy]
:end-before: [END async_set_access_policy]
:language: python
:dedent: 16
:caption: Set an access policy on the queue.
"""
timeout = kwargs.pop('timeout', None)
if len(signed_identifiers) > 15:
raise ValueError(
"Too many access policies provided. The server does not support setting "
"more than 15 access policies on a single resource."
)
identifiers = []
for key, value in signed_identifiers.items():
if value:
value.start = serialize_iso(value.start)
value.expiry = serialize_iso(value.expiry)
identifiers.append(SignedIdentifier(id=key, access_policy=value))
signed_identifiers = identifiers # type: ignore
try:
await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def send_message( # type: ignore
self,
content, # type: Any
**kwargs # type: Optional[Any]
):
# type: (...) -> QueueMessage
"""Adds a new message to the back of the message queue.
The visibility timeout specifies the time that the message will be
invisible. After the timeout expires, the message will become visible.
If a visibility timeout is not specified, the default value of 0 is used.
The message time-to-live specifies how long a message will remain in the
queue. The message will be deleted from the queue when the time-to-live
period expires.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str. The encoded message can be up to
64KB in size.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int time_to_live:
Specifies the time-to-live interval for the message, in
seconds. The time-to-live may be any positive number or -1 for infinity. If this
parameter is omitted, the default time-to-live is 7 days.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object.
This object is also populated with the content although it is not
returned from the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_send_messages]
:end-before: [END async_send_messages]
:language: python
:dedent: 16
:caption: Send messages.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
time_to_live = kwargs.pop('time_to_live', None)
timeout = kwargs.pop('timeout', None)
self._config.message_encode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
encoded_content = self._config.message_encode_policy(content)
new_message = GenQueueMessage(message_text=encoded_content)
try:
enqueued = await self._client.messages.enqueue(
queue_message=new_message,
visibilitytimeout=visibility_timeout,
message_time_to_live=time_to_live,
timeout=timeout,
**kwargs
)
queue_message = QueueMessage(content=content)
queue_message.id = enqueued[0].message_id
queue_message.inserted_on = enqueued[0].insertion_time
queue_message.expires_on = enqueued[0].expiration_time
queue_message.pop_receipt = enqueued[0].pop_receipt
queue_message.next_visible_on = enqueued[0].time_next_visible
return queue_message
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def receive_message(self, **kwargs):
# type: (Optional[Any]) -> QueueMessage
"""Removes one message from the front of the queue.
When the message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the message will be
decrypted before being returned.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message from the Queue.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START receive_one_message]
:end-before: [END receive_one_message]
:language: python
:dedent: 12
:caption: Receive one message from the queue.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function)
try:
message = await self._client.messages.dequeue(
number_of_messages=1,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access
message[0]) if message != [] else None
return wrapped_message
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace
def receive_messages(self, **kwargs):
# type: (Optional[Any]) -> AsyncItemPaged[QueueMessage]
"""Removes one or more messages from the front of the queue.
When a message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the messages will be
decrypted before being returned.
:keyword int messages_per_page:
A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
`by_page()` can be used to provide a page iterator on the AsyncItemPaged if messages_per_page is set.
`next()` can be used to get the next page.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message iterator of dict-like Message objects.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.queue.QueueMessage]
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_receive_messages]
:end-before: [END async_receive_messages]
:language: python
:dedent: 16
:caption: Receive messages from the queue.
"""
messages_per_page = kwargs.pop('messages_per_page', None)
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
command = functools.partial(
self._client.messages.dequeue,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def update_message(
self,
message,
pop_receipt=None,
content=None,
**kwargs
):
        # type: (Any, Optional[str], Optional[Any], Any) -> QueueMessage
"""Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
This operation can be used to continually extend the invisibility of a
queue message. This functionality can be useful if you want a worker role
to "lease" a queue message. For example, if a worker role calls :func:`~receive_messages()`
and recognizes that it needs more time to process a message, it can
continually extend the message's invisibility until it is processed. If
the worker role were to fail during processing, eventually the message
would become visible again and another worker role could process it.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param message:
The message object or id identifying the message to update.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~receive_messages` or :func:`~update_message` operation.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str.
:keyword int visibility_timeout:
Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object. For convenience,
this object is also populated with the content, although it is not returned by the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_update_message]
:end-before: [END async_update_message]
:language: python
:dedent: 16
:caption: Update a message.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
message_text = content or message.content
receipt = pop_receipt or message.pop_receipt
inserted_on = message.inserted_on
expires_on = message.expires_on
dequeue_count = message.dequeue_count
except AttributeError:
message_id = message
message_text = content
receipt = pop_receipt
inserted_on = None
expires_on = None
dequeue_count = None
if receipt is None:
raise ValueError("pop_receipt must be present")
if message_text is not None:
self._config.message_encode_policy.configure(
self.require_encryption, self.key_encryption_key, self.key_resolver_function
)
encoded_message_text = self._config.message_encode_policy(message_text)
updated = GenQueueMessage(message_text=encoded_message_text)
else:
updated = None # type: ignore
try:
response = await self._client.message_id.update(
queue_message=updated,
visibilitytimeout=visibility_timeout or 0,
timeout=timeout,
pop_receipt=receipt,
cls=return_response_headers,
queue_message_id=message_id,
**kwargs
)
new_message = QueueMessage(content=message_text)
new_message.id = message_id
new_message.inserted_on = inserted_on
new_message.expires_on = expires_on
new_message.dequeue_count = dequeue_count
new_message.pop_receipt = response["popreceipt"]
new_message.next_visible_on = response["time_next_visible"]
return new_message
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def peek_messages(self, max_messages=None, **kwargs):
# type: (Optional[int], Optional[Any]) -> List[QueueMessage]
"""Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
Only messages that are visible may be retrieved. When a message is retrieved
for the first time with a call to :func:`~receive_messages`, its dequeue_count property
is set to 1. If it is not deleted and is subsequently retrieved again, the
dequeue_count property is incremented. The client may use this value to
determine how many times a message has been retrieved. Note that a call
to peek_messages does not increment the value of dequeue_count, but returns
this value for the client to read.
If the key-encryption-key or resolver field is set on the local service object,
the messages will be decrypted before being returned.
:param int max_messages:
A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A list of :class:`~azure.storage.queue.QueueMessage` objects. Note that
next_visible_on and pop_receipt will not be populated as peek does
not pop the message and can only retrieve already visible messages.
:rtype: list(:class:`~azure.storage.queue.QueueMessage`)
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_peek_message]
:end-before: [END async_peek_message]
:language: python
:dedent: 16
:caption: Peek messages.
"""
timeout = kwargs.pop('timeout', None)
if max_messages and not 1 <= max_messages <= 32:
raise ValueError("Number of messages to peek should be between 1 and 32")
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
messages = await self._client.messages.peek(
number_of_messages=max_messages, timeout=timeout, cls=self._config.message_decode_policy, **kwargs
)
wrapped_messages = []
for peeked in messages:
wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access
return wrapped_messages
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def clear_messages(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes all messages from the specified queue.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_clear_messages]
:end-before: [END async_clear_messages]
:language: python
:dedent: 16
:caption: Clears all messages.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.messages.clear(timeout=timeout, **kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def delete_message(self, message, pop_receipt=None, **kwargs):
# type: (Any, Optional[str], Any) -> None
"""Deletes the specified message.
Normally after a client retrieves a message with the receive messages operation,
the client is expected to process and delete the message. To delete the
message, you must have the message object itself, or two items of data: id and pop_receipt.
The id is returned from the previous receive_messages operation. The
pop_receipt is returned from the most recent :func:`~receive_messages` or
:func:`~update_message` operation. In order for the delete_message operation
to succeed, the pop_receipt specified on the request must match the
pop_receipt returned from the :func:`~receive_messages` or :func:`~update_message`
operation.
:param message:
The message object or id identifying the message to delete.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to :func:`~receive_messages` or :func:`~update_message`.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_delete_message]
:end-before: [END async_delete_message]
:language: python
:dedent: 16
:caption: Delete a message.
"""
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
receipt = pop_receipt or message.pop_receipt
except AttributeError:
message_id = message
receipt = pop_receipt
if receipt is None:
raise ValueError("pop_receipt must be present")
try:
await self._client.message_id.delete(
pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs
)
except HttpResponseError as error:
process_storage_error(error)
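# A minimal usage sketch of the async operations above (hypothetical connection
# string and queue name; not part of the SDK). It peeks messages without
# changing their visibility, then receives and deletes them.
async def _queue_message_demo():
    from azure.storage.queue.aio import QueueClient
    async with QueueClient.from_connection_string(
            conn_str="<connection-string>", queue_name="myqueue") as queue:
        peeked = await queue.peek_messages(max_messages=5)
        for msg in peeked:
            print(msg.content)  # pop_receipt/next_visible_on are not populated by peek
        async for msg in queue.receive_messages():
            await queue.delete_message(msg)  # deletion needs the id and pop_receipt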
|
|
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2003-2006 Gary Bishop.
# Copyright (C) 2006 Jorgen Stenarson. <jorgen.stenarson@bostream.nu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
u''' an attempt to implement readline for Python in Python using ctypes'''
import sys,os,re,time
from glob import glob
import release
import pyreadline.lineeditor.lineobj as lineobj
import pyreadline.lineeditor.history as history
import pyreadline.clipboard as clipboard
import pyreadline.console as console
import pyreadline.logger as logger
from pyreadline.keysyms.common import make_KeyPress_from_keydescr
from pyreadline.unicode_helper import ensure_unicode
from logger import log
from modes import editingmodes
from error import ReadlineError, GetSetError
in_ironpython = u"IronPython" in sys.version
if in_ironpython:#ironpython does not provide a prompt string to readline
import System
default_prompt = u">>> "
else:
default_prompt = u""
import pdb
class MockConsoleError(Exception):
pass
class MockConsole(object):
u"""object used during refactoring. Should raise errors when someone tries to use it.
"""
def __setattr__(self, name, value):
raise MockConsoleError(u"Should not try to set attributes on MockConsole")
def cursor(self, size=50):
pass
class BaseReadline(object):
def __init__(self):
self.allow_ctrl_c = False
self.ctrl_c_tap_time_interval = 0.3
self.debug = False
self.bell_style = u'none'
self.mark = -1
self.console=MockConsole()
# this code needs to follow l_buffer and history creation
self.editingmodes = [mode(self) for mode in editingmodes]
for mode in self.editingmodes:
mode.init_editing_mode(None)
self.mode = self.editingmodes[0]
self.read_inputrc()
log(u"\n".join(self.mode.rl_settings_to_string()))
self.callback = None
def parse_and_bind(self, string):
u'''Parse and execute single line of a readline init file.'''
try:
log(u'parse_and_bind("%s")' % string)
if string.startswith(u'#'):
return
if string.startswith(u'set'):
m = re.compile(ur'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string)
if m:
var_name = m.group(1)
val = m.group(2)
try:
setattr(self, var_name.replace(u'-',u'_'), val)
except AttributeError:
log(u'unknown var="%s" val="%s"' % (var_name, val))
else:
log(u'bad set "%s"' % string)
return
m = re.compile(ur'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string)
if m:
key = m.group(1)
func_name = m.group(2)
py_name = func_name.replace(u'-', u'_')
try:
func = getattr(self.mode, py_name)
except AttributeError:
log(u'unknown func key="%s" func="%s"' % (key, func_name))
if self.debug:
print u'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name
return
self.mode._bind_key(key, func)
except:
log(u'error')
raise
def _set_prompt(self, prompt):
self.mode.prompt = prompt
def _get_prompt(self):
return self.mode.prompt
prompt = property(_get_prompt, _set_prompt)
def get_line_buffer(self):
u'''Return the current contents of the line buffer.'''
return self.mode.l_buffer.get_line_text()
def insert_text(self, string):
u'''Insert text into the command line.'''
self.mode.insert_text(string)
def read_init_file(self, filename=None):
u'''Parse a readline initialization file. The default filename is the last filename used.'''
log(u'read_init_file("%s")' % filename)
#History file book keeping methods (non-bindable)
def add_history(self, line):
u'''Append a line to the history buffer, as if it was the last line typed.'''
self.mode._history.add_history(line)
def get_history_length(self ):
u'''Return the desired length of the history file.
Negative values imply unlimited history file size.'''
return self.mode._history.get_history_length()
def set_history_length(self, length):
u'''Set the number of lines to save in the history file.
write_history_file() uses this value to truncate the history file
when saving. Negative values imply unlimited history file size.
'''
self.mode._history.set_history_length(length)
def clear_history(self):
u'''Clear readline history'''
self.mode._history.clear_history()
def read_history_file(self, filename=None):
u'''Load a readline history file. The default filename is ~/.history.'''
if filename is None:
filename = self.mode._history.history_filename
log(u"read_history_file from %s"%ensure_unicode(filename))
self.mode._history.read_history_file(filename)
def write_history_file(self, filename=None):
u'''Save a readline history file. The default filename is ~/.history.'''
self.mode._history.write_history_file(filename)
#Completer functions
def set_completer(self, function=None):
u'''Set or remove the completer function.
If function is specified, it will be used as the new completer
function; if omitted or None, any completer function already
installed is removed. The completer function is called as
function(text, state), for state in 0, 1, 2, ..., until it returns a
non-string value. It should return the next possible completion
starting with text.
'''
log(u'set_completer')
self.mode.completer = function
def get_completer(self):
u'''Get the completer function.
'''
log(u'get_completer')
return self.mode.completer
def get_begidx(self):
u'''Get the beginning index of the readline tab-completion scope.'''
return self.mode.begidx
def get_endidx(self):
u'''Get the ending index of the readline tab-completion scope.'''
return self.mode.endidx
def set_completer_delims(self, string):
u'''Set the readline word delimiters for tab-completion.'''
self.mode.completer_delims = string
def get_completer_delims(self):
u'''Get the readline word delimiters for tab-completion.'''
return self.mode.completer_delims.encode("ascii")
def set_startup_hook(self, function=None):
u'''Set or remove the startup_hook function.
If function is specified, it will be used as the new startup_hook
function; if omitted or None, any hook function already installed is
removed. The startup_hook function is called with no arguments just
before readline prints the first prompt.
'''
self.mode.startup_hook = function
def set_pre_input_hook(self, function=None):
u'''Set or remove the pre_input_hook function.
If function is specified, it will be used as the new pre_input_hook
function; if omitted or None, any hook function already installed is
removed. The pre_input_hook function is called with no arguments
after the first prompt has been printed and just before readline
starts reading input characters.
'''
self.mode.pre_input_hook = function
#Functions that are not relevant for all Readlines but should at least have a NOP
def _bell(self):
pass
#
# Standard call, not available for all implementations
#
def readline(self, prompt=u''):
raise NotImplementedError
#
# Callback interface
#
def process_keyevent(self, keyinfo):
return self.mode.process_keyevent(keyinfo)
def readline_setup(self, prompt=u""):
return self.mode.readline_setup(prompt)
def keyboard_poll(self):
return self.mode._readline_from_keyboard_poll()
def callback_handler_install(self, prompt, callback):
u'''bool readline_callback_handler_install ( string prompt, callback callback)
Initializes the readline callback interface and terminal, prints the prompt and returns immediately
'''
self.callback = callback
self.readline_setup(prompt)
def callback_handler_remove(self):
u'''Removes a previously installed callback handler and restores terminal settings'''
self.callback = None
def callback_read_char(self):
u'''Reads a character and informs the readline callback interface when a line is received'''
if self.keyboard_poll():
line = self.get_line_buffer() + u'\n'
# note: readline_setup(prompt), which callback_handler_install calls,
# adds another newline; this differs from GNU readline
self.add_history(self.mode.l_buffer)
# TADA:
self.callback(line)
def read_inputrc(self, #in 2.4 we cannot call expanduser with unicode string
inputrcpath=os.path.expanduser("~/pyreadlineconfig.ini")):
modes = dict([(x.mode,x) for x in self.editingmodes])
mode = self.editingmodes[0].mode
def setmode(name):
self.mode = modes[name]
def bind_key(key, name):
if hasattr(modes[mode], name):
modes[mode]._bind_key(key, getattr(modes[mode], name))
else:
print u"Trying to bind unknown command '%s' to key '%s'"%(name, key)
def un_bind_key(key):
keyinfo = make_KeyPress_from_keydescr(key).tuple()
if keyinfo in modes[mode].key_dispatch:
del modes[mode].key_dispatch[keyinfo]
def bind_exit_key(key):
modes[mode]._bind_exit_key(key)
def un_bind_exit_key(key):
keyinfo = make_KeyPress_from_keydescr(key).tuple()
if keyinfo in modes[mode].exit_dispatch:
del modes[mode].exit_dispatch[keyinfo]
def setkill_ring_to_clipboard(killring):
import pyreadline.lineeditor.lineobj
pyreadline.lineeditor.lineobj.kill_ring_to_clipboard = killring
def sethistoryfilename(filename):
self.mode._history.history_filename=os.path.expanduser(filename)
def setbellstyle(mode):
self.bell_style = mode
def sethistorylength(length):
self.mode._history.history_length = int(length)
def allow_ctrl_c(mode):
log(u"allow_ctrl_c:%s:%s"%(self.allow_ctrl_c, mode))
self.allow_ctrl_c = mode
def show_all_if_ambiguous(mode):
self.mode.show_all_if_ambiguous = mode
def ctrl_c_tap_time_interval(mode):
self.ctrl_c_tap_time_interval = mode
def mark_directories(mode):
self.mode.mark_directories = mode
def completer_delims(delims):
self.mode.completer_delims = delims
def debug_output(on, filename=u"pyreadline_debug_log.txt"): #Not implemented yet
if on in [u"on", u"on_nologfile"]:
self.debug=True
if on == "on":
logger.start_file_log(filename)
logger.start_socket_log()
logger.log(u"STARTING LOG")
elif on == u"on_nologfile":
logger.start_socket_log()
logger.log(u"STARTING LOG")
else:
logger.log(u"STOPING LOG")
logger.stop_file_log()
logger.stop_socket_log()
_color_trtable={u"black":0, u"darkred":4, u"darkgreen":2,
u"darkyellow":6, u"darkblue":1, u"darkmagenta":5,
u"darkcyan":3, u"gray":7, u"red":4+8,
u"green":2+8, u"yellow":6+8, u"blue":1+8,
u"magenta":5+8, u"cyan":3+8, u"white":7+8}
def set_prompt_color(color):
self.prompt_color = _color_trtable.get(color.lower(), 7)
def set_input_color(color):
self.command_color = _color_trtable.get(color.lower(), 7)
loc = {u"branch":release.branch,
u"version":release.version,
u"mode":mode,
u"modes":modes,
u"set_mode":setmode,
u"bind_key":bind_key,
u"bind_exit_key":bind_exit_key,
u"un_bind_key":un_bind_key,
u"un_bind_exit_key":un_bind_exit_key,
u"bell_style":setbellstyle,
u"mark_directories":mark_directories,
u"show_all_if_ambiguous":show_all_if_ambiguous,
u"completer_delims":completer_delims,
u"debug_output":debug_output,
u"history_filename":sethistoryfilename,
u"history_length":sethistorylength,
u"set_prompt_color":set_prompt_color,
u"set_input_color":set_input_color,
u"allow_ctrl_c":allow_ctrl_c,
u"ctrl_c_tap_time_interval":ctrl_c_tap_time_interval,
u"kill_ring_to_clipboard":setkill_ring_to_clipboard,
}
if os.path.isfile(inputrcpath):
try:
execfile(inputrcpath, loc, loc)
except Exception,x:
import traceback
print >>sys.stderr, u"Error reading .pyinputrc"
filepath,lineno=traceback.extract_tb(sys.exc_traceback)[1][:2]
print >>sys.stderr, u"Line: %s in file %s"%(lineno, filepath)
print >>sys.stderr, x
raise ReadlineError(u"Error reading .pyinputrc")
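# A minimal sketch (hypothetical, not part of pyreadline) of a completer that
# follows the protocol documented in BaseReadline.set_completer: it is called as
# function(text, state) for state = 0, 1, 2, ... and returns successive
# candidates starting with `text`, then None when the candidates are exhausted.
_SAMPLE_WORDS = [u"import", u"input", u"int", u"isinstance"]

def _sample_completer(text, state):
    matches = [w for w in _SAMPLE_WORDS if w.startswith(text)]
    if state < len(matches):
        return matches[state]
    return None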
class Readline(BaseReadline):
"""Baseclass for readline based on a console
"""
def __init__(self):
BaseReadline.__init__(self)
self.console = console.Console()
self.selection_color = self.console.saveattr<<4
self.command_color = None
self.prompt_color = None
self.size = self.console.size()
# variables you can control with parse_and_bind
# To export as readline interface
## Internal functions
def _bell(self):
u'''ring the bell if requested.'''
if self.bell_style == u'none':
pass
elif self.bell_style == u'visible':
raise NotImplementedError(u"Bellstyle visible is not implemented yet.")
elif self.bell_style == u'audible':
self.console.bell()
else:
raise ReadlineError(u"Bellstyle %s unknown."%self.bell_style)
def _clear_after(self):
c = self.console
x, y = c.pos()
w, h = c.size()
c.rectangle((x, y, w+1, y+1))
c.rectangle((0, y+1, w, min(y+3,h)))
def _set_cursor(self):
c = self.console
xc, yc = self.prompt_end_pos
w, h = c.size()
xc += self.mode.l_buffer.visible_line_width()
while(xc >= w):
xc -= w
yc += 1
c.pos(xc, yc)
def _print_prompt(self):
c = self.console
x, y = c.pos()
n = c.write_scrolling(self.prompt, self.prompt_color)
self.prompt_begin_pos = (x, y - n)
self.prompt_end_pos = c.pos()
self.size = c.size()
def _update_prompt_pos(self, n):
if n != 0:
bx, by = self.prompt_begin_pos
ex, ey = self.prompt_end_pos
self.prompt_begin_pos = (bx, by - n)
self.prompt_end_pos = (ex, ey - n)
def _update_line(self):
c = self.console
l_buffer = self.mode.l_buffer
c.cursor(0) # hide the cursor to avoid flickering
c.pos(*self.prompt_begin_pos)
self._print_prompt()
ltext = l_buffer.quoted_text()
if l_buffer.enable_selection and (l_buffer.selection_mark >= 0):
start = len(l_buffer[:l_buffer.selection_mark].quoted_text())
stop = len(l_buffer[:l_buffer.point].quoted_text())
if start > stop:
stop,start = start,stop
n = c.write_scrolling(ltext[:start], self.command_color)
n = c.write_scrolling(ltext[start:stop], self.selection_color)
n = c.write_scrolling(ltext[stop:], self.command_color)
else:
n = c.write_scrolling(ltext, self.command_color)
x, y = c.pos() #Preserve one line for Asian IME(Input Method Editor) statusbar
w, h = c.size()
if (y >= h - 1) or (n > 0):
c.scroll_window(-1)
c.scroll((0, 0, w, h), 0, -1)
n += 1
self._update_prompt_pos(n)
if hasattr(c, u"clear_to_end_of_window"): #Work around function for ironpython due
c.clear_to_end_of_window() #to System.Console's lack of FillFunction
else:
self._clear_after()
# show the cursor and set its size; vi mode changes the size in insert/overwrite mode
c.cursor(1, size=self.mode.cursor_size)
self._set_cursor()
def callback_read_char(self):
#Override base to get automatic newline
u'''Reads a character and informs the readline callback interface when a line is received'''
if self.keyboard_poll():
line = self.get_line_buffer() + u'\n'
self.console.write(u"\r\n")
# note: readline_setup(prompt), which callback_handler_install calls,
# adds another newline; this differs from GNU readline
self.add_history(self.mode.l_buffer)
# TADA:
self.callback(line)
def event_available(self):
return self.console.peek() or (len(self.paste_line_buffer) > 0)
def _readline_from_keyboard(self):
while 1:
if self._readline_from_keyboard_poll():
break
def _readline_from_keyboard_poll(self):
pastebuffer = self.mode.paste_line_buffer
if len(pastebuffer) > 0:
#paste first line in multiline paste buffer
self.l_buffer = lineobj.ReadLineTextBuffer(pastebuffer[0])
self._update_line()
self.mode.paste_line_buffer = pastebuffer[1:]
return True
c = self.console
def nop(e):
pass
try:
event = c.getkeypress()
except KeyboardInterrupt:
event = self.handle_ctrl_c()
try:
result = self.mode.process_keyevent(event.keyinfo)
except EOFError:
logger.stop_logging()
raise
self._update_line()
return result
def readline_setup(self, prompt=u''):
BaseReadline.readline_setup(self, prompt)
self._print_prompt()
self._update_line()
def readline(self, prompt=u''):
self.readline_setup(prompt)
self.ctrl_c_timeout = time.time()
self._readline_from_keyboard()
self.console.write(u'\r\n')
log(u'returning(%s)' % self.get_line_buffer())
return self.get_line_buffer() + u'\n'
def handle_ctrl_c(self):
from pyreadline.keysyms.common import KeyPress
from pyreadline.console.event import Event
log(u"KBDIRQ")
event = Event(0,0)
event.char = u"c"
event.keyinfo = KeyPress(u"c", shift=False, control=True,
meta=False, keyname=None)
if self.allow_ctrl_c:
now = time.time()
if (now - self.ctrl_c_timeout) < self.ctrl_c_tap_time_interval:
log(u"Raise KeyboardInterrupt")
raise KeyboardInterrupt
else:
self.ctrl_c_timeout = now
else:
raise KeyboardInterrupt
return event
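# A minimal sketch (not part of pyreadline) of the callback interface defined in
# BaseReadline above: install a handler, pump callback_read_char() until a full
# line has been read, then remove the handler. The callback receives the line
# including its trailing newline.
def _callback_demo(readline_obj):
    lines = []
    def _on_line(line):
        lines.append(line)
    readline_obj.callback_handler_install(u">>> ", _on_line)
    while not lines:
        readline_obj.callback_read_char()
    readline_obj.callback_handler_remove()
    return lines[0]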
# create a Readline object to contain the state
rl = Readline()
def GetOutputFile():
u'''Return the console object used by readline so that it can be used for printing in color.'''
return rl.console
# make these available so this looks like the python readline module
read_init_file = rl.read_init_file
parse_and_bind = rl.parse_and_bind
clear_history = rl.clear_history
add_history = rl.add_history
insert_text = rl.insert_text
write_history_file = rl.write_history_file
read_history_file = rl.read_history_file
get_completer_delims = rl.get_completer_delims
get_history_length = rl.get_history_length
get_line_buffer = rl.get_line_buffer
set_completer = rl.set_completer
get_completer = rl.get_completer
get_begidx = rl.get_begidx
get_endidx = rl.get_endidx
set_completer_delims = rl.set_completer_delims
set_history_length = rl.set_history_length
set_pre_input_hook = rl.set_pre_input_hook
set_startup_hook = rl.set_startup_hook
callback_handler_install=rl.callback_handler_install
callback_handler_remove=rl.callback_handler_remove
callback_read_char=rl.callback_read_char
if __name__ == u'__main__':
res = [ rl.readline(u'In[%d] ' % i) for i in range(3) ]
print res
else:
console.install_readline(rl.readline)
pass
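# A minimal sketch of a ~/pyreadlineconfig.ini file as understood by
# BaseReadline.read_inputrc() above. The file is plain Python executed with the
# helper names from the `loc` dict in scope; the values below are illustrative:
#
#     bell_style("none")
#     show_all_if_ambiguous("on")
#     history_filename("~/.pythonhistory")
#     history_length(200)
#     bind_exit_key("Control-d")
#     bind_exit_key("Control-z")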
|
|
"""Test for RFlink light components.
Test setup of the rflink light component/platform. State tracking and
control of Rflink light devices.
"""
import asyncio
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON)
from homeassistant.core import callback
from ..test_rflink import mock_rflink
DOMAIN = 'light'
CONFIG = {
'rflink': {
'port': '/dev/ttyABC0',
'ignore_devices': ['ignore_wildcard_*', 'ignore_light'],
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'aliasses': ['test_alias_0_0'],
},
'dimmable_0_0': {
'name': 'dim_test',
'type': 'dimmable',
},
'switchable_0_0': {
'name': 'switch_test',
'type': 'switchable',
}
},
},
}
@asyncio.coroutine
def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink switch component."""
# setup mocking rflink module
event_callback, create, protocol, _ = yield from mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]['ignore']
# test default state of light loaded from config
light_initial = hass.states.get('light.test')
assert light_initial.state == 'off'
assert light_initial.attributes['assumed_state']
# the light should follow the state of the hardware device by interpreting
# incoming events for its name and aliases
# mock incoming command event for this device
event_callback({
'id': 'protocol_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
light_after_first_command = hass.states.get('light.test')
assert light_after_first_command.state == 'on'
# after receiving the first command the state no longer has to be assumed
assert 'assumed_state' not in light_after_first_command.attributes
# mock incoming command event for this device
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
assert hass.states.get('light.test').state == 'off'
# test following aliases
# mock incoming command event for this device alias
event_callback({
'id': 'test_alias_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get('light.test').state == 'on'
# test event for a new unconfigured light
event_callback({
'id': 'protocol2_0_1',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get('light.protocol2_0_1').state == 'on'
# test changing state from HA propagates to Rflink
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test'}))
yield from hass.async_block_till_done()
assert hass.states.get('light.test').state == 'off'
assert protocol.send_command_ack.call_args_list[0][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[0][0][1] == 'off'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: 'light.test'}))
yield from hass.async_block_till_done()
assert hass.states.get('light.test').state == 'on'
assert protocol.send_command_ack.call_args_list[1][0][1] == 'on'
# protocols supporting dimming and on/off should create hybrid light entity
event_callback({
'id': 'newkaku_0_1',
'command': 'off',
})
yield from hass.async_block_till_done()
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: 'light.newkaku_0_1'}))
yield from hass.async_block_till_done()
# dimmable should send highest dim level when turning on
assert protocol.send_command_ack.call_args_list[2][0][1] == '15'
# and send on command for fallback
assert protocol.send_command_ack.call_args_list[3][0][1] == 'on'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: 'light.newkaku_0_1',
ATTR_BRIGHTNESS: 128,
}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[4][0][1] == '7'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: 'light.dim_test',
ATTR_BRIGHTNESS: 128,
}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[5][0][1] == '7'
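def _brightness_to_rflink_dim_level(brightness):
    """Sketch (not used by the tests) of the scaling asserted above.

    Home Assistant brightness (0-255) is mapped onto the Rflink dim level range
    (0-15), so ATTR_BRIGHTNESS 128 results in the command '7' and full
    brightness in '15'.
    """
    return int(brightness / 17)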
@asyncio.coroutine
def test_new_light_group(hass, monkeypatch):
"""New devices should be added to configured group."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'new_devices_group': 'new_rflink_lights',
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
# test event for a new unconfigured light
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
# make sure new device is added to correct group
group = hass.states.get('group.new_rflink_lights')
assert group.attributes.get('entity_id') == ('light.protocol_0_0',)
@asyncio.coroutine
def test_firing_bus_event(hass, monkeypatch):
"""Incoming Rflink command events should be put on the HA event bus."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'aliasses': ['test_alias_0_0'],
'fire_event': True,
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
calls = []
@callback
def listener(event):
calls.append(event)
hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)
# test event for a new unconfigured light
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
assert calls[0].data == {'state': 'off', 'entity_id': 'light.test'}
@asyncio.coroutine
def test_signal_repetitions(hass, monkeypatch):
"""Command should be sent amount of configured repetitions."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'device_defaults': {
'signal_repetitions': 3,
},
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 2,
},
'protocol_0_1': {
'name': 'test1',
},
'newkaku_0_1': {
'type': 'hybrid',
}
},
},
}
# setup mocking rflink module
event_callback, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
# test if signal repetition is performed according to configuration
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 2
# test if defaults apply to configured devices
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test1'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 5
# test if device defaults apply to newly created devices
event_callback({
'id': 'protocol_0_2',
'command': 'off',
})
# make sure entity is created before setting state
yield from hass.async_block_till_done()
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.protocol_0_2'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 8
@asyncio.coroutine
def test_signal_repetitions_alternation(hass, monkeypatch):
"""Simultaneously switching entities must alternate repetitions."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 2,
},
'protocol_0_1': {
'name': 'test1',
'signal_repetitions': 2,
},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test'}))
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test1'}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[1][0][0] == 'protocol_0_1'
assert protocol.send_command_ack.call_args_list[2][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[3][0][0] == 'protocol_0_1'
@asyncio.coroutine
def test_signal_repetitions_cancelling(hass, monkeypatch):
"""Cancel outstanding repetitions when state changed."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 3,
},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: 'light.test'}))
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: 'light.test'}))
yield from hass.async_block_till_done()
print(protocol.send_command_ack.call_args_list)
assert protocol.send_command_ack.call_args_list[0][0][1] == 'off'
assert protocol.send_command_ack.call_args_list[1][0][1] == 'on'
assert protocol.send_command_ack.call_args_list[2][0][1] == 'on'
assert protocol.send_command_ack.call_args_list[3][0][1] == 'on'
|
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Luc Saffre
# License: BSD (see file COPYING for details)
"""
Remove all tags except some when saving the content of a
:class:`RichTextField <lino.core.fields.RichTextField>`.
When copying rich text from other applications into Lino, the text can
contain styles and other things which can cause side effects when
displaying or printing them.
A possible strategy for avoiding such problems is to bleach any
content, i.e. allow only simple plain HTML formatting.
If you use this in your application, then your application must add
`bleach <http://bleach.readthedocs.org/en/latest/>`_ to its
:ref:`install_requires`.
Usage example (excerpt from
:class:`lino.modlib.comments.models.Comment`)::
from lino.mixins.bleached import Bleached
from lino.api import dd
class MyModel(Bleached):
short_text = dd.RichTextField(_("Short text"), blank=True)
more_text = dd.RichTextField(_("More text"), blank=True)
bleached_fields = "short_text more_text"
Note that `bleach` until 20170225 required `html5lib` version
`0.9999999` (7*"9") while the current version is `0.999999999`
(9*"9"), which means that you might inadvertently break `bleach`
when you ask to update `html5lib`::
$ pip install -U html5lib
...
Successfully installed html5lib-0.999999999
$ python -m bleach
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 163, in _run_module_as_main
mod_name, _Error)
File "/usr/lib/python2.7/runpy.py", line 111, in _get_module_details
__import__(mod_name) # Do not catch exceptions initializing package
File "/site-packages/bleach/__init__.py", line 14, in <module>
from html5lib.sanitizer import HTMLSanitizer
ImportError: No module named sanitizer
"""
import six
try:
import bleach
except ImportError:
bleach = None
import logging
logger = logging.getLogger(__name__)
from lino.core.model import Model
from lino.core.fields import fields_list, RichTextField
from lino.utils.restify import restify
from lino.utils.soup import truncate_comment
from etgen.html import E, tostring
from lino.api import _
from lxml import html as lxml_html
def rich_text_to_elems(ar, description):
"""
A RichTextField can contain HTML markup or plain text.
"""
if description.startswith("<"):
# desc = E.raw('<div>%s</div>' % self.description)
desc = lxml_html.fragments_fromstring(ar.parse_memo(description))
return desc
# desc = E.raw('<div>%s</div>' % self.description)
html = restify(ar.parse_memo(description))
# logger.info(u"20180320 restify %s --> %s", description, html)
# html = html.strip()
try:
desc = lxml_html.fragments_fromstring(html)
except Exception as e:
raise Exception(
"Could not parse {!r} : {}".format(html, e))
# logger.info(
# "20160704c parsed --> %s", tostring(desc))
return desc
# if desc.tag == 'body':
# # happens if it contains more than one paragraph
# return list(desc) # .children
# return [desc]
def body_subject_to_elems(ar, title, description):
"""
Convert the given `title` and `description` to a list of HTML
elements.
Used by :mod:`lino.modlib.notify` and by :mod:`lino_xl.lib.sales`
"""
if description:
elems = [E.p(E.b(title), E.br())]
elems += rich_text_to_elems(ar, description)
else:
elems = [E.b(title)]
# return E.span(self.title)
return elems
class Bleached(Model):
"""
Mixin for models that have at least one text field which might
contain HTML.
When using this, you should specify :attr:`bleached_fields`.
.. attribute:: bleached_fields
A list of strings with the names of the fields that are
to be bleached.
.. attribute:: allowed_tags
A list of tag names which are to *remain* in the bleached HTML
when bleaching is active.
"""
allowed_tags = ['a', 'b', 'i', 'em', 'ul', 'ol', 'li', 'strong',
'p', 'br', 'span', 'pre', 'def', 'table', 'th', 'tr',
'td', 'thead', 'tfoot', 'tbody']
bleached_fields = []
class Meta(object):
abstract = True
@classmethod
def on_analyze(cls, site):
super(Bleached, cls).on_analyze(site)
if cls.bleached_fields is None:
return
if isinstance(cls.bleached_fields, six.string_types):
cls.bleached_fields = fields_list(cls, cls.bleached_fields)
if not bleach:
# site.logger.debug(
# "%s not being bleached because `bleach` is broken "
# "or not installed.", cls)
raise Exception(
"{} has bleached fields but `bleach` is not installed.".format(
cls))
# def full_clean(self, *args, **kwargs):
def before_ui_save(self, ar):
"""This does the actual bleaching work.
TODO: Lino should log at least a bit of bleach's "activity",
for example an info message saying "Removed tags x, y, z from
short_text"
"""
if bleach and self.bleached_fields:
for k in self.bleached_fields:
old = getattr(self, k)
if old is None:
continue
try:
new = bleach.clean(
old, tags=self.allowed_tags, strip=True)
except TypeError as e:
logger.warning(
"Could not bleach %r : %s (%s)", old, e, self)
continue
if old != new:
logger.debug(
"Bleaching %s from %r to %r", k, old, new)
setattr(self, k, new)
# super(Bleached, self).full_clean(*args, **kwargs)
super(Bleached, self).before_ui_save(ar)
class BleachedPreviewBody(Bleached):
class Meta:
abstract = True
bleached_fields = 'body'
body = RichTextField(_("Body"), blank=True, format='html')
body_preview = RichTextField(
_("Preview"), blank=True, editable=False)
# def full_clean(self, *args, **kwargs):
def before_ui_save(self, ar):
"""Fills the body_preview field.
"""
# super(BleachedPreviewBody, self).full_clean(*args, **kwargs)
super(BleachedPreviewBody, self).before_ui_save(ar)
self.body_preview = truncate_comment(self.body)
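def _bleach_example():
    """A minimal sketch (not used by Lino) of the cleaning done in before_ui_save().

    Assumes `bleach` is installed and, as the code above does, accepts the
    allowed tags as a list. Disallowed markup such as ``<script>`` and inline
    ``style`` attributes is stripped, while tags listed in
    :attr:`Bleached.allowed_tags` survive.
    """
    dirty = '<p style="color:red">Hello <script>x()</script><b>world</b></p>'
    if bleach is None:
        return dirty
    return bleach.clean(dirty, tags=Bleached.allowed_tags, strip=True)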
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.deploy_v1.types import cloud_deploy
from google.longrunning import operations_pb2 # type: ignore
from .base import CloudDeployTransport, DEFAULT_CLIENT_INFO
class CloudDeployGrpcTransport(CloudDeployTransport):
"""gRPC backend transport for CloudDeploy.
CloudDeploy service creates and manages Continuous Delivery
operations on Google Cloud Platform via Skaffold
(https://skaffold.dev).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "clouddeploy.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "clouddeploy.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_delivery_pipelines(
self,
) -> Callable[
[cloud_deploy.ListDeliveryPipelinesRequest],
cloud_deploy.ListDeliveryPipelinesResponse,
]:
r"""Return a callable for the list delivery pipelines method over gRPC.
Lists DeliveryPipelines in a given project and
location.
Returns:
Callable[[~.ListDeliveryPipelinesRequest],
~.ListDeliveryPipelinesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_delivery_pipelines" not in self._stubs:
self._stubs["list_delivery_pipelines"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListDeliveryPipelines",
request_serializer=cloud_deploy.ListDeliveryPipelinesRequest.serialize,
response_deserializer=cloud_deploy.ListDeliveryPipelinesResponse.deserialize,
)
return self._stubs["list_delivery_pipelines"]
@property
def get_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.GetDeliveryPipelineRequest], cloud_deploy.DeliveryPipeline
]:
r"""Return a callable for the get delivery pipeline method over gRPC.
Gets details of a single DeliveryPipeline.
Returns:
Callable[[~.GetDeliveryPipelineRequest],
~.DeliveryPipeline]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_delivery_pipeline" not in self._stubs:
self._stubs["get_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetDeliveryPipeline",
request_serializer=cloud_deploy.GetDeliveryPipelineRequest.serialize,
response_deserializer=cloud_deploy.DeliveryPipeline.deserialize,
)
return self._stubs["get_delivery_pipeline"]
@property
def create_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.CreateDeliveryPipelineRequest], operations_pb2.Operation
]:
r"""Return a callable for the create delivery pipeline method over gRPC.
Creates a new DeliveryPipeline in a given project and
location.
Returns:
Callable[[~.CreateDeliveryPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_delivery_pipeline" not in self._stubs:
self._stubs["create_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateDeliveryPipeline",
request_serializer=cloud_deploy.CreateDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_delivery_pipeline"]
@property
def update_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.UpdateDeliveryPipelineRequest], operations_pb2.Operation
]:
r"""Return a callable for the update delivery pipeline method over gRPC.
Updates the parameters of a single DeliveryPipeline.
Returns:
Callable[[~.UpdateDeliveryPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_delivery_pipeline" not in self._stubs:
self._stubs["update_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/UpdateDeliveryPipeline",
request_serializer=cloud_deploy.UpdateDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_delivery_pipeline"]
@property
def delete_delivery_pipeline(
self,
) -> Callable[
[cloud_deploy.DeleteDeliveryPipelineRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete delivery pipeline method over gRPC.
Deletes a single DeliveryPipeline.
Returns:
Callable[[~.DeleteDeliveryPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_delivery_pipeline" not in self._stubs:
self._stubs["delete_delivery_pipeline"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/DeleteDeliveryPipeline",
request_serializer=cloud_deploy.DeleteDeliveryPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_delivery_pipeline"]
@property
def list_targets(
self,
) -> Callable[[cloud_deploy.ListTargetsRequest], cloud_deploy.ListTargetsResponse]:
r"""Return a callable for the list targets method over gRPC.
Lists Targets in a given project and location.
Returns:
Callable[[~.ListTargetsRequest],
~.ListTargetsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_targets" not in self._stubs:
self._stubs["list_targets"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListTargets",
request_serializer=cloud_deploy.ListTargetsRequest.serialize,
response_deserializer=cloud_deploy.ListTargetsResponse.deserialize,
)
return self._stubs["list_targets"]
@property
def get_target(
self,
) -> Callable[[cloud_deploy.GetTargetRequest], cloud_deploy.Target]:
r"""Return a callable for the get target method over gRPC.
Gets details of a single Target.
Returns:
Callable[[~.GetTargetRequest],
~.Target]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_target" not in self._stubs:
self._stubs["get_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetTarget",
request_serializer=cloud_deploy.GetTargetRequest.serialize,
response_deserializer=cloud_deploy.Target.deserialize,
)
return self._stubs["get_target"]
@property
def create_target(
self,
) -> Callable[[cloud_deploy.CreateTargetRequest], operations_pb2.Operation]:
r"""Return a callable for the create target method over gRPC.
Creates a new Target in a given project and location.
Returns:
Callable[[~.CreateTargetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_target" not in self._stubs:
self._stubs["create_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateTarget",
request_serializer=cloud_deploy.CreateTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_target"]
@property
def update_target(
self,
) -> Callable[[cloud_deploy.UpdateTargetRequest], operations_pb2.Operation]:
r"""Return a callable for the update target method over gRPC.
Updates the parameters of a single Target.
Returns:
Callable[[~.UpdateTargetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_target" not in self._stubs:
self._stubs["update_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/UpdateTarget",
request_serializer=cloud_deploy.UpdateTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_target"]
@property
def delete_target(
self,
) -> Callable[[cloud_deploy.DeleteTargetRequest], operations_pb2.Operation]:
r"""Return a callable for the delete target method over gRPC.
Deletes a single Target.
Returns:
Callable[[~.DeleteTargetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_target" not in self._stubs:
self._stubs["delete_target"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/DeleteTarget",
request_serializer=cloud_deploy.DeleteTargetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_target"]
@property
def list_releases(
self,
) -> Callable[
[cloud_deploy.ListReleasesRequest], cloud_deploy.ListReleasesResponse
]:
r"""Return a callable for the list releases method over gRPC.
Lists Releases in a given project and location.
Returns:
Callable[[~.ListReleasesRequest],
~.ListReleasesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_releases" not in self._stubs:
self._stubs["list_releases"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListReleases",
request_serializer=cloud_deploy.ListReleasesRequest.serialize,
response_deserializer=cloud_deploy.ListReleasesResponse.deserialize,
)
return self._stubs["list_releases"]
@property
def get_release(
self,
) -> Callable[[cloud_deploy.GetReleaseRequest], cloud_deploy.Release]:
r"""Return a callable for the get release method over gRPC.
Gets details of a single Release.
Returns:
Callable[[~.GetReleaseRequest],
~.Release]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_release" not in self._stubs:
self._stubs["get_release"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetRelease",
request_serializer=cloud_deploy.GetReleaseRequest.serialize,
response_deserializer=cloud_deploy.Release.deserialize,
)
return self._stubs["get_release"]
@property
def create_release(
self,
) -> Callable[[cloud_deploy.CreateReleaseRequest], operations_pb2.Operation]:
r"""Return a callable for the create release method over gRPC.
Creates a new Release in a given project and
location.
Returns:
Callable[[~.CreateReleaseRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_release" not in self._stubs:
self._stubs["create_release"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateRelease",
request_serializer=cloud_deploy.CreateReleaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_release"]
@property
def approve_rollout(
self,
) -> Callable[
[cloud_deploy.ApproveRolloutRequest], cloud_deploy.ApproveRolloutResponse
]:
r"""Return a callable for the approve rollout method over gRPC.
Approves a Rollout.
Returns:
Callable[[~.ApproveRolloutRequest],
~.ApproveRolloutResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "approve_rollout" not in self._stubs:
self._stubs["approve_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ApproveRollout",
request_serializer=cloud_deploy.ApproveRolloutRequest.serialize,
response_deserializer=cloud_deploy.ApproveRolloutResponse.deserialize,
)
return self._stubs["approve_rollout"]
@property
def list_rollouts(
self,
) -> Callable[
[cloud_deploy.ListRolloutsRequest], cloud_deploy.ListRolloutsResponse
]:
r"""Return a callable for the list rollouts method over gRPC.
Lists Rollouts in a given project and location.
Returns:
Callable[[~.ListRolloutsRequest],
~.ListRolloutsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_rollouts" not in self._stubs:
self._stubs["list_rollouts"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/ListRollouts",
request_serializer=cloud_deploy.ListRolloutsRequest.serialize,
response_deserializer=cloud_deploy.ListRolloutsResponse.deserialize,
)
return self._stubs["list_rollouts"]
@property
def get_rollout(
self,
) -> Callable[[cloud_deploy.GetRolloutRequest], cloud_deploy.Rollout]:
r"""Return a callable for the get rollout method over gRPC.
Gets details of a single Rollout.
Returns:
Callable[[~.GetRolloutRequest],
~.Rollout]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_rollout" not in self._stubs:
self._stubs["get_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetRollout",
request_serializer=cloud_deploy.GetRolloutRequest.serialize,
response_deserializer=cloud_deploy.Rollout.deserialize,
)
return self._stubs["get_rollout"]
@property
def create_rollout(
self,
) -> Callable[[cloud_deploy.CreateRolloutRequest], operations_pb2.Operation]:
r"""Return a callable for the create rollout method over gRPC.
Creates a new Rollout in a given project and
location.
Returns:
Callable[[~.CreateRolloutRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_rollout" not in self._stubs:
self._stubs["create_rollout"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/CreateRollout",
request_serializer=cloud_deploy.CreateRolloutRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_rollout"]
@property
def get_config(
self,
) -> Callable[[cloud_deploy.GetConfigRequest], cloud_deploy.Config]:
r"""Return a callable for the get config method over gRPC.
Gets the configuration for a location.
Returns:
Callable[[~.GetConfigRequest],
~.Config]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_config" not in self._stubs:
self._stubs["get_config"] = self.grpc_channel.unary_unary(
"/google.cloud.deploy.v1.CloudDeploy/GetConfig",
request_serializer=cloud_deploy.GetConfigRequest.serialize,
response_deserializer=cloud_deploy.Config.deserialize,
)
return self._stubs["get_config"]
def close(self):
self.grpc_channel.close()
__all__ = ("CloudDeployGrpcTransport",)
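# A minimal usage sketch (kept as comments; it assumes Application Default
# Credentials are available and that the `cloud_deploy` request types imported
# above are in scope — illustrative only):
#
#   transport = CloudDeployGrpcTransport(host="clouddeploy.googleapis.com")
#   request = cloud_deploy.GetRolloutRequest(name="projects/.../rollouts/...")
#   rollout = transport.get_rollout(request)
#
# Each property returns a cached unary-unary stub, so repeated attribute
# access reuses the same gRPC callable instead of recreating it.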
|
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import datetime
import unittest
from unittest import mock
from dateutil import tz
from cloudkitty import dataframe
from cloudkitty.storage.v2.elasticsearch import client
from cloudkitty.storage.v2.elasticsearch import exceptions
class TestElasticsearchClient(unittest.TestCase):
def setUp(self):
super(TestElasticsearchClient, self).setUp()
self.client = client.ElasticsearchClient(
'http://elasticsearch:9200',
'index_name',
'test_mapping',
autocommit=False)
def test_build_must_no_params(self):
self.assertEqual(self.client._build_must(None, None, None, None), [])
def test_build_must_with_start_end(self):
start = datetime.datetime(2019, 8, 30, tzinfo=tz.tzutc())
end = datetime.datetime(2019, 8, 31, tzinfo=tz.tzutc())
self.assertEqual(
self.client._build_must(start, end, None, None),
[{'range': {'start': {'gte': '2019-08-30T00:00:00+00:00'}}},
{'range': {'end': {'lte': '2019-08-31T00:00:00+00:00'}}}],
)
def test_build_must_with_filters(self):
filters = {'one': '1', 'two': '2', 'type': 'awesome'}
self.assertEqual(
self.client._build_must(None, None, None, filters),
[{'term': {'type': 'awesome'}}],
)
def test_build_must_with_metric_types(self):
types = ['awesome', 'amazing']
self.assertEqual(
self.client._build_must(None, None, types, None),
[{'terms': {'type': ['awesome', 'amazing']}}],
)
def test_build_should_no_filters(self):
self.assertEqual(
self.client._build_should(None),
[],
)
def test_build_should_with_filters(self):
filters = collections.OrderedDict([
('one', '1'), ('two', '2'), ('type', 'awesome')])
self.assertEqual(
self.client._build_should(filters),
[
{'term': {'groupby.one': '1'}},
{'term': {'metadata.one': '1'}},
{'term': {'groupby.two': '2'}},
{'term': {'metadata.two': '2'}},
],
)
def test_build_composite_no_groupby(self):
self.assertEqual(self.client._build_composite(None), [])
def test_build_composite(self):
self.assertEqual(
self.client._build_composite(['one', 'type', 'two']),
{'sources': [
{'one': {'terms': {'field': 'groupby.one'}}},
{'type': {'terms': {'field': 'type'}}},
{'two': {'terms': {'field': 'groupby.two'}}},
]},
)
def test_build_query_no_args(self):
self.assertEqual(self.client._build_query(None, None, None), {})
def test_build_query(self):
must = [{'range': {'start': {'gte': '2019-08-30T00:00:00+00:00'}}},
{'range': {'start': {'lt': '2019-08-31T00:00:00+00:00'}}}]
should = [
{'term': {'groupby.one': '1'}},
{'term': {'metadata.one': '1'}},
{'term': {'groupby.two': '2'}},
{'term': {'metadata.two': '2'}},
]
composite = {'sources': [
{'one': {'terms': {'field': 'groupby.one'}}},
{'type': {'terms': {'field': 'type'}}},
{'two': {'terms': {'field': 'groupby.two'}}},
]}
expected = {
'query': {
'bool': {
'must': must,
'should': should,
'minimum_should_match': 2,
},
},
'aggs': {
'sum_and_price': {
'composite': composite,
'aggregations': {
"sum_price": {"sum": {"field": "price"}},
"sum_qty": {"sum": {"field": "qty"}},
},
},
},
}
self.assertEqual(
self.client._build_query(must, should, composite), expected)
def test_log_query_no_hits(self):
url = '/endpoint'
body = {'1': 'one'}
response = {'took': 42}
expected = """Query on /endpoint with body "{'1': 'one'}" took 42ms"""
with mock.patch.object(client.LOG, 'debug') as debug_mock:
self.client._log_query(url, body, response)
debug_mock.assert_called_once_with(expected)
def test_log_query_with_hits(self):
url = '/endpoint'
body = {'1': 'one'}
response = {'took': 42, 'hits': {'total': 1337}}
expected = """Query on /endpoint with body "{'1': 'one'}" took 42ms"""
expected += " for 1337 hits"
with mock.patch.object(client.LOG, 'debug') as debug_mock:
self.client._log_query(url, body, response)
debug_mock.assert_called_once_with(expected)
def test_req_valid_status_code_no_deserialize(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 200
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
req_resp = self.client._req(
method_mock, None, None, None, deserialize=False)
method_mock.assert_called_once_with(None, data=None, params=None)
self.assertEqual(req_resp, resp_mock)
def test_req_valid_status_code_deserialize(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 200
resp_mock.json.return_value = 'output'
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
with mock.patch.object(self.client, '_log_query') as log_mock:
req_resp = self.client._req(
method_mock, None, None, None, deserialize=True)
method_mock.assert_called_once_with(None, data=None, params=None)
self.assertEqual(req_resp, 'output')
log_mock.assert_called_once_with(None, None, 'output')
def test_req_invalid_status_code(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 400
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
self.assertRaises(exceptions.InvalidStatusCode,
self.client._req,
method_mock, None, None, None)
def test_put_mapping(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.put_mapping(mapping)
rmock.assert_called_once_with(
self.client._sess.put,
'http://elasticsearch:9200/index_name/_mapping/test_mapping',
'{"a": "b"}', {'include_type_name': 'true'}, deserialize=False)
def test_get_index(self):
with mock.patch.object(self.client, '_req') as rmock:
self.client.get_index()
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name',
None, None, deserialize=False)
def test_search_without_scroll(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.search(mapping, scroll=False)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name/_search',
'{"a": "b"}', None)
def test_search_with_scroll(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.search(mapping, scroll=True)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name/_search',
'{"a": "b"}', {'scroll': '60s'})
def test_scroll(self):
body = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.scroll(body)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/_search/scroll',
'{"a": "b"}', None)
def test_close_scroll(self):
body = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.close_scroll(body)
rmock.assert_called_once_with(
self.client._sess.delete,
'http://elasticsearch:9200/_search/scroll',
'{"a": "b"}', None, deserialize=False)
def test_close_scrolls(self):
with mock.patch.object(self.client, 'close_scroll') as func_mock:
with mock.patch.object(self.client, '_scroll_ids',
new=['a', 'b', 'c']):
self.client.close_scrolls()
func_mock.assert_called_once_with(
{'scroll_id': ['a', 'b', 'c']})
self.assertSetEqual(set(), self.client._scroll_ids)
def test_bulk_with_instruction(self):
instruction = {'instruction': {}}
terms = ('one', 'two', 'three')
expected_data = ''.join([
'{"instruction": {}}\n'
'"one"\n'
'{"instruction": {}}\n'
'"two"\n'
'{"instruction": {}}\n'
'"three"\n',
])
with mock.patch.object(self.client, '_req') as rmock:
self.client.bulk_with_instruction(instruction, terms)
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/test_mapping/_bulk',
expected_data, None, deserialize=False)
def test_bulk_index(self):
terms = ('one', 'two', 'three')
with mock.patch.object(self.client, 'bulk_with_instruction') as fmock:
self.client.bulk_index(terms)
fmock.assert_called_once_with({'index': {}}, terms)
def test_commit(self):
docs = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
size = 3
with mock.patch.object(self.client, 'bulk_index') as bulk_mock:
with mock.patch.object(self.client, '_docs', new=docs):
with mock.patch.object(self.client, '_chunk_size', new=size):
self.client.commit()
bulk_mock.assert_has_calls([
mock.call(['one', 'two', 'three']),
mock.call(['four', 'five', 'six']),
mock.call(['seven']),
])
def test_add_point_no_autocommit(self):
point = dataframe.DataPoint(
'unit', '0.42', '0.1337', {}, {})
start = datetime.datetime(2019, 1, 1)
end = datetime.datetime(2019, 1, 1, 1)
with mock.patch.object(self.client, 'commit') as func_mock:
with mock.patch.object(self.client, '_autocommit', new=False):
with mock.patch.object(self.client, '_chunk_size', new=3):
self.client._docs = []
for _ in range(5):
self.client.add_point(
point, 'awesome_type', start, end)
func_mock.assert_not_called()
self.assertEqual(self.client._docs, [{
'start': start,
'end': end,
'type': 'awesome_type',
'unit': point.unit,
'qty': point.qty,
'price': point.price,
'groupby': point.groupby,
'metadata': point.metadata,
} for _ in range(5)])
self.client._docs = []
def test_add_point_with_autocommit(self):
point = dataframe.DataPoint(
'unit', '0.42', '0.1337', {}, {})
start = datetime.datetime(2019, 1, 1)
end = datetime.datetime(2019, 1, 1, 1)
commit_calls = {'count': 0}
def commit():
# We can't re-assign nonlocal variables in python2
commit_calls['count'] += 1
self.client._docs = []
with mock.patch.object(self.client, 'commit', new=commit):
with mock.patch.object(self.client, '_autocommit', new=True):
with mock.patch.object(self.client, '_chunk_size', new=3):
self.client._docs = []
for i in range(5):
self.client.add_point(
point, 'awesome_type', start, end)
self.assertEqual(commit_calls['count'], 1)
self.assertEqual(self.client._docs, [{
'start': start,
'end': end,
'type': 'awesome_type',
'unit': point.unit,
'qty': point.qty,
'price': point.price,
'groupby': point.groupby,
'metadata': point.metadata,
} for _ in range(2)])
# cleanup
self.client._docs = []
def test_delete_by_query_with_must(self):
with mock.patch.object(self.client, '_req') as rmock:
with mock.patch.object(self.client, '_build_must') as func_mock:
func_mock.return_value = {'a': 'b'}
self.client.delete_by_query()
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/_delete_by_query',
'{"query": {"bool": {"must": {"a": "b"}}}}', None)
def test_delete_by_query_no_must(self):
with mock.patch.object(self.client, '_req') as rmock:
with mock.patch.object(self.client, '_build_must') as func_mock:
func_mock.return_value = {}
self.client.delete_by_query()
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/_delete_by_query',
None, None)
def test_retrieve_no_pagination(self):
search_resp = {
'_scroll_id': '000',
'hits': {'hits': ['one', 'two', 'three'], 'total': 12},
}
scroll_resps = [{
'_scroll_id': str(i + 1) * 3,
'hits': {'hits': ['one', 'two', 'three']},
} for i in range(3)]
scroll_resps.append({'_scroll_id': '444', 'hits': {'hits': []}})
self.client._scroll_ids = set()
with mock.patch.object(self.client, 'search') as search_mock:
with mock.patch.object(self.client, 'scroll') as scroll_mock:
with mock.patch.object(self.client, 'close_scrolls') as close:
search_mock.return_value = search_resp
scroll_mock.side_effect = scroll_resps
total, resp = self.client.retrieve(
None, None, None, None, paginate=False)
search_mock.assert_called_once()
scroll_mock.assert_has_calls([
mock.call({
'scroll_id': str(i) * 3,
'scroll': '60s',
}) for i in range(4)
])
self.assertEqual(total, 12)
self.assertEqual(resp, ['one', 'two', 'three'] * 4)
self.assertSetEqual(self.client._scroll_ids,
set(str(i) * 3 for i in range(5)))
close.assert_called_once()
self.client._scroll_ids = set()
def test_retrieve_with_pagination(self):
search_resp = {
'_scroll_id': '000',
'hits': {'hits': ['one', 'two', 'three'], 'total': 12},
}
scroll_resps = [{
'_scroll_id': str(i + 1) * 3,
'hits': {'hits': ['one', 'two', 'three']},
} for i in range(3)]
scroll_resps.append({'_scroll_id': '444', 'hits': {'hits': []}})
self.client._scroll_ids = set()
with mock.patch.object(self.client, 'search') as search_mock:
with mock.patch.object(self.client, 'scroll') as scroll_mock:
with mock.patch.object(self.client, 'close_scrolls') as close:
search_mock.return_value = search_resp
scroll_mock.side_effect = scroll_resps
total, resp = self.client.retrieve(
None, None, None, None,
offset=2, limit=4, paginate=True)
search_mock.assert_called_once()
scroll_mock.assert_called_once_with({
'scroll_id': '000',
'scroll': '60s',
})
self.assertEqual(total, 12)
self.assertEqual(resp, ['three', 'one', 'two', 'three'])
self.assertSetEqual(self.client._scroll_ids,
set(str(i) * 3 for i in range(2)))
close.assert_called_once()
self.client._scroll_ids = set()
def _do_test_total(self, groupby, paginate):
with mock.patch.object(self.client, 'search') as search_mock:
if groupby:
search_resps = [{
'aggregations': {
'sum_and_price': {
'buckets': ['one', 'two', 'three'],
'after_key': str(i),
}
}
} for i in range(3)]
last_resp_aggs = search_resps[2]['aggregations']
last_resp_aggs['sum_and_price'].pop('after_key')
last_resp_aggs['sum_and_price']['buckets'] = []
search_mock.side_effect = search_resps
else:
search_mock.return_value = {
'aggregations': ['one', 'two', 'three'],
}
resp = self.client.total(None, None, None, None, groupby,
offset=2, limit=4, paginate=paginate)
if not groupby:
search_mock.assert_called_once()
return resp
def test_total_no_groupby_no_pagination(self):
total, aggs = self._do_test_total(None, False)
self.assertEqual(total, 1)
self.assertEqual(aggs, [['one', 'two', 'three']])
def test_total_no_groupby_with_pagination(self):
total, aggs = self._do_test_total(None, True)
self.assertEqual(total, 1)
self.assertEqual(aggs, [['one', 'two', 'three']])
def test_total_with_groupby_no_pagination(self):
total, aggs = self._do_test_total(['x'], False)
self.assertEqual(total, 6)
self.assertEqual(aggs, ['one', 'two', 'three'] * 2)
def test_total_with_groupby_with_pagination(self):
total, aggs = self._do_test_total(['x'], True)
self.assertEqual(total, 6)
self.assertEqual(aggs, ['three', 'one', 'two', 'three'])
|
|
import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
    def __init__(self, message='OAuth error occurred.'):
RuntimeError.__init__(self, message)
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
# Turn into utf8 as appropriate
def _utf8_str(s):
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join(str(random.randint(0, 9)) for i in range(length))
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
@staticmethod
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
def __str__(self):
return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems())
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = params.items()
# sort lexicographically, first after key, then after value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join('%s=%s' % (escape(_utf8_str(k)), escape(_utf8_str(v))) for k, v in key_values)
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
@staticmethod
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
            if auth_header.find('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
@staticmethod
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
@staticmethod
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = escape(callback)
return OAuthRequest(http_method, http_url, parameters)
# util function: turn Authorization: header into parameters, has to do some unescaping
@staticmethod
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# ignore realm parameter
if param.find('OAuth realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
# util function: turn url string into parameters, has to do some unescaping
@staticmethod
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
        self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %d differ by more than the %d second threshold' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
# -> OAuthToken
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
# -> OAuthToken
raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
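# A minimal signing sketch (kept as comments so importing this module stays
# side-effect free; the key/secret values are purely illustrative):
#
#   consumer = OAuthConsumer('consumer-key', 'consumer-secret')
#   token = OAuthToken('access-token', 'token-secret')
#   request = OAuthRequest.from_consumer_and_token(
#       consumer, token=token, http_method='GET',
#       http_url='http://example.com/photos', parameters={'file': 'cat.jpg'})
#   request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
#   headers = request.to_header(realm='http://example.com/')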
|
|
"""
Functions for Shapelet related operations
"""
import sys
import numpy as np
from scipy.misc import factorial
from scipy import special
#TODO: hermite 2d, round gaussian?
#TODO: Fourier transform
#########################################################
#def polar2cart():
# """Convert a set of polar coefficients to Cartesian coefficients [manual eq. 1.27]
# """
def hermite2d(n0,n1):
"""Return a n0 x n1 order 2D Hermite polynomial"""
h0=special.hermite(n0)
h1=special.hermite(n1)
return [h0,h1]
def laguerre(n0,m0):
"""Return a generalized Laguerre polynomial L^(|m|)_((n-|m|)/2)(x)"""
l0=special.genlaguerre(n=(n0-np.abs(m0))/2,alpha=np.abs(m0))
return l0
def rotMatrix(phi):
"""2D Cartesian rotation matrix (radians)"""
return np.matrix([[np.cos(phi),-1.*np.sin(phi)],[np.sin(phi),np.cos(phi)]])
def basis2d(n0,n1,beta=[1.,1.],phi=0.,fourier=False):
"""2d dimensionless Cartesian basis function
phi: rotation angle
fourier: return the Fourier transformed version of the function
"""
b=hermite2d(n0,n1)
m=rotMatrix(phi)
phs=[1.,1.]
if fourier:
beta=[1./beta[0],1./beta[1]]
phs=[1j**(n0),1j**(n1)]
b[0]*=((2**n0)*(np.pi**(.5))*factorial(n0))**(-.5)*phs[0]
exp0=lambda x: beta[0] * b[0](x) * np.exp(-.5*(x**2))
b[1]*=((2**n1)*(np.pi**(.5))*factorial(n1))**(-.5)*phs[1]
exp1=lambda x: beta[1] * b[1](x) * np.exp(-.5*(x**2))
return lambda y,x: exp0(m[0,0]*y+m[0,1]*x)*exp1(m[1,0]*y+m[1,1]*x)
def dimBasis2d(n0,n1,beta=[1.,1.],phi=0.,fourier=False):
"""2d dimensional Cartesian basis function of characteristic size beta
phi: rotation angle
fourier: return the Fourier transformed version of the function
"""
b=hermite2d(n0,n1)
m=rotMatrix(phi)
phs=[1.,1.]
if fourier:
beta=[1./beta[0],1./beta[1]]
phs=[1j**(n0),1j**(n1)]
b[0]*=(beta[0]**(-.5))*(((2**n0)*(np.pi**(.5))*factorial(n0))**(-.5))*phs[0]
exp0=lambda x: b[0](x/beta[0]) * np.exp(-.5*((x/beta[0])**2))
b[1]*=(beta[1]**(-.5))*(((2**n1)*(np.pi**(.5))*factorial(n1))**(-.5))*phs[1]
exp1=lambda x: b[1](x/beta[1]) * np.exp(-.5*((x/beta[1])**2))
return lambda y,x: exp0(m[0,0]*y+m[0,1]*x)*exp1(m[1,0]*y+m[1,1]*x)
#TODO: make into an elliptical form?
#TODO: fourier transform is not quite correct
def polarDimBasis(n0,m0,beta=1.,phi=0.,fourier=False):
"""Polar dimensional basis function based on Laguerre polynomials of characteristic size beta
phi: rotation angle
fourier: return the Fourier transformed version of the function
"""
    if not hasattr(beta,'__len__'): beta=[beta,beta]
    elif len(beta)==1: beta=[beta[0],beta[0]]
phs=1.
if fourier:
beta=[1./beta[0],1./beta[1]]
phs=1j**(n0+m0)
b0=laguerre(n0,m0)
norm=(((-1.)**((n0-np.abs(m0))/2))/np.sqrt(beta[0]**(np.abs(m0)+1)*beta[1]**(np.abs(m0)+1)))*((float(factorial(int((n0-np.abs(m0))/2)))/float(factorial(int((n0+np.abs(m0))/2))))**.5)*phs
exp0=lambda r,th: norm * r**(np.abs(m0)) * b0((r**2.)/(beta[0]*beta[1])) * np.exp(-.5*(r**2.)/(beta[0]*beta[1])) * np.exp(-1j*m0*(th+phi))
return exp0
def polarArray(xc,size,rot=0.):
"""Return arrays of shape 'size' with radius and theta values centered on xc
rot: radians in which to rotate the shapelet
"""
ry=np.array(range(0,size[0]),dtype=float)-xc[0]
rx=np.array(range(0,size[1]),dtype=float)-xc[1]
yy=np.reshape(np.tile(rx,size[0]),(size[0],size[1]))
xx=np.reshape(np.tile(ry,size[1]),(size[1],size[0]))
rExp = lambda y,x: np.sqrt(np.square(y) + np.square(x))
thExp = lambda y,x: np.arctan2(y,x)+rot
return rExp(yy,xx.T), thExp(yy,xx.T)
def cartArray(xc,size):
"""Return arrays of shape 'size' with y,x values centered on xc
"""
ry=np.array(range(0,size[0]),dtype=float)-xc[0]
rx=np.array(range(0,size[1]),dtype=float)-xc[1]
yy=np.reshape(np.tile(ry,size[1]),(size[0],size[1]))
xx=np.reshape(np.tile(rx,size[0]),(size[1],size[0]))
return yy.T,xx
def xy2Grid(ry,rx):
"""Convert a range of x and y to a grid of shape (len(x),len(y))"""
yy=np.reshape(np.tile(ry,len(rx)),(len(rx),len(ry)))
xx=np.reshape(np.tile(rx,len(ry)),(len(ry),len(rx)))
return yy.T,xx
def xy2rthGrid(ry,rx):
"""Convert a range of y and x to r,th arrays of shape (len(y),len(x))"""
yy=np.reshape(np.tile(ry,len(rx)),(len(rx),len(ry)))
xx=np.reshape(np.tile(rx,len(ry)),(len(ry),len(rx)))
rExp = lambda y,x: np.sqrt(np.square(y) + np.square(x))
thExp = lambda y,x: np.arctan2(y,x)
return rExp(yy,xx.T), thExp(yy,xx.T)
def rth2xy(r,th):
"""Convert r,theta array pair to an y,x pair"""
y=r*np.cos(th)
x=r*np.sin(th)
return y,x
def xyRotate(ry,rx,rot=0.):
"""Apply a rotation(radians) to an set of Y,Y coordinates"""
r0,th0=xy2rthGrid(ry,rx)
th0+=rot
return rth2xy(r0,th0)
def computeBasisPolar(b,r,th):
"""Compute the values of a Polar Basis function b over the R and Theta range"""
return b(r,th)
def computeBasisPolarAtom(b,r,th):
"""Compute the polar basis function b in the position (rad,theta)"""
return b(r,th)
def computeBasis2d(b,yy,xx):
"""Compute the values of a 2D Basis function b for (yy,xx)"""
return b(yy,xx)
def computeBasis2dAtom(b,y,x):
"""Compute the basis function b in the position (y,x), x and y can be arrays"""
return b(y,x)
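# A brief usage sketch (illustrative values): build a coordinate grid with
# cartArray and evaluate a dimensional Cartesian basis function over it.
#   yy,xx=cartArray([8.,8.],[16,16])
#   b=dimBasis2d(2,1,beta=[2.,3.])
#   bval=computeBasis2d(b,yy,xx)  # -> (16,16) array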
if __name__ == "__main__":
print '============================================'
print 'Testing shapelets module:'
print '============================================'
tc=0
te=0
#hermite2d(n0,n1):
tc+=1
try:
h0,h1=hermite2d(3,4)
print 'hermite:', type(h0), type(h1)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#laguerre(n0,m0):
tc+=1
try:
l0=laguerre(3,3)
print 'laguerre:', type(l0)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#rotMatrix(phi):
tc+=1
try:
print rotMatrix(np.pi/4.)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#xyRotate(rx,ry,rot=0.):
tc+=1
try:
xp,yp=xyRotate(np.array([1.]),np.array([0.]),rot=np.pi/4.)
print xp,yp
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#basis2d(n0,n1,beta=[1.,1.],phi=0.):
tc+=1
try:
b=basis2d(3,4,beta=[1.,1.],phi=np.pi/4.)
print b(2.,3.5)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#dimBasis2d(n0,n1,beta=[1.,1.],phi=0.):
tc+=1
try:
b=dimBasis2d(3,4,beta=[1.,1.],phi=np.pi/4.)
print b(2.,3.5)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#polarDimBasis(n0,m0,beta=[1.,1.],phi=0.):
tc+=1
try:
b=polarDimBasis(3,3,beta=[1.,1.],phi=np.pi/4.)
print b(3.5,np.pi/8.)
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#polarArray(xc,size,rot=0.):
tc+=1
try:
r,th=polarArray([0.,0.],[15,20],rot=0.)
print r.shape, th.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#cartArray(xc,size):
tc+=1
try:
x,y=cartArray([6.,7.],[15,20])
print x.shape, y.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#xy2rthGrid(rx,ry):
tc+=1
try:
r,th=xy2rthGrid(np.arange(10),np.arange(10))
print r.shape, th.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#rth2xy(r,th):
tc+=1
try:
x,y=rth2xy(np.random.randn(10),2.*np.pi*np.random.rand(10))
print x.shape,y.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#computeBasisPolar(b,r,th):
tc+=1
try:
r,th=polarArray([0.,0.],[15,20],rot=0.)
b=polarDimBasis(3,3,beta=[1.,1.],phi=np.pi/4.)
bval=computeBasisPolar(b,r,th)
print bval.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#computeBasisPolarAtom(b,r,th):
tc+=1
try:
b=polarDimBasis(3,3,beta=[1.,1.],phi=np.pi/4.)
        bval=computeBasisPolarAtom(b,5.,np.pi/8.)
print bval
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#computeBasis2d(b,rx,ry):
tc+=1
try:
rx,ry=cartArray([6.,7.],[15,20])
b=dimBasis2d(3,4,beta=[1.,1.],phi=np.pi/4.)
bval=computeBasis2d(b,rx,ry)
print bval.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
#computeBasis2dAtom(b,x,y):
tc+=1
try:
b=dimBasis2d(3,4,beta=[1.,1.],phi=np.pi/4.)
bval=computeBasis2dAtom(b,np.random.randn(10),np.random.randn(10))
print bval.shape
except:
print 'Test failed (%i):'%tc, sys.exc_info()[0]
te+=1
print '============================================'
print '%i of %i tests succeeded'%(tc-te,tc)
print '============================================'
|
|
#!/usr/bin/python -u
import sys
import os
import subprocess
import time
import datetime
import shutil
import tempfile
import hashlib
import re
debug = False
################
#### Telegraf Variables
################
# Packaging variables
PACKAGE_NAME = "telegraf"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
LOGROTATE_DIR = "/etc/logrotate.d"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "get.influxdb.org/telegraf"
CONFIGURATION_FILES = [
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
]
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet -composites=true ./"
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
PACKAGE_LICENSE,
MAINTAINER,
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
POSTINST_SCRIPT,
PREINST_SCRIPT,
DESCRIPTION)
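# For reference, with the constants above fpm_common_args expands to roughly
# the following (whitespace condensed, shown here only as an illustration):
#   -f -s dir --log error --vendor InfluxData
#   --url https://github.com/influxdata/telegraf --license MIT
#   --maintainer support@influxdb.com
#   --config-files /etc/telegraf/telegraf.conf
#   --config-files /etc/logrotate.d/telegraf
#   --after-install scripts/post-install.sh
#   --before-install scripts/pre-install.sh
#   --description "Plugin-driven server agent for reporting metrics into InfluxDB."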
targets = {
'telegraf' : './cmd/telegraf',
}
supported_builds = {
"darwin": [ "amd64" ],
"windows": [ "amd64", "i386" ],
"linux": [ "amd64", "i386", "armhf", "armel", "arm64" ],
"freebsd": [ "amd64" ]
}
supported_packages = {
"darwin": [ "tar", "zip" ],
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
"freebsd": [ "tar" ]
}
supported_tags = {
# "linux": {
# "amd64": ["sensors"]
# }
}
prereq_cmds = {
# "linux": "sudo apt-get install lm-sensors libsensors4-dev"
}
################
#### Telegraf Functions
################
def create_package_fs(build_root):
print("Creating a filesystem hierarchy from directory: {}".format(build_root))
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
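    # For example, os.path.join(build_root, "/usr/bin") yields "/usr/bin"
    # (the absolute component resets the join), whereas
    # os.path.join(build_root, "usr/bin") yields "<build_root>/usr/bin".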
dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
for d in dirs:
create_dir(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root, windows=False):
print("Copying scripts and sample configuration to build directory")
if windows:
shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
else:
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
def run_generate():
# NOOP for Telegraf
return True
def go_get(branch, update=False, no_stash=False):
if not check_path_for("gdm"):
print("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
print("Retrieving dependencies with `gdm`...")
run("{}/bin/gdm restore -f Godeps_windows".format(os.environ.get("GOPATH")))
run("{}/bin/gdm restore".format(os.environ.get("GOPATH")))
return True
def run_tests(race, parallel, timeout, no_vet):
# Currently a NOOP for Telegraf
return True
################
#### All Telegraf-specific content above this line
################
def run(command, allow_failure=False, shell=False):
out = None
if debug:
print("[DEBUG] {}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode("utf8")
if debug:
print("[DEBUG] command output: {}".format(out))
except subprocess.CalledProcessError as e:
print("")
print("")
print("Executed command failed!")
print("-- Command run was: {}".format(command))
print("-- Failure was: {}".format(e.output))
if allow_failure:
print("Continuing...")
return None
else:
print("")
print("Stopping.")
sys.exit(1)
except OSError as e:
print("")
print("")
print("Invalid command!")
print("-- Command run was: {}".format(command))
print("-- Failure was: {}".format(e))
if allow_failure:
print("Continuing...")
return out
else:
print("")
print("Stopping.")
sys.exit(1)
else:
return out
def create_temp_dir(prefix = None):
if prefix is None:
return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
else:
return tempfile.mkdtemp(prefix=prefix)
def get_current_version_tag():
version = run("git describe --always --tags --abbrev=0").strip()
return version
def get_current_version():
version_tag = get_current_version_tag()
if version_tag[0] == 'v':
# Remove leading 'v' and possible '-rc\d+'
version = re.sub(r'-rc\d+', '', version_tag[1:])
else:
version = re.sub(r'-rc\d+', '', version_tag)
return version
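# For example, a tag of "v0.13.0-rc2" yields version "0.13.0", while a plain
# "0.13.0" tag is returned unchanged.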
def get_current_rc():
rc = None
version_tag = get_current_version_tag()
matches = re.match(r'.*-rc(\d+)', version_tag)
if matches:
rc, = matches.groups(1)
return rc
def get_current_commit(short=False):
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def get_system_arch():
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
return arch
def get_system_platform():
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
out = run("go version")
    matches = re.search(r'go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
def check_environ(build_dir=None):
print("")
print("Checking environment:")
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
print("- {} -> {}".format(v, os.environ.get(v)))
cwd = os.getcwd()
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
print("!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")
def check_prereqs():
print("")
print("Checking for dependencies:")
for req in prereqs:
path = check_path_for(req)
if path:
print("- {} -> {}".format(req, path))
else:
print("- {} -> ?".format(req))
for req in optional_prereqs:
path = check_path_for(req)
if path:
print("- {} (optional) -> {}".format(req, path))
else:
print("- {} (optional) -> ?".format(req))
print("")
return True
def upload_packages(packages, bucket_name=None, nightly=False):
if debug:
print("[DEBUG] upload_packages: {}".format(packages))
try:
import boto
from boto.s3.key import Key
except ImportError:
print("!! Cannot upload packages without the 'boto' Python library.")
return 1
print("Connecting to S3...".format(bucket_name))
c = boto.connect_s3()
if bucket_name is None:
bucket_name = DEFAULT_BUCKET
bucket = c.get_bucket(bucket_name.split('/')[0])
print("Using bucket: {}".format(bucket_name))
for p in packages:
if '/' in bucket_name:
# Allow for nested paths within the bucket name (ex:
# bucket/folder). Assuming forward-slashes as path
# delimiter.
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
os.path.basename(p))
else:
name = os.path.basename(p)
if bucket.get_key(name) is None or nightly:
print("Uploading {}...".format(name))
sys.stdout.flush()
k = Key(bucket)
k.key = name
if nightly:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
else:
print("!! Not uploading package {}, as it already exists.".format(p))
print("")
return 0
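# For example, with the default bucket "get.influxdb.org/telegraf" the objects
# are uploaded to the "get.influxdb.org" bucket under keys like
# "telegraf/<package file name>"; a bucket name without a "/" uses the bare
# file name as the key.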
def build(version=None,
branch=None,
commit=None,
platform=None,
arch=None,
nightly=False,
rc=None,
race=False,
clean=False,
outdir="."):
print("\n-------------------------\n")
print("Build Plan:")
print("- version: {}".format(version))
if rc:
print("- release candidate: {}".format(rc))
print("- commit: {}".format(get_current_commit(short=True)))
print("- branch: {}".format(get_current_branch()))
print("- platform: {}".format(platform))
print("- arch: {}".format(arch))
print("- nightly? {}".format(str(nightly).lower()))
print("- race enabled? {}".format(str(race).lower()))
print("")
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/':
print("Cleaning build directory...")
shutil.rmtree(outdir)
os.makedirs(outdir)
if rc:
# If a release candidate, update the version information accordingly
version = "{}rc{}".format(version, rc)
print("Starting build...")
tmp_build_dir = create_temp_dir()
for b, c in targets.items():
print("Building '{}'...".format(os.path.join(outdir, b)))
build_command = ""
if "arm" in arch:
build_command += "GOOS={} GOARCH={} ".format(platform, "arm")
else:
if arch == 'i386':
arch = '386'
elif arch == 'x86_64':
arch = 'amd64'
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if "arm" in arch:
if arch == "armel":
build_command += "GOARM=5 "
elif arch == "armhf" or arch == "arm":
build_command += "GOARM=6 "
elif arch == "arm64":
build_command += "GOARM=7 "
else:
print("!! Invalid ARM architecture specifed: {}".format(arch))
print("Please specify either 'armel', 'armhf', or 'arm64'")
return 1
if platform == 'windows':
build_command += "go build -o {} ".format(os.path.join(outdir, b + '.exe'))
else:
build_command += "go build -o {} ".format(os.path.join(outdir, b))
if race:
build_command += "-race "
go_version = get_go_version()
if "1.4" in go_version:
build_command += "-ldflags=\"-X main.Version {} -X main.Branch {} -X main.Commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
# With Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
build_command += "-ldflags=\"-X main.Version={} -X main.Branch={} -X main.Commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
build_command += c
run(build_command, shell=True)
return 0
def create_dir(path):
try:
os.makedirs(path)
except OSError as e:
print(e)
def rename_file(fr, to):
try:
os.rename(fr, to)
except OSError as e:
print(e)
# Return the original filename
return fr
else:
# Return the new filename
return to
def copy_file(fr, to):
try:
shutil.copy(fr, to)
except OSError as e:
print(e)
def generate_md5_from_file(path):
    # Stream the file in 4 KiB chunks so large packages do not have to fit in memory.
    m = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            m.update(chunk)
    return m.hexdigest()
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
outfiles = []
tmp_build_dir = create_temp_dir()
if debug:
print("[DEBUG] build_output = {}".format(build_output))
try:
print("-------------------------\n")
print("Packaging...")
for platform in build_output:
# Create top-level folder displaying which platform (linux, etc)
create_dir(os.path.join(tmp_build_dir, platform))
for arch in build_output[platform]:
# Create second-level directory displaying the architecture (amd64, etc)
current_location = build_output[platform][arch]
# Create directory tree to mimic file system of package
build_root = os.path.join(tmp_build_dir,
platform,
arch,
'{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
create_dir(build_root)
create_package_fs(build_root)
# Copy packaging scripts to build directory
package_scripts(build_root)
for binary in targets:
# Copy newly-built binaries to packaging directory
if platform == 'windows':
binary = binary + '.exe'
# Where the binary currently is located
fr = os.path.join(current_location, binary)
# Where the binary should go in the package filesystem
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
if debug:
print("[{}][{}] - Moving from '{}' to '{}'".format(platform,
arch,
fr,
to))
copy_file(fr, to)
for package_type in supported_packages[platform]:
# Package the directory structure for each package type for the platform
print("Packaging directory '{}' as '{}'...".format(build_root, package_type))
name = PACKAGE_NAME
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
package_iteration = iteration
package_build_root = build_root
current_location = build_output[platform][arch]
if rc is not None:
# Set iteration to 0 since it's a release candidate
package_iteration = "0.rc{}".format(rc)
if package_type in ['zip', 'tar']:
# For tars and zips, start the packaging one folder above
# the build root (to include the package name)
package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
if nightly:
name = '{}-nightly_{}_{}'.format(name,
platform,
arch)
else:
name = '{}-{}-{}_{}_{}'.format(name,
package_version,
package_iteration,
platform,
arch)
current_location = os.path.join(os.getcwd(), current_location)
if package_type == 'tar':
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(build_root, name)
run(tar_command, shell=True)
run("mv {}.tar.gz {}".format(os.path.join(build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".tar.gz")
outfiles.append(outfile)
print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
elif package_type == 'zip':
zip_command = "cd {} && zip -r {}.zip ./*".format(build_root, name)
run(zip_command, shell=True)
run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".zip")
outfiles.append(outfile)
print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
else:
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(fpm_common_args,
name,
arch,
package_type,
package_version,
package_iteration,
package_build_root,
current_location)
if debug:
fpm_command += "--verbose "
if package_type == "rpm":
fpm_command += "--depends coreutils "
fpm_command += "--depends lsof "
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
print("!! Could not determine output from packaging command.")
else:
# Strip nightly version (the unix epoch) from filename
if nightly:
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
outfiles.append(os.path.join(os.getcwd(), outfile))
# Display MD5 hash for generated package
print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
print("")
if debug:
print("[DEBUG] package outfiles: {}".format(outfiles))
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def print_usage():
print("Usage: ./build.py [options]")
print("")
print("Options:")
print("\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build.")
print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all")
print("\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all")
print("\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.")
print("\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP).")
print("\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP).")
print("\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information).")
print("\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).")
print("\t --race \n\t\t- Whether the produced build should have race detection enabled.")
print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).")
print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).")
print("\t --update \n\t\t- Whether dependencies should be updated prior to building.")
print("\t --test \n\t\t- Run Go tests. Will not produce a build.")
print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.")
print("\t --generate \n\t\t- Run `go generate`.")
print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.")
print("\t --clean \n\t\t- Clean the build output directory prior to creating build.")
print("\t --no-get \n\t\t- Do not run `go get` before building.")
print("\t --bucket=<S3 bucket>\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).")
print("\t --debug \n\t\t- Displays debug output.")
print("")
def print_package_summary(packages):
print(packages)
def main():
global debug
# Command-line arguments
outdir = "build"
commit = None
target_platform = None
target_arch = None
nightly = False
race = False
branch = None
version = get_current_version()
rc = get_current_rc()
package = False
update = False
clean = False
upload = False
test = False
parallel = None
timeout = None
iteration = 1
no_vet = False
run_get = True
upload_bucket = None
generate = False
no_stash = False
for arg in sys.argv[1:]:
if '--outdir' in arg:
# Output directory. If none is specified, builds are placed in ./build.
outdir = arg.split("=")[1]
elif '--commit' in arg:
# Commit to build from. If none is specified, then it will build from the most recent commit.
commit = arg.split("=")[1]
elif '--branch' in arg:
# Branch to build from. If none is specified, then it will build from the current branch.
branch = arg.split("=")[1]
elif '--arch' in arg:
# Target architecture. If none is specified, then it will build for the current arch.
target_arch = arg.split("=")[1]
elif '--platform' in arg:
# Target platform. If none is specified, then it will build for the current platform.
target_platform = arg.split("=")[1]
elif '--version' in arg:
# Version to assign to this build (0.9.5, etc)
version = arg.split("=")[1]
elif '--rc' in arg:
# Signifies that this is a release candidate build.
rc = arg.split("=")[1]
elif '--race' in arg:
# Signifies that race detection should be enabled.
race = True
elif '--package' in arg:
# Signifies that packages should be built.
package = True
# If packaging, do not allow stashing of local changes
no_stash = True
elif '--nightly' in arg:
# Signifies that this is a nightly build.
nightly = True
elif '--update' in arg:
# Signifies that dependencies should be updated.
update = True
elif '--upload' in arg:
# Signifies that the resulting packages should be uploaded to S3
upload = True
elif '--test' in arg:
# Run tests and exit
test = True
elif '--parallel' in arg:
# Set parallel for tests.
parallel = int(arg.split("=")[1])
elif '--timeout' in arg:
# Set timeout for tests.
timeout = arg.split("=")[1]
elif '--clean' in arg:
# Signifies that the outdir should be deleted before building
clean = True
elif '--iteration' in arg:
iteration = arg.split("=")[1]
elif '--no-vet' in arg:
no_vet = True
elif '--no-get' in arg:
run_get = False
elif '--bucket' in arg:
# The bucket to upload the packages to, relies on boto
upload_bucket = arg.split("=")[1]
elif '--no-stash' in arg:
# Do not stash uncommitted changes
# Fail if uncommitted changes exist
no_stash = True
elif '--generate' in arg:
generate = True
elif '--debug' in arg:
print("[DEBUG] Using debug output")
debug = True
elif '--help' in arg:
print_usage()
return 0
else:
print("!! Unknown argument: {}".format(arg))
print_usage()
return 1
if nightly and rc:
print("!! Cannot be both nightly and a release candidate! Stopping.")
return 1
if nightly:
# In order to cleanly delineate nightly versions, we append the epoch timestamp
# to the version so that each nightly's version number is always greater than the previous one's.
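# Illustrative example (hypothetical values): with version "0.9.5" and an epoch
# timestamp of 1488214980, the nightly version string becomes "0.9.5~n1488214980".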
version = "{}~n{}".format(version, int(time.time()))
iteration = 0
elif rc:
iteration = 0
# Pre-build checks
check_environ()
if not check_prereqs():
return 1
if not commit:
commit = get_current_commit(short=True)
if not branch:
branch = get_current_branch()
if not target_arch:
system_arch = get_system_arch()
if 'arm' in system_arch:
# Normalize ARM arch strings reported by uname (e.g. 'armv7l') to plain 'arm'
target_arch = "arm"
else:
target_arch = system_arch
if target_arch == '386':
target_arch = 'i386'
elif target_arch == 'x86_64':
target_arch = 'amd64'
if target_platform:
if target_platform not in supported_builds and target_platform != 'all':
print("! Invalid build platform: {}".format(target_platform))
return 1
else:
target_platform = get_system_platform()
build_output = {}
if generate:
if not run_generate():
return 1
if run_get:
if not go_get(branch, update=update, no_stash=no_stash):
return 1
if test:
if not run_tests(race, parallel, timeout, no_vet):
return 1
return 0
platforms = []
single_build = True
if target_platform == 'all':
platforms = supported_builds.keys()
single_build = False
else:
platforms = [target_platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if target_arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [target_arch]
for arch in archs:
od = outdir
if not single_build:
od = os.path.join(outdir, platform, arch)
if build(version=version,
branch=branch,
commit=commit,
platform=platform,
arch=arch,
nightly=nightly,
rc=rc,
race=race,
clean=clean,
outdir=od):
return 1
build_output.get(platform).update( { arch : od } )
# Build packages
if package:
if not check_path_for("fpm"):
print("!! Cannot package without command 'fpm'.")
return 1
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
if upload:
upload_packages(packages, bucket_name=upload_bucket, nightly=nightly)
print("Done!")
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants that aren't specific to a particular module or handler."""
# We use lazy translation in this file because the language isn't set yet.
from django_setup import gettext_lazy as _
# The root URL of this application.
ROOT_URL = 'http://google.org/personfinder'
# The domain name of this application. The application hosts multiple
# repositories; each repository ID is http://<HOME_DOMAIN>/<REPO>.
HOME_DOMAIN = 'personfinder.google.org'
# Mapping from language codes to endonyms for all available languages.
# You can get the list of language names in each language in Unicode CLDR data.
# Go to http://unicode.org/Public/cldr/latest , download core.zip and look
# at common/main/*.xml in it.
# Some names are taken from Wikipedia because they are missing in CLDR data or
# they are in different script from our translation.
LANGUAGE_ENDONYMS = {
'af': u'Afrikaans',
'am': u'\u12a0\u121b\u122d\u129b',
'ar': u'\u0627\u0644\u0639\u0631\u0628\u064A\u0629',
'az': u'az\u0259rbaycanca',
'bg': u'\u0431\u044A\u043B\u0433\u0430\u0440\u0441\u043A\u0438',
'bn': u'\u09ac\u09be\u0982\u09b2\u09be',
'ca': u'Catal\u00E0',
'cs': u'\u010De\u0161tina',
'da': u'Dansk',
'de': u'Deutsch',
'el': u'\u0395\u03BB\u03BB\u03B7\u03BD\u03B9\u03BA\u03AC',
'en': u'English',
'en-GB': u'English (UK)',
'es': u'espa\u00F1ol',
'es-419': u'espa\u00F1ol (Latinoam\u00e9rica)',
'et': u'eesti',
'eu': u'Euskara',
'fa': u'\u0641\u0627\u0631\u0633\u06CC',
'fi': u'suomi',
'fil': u'Filipino',
'fr': u'Fran\u00e7ais',
'fr-CA': u'Fran\u00e7ais (Canada)',
'gl': u'Galego',
'gu': u'\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0',
'hi': u'\u0939\u093F\u0928\u094D\u0926\u0940',
'hr': u'Hrvatski',
'ht': u'Krey\u00f2l',
'hu': u'magyar',
'hy': u'\u0570\u0561\u0575\u0565\u0580\u0565\u0576',
'id': u'Bahasa Indonesia',
'is': u'\u00edslenska',
'it': u'Italiano',
'iw': u'\u05E2\u05D1\u05E8\u05D9\u05EA',
'ja': u'\u65E5\u672C\u8A9E',
'jv': u'basa Jawa',
'ka': u'\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
'kk': u'\u049b\u0430\u0437\u0430\u049b \u0442\u0456\u043b\u0456',
'km': u'\u1781\u17d2\u1798\u17c2\u179a',
'kn': u'\u0c95\u0ca8\u0ccd\u0ca8\u0ca1',
'ko': u'\uD55C\uAD6D\uC5B4',
'ky': u'\u041a\u044b\u0440\u0433\u044b\u0437',
'lo': u'\u0ea5\u0eb2\u0ea7',
'lt': u'Lietuvi\u0173',
'lv': u'Latvie\u0161u valoda',
'mk': u'\u043c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438',
'ml': u'\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02',
'mn': u'\u043c\u043e\u043d\u0433\u043e\u043b',
'mr': u'\u092e\u0930\u093e\u0920\u0940',
'ms': u'Bahasa Melayu',
'my': u'\u1017\u1019\u102c',
'ne': u'\u0928\u0947\u092a\u093e\u0932\u0940',
'nl': u'Nederlands',
'no': u'Norsk',
'pa': u'\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40',
'pl': u'polski',
'pt-BR': u'Portugu\u00EAs (Brasil)',
'pt-PT': u'Portugu\u00EAs (Portugal)',
'ro': u'Rom\u00E2n\u0103',
'ru': u'\u0420\u0443\u0441\u0441\u043A\u0438\u0439',
'si': u'\u0dc3\u0dd2\u0d82\u0dc4\u0dbd',
'sk': u'Sloven\u010Dina',
'sl': u'Sloven\u0161\u010Dina',
'sq': u'shqip',
'sr': u'\u0441\u0440\u043F\u0441\u043A\u0438',
'su': u'Basa Sunda',
'sv': u'Svenska',
'sw': u'Kiswahili',
'ta': u'\u0ba4\u0bae\u0bbf\u0bb4\u0bcd',
'te': u'\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41',
'th': u'\u0E44\u0E17\u0E22',
'tr': u'T\u00FCrk\u00E7e',
'uk': u'\u0423\u043A\u0440\u0430\u0457\u043D\u0441\u044C\u043A\u0430',
'ur': u'\u0627\u0631\u062F\u0648',
'uz': u'o\u02bbzbek tili',
'vi': u'Ti\u1EBFng Vi\u1EC7t',
'zh-CN': u'\u4E2D \u6587 (\u7B80 \u4F53)',
'zh-HK': u'\u4E2D \u6587 (\u9999 \u6e2f)',
'zh-TW': u'\u4E2D \u6587 (\u7E41 \u9AD4)',
'zu': u'isiZulu',
}
# Mapping from language codes to English names for all available languages.
# You can get the list of language names in each language in Unicode CLDR data.
# Go to http://unicode.org/Public/cldr/latest , download core.zip and look
# at common/main/*.xml in it.
LANGUAGE_EXONYMS = {
'af': 'Afrikaans',
'am': 'Amharic',
'ar': 'Arabic',
'az': 'Azerbaijani',
'bg': 'Bulgarian',
'bn': 'Bengali',
'ca': 'Catalan',
'cs': 'Czech',
'da': 'Danish',
'de': 'German',
'el': 'Greek',
'en': 'English (US)',
'en-GB': 'English (UK)',
'es': 'Spanish',
'es-419': 'Spanish (Latin America)',
'et': 'Estonian',
'eu': 'Basque',
'fa': 'Persian',
'fi': 'Finnish',
'fil': 'Filipino',
'fr': 'French (France)',
'fr-CA': 'French (Canada)',
'gl': 'Galician',
'gu': 'Gujarati',
'hi': 'Hindi',
'hr': 'Croatian',
'ht': 'Haitian Creole',
'hu': 'Hungarian',
'hy': 'Armenian',
'id': 'Indonesian',
'is': 'Icelandic',
'it': 'Italian',
'iw': 'Hebrew',
'ja': 'Japanese',
'jv': 'Javanese',
'ka': 'Georgian',
'kk': 'Kazakh',
'km': 'Khmer',
'kn': 'Kannada',
'ko': 'Korean',
'ky': 'Kirghiz',
'lo': 'Lao',
'lt': 'Lithuanian',
'lv': 'Latvian',
'mk': 'Macedonian',
'ml': 'Malayalam',
'mn': 'Mongolian',
'mr': 'Marathi',
'ms': 'Malay',
'my': 'Burmese',
'ne': 'Nepali',
'nl': 'Dutch',
'no': 'Norwegian',
'pa': 'Punjabi',
'pl': 'Polish',
'pt-BR': 'Portuguese (Brazil)',
'pt-PT': 'Portuguese (Portugal)',
'ro': 'Romanian',
'ru': 'Russian',
'si': 'Sinhala',
'sk': 'Slovak',
'sl': 'Slovenian',
'sq': 'Albanian',
'sr': 'Serbian',
'su': 'Sundanese',
'sv': 'Swedish',
'sw': 'Swahili',
'ta': 'Tamil',
'te': 'Telugu',
'th': 'Thai',
'tr': 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'uz': 'Uzbek',
'vi': 'Vietnamese',
'zh-CN': 'Chinese (Simplified)',
'zh-HK': 'Chinese (Hong Kong)',
'zh-TW': 'Chinese (Traditional)',
'zu': 'Zulu',
}
# See go/iii
LANGUAGE_SYNONYMS = {
'he' : 'iw',
'in' : 'id',
'mo' : 'ro',
# Note that we don't currently support jv (Javanese) or yi (Yiddish).
'jw' : 'jv',
'ji' : 'yi',
# Django has a bug that django.utils.translation.activate() throws
# AttributeError when:
# - The language is not in $APPENGINE_DIR/lib/django_1_2/django/conf/locale
# - The language code contains a dash '-'
# and 'zh-HK' meets both criteria. We work around this bug by using 'zhhk'
# instead of 'zh-HK' internally.
#
# The cause of the bug is that
# $APPENGINE_DIR/lib/django_1_2/django/utils/translation/trans_real.py:142
# accesses res._info even when res is None.
'zh-HK': 'zhhk',
}
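# Minimal sketch (not part of this module) of how these synonyms are intended
# to be applied before a language code is used, assuming `lang` holds the raw
# code taken from the request:
#     lang = LANGUAGE_SYNONYMS.get(lang, lang)   # e.g. 'he' -> 'iw', 'zh-HK' -> 'zhhk'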
# Mapping from language codes to the names of LayoutCode constants. See:
# http://code.google.com/apis/ajaxlanguage/documentation/referenceKeyboard.html
VIRTUAL_KEYBOARD_LAYOUTS = {
'ur': 'URDU'
}
# Charset string for UTF-8 used in env.charset.
CHARSET_UTF8 = 'utf-8'
# UI text for the sex field when displaying a person.
PERSON_SEX_TEXT = {
# This dictionary must have an entry for '' that gives the default text.
'': '',
'female': _('female'),
'male': _('male'),
'other': _('other')
}
# UI text for the expiry field when displaying a person.
PERSON_EXPIRY_TEXT = {
'30': _('About 1 month (30 days) from now'),
'60': _('About 2 months (60 days) from now'),
'90': _('About 3 months (90 days) from now'),
'180': _('About 6 months (180 days) from now'),
'360': _('About 1 year (360 days) from now'),
}
# UI text for the status field when posting or displaying a note.
NOTE_STATUS_TEXT = {
# This dictionary must have an entry for '' that gives the default text.
'': _('Unspecified'),
'information_sought': _('I am seeking information'),
'is_note_author': _('I am this person'),
'believed_alive':
_('I have received information that this person is alive'),
'believed_missing': _('I have reason to think this person is missing'),
'believed_dead': _('I have received information that this person is dead'),
}
# UI text for the rolled-up status when displaying a person.
PERSON_STATUS_TEXT = {
# This dictionary must have an entry for '' that gives the default text.
'': _('Unspecified'),
'information_sought': _('Someone is seeking information about this person'),
'is_note_author': _('This person has posted a message'),
'believed_alive':
_('Someone has received information that this person is alive'),
'believed_missing': _('Someone has reported that this person is missing'),
'believed_dead':
_('Someone has received information that this person is dead'),
}
# The list of external websites with profile pages, sorted in the order they are
# shown on the create page; used as the default value for the profile_websites config.
DEFAULT_PROFILE_WEBSITES = [
{
# Display name of the website
'name': 'Facebook',
# Filename of the icon file served as /global/<icon_filename>.
'icon_filename': 'facebook-16x16.png',
# Regexp to check for valid profile page URLs.
'url_regexp': 'http://(www\\.)?facebook\\.com/.*',
},
{
'name': 'Twitter',
'icon_filename': 'twitter-16x16.png',
'url_regexp': 'http://(www\\.)?twitter\\.com/.*',
},
{
'name': 'LinkedIn',
'icon_filename': 'linkedin-16x16.png',
'url_regexp': 'http://(www\\.)?linkedin\\.com/.*',
},
]
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 00:35:12 2017
@author: abu
"""
import numpy as np
np.random.seed(1989)
import os
import glob
import datetime
import pandas as pd
import time
import argparse
#import warnings
#warnings.filterwarnings("ignore")
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model, Sequential, save_model, load_model
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger, TensorBoard
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from scipy.misc import imread, imresize
ROOT_FOLDER = './'
INPUT_FOLDER = 'input'
OUTPUT_FOLDER = 'output'
Image_Size_InceptionV3 = 299
CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
def get_image(path):
img = imread(path)
return imresize(img, (Image_Size_InceptionV3,Image_Size_InceptionV3))
def load_train_data():
X_train = []
X_train_id = []
y_train = []
start_time = time.time()
print('Read train images')
folders = CLASSES
for fld in folders:
index = folders.index(fld)
print('Load folder {} (Index: {})'.format(fld, index))
path = os.path.join(ROOT_FOLDER, INPUT_FOLDER, 'train', fld, '*.jpg')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
img = get_image(fl)
X_train.append(img)
X_train_id.append(flbase)
y_train.append(index)
print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))
return X_train, y_train, X_train_id
def load_test_data():
path = os.path.join(ROOT_FOLDER, INPUT_FOLDER, 'test/stage1', '*.jpg')
files = sorted(glob.glob(path))
X_test = []
X_test_id = []
for fl in files:
flbase = os.path.basename(fl)
img = get_image(fl)
X_test.append(img)
X_test_id.append(flbase)
return X_test, X_test_id
def create_submission(predictions, test_id, info):
result1 = pd.DataFrame(predictions, columns=CLASSES)
result1.loc[:, 'image'] = pd.Series(test_id, index=result1.index)
now = datetime.datetime.now()
sub_file = 'submission_' + info + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
sub_file = os.path.join(ROOT_FOLDER, OUTPUT_FOLDER, sub_file)
result1.to_csv(sub_file, index=False)
def get_train_data():
train_data, train_target, train_id = load_train_data()
print('Convert to numpy...')
train_data = np.array(train_data)
train_target = np.array(train_target)
# not necessary for tensorflow
#print('Reshape...')
#train_data = train_data.transpose((0, 3, 1, 2))
# do it just before training, to save memory
#print('Convert to float...')
#train_data = train_data.astype('float32')
#train_data = train_data / 255
train_target = np_utils.to_categorical(train_target, 8)
print('Train data shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
return train_data, train_target, train_id
def get_test_data():
start_time = time.time()
test_data, test_id = load_test_data()
test_data = np.array(test_data)
# not necessary for tensorflow
#test_data = test_data.transpose((0, 3, 1, 2))
# do it just before training, to save memory
#test_data = test_data.astype('float32')
#test_data = test_data / 255
print('Test data shape:', test_data.shape)
print(test_data.shape[0], 'test samples')
print('Read and process test data time: {} seconds'.format(round(time.time() - start_time, 2)))
return test_data, test_id
def merge_several_folds_mean(data, nfolds):
a = np.array(data[0])
for i in range(1, nfolds):
a += np.array(data[i])
a /= nfolds
return a.tolist()
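# Worked example (illustrative): averaging two folds' predictions for a single
# test image is an element-wise mean, e.g.
#     merge_several_folds_mean([[[0.2, 0.8]], [[0.4, 0.6]]], 2)
# yields approximately [[0.3, 0.7]] (up to floating-point rounding).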
def create_model():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(64, 64, 3), dim_ordering='tf'))
model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='tf', init='he_uniform'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='tf'))
model.add(ZeroPadding2D((1, 1), dim_ordering='tf'))
model.add(Convolution2D(16, 3, 3, activation='relu', dim_ordering='tf', init='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='tf'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(96, activation='relu',init='he_uniform'))
model.add(Dropout(0.4))
model.add(Dense(24, activation='relu',init='he_uniform'))
model.add(Dropout(0.2))
model.add(Dense(8, activation='softmax'))
sgd = SGD(lr=1e-2, decay=1e-4, momentum=0.88, nesterov=False)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def create_inception_model():
InceptionV3_notop = InceptionV3(include_top=False, weights='imagenet',
input_tensor=None, input_shape=(299, 299, 3))
for layer in InceptionV3_notop.layers:
layer.trainable = False
# Note that the preprocessing of InceptionV3 is:
# (x / 255 - 0.5) * 2
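# e.g. a pixel value of 255 maps to (255/255 - 0.5) * 2 = 1.0 and a value of 0
# maps to -1.0, which is the same scaling applied to the training and validation
# arrays elsewhere in this file via astype('float32')/255*2-1.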
print('Adding Average Pooling Layer and Softmax Output Layer ...')
output = InceptionV3_notop.get_layer(index = -1).output # Shape: (8, 8, 2048)
output = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(output)
output = Flatten(name='flatten')(output)
output = Dense(8, activation='softmax', name='predictions')(output)
model = Model(InceptionV3_notop.input, output)
return model
def create_model_from_weights(weights_file):
#weights_path = os.path.join(ROOT_FOLDER, OUTPUT_FOLDER, weights_file)
model = load_model(weights_file)
return model
def run_cross_validation_training(nb_epoch=20, nfolds=10):
# input image dimensions
batch_size = 32
train_data, train_target, train_id = get_train_data()
yfull_train = dict()
kfold = KFold(len(train_id), n_folds=nfolds, shuffle=True, random_state=51)
sum_score = 0
for fold_index, (train_index, valid_index) in enumerate(kfold):
#model = create_model()
model = create_inception_model()
sgd = SGD(lr=1e-4, decay=1e-4, momentum=0.88, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
if fold_index==0:
model.summary()
X_train = train_data[train_index].astype('float32')/255*2-1
Y_train = train_target[train_index]
X_valid = train_data[valid_index].astype('float32')/255*2-1
Y_valid = train_target[valid_index]
print('Training for {}/{} KFolds'.format(fold_index+1, nfolds))
print('Split training samples: ', len(train_index))
print('Split validation samples: ', len(valid_index))
callbacks = [
EarlyStopping(monitor='val_loss', patience=3, verbose=0),
#ModelCheckpoint(os.path.join(ROOT_FOLDER, 'weights-fold{}.h5'.format(fold_index)), monitor='val_acc', save_best_only = True),
CSVLogger('training.log', append=True),
TensorBoard(log_dir='./tmp', histogram_freq=0, write_graph=True, write_images=False)
]
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
shuffle=True, validation_data=(X_valid, Y_valid),
callbacks=callbacks)
model.save(os.path.join(ROOT_FOLDER, OUTPUT_FOLDER, 'weights-fold{}.h5'.format(fold_index)))
# Use the best (lowest) validation loss from this fold as its score
val_loss = min(hist.history['val_loss'])
sum_score += val_loss*len(valid_index)
# Weighted average over all samples (each sample appears in exactly one validation fold)
score = sum_score/len(train_data)
info_string = 'loss_' + str(np.round(score,3)) + '_folds_' + str(nfolds) + '_eps_' + str(nb_epoch)
return info_string
def predict_fold(nfold):
weights_path = os.path.join(ROOT_FOLDER, OUTPUT_FOLDER, 'weights-fold{}.h5'.format(nfold))
model = create_model_from_weights(weights_path)
sgd = SGD(lr=1e-4, decay=1e-4, momentum=0.88, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
test_data, test_id = get_test_data()
test_data = test_data.astype('float32')/255*2-1
prediction = model.predict(test_data, batch_size=24)
return prediction
def augmented_predict_fold(nfold, nb_augmentation):
nbr_test_samples = 1000
test_data_dir = os.path.join(ROOT_FOLDER, INPUT_FOLDER, 'test')
# test data generator for prediction
test_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.1,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
for idx in np.arange(nb_augmentation):
print('Running Prediction for {}/{} augmentation...'.format(idx+1, nb_augmentation))
random_seed = np.random.random_integers(0, 100000)
test_generator = test_datagen.flow_from_directory(
test_data_dir,
target_size=(Image_Size_InceptionV3, Image_Size_InceptionV3),
batch_size=24,
shuffle = False, # Important !!!
seed = random_seed,
classes = None,
class_mode = None)
weights_path = os.path.join(ROOT_FOLDER, OUTPUT_FOLDER, 'weights-fold{}.h5'.format(nfold))
model = create_model_from_weights(weights_path)
if idx == 0:
prediction = model.predict_generator(test_generator, nbr_test_samples)
else:
prediction += model.predict_generator(test_generator, nbr_test_samples)
prediction /= nb_augmentation
#print(prediction)
return prediction
def run_cross_validation_prediction(info_string, nfolds, nb_augmentation):
predictions = []
# Derive the test image ids (file basenames) in the same sorted order used by
# load_test_data() and flow_from_directory(shuffle=False).
test_id = [os.path.basename(f) for f in sorted(glob.glob(os.path.join(ROOT_FOLDER, INPUT_FOLDER, 'test/stage1', '*.jpg')))]
for nfold in np.arange(nfolds):
print('Loading model for prediction {} of {} folds'.format(nfold+1, nfolds))
if nb_augmentation>0:
prediction = augmented_predict_fold(nfold, nb_augmentation)
predictions.append(prediction)
else:
prediction = predict_fold(nfold)
predictions.append(prediction)
prediction_result = merge_several_folds_mean(predictions, nfolds)
create_submission(prediction_result, test_id, info_string)
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument('-e', '--epochs', help='Number of epochs for training', type=int, default=10)
parse.add_argument('-f', '--folds', help='Number of KFolds', type=int, default=5)
parse.add_argument('-a', '--augmentations', help='Number of augmentation for prediction', type=int, default=0)
args = parse.parse_args()
print(args)
print('Keras version: {}'.format(keras_version))
info_string = ''
#info_string = run_cross_validation_training(nb_epoch=args.epochs, nfolds=args.folds)
run_cross_validation_prediction(info_string, nfolds=args.folds, nb_augmentation=args.augmentations)
|
|
from django.db.models.query import QuerySet
from django.db.models.lookups import Lookup
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
from wagtail.wagtailsearch.index import class_is_indexed
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
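# Worked example (illustrative): starting from start=0, stop=None, slicing
# results[5:10] calls _set_limits(5, 10) and gives start=5, stop=10; a further
# [2:3] then calls _set_limits(2, 3), giving stop=min(10, 5+3)=8 and
# start=min(8, 5+2)=7, i.e. the single original result at index 7.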
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
class BaseSearch(object):
query_class = None
results_class = None
def __init__(self, params):
pass
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# Model must be a class that is in the index
if not class_is_indexed(model):
return []
# Check that there's still a query string after the clean-up
if query_string == "":
return []
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query)
|
|
# coding=utf-8
#
# Copyright 2014 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver wrapping the Ironic API, such that Nova may provision
bare metal resources.
"""
import logging as py_logging
import time
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import importutils
import six
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper
from nova.virt.ironic import ironic_states
from nova.virt.ironic import patcher
ironic = None
LOG = logging.getLogger(__name__)
opts = [
cfg.IntOpt('api_version',
default=1,
help='Version of Ironic API service endpoint.'),
cfg.StrOpt('api_endpoint',
help='URL for Ironic API endpoint.'),
cfg.StrOpt('admin_username',
help='Ironic keystone admin name'),
cfg.StrOpt('admin_password',
help='Ironic keystone admin password.'),
cfg.StrOpt('admin_auth_token',
help='Ironic keystone auth token.'),
cfg.StrOpt('admin_url',
help='Keystone public API endpoint.'),
cfg.StrOpt('client_log_level',
help='Log level override for ironicclient. Set this in '
'order to override the global "default_log_levels", '
'"verbose", and "debug" settings.'),
cfg.StrOpt('admin_tenant_name',
help='Ironic keystone tenant name.'),
cfg.IntOpt('api_max_retries',
default=60,
help='Maximum number of retries when a request conflicts.'),
cfg.IntOpt('api_retry_interval',
default=2,
help='How often to retry, in seconds, when a request '
'conflicts.'),
]
ironic_group = cfg.OptGroup(name='ironic',
title='Ironic Options')
CONF = cfg.CONF
CONF.register_group(ironic_group)
CONF.register_opts(opts, ironic_group)
_POWER_STATE_MAP = {
ironic_states.POWER_ON: power_state.RUNNING,
ironic_states.NOSTATE: power_state.NOSTATE,
ironic_states.POWER_OFF: power_state.SHUTDOWN,
}
def map_power_state(state):
try:
return _POWER_STATE_MAP[state]
except KeyError:
LOG.warning(_LW("Power state %s not found."), state)
return power_state.NOSTATE
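# Illustrative usage: map_power_state(ironic_states.POWER_ON) returns
# power_state.RUNNING; any state missing from _POWER_STATE_MAP logs a warning
# and falls back to power_state.NOSTATE.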
def _validate_instance_and_node(ironicclient, instance):
"""Get the node associated with the instance.
Check with the Ironic service that this instance is associated with a
node, and return the node.
"""
try:
return ironicclient.call("node.get_by_instance_uuid", instance.uuid)
except ironic.exc.NotFound:
raise exception.InstanceNotFound(instance_id=instance.uuid)
def _get_nodes_supported_instances(cpu_arch=None):
"""Return supported instances for a node."""
if not cpu_arch:
return []
return [(cpu_arch,
hv_type.BAREMETAL,
vm_mode.HVM)]
def _log_ironic_polling(what, node, instance):
power_state = (None if node.power_state is None else
'"%s"' % node.power_state)
tgt_power_state = (None if node.target_power_state is None else
'"%s"' % node.target_power_state)
prov_state = (None if node.provision_state is None else
'"%s"' % node.provision_state)
tgt_prov_state = (None if node.target_provision_state is None else
'"%s"' % node.target_provision_state)
LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
'power_state=%(power_state)s, '
'target_power_state=%(tgt_power_state)s, '
'provision_state=%(prov_state)s, '
'target_provision_state=%(tgt_prov_state)s',
dict(what=what,
node=node.uuid,
power_state=power_state,
tgt_power_state=tgt_power_state,
prov_state=prov_state,
tgt_prov_state=tgt_prov_state),
instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
capabilities = {"has_imagecache": False,
"supports_recreate": False}
def __init__(self, virtapi, read_only=False):
super(IronicDriver, self).__init__(virtapi)
global ironic
if ironic is None:
ironic = importutils.import_module('ironicclient')
# NOTE(deva): work around a lack of symbols in the current version.
if not hasattr(ironic, 'exc'):
ironic.exc = importutils.import_module('ironicclient.exc')
if not hasattr(ironic, 'client'):
ironic.client = importutils.import_module(
'ironicclient.client')
self.firewall_driver = firewall.load_driver(
default='nova.virt.firewall.NoopFirewallDriver')
self.node_cache = {}
self.node_cache_time = 0
# TODO(mrda): Bug ID 1365230 Logging configurability needs
# to be addressed
ironicclient_log_level = CONF.ironic.client_log_level
if ironicclient_log_level:
level = py_logging.getLevelName(ironicclient_log_level)
logger = py_logging.getLogger('ironicclient')
logger.setLevel(level)
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an acceptable state.
Determines whether the node's resources should be presented
to Nova for use based on the current power and maintenance state.
Returns True if unacceptable.
"""
bad_states = [ironic_states.ERROR, ironic_states.NOSTATE]
return (node_obj.maintenance or
node_obj.power_state in bad_states)
def _node_resource(self, node):
"""Helper method to create resource dict from node stats."""
vcpus = int(node.properties.get('cpus', 0))
memory_mb = int(node.properties.get('memory_mb', 0))
local_gb = int(node.properties.get('local_gb', 0))
raw_cpu_arch = node.properties.get('cpu_arch', None)
try:
cpu_arch = arch.canonicalize(raw_cpu_arch)
except exception.InvalidArchitectureName:
cpu_arch = None
if not cpu_arch:
LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
nodes_extra_specs = {}
# NOTE(deva): In Havana and Icehouse, the flavor was required to link
# to an arch-specific deploy kernel and ramdisk pair, and so the flavor
# also had to have extra_specs['cpu_arch'], which was matched against
# the ironic node.properties['cpu_arch'].
# With Juno, the deploy image(s) may be referenced directly by the
# node.driver_info, and a flavor no longer needs to contain any of
# these three extra specs, though the cpu_arch may still be used
# in a heterogeneous environment, if so desired.
# NOTE(dprince): we use the raw cpu_arch here because extra_specs
# filters aren't canonicalized
nodes_extra_specs['cpu_arch'] = raw_cpu_arch
# NOTE(gilliard): To assist with more precise scheduling, if the
# node.properties contains a key 'capabilities', we expect the value
# to be of the form "k1:v1,k2:v2,etc.." which we add directly as
# key/value pairs into the node_extra_specs to be used by the
# ComputeCapabilitiesFilter
capabilities = node.properties.get('capabilities')
if capabilities:
for capability in str(capabilities).split(','):
parts = capability.split(':')
if len(parts) == 2 and parts[0] and parts[1]:
nodes_extra_specs[parts[0]] = parts[1]
else:
LOG.warning(_LW("Ignoring malformed capability '%s'. "
"Format should be 'key:val'."), capability)
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
if node.instance_uuid:
# Node has an instance, report all resource as unavailable
vcpus_used = vcpus
memory_mb_used = memory_mb
local_gb_used = local_gb
elif self._node_resources_unavailable(node):
# The node's current state is such that it should not present any
# of its resources to Nova
vcpus = 0
memory_mb = 0
local_gb = 0
dic = {
'node': str(node.uuid),
'hypervisor_hostname': str(node.uuid),
'hypervisor_type': self._get_hypervisor_type(),
'hypervisor_version': self._get_hypervisor_version(),
# The Ironic driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'vcpus': vcpus,
'vcpus_used': vcpus_used,
'local_gb': local_gb,
'local_gb_used': local_gb_used,
'disk_total': local_gb,
'disk_used': local_gb_used,
'disk_available': local_gb - local_gb_used,
'memory_mb': memory_mb,
'memory_mb_used': memory_mb_used,
'host_memory_total': memory_mb,
'host_memory_free': memory_mb - memory_mb_used,
'supported_instances': jsonutils.dumps(
_get_nodes_supported_instances(cpu_arch)),
'stats': jsonutils.dumps(nodes_extra_specs),
'host': CONF.host,
}
dic.update(nodes_extra_specs)
return dic
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info)
def _add_driver_fields(self, node, instance, image_meta, flavor,
preserve_ephemeral=None):
ironicclient = client_wrapper.IronicClientWrapper()
patch = patcher.create(node).get_deploy_patch(instance,
image_meta,
flavor,
preserve_ephemeral)
# Associate the node with an instance
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid})
try:
ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
% {'node': node.uuid, 'instance': instance.uuid})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def _cleanup_deploy(self, context, node, instance, network_info,
flavor=None):
ironicclient = client_wrapper.IronicClientWrapper()
if flavor is None:
# TODO(mrda): It would be better to use instance.get_flavor() here
# but right now that doesn't include extra_specs which are required
# NOTE(pmurray): Flavor may have been deleted
ctxt = context.elevated(read_deleted="yes")
flavor = objects.Flavor.get_by_id(ctxt,
instance.instance_type_id)
patch = patcher.create(node).get_cleanup_patch(instance, network_info,
flavor)
# Unassociate the node
patch.append({'op': 'remove', 'path': '/instance_uuid'})
try:
ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
"when unprovisioning the instance %(instance)s"),
{'node': node.uuid, 'instance': instance.uuid})
reason = (_("Fail to clean up node %s parameters") % node.uuid)
raise exception.InstanceTerminationFailure(reason=reason)
self._unplug_vifs(node, instance, network_info)
self._stop_firewall(instance, network_info)
def _wait_for_active(self, ironicclient, instance):
"""Wait for the node to be marked as ACTIVE in Ironic."""
node = _validate_instance_and_node(ironicclient, instance)
if node.provision_state == ironic_states.ACTIVE:
# job is done
LOG.debug("Ironic node %(node)s is now ACTIVE",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if node.target_provision_state == ironic_states.DELETED:
# ironic is trying to delete it now
raise exception.InstanceNotFound(instance_id=instance.uuid)
if node.provision_state == ironic_states.NOSTATE:
# ironic already deleted it
raise exception.InstanceNotFound(instance_id=instance.uuid)
if node.provision_state == ironic_states.DEPLOYFAIL:
# ironic failed to deploy
msg = (_("Failed to provision instance %(inst)s: %(reason)s")
% {'inst': instance.uuid, 'reason': node.last_error})
raise exception.InstanceDeployFailure(msg)
_log_ironic_polling('become ACTIVE', node, instance)
def _wait_for_power_state(self, ironicclient, instance, message):
"""Wait for the node to complete a power state change."""
node = _validate_instance_and_node(ironicclient, instance)
if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()
_log_ironic_polling(message, node, instance)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host.
"""
return
def _get_hypervisor_type(self):
"""Get hypervisor type."""
return 'ironic'
def _get_hypervisor_version(self):
"""Returns the version of the Ironic API service endpoint."""
return CONF.ironic.api_version
def instance_exists(self, instance):
"""Checks the existence of an instance.
Checks the existence of an instance. This is an override of the
base method for efficiency.
:param instance: The instance object.
:returns: True if the instance exists. False if not.
"""
ironicclient = client_wrapper.IronicClientWrapper()
try:
_validate_instance_and_node(ironicclient, instance)
return True
except exception.InstanceNotFound:
return False
def list_instances(self):
"""Return the names of all the instances provisioned.
:returns: a list of instance names.
"""
ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there are no more values to be returned.
node_list = ironicclient.call("node.list", associated=True, limit=0)
context = nova_context.get_admin_context()
return [objects.Instance.get_by_uuid(context,
i.instance_uuid).name
for i in node_list]
def list_instance_uuids(self):
"""Return the UUIDs of all the instances provisioned.
:returns: a list of instance UUIDs.
"""
ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there are no more values to be returned.
node_list = ironicclient.call("node.list", associated=True, limit=0)
return list(n.instance_uuid for n in node_list)
def node_is_available(self, nodename):
"""Confirms a Nova hypervisor node exists in the Ironic inventory.
:param nodename: The UUID of the node.
:returns: True if the node exists, False if not.
"""
# NOTE(comstud): We can cheat and use caching here. This method
# just needs to return True for nodes that exist. It doesn't
# matter if the data is stale. Sure, it's possible that removing a
# node from Ironic will cause this method to return True until
# the next call to 'get_available_nodes', but there shouldn't
# be much harm. There's already somewhat of a race.
if not self.node_cache:
# Empty cache, try to populate it.
self._refresh_cache()
if nodename in self.node_cache:
return True
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
ironicclient = client_wrapper.IronicClientWrapper()
try:
ironicclient.call("node.get", nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_cache(self):
ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there are no more values to be returned.
node_list = ironicclient.call('node.list', detail=True, limit=0)
node_cache = {}
for node in node_list:
node_cache[node.uuid] = node
self.node_cache = node_cache
self.node_cache_time = time.time()
def get_available_nodes(self, refresh=False):
"""Returns the UUIDs of all nodes in the Ironic inventory.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of UUIDs
"""
# NOTE(jroll) we refresh the cache every time this is called
# because it needs to happen in the resource tracker
# periodic task. This task doesn't pass refresh=True,
# unfortunately.
self._refresh_cache()
node_uuids = list(self.node_cache.keys())
LOG.debug("Returning %(num_nodes)s available node(s)",
dict(num_nodes=len(node_uuids)))
return node_uuids
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: the UUID of the node.
:returns: a dictionary describing resources.
"""
# NOTE(comstud): We can cheat and use caching here. This method is
# only called from a periodic task and right after the above
# get_available_nodes() call is called.
if not self.node_cache:
# Well, it's also called from init_host(), so if we have empty
# cache, let's try to populate it.
self._refresh_cache()
cache_age = time.time() - self.node_cache_time
if nodename in self.node_cache:
LOG.debug("Using cache for node %(node)s, age: %(age)s",
{'node': nodename, 'age': cache_age})
node = self.node_cache[nodename]
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
ironicclient = client_wrapper.IronicClientWrapper()
node = ironicclient.call("node.get", nodename)
return self._node_resource(node)
def get_info(self, instance):
"""Get the current state and resource usage for this instance.
If the instance is not found this method returns an InstanceInfo
with state NOSTATE and all resources == 0.
:param instance: the instance object.
:returns: a InstanceInfo object
"""
ironicclient = client_wrapper.IronicClientWrapper()
try:
node = _validate_instance_and_node(ironicclient, instance)
except exception.InstanceNotFound:
return hardware.InstanceInfo(
state=map_power_state(ironic_states.NOSTATE))
memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
if memory_kib == 0:
LOG.warning(_LW("Warning, memory usage is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': instance.node})
num_cpu = node.properties.get('cpus', 0)
if num_cpu == 0:
LOG.warning(_LW("Warning, number of cpus is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': instance.node})
return hardware.InstanceInfo(state=map_power_state(node.power_state),
max_mem_kb=memory_kib,
mem_kb=memory_kib,
num_cpu=num_cpu)
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def macs_for_instance(self, instance):
"""List the MAC addresses of an instance.
List of MAC addresses for the node which this instance is
associated with.
:param instance: the instance object.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
ironicclient = client_wrapper.IronicClientWrapper()
try:
node = ironicclient.call("node.get", instance.node)
except ironic.exc.NotFound:
return None
ports = ironicclient.call("node.list_ports", node.uuid)
return set([p.address for p in ports])
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
flavor=None):
"""Deploy an instance.
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image dict returned by nova.image.glance
that defines the image from which to boot this instance.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param flavor: The flavor for the instance to be spawned.
"""
# The compute manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.get('node')
if not node_uuid:
raise ironic.exc.BadRequest(
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
ironicclient = client_wrapper.IronicClientWrapper()
node = ironicclient.call("node.get", node_uuid)
flavor = objects.Flavor.get_by_id(context,
instance.instance_type_id)
self._add_driver_fields(node, instance, image_meta, flavor)
# NOTE(Shrews): The default ephemeral device needs to be set for
# services (like cloud-init) that depend on it being returned by the
# metadata server. Addresses bug https://launchpad.net/bugs/1324286.
if flavor['ephemeral_gb']:
instance.default_ephemeral_device = '/dev/sda1'
instance.save()
# validate we are ready to do the deploy
validate_chk = ironicclient.call("node.validate", node_uuid)
if not validate_chk.deploy or not validate_chk.power:
# something is wrong. undo what we have done
self._cleanup_deploy(context, node, instance, network_info,
flavor=flavor)
raise exception.ValidationError(_(
"Ironic node: %(id)s failed to validate."
" (deploy: %(deploy)s, power: %(power)s)")
% {'id': node.uuid,
'deploy': validate_chk.deploy,
'power': validate_chk.power})
# prepare for the deploy
try:
self._plug_vifs(node, instance, network_info)
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
self._cleanup_deploy(context, node, instance, network_info,
flavor=flavor)
# trigger the node deploy
try:
ironicclient.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s"),
{'inst': instance.uuid,
'reason': six.text_type(e)})
LOG.error(msg)
self._cleanup_deploy(context, node, instance, network_info,
flavor=flavor)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
ironicclient, instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error deploying instance %(instance)s on "
"baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
self.destroy(context, instance, network_info)
def _unprovision(self, ironicclient, instance, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
try:
ironicclient.call("node.set_provision_state", node.uuid, "deleted")
except Exception as e:
# if the node is already in a deprovisioned state, continue
# This should be fixed in Ironic.
# TODO(deva): This exception should be added to
# python-ironicclient and matched directly,
# rather than via __name__.
if getattr(e, '__name__', None) != 'InstanceDeployFailure':
raise
# using a dict because this is modified in the local method
data = {'tries': 0}
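# (A mutable container is used because the nested function below could not
# rebind a plain integer from the enclosing scope on Python 2, which lacks
# the 'nonlocal' statement.)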
def _wait_for_provision_state():
node = _validate_instance_and_node(ironicclient, instance)
if not node.provision_state:
LOG.debug("Ironic node %(node)s is now unprovisioned",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if data['tries'] >= CONF.ironic.api_max_retries:
msg = (_("Error destroying the instance on node %(node)s. "
"Provision state still '%(state)s'.")
% {'state': node.provision_state,
'node': node.uuid})
LOG.error(msg)
raise exception.NovaException(msg)
else:
data['tries'] += 1
_log_ironic_polling('unprovision', node, instance)
# wait for the state transition to finish
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def destroy(self, context, instance, network_info,
block_device_info=None, destroy_disks=True, migrate_data=None):
"""Destroy the specified instance, if it can be found.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param destroy_disks: Indicates if disks should be
destroyed. Ignored by this driver.
:param migrate_data: implementation specific params.
Ignored by this driver.
"""
ironicclient = client_wrapper.IronicClientWrapper()
try:
node = _validate_instance_and_node(ironicclient, instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance.uuid)
# NOTE(deva): if nova.compute.ComputeManager._delete_instance()
# is called on a non-existing instance, the only way
# to delete it is to return from this method
# without raising any exceptions.
return
if node.provision_state in (ironic_states.ACTIVE,
ironic_states.DEPLOYFAIL,
ironic_states.ERROR,
ironic_states.DEPLOYWAIT):
self._unprovision(ironicclient, instance, node)
self._cleanup_deploy(context, node, instance, network_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
NOTE: Ironic does not support soft-off, so this method
always performs a hard-reboot.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param reboot_type: Either a HARD or SOFT reboot. Ignored by
this driver.
:param block_device_info: Info pertaining to attached volumes.
Ignored by this driver.
:param bad_volumes_callback: Function to handle any bad volumes
encountered. Ignored by this driver.
"""
ironicclient = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(ironicclient, instance)
ironicclient.call("node.set_power_state", node.uuid, 'reboot')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
ironicclient, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
NOTE: Ironic does not support soft-off, so this method ignores
timeout and retry_interval parameters.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param instance: The instance object.
:param timeout: time to wait for node to shutdown. Ignored by
this driver.
:param retry_interval: How often to signal node while waiting
for it to shutdown. Ignored by this driver.
"""
ironicclient = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(ironicclient, instance)
ironicclient.call("node.set_power_state", node.uuid, 'off')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
ironicclient, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
ironicclient = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(ironicclient, instance)
ironicclient.call("node.set_power_state", node.uuid, 'on')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
ironicclient, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store.
Invoked when security group rules are updated.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store.
Invoked when instances are added/removed to a security group.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
"""Triggers a firewall update based on database changes."""
self.firewall_driver.refresh_provider_fw_rules()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
:param instance: The instance object.
"""
self.firewall_driver.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Set up filtering rules.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance.uuid,
'network_info': network_info_str})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
ironicclient = client_wrapper.IronicClientWrapper()
ports = ironicclient.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.VirtualInterfacePlugException(_(
"Ironic node: %(id)s virtual to physical interface count"
" missmatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': node.uuid,
'vif_count': len(network_info),
'pif_count': len(ports)})
if len(network_info) > 0:
# not needed if no VIFs are defined
for vif, pif in zip(network_info, ports):
# attach what neutron needs directly to the port
port_id = unicode(vif['id'])
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
ironicclient.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance.uuid,
'network_info': network_info_str})
if network_info and len(network_info) > 0:
ironicclient = client_wrapper.IronicClientWrapper()
ports = ironicclient.call("node.list_ports", node.uuid,
detail=True)
# not needed if no VIFs are defined
for vif, pif in zip(network_info, ports):
if 'vif_port_id' in pif.extra:
# port.update expects a list of JSON-patch operations, not a bare dict
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
ironicclient.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
ironicclient = client_wrapper.IronicClientWrapper()
node = ironicclient.call("node.get", instance.node)
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
ironicclient = client_wrapper.IronicClientWrapper()
node = ironicclient.call("node.get", instance.node)
self._unplug_vifs(node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
preserve the ephemeral partition. We cannot call spawn() from
here because it will attempt to set the instance_uuid value
again, which is not allowed by the Ironic API. It also requires
the instance to not have an 'active' provision state, but we
cannot safely change that. Given that, we implement only the
portions of spawn() we need within rebuild().
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image object returned by nova.image.glance
that defines the image from which to boot this instance. Ignored
by this driver.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param bdms: block-device-mappings to use for rebuild. Ignored
by this driver.
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param network_info: Instance network information. Ignored by
this driver.
:param recreate: Boolean value; if True the instance is
recreated on a new hypervisor - all the cleanup of old state is
skipped. Ignored by this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
"""
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = instance.node
ironicclient = client_wrapper.IronicClientWrapper()
node = ironicclient.call("node.get", node_uuid)
flavor = objects.Flavor.get_by_id(context,
instance.instance_type_id)
self._add_driver_fields(node, instance, image_meta, flavor,
preserve_ephemeral)
# Trigger the node rebuild/redeploy.
try:
ironicclient.call("node.set_provision_state",
node_uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
msg = (_("Failed to request Ironic to rebuild instance "
"%(inst)s: %(reason)s") % {'inst': instance.uuid,
'reason': six.text_type(e)})
raise exception.InstanceDeployFailure(msg)
# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
ironicclient, instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
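# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the driver above): reboot(), power_off(),
# power_on() and rebuild() all follow the same call-then-poll pattern --
# issue a single Ironic request, then wait on a FixedIntervalLoopingCall
# until the node reaches the expected state.  The standalone helper below
# mimics that pattern in plain Python; ``client``, the "node.get" call and
# the provision_state attribute are hypothetical stand-ins for the Ironic
# client wrapper used above.
import time


def _example_wait_for_state(client, node_uuid, target_state,
                            interval=2.0, timeout=120.0):
    """Poll ``client`` until the node reports ``target_state`` or time out."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        node = client.call("node.get", node_uuid)
        if node.provision_state == target_state:
            return node
        time.sleep(interval)
    raise RuntimeError("node %s did not reach state %s within %s seconds"
                       % (node_uuid, target_state, timeout))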
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import re
import mock
from oslo.vmware import exceptions as vexc
from oslo.vmware import pbm
from nova import context
from nova import exception
from nova.network import model as network_model
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import vm_util
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
stubs.set_stubs(self.stubs)
vm_util.vm_refs_cache_reset()
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
def fake_call_method(*args):
if "get_dynamic_properties" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
vcpus = 32
else:
vcpus = 16
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'vcpus': vcpus, 'mem': mem_info}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
def test_get_resize_spec(self):
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
fake_instance)
expected = """{'memoryMB': 2048,
'numCPUs': 2,
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
fake.Datastore(),
"/tmp/foo.iso",
200, 0)
expected = """{
'deviceChange': [
{
'device': {
'connectable': {
'allowGuestControl': False,
'startConnected': True,
'connected': True,
'obj_name': 'ns0: VirtualDeviceConnectInfo'
},
'backing': {
'datastore': {
"summary.maintenanceMode": "normal",
"summary.type": "VMFS",
"summary.accessible":true,
"summary.name": "fake-ds",
"summary.capacity": 1099511627776,
"summary.freeSpace": 536870912000,
"browser": ""
},
'fileName': '/tmp/foo.iso',
'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
},
'controllerKey': 200,
'unitNumber': 0,
'key': -1,
'obj_name': 'ns0: VirtualCdrom'
},
'operation': 'add',
'obj_name': 'ns0: VirtualDeviceConfigSpec'
}
],
'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="lsiLogicsas")
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def test_paravirtual_controller_spec(self):
# Test controller spec returned for paraVirtual adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="paraVirtual")
self.assertEqual("ns0:ParaVirtualSCSIController",
config_spec.device.obj_name)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
return devices
def test_get_vmdk_path(self):
uuid = '00000000-0000-0000-0000-000000000000'
filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
devices = self._vmdk_path_and_adapter_type_devices(filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=devices):
instance = {'uuid': uuid}
vmdk_path = vm_util.get_vmdk_path(session, None, instance)
self.assertEqual(filename, vmdk_path)
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] test_file.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(vmdk_info[0], filename)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(n_filename, vmdk_info[0])
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertIsNone(vmdk_info[0])
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is the same for LSI-SAS, LSILogic
# and ParaVirtual
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
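# (unit number 7 is reserved for the SCSI controller itself, so with disks
# occupying units 0-6 the next free slot is 8, as asserted below)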
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'lsiLogic')
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def _test_get_vnc_config_spec(self, port):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port)
return result
def test_get_vnc_config_spec(self):
result = self._test_get_vnc_config_spec(7)
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
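# The ten fake VMs above occupy VNC ports 5900-5909, so with a large
# vnc_port_total the next free port is 5910, while a range of only 10
# ports is exhausted (see the two tests below).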
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(
fake.FakeObjectRetrievalSession(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
self.assertEqual({}, refs)
def test_get_all_cluster_refs_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertEqual(1, len(refs))
def test_get_all_cluster_refs_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertEqual({}, refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def test_get_vm_create_spec(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
extra_specs = vm_util.ExtraSpecs()
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'version': None,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_allocations(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
cpu_limits = vm_util.CpuLimits(cpu_limit=7,
cpu_reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'version': None,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'reservation': 6,
'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_limit(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
cpu_limits = vm_util.CpuLimits(cpu_limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'version': None,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
cpu_limits = vm_util.CpuLimits(cpu_shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'version': None,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'high',
'shares': 0,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share_custom(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
cpu_limits = vm_util.CpuLimits(cpu_shares_level='custom',
cpu_shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'version': None,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'custom',
'shares': 1948,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_create_vm(self):
method_list = ['CreateVM_Task', 'get_dynamic_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateVM_Task'):
return 'fake_create_vm_task'
elif (expected_method == 'get_dynamic_property'):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
else:
self.fail('Should not get here....')
def fake_wait_for_task(self, *args):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
session = fake.FakeSession()
fake_instance = mock.MagicMock()
fake_call_mock = mock.Mock(side_effect=fake_call_method)
fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
with contextlib.nested(
mock.patch.object(session, '_wait_for_task',
fake_wait_mock),
mock.patch.object(session, '_call_method',
fake_call_mock)
) as (wait_for_task, call_method):
vm_ref = vm_util.create_vm(
session,
fake_instance,
'fake_vm_folder',
'fake_config_spec',
'fake_res_pool_ref')
self.assertEqual('fake_vm_ref', vm_ref)
call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
'fake_vm_folder', config='fake_config_spec',
pool='fake_res_pool_ref')
wait_for_task.assert_called_once_with('fake_create_vm_task')
@mock.patch.object(vm_util.LOG, 'warning')
def test_create_vm_invalid_guestid(self, mock_log_warn):
"""Ensure we warn when create_vm() fails after we passed an
unrecognised guestId
"""
found = [False]
def fake_log_warn(msg, values):
if not isinstance(values, dict):
return
if values.get('ostype') == 'invalid_os_type':
found[0] = True
mock_log_warn.side_effect = fake_log_warn
instance_values = {'id': 7, 'name': 'fake-name',
'uuid': uuidutils.generate_uuid(),
'vcpus': 2, 'memory_mb': 2048}
instance = fake_instance.fake_instance_obj(
context.RequestContext('fake', 'fake', is_admin=False),
**instance_values)
session = driver.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
instance, instance.name, 'fake-datastore', [],
vm_util.ExtraSpecs(),
os_type='invalid_os_type')
self.assertRaises(vexc.VMwareDriverException,
vm_util.create_vm, session, instance, 'folder',
config_spec, 'res-pool')
self.assertTrue(found[0])
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance)
fake_get_vm_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=vexc.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with contextlib.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest')
fake_call_method.assert_called_once_with(
session.vim,
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_get_values(self):
objects = self._create_fake_vm_objects()
query = vm_util.get_values_from_object_properties(
fake.FakeObjectRetrievalSession(objects), objects)
self.assertEqual('poweredOn', query['runtime.powerState'])
self.assertEqual('guestToolsRunning',
query['summary.guest.toolsRunningStatus'])
self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
def test_reconfigure_vm(self):
session = fake.FakeSession()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
def test_get_network_attach_config_spec_opaque(self):
vif_info = {'network_name': 'br-int',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name':'ns0:OptionValue'}],
'deviceChange': [
{'device': {
'macAddress':'00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl':True,
'startConnected': True,
'connected': True,
'obj_name':'ns0:VirtualDeviceConnectInfo'},
'backing': {
'opaqueNetworkType': 'opaque',
'opaqueNetworkId': 'fake-network-id',
'obj_name': '%(card)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_dvs(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [
{'device': {'macAddress': '00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl': True,
'startConnected': True,
'connected': True,
'obj_name': 'ns0:VirtualDeviceConnectInfo'},
'backing': {
'port': {
'portgroupKey': 'fake-group',
'switchUuid': 'fake-network-id',
'obj_name': '%(obj_name_port)s'},
'obj_name': '%(obj_name_backing)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
'obj_name_backing': backing,
'obj_name_port': port}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_detach_config_spec(self):
result = vm_util.get_network_detach_config_spec(
fake.FakeFactory(), 'fake-device', 2)
expected = """{
'extraConfig': [{'value': 'free',
'key': 'nvp.iface-id.2',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [{'device': 'fake-device',
'operation': 'remove',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance)
fake_get_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=vexc.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
def test_get_vm_create_spec_updated_hw_version(self):
extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08')
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
extra_specs=extra_specs)
self.assertEqual('vmx-08', result.version)
def test_vm_create_spec_with_profile_spec(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
datastore = ds_util.Datastore('fake-ds-ref', 'fake-ds-name')
extra_specs = vm_util.ExtraSpecs()
create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
datastore.name, [],
extra_specs,
profile_spec='fake_profile_spec')
self.assertEqual(['fake_profile_spec'], create_spec.vmProfile)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_get_storage_profile_spec(self, mock_retrieve_profile_id):
fake_profile_id = fake.DataObject()
fake_profile_id.uniqueId = 'fake_unique_id'
mock_retrieve_profile_id.return_value = fake_profile_id
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertEqual('ns0:VirtualMachineDefinedProfileSpec',
profile_spec.obj_name)
self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_storage_spec_empty_profile(self, mock_retrieve_profile_id):
mock_retrieve_profile_id.return_value = None
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertIsNone(profile_spec)
def test_get_ephemeral_name(self):
filename = vm_util.get_ephemeral_name(0)
self.assertEqual('ephemeral_0.vmdk', filename)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
# VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
# setUp causes object initialisation to fail. Not mocking in tests results
# in vim calls not using FakeVim.
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
self.session = driver.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
self.host_ref = fake._db_content['HostSystem'].keys()[0]
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_ref_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(self.host_ref, ret)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
host = fake._get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(host.name, ret)
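# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite or of vm_util): a minimal
# standalone approximation of the propset_dict() behaviour asserted in
# VMwareVMUtilTestCase.test_propset_dict_simple/_complex above.  The real
# implementation may differ; this only mirrors what those assertions require.
def _example_propset_dict(propset):
    """Flatten DynamicProperty-like objects into a plain name -> val dict."""
    if propset is None:
        return {}
    return dict((prop.name, prop.val) for prop in propset)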
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the setup module."""
import logging
import os
import shutil
import tempfile
import unittest
from apache_beam.io.filesystems import FileSystems
from apache_beam.runners.dataflow.internal import dependency
from apache_beam.runners.dataflow.internal import names
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class SetupTest(unittest.TestCase):
def update_options(self, options):
setup_options = options.view_as(SetupOptions)
setup_options.sdk_location = ''
google_cloud_options = options.view_as(GoogleCloudOptions)
if google_cloud_options.temp_location is None:
google_cloud_options.temp_location = google_cloud_options.staging_location
def create_temp_file(self, path, contents):
with open(path, 'w') as f:
f.write(contents)
return f.name
def populate_requirements_cache(self, requirements_file, cache_dir):
_ = requirements_file
self.create_temp_file(os.path.join(cache_dir, 'abc.txt'), 'nothing')
self.create_temp_file(os.path.join(cache_dir, 'def.txt'), 'nothing')
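# Note: stage_job_resources() accepts this helper through its
# populate_requirements_cache argument (see the requirements-file tests
# below), so the two fake cache entries created here are what get staged
# instead of anything downloaded by pip.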
def test_no_staging_location(self):
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(PipelineOptions())
self.assertEqual('The --staging_location option must be specified.',
cm.exception.message)
def test_no_temp_location(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.staging_location = staging_dir
self.update_options(options)
google_cloud_options.temp_location = None
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertEqual('The --temp_location option must be specified.',
cm.exception.message)
def test_no_main_session(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
options.view_as(SetupOptions).save_main_session = False
self.update_options(options)
self.assertEqual(
[],
dependency.stage_job_resources(options))
def test_with_main_session(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
options.view_as(SetupOptions).save_main_session = True
self.update_options(options)
self.assertEqual(
[names.PICKLED_MAIN_SESSION_FILE],
dependency.stage_job_resources(options))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, names.PICKLED_MAIN_SESSION_FILE)))
def test_default_resources(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
self.assertEqual(
[],
dependency.stage_job_resources(options))
def test_with_requirements_file(self):
try:
staging_dir = tempfile.mkdtemp()
requirements_cache_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_cache = requirements_cache_dir
options.view_as(SetupOptions).requirements_file = os.path.join(
source_dir, dependency.REQUIREMENTS_FILE)
self.create_temp_file(
os.path.join(source_dir, dependency.REQUIREMENTS_FILE), 'nothing')
self.assertEqual(
sorted([dependency.REQUIREMENTS_FILE,
'abc.txt', 'def.txt']),
sorted(dependency.stage_job_resources(
options,
populate_requirements_cache=self.populate_requirements_cache)))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.REQUIREMENTS_FILE)))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'abc.txt')))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'def.txt')))
finally:
shutil.rmtree(staging_dir)
shutil.rmtree(requirements_cache_dir)
shutil.rmtree(source_dir)
def test_requirements_file_not_present(self):
staging_dir = tempfile.mkdtemp()
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_file = 'nosuchfile'
dependency.stage_job_resources(
options, populate_requirements_cache=self.populate_requirements_cache)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--requirements_file command line option.' % 'nosuchfile')
def test_with_requirements_file_and_cache(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).requirements_file = os.path.join(
source_dir, dependency.REQUIREMENTS_FILE)
options.view_as(SetupOptions).requirements_cache = os.path.join(
tempfile.gettempdir(), 'alternative-cache-dir')
self.create_temp_file(
os.path.join(source_dir, dependency.REQUIREMENTS_FILE), 'nothing')
self.assertEqual(
sorted([dependency.REQUIREMENTS_FILE,
'abc.txt', 'def.txt']),
sorted(dependency.stage_job_resources(
options,
populate_requirements_cache=self.populate_requirements_cache)))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.REQUIREMENTS_FILE)))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'abc.txt')))
self.assertTrue(os.path.isfile(os.path.join(staging_dir, 'def.txt')))
def test_with_setup_file(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'setup.py'), 'notused')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = os.path.join(
source_dir, 'setup.py')
self.assertEqual(
[dependency.WORKFLOW_TARBALL_FILE],
dependency.stage_job_resources(
options,
# We replace the build setup command because a realistic one would
# require the setuptools package to be installed. Note that we can't
# use "touch" here to create the expected output tarball file, since
# touch is not available on Windows, so we invoke python to produce
# equivalent behavior.
build_setup_args=[
'python', '-c', 'open(__import__("sys").argv[1], "a")',
os.path.join(source_dir, dependency.WORKFLOW_TARBALL_FILE)],
temp_dir=source_dir))
self.assertTrue(
os.path.isfile(
os.path.join(staging_dir, dependency.WORKFLOW_TARBALL_FILE)))
def test_setup_file_not_present(self):
staging_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = 'nosuchfile'
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--setup_file command line option.' % 'nosuchfile')
def test_setup_file_not_named_setup_dot_py(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).setup_file = (
os.path.join(source_dir, 'xyz-setup.py'))
self.create_temp_file(
os.path.join(source_dir, 'xyz-setup.py'), 'notused')
with self.assertRaises(RuntimeError) as cm:
dependency.stage_job_resources(options)
self.assertTrue(
cm.exception.message.startswith(
'The --setup_file option expects the full path to a file named '
'setup.py instead of '))
def override_file_copy(self, expected_from_path, expected_to_dir):
def file_copy(from_path, to_path):
if not from_path.endswith(names.PICKLED_MAIN_SESSION_FILE):
self.assertEqual(expected_from_path, from_path)
self.assertEqual(FileSystems.join(expected_to_dir,
names.DATAFLOW_SDK_TARBALL_FILE),
to_path)
if from_path.startswith('gs://') or to_path.startswith('gs://'):
logging.info('Faking file_copy(%s, %s)', from_path, to_path)
else:
shutil.copyfile(from_path, to_path)
dependency._dependency_file_copy = file_copy
def override_file_download(self, expected_from_url, expected_to_folder):
def file_download(from_url, _):
self.assertEqual(expected_from_url, from_url)
tarball_path = os.path.join(expected_to_folder, 'sdk-tarball')
with open(tarball_path, 'w') as f:
f.write('Some contents.')
return tarball_path
dependency._dependency_file_download = file_download
return os.path.join(expected_to_folder, 'sdk-tarball')
def override_pypi_download(self, expected_from_url, expected_to_folder):
def pypi_download(_):
tarball_path = os.path.join(expected_to_folder, 'sdk-tarball')
with open(tarball_path, 'w') as f:
f.write('Some contents.')
return tarball_path
dependency._download_pypi_sdk_package = pypi_download
return os.path.join(expected_to_folder, 'sdk-tarball')
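# Note: the three override_* helpers above replace module-level hooks in the
# dependency module (_dependency_file_copy, _dependency_file_download and
# _download_pypi_sdk_package), so the sdk_location tests below exercise the
# staging logic without touching GCS or PyPI.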
def test_sdk_location_default(self):
staging_dir = tempfile.mkdtemp()
expected_from_url = 'pypi'
expected_from_path = self.override_pypi_download(
expected_from_url, staging_dir)
self.override_file_copy(expected_from_path, staging_dir)
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = 'default'
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(
options,
file_copy=dependency._dependency_file_copy))
def test_sdk_location_local(self):
staging_dir = tempfile.mkdtemp()
sdk_location = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(
sdk_location,
names.DATAFLOW_SDK_TARBALL_FILE),
'contents')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(options))
tarball_path = os.path.join(
staging_dir, names.DATAFLOW_SDK_TARBALL_FILE)
with open(tarball_path) as f:
self.assertEqual(f.read(), 'contents')
def test_sdk_location_local_not_present(self):
staging_dir = tempfile.mkdtemp()
sdk_location = 'nosuchdir'
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
dependency.stage_job_resources(options)
self.assertEqual(
'The file "%s" cannot be found. Its '
'location was specified by the --sdk_location command-line option.' %
sdk_location,
cm.exception.message)
def test_sdk_location_gcs(self):
staging_dir = tempfile.mkdtemp()
sdk_location = 'gs://my-gcs-bucket/tarball.tar.gz'
self.override_file_copy(sdk_location, staging_dir)
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).sdk_location = sdk_location
self.assertEqual(
[names.DATAFLOW_SDK_TARBALL_FILE],
dependency.stage_job_resources(options))
def test_with_extra_packages(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'abc.tar.gz'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'xyz.tar.gz'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'xyz2.tar'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, 'whl.whl'), 'nothing')
self.create_temp_file(
os.path.join(source_dir, dependency.EXTRA_PACKAGES_FILE), 'nothing')
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = [
os.path.join(source_dir, 'abc.tar.gz'),
os.path.join(source_dir, 'xyz.tar.gz'),
os.path.join(source_dir, 'xyz2.tar'),
os.path.join(source_dir, 'whl.whl'),
'gs://my-gcs-bucket/gcs.tar.gz']
gcs_copied_files = []
def file_copy(from_path, to_path):
if from_path.startswith('gs://'):
gcs_copied_files.append(from_path)
_, from_name = os.path.split(from_path)
self.create_temp_file(os.path.join(to_path, from_name), 'nothing')
logging.info('Fake copied GCS file: %s to %s', from_path, to_path)
elif to_path.startswith('gs://'):
logging.info('Faking file_copy(%s, %s)', from_path, to_path)
else:
shutil.copyfile(from_path, to_path)
dependency._dependency_file_copy = file_copy
self.assertEqual(
['abc.tar.gz', 'xyz.tar.gz', 'xyz2.tar', 'whl.whl', 'gcs.tar.gz',
dependency.EXTRA_PACKAGES_FILE],
dependency.stage_job_resources(options))
with open(os.path.join(staging_dir, dependency.EXTRA_PACKAGES_FILE)) as f:
self.assertEqual(['abc.tar.gz\n', 'xyz.tar.gz\n', 'xyz2.tar\n',
'whl.whl\n', 'gcs.tar.gz\n'], f.readlines())
self.assertEqual(['gs://my-gcs-bucket/gcs.tar.gz'], gcs_copied_files)
def test_with_extra_packages_missing_files(self):
staging_dir = tempfile.mkdtemp()
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = ['nosuchfile.tar.gz']
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The file %s cannot be found. It was specified in the '
'--extra_packages command line option.' % 'nosuchfile.tar.gz')
def test_with_extra_packages_invalid_file_name(self):
staging_dir = tempfile.mkdtemp()
source_dir = tempfile.mkdtemp()
self.create_temp_file(
os.path.join(source_dir, 'abc.tgz'), 'nothing')
with self.assertRaises(RuntimeError) as cm:
options = PipelineOptions()
options.view_as(GoogleCloudOptions).staging_location = staging_dir
self.update_options(options)
options.view_as(SetupOptions).extra_packages = [
os.path.join(source_dir, 'abc.tgz')]
dependency.stage_job_resources(options)
self.assertEqual(
cm.exception.message,
'The --extra_package option expects a full path ending with ".tar" or '
'".tar.gz" instead of %s' % os.path.join(source_dir, 'abc.tgz'))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
# coding: utf-8
import inspect
from . import registry
from .exceptions import processing, DocumentStep
from .fields import BaseField, DocumentField, DictField
from .roles import DEFAULT_ROLE, Var, Scope, all_, construct_matcher, Resolvable, Resolution
from .resolutionscope import ResolutionScope, EMPTY_SCOPE
from ._compat import iteritems, iterkeys, with_metaclass, OrderedDict, Prepareable
def _set_owner_to_document_fields(cls):
for field in cls.walk(through_document_fields=False, visited_documents=set([cls])):
if isinstance(field, DocumentField):
field.owner_cls = cls
# INHERITANCE CONSTANTS AND MAPPING
INLINE = 'inline' # default inheritance mode
ALL_OF = 'all_of'
ANY_OF = 'any_of'
ONE_OF = 'one_of'
_INHERITANCE_MODES = {
INLINE: 'allOf', # used in the case that an inline class inherits from document bases
ALL_OF: 'allOf',
ANY_OF: 'anyOf',
ONE_OF: 'oneOf'
}
class Options(object):
"""
A container for options.
    All the arguments are the same and work exactly as for :class:`.fields.DictField`,
    except ``properties`` (which is automatically populated with the document fields)
    and the following:
:param definition_id:
A unique string to be used as a key for this document in the "definitions"
schema section. If not specified, will be generated from module and class names.
:type definition_id: str or :class:`.Resolvable`
:param str schema_uri:
        A URI of the JSON Schema meta-schema.
:param roles_to_propagate:
A matcher. If it returns ``True`` for a role, it will be passed to nested
documents.
:type roles_to_propagate: callable, string or iterable
:param str inheritance_mode:
An :ref:`inheritance mode <inheritance>`: one of :data:`INLINE` (default),
:data:`ALL_OF`, :data:`ANY_OF`, or :data:`ONE_OF`
.. versionadded:: 0.1.4
"""
def __init__(self, additional_properties=False, pattern_properties=None,
min_properties=None, max_properties=None,
title=None, description=None,
default=None, enum=None,
id='', schema_uri='http://json-schema.org/draft-04/schema#',
definition_id=None, roles_to_propagate=None,
inheritance_mode=INLINE):
self.pattern_properties = pattern_properties
self.additional_properties = additional_properties
self.min_properties = min_properties
self.max_properties = max_properties
self.title = title
self.description = description
self.default = default
self.enum = enum
self.id = id
self.schema_uri = schema_uri
self.definition_id = definition_id
self.roles_to_propagate = construct_matcher(roles_to_propagate or all_)
if inheritance_mode not in _INHERITANCE_MODES:
raise ValueError(
'Unknown inheritance mode: {0!r}. '
'Must be one of the following: {1!r}'.format(
inheritance_mode,
                    sorted(_INHERITANCE_MODES)
)
)
self.inheritance_mode = inheritance_mode
class DocumentBackend(DictField):
def _get_property_key(self, prop, field):
return prop if field.name is None else field.name
def resolve_and_iter_properties(self, role=DEFAULT_ROLE):
for name, field in iteritems(self.properties):
field = field.resolve(role).value
if isinstance(field, BaseField):
yield name, field
class DocumentMeta(with_metaclass(Prepareable, type)):
"""
A metaclass for :class:`~.Document`. It's responsible for collecting
    options, fields and scopes, registering the document in the registry, making
it the owner of nested :class:`document fields <.DocumentField>` s and so on.
"""
options_container = Options
"""
A class to be used by :meth:`~.DocumentMeta.create_options`.
Must be a subclass of :class:`~.Options`.
"""
@classmethod
def __prepare__(mcs, name, bases):
return OrderedDict()
def __new__(mcs, name, bases, attrs):
options_data = mcs.collect_options(bases, attrs)
options = mcs.create_options(options_data)
if options.inheritance_mode == INLINE:
fields = mcs.collect_fields(bases, attrs)
parent_documents = set()
for base in bases:
if issubclass(base, Document) and base is not Document:
parent_documents.update(base._parent_documents)
else:
fields = mcs.collect_fields([], attrs)
parent_documents = [base for base in bases
if issubclass(base, Document) and base is not Document]
attrs['_fields'] = fields
attrs['_parent_documents'] = sorted(parent_documents, key=lambda d: d.get_definition_id())
attrs['_options'] = options
attrs['_backend'] = DocumentBackend(
properties=fields,
pattern_properties=options.pattern_properties,
additional_properties=options.additional_properties,
min_properties=options.min_properties,
max_properties=options.max_properties,
title=options.title,
description=options.description,
enum=options.enum,
default=options.default,
id=options.id,
)
klass = type.__new__(mcs, name, bases, attrs)
registry.put_document(klass.__name__, klass, module=klass.__module__)
_set_owner_to_document_fields(klass)
return klass
@classmethod
def collect_fields(mcs, bases, attrs):
"""
Collects fields from the current class and its parent classes.
:rtype: a dictionary mapping field names to fields
"""
fields = OrderedDict()
# fields from parent classes:
for base in reversed(bases):
if hasattr(base, '_fields'):
fields.update(base._fields)
to_be_replaced = object()
# and from the current class:
pre_fields = OrderedDict()
scopes = []
for key, value in iteritems(attrs):
if isinstance(value, (BaseField, Resolvable)):
pre_fields[key] = value
elif isinstance(value, Scope):
scopes.append(value)
for scope_key in iterkeys(value.__fields__):
pre_fields[scope_key] = to_be_replaced
for name, field in iteritems(pre_fields):
if field is to_be_replaced:
values = []
for scope in scopes:
if name in scope.__fields__:
values.append((scope.__matcher__, scope.__fields__[name]))
fields[name] = Var(values)
else:
fields[name] = field
return fields
@classmethod
def collect_options(mcs, bases, attrs):
"""
Collects options from the current class and its parent classes.
:returns: a dictionary of options
"""
options = {}
# options from parent classes:
for base in reversed(bases):
if hasattr(base, '_options'):
for key, value in inspect.getmembers(base._options):
if not key.startswith('_') and value is not None:
options[key] = value
# options from the current class:
if 'Options' in attrs:
for key, value in inspect.getmembers(attrs['Options']):
if not key.startswith('_') and value is not None:
# HACK HACK HACK
if inspect.ismethod(value) and value.im_self is None:
value = value.im_func
options[key] = value
return options
@classmethod
def create_options(cls, options):
"""
Wraps ``options`` into a container class
(see :attr:`~.DocumentMeta.options_container`).
:param options: a dictionary of options
:return: an instance of :attr:`~.DocumentMeta.options_container`
"""
return cls.options_container(**options)
class Document(with_metaclass(DocumentMeta)):
"""A document. Can be thought as a kind of :class:`.fields.DictField`, which
properties are defined by the fields and scopes added to the document class.
It can be tuned using special ``Options`` attribute (see :class:`.Options`
for available settings)::
class User(Document):
class Options(object):
title = 'User'
description = 'A person who uses a computer or network service.'
login = StringField(required=True)
.. note::
A subclass inherits options of its parent documents.
"""
@classmethod
def is_recursive(cls, role=DEFAULT_ROLE):
"""Returns ``True`` if there is a :class:`.DocumentField`-references cycle
that contains ``cls``.
:param str role: A current role.
"""
for field in cls.resolve_and_walk(through_document_fields=True,
role=role, visited_documents=set([cls])):
if isinstance(field, DocumentField):
if field.document_cls == cls:
return True
return False
@classmethod
def get_definition_id(cls, role=DEFAULT_ROLE):
"""Returns a unique string to be used as a key for this document
in the ``"definitions"`` schema section.
"""
definition_id = cls._options.definition_id
if isinstance(definition_id, Resolvable):
definition_id = definition_id.resolve(role).value
return definition_id or '{0}.{1}'.format(cls.__module__, cls.__name__)
@classmethod
def resolve_field(cls, field, role=DEFAULT_ROLE):
"""Resolves a field with the name ``field`` using ``role``.
:raises: :class:`AttributeError`
"""
properties = cls._backend.properties
if field in properties:
return properties[field].resolve(role)
else:
return Resolution(None, role)
@classmethod
def resolve_and_iter_fields(cls, role=DEFAULT_ROLE):
"""Resolves each resolvable attribute of a document using the specified role
and yields a tuple of (attribute name, field) in case the result is a JSL field.
.. versionchanged:: 0.2
            The method has been changed to iterate only over fields that are attached as attributes,
            and to yield tuples instead of plain :class:`.BaseField` instances.
:rtype: iterable of (str, :class:`.BaseField`)
"""
return cls._backend.resolve_and_iter_properties(role=role)
@classmethod
def resolve_and_walk(cls, role=DEFAULT_ROLE, through_document_fields=False,
visited_documents=frozenset()):
"""The same as :meth:`.walk`, but :class:`resolvables <.Resolvable>` are
resolved using ``role``.
"""
fields = cls._backend.resolve_and_walk(
role=role, through_document_fields=through_document_fields,
visited_documents=visited_documents)
        next(fields)  # we don't want to yield the backend field itself
return fields
@classmethod
def iter_fields(cls):
"""Iterates over the fields of the document, resolving its
:class:`resolvables <.Resolvable>` to all possible values.
"""
return cls._backend.iter_fields()
@classmethod
def walk(cls, through_document_fields=False, visited_documents=frozenset()):
"""
Iterates recursively over the fields of the document, resolving
        occurring :class:`resolvables <.Resolvable>` to all their possible values.
Visits fields in a DFS order.
:param bool through_document_fields:
If ``True``, walks through nested :class:`.DocumentField` fields.
:param set visited_documents:
Keeps track of visited :class:`documents <.Document>` to avoid infinite
            recursion when ``through_document_fields`` is ``True``.
:returns: iterable of :class:`.BaseField`
"""
fields = cls._backend.walk(through_document_fields=through_document_fields,
visited_documents=visited_documents)
        next(fields)  # we don't want to yield the backend field itself
return fields
@classmethod
def get_schema(cls, role=DEFAULT_ROLE, ordered=False):
"""Returns a JSON schema (draft v4) of the document.
:param str role: A role.
:param bool ordered:
If ``True``, the resulting schema dictionary is ordered. Fields are
listed in the order they are added to the class. Schema properties are
also ordered in a sensible and consistent way, making the schema more
human-readable.
:raises: :class:`.SchemaGenerationException`
:rtype: dict or OrderedDict
"""
definitions, schema = cls.get_definitions_and_schema(
role=role, ordered=ordered,
res_scope=ResolutionScope(base=cls._options.id, current=cls._options.id)
)
rv = OrderedDict() if ordered else {}
if cls._options.id:
rv['id'] = cls._options.id
if cls._options.schema_uri is not None:
rv['$schema'] = cls._options.schema_uri
if definitions:
rv['definitions'] = definitions
rv.update(schema)
return rv
@classmethod
def get_definitions_and_schema(cls, role=DEFAULT_ROLE, res_scope=EMPTY_SCOPE,
ordered=False, ref_documents=None):
"""Returns a tuple of two elements.
The second element is a JSON schema of the document, and the first is
a dictionary that contains definitions that are referenced from the schema.
:param str role: A role.
:param bool ordered:
If ``True``, the resulting schema dictionary is ordered. Fields are
listed in the order they are added to the class. Schema properties are
also ordered in a sensible and consistent way, making the schema more
human-readable.
:param res_scope:
The current resolution scope.
:type res_scope: :class:`~.ResolutionScope`
:param set ref_documents:
If subclass of :class:`.Document` is in this set, all :class:`.DocumentField` s
pointing to it will be resolved as a reference: ``{"$ref": "#/definitions/..."}``.
Note: resulting definitions will not contain schema for this document.
:raises: :class:`~.SchemaGenerationException`
        :rtype: (dict or OrderedDict, dict or OrderedDict)
"""
is_recursive = cls.is_recursive(role=role)
if is_recursive:
ref_documents = set(ref_documents) if ref_documents else set()
ref_documents.add(cls)
res_scope = res_scope.replace(output=res_scope.base)
with processing(DocumentStep(cls, role=role)):
definitions, schema = cls._backend.get_definitions_and_schema(
role=role, res_scope=res_scope, ordered=ordered, ref_documents=ref_documents)
if cls._parent_documents:
mode = _INHERITANCE_MODES[cls._options.inheritance_mode]
contents = []
for parent_document in cls._parent_documents:
parent_definitions, parent_schema = parent_document.get_definitions_and_schema(
role=role, res_scope=res_scope, ordered=ordered, ref_documents=ref_documents)
parent_definition_id = parent_document.get_definition_id()
definitions.update(parent_definitions)
definitions[parent_definition_id] = parent_schema
contents.append(res_scope.create_ref(parent_definition_id))
contents.append(schema)
schema = {mode: contents}
if is_recursive:
definition_id = cls.get_definition_id()
definitions[definition_id] = schema
schema = res_scope.create_ref(definition_id)
if ordered:
definitions = OrderedDict(sorted(definitions.items()))
return definitions, schema
# Remove Document itself from registry
registry.remove_document(Document.__name__, module=Document.__module__)
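# --- Usage sketch (added for illustration, not part of the original module). ---
# A minimal example of defining a document and generating its JSON schema; it
# assumes that ``StringField`` is importable from ``.fields``, as in the public
# jsl API referenced by the ``Document`` docstring above:
#
#     from .fields import StringField
#
#     class User(Document):
#         class Options(object):
#             title = 'User'
#             definition_id = 'user'
#
#         login = StringField(required=True)
#
#     schema = User.get_schema(ordered=True)
#     # schema['properties']['login'] -> {'type': 'string'}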
|
|
#! /usr/bin/env python
#
# example2.py -- Simple, configurable FITS viewer.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os
import logging
from ginga import AstroImage, colors
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.misc import log
from ginga.web.pgw import Widgets, Viewers, PgMain
class FitsViewer(object):
def __init__(self, logger, window):
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
self.top = window
self.top.add_callback('closed', self.closed)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(1)
fi = Viewers.CanvasView(logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.set_zoom_algorithm('rate')
fi.set_zoomrate(1.4)
fi.show_pan_mark(True)
fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue')
canvas.setSurface(fi)
self.canvas = canvas
# add canvas to view
private_canvas = fi.get_canvas()
private_canvas.add(canvas)
canvas.ui_setActive(True)
canvas.register_for_cursor_drawing(fi)
self.drawtypes = canvas.get_drawtypes()
self.drawtypes.sort()
# add a color bar
private_canvas.add(self.dc.ColorBar(side='bottom', offset=10))
# add little mode indicator that shows modal states in
# the corner
private_canvas.add(self.dc.ModeIndicator(corner='ur', fontsize=14))
# little hack necessary to get correct operation of the mode indicator
# in all circumstances
bm = fi.get_bindmap()
bm.add_callback('mode-set', lambda *args: fi.redraw(whence=3))
fi.set_desired_size(512, 512)
w = Viewers.GingaViewer(viewer=fi)
vbox.add_widget(w, stretch=1)
self.readout = Widgets.Label("")
vbox.add_widget(self.readout, stretch=0)
hbox = Widgets.HBox()
hbox.set_border_width(2)
wdrawtype = Widgets.ComboBox()
for name in self.drawtypes:
wdrawtype.append_text(name)
index = self.drawtypes.index('rectangle')
wdrawtype.set_index(index)
wdrawtype.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawtype = wdrawtype
wdrawcolor = Widgets.ComboBox()
for name in self.drawcolors:
wdrawcolor.append_text(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.set_index(index)
wdrawcolor.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawcolor = wdrawcolor
wfill = Widgets.CheckBox("Fill")
wfill.add_callback('activated', lambda w, tf: self.set_drawparams())
self.wfill = wfill
walpha = Widgets.SpinBox(dtype=float)
walpha.set_limits(0.0, 1.0, incr_value=0.1)
walpha.set_value(1.0)
walpha.set_decimals(2)
walpha.add_callback('value-changed', lambda w, val: self.set_drawparams())
self.walpha = walpha
wclear = Widgets.Button("Clear Canvas")
wclear.add_callback('activated', lambda w: self.clear_canvas())
## wopen = Widgets.Button("Open File")
## wopen.add_callback('activated', lambda w: self.open_file())
## wquit = Widgets.Button("Quit")
## wquit.add_callback('activated', lambda w: self.quit())
hbox.add_widget(Widgets.Label(''), stretch=1)
for w in (wdrawtype, wdrawcolor, wfill,
Widgets.Label('Alpha:'), walpha, wclear):
hbox.add_widget(w, stretch=0)
vbox.add_widget(hbox, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Draw")
btn1.set_state(mode == 'draw')
btn1.add_callback('activated', lambda w, val: self.set_mode_cb('draw', val))
btn1.set_tooltip("Choose this to draw on the canvas")
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Edit", group=btn1)
btn2.set_state(mode == 'edit')
btn2.add_callback('activated', lambda w, val: self.set_mode_cb('edit', val))
btn2.set_tooltip("Choose this to edit things on the canvas")
hbox.add_widget(btn2)
btn3 = Widgets.CheckBox("I'm using a trackpad")
btn3.add_callback('activated', lambda w, tf: self.use_trackpad_cb(tf))
hbox.add_widget(btn3)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
self.top.set_widget(vbox)
def set_drawparams(self):
index = self.wdrawtype.get_index()
kind = self.drawtypes[index]
index = self.wdrawcolor.get_index()
fill = self.wfill.get_state()
alpha = self.walpha.get_value()
params = { 'color': self.drawcolors[index],
'alpha': alpha,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.deleteAllObjects()
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.top.set_title(filepath)
def open_file(self):
res = Widgets.FileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
self.load_file(fileName)
def motion(self, viewer, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = viewer.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
        # Calculate WCS RA/Dec
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warn("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def set_mode_cb(self, mode, tf):
self.logger.info("canvas mode changed (%s) %s" % (mode, tf))
        if tf is not False:
self.canvas.set_draw_mode(mode)
return True
def use_trackpad_cb(self, state):
settings = self.fitsimage.get_bindings().get_settings()
val = 1.0
if state:
val = 0.1
settings.set(scroll_zoom_acceleration=val)
def closed(self, w):
self.logger.info("Top window closed.")
self.top = None
sys.exit()
def quit(self, *args):
self.readout.set_text("Quitting!")
self.logger.info("Attempting to shut down the application...")
        if self.top is not None:
self.top.close()
sys.exit()
def main(options, args):
logger = log.get_logger("example2", options=options)
if options.use_opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warn("Error using opencv: %s" % str(e))
base_url = "http://%s:%d/app" % (options.host, options.port)
# establish our widget application
app = Widgets.Application(logger=logger, base_url=base_url)
# web server/connection machinery
server = PgMain.PgMain(logger=logger, app=app,
host=options.host, port=options.port)
# create top level window
window = app.make_window("Ginga web example2")
# our own viewer object, customized with methods (see above)
viewer = FitsViewer(logger, window)
server.add_callback('shutdown', viewer.quit)
#window.resize(700, 540)
if len(args) > 0:
viewer.load_file(args[0])
#window.show()
#window.raise_()
try:
server.mainloop()
except KeyboardInterrupt:
logger.info("Terminating viewer...")
window.close()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--host", dest="host", metavar="HOST",
default='localhost',
help="Listen on HOST for connections")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=logging.INFO,
help="Set logging level to LEVEL")
optprs.add_option("--opencv", dest="use_opencv", default=False,
action="store_true",
help="Use OpenCv acceleration")
optprs.add_option("--port", dest="port", metavar="PORT",
type=int, default=9909,
help="Listen on PORT for connections")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
|
|
import sys
import numpy as np
import pdb
class DependencyDecoder():
'''
Dependency decoder class
'''
def __init__(self):
self.verbose = False
def parse_marginals_nonproj(self, scores):
'''
Compute marginals and the log-partition function using the matrix-tree theorem
'''
nr, nc = np.shape(scores)
if nr != nc:
raise ValueError("scores must be a squared matrix with nw+1 rows")
return []
nw = nr - 1;
s = np.matrix(scores)
lap = np.matrix(np.zeros((nw+1, nw+1)))
for m in range(1, nw + 1):
d = 0.0
for h in range(0, nw + 1):
if m != h:
d += np.exp(s[h,m])
lap[h,m] = -np.exp(s[h,m])
lap[m,m] = d
r = lap[0,1:]
minor = lap[1:,1:]
#logZ = np.linalg.slogdet(minor)[1]
logZ = np.log(np.linalg.det(minor))
invmin = np.linalg.inv(minor)
marginals = np.zeros((nw+1, nw+1))
for m in range(1, nw + 1):
marginals[0,m] = np.exp(s[0,m]) * invmin[m-1,m-1]
for h in range(1, nw + 1):
if m != h:
marginals[h,m] = np.exp(s[h,m]) * (invmin[m-1,m-1] - invmin[m-1,h-1])
return marginals, logZ
def parse_proj(self, scores):
'''
Parse using Eisner's algorithm.
'''
###########################
# Solution to Exercise 4.3.6
nr, nc = np.shape(scores)
if nr != nc:
raise ValueError("scores must be a squared matrix with nw+1 rows")
return []
N = nr - 1 # Number of words (excluding root).
# Initialize CKY table.
complete = np.zeros([N+1, N+1, 2]) # s, t, direction (right=1).
incomplete = np.zeros([N+1, N+1, 2]) # s, t, direction (right=1).
complete_backtrack = -np.ones([N+1, N+1, 2], dtype=int) # s, t, direction (right=1).
incomplete_backtrack = -np.ones([N+1, N+1, 2], dtype=int) # s, t, direction (right=1).
incomplete[0, :, 0] -= np.inf
# Loop from smaller items to larger items.
for k in xrange(1,N+1):
for s in xrange(N-k+1):
t = s+k
# First, create incomplete items.
# left tree
incomplete_vals0 = complete[s, s:t, 1] + complete[(s+1):(t+1), t, 0] + scores[t, s]
incomplete[s, t, 0] = np.max(incomplete_vals0)
incomplete_backtrack[s, t, 0] = s + np.argmax(incomplete_vals0)
# right tree
incomplete_vals1 = complete[s, s:t, 1] + complete[(s+1):(t+1), t, 0] + scores[s, t]
incomplete[s, t, 1] = np.max(incomplete_vals1)
incomplete_backtrack[s, t, 1] = s + np.argmax(incomplete_vals1)
# Second, create complete items.
# left tree
complete_vals0 = complete[s, s:t, 0] + incomplete[s:t, t, 0]
complete[s, t, 0] = np.max(complete_vals0)
complete_backtrack[s, t, 0] = s + np.argmax(complete_vals0)
# right tree
complete_vals1 = incomplete[s, (s+1):(t+1), 1] + complete[(s+1):(t+1), t, 1]
complete[s, t, 1] = np.max(complete_vals1)
complete_backtrack[s, t, 1] = s + 1 + np.argmax(complete_vals1)
value = complete[0][N][1]
heads = -np.ones(N+1, dtype=int)
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, 0, N, 1, 1, heads)
value_proj = 0.0
for m in xrange(1,N+1):
h = heads[m]
value_proj += scores[h,m]
return heads
# End of solution to Exercise 4.3.6
###########################
def backtrack_eisner(self, incomplete_backtrack, complete_backtrack, s, t, direction, complete, heads):
'''
Backtracking step in Eisner's algorithm.
- incomplete_backtrack is a (NW+1)-by-(NW+1) numpy array indexed by a start position,
an end position, and a direction flag (0 means left, 1 means right). This array contains
the arg-maxes of each step in the Eisner algorithm when building *incomplete* spans.
- complete_backtrack is a (NW+1)-by-(NW+1) numpy array indexed by a start position,
an end position, and a direction flag (0 means left, 1 means right). This array contains
the arg-maxes of each step in the Eisner algorithm when building *complete* spans.
- s is the current start of the span
- t is the current end of the span
- direction is 0 (left attachment) or 1 (right attachment)
- complete is 1 if the current span is complete, and 0 otherwise
- heads is a (NW+1)-sized numpy array of integers which is a placeholder for storing the
head of each word.
'''
if s == t:
return
if complete:
r = complete_backtrack[s][t][direction]
if direction == 0:
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 0, 1, heads)
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, r, t, 0, 0, heads)
return
else:
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 0, heads)
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, r, t, 1, 1, heads)
return
else:
r = incomplete_backtrack[s][t][direction]
if direction == 0:
heads[s] = t
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 1, heads)
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, r+1, t, 0, 1, heads)
return
else:
heads[t] = s
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 1, heads)
self.backtrack_eisner(incomplete_backtrack, complete_backtrack, r+1, t, 0, 1, heads)
return
def parse_nonproj(self, scores):
'''
Parse using Chu-Liu-Edmonds algorithm.
'''
nr, nc = np.shape(scores)
if nr != nc:
raise ValueError("scores must be a squared matrix with nw+1 rows")
return []
nw = nr - 1;
curr_nodes = np.ones(nw+1, int)
reps = []
oldI = -np.ones((nw+1, nw+1), int)
oldO = -np.ones((nw+1, nw+1), int)
for i in range(0, nw+1):
reps.append({i : 0})
for j in range(0, nw+1):
oldI[i,j] = i
oldO[i,j] = j
if i==j or j==0:
continue
if self.verbose:
print "Starting C-L-E...\n"
scores_copy = scores.copy()
final_edges = self.chu_liu_edmonds(scores_copy, curr_nodes, oldI, oldO, {}, reps)
heads = np.zeros(nw+1, int)
heads[0] = -1
for key in final_edges.keys():
ch = key
pr = final_edges[key]
heads[ch] = pr
return heads
def chu_liu_edmonds(self, scores, curr_nodes, oldI, oldO, final_edges, reps):
'''
Chu-Liu-Edmonds algorithm
'''
# need to construct for each node list of nodes they represent (here only!)
        nw = np.size(curr_nodes) - 1
# create best graph
par = -np.ones(nw+1, int)
for m in range(1, nw+1):
# only interested in current nodes
if 0 == curr_nodes[m]:
continue
max_score = scores[0,m]
par[m] = 0
for h in range(nw+1):
if m == h:
continue
if 0 == curr_nodes[h]:
continue
if scores[h,m] > max_score:
max_score = scores[h,m]
par[m] = h
if self.verbose:
print "After init\n"
for m in range(0, nw+1):
if 0 < curr_nodes[m]:
print "{0}|{1} ".format(par[m],m)
print "\n"
# find a cycle
cycles = []
added = np.zeros(nw+1, int)
for m in range(0, nw+1):
if np.size(cycles) > 0:
break
if added[m] or 0 == curr_nodes[m]:
continue
added[m] = 1
cycle = {m : 0}
l = m
while True:
if par[l] == -1:
added[l] = 1
break
if par[l] in cycle:
cycle = {}
lorg = par[l]
cycle[lorg] = par[lorg]
added[lorg] = 1
l1 = par[lorg]
while l1 != lorg:
cycle[l1] = par[l1]
added[l1] = True
l1 = par[l1]
cycles.append(cycle)
break
cycle[l] = 0
l = par[l]
if added[l] and (l not in cycle):
break
added[l] = 1
# get all edges and return them
if np.size(cycles) == 0:
for m in range(0, nw+1):
if 0 == curr_nodes[m]:
continue
if par[m] != -1:
pr = oldI[par[m], m]
ch = oldO[par[m], m]
final_edges[ch] = pr
else:
final_edges[0] = -1
return final_edges
max_cyc = 0
wh_cyc = 0
for cycle in cycles:
if np.size(cycle.keys()) > max_cyc:
max_cyc = np.size(cycle.keys())
wh_cyc = cycle
cycle = wh_cyc
cyc_nodes = cycle.keys()
rep = cyc_nodes[0]
if self.verbose:
print "Found Cycle\n"
for node in cyc_nodes:
print "{0} ".format(node)
print "\n"
cyc_weight = 0.0
for node in cyc_nodes:
cyc_weight += scores[par[node], node]
for i in range(0, nw+1):
if 0 == curr_nodes[i] or (i in cycle):
continue
max1 = -np.inf
wh1 = -1
max2 = -np.inf
wh2 = -1
for j1 in cyc_nodes:
if scores[j1, i] > max1:
max1 = scores[j1, i]
wh1 = j1
# cycle weight + new edge - removal of old
scr = cyc_weight + scores[i, j1] - scores[par[j1], j1]
if scr > max2:
max2 = scr
wh2 = j1
scores[rep, i] = max1
oldI[rep, i] = oldI[wh1, i]
oldO[rep, i] = oldO[wh1, i]
scores[i, rep] = max2
oldO[i, rep] = oldO[i, wh2]
oldI[i, rep] = oldI[i, wh2]
rep_cons = []
for i in range(0, np.size(cyc_nodes)):
rep_con = {}
keys = sorted(reps[int(cyc_nodes[i])].keys())
if self.verbose:
print "{0}: ".format(cyc_nodes[i])
for key in keys:
rep_con[key] = 0
if self.verbose:
print "{0} ".format(key)
rep_cons.append(rep_con)
if self.verbose:
print "\n"
# don't consider not representative nodes
# these nodes have been folded
for node in cyc_nodes[1:]:
curr_nodes[node] = 0
for key in reps[int(node)]:
reps[int(rep)][key] = 0
self.chu_liu_edmonds(scores, curr_nodes, oldI, oldO, final_edges, reps)
# check each node in cycle, if one of its representatives
# is a key in the final_edges, it is the one.
if self.verbose:
print final_edges
        wh = -1
found = False
for i in range(0, np.size(rep_cons)):
if found:
break
for key in rep_cons[i]:
if found:
break
if key in final_edges:
wh = cyc_nodes[i]
found = True
l = par[wh]
while l != wh:
ch = oldO[par[l]][l]
pr = oldI[par[l]][l]
final_edges[ch] = pr
l = par[l]
return final_edges
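# --- Usage sketch (added for illustration, not part of the original module). ---
# Decode a random score matrix for a 3-word sentence; row/column 0 is the
# artificial root node, and scores[h, m] is the score of the arc h -> m.
if __name__ == '__main__':
    np.random.seed(0)
    num_words = 3
    scores = np.random.rand(num_words + 1, num_words + 1)
    decoder = DependencyDecoder()
    print("Projective heads (Eisner): {0}".format(decoder.parse_proj(scores)))
    print("Non-projective heads (Chu-Liu-Edmonds): {0}".format(
        decoder.parse_nonproj(scores)))
    marginals, logZ = decoder.parse_marginals_nonproj(scores)
    print("Log-partition function: {0}".format(logZ))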
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
from keystone.common import utils
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone.token import provider
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
"""External authentication is not applicable."""
pass
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'role_api', 'token_provider_api',
'trust_api')
class Auth(controller.V2Controller):
@controller.v2_deprecated
def ca_cert(self, context, auth=None):
ca_file = open(CONF.signing.ca_certs, 'r')
data = ca_file.read()
ca_file.close()
return data
@controller.v2_deprecated
def signing_cert(self, context, auth=None):
cert_file = open(CONF.signing.certfile, 'r')
data = cert_file.read()
cert_file.close()
return data
@controller.v2_deprecated
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
Accept auth as a dict that looks like::
{
"auth":{
"passwordCredentials":{
"username":"test_user",
"password":"mypass"
},
"tenantName":"customer-x"
}
}
In this case, tenant is optional, if not provided the token will be
considered "unscoped" and can later be used to get a scoped token.
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
"""
if auth is None:
raise exception.ValidationError(attribute='auth',
target='request body')
if "token" in auth:
# Try to authenticate using a token
auth_info = self._authenticate_token(
context, auth)
else:
# Try external authentication
try:
auth_info = self._authenticate_external(
context, auth)
except ExternalAuthNotApplicable:
# Try local authentication
auth_info = self._authenticate_local(
context, auth)
user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info
# Validate that the auth info is valid and nothing is disabled
try:
self.identity_api.assert_user_enabled(
user_id=user_ref['id'], user=user_ref)
if tenant_ref:
self.resource_api.assert_project_enabled(
project_id=tenant_ref['id'], project=tenant_ref)
except AssertionError as e:
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
# NOTE(morganfainberg): Make sure the data is in correct form since it
# might be consumed external to Keystone and this is a v2.0 controller.
# The user_ref is encoded into the auth_token_data which is returned as
# part of the token data. The token provider doesn't care about the
# format.
user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
tenant_ref = self.v3_to_v2_project(tenant_ref)
auth_token_data = self._get_auth_token_data(user_ref,
tenant_ref,
metadata_ref,
expiry,
audit_id)
if tenant_ref:
catalog_ref = self.catalog_api.get_catalog(
user_ref['id'], tenant_ref['id'])
else:
catalog_ref = {}
auth_token_data['id'] = 'placeholder'
if bind:
auth_token_data['bind'] = bind
roles_ref = []
for role_id in metadata_ref.get('roles', []):
role_ref = self.role_api.get_role(role_id)
roles_ref.append(dict(name=role_ref['name']))
(token_id, token_data) = self.token_provider_api.issue_v2_token(
auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
# NOTE(wanghong): We consume a trust use only when we are using trusts
# and have successfully issued a token.
if CONF.trust.enabled and 'trust_id' in auth:
self.trust_api.consume_use(auth['trust_id'])
return token_data
def _restrict_scope(self, token_model_ref):
# A trust token cannot be used to get another token
if token_model_ref.trust_scoped:
raise exception.Forbidden()
if not CONF.token.allow_rescope_scoped_token:
# Do not allow conversion from scoped tokens.
if token_model_ref.project_scoped or token_model_ref.domain_scoped:
raise exception.Forbidden(action=_("rescope a scoped token"))
def _authenticate_token(self, context, auth):
"""Try to authenticate using an already existing token.
        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
"""
if 'token' not in auth:
raise exception.ValidationError(
attribute='token', target='auth')
if "id" not in auth['token']:
raise exception.ValidationError(
attribute="id", target="token")
old_token = auth['token']['id']
if len(old_token) > CONF.max_token_size:
raise exception.ValidationSizeError(attribute='token',
size=CONF.max_token_size)
try:
token_model_ref = token_model.KeystoneToken(
token_id=old_token,
token_data=self.token_provider_api.validate_token(old_token))
except exception.NotFound as e:
raise exception.Unauthorized(e)
wsgi.validate_token_bind(context, token_model_ref)
self._restrict_scope(token_model_ref)
user_id = token_model_ref.user_id
tenant_id = self._get_project_id_from_auth(auth)
if not CONF.trust.enabled and 'trust_id' in auth:
raise exception.Forbidden('Trusts are disabled.')
elif CONF.trust.enabled and 'trust_id' in auth:
try:
trust_ref = self.trust_api.get_trust(auth['trust_id'])
except exception.TrustNotFound:
raise exception.Forbidden()
if user_id != trust_ref['trustee_user_id']:
raise exception.Forbidden()
if (trust_ref['project_id'] and
tenant_id != trust_ref['project_id']):
raise exception.Forbidden()
if ('expires' in trust_ref) and (trust_ref['expires']):
expiry = trust_ref['expires']
if expiry < timeutils.parse_isotime(utils.isotime()):
raise exception.Forbidden()
user_id = trust_ref['trustor_user_id']
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if not trustor_user_ref['enabled']:
raise exception.Forbidden()
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if not trustee_user_ref['enabled']:
raise exception.Forbidden()
if trust_ref['impersonation'] is True:
current_user_ref = trustor_user_ref
else:
current_user_ref = trustee_user_ref
else:
current_user_ref = self.identity_api.get_user(user_id)
metadata_ref = {}
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = token_model_ref.expires
if CONF.trust.enabled and 'trust_id' in auth:
trust_id = auth['trust_id']
trust_roles = []
for role in trust_ref['roles']:
if 'roles' not in metadata_ref:
raise exception.Forbidden()
if role['id'] in metadata_ref['roles']:
trust_roles.append(role['id'])
else:
raise exception.Forbidden()
if 'expiry' in trust_ref and trust_ref['expiry']:
trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
if trust_expiry < expiry:
expiry = trust_expiry
metadata_ref['roles'] = trust_roles
metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
metadata_ref['trust_id'] = trust_id
bind = token_model_ref.bind
audit_id = token_model_ref.audit_chain_id
return (current_user_ref, tenant_ref, metadata_ref, expiry, bind,
audit_id)
def _authenticate_local(self, context, auth):
"""Try to authenticate against the identity backend.
        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
"""
if 'passwordCredentials' not in auth:
raise exception.ValidationError(
attribute='passwordCredentials', target='auth')
if "password" not in auth['passwordCredentials']:
raise exception.ValidationError(
attribute='password', target='passwordCredentials')
password = auth['passwordCredentials']['password']
if password and len(password) > CONF.identity.max_password_length:
raise exception.ValidationSizeError(
attribute='password', size=CONF.identity.max_password_length)
if (not auth['passwordCredentials'].get("userId") and
not auth['passwordCredentials'].get("username")):
raise exception.ValidationError(
attribute='username or userId',
target='passwordCredentials')
user_id = auth['passwordCredentials'].get('userId')
if user_id and len(user_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='userId',
size=CONF.max_param_size)
username = auth['passwordCredentials'].get('username', '')
if username:
if len(username) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='username',
size=CONF.max_param_size)
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
try:
user_ref = self.identity_api.authenticate(
context,
user_id=user_id,
password=password)
except AssertionError as e:
raise exception.Unauthorized(e.args[0])
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
audit_id = None
return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
def _authenticate_external(self, context, auth):
"""Try to authenticate an external user via REMOTE_USER variable.
        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
"""
environment = context.get('environment', {})
if not environment.get('REMOTE_USER'):
raise ExternalAuthNotApplicable()
username = environment['REMOTE_USER']
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
if ('kerberos' in CONF.token.bind and
environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
bind = {'kerberos': username}
audit_id = None
return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
def _get_auth_token_data(self, user, tenant, metadata, expiry, audit_id):
return dict(user=user,
tenant=tenant,
metadata=metadata,
expires=expiry,
parent_audit_id=audit_id)
def _get_project_id_from_auth(self, auth):
"""Extract tenant information from auth dict.
Returns a valid tenant_id if it exists, or None if not specified.
"""
tenant_id = auth.get('tenantId')
if tenant_id and len(tenant_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantId',
size=CONF.max_param_size)
tenant_name = auth.get('tenantName')
if tenant_name and len(tenant_name) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantName',
size=CONF.max_param_size)
if tenant_name:
try:
tenant_ref = self.resource_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
tenant_id = tenant_ref['id']
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
return tenant_id
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
tenant_ref = None
role_list = []
if tenant_id:
try:
tenant_ref = self.resource_api.get_project(tenant_id)
role_list = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
except exception.ProjectNotFound:
msg = _('Project ID not found: %(t_id)s') % {'t_id': tenant_id}
raise exception.Unauthorized(msg)
if not role_list:
msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
msg = msg % {'u_id': user_id, 't_id': tenant_id}
LOG.warning(msg)
raise exception.Unauthorized(msg)
return (tenant_ref, role_list)
def _get_token_ref(self, token_id, belongs_to=None):
"""Returns a token if a valid one exists.
Optionally, limited to a token owned by a specific tenant.
"""
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.token_provider_api.validate_token(token_id))
if belongs_to:
if not token_ref.project_scoped:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
if token_ref.project_id != belongs_to:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
return token_ref
@controller.v2_deprecated
@controller.protected()
def validate_token_head(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Identical to ``validate_token``, except does not return a response.
The code in ``keystone.common.wsgi.render_response`` will remove
the content body.
"""
belongs_to = context['query_string'].get('belongsTo')
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
@controller.protected()
def validate_token(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
        Returns metadata about the token along with any associated roles.
"""
belongs_to = context['query_string'].get('belongsTo')
# TODO(ayoung) validate against revocation API
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
def delete_token(self, context, token_id):
"""Delete a token, effectively invalidating it for authz."""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
self.token_provider_api.revoke_token(token_id)
@controller.v2_deprecated
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if expires and isinstance(expires, datetime.datetime):
t['expires'] = utils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
@controller.v2_deprecated
def endpoints(self, context, token_id):
"""Return a list of endpoints available to the token."""
self.assert_admin(context)
token_ref = self._get_token_ref(token_id)
catalog_ref = None
if token_ref.project_id:
catalog_ref = self.catalog_api.get_catalog(
token_ref.user_id,
token_ref.project_id)
return Auth.format_endpoint_list(catalog_ref)
@classmethod
def format_endpoint_list(cls, catalog_ref):
"""Formats a list of endpoints according to Identity API v2.
The v2.0 API wants an endpoint list to look like::
{
'endpoints': [
{
'id': $endpoint_id,
'name': $SERVICE[name],
'type': $SERVICE,
'tenantId': $tenant_id,
'region': $REGION,
}
],
'endpoints_links': [],
}
"""
if not catalog_ref:
return {}
endpoints = []
for region_name, region_ref in catalog_ref.items():
for service_type, service_ref in region_ref.items():
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
'type': service_type,
'region': region_name,
'publicURL': service_ref.get('publicURL'),
'internalURL': service_ref.get('internalURL'),
'adminURL': service_ref.get('adminURL'),
})
return {'endpoints': endpoints, 'endpoints_links': []}
|
|
"""
Generic utilities for testing txyoga.
"""
from functools import partial
from StringIO import StringIO
from twisted.internet import defer
from twisted.web import http, http_headers, resource, server
from txyoga.serializers import json
BASE_URL = "http://localhost"
correctAcceptHeaders = http_headers.Headers()
correctAcceptHeaders.setRawHeaders("Accept", ["application/json"])
class _FakeRequest(object):
"""
Mimics a twisted.web.server.Request, poorly.
"""
def __init__(self, args=None, body="", method="GET",
prePathURL=BASE_URL, requestHeaders=None):
self.args = args or {}
self.content = StringIO(body)
self._responseContent = StringIO()
self.prePathURL = lambda: prePathURL
# we're always directly aimed at a resource and nobody is doing any
        # postpath-related stuff, so let's just pretend it's always empty...
self.postpath = []
self.code = http.OK
self.requestHeaders = requestHeaders or http_headers.Headers()
self.responseHeaders = http_headers.Headers()
self.method = method
self._finished = False
self._notifiers = []
def write(self, part):
self._responseContent.write(part)
def finish(self):
self._finished = True
self._responseContent.seek(0, 0)
for d in self._notifiers:
d.callback(None)
def notifyFinish(self):
if self._finished:
return defer.succeed(None)
else:
d = defer.Deferred()
self._notifiers.append(d)
return d
def setResponseCode(self, code):
self.code = code
def getHeader(self, name):
# TODO: twisted ticket for inconsistent terminology (name/key)
value = self.requestHeaders.getRawHeaders(name)
if value is not None:
return value[-1]
def setHeader(self, name, value):
self.responseHeaders.setRawHeaders(name, [value])
_FakeDELETERequest = partial(_FakeRequest, method="DELETE")
_FakePOSTRequest = partial(_FakeRequest, method="POST")
_FakePUTRequest = partial(_FakeRequest, method="PUT")
class _BaseCollectionTest(object):
"""
A base class for tests of a collection.
"""
def setUp(self):
self.collection = self.collectionClass()
self.resource = resource.IResource(self.collection)
def addElements(self):
"""
Adds some elements to the collection.
Creates the default elements specified by the ``elementClass`` and
``elementArgs`` class attributes.
"""
for element in [self.elementClass(*a) for a in self.elementArgs]:
self.collection.add(element)
def _makeRequest(self, resource, request):
"""
Makes a request to a particular resource.
"""
self.request = request
result = resource.render(request)
if result is not server.NOT_DONE_YET:
request.write(result)
request.finish()
return request.notifyFinish()
def _decodeResponse(self):
"""
Tries to decode the body of a response.
"""
self.responseContent = json.load(self.request._responseContent)
def _checkContentType(self, expectedContentType="application/json"):
"""
Verifies the content type of a response.
If the type is ``None``, verifies that the header is not passed. This
is intended for cases where an empty response body is expected.
"""
headers = self.request.responseHeaders.getRawHeaders("Content-Type")
if expectedContentType is None:
self.assertEqual(headers, None)
else:
self.assertEqual(headers, [expectedContentType])
def _checkBadRequest(self, expectedCode):
"""
Tests that a failed request has a particular response code, and that
the response content has an error message and some details in it.
"""
self.assertEqual(self.request.code, expectedCode)
self.assertIn("errorMessage", self.responseContent)
self.assertIn("errorDetails", self.responseContent)
def _getResource(self, args=None, headers=None, path=()):
"""
Generalized GET for a particular resource.
"""
headers = headers or correctAcceptHeaders
request = _FakeRequest(args=args, requestHeaders=headers)
resource = self.resource
for childName in path:
resource = resource.getChildWithDefault(childName, request)
d = self._makeRequest(resource, request)
@d.addCallback
def verify(_):
self._checkContentType()
self._decodeResponse()
return d
def getElements(self, args=None, headers=None):
"""
Gets a bunch of elements from a collection.
"""
return self._getResource(args, headers)
def getElement(self, element, args=None, headers=None):
"""
Gets a particular element from a collection.
"""
return self._getResource(args, headers, [element])
def getElementChild(self, element, child, args=None, headers=None):
"""
Gets a child of a particular element from a collection.
"""
return self._getResource(args, headers, [element, child])
def updateElement(self, name, body, headers=None):
"""
Update an element.
For a successful update, the headers should contain a Content-Type.
"""
request = _FakePUTRequest(body=body, requestHeaders=headers)
elementResource = self.resource.getChild(name, request)
return self._makeRequest(elementResource, request)
def deleteElement(self, name):
"""
Delete an element.
"""
request = _FakeDELETERequest()
elementResource = self.resource.getChild(name, request)
return self._makeRequest(elementResource, request)
def createElement(self, name, body, headers=None, method="PUT"):
"""
Create a new element.
"""
if method == "PUT":
return self.updateElement(name, body, headers)
elif method == "POST":
request = _FakePOSTRequest(body=body, requestHeaders=headers)
return self._makeRequest(self.resource, request)
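# --- Usage sketch (added for illustration, not part of the original module). ---
# A concrete test case mixes _BaseCollectionTest into a trial TestCase and
# supplies the ``collectionClass``, ``elementClass`` and ``elementArgs`` class
# attributes it expects; ``BookCollection`` and ``Book`` below are hypothetical
# names used only for illustration:
#
#     from twisted.trial import unittest
#
#     class BookCollectionTest(_BaseCollectionTest, unittest.TestCase):
#         collectionClass = BookCollection
#         elementClass = Book
#         elementArgs = [("dune",), ("neuromancer",)]
#
#         def test_getElements(self):
#             self.addElements()
#             d = self.getElements()
#             d.addCallback(lambda _: self.assertEqual(self.request.code, http.OK))
#             return d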
|
|
'''tzinfo timezone information for Asia/Magadan.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Magadan(DstTzInfo):
'''Asia/Magadan timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Magadan'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1924,5,1,13,56,48),
d(1930,6,20,14,0,0),
d(1981,3,31,13,0,0),
d(1981,9,30,12,0,0),
d(1982,3,31,13,0,0),
d(1982,9,30,12,0,0),
d(1983,3,31,13,0,0),
d(1983,9,30,12,0,0),
d(1984,3,31,13,0,0),
d(1984,9,29,15,0,0),
d(1985,3,30,15,0,0),
d(1985,9,28,15,0,0),
d(1986,3,29,15,0,0),
d(1986,9,27,15,0,0),
d(1987,3,28,15,0,0),
d(1987,9,26,15,0,0),
d(1988,3,26,15,0,0),
d(1988,9,24,15,0,0),
d(1989,3,25,15,0,0),
d(1989,9,23,15,0,0),
d(1990,3,24,15,0,0),
d(1990,9,29,15,0,0),
d(1991,3,30,15,0,0),
d(1991,9,28,16,0,0),
d(1992,1,18,16,0,0),
d(1992,3,28,12,0,0),
d(1992,9,26,11,0,0),
d(1993,3,27,15,0,0),
d(1993,9,25,15,0,0),
d(1994,3,26,15,0,0),
d(1994,9,24,15,0,0),
d(1995,3,25,15,0,0),
d(1995,9,23,15,0,0),
d(1996,3,30,15,0,0),
d(1996,10,26,15,0,0),
d(1997,3,29,15,0,0),
d(1997,10,25,15,0,0),
d(1998,3,28,15,0,0),
d(1998,10,24,15,0,0),
d(1999,3,27,15,0,0),
d(1999,10,30,15,0,0),
d(2000,3,25,15,0,0),
d(2000,10,28,15,0,0),
d(2001,3,24,15,0,0),
d(2001,10,27,15,0,0),
d(2002,3,30,15,0,0),
d(2002,10,26,15,0,0),
d(2003,3,29,15,0,0),
d(2003,10,25,15,0,0),
d(2004,3,27,15,0,0),
d(2004,10,30,15,0,0),
d(2005,3,26,15,0,0),
d(2005,10,29,15,0,0),
d(2006,3,25,15,0,0),
d(2006,10,28,15,0,0),
d(2007,3,24,15,0,0),
d(2007,10,27,15,0,0),
d(2008,3,29,15,0,0),
d(2008,10,25,15,0,0),
d(2009,3,28,15,0,0),
d(2009,10,24,15,0,0),
d(2010,3,27,15,0,0),
d(2010,10,30,15,0,0),
d(2011,3,26,15,0,0),
d(2011,10,29,15,0,0),
d(2012,3,24,15,0,0),
d(2012,10,27,15,0,0),
d(2013,3,30,15,0,0),
d(2013,10,26,15,0,0),
d(2014,3,29,15,0,0),
d(2014,10,25,15,0,0),
d(2015,3,28,15,0,0),
d(2015,10,24,15,0,0),
d(2016,3,26,15,0,0),
d(2016,10,29,15,0,0),
d(2017,3,25,15,0,0),
d(2017,10,28,15,0,0),
d(2018,3,24,15,0,0),
d(2018,10,27,15,0,0),
d(2019,3,30,15,0,0),
d(2019,10,26,15,0,0),
d(2020,3,28,15,0,0),
d(2020,10,24,15,0,0),
d(2021,3,27,15,0,0),
d(2021,10,30,15,0,0),
d(2022,3,26,15,0,0),
d(2022,10,29,15,0,0),
d(2023,3,25,15,0,0),
d(2023,10,28,15,0,0),
d(2024,3,30,15,0,0),
d(2024,10,26,15,0,0),
d(2025,3,29,15,0,0),
d(2025,10,25,15,0,0),
d(2026,3,28,15,0,0),
d(2026,10,24,15,0,0),
d(2027,3,27,15,0,0),
d(2027,10,30,15,0,0),
d(2028,3,25,15,0,0),
d(2028,10,28,15,0,0),
d(2029,3,24,15,0,0),
d(2029,10,27,15,0,0),
d(2030,3,30,15,0,0),
d(2030,10,26,15,0,0),
d(2031,3,29,15,0,0),
d(2031,10,25,15,0,0),
d(2032,3,27,15,0,0),
d(2032,10,30,15,0,0),
d(2033,3,26,15,0,0),
d(2033,10,29,15,0,0),
d(2034,3,25,15,0,0),
d(2034,10,28,15,0,0),
d(2035,3,24,15,0,0),
d(2035,10,27,15,0,0),
d(2036,3,29,15,0,0),
d(2036,10,25,15,0,0),
d(2037,3,28,15,0,0),
d(2037,10,24,15,0,0),
]
_transition_info = [
i(36180,0,'LMT'),
i(36000,0,'MAGT'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(39600,0,'MAGST'),
i(36000,0,'MAGT'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
i(43200,3600,'MAGST'),
i(39600,0,'MAGT'),
]
Magadan = Magadan()
|
|
## Automatically adapted for numpy.oldnumeric May 17, 2011 by -c
# Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py,v 1.2 2003/10/27 04:41:28 trevorcohn1 Exp $
# To do:
# - make sure variable names are used consistently (fd_list, etc.)
# - remove any confusions about the type of labels (string vs
# immutable)
"""
Classes and interfaces used to classify texts into categories. A
X{category} is a coherent group of texts. This module focuses on
X{single-category text classification}, in which:
    - The set of categories is known.
- The number of categories is finite.
- Each text belongs to exactly one category.
A X{classifier} chooses the most likely category for a given text.
Classifiers can also be used to estimate the probability that a given
text belongs to a category. This module defines the C{ClassifierI}
interface for creating classifiers. Note that classifiers can operate
on any kind of text. For example, classifiers can be used:
- to group documents by topic
- to group words by part of speech
- to group acoustic signals by which phoneme they represent
- to group sentences by their author
Each category is uniquely defined by a X{label}, such as C{'sports'}
or C{'news'}. Labels are typically C{string}s or C{integer}s, but can
be any immutable type. Classified texts are represented by C{Tokens}
whose types are C{LabeledText} objects. A C{LabeledText} consists of
a label and a text.
C{ClassifierTrainerI} is a general interface for classes that build
classifiers from training data.
C{accuracy} and C{log_likelihood} provide simple metrics for
evaluating the performance of a classifier.
@group Data Types: LabeledText
@group Interfaces: ClassifierI, ClassifierTrainerI
@group Evaluation: accuracy, log_likelihood, ConfusionMatrix
@sort: ClassifierI, ClassifierTrainerI
"""
from nltk.token import Token
from nltk.chktype import chktype as _chktype
from nltk.chktype import classeq as _classeq
import math, numpy.oldnumeric as Numeric, types, operator
##//////////////////////////////////////////////////////
## Texts and Labels
##//////////////////////////////////////////////////////
# A text can be any object. Texts are required to be immutable, since
# they are used as the type of a token.
# A label can be any immutable object. Typically, labels are either
# integers or strings.
##//////////////////////////////////////////////////////
## LabeledTexts
##//////////////////////////////////////////////////////
class LabeledText:
"""
A type consisting of a text and a label. A typical example would
be a document labeled with a category, such as \"sports\".
The text and the label are both required to be immutable. Labels
    are usually short strings or integers.
@type _text: (immutable)
@ivar _text: The C{LabeledText}'s text.
@type _label: (immutable)
@ivar _label: The text type's label. This specifies which
category the text belongs to.
"""
def __init__(self, text, label):
"""
        Construct a new C{LabeledText}.
        @param text: The new C{LabeledText}'s text.
        @type text: (immutable)
        @param label: The new C{LabeledText}'s label. This specifies
which category the text belongs to.
@type label: (immutable)
"""
self._text = text
self._label = label
def text(self):
"""
        @return: this C{LabeledText}'s text.
@rtype: (immutable)
"""
return self._text
def label(self):
"""
        @return: this C{LabeledText}'s label.
@rtype: (immutable)
"""
return self._label
def __lt__(self, other):
"""
Raise a C{TypeError}, since C{LabeledText} is not an ordered
type.
@raise TypeError: C{LabeledText} is not an ordered type.
"""
raise TypeError("LabeledText is not an ordered type")
def __le__(self, other):
"""
Raise a C{TypeError}, since C{LabeledText} is not an ordered
type.
@raise TypeError: C{LabeledText} is not an ordered type.
"""
raise TypeError("LabeledText is not an ordered type")
def __gt__(self, other):
"""
Raise a C{TypeError}, since C{LabeledText} is not an ordered
type.
@raise TypeError: C{LabeledText} is not an ordered type.
"""
raise TypeError("LabeledText is not an ordered type")
def __ge__(self, other):
"""
Raise a C{TypeError}, since C{LabeledText} is not an ordered
type.
@raise TypeError: C{LabeledText} is not an ordered type.
"""
raise TypeError("LabeledText is not an ordered type")
def __cmp__(self, other):
"""
        @return: 0 if this C{LabeledText} is equal to C{other}. In
            particular, return 0 iff C{other} is a C{LabeledText},
C{self.text()==other.text()}, and
C{self.label()==other.label()}; return a nonzero number
otherwise.
@rtype: C{int}
@param other: The C{LabeledText} to compare this
C{LabeledText} with.
@type other: C{LabeledText}
"""
        if not _classeq(self, other): return -1
return not (self._text == other._text and
self._label == other._label)
def __hash__(self):
return hash( (self._text, self._label) )
def __repr__(self):
"""
@return: a string representation of this labeled text.
@rtype: C{string}
"""
return "%r/%r" % (self._text, self._label)
##//////////////////////////////////////////////////////
## Classifier Interface
##//////////////////////////////////////////////////////
class ClassifierI:
"""
A processing interface for categorizing texts. The set of
categories used by a classifier must be fixed, and finite. Each
category is uniquely defined by a X{label}, such as C{'sports'} or
C{'news'}. Labels are typically C{string}s or C{integer}s, but
can be any immutable type. Classified texts are represented by
C{Tokens} whose types are C{LabeledText} objects.
Classifiers are required to implement two methods:
- C{classify}: determines which label is most appropriate for a
given text token, and returns a labeled text token with that
label.
- C{labels}: returns the list of category labels that are used
by this classifier.
    Classifiers are also encouraged to implement the following
methods:
- C{distribution}: return a probability distribution that
specifies M{P(label|text)} for a given text token.
- C{prob}: returns M{P(label|text)} for a given labeled text
token.
- C{distribution_dictionary}: Return a dictionary that maps from
labels to probabilities.
- C{distribution_list}: Return a sequence, specifying the
probability of each label.
Classes implementing the ClassifierI interface may choose to only
support certain classes of tokens for input. If a method is
unable to return a correct result because it is given an
unsupported class of token, then it should raise a
NotImplementedError.
Typically, classifier classes encode specific classifier models;
but do not include the algorithms for training the classifiers.
Instead, C{ClassifierTrainer}s are used to generate classifiers
from training data.
@see: C{ClassifierTrainerI}
"""
def labels(self):
"""
@return: the list of category labels used by this classifier.
@rtype: C{list} of (immutable)
"""
raise AssertionError()
def classify(self, unlabeled_token):
"""
Determine which label is most appropriate for the given text
token, and return a C{LabeledText} token constructed from the
given text token and the chosen label.
@return: a C{LabeledText} token whose label is the most
appropriate label for the given token; whose text is the
given token's text; and whose location is the given
token's location.
@rtype: C{Token} with type C{LabeledText}
@param unlabeled_token: The text to be classified.
@type unlabeled_token: C{Token}
"""
raise AssertionError()
def distribution(self, unlabeled_token):
"""
Return a probability distribution indicating the likelihood
that C{unlabeled_token} is a member of each category.
@return: a probability distribution whose samples are
tokens derived from C{unlabeled_token}. The samples
are C{LabeledText} tokens whose text is
C{unlabeled_token}'s text; and whose location is
C{unlabeled_token}'s location. The probability of each
sample indicates the likelihood that the unlabeled token
belongs to each label's category.
@rtype: C{ProbDistI}
@param unlabeled_token: The text to be classified.
@type unlabeled_token: C{Token}
"""
raise NotImplementedError()
def prob(self, labeled_token):
"""
@return: The probability that C{labeled_token}'s text belongs
to the category indicated by C{labeled_token}'s label.
@rtype: C{float}
@param labeled_token: The labeled token for which to generate
a probability estimate.
@type labeled_token: C{Token} with type C{LabeledText}
"""
raise NotImplementedError()
def distribution_dictionary(self, unlabeled_token):
"""
Return a dictionary indicating the likelihood that
C{unlabeled_token} is a member of each category.
@return: a dictionary that maps from each label to the
probability that C{unlabeled_token} is a member of that
label's category.
@rtype: C{dictionary} from (immutable) to C{float}
@param unlabeled_token: The text to be classified.
@type unlabeled_token: C{Token}
"""
raise NotImplementedError()
def distribution_list(self, unlabeled_token):
"""
Return a list indicating the likelihood that
C{unlabeled_token} is a member of each category.
@return: a list of probabilities. The M{i}th element of the
list is the probability that C{unlabeled_text} belongs to
C{labels()[M{i}]}'s category.
@rtype: C{sequence} of C{float}
@param unlabeled_token: The text to be classified.
@type unlabeled_token: C{Token}
"""
raise NotImplementedError()
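# Illustrative sketch (not part of the original module): a minimal class
# satisfying the two required ClassifierI methods by always choosing the same
# label. It only demonstrates the expected call pattern, not a useful model.
class _ConstantClassifier(ClassifierI):
    def __init__(self, label, labels):
        self._label = label
        self._labels = labels
    def labels(self):
        return self._labels
    def classify(self, unlabeled_token):
        return Token(LabeledText(unlabeled_token.type(), self._label),
                     unlabeled_token.loc())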
##//////////////////////////////////////////////////////
## Classifier Trainer Interface
##//////////////////////////////////////////////////////
class ClassifierTrainerI:
"""
A processing interface for constructing new classifiers, using
training data. Classifier trainers must implement one method,
C{train}, which generates a new classifier from a list of training
samples.
"""
def train(self, labeled_tokens, **kwargs):
"""
Train a new classifier, using the given training samples.
@type labeled_tokens: C{list} of (C{Token} with type C{LabeledText})
@param labeled_tokens: A list of correctly labeled texts.
These texts will be used as training samples to construct
new classifiers.
@param kwargs: Keyword arguments.
- C{labels}: The set of possible labels. If none is
given, then the set of all labels attested in the
training data will be used instead. (type=C{list} of
(immutable)).
@return: A new classifier, trained from the given labeled
tokens.
@rtype: C{ClassifierI}
"""
raise AssertionError()
def find_labels(labeled_tokens):
"""
@return: A list of all labels that are attested in the given list
of labeled tokens.
@rtype: C{list} of (immutable)
@param labeled_tokens: The list of labeled tokens from which to
extract labels.
@type labeled_tokens: C{list} of (C{Token} with type C{LabeledText})
"""
assert _chktype(1, labeled_tokens, [Token], (Token,))
labelmap = {}
for token in labeled_tokens:
labelmap[token.type().label()] = 1
return labelmap.keys()
def label_tokens(unlabeled_tokens, label):
"""
@return: a list of labeled tokens, whose text and location
correspond to C{unlabeled_tokens}, and whose labels are
C{label}.
@rtype: C{list} of (C{Token} with type C{LabeledText})
@param unlabeled_tokens: The list of tokens for which a labeled
token list should be created.
@type unlabeled_tokens: C{list} of C{Token}
@param label: The label for the new labeled tokens.
@type label: (immutable)
"""
assert _chktype(1, unlabeled_tokens, [Token], (Token,))
return [Token(LabeledText(tok.type(), label), tok.loc())
for tok in unlabeled_tokens]
##//////////////////////////////////////////////////////
## Evaluation Metrics
##//////////////////////////////////////////////////////
def accuracy(classifier, labeled_tokens):
"""
@rtype: C{float}
@return: the given classifier model's accuracy on the given list
of labeled tokens. This float between zero and one indicates
what proportion of the tokens the model would label correctly.
@param labeled_tokens: The tokens for which the model's
accuracy should be computed.
@type labeled_tokens: C{list} of (C{Token} with type
C{LabeledText})
"""
assert _chktype(1, classifier, ClassifierI)
assert _chktype(2, labeled_tokens, [Token], (Token,))
total = 0
correct = 0
for ltok in labeled_tokens:
utok = Token(ltok.type().text(), ltok.loc())
if classifier.classify(utok) == ltok:
correct += 1
total += 1
return float(correct)/total
def log_likelihood(classifier, labeled_tokens):
"""
Evaluate the log likelihood of the given list of labeled
tokens for the given classifier model. This nonpositive float
gives an indication of how well the classifier models the
data. Values closer to zero indicate that it models it more
accurately.
@rtype: C{float}
@return: The log likelihood of C{labeled_tokens} for the given
classifier model.
@param labeled_tokens: The tokens whose log likelihood should
be computed.
@type labeled_tokens: C{list} of (C{Token} with type
C{LabeledText})
"""
assert _chktype(1, classifier, ClassifierI)
assert _chktype(2, labeled_tokens, [Token], (Token,))
likelihood = 0.0
for ltok in labeled_tokens:
utok = Token(ltok.type().text(), ltok.loc())
label = ltok.type().label()
dist = classifier.distribution_dictionary(utok)
if dist[label] == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
likelihood -= 1e1000
else:
likelihood += math.log(dist[label])
return likelihood / len(labeled_tokens)
class ConfusionMatrix:
def __init__(self, classifier, labeled_tokens):
"""
Entry conf[i][j] is the number of times a document with label i
was given label j.
"""
assert _chktype(1, classifier, ClassifierI)
assert _chktype(2, labeled_tokens, [Token], (Token,))
try: import numpy.oldnumeric as Numeric
        except ImportError: raise ImportError('ConfusionMatrix requires Numeric')
# Extract the labels.
ldict = {}
for ltok in labeled_tokens: ldict[ltok.type().label()] = 1
labels = ldict.keys()
# Construct a label->index dictionary
indices = {}
for i in range(len(labels)): indices[labels[i]] = i
confusion = Numeric.zeros( (len(labels), len(labels)) )
for ltok in labeled_tokens:
utok = Token(ltok.type().text(), ltok.loc())
ctok = classifier.classify(utok)
confusion[indices[ltok.type().label()],
indices[ctok.type().label()]] += 1
self._labels = labels
self._confusion = confusion
self._max_conf = max(Numeric.resize(confusion, (len(labels)**2,)))
def __getitem__(self, index):
assert _chktype(1, index, types.IntType)
return self._confusion[index[0], index[1]]
def __str__(self):
confusion = self._confusion
labels = self._labels
indexlen = len(`len(labels)`)
entrylen = max(indexlen, len(`self._max_conf`))
index_format = '%' + `indexlen` + 'd | '
entry_format = '%' + `entrylen` + 'd '
str = (' '*(indexlen)) + ' | '
for j in range(len(labels)):
str += (entry_format % j)
str += '\n'
str += ('-' * ((entrylen+1) * len(labels) + indexlen + 2)) + '\n'
for i in range(len(labels)):
str += index_format % i
for j in range(len(labels)):
str += entry_format % confusion[i,j]
str += '\n'
return str
def key(self):
labels = self._labels
str = 'Label key: (row = true label; col = classifier label)\n'
indexlen = len(`len(labels)`)
key_format = ' %'+`indexlen`+'d: %s\n'
for i in range(len(labels)):
str += key_format % (i, labels[i])
return str
def cross_validate(trainer, labeled_tokens, n_folds=10, target=None, trace=False):
"""
Perform N-fold cross validation on the given classifier. This divides the
tokens into N equally sized groups (subject to rounding), then performs N
    training and testing passes. Each pass involves training on all but one
    fold and testing on the remaining fold. This way every instance is used
exactly once for testing. The results (predictive accuracy) are averaged
over the N trials. The mean and standard deviation are returned as a
tuple.
"""
assert len(labeled_tokens) >= n_folds
# should randomly reorder labeled_tokens first?
folds = []
n = len(labeled_tokens)
for i in range(n_folds):
start = i * n / n_folds
end = (i + 1) * n / n_folds
folds.append(labeled_tokens[start:end])
if trace:
print 'cross_validate - using %d folds of %d items each approx' \
% (n_folds, len(folds[0]))
accuracies = []
precisions = []
recalls = []
for i in range(n_folds):
training = folds[:]
testing = training[i]
del training[i]
training = reduce(operator.add, training) # flatten
if trace:
print 'cross_validate [%d] - training classifier...' % (i + 1)
import time
start = time.time()
classifier = trainer.train(training)
if trace:
end = time.time()
print 'cross_validate elapsed time %.2f seconds' % (end - start)
print 'cross_validate [%d] - testing classifier...' % (i + 1)
start = end
yes = no = 0
tp = tn = fp = fn = 0
for ltok in testing:
utok = Token(ltok.type().text(), ltok.loc())
if trace >= 2:
print 'cross_validate [%d] - given' % (i + 1), ltok
ctok = classifier.classify(utok)
if trace >= 2:
print 'cross_validate [%d] - classified' % (i + 1),
print ctok.type().label()
if ltok.type().label() == ctok.type().label():
yes += 1
else:
no += 1
if target:
if ltok.type().label() == target:
if ltok.type().label() == ctok.type().label():
tp += 1
else:
fn += 1
else:
if ltok.type().label() == ctok.type().label():
fp += 1
else:
tn += 1
acc = float(yes) / (yes + no)
accuracies.append(acc)
if target:
precision = recall = None
try:
recall = float(tp) / (tp + fn)
recalls.append(recall)
except ZeroDivisionError:
pass
try:
precision = float(tp) / (tp + fp)
precisions.append(precision)
except ZeroDivisionError:
pass
if trace:
end = time.time()
print 'cross_validate elapsed time %.2f seconds' % (end - start)
print 'cross_validate [%d] - accuracy %.3f' % (i + 1, acc)
if target:
print 'cross_validate [%d] - precision %s recall %s' \
% (i + 1, precision, recall)
if trace:
print 'cross_validate - calculating mean and variance'
# find the mean
mean = reduce(operator.add, accuracies) / float(len(accuracies))
if target:
recall = reduce(operator.add, recalls) / float(len(recalls))
if len(precisions) > 0:
precision = reduce(operator.add, precisions) / float(len(precisions))
else:
precision = None
# find the standard deviation
    var = 0
    for i in range(n_folds):
        var += (accuracies[i] - mean) ** 2
    var /= float(n_folds)
    sd = var ** 0.5
if target:
return mean, sd, precision, recall
else:
return mean, sd
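# Illustrative usage sketch (not part of the original module): ten-fold cross
# validation with a hypothetical `trainer` implementing ClassifierTrainerI.
def _cross_validate_example(trainer, labeled_tokens):
    mean, sd = cross_validate(trainer, labeled_tokens, n_folds=10, trace=True)
    print 'cross-validation accuracy: %.3f (sd %.3f)' % (mean, sd)
    return mean, sd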
|
|
# tools for accessing tables from the NASA Exoplanet Archive,
# either by downloading them or by loading local tables
# to-do:
# [] implement filtering by the "where" keyword to the archive
from ..imports import *
class Downloader(Talker):
expiration = 1.0
# anything special to know about reading this file format?
readkw = dict(delimiter='|',
fill_values=[('',np.nan), ('--', np.nan)])
def get(self, remake=False, skip_update=False):
'''
Get the table, downloading it from online if necessary.
If the table is older than some particular threshold,
then ask the user whether or not they want to download.
Parameters
----------
remake : bool
Should we definitely redownload the table?
skip_update : bool
Should we skip checking if the table's out of date?
'''
# if file doesn't exist, download it
if not skip_update:
remake = remake | check_if_needs_updating(self.path,
self.expiration)
# either download a fresh file, or load a local one
if remake:
self.download_fresh()
else:
self.speak(f'Loading local file from {self.path}')
# read the actual file
return ascii.read(self.path, **self.readkw)
def download_fresh(self):
'''
        Download a brand new copy of this table from the Exoplanet Archive
        and save it to `self.path`. The table to fetch is determined by this
        downloader's `url` property, so the method takes no parameters.
'''
self.speak(f'Attempting to freshly download data from \n{self.url}')
# download the file from the URL (10-minute timeout)
temporary_path = download_file(self.url, cache=False, timeout=600)
# copy the file to its new location
shutil.copyfile(temporary_path, self.path)
self.speak(f'Download successful! Saved file to {self.path}')
class ExoplanetArchiveDownloader(Downloader):
    # define the base of all URLs used to access the archive API
base = 'http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?'
# what format do we want for the downloaded table?
format='bar-delimited'
# what columns do we want? (probably easiest to go with everything)
select = '*'
    # what tables are currently supported through this mode of access?
supported_tables = ['exoplanets', 'compositepars']
def __init__(self, table='exoplanets'):#, where='*'):
self.table = table
#self.where = where
@property
def url(self):
'''
        Define the download URL to access the online table.
'''
# create the URL by stitching together keyword pairs
url = (f'{self.base}'
f'table={self.table}'
f'&select={self.select}'
f'&format={self.format}')
return url
@property
def path(self):
'''
Where should the local copy of this file be stored?
'''
return os.path.join(directories['data'],
f'nea-{self.table}.txt')
exoplanets = ExoplanetArchiveDownloader('exoplanets')
composite_exoplanets = ExoplanetArchiveDownloader('compositepars')
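# Illustrative usage sketch (not part of the original module): fetch the
# confirmed-planets table, reusing a local copy if it is fresh enough. With
# table='exoplanets', the URL assembled above takes the form
#   http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=*&format=bar-delimited
def _exoplanet_archive_example():
    table = exoplanets.get(skip_update=True)
    return table.colnames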
class MergedExoplanetArchiveDownloader(ExoplanetArchiveDownloader):
def __init__(self):
self.table = 'merged'
def download_fresh(self):
'''
Download a brand new merged table from the Exoplanet Archive.
This grabs both the `exoplanets` and `compositepars` tables,
and merges them together into one massive table with lots
of columns for options.
'''
self.speak(f'Creating a merged exoplanet table from the NASA Exoplanet Archive.')
# load the individual tables
e = exoplanets.get()
c = composite_exoplanets.get()
# join the two tables together on the planets' names
self.speak('Joining the two Exoplanet Archive tables together.')
self.speak(' (This may take a while. The tables are big!)')
c.rename_column('fpl_name', 'pl_name')
j = join(e, c, keys='pl_name', table_names=['exoplanets', 'composite'])
# tidy up some of the column names
for k in j.colnames:
if '_exoplanets' in k:
j.rename_column(k, k.replace('_exoplanets', ''))
# write the merged table out to a file
self.speak(f'Merge successful! Saving file to {self.path}.')
self.speak(' (This may take a while. The table is big!)')
j.write(self.path,
format='ascii.fixed_width',
bookend=False,
delimiter='|',
overwrite=True)
self.speak('File saved.')
merged_exoplanets = MergedExoplanetArchiveDownloader()
class ExoFOPDownloader(Downloader):
expiration = 0.0
def __init__(self):
self.url = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=pipe'
@property
def path(self):
'''
Where should the local copy of this file be stored?
'''
return os.path.join(directories['data'], 'TOI-exofop.txt')
toi_exofop = ExoFOPDownloader()
class MergedTOIDownloader(ExoFOPDownloader):
'''
    Download the TOIs from the ExoFOP table, but also search the MAST
archive to pull out extra parameters for each star from the TIC.
'''
@property
def path(self):
'''
Where should the local copy of this file be stored?
'''
return os.path.join(directories['data'], 'TOI-merged.txt')
def download_fresh(self):
'''
Download the TOIs from ExoFOP, then search for their entries in
the TIC catalog on the MAST to download more data.
'''
self.speak(f'Creating a merged TOI table from ExoFOP and MAST.')
# download the table of TOIs from the ExoFOP
self.speak('Downloading TOIs from ExoFOP.')
t = toi_exofop.get(remake=True)
# download the TIC entries for these stars
self.speak(f'Searching for {len(t)} stars in the TIC on the MAST.')
# import Catalogs only when we need it
# (otherwise, we'll need the internet to ever run exoatlas)
from astroquery.mast import Catalogs
tic_table = Catalogs.query_criteria(catalog="Tic", ID=np.unique(t['TIC ID']))
# preface all the columns with TIC so we can keep them straight
for k in tic_table.colnames:
tic_table[k].name = f'TIC {k}'
# make sure the indices are integers so we can join on them
tic_table['TIC ID'] = np.array(tic_table['TIC ID']).astype(np.int)
# join the two tables together
self.speak('Joining the TOI table with data from the TIC.')
withtic = join(t, tic_table, 'TIC ID', join_type='left')
# write the merged table out to a file
self.speak(f'Merge successful! Saving file to {self.path}.')
withtic.write(self.path,
format='ascii.fixed_width',
bookend=False,
delimiter='|',
overwrite=True)
self.speak('File saved.')
toi_merged = MergedTOIDownloader()
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import subprocess
import os
import collections
import sys
import warnings
import re
from six.moves import zip
from beets import logging
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path
# Utilities.
class ReplayGainError(Exception):
"""Raised when a local (to a track or an album) error occurs in one
of the backends.
"""
class FatalReplayGainError(Exception):
"""Raised when a fatal error occurs in one of the backends.
"""
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
"""Raised when a fatal error occurs in the GStreamerBackend when
loading the required plugins."""
def call(args):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
try:
return command_output(args)
except subprocess.CalledProcessError as e:
raise ReplayGainError(
u"{0} exited with status {1}".format(args[0], e.returncode)
)
except UnicodeEncodeError:
# Due to a bug in Python 2's subprocess on Windows, Unicode
# filenames can fail to encode on that platform. See:
# http://code.google.com/p/beets/issues/detail?id=499
raise ReplayGainError(u"argument encoding failed")
# Backend base and plumbing classes.
Gain = collections.namedtuple("Gain", "gain peak")
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
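# For illustration (values invented): a single track result looks like
# Gain(gain=-6.5, peak=0.974), and an album result bundles the album-level
# Gain with the per-track Gains, e.g.
# AlbumGain(album_gain=Gain(-7.2, 0.988), track_gains=[Gain(-6.5, 0.974), ...]).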
class Backend(object):
"""An abstract class representing engine for calculating RG values.
"""
def __init__(self, config, log):
"""Initialize the backend with the configuration view for the
plugin.
"""
self._log = log
def compute_track_gain(self, items):
raise NotImplementedError()
def compute_album_gain(self, album):
# TODO: implement album gain in terms of track gain of the
# individual tracks which can be used for any backend.
raise NotImplementedError()
# bs1770gain backend
class Bs1770gainBackend(Backend):
"""bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
its flavors EBU R128, ATSC A/85 and Replaygain 2.0.
"""
def __init__(self, config, log):
super(Bs1770gainBackend, self).__init__(config, log)
config.add({
'chunk_at': 5000,
'method': 'replaygain',
})
self.chunk_at = config['chunk_at'].as_number()
self.method = b'--' + bytes(config['method'].as_str())
cmd = b'bs1770gain'
try:
call([cmd, self.method])
self.command = cmd
except OSError:
raise FatalReplayGainError(
u'Is bs1770gain installed? Is your method in config correct?'
)
if not self.command:
raise FatalReplayGainError(
u'no replaygain command found: install bs1770gain'
)
def compute_track_gain(self, items):
"""Computes the track gain of the given tracks, returns a list
        of Gain objects.
"""
output = self.compute_gain(items, False)
return output
def compute_album_gain(self, album):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = album.items()
output = self.compute_gain(supported_items, True)
if not output:
raise ReplayGainError(u'no output from bs1770gain')
return AlbumGain(output[-1], output[:-1])
def isplitter(self, items, chunk_at):
"""Break an iterable into chunks of at most size `chunk_at`,
generating lists for each chunk.
"""
iterable = iter(items)
while True:
result = []
for i in range(chunk_at):
try:
a = next(iterable)
except StopIteration:
break
else:
result.append(a)
if result:
yield result
else:
break
def compute_gain(self, items, is_album):
"""Computes the track or album gain of a list of items, returns
        a list of Gain objects.
        When computing album gain, the last Gain object returned is
        the album gain.
"""
if len(items) == 0:
return []
albumgaintot = 0.0
albumpeaktot = 0.0
returnchunks = []
# In the case of very large sets of music, we break the tracks
# into smaller chunks and process them one at a time. This
# avoids running out of memory.
if len(items) > self.chunk_at:
i = 0
for chunk in self.isplitter(items, self.chunk_at):
i += 1
returnchunk = self.compute_chunk_gain(chunk, is_album)
albumgaintot += returnchunk[-1].gain
albumpeaktot += returnchunk[-1].peak
returnchunks = returnchunks + returnchunk[0:-1]
returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i))
return returnchunks
else:
return self.compute_chunk_gain(items, is_album)
def compute_chunk_gain(self, items, is_album):
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command.
cmd = [self.command]
cmd = cmd + [self.method]
cmd = cmd + [b'-p']
# Workaround for Windows: the underlying tool fails on paths
# with the \\?\ prefix, so we don't use it here. This
# prevents the backend from working with long paths.
args = cmd + [syspath(i.path, prefix=False) for i in items]
# Invoke the command.
self._log.debug(
u'executing {0}', u' '.join(map(displayable_path, args))
)
output = call(args)
self._log.debug(u'analysis finished: {0}', output)
results = self.parse_tool_output(output,
len(items) + is_album)
self._log.debug(u'{0} items, {1} results', len(items), len(results))
return results
def parse_tool_output(self, text, num_lines):
"""Given the output from bs1770gain, parse the text and
        return a list of Gain tuples
        containing the gain and peak of each analyzed file.
"""
out = []
data = text.decode('utf8', errors='ignore')
regex = re.compile(
u'(\\s{2,2}\\[\\d+\\/\\d+\\].*?|\\[ALBUM\\].*?)'
'(?=\\s{2,2}\\[\\d+\\/\\d+\\]|\\s{2,2}\\[ALBUM\\]'
':|done\\.\\s)', re.DOTALL | re.UNICODE)
results = re.findall(regex, data)
for parts in results[0:num_lines]:
            part = parts.split('\n')
if len(part) == 0:
self._log.debug(u'bad tool output: {0!r}', text)
raise ReplayGainError(u'bs1770gain failed')
try:
song = {
'file': part[0],
'gain': float((part[1].split('/'))[1].split('LU')[0]),
'peak': float(part[2].split('/')[1]),
}
except IndexError:
self._log.info(u'bs1770gain reports (faulty file?): {}', parts)
continue
out.append(Gain(song['gain'], song['peak']))
return out
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
def __init__(self, config, log):
super(CommandBackend, self).__init__(config, log)
config.add({
'command': u"",
'noclip': True,
})
self.command = config["command"].as_str()
if self.command:
# Explicit executable path.
if not os.path.isfile(self.command):
raise FatalReplayGainError(
u'replaygain command does not exist: {0}'.format(
self.command)
)
else:
# Check whether the program is in $PATH.
for cmd in (b'mp3gain', b'aacgain'):
try:
call([cmd, b'-v'])
self.command = cmd
except OSError:
pass
if not self.command:
raise FatalReplayGainError(
u'no replaygain command found: install mp3gain or aacgain'
)
self.noclip = config['noclip'].get(bool)
target_level = config['targetlevel'].as_number()
self.gain_offset = int(target_level - 89)
def compute_track_gain(self, items):
"""Computes the track gain of the given tracks, returns a list
        of Gain objects.
"""
supported_items = list(filter(self.format_supported, items))
output = self.compute_gain(supported_items, False)
return output
def compute_album_gain(self, album):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = list(filter(self.format_supported, album.items()))
if len(supported_items) != len(album.items()):
self._log.debug(u'tracks are of unsupported format')
return AlbumGain(None, [])
output = self.compute_gain(supported_items, True)
return AlbumGain(output[-1], output[:-1])
def format_supported(self, item):
"""Checks whether the given item is supported by the selected tool.
"""
if 'mp3gain' in self.command and item.format != 'MP3':
return False
elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
return False
return True
def compute_gain(self, items, is_album):
"""Computes the track or album gain of a list of items, returns
        a list of Gain objects.
        When computing album gain, the last Gain object returned is
        the album gain.
"""
if len(items) == 0:
self._log.debug(u'no supported tracks to analyze')
return []
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command. The "-o" option makes the output
# easily parseable (tab-delimited). "-s s" forces gain
# recalculation even if tags are already present and disables
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd = [self.command, b'-o', b'-s', b's']
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + [b'-k']
else:
# Disable clipping warning.
cmd = cmd + [b'-c']
cmd = cmd + [b'-d', bytes(self.gain_offset)]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug(u'analyzing {0} files', len(items))
self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
output = call(cmd)
self._log.debug(u'analysis finished')
return self.parse_tool_output(output,
len(items) + (1 if is_album else 0))
def parse_tool_output(self, text, num_lines):
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
"""
out = []
for line in text.split(b'\n')[1:num_lines + 1]:
parts = line.split(b'\t')
if len(parts) != 6 or parts[0] == b'File':
self._log.debug(u'bad tool output: {0}', text)
raise ReplayGainError(u'mp3gain failed')
d = {
'file': parts[0],
'mp3gain': int(parts[1]),
'gain': float(parts[2]),
'peak': float(parts[3]) / (1 << 15),
'maxgain': int(parts[4]),
'mingain': int(parts[5]),
}
out.append(Gain(d['gain'], d['peak']))
return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
def __init__(self, config, log):
super(GStreamerBackend, self).__init__(config, log)
self._import_gst()
        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
self._src = self.Gst.ElementFactory.make("filesrc", "src")
self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
self._res = self.Gst.ElementFactory.make("audioresample", "res")
self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
if self._src is None or self._decbin is None or self._conv is None \
or self._res is None or self._rg is None:
raise FatalGstreamerPluginReplayGainError(
u"Failed to load required GStreamer plugins"
)
# We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
# already exists.
self._rg.set_property("forced", True)
self._rg.set_property("reference-level",
config["targetlevel"].as_number())
self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
self._pipe = self.Gst.Pipeline()
self._pipe.add(self._src)
self._pipe.add(self._decbin)
self._pipe.add(self._conv)
self._pipe.add(self._res)
self._pipe.add(self._rg)
self._pipe.add(self._sink)
self._src.link(self._decbin)
self._conv.link(self._res)
self._res.link(self._rg)
self._rg.link(self._sink)
self._bus = self._pipe.get_bus()
self._bus.add_signal_watch()
self._bus.connect("message::eos", self._on_eos)
self._bus.connect("message::error", self._on_error)
self._bus.connect("message::tag", self._on_tag)
# Needed for handling the dynamic connection between decodebin
# and audioconvert
self._decbin.connect("pad-added", self._on_pad_added)
self._decbin.connect("pad-removed", self._on_pad_removed)
self._main_loop = self.GLib.MainLoop()
self._files = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
and `GObject` fields on this object.
"""
try:
import gi
except ImportError:
raise FatalReplayGainError(
u"Failed to load GStreamer: python-gi not found"
)
try:
gi.require_version('Gst', '1.0')
except ValueError as e:
raise FatalReplayGainError(
u"Failed to load GStreamer 1.0: {0}".format(e)
)
from gi.repository import GObject, Gst, GLib
# Calling GObject.threads_init() is not needed for
# PyGObject 3.10.2+
with warnings.catch_warnings():
warnings.simplefilter("ignore")
GObject.threads_init()
Gst.init([sys.argv[0]])
self.GObject = GObject
self.GLib = GLib
self.Gst = Gst
def compute(self, files, album):
self._error = None
self._files = list(files)
if len(self._files) == 0:
return
self._file_tags = collections.defaultdict(dict)
if album:
self._rg.set_property("num-tracks", len(self._files))
if self._set_first_file():
self._main_loop.run()
if self._error is not None:
raise self._error
def compute_track_gain(self, items):
self.compute(items, False)
if len(self._file_tags) != len(items):
raise ReplayGainError(u"Some tracks did not receive tags")
ret = []
for item in items:
ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
self._file_tags[item]["TRACK_PEAK"]))
return ret
def compute_album_gain(self, album):
items = list(album.items())
self.compute(items, True)
if len(self._file_tags) != len(items):
raise ReplayGainError(u"Some items in album did not receive tags")
# Collect track gains.
track_gains = []
for item in items:
try:
gain = self._file_tags[item]["TRACK_GAIN"]
peak = self._file_tags[item]["TRACK_PEAK"]
except KeyError:
raise ReplayGainError(u"results missing for track")
track_gains.append(Gain(gain, peak))
# Get album gain information from the last track.
last_tags = self._file_tags[items[-1]]
try:
gain = last_tags["ALBUM_GAIN"]
peak = last_tags["ALBUM_PEAK"]
except KeyError:
raise ReplayGainError(u"results missing for album")
return AlbumGain(Gain(gain, peak), track_gains)
def close(self):
self._bus.remove_signal_watch()
def _on_eos(self, bus, message):
# A file finished playing in all elements of the pipeline. The
# RG tags have already been propagated. If we don't have a next
# file, we stop processing.
if not self._set_next_file():
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
def _on_error(self, bus, message):
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
err, debug = message.parse_error()
f = self._src.get_property("location")
# A GStreamer error, either an unsupported format or a bug.
self._error = ReplayGainError(
u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
)
def _on_tag(self, bus, message):
tags = message.parse_tag()
def handle_tag(taglist, tag, userdata):
# The rganalysis element provides both the existing tags for
            # files and the newly computed tags. In order to ensure we
            # store the computed tags, we overwrite the RG values
            # received a second time.
if tag == self.Gst.TAG_TRACK_GAIN:
self._file_tags[self._file]["TRACK_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_TRACK_PEAK:
self._file_tags[self._file]["TRACK_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_GAIN:
self._file_tags[self._file]["ALBUM_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_PEAK:
self._file_tags[self._file]["ALBUM_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_REFERENCE_LEVEL:
self._file_tags[self._file]["REFERENCE_LEVEL"] = \
taglist.get_double(tag)[1]
tags.foreach(handle_tag, None)
def _set_first_file(self):
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", syspath(self._file.path))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
def _set_file(self):
"""Initialize the filesrc element with the next file to be analyzed.
"""
# No more files, we're done
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
# Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", syspath(self._file.path))
        # Ensure the filesrc element receives the paused state of the
# pipeline in a blocking manner
self._src.sync_state_with_parent()
self._src.get_state(self.Gst.CLOCK_TIME_NONE)
# Ensure the decodebin element receives the paused state of the
# pipeline in a blocking manner
self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
return True
def _set_next_file(self):
"""Set the next file to be analyzed while keeping the pipeline
in the PAUSED state so that the rganalysis element can correctly
handle album gain.
"""
# A blocking pause
self._pipe.set_state(self.Gst.State.PAUSED)
self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
# Try setting the next file
ret = self._set_file()
if ret:
# Seek to the beginning in order to clear the EOS state of the
# various elements of the pipeline
self._pipe.seek_simple(self.Gst.Format.TIME,
self.Gst.SeekFlags.FLUSH,
0)
self._pipe.set_state(self.Gst.State.PLAYING)
return ret
def _on_pad_added(self, decbin, pad):
sink_pad = self._conv.get_compatible_pad(pad, None)
assert(sink_pad is not None)
pad.link(sink_pad)
def _on_pad_removed(self, decbin, pad):
# Called when the decodebin element is disconnected from the
# rest of the pipeline while switching input files
peer = pad.get_peer()
assert(peer is None)
class AudioToolsBackend(Backend):
"""ReplayGain backend that uses `Python Audio Tools
<http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
"""
def __init__(self, config, log):
super(AudioToolsBackend, self).__init__(config, log)
self._import_audiotools()
def _import_audiotools(self):
"""Check whether it's possible to import the necessary modules.
There is no check on the file formats at runtime.
        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
"""
try:
import audiotools
import audiotools.replaygain
except ImportError:
raise FatalReplayGainError(
u"Failed to load audiotools: audiotools not found"
)
self._mod_audiotools = audiotools
self._mod_replaygain = audiotools.replaygain
def open_audio_file(self, item):
"""Open the file to read the PCM stream from the using
``item.path``.
:return: the audiofile instance
:rtype: :class:`audiotools.AudioFile`
:raises :exc:`ReplayGainError`: if the file is not found or the
file format is not supported
"""
try:
audiofile = self._mod_audiotools.open(item.path)
except IOError:
raise ReplayGainError(
u"File {} was not found".format(item.path)
)
except self._mod_audiotools.UnsupportedFile:
raise ReplayGainError(
u"Unsupported file type {}".format(item.format)
)
return audiofile
def init_replaygain(self, audiofile, item):
"""Return an initialized :class:`audiotools.replaygain.ReplayGain`
instance, which requires the sample rate of the song(s) on which
the ReplayGain values will be computed. The item is passed in case
the sample rate is invalid to log the stored item sample rate.
        :return: initialized replaygain object
:rtype: :class:`audiotools.replaygain.ReplayGain`
:raises: :exc:`ReplayGainError` if the sample rate is invalid
"""
try:
rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
except ValueError:
raise ReplayGainError(
u"Unsupported sample rate {}".format(item.samplerate))
return rg
def compute_track_gain(self, items):
"""Compute ReplayGain values for the requested items.
:return list: list of :class:`Gain` objects
"""
return [self._compute_track_gain(item) for item in items]
def _title_gain(self, rg, audiofile):
"""Get the gain result pair from PyAudioTools using the `ReplayGain`
instance `rg` for the given `audiofile`.
Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
`ReplayGainError` when the library fails.
"""
try:
# The method needs an audiotools.PCMReader instance that can
# be obtained from an audiofile instance.
return rg.title_gain(audiofile.to_pcm())
except ValueError as exc:
# `audiotools.replaygain` can raise a `ValueError` if the sample
# rate is incorrect.
self._log.debug(u'error in rg.title_gain() call: {}', exc)
raise ReplayGainError(u'audiotools audio data error')
def _compute_track_gain(self, item):
"""Compute ReplayGain value for the requested item.
:rtype: :class:`Gain`
"""
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
        # Each call to title_gain on a ReplayGain object returns the gain
        # and peak of the track.
rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
item.artist, item.title, rg_track_gain, rg_track_peak)
return Gain(gain=rg_track_gain, peak=rg_track_peak)
def compute_album_gain(self, album):
"""Compute ReplayGain values for the requested album and its items.
:rtype: :class:`AlbumGain`
"""
self._log.debug(u'Analysing album {0}', album)
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(album.items())[0]
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
track_gains = []
for item in album.items():
audiofile = self.open_audio_file(item)
rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
track_gains.append(
Gain(gain=rg_track_gain, peak=rg_track_peak)
)
self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
item, rg_track_gain, rg_track_peak)
# After getting the values for all tracks, it's possible to get the
# album values.
rg_album_gain, rg_album_peak = rg.album_gain()
self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
album, rg_album_gain, rg_album_peak)
return AlbumGain(
Gain(gain=rg_album_gain, peak=rg_album_peak),
track_gains=track_gains
)
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis.
"""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"bs1770gain": Bs1770gainBackend
}
def __init__(self):
super(ReplayGainPlugin, self).__init__()
# default backend is 'command' for backward-compatibility.
self.config.add({
'overwrite': False,
'auto': True,
'backend': u'command',
'targetlevel': 89,
})
self.overwrite = self.config['overwrite'].get(bool)
backend_name = self.config['backend'].as_str()
if backend_name not in self.backends:
raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. "
u"Please select one of: {1}".format(
backend_name,
u', '.join(self.backends.keys())
)
)
# On-import analysis.
if self.config['auto']:
self.import_stages = [self.imported]
try:
self.backend_instance = self.backends[backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e))
def track_requires_gain(self, item):
return self.overwrite or \
(not item.rg_track_gain or not item.rg_track_peak)
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
# value.
return self.overwrite or \
any([not item.rg_album_gain or not item.rg_album_peak
for item in album.items()])
def store_track_gain(self, item, track_gain):
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak)
def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak
album.store()
self._log.debug(u'applied album gain {0}, peak {1}',
album.rg_album_gain, album.rg_album_peak)
def handle_album(self, album, write):
"""Compute album and track replay gain store it in all of the
album's items.
If ``write`` is truthy then ``item.write()`` is called for each
item. If replay gain information is already present in all
items, nothing is done.
"""
if not self.album_requires_gain(album):
self._log.info(u'Skipping album {0}', album)
return
self._log.info(u'analyzing {0}', album)
try:
album_gain = self.backend_instance.compute_album_gain(album)
if len(album_gain.track_gains) != len(album.items()):
raise ReplayGainError(
u"ReplayGain backend failed "
u"for some tracks in album {0}".format(album)
)
self.store_album_gain(album, album_gain.album_gain)
for item, track_gain in zip(album.items(), album_gain.track_gains):
self.store_track_gain(item, track_gain)
if write:
item.try_write()
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e))
def handle_track(self, item, write):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not self.track_requires_gain(item):
self._log.info(u'Skipping track {0}', item)
return
self._log.info(u'analyzing {0}', item)
try:
track_gains = self.backend_instance.compute_track_gain([item])
if len(track_gains) != 1:
raise ReplayGainError(
u"ReplayGain backend failed for track {0}".format(item)
)
self.store_track_gain(item, track_gains[0])
if write:
item.try_write()
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e))
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
if task.is_album:
self.handle_album(task.album, False)
else:
self.handle_track(task.item, False)
def commands(self):
"""Return the "replaygain" ui subcommand.
"""
def func(lib, opts, args):
self._log.setLevel(logging.INFO)
write = ui.should_write()
if opts.album:
for album in lib.albums(ui.decargs(args)):
self.handle_album(album, write)
else:
for item in lib.items(ui.decargs(args)):
self.handle_track(item, write)
cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
cmd.parser.add_album_option()
cmd.func = func
return [cmd]
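# For reference (illustrative, not taken from the original source): the options
# registered in ReplayGainPlugin.__init__ correspond to a beets config section
# along the lines of
#
#     replaygain:
#         auto: yes
#         backend: command
#         targetlevel: 89
#         overwrite: no
#
# where `backend` must be one of: command, gstreamer, audiotools, bs1770gain.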
|
|
"""The tests for the logbook component."""
# pylint: disable=protected-access,too-many-public-methods
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components import sun
import homeassistant.core as ha
from homeassistant.const import (
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
ATTR_HIDDEN, STATE_NOT_HOME, STATE_ON, STATE_OFF)
import homeassistant.util.dt as dt_util
from homeassistant.components import logbook
from homeassistant.bootstrap import setup_component
from tests.common import mock_http_component, get_test_home_assistant
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_http_component(self.hass)
self.hass.config.components += ['frontend', 'recorder', 'api']
with patch('homeassistant.components.logbook.'
'register_built_in_panel'):
assert setup_component(self.hass, logbook.DOMAIN,
self.EMPTY_CONFIG)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {
logbook.ATTR_NAME: 'Alarm',
logbook.ATTR_MESSAGE: 'is triggered',
logbook.ATTR_DOMAIN: 'switch',
logbook.ATTR_ENTITY_ID: 'switch.test_switch'
}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
self.assertEqual(1, len(calls))
last_call = calls[-1]
self.assertEqual('Alarm', last_call.data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', last_call.data.get(
logbook.ATTR_MESSAGE))
self.assertEqual('switch', last_call.data.get(logbook.ATTR_DOMAIN))
self.assertEqual('switch.test_switch', last_call.data.get(
logbook.ATTR_ENTITY_ID))
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
self.assertEqual(0, len(calls))
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(logbook.humanify((eventA, eventB, eventC)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], pointB, 'bla', domain='sensor', entity_id=entity_id)
self.assert_entry(
entries[1], pointC, 'bla', domain='sensor', entity_id=entity_id)
def test_filter_continuous_sensor_values(self):
"""Test remove continuous sensor events from logbook."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow()
attributes = {'unit_of_measurement': 'foo'}
eventA = self.create_state_changed_event(
pointA, entity_id, 10, attributes)
entries = list(logbook.humanify((eventA,)))
self.assertEqual(0, len(entries))
def test_exclude_events_hidden(self):
"""Test if events are excluded if entity is hidden."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10,
{ATTR_HIDDEN: 'true'})
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), self.EMPTY_CONFIG)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_exclude_events_entity(self):
"""Test if events are filtered if entity is excluded in config."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_ENTITIES: [entity_id, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_exclude_events_domain(self):
"""Test if events are filtered if domain is excluded in config."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ['switch', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_include_events_entity(self):
"""Test if events are filtered if entity is included in config."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_INCLUDE: {
logbook.CONF_ENTITIES: [entity_id2, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_include_events_domain(self):
"""Test if events are filtered if domain is included in config."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ['sensor', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_include_exclude_events(self):
"""Test if events are filtered if include and exclude is configured."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
entity_id3 = 'sensor.bli'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA1 = self.create_state_changed_event(pointA, entity_id, 10)
eventA2 = self.create_state_changed_event(pointA, entity_id2, 10)
eventA3 = self.create_state_changed_event(pointA, entity_id3, 10)
eventB1 = self.create_state_changed_event(pointB, entity_id, 20)
eventB2 = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ['sensor', ],
logbook.CONF_ENTITIES: ['switch.bla', ]},
logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ['switch', ],
logbook.CONF_ENTITIES: ['sensor.bli', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA1, eventA2, eventA3,
eventB1, eventB2), config)
entries = list(logbook.humanify(events))
self.assertEqual(3, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointA, 'blu', domain='sensor',
entity_id=entity_id2)
self.assert_entry(entries[2], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_exclude_auto_groups(self):
"""Test if events of automatically generated groups are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'group.switches'
pointA = dt_util.utcnow()
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointA, entity_id2, 20,
{'auto': True})
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_exclude_attribute_changes(self):
"""Test if events of attribute changes are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'switch.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=1)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(
pointA, entity_id2, 20, last_changed=pointA, last_updated=pointB)
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_to_dict(self):
"""Test conversion of entry to dict."""
entry = logbook.Entry(
dt_util.utcnow(), 'Alarm', 'is triggered', 'switch', 'test_switch'
)
data = entry.as_dict()
self.assertEqual('Alarm', data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', data.get(logbook.ATTR_MESSAGE))
self.assertEqual('switch', data.get(logbook.ATTR_DOMAIN))
self.assertEqual('test_switch', data.get(logbook.ATTR_ENTITY_ID))
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
Events that occur within the same minute are combined into one entry.
"""
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_STOP),
ha.Event(EVENT_HOMEASSISTANT_START),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='restarted',
domain=ha.DOMAIN)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = 'switch.bla'
pointA = dt_util.utcnow()
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10)
)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_message_from_state_device(self):
"""Test if logbook message is correctly created for switches.
In particular, test that the special handling for turn on/off events is applied.
"""
pointA = dt_util.utcnow()
# message for a device state change
eventA = self.create_state_changed_event(pointA, 'switch.bla', 10)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('changed to 10', message)
# message for a switch turned on
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_ON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned on', message)
# message for a switch turned off
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_OFF)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned off', message)
def test_entry_message_from_state_device_tracker(self):
"""Test if logbook message is correctly created for device tracker."""
pointA = dt_util.utcnow()
# message for a device tracker "not home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
STATE_NOT_HOME)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is away', message)
# message for a device tracker "home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
'work')
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is at work', message)
def test_entry_message_from_state_sun(self):
"""Test if logbook message is correctly created for sun."""
pointA = dt_util.utcnow()
# message for a sun rise
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_ABOVE_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has risen', message)
# message for a sun set
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_BELOW_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has set', message)
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = 'Nice name'
message = 'has a custom entry'
entity_id = 'sun.sun'
entries = list(logbook.humanify((
ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
}),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name=name, message=message,
domain='sun', entity_id=entity_id)
def assert_entry(self, entry, when=None, name=None, message=None,
domain=None, entity_id=None):
"""Assert an entry is what is expected."""
if when:
self.assertEqual(when, entry.when)
if name:
self.assertEqual(name, entry.name)
if message:
self.assertEqual(message, entry.message)
if domain:
self.assertEqual(domain, entry.domain)
if entity_id:
self.assertEqual(entity_id, entry.entity_id)
def create_state_changed_event(self, event_time_fired, entity_id, state,
attributes=None, last_changed=None,
last_updated=None):
"""Create state changed event."""
# Logbook only cares about state change events that
# contain an old state but will not actually act on it.
state = ha.State(entity_id, state, attributes, last_changed,
last_updated).as_dict()
return ha.Event(EVENT_STATE_CHANGED, {
'entity_id': entity_id,
'old_state': state,
'new_state': state,
}, time_fired=event_time_fired)
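# Illustrative sketch (not part of the test suite above): building and
# validating a combined include/exclude logbook configuration of the kind the
# filtering tests exercise. The domains and entity ids are placeholders.
def _example_logbook_filter_config():
    """Return a validated logbook config with include and exclude filters."""
    return logbook.CONFIG_SCHEMA({
        ha.DOMAIN: {},
        logbook.DOMAIN: {
            logbook.CONF_INCLUDE: {
                logbook.CONF_DOMAINS: ['sensor'],
                logbook.CONF_ENTITIES: ['switch.important_switch'],
            },
            logbook.CONF_EXCLUDE: {
                logbook.CONF_DOMAINS: ['switch'],
                logbook.CONF_ENTITIES: ['sensor.noisy_sensor'],
            },
        },
    })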
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import os
import pickle
from argparse import Namespace
from dataclasses import dataclass
from unittest import mock
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import Container, OmegaConf
from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.saving import load_hparams_from_yaml, save_hparams_to_yaml
from pytorch_lightning.utilities import _HYDRA_EXPERIMENTAL_AVAILABLE, AttributeDict, is_picklable
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
if _HYDRA_EXPERIMENTAL_AVAILABLE:
from hydra.experimental import compose, initialize
class SaveHparamsModel(BoringModel):
"""Tests that a model can take an object"""
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class SaveHparamsDecoratedModel(BoringModel):
"""Tests that a model can take an object"""
@decorate
@decorate
def __init__(self, hparams, *my_args, **my_kwargs):
super().__init__()
self.save_hyperparameters(hparams)
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests that an hparam 'test_arg=14' is assigned, checkpointed and restored correctly
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["test_arg"] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
@pytest.mark.parametrize("cls", [SaveHparamsModel, SaveHparamsDecoratedModel])
def test_namespace_hparams(tmpdir, cls):
# init model
model = cls(hparams=Namespace(test_arg=14))
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, SaveHparamsDecoratedModel])
def test_dict_hparams(tmpdir, cls):
# init model
model = cls(hparams={"test_arg": 14})
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, SaveHparamsDecoratedModel])
def test_omega_conf_hparams(tmpdir, cls):
# init model
conf = OmegaConf.create(dict(test_arg=14, mylist=[15.4, dict(a=1, b=2)]))
model = cls(hparams=conf)
assert isinstance(model.hparams, Container)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, cls)
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert isinstance(model2.hparams, Container)
# config specific tests
assert model2.hparams.test_arg == 14
assert model2.hparams.mylist[0] == 15.4
def test_explicit_args_hparams(tmpdir):
"""
Tests that a model can take explicit args and assign them
"""
# define model
class LocalModel(BoringModel):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters("test_arg", "test_arg2")
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take implicitly collected args and assign them
"""
# define model
class LocalModel(BoringModel):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters()
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
Tests that a model can explicitly save only some args and ignore the rest
"""
# define model
class LocalModel(BoringModel):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters("test_arg")
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["test_arg"] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert "test_arg2" not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def test_class_nesting():
class MyModule(LightningModule):
def forward(self):
...
# make sure PL modules are always nn.Module
a = MyModule()
assert isinstance(a, torch.nn.Module)
def test_outside():
a = MyModule()
_ = a.hparams
class A:
def test(self):
a = MyModule()
_ = a.hparams
def test2(self):
test_outside()
test_outside()
A().test2()
A().test()
class CustomBoringModel(BoringModel):
def __init__(self, batch_size=64):
super().__init__()
self.save_hyperparameters()
class SubClassBoringModel(CustomBoringModel):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class SubSubClassBoringModel(SubClassBoringModel):
pass
class AggSubClassBoringModel(SubClassBoringModel):
def __init__(self, *args, my_loss=torch.nn.CrossEntropyLoss(), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class UnconventionalArgsBoringModel(CustomBoringModel):
"""A model that has unconventional names for "self", "*args" and "**kwargs"."""
def __init__(obj, *more_args, other_arg=300, **more_kwargs):
# intentionally named obj
super().__init__(*more_args, **more_kwargs)
obj.save_hyperparameters()
class DictConfSubClassBoringModel(SubClassBoringModel):
def __init__(self, *args, dict_conf=OmegaConf.create(dict(my_param="something")), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
@pytest.mark.parametrize(
"cls",
[
CustomBoringModel,
SubClassBoringModel,
SubSubClassBoringModel,
AggSubClassBoringModel,
UnconventionalArgsBoringModel,
DictConfSubClassBoringModel,
],
)
def test_collect_init_arguments(tmpdir, cls):
"""Test that the model automatically saves the arguments passed into the constructor"""
extra_args = {}
if cls is AggSubClassBoringModel:
extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
elif cls is DictConfSubClassBoringModel:
extra_args.update(dict_conf=OmegaConf.create(dict(my_param="anything")))
model = cls(**extra_args)
assert model.hparams.batch_size == 64
model = cls(batch_size=179, **extra_args)
assert model.hparams.batch_size == 179
if isinstance(model, SubClassBoringModel):
assert model.hparams.subclass_arg == 1200
if isinstance(model, AggSubClassBoringModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["batch_size"] == 179
# verify that model loads correctly
model = cls.load_from_checkpoint(raw_checkpoint_path)
assert model.hparams.batch_size == 179
if isinstance(model, AggSubClassBoringModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
if isinstance(model, DictConfSubClassBoringModel):
assert isinstance(model.hparams.dict_conf, Container)
assert model.hparams.dict_conf["my_param"] == "anything"
# verify that we can overwrite whatever we want
model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99)
assert model.hparams.batch_size == 99
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if ".ckpt" in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
class LocalVariableModelSuperLast(BoringModel):
"""This model has the super().__init__() call at the end."""
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = "overwritten"
local_var = 1234 # noqa: F841
super().__init__(*args, **kwargs) # this is intentionally here at the end
class LocalVariableModelSuperFirst(BoringModel):
"""This model has the _auto_collect_arguments() call at the end."""
def __init__(self, arg1, arg2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument1 = arg1 # arg2 intentionally not set
arg1 = "overwritten"
local_var = 1234 # noqa: F841
self.save_hyperparameters() # this is intentionally here at the end
@pytest.mark.parametrize(
"cls",
[
LocalVariableModelSuperFirst,
# LocalVariableModelSuperLast,
],
)
def test_collect_init_arguments_with_local_vars(cls):
"""Tests that only the arguments are collected and not local variables."""
model = cls(arg1=1, arg2=2)
assert "local_var" not in model.hparams
assert model.hparams["arg1"] == "overwritten"
assert model.hparams["arg2"] == 2
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
class AnotherArgModel(BoringModel):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(BoringModel):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize(
"cls,config", [(AnotherArgModel, dict(arg1=42)), (OtherArgsModel, dict(arg1=3.14, arg2="abc"))]
)
def test_single_config_models_fail(tmpdir, cls, config):
"""Test fail on passing unsupported config type."""
with pytest.raises(ValueError):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ["module_arguments"])
def test_load_past_checkpoint(tmpdir, past_key):
model = CustomBoringModel()
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint["hparams_type"] = "Namespace"
raw_checkpoint[past_key]["batch_size"] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = CustomBoringModel.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmpdir):
ad = AttributeDict({"key1": 1, "key2": "abc"})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsBoringModel(BoringModel):
"""A model that has an attribute that cannot be pickled."""
def __init__(self, foo="bar", pickle_me=(lambda x: x + 1), **kwargs):
super().__init__(**kwargs)
assert not is_picklable(pickle_me)
self.save_hyperparameters()
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsBoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert "pickle_me" not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(
batch_size=32, learning_rate=0.001, data_root="./any/path/here", nested=dict(any_num=123, anystr="abcd")
)
path_yaml = os.path.join(tmpdir, "testing-hparams.yaml")
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml, use_omegaconf=False) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml, use_omegaconf=False) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml, use_omegaconf=False) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassBoringModel(CustomBoringModel):
def __init__(self):
super().__init__()
@pytest.mark.parametrize("cls", [BoringModel, NoArgsSubClassBoringModel])
def test_model_nohparams_train_test(tmpdir, cls):
"""Test models that do not take any argument in init."""
model = cls()
trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
train_loader = DataLoader(RandomDataset(32, 64), batch_size=32)
trainer.fit(model, train_loader)
test_loader = DataLoader(RandomDataset(32, 64), batch_size=32)
trainer.test(test_dataloaders=test_loader)
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
class LocalModel(BoringModel):
def __init__(self, batch_size=15):
super().__init__()
self.save_hyperparameters()
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert "non_exist_kwarg" not in model.hparams
class SuperClassPositionalArgs(BoringModel):
def __init__(self, hparams):
super().__init__()
self._hparams = hparams # pretend BoringModel did not call self.save_hyperparameters()
class SubClassVarArgs(SuperClassPositionalArgs):
"""Loading this model should accept hparams and init in the super class"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def test_args(tmpdir):
"""Test for inheritance: super class takes positional arg, subclass takes varargs."""
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
class RuntimeParamChangeModelSaving(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving])
def test_init_arg_with_runtime_change(tmpdir, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed"""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get("running_arg") == 123
class UnsafeParamModel(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1
)
trainer.fit(model)
trainer.test()
@pytest.mark.skipif(not _HYDRA_EXPERIMENTAL_AVAILABLE, reason="Hydra experimental is not available")
def test_model_save_hyper_parameters_interpolation_with_hydra(tmpdir):
"""
This test relies on configuration saved under tests/models/conf/config.yaml
"""
class TestHydraModel(BoringModel):
def __init__(self, args_0, args_1, args_2, kwarg_1=None):
self.save_hyperparameters()
assert self.hparams.args_0.log == "Something"
assert self.hparams.args_1["cfg"].log == "Something"
assert self.hparams.args_2[0].log == "Something"
assert self.hparams.kwarg_1["cfg"][0].log == "Something"
super().__init__()
with initialize(config_path="conf"):
args_0 = compose(config_name="config")
args_1 = {"cfg": compose(config_name="config")}
args_2 = [compose(config_name="config")]
kwarg_1 = {"cfg": [compose(config_name="config")]}
model = TestHydraModel(args_0, args_1, args_2, kwarg_1=kwarg_1)
epochs = 2
checkpoint_callback = ModelCheckpoint(monitor=None, dirpath=tmpdir, save_top_k=-1)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
limit_train_batches=10,
limit_val_batches=10,
max_epochs=epochs,
logger=False,
)
trainer.fit(model)
_ = TestHydraModel.load_from_checkpoint(checkpoint_callback.best_model_path)
@pytest.mark.parametrize("ignore", ("arg2", ("arg2", "arg3")))
def test_ignore_args_list_hparams(tmpdir, ignore):
"""
Tests that args can be ignored in save_hyperparameters
"""
class LocalModel(BoringModel):
def __init__(self, arg1, arg2, arg3):
super().__init__()
self.save_hyperparameters(ignore=ignore)
model = LocalModel(arg1=14, arg2=90, arg3=50)
# test proper property assignments
assert model.hparams.arg1 == 14
for arg in ignore:
assert arg not in model.hparams
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]["arg1"] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, arg2=123, arg3=100)
assert model.hparams.arg1 == 14
for arg in ignore:
assert arg not in model.hparams
class HparamsKwargsContainerModel(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters(kwargs)
class HparamsNamespaceContainerModel(BoringModel):
def __init__(self, config):
super().__init__()
self.save_hyperparameters(config)
def test_empty_hparams_container(tmpdir):
"""Test that save_hyperparameters() is a no-op when saving an empty hparams container."""
model = HparamsKwargsContainerModel()
assert not model.hparams
model = HparamsNamespaceContainerModel(Namespace())
assert not model.hparams
@dataclass
class DataClassModel(BoringModel):
mandatory: int
optional: str = "optional"
ignore_me: bool = False
def __post_init__(self):
super().__init__()
self.save_hyperparameters(ignore=("ignore_me",))
def test_dataclass_lightning_module(tmpdir):
"""Test that save_hyperparameters() works with a LightningModule as a dataclass."""
model = DataClassModel(33, optional="cocofruit")
assert model.hparams == dict(mandatory=33, optional="cocofruit")
class NoHparamsModel(BoringModel):
"""Tests a model without hparams."""
class DataModuleWithoutHparams(LightningDataModule):
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(RandomDataset(32, 64), batch_size=32)
class DataModuleWithHparams(LightningDataModule):
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(RandomDataset(32, 64), batch_size=32)
def _get_mock_logger(tmpdir):
mock_logger = mock.MagicMock(name="logger")
mock_logger.name = "mock_logger"
mock_logger.save_dir = tmpdir
mock_logger.version = "0"
del mock_logger.__iter__
return mock_logger
@pytest.mark.parametrize("model", (SaveHparamsModel({"arg1": 5, "arg2": "abc"}), NoHparamsModel()))
@pytest.mark.parametrize("data", (DataModuleWithHparams({"data_dir": "foo"}), DataModuleWithoutHparams()))
def test_adding_datamodule_hparams(tmpdir, model, data):
"""Test that hparams from datamodule and model are logged."""
org_model_hparams = copy.deepcopy(model.hparams_initial)
org_data_hparams = copy.deepcopy(data.hparams_initial)
mock_logger = _get_mock_logger(tmpdir)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=mock_logger)
trainer.fit(model, datamodule=data)
# Hparams of model and data were not modified
assert org_model_hparams == model.hparams
assert org_data_hparams == data.hparams
# Merged hparams were logged
merged_hparams = copy.deepcopy(org_model_hparams)
merged_hparams.update(org_data_hparams)
mock_logger.log_hyperparams.assert_called_with(merged_hparams)
def test_no_datamodule_for_hparams(tmpdir):
"""Test that hparams model are logged if no datamodule is used."""
model = SaveHparamsModel({"arg1": 5, "arg2": "abc"})
org_model_hparams = copy.deepcopy(model.hparams_initial)
data = DataModuleWithoutHparams()
data.setup()
mock_logger = _get_mock_logger(tmpdir)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=mock_logger)
trainer.fit(model, datamodule=data)
# Merged hparams were logged
mock_logger.log_hyperparams.assert_called_with(org_model_hparams)
def test_colliding_hparams(tmpdir):
model = SaveHparamsModel({"data_dir": "abc", "arg2": "abc"})
data = DataModuleWithHparams({"data_dir": "foo"})
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
with pytest.raises(MisconfigurationException, match=r"Error while merging hparams:"):
trainer.fit(model, datamodule=data)
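# Illustrative sketch (not the library's internal implementation): the merge
# behaviour the datamodule-hparams tests above rely on. Model and datamodule
# hparams are combined before being handed to the logger, and colliding keys
# make ``trainer.fit`` raise a MisconfigurationException.
def _example_merged_hparams(model, datamodule):
    """Return the hparams dict a logger would be expected to receive."""
    merged = copy.deepcopy(model.hparams_initial)
    merged.update(datamodule.hparams_initial)
    return merged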
|
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from acq4.util.DataManager import *
from acq4.util.debug import *
import os
class DirTreeWidget(QtGui.QTreeWidget):
sigSelectionChanged = QtCore.Signal(object)
### something funny is happening with sigSelectionChanged and currentItemChanged; the signals seem to be emitted before the DirTreeWidget actually knows that the item changed.
### i.e. if a function is connected to the signal and asks DirTreeWidget.selectedFile(), the previously selected file is returned, not the new selection.
### You can get around this by using the (current, previous) items that are passed with the currentItemChanged signal (see the illustrative sketch after this class).
def __init__(self, parent=None, baseDirHandle=None, checkState=None, allowMove=True, allowRename=True, sortMode='date'):
QtGui.QTreeWidget.__init__(self, parent)
self.baseDir = baseDirHandle
self.checkState = checkState
self.allowMove = allowMove
self.allowRename = allowRename
self.currentDir = None
self.sortMode = sortMode
self.setEditTriggers(QtGui.QAbstractItemView.SelectedClicked)
self.items = {}
self.itemExpanded.connect(self.itemExpandedEvent)
self.itemChanged.connect(self.itemChangedEvent)
self.currentItemChanged.connect(self.selectionChanged)
self.setAcceptDrops(True)
self.setDragEnabled(True)
def __del__(self):
try:
self.quit()
except:
pass
def setSortMode(self, mode):
"""Set the method used to sort. Must be 'date' or 'alpha'."""
self.sortMode = mode
self.rebuildTree()
def flushSignals(self):
for h in self.items.keys():
h.flushSignals()
def quit(self):
## not sure if any of this is necessary..
try:
self.itemExpanded.disconnect(self.itemExpandedEvent)
except TypeError:
pass
try:
self.itemChanged.disconnect(self.itemChangedEvent)
except TypeError:
pass
for h in self.items:
self.unwatch(h)
#self.handles = {}
self.items = {}
self.clear()
def refresh(self, handle):
try:
item = self.item(handle)
except:
return
self.rebuildChildren(item)
def selectionChanged(self, item=None, _=None):
"""Selection has changed; check to see whether currentDir item needs to be recolored"""
self.sigSelectionChanged.emit(self)
if item is None:
item = self.currentItem()
if not isinstance(item, FileTreeItem):
return
if self.handle(item) is self.currentDir:
self.setStyleSheet('selection-background-color: #BB00BB;')
else:
self.setStyleSheet('')
def selectedFile(self):
"""Return the handle for the currently selected file.
If no items are selected, return None.
If multiple items are selected, raise an exception."""
items = self.selectedItems()
if len(items) == 0:
return None
if len(items) > 1:
raise Exception('Multiple items selected. Use selectedFiles instead.')
return self.handle(items[0])
def selectedFiles(self):
"""Return list of handles for the currently selected file(s)."""
items = self.selectedItems()
return [self.handle(items[i]) for i in range(len(items))]
def handle(self, item):
"""Given a tree item, return the corresponding file handle"""
if hasattr(item, 'handle'):
return item.handle
elif item is self.invisibleRootItem():
return self.baseDir
else:
raise Exception("Can't determine handle for item '%s'" % item.text(0))
def item(self, handle, create=False):
"""Given a file handle, return the corresponding tree item."""
if handle in self.items:
return self.items[handle]
else:
self.flushSignals() ## might be something waiting to be added to the tree
if handle in self.items:
return self.items[handle]
elif create:
return self.addHandle(handle)
else:
raise Exception("Can't find tree item for file '%s'" % handle.name())
def itemChangedEvent(self, item, col):
"""Item text has changed; try renaming the file"""
handle = self.handle(item)
try:
newName = str(item.text(0))
if handle.shortName() != newName:
if os.path.sep in newName:
raise Exception("Can't rename file to have slashes in it.")
handle.rename(newName)
#print "Rename %s -> %s" % (handle.shortName(), item.text(0))
except:
printExc("Error while renaming file:")
finally:
item.setText(0, handle.shortName())
def setBaseDirHandle(self, d):
#print "set base", d.name()
if self.baseDir is not None:
self.unwatch(self.baseDir)
self.baseDir = d
if d is not None:
self.watch(self.baseDir)
for h in self.items:
self.unwatch(h)
#self.handles = {}
if d is not None:
self.items = {self.baseDir: self.invisibleRootItem()}
self.clear()
if d is not None:
self.rebuildChildren(self.invisibleRootItem())
#self.rebuildTree()
def baseDirHandle(self):
return self.baseDir
def setRoot(self, d):
"""Synonym for setBaseDirHandle"""
return self.setBaseDirHandle(d)
def setCurrentDir(self, d):
#print "set current %s -> %s" % (self.currentDir, d)
## uncolor previous current item
if self.currentDir in self.items:
item = self.items[self.currentDir]
item.setBackground(0, QtGui.QBrush(QtGui.QColor(255,255,255)))
#print " - uncolor item ", item, self.handle(item)
self.currentDir = d
if d is self.baseDir:
return
self.expandTo(d)
if d in self.items:
self.updateCurrentDirItem()
#else:
#print " - current dir changed but new dir not yet present in tree."
def updateCurrentDirItem(self):
"""Color the currentDir item, expand, and scroll-to"""
#print "UpdateCurrentDirItem"
item = self.item(self.currentDir)
item.setBackground(0, QtGui.QBrush(QtGui.QColor(250, 100, 100)))
item.setExpanded(True)
self.scrollToItem(item)
self.selectionChanged()
def expandTo(self, dh):
"""Expand all nodes from baseDir up to dh"""
dirs = dh.name(relativeTo=self.baseDir).split(os.path.sep)
node = self.baseDir
while len(dirs) > 0:
item = self.items[node]
item.setExpanded(True)
node = node[dirs.pop(0)]
def watch(self, handle):
#QtCore.QObject.connect(handle, QtCore.SIGNAL('delayedChange'), self.dirChanged)
handle.sigDelayedChange.connect(self.dirChanged)
def unwatch(self, handle):
#QtCore.QObject.disconnect(handle, QtCore.SIGNAL('delayedChange'), self.dirChanged)
try:
handle.sigDelayedChange.disconnect(self.dirChanged)
except:
pass
def dirChanged(self, handle, changes):
if handle is self.baseDir:
item = self.invisibleRootItem()
else:
item = self.items[handle]
if 'renamed' in changes:
item.setText(0, handle.shortName())
if 'deleted' in changes:
self.forgetHandle(handle)
if 'children' in changes:
self.rebuildChildren(item)
item.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.ShowIndicator)
def addHandle(self, handle):
if handle in self.items:
raise Exception("Tried to add handle '%s' twice." % handle.name())
item = FileTreeItem(handle, self.checkState, self.allowMove, self.allowRename)
self.items[handle] = item
#self.handles[item] = handle
self.watch(handle)
if handle is self.currentDir:
self.updateCurrentDirItem()
return item
def forgetHandle(self, handle):
item = self.item(handle)
del self.items[handle]
#del self.handles[item]
self.unwatch(handle)
def rebuildChildren(self, root):
"""Make sure all children are present and in the correct order"""
handle = self.handle(root)
files = handle.ls(sortMode=self.sortMode)
handles = [handle[f] for f in files]
i = 0
while True:
if i >= len(handles):
## no more handles; remainder of items should be removed
while root.childCount() > i:
ch = root.takeChild(i)
break
h = handles[i]
if (i >= root.childCount()) or (h not in self.items) or (h is not self.handle(root.child(i))):
item = self.item(h, create=True)
parent = self.itemParent(item)
if parent is not None:
parent.removeChild(item)
root.insertChild(i, item)
item.recallExpand()
i += 1
def itemParent(self, item):
"""Return the parent of an item (since item.parent can not be trusted). Note: damn silly."""
if item.parent() is None:
root = self.invisibleRootItem()
tlc = [root.child(i) for i in range(root.childCount())]
#if item in tlc:
#return root
#else:
#return None
for tli in tlc:
if tli is item:
return root
return None
else:
return item.parent()
def editItem(self, handle):
item = self.item(handle)
QtGui.QTreeWidget.editItem(self, item, 0)
def rebuildTree(self, root=None, useCache=True):
"""Completely clear and rebuild the entire tree starting at root"""
if root is None:
root = self.invisibleRootItem()
handle = self.handle(root)
self.clearTree(root)
if handle is None:
return
for f in handle.ls(useCache=useCache):
#print "Add handle", f
try:
childHandle = handle[f]
except:
printExc("Error getting file handle:")
continue
item = self.addHandle(childHandle)
root.addChild(item)
def clearTree(self, root):
while root.childCount() > 0:
child = root.child(0)
if isinstance(child, FileTreeItem):
self.clearTree(child)
handle = self.handle(child)
self.unwatch(handle)
#del self.handles[child]
del self.items[handle]
root.removeChild(child)
def itemExpandedEvent(self, item):
"""Called whenever an item in the tree is expanded; responsible for loading children if they have not been loaded yet."""
if not item.childrenLoaded:
try:
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
## Display loading message before starting load
loading = None
if item.handle.isDir():
loading = QtGui.QTreeWidgetItem(['loading..'])
item.addChild(loading)
QtGui.QApplication.instance().processEvents() ## make sure the 'loading' item is displayed before building the tree
if loading is not None:
item.removeChild(loading)
## now load all children
self.rebuildChildren(item)
item.childrenLoaded = True
finally:
QtGui.QApplication.restoreOverrideCursor()
item.expanded()
self.scrollToItem(item.child(item.childCount()-1))
self.scrollToItem(item)
def select(self, handle):
item = self.item(handle)
self.setCurrentItem(item)
def dropMimeData(self, parent, index, data, action):
#print "dropMimeData:", parent, index, self.selectedFiles()
#source = [self.handle(s) for s in self.selectedItems()]
source = self.selectedFiles()
if parent is None:
target = self.baseDir
else:
target = self.handle(parent)
try:
for s in source:
s.move(target)
return True
except:
printExc('Move failed:')
return False
#def handleScheduledMove(self, item, parent):
#handle = self.handle(item)
#try:
#handle.move(self.handle(parent))
#except:
#printExc("Move failed:")
def contextMenuEvent(self, ev):
print "menu:", ev.pos()
item = self.itemAt(ev.pos())
if item is None:
print "no item"
return
self.menu = QtGui.QMenu(self)
act = self.menu.addAction('refresh', self.refreshClicked)
self.contextItem = item
self.menu.popup(ev.globalPos())
def refreshClicked(self):
self.rebuildTree(self.contextItem, useCache=False)
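# Illustrative sketch (not part of acq4): connecting to currentItemChanged and
# using the (current, previous) items it delivers, which avoids the
# selection-ordering quirk described at the top of DirTreeWidget.
def _connectSelectionExample(tree):
    """Print the handle of the newly selected item in ``tree``."""
    def onCurrentItemChanged(current, previous):
        if isinstance(current, FileTreeItem):
            print "selected:", tree.handle(current).shortName()
    tree.currentItemChanged.connect(onCurrentItemChanged)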
class FileTreeItem(QtGui.QTreeWidgetItem):
def __init__(self, handle, checkState=None, allowMove=True, allowRename=True):
QtGui.QTreeWidgetItem.__init__(self, [handle.shortName()])
self.handle = handle
self.childrenLoaded = False
if self.handle.isDir():
self.setExpanded(False)
#if self.handle.hasChildren(): ## too expensive.
self.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.ShowIndicator)
self.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsDropEnabled|QtCore.Qt.ItemIsEnabled)
self.setForeground(0, QtGui.QBrush(QtGui.QColor(0, 0, 150)))
else:
self.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled)
if allowMove:
self.setFlag(QtCore.Qt.ItemIsDragEnabled)
if allowRename:
self.setFlag(QtCore.Qt.ItemIsEditable)
if checkState is not None:
self.setFlag(QtCore.Qt.ItemIsUserCheckable)
if checkState:
self.setCheckState(0, QtCore.Qt.Checked)
else:
self.setCheckState(0, QtCore.Qt.Unchecked)
self.expandState = False
#QtCore.QObject.connect(self.handle, QtCore.SIGNAL('changed'), self.handleChanged)
self.handle.sigChanged.connect(self.handleChanged)
self.updateBoldState()
def setFlag(self, flag, v=True):
if v:
self.setFlags(self.flags() | flag)
else:
self.setFlags(self.flags() & ~flag)
def updateBoldState(self):
if self.handle.isManaged():
info = self.handle.info()
font = self.font(0)
if ('important' in info) and (info['important'] is True):
font.setWeight(QtGui.QFont.Bold)
else:
font.setWeight(QtGui.QFont.Normal)
self.setFont(0, font)
def handleChanged(self, handle, change, *args):
#print "handleChanged:", change
if change == 'children':
if self.handle.hasChildren() > 0:
self.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.ShowIndicator)
else:
self.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.DontShowIndicatorWhenChildless)
elif change == 'meta':
self.updateBoldState()
def expanded(self):
"""Called whenever this item is expanded or collapsed."""
#print "Expand:", self.isExpanded()
self.expandState = self.isExpanded()
def recallExpand(self):
if self.expandState:
#print "re-expanding", self.handle.shortName()
self.setExpanded(False)
self.setExpanded(True)
for i in range(self.childCount()):
self.child(i).recallExpand()
def setChecked(self, c):
if c:
self.setCheckState(0, QtCore.Qt.Checked)
else:
self.setCheckState(0, QtCore.Qt.Unchecked)
|
|
"""Addins are the primary way of extending FeedPlatform the add
additional aggregator functionality.
You'll find a number of them in the builtin ``feedplatform.lib``
module, although custom addins can easily be created, and
generally, when extended or specialized customization is required,
creating an addin will be a common thing to do.
Addins require an installation process to register their hooks and
other modifiers, so that addins can be loaded and reloaded at any
time. If you change the list of addins at any point afterwards,
use ``reinstall()`` to put it into effect.
It is recommended that addins subclass ``base``, though it is not
required and an addin may in fact be any object that features a
``setup`` method. Addins can also specify a tuple attribute
``depends``, referring to other addin classes that are required
for an addin to function. If the user hasn't specified those
addins, they will be added implicitly, as long as their constructor
allows parameterless instantiation. Otherwise, an error would be
raised, asking the user to manually add the dependency.
Currently, the ``depends`` tuple may refer to the other addins only
via a class reference.
"""
import types
import inspect
from copy import copy
from feedplatform import hooks
from feedplatform import log
from feedplatform.conf import config
__all__ = ('base', 'install', 'reinstall', 'get_addins')
class base(object):
"""Common base class for addins.
Its use is optional; addins are not required to use it. However,
doing so will provide certain helpers:
* Instead of manually registering your hook callbacks, you can
simply write them as methods, using the hook name prefixed
with 'on_*' - e.g. 'on_get_guid'.
An exception will be raised if the name after 'on_' does not
refer to a valid hook.
* self.log provides a namespaced Python logging facility.
* An ``abstract`` attribute allows addins to be marked as not
directly usable.
"""
class __metaclass__(type):
def __new__(cls, name, bases, attrs):
# reset the ``abstract`` property for every new class
# automatically, i.e. does not inherit.
if 'abstract' not in attrs:
attrs['abstract'] = False
return type.__new__(cls, name, bases, attrs)
def __new__(cls, *args, **kwargs):
if getattr(cls, 'abstract', False):
raise TypeError('%s is an abstract addin.' % cls)
return super(base, cls).__new__(cls, *args, **kwargs)
def setup(self):
"""Called to have the addin register it's hook callbacks.
This is also the place for related setup jobs like setting up
custom models.
If an addin does not subclass ``addins.base``, it must provide
this method itself.
"""
# register new hooks that the addin wants to define
if hasattr(self, 'get_hooks'):
new_hooks = self.get_hooks()
if new_hooks:
for name in new_hooks:
hooks.register(name)
# auto-register all hook callbacks ('on_*'-pattern)
for name in dir(self):
if name.startswith('on_'):
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
try:
hooks.add_callback(name[3:], attr)
except KeyError, e:
raise RuntimeError(('%s: failed to initialize '
'because %s method does not refer to a valid '
'hook (%s).') % (self.__class__, name, e))
@property
def log(self):
"""Provide a logger namespace for each addin, accessible
via ``self.log``.
This is lazy, e.g. the logger is created only when accessed.
"""
if not hasattr(self, '_log'):
self._log = log.get('lib.%s' % self.__class__.__name__)
return self._log
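# Illustrative sketch (not part of FeedPlatform): a minimal addin following
# the pattern described in the module docstring. The 'get_guid' hook name is
# borrowed from the example above; whether it exists depends on the hooks the
# framework actually registers. The addin would be activated via
# ``config.ADDINS`` or by passing it to ``reinstall()``.
class _example_logging_addin(base):
    """Logs every invocation of the 'get_guid' hook."""
    def on_get_guid(self, *args, **kwargs):
        self.log.debug('get_guid hook fired with %r, %r' % (args, kwargs))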
_ADDINS = None
def get_addins():
"""Return the actual list of currently active addin instances,
as opposed to config.ADDINS, which is just the original user input.
"""
global _ADDINS
if _ADDINS is None:
reinstall()
return _ADDINS
def _make_addin(addin):
"""Normalizes addin's given by the user - makes sure an instance
is returned.
If ``addin`` is a class, an instance is created, if possible.
Otherwise, an error is raised, or ``addin`` is returned unmodified.
"""
if isinstance(addin, type):
if not addin.__init__ is object.__init__: # won't work with getargspec
args, _, _, defaults = inspect.getargspec(addin.__init__)
# for method types, the first argument will be the
# self-pointer, which we know will get filled, so we
# may ignore it.
if isinstance(addin.__init__, types.MethodType) and args:
args = args[1:]
if (not defaults and args) or (defaults and len(args) != len(defaults)):
raise ValueError('The addin "%s" was given as a class, '
'rather than an instance, but requires arguments '
'to be constructed.' % addin.__name__)
addin = addin()
return addin
def reinstall(addins=None):
"""Install the addins specified by the configuration, or via
``addins``.
The addin installation process consists mainly of letting each
addin register its hook callbacks, as well as rebuilding the
models.
Addins that were previously installed will automatically be
removed.
The function returns the list of addins installed. It may
differ from the explicitly specified list due to dependencies,
and will contain only addin instances, not classes.
"""
if addins is None:
addins = copy(config.ADDINS)
# Start by making sure all addins are available as instances,
# and use a separate list that we may modify going further.
# Note that by starting with an initial list of all specified
# addins, dependency order is not enforced for those. E.g. if
# ``b`` depends on ``a``, but the user puts ``b`` before ``a``,
# then that will be accepted by this installation process. In
# contrast, if they only specify ``b``, the ``a`` dependency
# would automatically be inserted before it.
to_be_setup = []
for addin in addins:
to_be_setup.append(_make_addin(addin))
# resolve dependencies
for i in range(0, len(to_be_setup)):
def resolve_dependencies(addin, index):
dependencies = getattr(addin, 'depends', ())
for dependency in dependencies:
exists = False
# Check if the dependency is already installed. Note
# that dependencies may be both classes and instances.
for existing in to_be_setup:
if not isinstance(dependency, type):
if isinstance(existing, type(dependency)):
exists = True
elif isinstance(existing, dependency):
exists = True
# if not, insert it at the right position, and
# recursively resolve its own dependencies.
if not exists:
dependency = _make_addin(dependency)
to_be_setup.insert(index, dependency)
index = resolve_dependencies(dependency, index)
index += 1
return index
i = resolve_dependencies(to_be_setup[i], i)
# finally, setup all the addins we determined to be installed
hooks.reset()
for addin in to_be_setup:
addin.setup()
global _ADDINS
_ADDINS = to_be_setup
return to_be_setup
def install(*args, **kwargs):
"""Like ``reinstall``, but only works the very first time the
addins need to be installed. If they already are, this is a noop.
Useful if you need to ensure that addins are active.
"""
global _ADDINS
if _ADDINS is None:
return reinstall(*args, **kwargs)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Comment splicer for lib2to3 trees.
The lib2to3 syntax tree produced by the parser holds comments and whitespace in
prefix attributes of nodes, rather than nodes themselves. This module provides
functionality to splice comments out of prefixes and into nodes of their own,
making them easier to process.
SpliceComments(): the main function exported by this module.
"""
from lib2to3 import pygram
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import pytree_utils
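# Illustrative helper (an assumption, not part of yapf's public API): one way
# to obtain a lib2to3 tree and run SpliceComments over it, using the plain
# lib2to3 driver.
def _splice_comments_example(source):
    """Parse ``source`` (which should end in a newline) and splice comments."""
    from lib2to3.pgen2 import driver as pgen2_driver
    d = pgen2_driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string(source)
    SpliceComments(tree)
    return tree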
def SpliceComments(tree):
"""Given a pytree, splice comments into nodes of their own right.
Extract comments from the prefixes where they are housed after parsing.
The prefixes that previously housed the comments become empty.
Args:
tree: a pytree.Node - the tree to work on. The tree is modified by this
function.
"""
# The previous leaf node encountered in the traversal.
# This is a list because Python 2.x doesn't have 'nonlocal' :)
prev_leaf = [None]
_AnnotateIndents(tree)
def _VisitNodeRec(node):
# This loop may insert into node.children, so we'll iterate over a copy.
for child in node.children[:]:
if isinstance(child, pytree.Node):
# Nodes don't have prefixes.
_VisitNodeRec(child)
else:
if child.prefix.lstrip().startswith('#'):
# We have a comment prefix in this child, so splicing is needed.
comment_prefix = child.prefix
comment_lineno = child.lineno - comment_prefix.count('\n')
comment_column = child.column
# Remember the leading indentation of this prefix and clear it.
# Mopping up the prefix is important because we may go over this same
# child in the next iteration...
child_prefix = child.prefix.lstrip('\n')
prefix_indent = child_prefix[:child_prefix.find('#')]
if '\n' in prefix_indent:
prefix_indent = prefix_indent[prefix_indent.rfind('\n') + 1:]
child.prefix = ''
if child.type == token.NEWLINE:
# If the prefix was on a NEWLINE leaf, it's part of the line so it
# will be inserted after the previously encountered leaf.
# We can't just insert it before the NEWLINE node, because as a
# result of the way pytrees are organized, this node can be under
# an inappropriate parent.
comment_column -= len(comment_prefix.lstrip())
pytree_utils.InsertNodesAfter(
_CreateCommentsFromPrefix(
comment_prefix,
comment_lineno,
comment_column,
standalone=False), prev_leaf[0])
elif child.type == token.DEDENT:
# Comment prefixes on DEDENT nodes also deserve special treatment,
# because their final placement depends on their prefix.
# We'll look for an ancestor of this child with a matching
# indentation, and insert the comment before it if the ancestor is
# on a DEDENT node and after it otherwise.
#
# lib2to3 places comments that should be separated into the same
# DEDENT node. For example, "comment 1" and "comment 2" will be
# combined.
#
# def _():
# for x in y:
# pass
# # comment 1
#
# # comment 2
# pass
#
# In this case, we need to split them up ourselves.
# Split into groups of comments at decreasing levels of indentation
comment_groups = []
comment_column = None
for cmt in comment_prefix.split('\n'):
col = cmt.find('#')
if col < 0:
if comment_column is None:
# Skip empty lines at the top of the first comment group
comment_lineno += 1
continue
elif comment_column is None or col < comment_column:
comment_column = col
comment_indent = cmt[:comment_column]
comment_groups.append((comment_column, comment_indent, []))
comment_groups[-1][-1].append(cmt)
# Insert a node for each group
for comment_column, comment_indent, comment_group in comment_groups:
ancestor_at_indent = _FindAncestorAtIndent(child, comment_indent)
if ancestor_at_indent.type == token.DEDENT:
InsertNodes = pytree_utils.InsertNodesBefore # pylint: disable=invalid-name
else:
InsertNodes = pytree_utils.InsertNodesAfter # pylint: disable=invalid-name
InsertNodes(
_CreateCommentsFromPrefix(
'\n'.join(comment_group) + '\n',
comment_lineno,
comment_column,
standalone=True), ancestor_at_indent)
comment_lineno += len(comment_group)
else:
# Otherwise there are two cases.
#
# 1. The comment is on its own line
# 2. The comment is part of an expression.
#
# Unfortunately, it's fairly difficult to distinguish between the
# two in lib2to3 trees. The algorithm here is to determine whether
# child is the first leaf in the statement it belongs to. If it is,
# then the comment (which is a prefix) belongs on a separate line.
# If it is not, it means the comment is buried deep in the statement
# and is part of some expression.
stmt_parent = _FindStmtParent(child)
for leaf_in_parent in stmt_parent.leaves():
if leaf_in_parent.type == token.NEWLINE:
continue
elif id(leaf_in_parent) == id(child):
# This comment stands on its own line, and it has to be inserted
# into the appropriate parent. We'll have to find a suitable
# parent to insert into. See comments above
# _STANDALONE_LINE_NODES for more details.
node_with_line_parent = _FindNodeWithStandaloneLineParent(child)
pytree_utils.InsertNodesBefore(
_CreateCommentsFromPrefix(
comment_prefix, comment_lineno, 0, standalone=True),
node_with_line_parent)
break
else:
if comment_lineno == prev_leaf[0].lineno:
comment_lines = comment_prefix.splitlines()
value = comment_lines[0].lstrip()
if value.rstrip('\n'):
comment_column = prev_leaf[0].column
comment_column += len(prev_leaf[0].value)
comment_column += (
len(comment_lines[0]) - len(comment_lines[0].lstrip()))
comment_leaf = pytree.Leaf(
type=token.COMMENT,
value=value.rstrip('\n'),
context=('', (comment_lineno, comment_column)))
pytree_utils.InsertNodesAfter([comment_leaf], prev_leaf[0])
comment_prefix = '\n'.join(comment_lines[1:])
comment_lineno += 1
rindex = (0 if '\n' not in comment_prefix.rstrip() else
comment_prefix.rstrip().rindex('\n') + 1)
comment_column = (
len(comment_prefix[rindex:]) - len(
comment_prefix[rindex:].lstrip()))
comments = _CreateCommentsFromPrefix(
comment_prefix,
comment_lineno,
comment_column,
standalone=False)
pytree_utils.InsertNodesBefore(comments, child)
break
prev_leaf[0] = child
_VisitNodeRec(tree)
def _CreateCommentsFromPrefix(comment_prefix,
comment_lineno,
comment_column,
standalone=False):
"""Create pytree nodes to represent the given comment prefix.
Args:
comment_prefix: (unicode) the text of the comment from the node's prefix.
comment_lineno: (int) the line number for the start of the comment.
comment_column: (int) the column for the start of the comment.
standalone: (bool) determines if the comment is standalone or not.
Returns:
The simple_stmt nodes if this is a standalone comment, otherwise a list of
new COMMENT leafs. The prefix may consist of multiple comment blocks,
separated by blank lines. Each block gets its own leaf.
"""
# The comment is stored in the prefix attribute, with no lineno of its
# own. So we only know at which line it ends. To find out at which line it
# starts, look at how many newlines the comment itself contains.
comments = []
lines = comment_prefix.split('\n')
index = 0
while index < len(lines):
comment_block = []
while index < len(lines) and lines[index].lstrip().startswith('#'):
comment_block.append(lines[index].strip())
index += 1
if comment_block:
new_lineno = comment_lineno + index - 1
comment_block[0] = comment_block[0].strip()
comment_block[-1] = comment_block[-1].strip()
comment_leaf = pytree.Leaf(
type=token.COMMENT,
value='\n'.join(comment_block),
context=('', (new_lineno, comment_column)))
comment_node = comment_leaf if not standalone else pytree.Node(
pygram.python_symbols.simple_stmt, [comment_leaf])
comments.append(comment_node)
while index < len(lines) and not lines[index].lstrip():
index += 1
return comments
# "Standalone line nodes" are tree nodes that have to start a new line in Python
# code (and cannot follow a ';' or ':'). Other nodes, like 'expr_stmt', serve as
# parents of other nodes but can come later in a line. This is a list of
# standalone line nodes in the grammar. It is meant to be exhaustive
# *eventually*, and we'll modify it with time as we discover more corner cases
# in the parse tree.
#
# When splicing a standalone comment (i.e. a comment that appears on its own
# line, not on the same line with other code), it's important to insert it into
# an appropriate parent of the node it's attached to. An appropriate parent
# is the first "standalone line node" in the parent chain of a node.
_STANDALONE_LINE_NODES = frozenset([
'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt',
'funcdef', 'classdef', 'decorated', 'file_input'
])
def _FindNodeWithStandaloneLineParent(node):
"""Find a node whose parent is a 'standalone line' node.
See the comment above _STANDALONE_LINE_NODES for more details.
Arguments:
node: node to start from
Returns:
Suitable node that's either the node itself or one of its ancestors.
"""
if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES:
return node
else:
# This is guaranteed to terminate because 'file_input' is the root node of
# any pytree.
return _FindNodeWithStandaloneLineParent(node.parent)
# "Statement nodes" are standalone statements. The don't have to start a new
# line.
_STATEMENT_NODES = frozenset(['simple_stmt']) | _STANDALONE_LINE_NODES
def _FindStmtParent(node):
"""Find the nearest parent of node that is a statement node.
Arguments:
node: node to start from
Returns:
Nearest parent (or node itself, if suitable).
"""
if pytree_utils.NodeName(node) in _STATEMENT_NODES:
return node
else:
return _FindStmtParent(node.parent)
def _FindAncestorAtIndent(node, indent):
"""Find an ancestor of node with the given indentation.
Arguments:
node: node to start from. This must not be the tree root.
indent: indentation string for the ancestor we're looking for.
See _AnnotateIndents for more details.
Returns:
An ancestor node with suitable indentation. If no suitable ancestor is
found, the closest ancestor to the tree root is returned.
"""
if node.parent.parent is None:
# Our parent is the tree root, so there's nowhere else to go.
return node
# If the parent has an indent annotation, and it's shorter than node's
# indent, this is a suitable ancestor.
# The reason for "shorter" rather than "equal" is that comments may be
# improperly indented (i.e. by three spaces, where surrounding statements
# have either zero or two or four), and we don't want to propagate them all
# the way to the root.
parent_indent = pytree_utils.GetNodeAnnotation(
node.parent, pytree_utils.Annotation.CHILD_INDENT)
if parent_indent is not None and indent.startswith(parent_indent):
return node
else:
# Keep looking up the tree.
return _FindAncestorAtIndent(node.parent, indent)
def _AnnotateIndents(tree):
"""Annotate the tree with child_indent annotations.
A child_indent annotation on a node specifies the indentation (as a string,
like " ") of its children. It is inferred from the INDENT child of a node.
Arguments:
tree: root of a pytree. The pytree is modified to add annotations to nodes.
Raises:
RuntimeError: if the tree is malformed.
"""
# Annotate the root of the tree with zero indent.
if tree.parent is None:
pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT,
'')
for child in tree.children:
if child.type == token.INDENT:
child_indent = pytree_utils.GetNodeAnnotation(
tree, pytree_utils.Annotation.CHILD_INDENT)
if child_indent is not None and child_indent != child.value:
raise RuntimeError('inconsistent indentation for child', (tree, child))
pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT,
child.value)
_AnnotateIndents(child)
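# Illustrative usage sketch (not part of the yapf API proper): parse a small
# snippet, splice its comments, and list the resulting COMMENT leaves. This
# assumes pytree_utils.ParseCodeToTree() is available, as elsewhere in yapf;
# if that helper differs in your version, build the tree with lib2to3 directly.
if __name__ == '__main__':
    src = 'x = 1  # trailing comment\n# standalone comment\ny = 2\n'
    parsed_tree = pytree_utils.ParseCodeToTree(src)
    SpliceComments(parsed_tree)
    for leaf in parsed_tree.leaves():
        if leaf.type == token.COMMENT:
            print(leaf.value)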
|
|
"""
Some JSON encoding and decoding utilities.
"""
from __future__ import absolute_import, division, print_function
import json
from datetime import date,datetime,time
import base64
import zlib
import logging
logger = logging.getLogger('jsonUtil')
class json_compressor:
"""Used for files and other large things sent over json.
Great for log files.
"""
@staticmethod
def compress(obj):
return base64.b64encode(zlib.compress(obj)) if obj else b''
@staticmethod
def uncompress(obj):
return zlib.decompress(base64.b64decode(obj)).decode('utf-8') if obj else ''
class datetime_converter:
@staticmethod
def dumps(obj):
return obj.isoformat()
@staticmethod
def loads(obj,name=None):
if ':' in obj:
if 'T' in obj or ' ' in obj:
center = ' '
if 'T' in obj:
center = 'T'
# must be datetime
if '.' in obj:
return datetime.strptime( obj, "%Y-%m-%d"+center+"%H:%M:%S.%f")
else:
return datetime.strptime( obj, "%Y-%m-%d"+center+"%H:%M:%S")
else:
# must be time
if '.' in obj:
return datetime.strptime( obj, "%H:%M:%S.%f")
else:
return datetime.strptime( obj, "%H:%M:%S")
else:
# must be date
return datetime.strptime( obj, "%Y-%m-%d")
class date_converter(datetime_converter):
@staticmethod
def loads(obj,name=None):
d = datetime_converter.loads(obj)
return date(d.year,d.month,d.day)
class time_converter(datetime_converter):
@staticmethod
def loads(obj,name=None):
d = datetime_converter.loads(obj)
return time(d.hour,d.minute,d.second,d.microsecond)
class binary_converter:
"""note that is is really only for decode of json, since python bytes are strings"""
@staticmethod
def dumps(obj,name=None):
return base64.b64encode(obj)
@staticmethod
def loads(obj,name=None):
return base64.b64decode(obj).decode('utf-8')
class bytearray_converter:
@staticmethod
def dumps(obj,name=None):
return base64.b64encode(str(obj))
@staticmethod
def loads(obj,name=None):
return bytearray(base64.b64decode(obj))
class set_converter:
@staticmethod
def dumps(obj):
return list(obj)
@staticmethod
def loads(obj,name=None):
return set(obj)
# do some dataclass json conversions
from iceprod.core import dataclasses
class var_converter:
@staticmethod
def dumps(obj):
return obj.__dict__
@staticmethod
def loads(obj,name=None):
ret = getattr(dataclasses,name)()
for k in obj:
setattr(ret,k,obj[k])
return ret
# convert the IFace
from iceprod.core import util
class iface_converter:
@staticmethod
def dumps(obj):
return obj.__dict__
@staticmethod
def loads(obj,name=None):
ret = util.IFace()
for k in obj:
setattr(ret,k,obj[k])
return ret
# do some default conversions
# for things like OrderedDict
import ast
from collections import OrderedDict
class repr_converter:
@staticmethod
def dumps(obj):
return repr(obj)
@staticmethod
def loads(obj,name=None):
        # look up the class named in the repr and rebuild it from its
        # literal-evaluated arguments
        parts = obj.split('(',1)
        type = parts[0]
        if type not in globals():
            raise Exception('unknown type %r for repr conversion'%type)
        parts2 = parts[1].rsplit(')',1)
        args = ast.literal_eval(parts2[0])
        if isinstance(args,tuple):
            ret = globals()[type](*args)
        else:
            ret = globals()[type](args)
return ret
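# Round-trip sketch for repr_converter (illustrative only): an OrderedDict such
# as OrderedDict([('a', 1)]) is dumped as the string "OrderedDict([('a', 1)])",
# and loads() rebuilds it by splitting off the class name, literal_eval-ing the
# argument, and calling the class found in globals().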
JSONConverters = {
'datetime':datetime_converter,
'date':date_converter,
'time':time_converter,
'binary':binary_converter,
'bytearray':bytearray_converter,
'OrderedDict':repr_converter,
'set':set_converter,
'IFace':iface_converter,
}
import inspect
for k in dict(inspect.getmembers(dataclasses,inspect.isclass)):
JSONConverters[k] = var_converter
def objToJSON(obj):
if isinstance(obj,(dict,list,tuple,str,int,float,bool)) or obj is None:
return obj
else:
name = obj.__class__.__name__
if name in JSONConverters:
return {'__jsonclass__':[name,JSONConverters[name].dumps(obj)]}
else:
logger.error('name: %s, obj: %r', name, obj)
raise Exception('Cannot encode %s class to JSON'%name)
def JSONToObj(obj):
ret = obj
if isinstance(obj,dict) and '__jsonclass__' in obj:
logger.info('try unpacking class')
try:
name = obj['__jsonclass__'][0]
if name not in JSONConverters:
raise Exception('class %r not found in converters'%name)
obj_repr = obj['__jsonclass__'][1]
ret = JSONConverters[name].loads(obj_repr,name=name)
except Exception as e:
logger.warning('error making json class: %r',e,exc_info=True)
return ret
# copied from tornado.escape so we don't have to include that project
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, sets, and dictionaries.
"""
if isinstance(obj, dict):
return {recursive_unicode(k): recursive_unicode(obj[k]) for k in obj}
elif isinstance(obj, set):
return {recursive_unicode(i) for i in obj}
elif isinstance(obj, list):
return [recursive_unicode(i) for i in obj]
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes):
return obj.decode("utf-8")
else:
return obj
def json_encode(value, indent=None):
"""JSON-encodes the given Python object."""
return json.dumps(recursive_unicode(value),default=objToJSON,separators=(',',':'), indent=indent).replace("</", "<\\/")
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(value,object_hook=JSONToObj)
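# Minimal round-trip sketch (illustrative only): a datetime value survives
# json_encode()/json_decode() via the __jsonclass__ converters defined above.
if __name__ == '__main__':
    original = {'created': datetime(2020, 1, 2, 3, 4, 5)}
    encoded = json_encode(original)
    decoded = json_decode(encoded)
    print(encoded)
    print(decoded['created'] == original['created'])  # expected: True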
|
|
import ConfigParser
from email import parser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
from getpass import getpass
import logging
import mimetypes
import os
import poplib
import smtplib
import sys
import time
log = logging.getLogger("mail")
# See if we can use GnuPG. If not, disable encryption.
use_gnupg = False
try:
import gnupg
use_gnupg = True
except ImportError:
log.error("Could not import gnupg. Encryption disabled.")
class Mail(object):
pass
class MailHandler(object):
# List of fingerprints of potential recipients. Necessary for encryption.
FPs = {"MaxDemian": "F0B840E33233E8C33CDA3BF5432B81517FCD8602"}
signature = "This is a signature."\
" There are many like it but this one is mine."
print signature
def __init__(self, account, username, configdir):
# ConfigParser setup.
log.debug("Initializing MailHandler with {}, {} in {}."
.format(account, username, configdir))
self.configdir = configdir
self.configfile = os.path.join(configdir, "gmxmail.ini")
self.config = ConfigParser.SafeConfigParser()
if not os.path.isfile(self.configfile):
log.error("Config file not found at {}.".format(self.configfile))
sys.exit(1)
self.config.read(self.configfile)
self.account = account or "mikar@gmx.de"
try:
self.username = username or self.get_opt("username")
except ConfigParser.NoOptionError:
self.username = self.account
log.debug("No username found. Defaulting to {}."
.format(self.account))
self.content_subtype = "plain"
self.content_charset = "utf-8"
        self.user_agent = "gmxmail (https://github.com/mikar/gmxmail)"
# Note: Could also use the config as a dictionary with:
# self.c = self.config._sections[self.account]
# But that will somehow skip the DEFAULT values so we'll stick with
# self.get_opt() for now.
def get_opt(self, option, optiontype=str):
"Parse an option from config.ini"
log.debug("Querying option: {}.".format(option))
section = self.account
if not self.config.has_section(section):
section = "DEFAULT"
log.debug("Section {} not found. Using DEFAULT".format(section))
if optiontype == int:
return self.config.getint(section, option)
elif optiontype == float:
return self.config.getfloat(section, option)
elif optiontype == bool:
return self.config.getboolean(section, option)
elif optiontype == str:
return self.config.get(section, option)
else:
log.error("Invalid option type: {} ({}).".format(option,
optiontype))
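    # Usage sketch (illustrative): self.get_opt("incport", int) returns the
    # incoming server port as an int, while self.get_opt("username") falls
    # back to the DEFAULT section when the account has no section of its own.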
def print_options(self):
"Print all available options. For debugging purposes."
for i in self.config.options(self.account):
print i + ":", self.config.get(self.account, i)
def get_mail(self):
"Get the mail. Uses poplib as GMX Freemail does not allow imap."
log.info("Getting mail for {}".format(self.account))
if not self.username:
self.username = self.account
password = getpass("Password for {}: ".format(self.username))
server = self.get_opt("incserver")
port = self.get_opt("incport", int)
# Unnecessarily check if we'll use SSL.
if self.get_opt("incsecurity") == "SSL":
session = poplib.POP3_SSL(server, port)
else:
session = poplib.POP3(server, port)
# If the loglevel is DEBUG (10), enable verbose logging.
if logging.getLogger().getEffectiveLevel() == 10:
session.set_debuglevel(1)
try:
session.user(self.username)
session.pass_(password)
except poplib.error_proto:
log.error("Authentification for {} failed. Wrong credentials?"
.format(self.account))
sys.exit(1)
        messages = [session.retr(i) for i in range(1, len(session.list()[1]) + 1)]
messages = ["\n".join(msg[1]) for msg in messages]
messages = [parser.Parser().parsestr(msg) for msg in messages]
# TODO: Make this prettier. Example:
# http://g33k.wordpress.com/2009/02/04/check-gmail-the-python-way/
print "You have {} new messages.".format(len(messages))
for m in messages:
print "{}, [{}], ({})".format(m["From"], m["Subject"], m["Date"])
session.quit()
def send_mail(self, recipient, header, message,
sign, encrypt, attachkey, dryrun):
"Sends a mail via SMTP."
log.info("Sending mail to {} ({}). Sign/Encrypt/AttachKey: {}/{}/{}."
.format(recipient, header, sign, encrypt, attachkey))
recipients = {i for i in recipient.split(",") if "@" in i}
if not recipients:
log.error("No valid recipients in {}.".format(recipients))
return
# TODO: Hash the password with sha256+salt and only ask once at start-
        # up, if we implement a curses UI.
if not self.username:
self.username = self.account
password = getpass("Password for {}: ".format(self.username))
server = self.get_opt("outserver")
port = self.get_opt("outport", int)
# Split header into CC, BCC and Subject.
cc, bcc = "", ""
header = header.split("::")
if len(header) == 3:
cc, bcc, subject = header[0], header[1], header[2]
elif len(header) == 2:
cc, subject = header[0], header[1]
else:
subject = header[0]
cc = {i for i in cc.split(",") if "@" in i}
bcc = {i for i in bcc.split(",") if "@" in i}
# Initialize our message to attach signatures/keyfiles, body etc to.
msg = MIMEMultipart()
if sign or encrypt:
gpg = gnupg.GPG()
keyid = self.get_opt("keyid")
keyfp = self.get_opt("keyfp")
for i in gpg.list_keys():
if keyid in i["keyid"]:
break
else:
log.error("{} not found in gpg.list_keys().".format(keyid))
sys.exit(1)
if sign and encrypt:
encrypted = str(gpg.encrypt(message, self.FPs["MaxDemian"],
sign=keyfp))
if encrypted:
encryptedtext = MIMEText(
_text=encrypted,
_subtype=self.content_subtype,
_charset=self.content_charset
)
msg.attach(encryptedtext)
else:
log.error("Failed to encrypt the message.")
sys.exit(1)
elif sign:
# message = msg.as_string().replace('\n', '\r\n')
signed = str(gpg.sign(message, keyid=keyid))
if signed:
signedtext = MIMEText(
_text=signed,
_subtype=self.content_subtype,
_charset=self.content_charset
)
msg.attach(signedtext)
else:
log.error("Failed to sign the message.")
sys.exit(1)
elif encrypt:
encrypted = str(gpg.encrypt(message, self.FPs["MaxDemian"]))
if encrypted:
encryptedtext = MIMEText(
_text=encrypted,
_subtype=self.content_subtype,
_charset=self.content_charset
)
msg.attach(encryptedtext)
else:
log.error("Failed to encrypt the message.")
sys.exit(1)
else:
log.error("No GPG keys found.")
pubkeyloc = None
if attachkey: # Attach GPG Public attachkey.
pubkeyfile = self.get_opt("keyfile")
if os.path.isfile(pubkeyfile):
pubkeyloc = pubkeyfile
elif os.path.isfile(os.path.join(self.configdir, pubkeyfile)):
pubkeyloc = os.path.join(self.configdir, pubkeyfile)
else:
log.error("Public attachkey '{}' could not be found."
.format(pubkeyfile))
if pubkeyloc:
ctype, encoding = mimetypes.guess_type(pubkeyloc)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
with open(pubkeyloc) as f:
# keyatt = f.read()
keyatt = MIMEText(
f.read(),
_subtype=subtype,
_charset=self.content_charset)
keyatt.add_header(
'Content-Disposition',
'attachment',
filename=pubkeyfile
)
msg.attach(keyatt)
log.info("Attached public attachkey {} to message."
.format(pubkeyfile))
else:
log.error("{} is not a textfile. Sure it's a GPG Key?"
.format(pubkeyloc))
# Add Mime infos to the message.
msg["From"] = self.account
msg["To"] = ", ".join(recipients)
if cc:
msg["Cc"] = ", ".join(cc)
msg["Date"] = formatdate(time.time())
msg["User-Agent"] = self.user_agent
msg["Subject"] = subject
# If --dryrun is enabled, we exit here.
if dryrun:
print msg
sys.exit()
session = smtplib.SMTP(server, port)
# If the loglevel is DEBUG (10), enable verbose logging.
# if logging.getLogger().getEffectiveLevel() == 10:
# session.set_debuglevel(1)
if self.get_opt("outsecurity"):
session.ehlo()
session.starttls()
session.ehlo()
# Union of the three sets.
recipients = recipients | cc | bcc
try:
session.login(self.username, password)
except smtplib.SMTPAuthenticationError:
log.error("Authentication failed. Wrong credentials?")
sys.exit(1)
# TODO: Add footer (with user-agent, timestamp?)
session.sendmail(self.account, recipients, msg.as_string())
log.info("Mail sent from {} to {} ({}).".format(self.account,
recipients, subject))
session.quit()
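# Illustrative usage sketch only: the account, username and config directory
# below are placeholders, and a gmxmail.ini with a section for that account
# must exist in the config directory for MailHandler to initialize.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    handler = MailHandler("user@example.com", "user@example.com",
                          os.path.expanduser("~/.config/gmxmail"))
    handler.print_options()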
|
|
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from nose import SkipTest
from uuid import uuid4
from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
from swift_testing import check_response, retry, skip, skip2, skip3, \
swift_test_perm, web_front_end
class TestContainer(unittest.TestCase):
def setUp(self):
if skip:
raise SkipTest
self.name = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEquals(resp.status, 201)
def tearDown(self):
if skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name + '?format=json',
'', {'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, obj):
conn.request('DELETE',
'/'.join([parsed.path, self.name, obj['name']]), '',
{'X-Auth-Token': token})
return check_response(conn)
while True:
resp = retry(get)
body = resp.read()
self.assert_(resp.status // 100 == 2, resp.status)
objs = json.loads(body)
if not objs:
break
for obj in objs:
resp = retry(delete, obj)
resp.read()
self.assertEquals(resp.status, 204)
def delete(url, token, parsed, conn):
conn.request('DELETE', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEquals(resp.status, 204)
def test_multi_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, name: value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(post, 'X-Container-Meta-One', '1')
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-one'), '1')
resp = retry(post, 'X-Container-Meta-Two', '2')
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-one'), '1')
self.assertEquals(resp.getheader('x-container-meta-two'), '2')
def test_unicode_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, name, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, name: value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
uni_key = u'X-Container-Meta-uni\u0E12'
uni_value = u'uni\u0E12'
if (web_front_end == 'integral'):
resp = retry(post, uni_key, '1')
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader(uni_key.encode('utf-8')), '1')
resp = retry(post, 'X-Container-Meta-uni', uni_value)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('X-Container-Meta-uni'),
uni_value.encode('utf-8'))
if (web_front_end == 'integral'):
resp = retry(post, uni_key, uni_value)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader(uni_key.encode('utf-8')),
uni_value.encode('utf-8'))
def test_PUT_metadata(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn, name, value):
conn.request('PUT', parsed.path + '/' + name, '',
{'X-Auth-Token': token,
'X-Container-Meta-Test': value})
return check_response(conn)
def head(url, token, parsed, conn, name):
conn.request('HEAD', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
name = uuid4().hex
resp = retry(put, name, 'Value')
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
name = uuid4().hex
resp = retry(put, name, '')
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), None)
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), None)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
def test_POST_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, value):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Meta-Test': value})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), None)
resp = retry(get)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), None)
resp = retry(post, 'Value')
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(head)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(get)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEquals(resp.getheader('x-container-meta-test'), 'Value')
def test_PUT_bad_metadata(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn, name, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('PUT', parsed.path + '/' + name, '', headers)
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
resp.read()
self.assertEquals(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 404)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
name = uuid4().hex
resp = retry(
put, name,
{'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
resp.read()
self.assertEquals(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 404)
name = uuid4().hex
headers = {}
for x in xrange(MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
name = uuid4().hex
headers = {}
for x in xrange(MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(put, name, headers)
resp.read()
self.assertEquals(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 404)
name = uuid4().hex
headers = {}
header_value = 'k' * MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
size += 4 + MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size - 1)
resp = retry(put, name, headers)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 204)
name = uuid4().hex
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size)
resp = retry(put, name, headers)
resp.read()
self.assertEquals(resp.status, 400)
resp = retry(delete, name)
resp.read()
self.assertEquals(resp.status, 404)
def test_POST_bad_metadata(self):
if skip:
raise SkipTest
def post(url, token, parsed, conn, extra_headers):
headers = {'X-Auth-Token': token}
headers.update(extra_headers)
conn.request('POST', parsed.path + '/' + self.name, '', headers)
return check_response(conn)
resp = retry(
post,
{'X-Container-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(
post,
{'X-Container-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
resp.read()
self.assertEquals(resp.status, 400)
resp = retry(
post,
{'X-Container-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(
post,
{'X-Container-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
resp.read()
self.assertEquals(resp.status, 400)
headers = {}
for x in xrange(MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEquals(resp.status, 204)
headers = {}
for x in xrange(MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
resp = retry(post, headers)
resp.read()
self.assertEquals(resp.status, 400)
headers = {}
header_value = 'k' * MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
size += 4 + MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size - 1)
resp = retry(post, headers)
resp.read()
self.assertEquals(resp.status, 204)
headers['X-Container-Meta-k'] = \
'v' * (MAX_META_OVERALL_SIZE - size)
resp = retry(post, headers)
resp.read()
self.assertEquals(resp.status, 400)
def test_public_container(self):
if skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET', parsed.path + '/' + self.name)
return check_response(conn)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '), err)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*,.rlistings'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
resp = retry(get)
resp.read()
self.assertEquals(resp.status, 204)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, 'X-Container-Read': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '), err)
def test_cross_account_container(self):
if skip or skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the second account
def get2(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 403)
# Make the container accessible by the second account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': swift_test_perm[1],
'X-Container-Write': swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can now use the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 204)
# Make the container private again
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token, 'X-Container-Read': '',
'X-Container-Write': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can't access the container with the second account again
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 403)
def test_cross_account_public_container(self):
if skip or skip2:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the second account
def get2(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 403)
# Make the container completely public
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*,.rlistings'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can now read the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 204)
# But we shouldn't be able to write with the second account
def put2(url, token, parsed, conn):
conn.request('PUT', first_account[0] + '/' + self.name + '/object',
'test object', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put2, use_account=2)
resp.read()
self.assertEquals(resp.status, 403)
# Now make the container also writeable by the second account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Write': swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can still read the container with the second account
resp = retry(get2, use_account=2)
resp.read()
self.assertEquals(resp.status, 204)
# And that we can now write with the second account
resp = retry(put2, use_account=2)
resp.read()
self.assertEquals(resp.status, 201)
def test_nonadmin_user(self):
if skip or skip3:
raise SkipTest
# Obtain the first account's string
first_account = ['unknown']
def get1(url, token, parsed, conn):
first_account[0] = parsed.path
conn.request('HEAD', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get1)
resp.read()
# Ensure we can't access the container with the third account
def get3(url, token, parsed, conn):
conn.request('GET', first_account[0] + '/' + self.name, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get3, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# Make the container accessible by the third account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Read': swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can now read the container with the third account
resp = retry(get3, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
# But we shouldn't be able to write with the third account
def put3(url, token, parsed, conn):
conn.request('PUT', first_account[0] + '/' + self.name + '/object',
'test object', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put3, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# Now make the container also writeable by the third account
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.name, '',
{'X-Auth-Token': token,
'X-Container-Write': swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEquals(resp.status, 204)
# Ensure we can still read the container with the third account
resp = retry(get3, use_account=3)
resp.read()
self.assertEquals(resp.status, 204)
# And that we can now write with the third account
resp = retry(put3, use_account=3)
resp.read()
self.assertEquals(resp.status, 201)
def test_long_name_content_type(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn):
container_name = 'X' * 2048
conn.request('PUT', '%s/%s' % (parsed.path, container_name),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEquals(resp.status, 400)
self.assertEquals(resp.getheader('Content-Type'),
'text/html; charset=UTF-8')
def test_null_name(self):
if skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/abc%%00def' % parsed.path, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
if (web_front_end == 'apache2'):
self.assertEquals(resp.status, 404)
else:
self.assertEquals(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEquals(resp.status, 412)
if __name__ == '__main__':
unittest.main()
|
|
import pdb
import struct
import sys
import gzip
from io import BufferedReader
from collections import Counter, defaultdict
import collections.abc
from .gr import Gr, Entry
MAX_LEN = 2**29-1 # As defined by max size supported by bai indexes.
def bisect_left(a, x):
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid][1] < x: lo = mid+1
else: hi = mid
return lo
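# Example: with a = [(0, 9, 1), (10, 19, 2), (20, 29, 0)] (start, stop, depth
# tuples), bisect_left(a, 15) returns 1 -- the index of the leftmost element
# whose stop coordinate (a[i][1]) is >= 15.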
def zero_coverage():
return [[0, MAX_LEN+1, 0]]
class Cov(collections.abc.Mapping):
def __init__(self, path):
chrom_depths = defaultdict(Counter)
with Bam(path) as bam:
for row in bam.coverage(): # Is zero/one based counting correct here
depths = chrom_depths[row[0]]
depths[row[1]] += 1
depths[row[2]+1] -= 1
self._data = defaultdict(zero_coverage)
for chrom, depths in chrom_depths.items():
start = 0
depth = 0
cov = []
for pos, change in sorted(depths.items()):
cov.append((start, pos-1, depth))
start = pos
depth += change
if depth != 0: # Should never ever happen under any circumstance.
raise RuntimeError("Fatal error in initial coverage calculation algorithm")
cov.append((start, MAX_LEN+1, depth))
self._data[chrom] = cov
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def calculate(self, region, depth, name=""):
total = bool(name)
results = defaultdict(CoverageInfo)
for entry in region:
if entry.stop >= entry.start:
start = entry.start
stop = entry.stop
else: # if an insertion then calculate from base before to base afterwards
start = entry.stop
stop = entry.start
if not total:
name = entry.name
info = results[name]
info.name = name
allcovered = True
cchrom = self._data[entry.chrom]
# leftmost element where coverage.stop >= start
for i in range(bisect_left(cchrom, start), len(cchrom)):
cstart, cstop, cdepth = cchrom[i]
if cstart > stop:
break
elif cstop >= start:
bases = min(stop, cstop) - max(start, cstart) + 1
if cdepth >= depth:
info.bases_covered += bases
info.depth_covered += bases * cdepth
info.range_covered.add(Entry(entry.chrom, max(start, cstart), min(stop, cstop), entry.name, entry.strand))
else:
info.bases_uncovered += bases
info.depth_uncovered += bases * cdepth
info.range_uncovered.add(Entry(entry.chrom, max(start, cstart), min(stop, cstop), entry.name, entry.strand))
allcovered = False
if allcovered:
info.components_covered += 1
else:
info.components_uncovered += 1
results = sorted(results.values())
for info in results:
info.depth_covered = info.depth_covered // max(info.bases_covered, 1)
info.depth_uncovered = info.depth_uncovered // max(info.bases_uncovered, 1)
info.range_covered = info.range_covered.merged
info.range_uncovered = info.range_uncovered.merged
return results[0] if total else results
class CoverageInfo(object):
def __repr__(self):
return "{} {}%".format(self.name, self.percent_covered)
def __init__(self):
self.name = ""
self.depth_covered = 0
self.depth_uncovered = 0
self.bases_covered = 0
self.bases_uncovered = 0
self.range_covered = Gr()
self.range_uncovered = Gr()
self.components_covered = 0
self.components_uncovered = 0
def __lt__(self, other):
return self.name < other.name
@property
def depth(self):
return ((self.depth_covered * self.bases_covered) + (self.depth_uncovered * self.bases_uncovered)) // (self.bases or 1)
@property
def percent_covered(self):
return float(self.bases_covered*100) / (self.bases or 1)
@property
def percent_uncovered(self):
return 100 - self.percent_covered
@property
def range(self):
return self.range_covered.combined_with(self.range_uncovered).merged
@property
def bases(self):
return self.bases_covered + self.bases_uncovered
@property
def components(self):
return self.components_covered + self.components_uncovered
@property
def percent_components_covered(self):
return float(self.components_covered*100) / (self.components or 1)
@property
def percent_components_uncovered(self):
return 100 - self.percent_components_covered
@property
def completely_covered(self):
return not(self.incompletely_covered)
@property
def incompletely_covered(self):
return bool(self.bases_uncovered)
class Bam(object):
def __init__(self, path):
""" May raise IOError or RuntimeError
"""
self.path = path
self.bam = None
self.bai = None
try:
self.bam = BufferedReader(gzip.open(path, "rb"))
if self.bam.read(4) != b"BAM\1":
self.bam.close()
raise RuntimeError(f"{path} is not a BAM file!")
len_header_text = struct.unpack("<i", self.bam.read(4))[0]
header_text = self.bam.read(len_header_text)
num_ref_seq = struct.unpack("<i", self.bam.read(4))[0]
chr_2_ref = {}
self.ref_2_chr = [None] * num_ref_seq
for x in range(0, num_ref_seq):
len_ref_name = struct.unpack("<i", self.bam.read(4))[0]
ref_name = self.bam.read(len_ref_name - 1)
chrom = ref_name.decode("utf-8")
self.ref_2_chr[x] = chrom
chr_2_ref[chrom] = x
self.bam.read(5)
except struct.error:
raise RuntimeError(f"{path} has a truncated header")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.bam.close()
def coverage(self):
duplicate = 0x400
secondary = 0x100
unmapped = 0x4
bad = duplicate | secondary | unmapped
try:
while True:
read = self.bam.read(36)
if len(read) == 0:
break
block_size, ref_id, pos, bin_mq_nl, flag_nc, len_seq, next_ref_id, next_pos, len_template = struct.unpack("<iiiIIiiii", read)
                flag = flag_nc >> 16  # equivalent to flag_nc // 0x10000
if (ref_id == -1) or (flag & bad):
self.bam.read(block_size-32)
else:
len_read_name = bin_mq_nl & 0xFF
n_cigar_op = flag_nc & 0xFFFF
direction = "-" if flag & 0x10 else "+"
start = pos + 1
read_name = self.bam.read(len_read_name - 1)
self.bam.read(1)
cigar_bytes = n_cigar_op * 4
length = 0
for cigar in struct.unpack("<" + "I" * n_cigar_op, self.bam.read(cigar_bytes)):
cigar_op = cigar & 0xF
if cigar_op in (0, 2, 7, 8):
length += cigar // 0x10
elif cigar_op == 3: # skip an intron
if length:
yield (self.ref_2_chr[ref_id], start, start + length - 1, direction)
start += length + (cigar//0x10)
length = 0
if length:
yield (self.ref_2_chr[ref_id], start, start + length - 1, direction)
self.bam.read(block_size - 32 - len_read_name - cigar_bytes)
except struct.error:
raise RuntimeError("{} is truncated".format(self.path))
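# Illustrative usage sketch only: the BAM path comes from the command line, and
# the region coordinates and depth threshold of 20 are arbitrary examples. It
# assumes Gr supports add() and iteration, as used in Cov.calculate above.
if __name__ == "__main__":
    cov = Cov(sys.argv[1])
    region = Gr()
    region.add(Entry("1", 1000, 2000, "example_region", "+"))
    info = cov.calculate(region, depth=20, name="example_region")
    print(info)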
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node
from libcloud.compute.types import Provider
from libcloud.compute.drivers.elasticstack import (ElasticStackException,
ElasticStackBaseConnection,
ElasticStackBaseNodeDriver as ElasticStack)
from libcloud.compute.drivers.elastichosts import \
(ElasticHostsBaseNodeDriver as ElasticHosts)
from libcloud.compute.drivers.skalicloud import \
(SkaliCloudNodeDriver as SkaliCloud)
from libcloud.compute.drivers.serverlove import \
(ServerLoveNodeDriver as ServerLove)
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
class ElasticStackTestCase(object):
def setUp(self):
# Re-use ElasticHosts fixtures for the base ElasticStack platform tests
"""ElasticStack.type = Provider.ELASTICHOSTS
ElasticStack.api_name = 'elastichosts'
ElasticStackBaseConnection.host = 'test.com'
ElasticStack.connectionCls.conn_classes = (None,
ElasticStackMockHttp)
ElasticStack._standard_drives = ElasticHosts._standard_drives
self.driver = ElasticStack('foo', 'bar')
"""
self.mockHttp = ElasticStackMockHttp
self.mockHttp.type = None
self.node = Node(id=72258, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
def test_invalid_creds(self):
self.mockHttp.type = 'UNAUTHORIZED'
try:
self.driver.list_nodes()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, InvalidCredsError))
else:
self.fail('test should have thrown')
def test_malformed_response(self):
self.mockHttp.type = 'MALFORMED'
try:
self.driver.list_nodes()
except MalformedResponseError:
pass
else:
self.fail('test should have thrown')
def test_parse_error(self):
self.mockHttp.type = 'PARSE_ERROR'
try:
self.driver.list_nodes()
except Exception:
e = sys.exc_info()[1]
self.assertTrue(str(e).find('X-Elastic-Error') != -1)
else:
self.fail('test should have thrown')
def test_ex_set_node_configuration(self):
success = self.driver.ex_set_node_configuration(node=self.node,
name='name',
cpu='2')
self.assertTrue(success)
def test_ex_set_node_configuration_invalid_keys(self):
try:
self.driver.ex_set_node_configuration(node=self.node, foo='bar')
except ElasticStackException:
pass
else:
self.fail('Invalid option specified, but an exception was not thrown')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertTrue(isinstance(nodes, list))
self.assertEqual(len(nodes), 1)
node = nodes[0]
self.assertEqual(node.public_ips[0], "1.2.3.4")
self.assertEqual(node.public_ips[1], "1.2.3.5")
self.assertEqual(node.extra['smp'], 1)
self.assertEqual(node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3")
def test_list_sizes(self):
images = self.driver.list_sizes()
self.assertEqual(len(images), 6)
image = [i for i in images if i.id == 'small'][0]
self.assertEqual(image.id, 'small')
self.assertEqual(image.name, 'Small instance')
self.assertEqual(image.cpu, 2000)
self.assertEqual(image.ram, 1700)
self.assertEqual(image.disk, 160)
self.assertTrue(isinstance(image.price, float))
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), len(self.driver._standard_drives))
for uuid, values in list(self.driver._standard_drives.items()):
self.assertEqual(len([image for image in images if image.id == uuid]), 1)
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.reboot_node(node))
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.destroy_node(node))
def test_create_node(self):
sizes = self.driver.list_sizes()
size = [s for s in sizes if \
s.id == 'large'][0]
image = self.image
self.assertTrue(self.driver.create_node(name="api.ivan.net.nz",
image=image, size=size))
class ElasticHostsTestCase(ElasticStackTestCase, unittest.TestCase):
def setUp(self):
ElasticHosts.connectionCls.conn_classes = (None,
ElasticStackMockHttp)
self.driver = ElasticHosts('foo', 'bar')
images = self.driver.list_images()
self.image = [i for i in images if \
i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0]
ElasticStackTestCase.setUp(self)
unittest.TestCase.setUp(self)
class SkaliCloudTestCase(ElasticStackTestCase, unittest.TestCase):
def setUp(self):
SkaliCloud.connectionCls.conn_classes = (None,
ElasticStackMockHttp)
self.driver = SkaliCloud('foo', 'bar')
images = self.driver.list_images()
self.image = [i for i in images if \
i.id == '90aa51f2-15c0-4cff-81ee-e93aa20b9468'][0]
ElasticStackTestCase.setUp(self)
unittest.TestCase.setUp(self)
class ServerLoveTestCase(ElasticStackTestCase, unittest.TestCase):
def setUp(self):
ServerLove.connectionCls.conn_classes = (None,
ElasticStackMockHttp)
self.driver = ServerLove('foo', 'bar')
images = self.driver.list_images()
self.image = [i for i in images if \
i.id == '679f5f44-0be7-4745-a658-cccd4334c1aa'][0]
ElasticStackTestCase.setUp(self)
unittest.TestCase.setUp(self)
class ElasticStackMockHttp(MockHttp):
fixtures = ComputeFileFixtures('elastichosts')
def _servers_info_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT])
def _servers_info_MALFORMED(self, method, url, body, headers):
body = "{malformed: '"
return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT])
def _servers_info_PARSE_ERROR(self, method, url, body, headers):
return (505, body, {}, httplib.responses[httplib.NO_CONTENT])
def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers):
return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers):
return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
def _drives_create(self, method, url, body, headers):
body = self.fixtures.load('drives_create.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method, url, body, headers):
# ElasticHosts image
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_90aa51f2_15c0_4cff_81ee_e93aa20b9468_gunzip(self, method, url, body, headers):
        # SkaliCloud image
return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_679f5f44_0be7_4745_a658_cccd4334c1aa_gunzip(self, method, url, body, headers):
# ServerLove image
return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers):
body = self.fixtures.load('drives_info.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _servers_create(self, method, url, body, headers):
body = self.fixtures.load('servers_create.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _servers_info(self, method, url, body, headers):
body = self.fixtures.load('servers_info.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _servers_72258_set(self, method, url, body, headers):
body = '{}'
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
# Copyright 2013 by David Arenillas and Anthony Mathelier. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides read access to a JASPAR5 formatted database.
This module requires MySQLdb to be installed.
Example, substituting your database credentials as
appropriate:
>>> from Bio.motifs.jaspar.db import JASPAR5
>>>
>>> JASPAR_DB_HOST = "hostname.example.org"
>>> JASPAR_DB_NAME = "JASPAR_2013"
>>> JASPAR_DB_USER = "guest"
>>> JASPAR_DB_PASS = "guest"
>>>
>>> DFLT_COLLECTION = 'CORE'
>>> jdb = JASPAR5(
... host=JASPAR_DB_HOST,
... name=JASPAR_DB_NAME,
... user=JASPAR_DB_USER,
... password=JASPAR_DB_PASS
... )
>>>
>>>
>>> ets1 = jdb.fetch_motif_by_id('MA0098')
>>> print(ets1)
TF name ETS1
Matrix ID MA0098.1
Collection CORE
TF class Winged Helix-Turn-Helix
TF family Ets
Species 9606
Taxonomic group vertebrates
Accession ['CAG47050']
Data type used SELEX
Medline 1542566
PAZAR ID TF0000070
Comments -
Matrix:
0 1 2 3 4 5
A: 4.00 17.00 0.00 0.00 0.00 5.00
C: 16.00 0.00 1.00 39.00 39.00 3.00
G: 4.00 0.00 0.00 1.00 0.00 17.00
T: 16.00 23.00 39.00 0.00 1.00 15.00
>>>
>>> motifs = jdb.fetch_motifs(
... collection = 'CORE',
... tax_group = ['vertebrates', 'insects'],
... tf_class = 'Winged Helix-Turn-Helix',
... tf_family = ['Forkhead', 'Ets'],
... min_ic = 12
... )
>>>
>>> for motif in motifs:
... pass # do something with the motif
"""
from __future__ import print_function
import warnings
from Bio import BiopythonWarning
from Bio import MissingPythonDependencyError
try:
import MySQLdb as mdb
except ImportError:
raise MissingPythonDependencyError("Install MySQLdb if you want to use "
"Bio.motifs.jaspar.db")
from Bio.Alphabet.IUPAC import unambiguous_dna as dna
from Bio.motifs import jaspar, matrix
JASPAR_DFLT_COLLECTION = 'CORE'
class JASPAR5(object):
"""
Class representing a JASPAR5 DB. The methods within are loosely based
on the perl TFBS::DB::JASPAR5 module.
Note: We will only implement reading of JASPAR motifs from the DB.
Unlike the perl module, we will not attempt to implement any methods to
store JASPAR motifs or create a new DB at this time.
"""
def __init__(self, host=None, name=None, user=None, password=None):
"""
Construct a JASPAR5 instance and connect to specified DB
Arguments:
host - host name of the JASPAR DB server
name - name of the JASPAR database
user - user name to connect to the JASPAR DB
password - JASPAR DB password
"""
self.name = name
self.host = host
self.user = user
self.password = password
self.dbh = mdb.connect(host, user, password, name)
def __str__(self):
"""
Return a string representation of the JASPAR5 DB connection.
"""
text = "%s\@%s:%s" % (self.user, self.host, self.name)
return text
def fetch_motif_by_id(self, id):
"""
Fetch a single JASPAR motif from the DB by its JASPAR matrix ID
(e.g. 'MA0001.1').
Arguments:
- id - JASPAR matrix ID. This may be a fully specified ID including
the version number (e.g. MA0049.2) or just the base ID (e.g.
MA0049). If only a base ID is provided, the latest version is
returned.
Returns:
- A Bio.motifs.jaspar.Motif object
**NOTE:** The perl TFBS module allows you to specify the type of matrix
to return (PFM, PWM, ICM) but matrices are always stored in JASPAR as
PFMs so this does not really belong here. Once a PFM is fetched the
pwm() and pssm() methods can be called to return the normalized and
log-odds matrices.
"""
# separate stable ID and version number
(base_id, version) = jaspar.split_jaspar_id(id)
if not version:
# if ID contains no version portion, fetch the latest version
version = self._fetch_latest_version(base_id)
# fetch internal JASPAR matrix ID - also a check for validity
int_id = None
if version:
int_id = self._fetch_internal_id(base_id, version)
# fetch JASPAR motif using internal ID
motif = None
if int_id:
motif = self._fetch_motif_by_internal_id(int_id)
return motif
def fetch_motifs_by_name(self, name):
"""
Fetch a list of JASPAR motifs from a JASPAR DB by the given TF name(s).
Arguments:
name - a single name or list of names
Returns:
A list of Bio.motifs.jaspar.Motif objects
Notes:
Names are not guaranteed to be unique. There may be more than one
motif with the same name. Therefore even if name specifies a single
name, a list of motifs is returned. This just calls
self.fetch_motifs(collection = None, tf_name = name).
This behaviour is different from the TFBS perl module's
get_Matrix_by_name() method which always returns a single matrix,
issuing a warning message and returning the first matrix retrieved
in the case where multiple matrices have the same name.
"""
return self.fetch_motifs(collection=None, tf_name=name)
def fetch_motifs(
self, collection=JASPAR_DFLT_COLLECTION, tf_name=None, tf_class=None,
tf_family=None, matrix_id=None, tax_group=None, species=None,
pazar_id=None, data_type=None, medline=None, min_ic=0, min_length=0,
min_sites=0, all=False, all_versions=False
):
"""
Fetch a jaspar.Record (list) of motifs based on the provided selection
criteria.
Arguments::
Except where obvious, all selection criteria arguments may be
specified as a single value or a list of values. Motifs must
meet ALL the specified selection criteria to be returned with
the precedence exceptions noted below.
all - Takes precedence over all other selection criteria.
Every motif is returned. If 'all_versions' is also
specified, all versions of every motif are returned,
otherwise just the latest version of every motif is
returned.
matrix_id - Takes precedence over all other selection criteria
except 'all'. Only motifs with the given JASPAR
matrix ID(s) are returned. A matrix ID may be
specified as just a base ID or full JASPAR IDs
including version number. If only a base ID is
provided for specific motif(s), then just the latest
version of those motif(s) are returned unless
'all_versions' is also specified.
collection - Only motifs from the specified JASPAR collection(s)
are returned. NOTE - if not specified, the collection
defaults to CORE for all other selection criteria
except 'all' and 'matrix_id'. To apply the other
selection criteria across all JASPAR collections,
explicitly set collection=None.
tf_name - Only motifs with the given name(s) are returned.
tf_class - Only motifs of the given TF class(es) are returned.
tf_family - Only motifs from the given TF families are returned.
tax_group - Only motifs belonging to the given taxonomic
supergroups are returned (e.g. 'vertebrates',
'insects', 'nematodes' etc.)
species - Only motifs derived from the given species are
returned. Species are specified as taxonomy IDs.
data_type - Only motifs generated with the given data type (e.g.
('ChIP-seq', 'PBM', 'SELEX' etc.) are returned.
NOTE - must match exactly as stored in the database.
pazar_id - Only motifs with the given PAZAR TF ID are returned.
medline - Only motifs with the given medline (PubMed IDs) are
returned.
min_ic - Only motifs whose profile matrices have at least this
information content (specificity) are returned.
min_length - Only motifs whose profiles are of at least this
length are returned.
min_sites - Only motifs compiled from at least this many binding
sites are returned.
all_versions- Unless specified, just the latest version of motifs
determined by the other selection criteria are
returned. Otherwise all versions of the selected
motifs are returned.
Returns:
- A Bio.motifs.jaspar.Record (list) of motifs.
"""
# Fetch the internal IDs of the motifs using the criteria provided
int_ids = self._fetch_internal_id_list(
collection=collection,
tf_name=tf_name,
tf_class=tf_class,
tf_family=tf_family,
matrix_id=matrix_id,
tax_group=tax_group,
species=species,
pazar_id=pazar_id,
data_type=data_type,
medline=medline,
all=all,
all_versions=all_versions
)
record = jaspar.Record()
"""
Now further filter motifs returned above based on any specified
matrix specific criteria.
"""
for int_id in int_ids:
motif = self._fetch_motif_by_internal_id(int_id)
# Filter motifs to those with matrix IC greater than min_ic
if min_ic:
if motif.pssm.mean() < min_ic:
continue
# Filter motifs to those with minimum length of min_length
if min_length:
if motif.length < min_length:
continue
# XXX We could also supply a max_length filter.
"""
Filter motifs to those compiled from at least this many sites.
The perl TFBS module allows the column sums to differ, but in
practice they should all be equal, so here we simply use the
first column sum.
"""
if min_sites:
num_sites = sum(
[motif.counts[nt][0] for nt in motif.alphabet.letters]
)
if num_sites < min_sites:
continue
record.append(motif)
return record
def _fetch_latest_version(self, base_id):
"""
Get the latest version number for the given base_id.
"""
cur = self.dbh.cursor()
cur.execute("""select VERSION from MATRIX where BASE_id = %s
order by VERSION desc limit 1""", (base_id,))
row = cur.fetchone()
latest = None
if row:
latest = row[0]
else:
warnings.warn("Failed to fetch latest version number for JASPAR "
"motif with base ID '{0}'. "
"No JASPAR motif with this base ID appears to exist "
"in the database.".format(base_id), BiopythonWarning)
return latest
def _fetch_internal_id(self, base_id, version):
"""
Fetch the internal ID for a given base ID and version. This also
serves as a check that the base ID / version combination exists.
"""
cur = self.dbh.cursor()
cur.execute("""select id from MATRIX where BASE_id = %s
and VERSION = %s""", (base_id, version))
row = cur.fetchone()
int_id = None
if row:
int_id = row[0]
else:
warnings.warn("Failed to fetch internal database ID for JASPAR "
"motif with matrix ID '{0}.{1}'. "
"No JASPAR motif with this matrix ID appears to "
"exist.".format(base_id, version), BiopythonWarning)
return int_id
def _fetch_motif_by_internal_id(self, int_id):
# fetch basic motif information
cur = self.dbh.cursor()
cur.execute("""select BASE_ID, VERSION, COLLECTION, NAME from MATRIX
where id = %s""", (int_id,))
row = cur.fetchone()
# This should never happen as it is an internal method. If it does
# we should probably raise an exception
if not row:
warnings.warn("Could not fetch JASPAR motif with internal "
"ID = {0}".format(int_id), BiopythonWarning)
return None
base_id = row[0]
version = row[1]
collection = row[2]
name = row[3]
matrix_id = "".join([base_id, '.', str(version)])
# fetch the counts matrix
counts = self._fetch_counts_matrix(int_id)
# Create new JASPAR motif
motif = jaspar.Motif(
matrix_id, name, collection=collection, counts=counts
)
# fetch species
cur.execute("""select TAX_ID from MATRIX_SPECIES
where id = %s""", (int_id,))
tax_ids = []
rows = cur.fetchall()
for row in rows:
tax_ids.append(row[0])
# Many JASPAR motifs (especially those not in the CORE collection)
# do not have taxonomy IDs. So this warning would get annoying.
# if not tax_ids:
# warnings.warn("Could not fetch any taxonomy IDs for JASPAR motif"
# " {0}".format(motif.matrix_id), BiopythonWarning)
motif.species = tax_ids
# fetch protein accession numbers
cur.execute("select ACC FROM MATRIX_PROTEIN where id = %s", (int_id,))
accs = []
rows = cur.fetchall()
for row in rows:
accs.append(row[0])
# Similarly as for taxonomy IDs, it would get annoying to print
# warnings for JASPAR motifs which do not have accession numbers.
motif.acc = accs
# fetch remaining annotation as tags from the ANNOTATION table
cur.execute("""select TAG, VAL from MATRIX_ANNOTATION
where id = %s""", (int_id,))
rows = cur.fetchall()
for row in rows:
attr = row[0]
val = row[1]
if attr == 'class':
motif.tf_class = val
elif attr == 'family':
motif.tf_family = val
elif attr == 'tax_group':
motif.tax_group = val
elif attr == 'type':
motif.data_type = val
elif attr == 'pazar_tf_id':
motif.pazar_id = val
elif attr == 'medline':
motif.medline = val
elif attr == 'comment':
motif.comment = val
else:
"""
TODO If we were to implement additional arbitrary tags
motif.tag(attr, val)
"""
pass
return motif
def _fetch_counts_matrix(self, int_id):
"""
Fetch the counts matrix from the JASPAR DB by the internal ID
Returns a Bio.motifs.matrix.GenericPositionMatrix
"""
counts = {}
cur = self.dbh.cursor()
for base in dna.letters:
base_counts = []
cur.execute("""select val from MATRIX_DATA where ID = %s
and row = %s order by col""", (int_id, base))
rows = cur.fetchall()
for row in rows:
base_counts.append(row[0])
counts[base] = [float(x) for x in base_counts]
return matrix.GenericPositionMatrix(dna, counts)
def _fetch_internal_id_list(
self, collection=JASPAR_DFLT_COLLECTION, tf_name=None, tf_class=None,
tf_family=None, matrix_id=None, tax_group=None, species=None,
pazar_id=None, data_type=None, medline=None, all=False,
all_versions=False
):
"""
Fetch a list of internal JASPAR motif IDs based on various passed
parameters which may then be used to fetch the rest of the motif data.
Caller:
fetch_motifs()
Arguments:
See arguments sections of fetch_motifs()
Returns:
A list of internal JASPAR motif IDs which match the given
selection criteria arguments.
Build an SQL query based on the selection arguments provided.
1: First add table joins and sub-clauses for criteria corresponding to
named fields from the MATRIX and MATRIX_SPECIES tables such as
collection, matrix ID, name, species etc.
2: Then add joins/sub-clauses for tag/value parameters from the
MATRIX_ANNOTATION table.
For the surviving matrices, the responsibility for matrix-based
feature filtering (such as IC, number of sites etc.) falls on the
calling fetch_motifs() method.
"""
int_ids = []
cur = self.dbh.cursor()
"""
Special case 1: fetch ALL motifs. Highest priority.
Ignore all other selection arguments.
"""
if all:
cur.execute("select ID from MATRIX")
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
return int_ids
"""
Special case 2: fetch specific motifs by their JASPAR IDs. This
has higher priority than any other except the above 'all' case.
Ignore all other selection arguments.
"""
if matrix_id:
"""
These might be either stable IDs or stable_ID.version.
If just stable ID and if all_versions == 1, return all versions,
otherwise just the latest
"""
if all_versions:
for id in matrix_id:
# ignore version here, this is a stupidity filter
(base_id, version) = jaspar.split_jaspar_id(id)
cur.execute(
"select ID from MATRIX where BASE_ID = %s", (base_id,)
)
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
else:
# only the latest version, or the requested version
for id in matrix_id:
(base_id, version) = jaspar.split_jaspar_id(id)
if not version:
version = self._fetch_latest_version(base_id)
int_id = None
if version:
int_id = self._fetch_internal_id(base_id, version)
if int_id:
int_ids.append(int_id)
return int_ids
tables = ["MATRIX m"]
where_clauses = []
# Select by MATRIX.COLLECTION
if collection:
if isinstance(collection, list):
# Multiple collections passed in as a list
clause = "m.COLLECTION in ('"
clause = "".join([clause, "','".join(collection)])
clause = "".join([clause, "')"])
else:
# A single collection - typical usage
clause = "m.COLLECTION = '%s'" % collection
where_clauses.append(clause)
# Select by MATRIX.NAME
if tf_name:
if isinstance(tf_name, list):
# Multiple names passed in as a list
clause = "m.NAME in ('"
clause = "".join([clause, "','".join(tf_name)])
clause = "".join([clause, "')"])
else:
# A single name
clause = "m.NAME = '%s'" % tf_name
where_clauses.append(clause)
# Select by MATRIX_SPECIES.TAX_ID
if species:
tables.append("MATRIX_SPECIES ms")
where_clauses.append("m.ID = ms.ID")
"""
NOTE: species are numeric taxonomy IDs but stored as varchars
in the DB.
"""
if isinstance(species, list):
# Multiple tax IDs passed in as a list
clause = "ms.TAX_ID in ('"
clause = "".join([clause, "','".join(str(s) for s in species)])
clause = "".join([clause, "')"])
else:
# A single tax ID
clause = "ms.TAX_ID = '%s'" % str(species)
where_clauses.append(clause)
"""
Tag based selection from MATRIX_ANNOTATION
Differs from the perl TFBS module, where the matrix class explicitly
has a tag attribute corresponding to the tags in the database. That
provides tremendous flexibility in adding new tags to the DB and
being able to select based on those tags without adding new code.
In the JASPAR Motif class we have elected to use specific attributes
for the most commonly used tags and here correspondingly only allow
selection on these attributes.
The attributes corresponding to the tags for which selection is
provided are:
Attribute Tag
tf_class class
tf_family family
pazar_id pazar_tf_id
medline medline
data_type type
tax_group tax_group
"""
# Select by TF class(es) (MATRIX_ANNOTATION.TAG="class")
if tf_class:
tables.append("MATRIX_ANNOTATION ma1")
where_clauses.append("m.ID = ma1.ID")
clause = "ma1.TAG = 'class'"
if isinstance(tf_class, list):
# A list of TF classes
clause = "".join([clause, " and ma1.VAL in ('"])
clause = "".join([clause, "','".join(tf_class)])
clause = "".join([clause, "')"])
else:
# A single TF class
clause = "".join([clause, " and ma1.VAL = '%s' " % tf_class])
where_clauses.append(clause)
# Select by TF families (MATRIX_ANNOTATION.TAG="family")
if tf_family:
tables.append("MATRIX_ANNOTATION ma2")
where_clauses.append("m.ID = ma2.ID")
clause = "ma2.TAG = 'family'"
if isinstance(tf_family, list):
# A list of TF families
clause = "".join([clause, " and ma2.VAL in ('"])
clause = "".join([clause, "','".join(tf_family)])
clause = "".join([clause, "')"])
else:
# A single TF family
clause = "".join([clause, " and ma2.VAL = '%s' " % tf_family])
where_clauses.append(clause)
# Select by PAZAR TF ID(s) (MATRIX_ANNOTATION.TAG="pazar_tf_id")
if pazar_id:
tables.append("MATRIX_ANNOTATION ma3")
where_clauses.append("m.ID = ma3.ID")
clause = "ma3.TAG = 'pazar_tf_id'"
if isinstance(pazar_id, list):
# A list of PAZAR IDs
clause = "".join([clause, " and ma3.VAL in ('"])
clause = "".join([clause, "','".join(pazar_id)])
clause = "".join([clause, "')"])
else:
# A single PAZAR ID
clause = "".join([" and ma3.VAL = '%s' " % pazar_id])
where_clauses.append(clause)
# Select by PubMed ID(s) (MATRIX_ANNOTATION.TAG="medline")
if medline:
tables.append("MATRIX_ANNOTATION ma4")
where_clauses.append("m.ID = ma4.ID")
clause = "ma4.TAG = 'medline'"
if isinstance(medline, list):
# A list of PubMed IDs
clause = "".join([clause, " and ma4.VAL in ('"])
clause = "".join([clause, "','".join(medline)])
clause = "".join([clause, "')"])
else:
# A single PubMed ID
clause = "".join([" and ma4.VAL = '%s' " % medline])
where_clauses.append(clause)
# Select by data type(s) used to compile the matrix
# (MATRIX_ANNOTATION.TAG="type")
if data_type:
tables.append("MATRIX_ANNOTATION ma5")
where_clauses.append("m.ID = ma5.ID")
clause = "ma5.TAG = 'type'"
if isinstance(data_type, list):
# A list of data types
clause = "".join([clause, " and ma5.VAL in ('"])
clause = "".join([clause, "','".join(data_type)])
clause = "".join([clause, "')"])
else:
# A single data type
clause = "".join([" and ma5.VAL = '%s' " % data_type])
where_clauses.append(clause)
# Select by taxonomic supergroup(s) (MATRIX_ANNOTATION.TAG="tax_group")
if tax_group:
tables.append("MATRIX_ANNOTATION ma6")
where_clauses.append("m.ID = ma6.ID")
clause = "ma6.TAG = 'tax_group'"
if isinstance(tax_group, list):
# A list of tax groups
clause = "".join([clause, " and ma6.VAL in ('"])
clause = "".join([clause, "','".join(tax_group)])
clause = "".join([clause, "')"])
else:
# A single tax group
clause = "".join([clause, " and ma6.VAL = '%s' " % tax_group])
where_clauses.append(clause)
sql = "".join(["select distinct(m.ID) from ", ", ".join(tables)])
if where_clauses:
sql = "".join([sql, " where ", " and ".join(where_clauses)])
# print "sql = %s" % sql
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
id = row[0]
if all_versions:
int_ids.append(id)
else:
# is the latest version?
if self._is_latest_version(id):
int_ids.append(id)
if len(int_ids) < 1:
warnings.warn("Zero motifs returned with current select critera",
BiopythonWarning)
return int_ids
def _is_latest_version(self, int_id):
"""
Does this internal ID represent the latest version of the JASPAR
matrix (collapse on base ids)
"""
cur = self.dbh.cursor()
cur.execute("select count(*) from MATRIX where "
"BASE_ID = (select BASE_ID from MATRIX where ID = %s) "
"and VERSION > (select VERSION from MATRIX where ID = %s)",
(int_id, int_id))
row = cur.fetchone()
count = row[0]
if count == 0:
# no matrices with higher version ID and same base id
return True
return False
|
|
from __future__ import division, absolute_import, print_function, unicode_literals
import asyncore
import socket
import struct
import threading
import sys
from collections import deque
DEBUG = False
PY3 = False
if sys.version_info[0] == 3:
import queue as Queue
PY3 = True
else:
import Queue
range = xrange
class Client():
CONNECT = 1
BIND = 2
UDP_ASSOCIATE = 3
def __init__(self):
self.recvbuf = Queue.Queue()
self.writebuf = Queue.Queue()
# Hard coding this is bad and I feel bad
self._conns = [deque(range(1, 2048))]
self._conns.extend([None]*2048)
def recv(self):
raise NotImplementedError
def write(self):
raise NotImplementedError
def start(self):
t = threading.Thread(target=self.write)
t.daemon = True
t.start()
t = threading.Thread(target=self._dataparse)
t.daemon = True
t.start()
t = threading.Thread(target=self.recv)
t.daemon = True
t.start()
return t
def new_conn(self, cmd, addr, port, s):
id = self._conns[0].pop()
if DEBUG:
print('Client new conn:', id, cmd, addr, port)
s.settimeout(10)
self._conns[id] = s
if PY3:
addr = bytes(addr, 'utf8')
else:
addr = str(addr)
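# Open-connection frame (little-endian): 2-byte connection id, 1-byte
# command, 2-byte destination port, the destination address bytes, and a
# terminating 0x00.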
msg = struct.pack('<HBH'+str(len(addr))+'sB', id, cmd, port, addr, 0x00)
self.writebuf.put(msg)
t = threading.Thread(target=self._recv_loop, args=(id,))
t.daemon = True
t.start()
def _recv_loop(self, id):
while self._conns[id] is not None:
try:
data = self._conns[id].recv(65535)
size = len(data)
if data == b'':
raise socket.error
else:
if self._conns[id] is not None:
if DEBUG:
print('Client c->s:', id, size)
self.writebuf.put(struct.pack('<HH', id, size) + data)
except socket.timeout:
pass
except socket.error:
self._close_id(id)
def _close_id(self, id):
if DEBUG:
print('Client closing id', id)
if self._conns[id] is not None:
self._conns[id].close()
self._conns[id] = None
resp = struct.pack('<HH', 0x00, id)
self.writebuf.put(resp)
self._conns[0].appendleft(id)
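# Incoming stream format parsed below: each frame starts with a 4-byte
# header of two little-endian uint16s (connection id, payload size) followed
# by the payload. A frame with id 0 carries, in the size field, the id of a
# connection to close instead of a payload.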
def _dataparse(self, get=True):
data = b''
needmore = False
while True:
if not data or needmore:
data += self.recvbuf.get()
needmore = False
else:
try:
data += self.recvbuf.get_nowait()
except Queue.Empty:
pass
# Make sure we at least have the header
if len(data) < 4:
needmore = True
continue
id, = struct.unpack('<H', data[:2])
# ID 0 is to close a con
if id == 0:
id, = struct.unpack('<H', data[2:4])
if self._id_check(id):
self._close_id(id)
data = data[4:]
# If we dont have that conn ID, tell the server its closed
elif not self._id_check(id):
resp = struct.pack('<HH', 0x00, id)
size, = struct.unpack('<H', data[2:4])
if DEBUG:
print('Client invalid id request', id)
self.writebuf.put(resp)
# TODO: Need to add support for if size>msg
data = data[4+size:]
# Else write the data
else:
tosend = False
size, = struct.unpack('<H', data[2:4])
datasize = len(data[4:])
if DEBUG:
print('Client s->c:', id, size, datasize)
if datasize == size:
tosend = data[4:]
data = b''
elif datasize > size:
tosend = data[4:size+4]
data = data[size+4:]
elif datasize < size:
needmore = True
if tosend:
try:
if DEBUG:
print('Client c->out:', id, len(tosend))
self._conns[id].sendall(tosend)
except (socket.error, AttributeError):  # conn may have been closed concurrently
self._close_id(id)
def _id_check(self, id):
# TODO: Make this better
try:
return self._conns[id] is not None
except IndexError:
return False
class SocksHandler():
def __init__(self):
pass
def new_request(self, sock, addr, client):
# Client sends version and methods
sock.setblocking(True)
data = sock.recv(1)
if not data:
return None
ver, = struct.unpack('!B', data)
if DEBUG:
print('Version:', ver)
if ver == 4:
ret = self._socks4_init(sock, client)
elif ver == 5:
ret = self._socks5_init(sock, client)
else:
if DEBUG:
print('ERROR: Invalid socks version')
sock.close()
return None
if not ret:
return None
def _socks4_init(self, sock, client):
cmd, dstport, a, b, c, d = struct.unpack('!BHBBBB', sock.recv(7))
userid = ''
data = struct.unpack('!B', sock.recv(1))
while data[0] != 0:
userid += chr(data[0])
data = struct.unpack('!B', sock.recv(1))
dstaddr = ''
# sock4a
if a + b + c == 0 and d > 0:
data = struct.unpack('!B', sock.recv(1))
while data[0] != 0:
dstaddr += chr(data[0])
data = struct.unpack('!B', sock.recv(1))
# normal socks4
else:
dstaddr = "{}.{}.{}.{}".format(a, b, c, d)
ret = client.new_conn(cmd, dstaddr, dstport, sock)
sock.sendall(struct.pack('!BBHI', 0x00, 0x5A, 0x0000, 0x00000000))
return ret
def _socks5_init(self, sock, client):
# Get list of auth methods
methods, = struct.unpack('!B', sock.recv(1))
mlist = []
for i in range(0, methods):
test = sock.recv(1)
val, = struct.unpack('!B', test)
mlist.append(val)
# Always use no auth
if 0 in mlist:
sock.send(struct.pack('!BB', 0x05, 0x00))
else:
print('No valid auth method', mlist)
sock.send(struct.pack('!BB', 0x05, 0xFF))
sock.close()
return None
# Get the request
ver, cmd, rsv, atyp = struct.unpack('!BBBB', sock.recv(4))
dstaddr = None
dstport = None
if atyp == 1:
a, b, c, d, dstport = struct.unpack('!BBBBH', sock.recv(6))
dstaddr = "{}.{}.{}.{}".format(a, b, c, d)
elif atyp == 3:
size, = struct.unpack('!B', sock.recv(1))
dstaddr = sock.recv(size)
if type(dstaddr) == bytes:
dstaddr = dstaddr.decode('utf8')
dstport, = struct.unpack('!H', sock.recv(2))
#TODO: ipv6 addr support
#elif atyp = 4:
else:
print('Unknown address type', atyp)
sock.send(struct.pack('!BB', 0x05, 0xFF))
sock.close()
return None
ret = client.new_conn(cmd, dstaddr, dstport, sock)
sock.sendall(struct.pack('!BBBBIH', 0x05, 0x00, 0x00, 0x01, 0x00000000, 0x0000))  # VER, REP, RSV, ATYP, BND.ADDR, BND.PORT
return ret
class OneToOneHandler():
def __init__(self, addr, port):
self.addr = addr
self.port = port
def new_request(self, sock, addr, client):
ret = client.new_conn(1, self.addr, self.port, sock)
return ret
class Listener(asyncore.dispatcher):
host = '127.0.0.1'
port = 1080
handler = SocksHandler()
def __init__(self, client, host=None, port=None, handler=None):
asyncore.dispatcher.__init__(self)
if host is not None:
self.host = host
if port is not None:
self.port = port
if handler is not None:
self.handler = handler
self.client = client
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
handle = self.handler.new_request(sock, addr, self.client)
def wait(self):
asyncore.loop()
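# A minimal usage sketch (everything named transport_* below is hypothetical,
# not part of this module): subclass Client with concrete recv()/write()
# loops that move the framed bytes over some transport, then attach a local
# SOCKS listener to it.
#
#   class TransportClient(Client):
#       def recv(self):
#           while True:
#               self.recvbuf.put(transport_read())    # hypothetical helper
#       def write(self):
#           while True:
#               transport_write(self.writebuf.get())  # hypothetical helper
#
#   client = TransportClient()
#   client.start()
#   Listener(client, host='127.0.0.1', port=1080).wait()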
|
|
# transform() function is where you should start reading to understand this code.
# transform() has to be called at the bottom of this file for things to work.
from sys import argv
import re
from importlib import import_module
import importlib.util
# functions:
def transform():
file_name = argv[1] # so you can use this Terminal command: python transformer.py text.txt
text = get_text(file_name)
text = text.lower() # lowercase
text = remove_multi_spaces(text)
sentences = get_sentences(text)
clear_code_file(code_file_name)
compile_code(sentences)
run_code()
def get_text(file_name):
text = open(file_name, 'r').read() # for example: file_name = "text.txt"
return text
def remove_multi_spaces(text):
# just doing ' '.join(text.split()) would not enable use of terse mode (which uses '\n')
newtext = ''
for line in text.split('\n'):
line = ' '.join(line.split())
newtext += line + '\n'
return newtext.strip() # remove trailing line(s)
def get_sentences(text):
# normally, each sentence is expected to begin with "please "
split_by_word = 'please '
# but you can turn on "terse mode" to use newline characters per line of code if your interface enables it
terse_mode_on = check_terse_mode(text)
if terse_mode_on:
split_by_word = '\n'
# split into sentences by "please " or by "\n"
sentences = text.split(split_by_word)[1:] # assume index [0] is always empty or invalid before the first "please " or "\n"
return sentences
def check_terse_mode(text):
"""
example:
please no need to say please
print this works
print no need to say please before each line
"""
terse_mode_on = False
checkphrases = ['please no need to say please',
'please use enter mode',
'please use short mode']
if any(text.startswith(checkphrase) for checkphrase in checkphrases):
terse_mode_on = True
return terse_mode_on
def clear_code_file(filename):
open(filename, 'w').close()
def compile_code(sentences):
global nested_blocks_ignore
i = 0
for sentence in sentences: # use i to access sentence indices for go-to locations
sentence = sentence.strip() # remove '\n' and leading/trailing spaces
sentence = modify_sentence(sentence)
with open(code_file_name, 'a') as f:
f.write(sentence+'\n')
def modify_sentence(sentence):
# note that order matters for replacing things in the sentence
recognized = False
if sentence == '': # blank/new line
return sentence
sentence = check_spell(sentence)
[sentence, is_note] = check_note(sentence)
if is_note:
# don't bother checking the rest
return sentence
[sentence, is_print] = check_print(sentence)
if is_print:
# don't bother checking the rest
return sentence
[sentence, is_import] = check_import(sentence)
if is_import:
# don't bother checking the rest
return sentence
[sentence, is_variable] = check_variable(sentence)
if is_variable:
recognized = True
[sentence, is_math] = check_math(sentence)
if is_math:
recognized = True
[sentence, is_list] = check_list(sentence) # this can rely on math replacing integers
if is_list:
recognized = True
[sentence, is_dictionary] = check_dictionary(sentence)
if is_dictionary:
recognized = True
[sentence, is_use] = check_use(sentence)
if is_use:
recognized = True
[sentence, is_assign] = check_assign(sentence)
if is_assign:
recognized = True
[sentence, is_if] = check_if(sentence)
if is_if:
recognized = True
[sentence, is_for] = check_for(sentence)
if is_for:
recognized = True
[sentence, is_class] = check_class(sentence)
if is_class:
recognized = True
[sentence, is_function] = check_function(sentence)
if is_function:
recognized = True
# treat with suspicion if nothing was recognized in the sentence
if not recognized:
raise Exception('DID NOT RECOGNIZE COMMAND: ' + sentence)
else:
return sentence
def run_code():
code_file_name_without_py_extension = code_file_name[:-3]
importlib.import_module(code_file_name_without_py_extension)
"""
example:
please spell with the first letters of Neptune unicorn moose panda Yoda
"""
def check_spell(sentence):
# find matches in sentence:
for phrase_start in spell_checkphrases:
for phrase_stop in spell_finish_words:
checkphrase = phrase_start + ' (.+)' + phrase_stop
matches = re.search(checkphrase, sentence)
if matches:
words_to_spell_with = matches.group(1) # this is substring found inside '(.+)'
spelt_word = spell_with_first_letters(checkphrase, words_to_spell_with)
print_debug('SPELL: spelt_word=' + spelt_word)
phrase_to_replace = phrase_start + ' ' + words_to_spell_with
sentence = sentence.replace(phrase_to_replace, spelt_word + ' ').strip()
return sentence
def spell_with_first_letters(checkphrase, sentence):
local_sent = sentence.replace(checkphrase, '')
words = local_sent.split()
spelt_word = ''.join(list(word[0] for word in words))
return spelt_word
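# e.g. spelling with the first letters of 'neptune unicorn moose panda yoda'
# yields 'numpy' (the text has already been lowercased in transform()).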
"""
example:
please note this is a comment
"""
def check_note(sentence):
word = 'note' + ' '
word_len = len(word)
if sentence.startswith(word):
sentence = '\t'*num_indents + '# ' + sentence[word_len:]
return [sentence, True]
else:
return [sentence, False]
"""
example:
please print this string of words
"""
def check_print(sentence):
matches_print = re.match('print (.+)', sentence)
if matches_print:
string = matches_print.group(1)
string = replace_index_of_variable_in_print(string) # do this before replace variable to check more restrictive match first
string = '"' + replace_variables_in_print(string) + '"' # enables replacing string '...variable <...> ...' with '...' + str(<..>) + '...'
string = remove_empty_start_end(string)
sentence = '\t'*num_indents + 'print(' + string + ')'
return [sentence, True]
elif sentence == 'print':
sentence = '\t'*num_indents + 'print()'
return [sentence, True]
else:
return [sentence, False]
def replace_index_of_variable_in_print(string):
# add spaces to make it easier to cover all cases (only, start, mid, end) in single search regexes
string = ' ' + string + ' '
indexes_found = re.findall(' index (.+) of (.+) ', string)
for index_found in indexes_found:
index_string = index_found[0]
index_value = str(index_string)
if index_string in math_words_numbers:
index_value = str(math_words_numbers[index_string]) # start index at zero (so don't add +1 here)
elif is_digit(index_string):
index_value = str(int(index_string)) # start index at zero (so don't add +1 here)
elif index_string.startswith('variable '):
index_value = index_value.replace('variable ', '')
variable_name = index_found[1].replace('variable ', '')
update_variable_names_list(variable_name)
variable_name_replacer = index_found[1].replace(' ','_') # variable names can't have spaces
replace_over = 'index ' + index_string + ' of ' + variable_name
replace_with = ' ' + '" + ' + variable_name_replacer + '[' + index_value + ']' + ' + "'
string = string.replace(replace_over, replace_with)
return string
def replace_variables_in_print(string):
# add spaces to make it easier to cover all cases (only, start, mid, end) in single search regexes
string = ' ' + string + ' '
if 'variable ' in string:
for variable_name in variable_names:
variable_name_spaced = variable_name.replace('_',' ')
if variable_name_spaced in string:
replace_over = ' variable ' + variable_name_spaced
replace_with = ' ' + '" + str(' + variable_name + ') + "'
# note: add an initial space to replace_with so that words between variables get spaces between them
string = string.replace(replace_over, replace_with)
variables_found = re.findall('variable (.+?) ', string) # .findall() = get ALL non-overlapping matches
for variable_found in variables_found:
replace_over = ' variable ' + variable_found
replace_with = ' ' + '" + str(' + variable_found + ') + "'
# note: add an initial space to replace_with so that words between variables get spaces between them
string = string.replace(replace_over, replace_with)
return string.strip()
def remove_empty_start_end(string):
false_start = '"" + '
false_end = ' + ""'
string = string.replace(false_start, '').replace(false_end, '')
return string
"""
example:
please import alternate
please import test from library
please import numpy as nectarine pony
"""
def check_import(sentence):
# order matters; start with most restrictive first
if not sentence.startswith('import '):
return [sentence, False]
matches_as_from = re.match('import (.+) as (.+) from (.+)', sentence)
if matches_as_from:
import_name = matches_as_from.group(1)
import_as = matches_as_from.group(2).replace(' ','_') # import names can't have spaces
import_from = matches_as_from.group(3)
sentence = '\t'*num_indents + 'from ' + import_from + ' import ' + import_name + ' as ' + import_as
return [sentence, True]
matches_as = re.match('import (.+) as (.+)', sentence)
if matches_as:
import_name = matches_as.group(1)
import_as = matches_as.group(2).replace(' ','_') # import names can't have spaces
sentence = '\t'*num_indents + 'import ' + import_name + ' as ' + import_as
return [sentence, True]
matches_from = re.match('import (.+) from (.+)', sentence)
if matches_from:
import_name = matches_from.group(1)
import_from = matches_from.group(2)
sentence = '\t'*num_indents + 'from ' + import_from + ' import ' + import_name
return [sentence, True]
matches_name = re.match('import (.+)', sentence)
if matches_name:
import_name = matches_name.group(1)
sentence = '\t'*num_indents + 'import ' + import_name
return [sentence, True]
# just in case
return [sentence, False]
"""
example:
please create variable apple
please variable banana
please print you assigned variable apple to apple
"""
def check_variable(sentence):
has_variable = 'variable ' in sentence
if not has_variable:
return [sentence, False]
else:
# order matters; start with most restrictive first
matches_variable_index = re.search(' index (.+) of variable (.+)', sentence)
if matches_variable_index:
variable_name = matches_variable_index.group(2).replace(' ','_') # variable names can't have spaces
update_variable_names_list(variable_name)
variable_index = matches_variable_index.group(1)
replace_over = ' index ' + variable_index + ' of variable ' + variable_name
replace_with = variable_name + '[' + variable_index + ']'
sentence = sentence.replace(replace_over, replace_with)
return [sentence, True]
matches_variable_only = re.match('create variable (.+)', sentence)
if matches_variable_only:
variable_name = matches_variable_only.group(1).replace(' ','_') # variable names can't have spaces
update_variable_names_list(variable_name)
sentence = '\t'*num_indents + variable_name + ' = None'
return [sentence, True]
matches_variable_only = re.match('variable (.+)', sentence)
if matches_variable_only:
variable_name = matches_variable_only.group(1).replace(' ','_') # variable names can't have spaces (use underscores to avoid name collisions)
update_variable_names_list(variable_name)
sentence = '\t'*num_indents + variable_name + ' = None'
return [sentence, True]
matches_variable_only = re.search('variable (.+)', sentence)
if matches_variable_only:
variable_name = matches_variable_only.group(1).replace(' ','_') # variable names can't have spaces
update_variable_names_list(variable_name)
replace_over = ' variable ' + variable_name
replace_with = ' ' + variable_name
sentence = sentence.replace(replace_over, replace_with)
return [sentence, True]
# just in case
return [sentence, False]
"""
example:
please one plus two
"""
def check_math(sentence):
recognized = False
words = sentence.split()
math_expression = ''
replace_expression = ''
# need to find math expressions word-by-word (since typically embedded in sentences like assign...to...)
for i, word in enumerate(words):
if word in math_words_numbers:
sentence = sentence.replace(word, str(math_words_numbers[word]))
recognized = True
elif word in math_words_boolean:
sentence = sentence.replace(word, str(math_words_boolean[word]))
recognized = True
elif word in math_words_operators:
replace_over = word
if word == 'negative': # "- 1" --> "-1" for check_list(sentence) to work
replace_over = 'negative '
sentence = sentence.replace(replace_over, math_words_operators[word])
# no str() because already string, just need to add to expression
recognized = True
return [sentence, recognized]
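# e.g. check_math('one plus two') returns ['1 + 2', True], since 'one' and
# 'two' are recognized number words and 'plus' is a recognized operator.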
def is_digit(string):
# built-in isdigit() doesn't work with negative numbers
try:
int(string)
return True
except:
return False
"""
example:
please assign dragon fruit the value of list starting from eight ending at twelve
please assign variable crazy list the value list of one and two and tree bark
"""
def check_list(sentence):
# check if ordered list of items from int to int
matches_list_ordered = re.search(' list starting from (.+) ending at (.+)', sentence)
if matches_list_ordered:
list_start = matches_list_ordered.group(1).replace(' ','') # 1 00 should become 100
list_stop = matches_list_ordered.group(2).replace(' ','') # 2 00 should become 200
if int(list_stop)-int(list_start) < 10:
# list of 10 or less integers? just show each individual item
ordered_list_items = list(range(int(list_start), int(list_stop) + 1)) # + 1 so that the number spoken actually appears in the list
ordered_list_items = create_list_string(ordered_list_items)
else:
# list of more than 10 integers? show as a list(range(..., ...))
ordered_list_items = 'list(range(' + list_start + ',' + str(int(list_stop)+1) + '))'
replace_over = matches_list_ordered.group()
replace_with = ' ' + ordered_list_items
sentence = sentence.replace(replace_over, replace_with)
return [sentence, True]
# check if unordered list of items separated by ' and '
matches_list_unordered = re.search(' list of (.+)', sentence)
if matches_list_unordered:
string_of_list_items = matches_list_unordered.group(1)
unordered_list_items = string_of_list_items.split(' and ') # items separated by ' and '
unordered_list_items = create_list_string(unordered_list_items)
replace_over = matches_list_unordered.group()
replace_with = ' ' + unordered_list_items
sentence = sentence.replace(replace_over, replace_with)
return [sentence, True]
# just in case
return [sentence, False]
"""
example:
please assign my dictionary the value dictionary key one value apple
please assign my dictionary the value dictionary key one value apple key two value banana
"""
def check_dictionary(sentence):
matches_dictionary = re.search(' (dictionary( key .+ value .+)+)', sentence)
if matches_dictionary:
pairs = matches_dictionary.group(2).split(' key ') # returns ['', '<keyval> value <value>', ...]
pairs = list(filter(None,pairs)) # filter(None,...) is shorthand for filter(lambda x:x, ...)
replace_with = []
for pair in pairs:
key = pair.split(' value ')[0]
val = pair.split(' value ')[1]
replace_with.append('\'' + key + '\'' + ':' + '\'' + val + '\'')
replace_with = '{ ' + ', '.join(replace_with) + ' }'
replace_over = matches_dictionary.group(1)
sentence = sentence.replace(replace_over, replace_with)
return [sentence, True]
# just in case
return [sentence, False]
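# e.g. on ' dictionary key one value apple key two value banana' this yields
# "{ 'one':'apple', 'two':'banana' }" (in the full pipeline check_math has
# already replaced the number words with digits before this runs).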
def create_list_string(list_items):
# note: spaces between '[ ', ' ]', and ' , ' because need to identify list items as numbers/strings
list_string = '[ '
for item in list_items:
if is_digit(item):
list_string += str(item)
elif item in math_words_numbers:
list_string += str(math_words_numbers[item])
elif item in math_words_boolean:
list_string += str(math_words_boolean[item])
elif ' ' in str(item) and all((is_digit(word) or word in math_words_numbers or word in math_words_operators or word in math_words_boolean) for word in item.split()):
# need this condition to account for things like negative numbers in (un)ordered lists
# if composed of multiple words that are all math words
for word in item.split():
if is_digit(word):
list_string += str(word)
elif word in math_words_numbers:
list_string += str(math_words_numbers[word])
elif word in math_words_boolean:
list_string += str(math_words_boolean[word])
elif word in math_words_operators: # use this because could contain minus/plus/etc.
list_string += str(math_words_operators[word])
else:
list_string += '\'' + str(item) + '\''
list_string += ' , '
list_string = list_string[:-3] # remove last comma and space
list_string += ' ]'
return list_string
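# e.g. create_list_string(['1', 'two', 'tree bark']) returns
# "[ 1 , 2 , 'tree bark' ]" (the extra spaces are kept on purpose, see the
# note at the top of the function).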
"""
example 1:
please use test_function of test
please use test_function from test
"""
"""
example 2:
please define function test with item
please print variable item
please end function
please assign other the value it works
please use function test on variable other
"""
def check_use(sentence): # TODO: make into one regex
# order matters; start with most restrictive first
matches_from_and_input = re.match('assign (.+) (use|using) (.+) (from|of) (.+) (on|with) (.+)', sentence)
if matches_from_and_input:
function_output = matches_from_and_input.group(1).replace('variable ', '')
function_name = matches_from_and_input.group(3).replace('function ', '').replace(' ','_')
function_from = matches_from_and_input.group(5).replace(' ','_')
function_input = matches_from_and_input.group(7).replace('variable ', '')
replace_over = matches_from_and_input.group()
# check assignment
function_output = re.match('(to )?(.+) the value (of )?', function_output)
if function_output:
function_output = function_output.group(2).replace(' ','_') + ' = '
replace_with = '\t'*num_indents + function_output + function_from + '.' + function_name + '(' + function_input + ')'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
matches_from_and_input = re.match('assign (.+) (use|using) (.+) (from|of) (.+)', sentence)
if matches_from_and_input:
function_output = matches_from_and_input.group(1).replace('variable ', '')
function_name = matches_from_and_input.group(3).replace('function ', '').replace(' ','_')
function_from = matches_from_and_input.group(5).replace(' ','_')
replace_over = matches_from_and_input.group()
# check assignment
function_output = re.match('(to )?(.+) the value (of )?', function_output)
if function_output:
function_output = function_output.group(2).replace(' ','_') + ' = '
replace_with = '\t'*num_indents + function_output + function_from + '.' + function_name + '()'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
matches_from_and_input = re.match('assign (.+) (use|using) (.+) (on|with) (.+)', sentence)
if matches_from_and_input:
function_output = matches_from_and_input.group(1).replace('variable ', '')
function_name = matches_from_and_input.group(3).replace('function ', '').replace(' ','_')
function_input = matches_from_and_input.group(5).replace('variable ', '')
replace_over = matches_from_and_input.group()
# check assignment
function_output = re.match('(to )?(.+) the value (of )?', function_output)
if function_output:
function_output = function_output.group(2).replace(' ','_') + ' = '
replace_with = '\t'*num_indents + function_output + function_name + '(' + function_input + ')'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
matches_from_and_input = re.search('(use|using) (.+) (from|of) (.+) (on|with) (.+)', sentence)
if matches_from_and_input:
function_name = matches_from_and_input.group(2).replace('function ', '').replace(' ','_')
function_from = matches_from_and_input.group(4).replace(' ','_')
function_input = matches_from_and_input.group(6).replace('variable ', '')
replace_over = matches_from_and_input.group()
replace_with = '\t'*num_indents + function_from + '.' + function_name + '(' + function_input + ')'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
matches_from = re.search('(use|using) (.+) (from|of) (.+)', sentence)
if matches_from:
function_name = matches_from.group(2).replace('function ', '').replace(' ','_')
function_from = matches_from.group(4).replace(' ','_')
replace_over = matches_from.group()
replace_with = '\t'*num_indents + function_from + '.' + function_name + '()'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
matches_input = re.search('(use|using) (.+) (on|with) (.+)', sentence)
if matches_input:
function_name = matches_input.group(2).replace('function ', '').replace(' ','_')
function_input = matches_input.group(4).replace('variable ', '')
replace_over = matches_input.group()
replace_with = '\t'*num_indents + function_name + '(' + function_input + ')'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, True]
matches_name = re.search('(use|using) (.+)', sentence)
if matches_name:
function_name = matches_name.group(2).replace('function ', '').replace(' ','_')
replace_over = matches_name.group()
replace_with = '\t'*num_indents + function_name + '()'
sentence = sentence.replace(replace_over, replace_with)
recognized = True
return [sentence, recognized]
# just in case
return [sentence, False]
"""
example:
please assign to variable apple the value of one
please assign to banana the value three hundred
please assign coconut the value of some words
please assign dragon fruit the value four
NOTE DEPRECATED/OBSOLETE:
please assign one to apple
"""
def check_assign(sentence):
matches_assign2 = re.match('assign (to )?(variable )?(.+) (the )+?value (of )?(.+)', sentence)
if matches_assign2:
variable_name = matches_assign2.group(3).replace(' ','_') # variable names can't have spaces
update_variable_names_list(variable_name)
variable_value = matches_assign2.group(6)
first_word_is_string = check_if_just_string(variable_value)
# if the first word is not math, then just make the whole variable value a string (otherwise leave as is)
if first_word_is_string and not variable_value.startswith('variable') and not variable_value.startswith('list'):
variable_value = '\'' + variable_value + '\'' # need to put quotation marks around strings being assigned
elif variable_value.startswith('variable'):
variable_value = variable_value.replace('variable ', '')
elif is_digit(variable_value.replace(' ','')): # TODO need better way to detect that it's not a string
variable_value = variable_value.replace(' ','') # so "3 00" becomes "300"
sentence = '\t'*num_indents + variable_name + ' = ' + variable_value
return [sentence, True]
# just in case
return [sentence, False]
def check_if_just_string(variable_value):
# get first word
first_word = variable_value.split(' ',1)[0]
# do various checks
not_variable = first_word != 'variable'
not_number = first_word not in math_words_numbers
not_boolean = first_word not in math_words_boolean
not_math_punctuation = first_word not in math_punctuation
first_few_characters_are_math = is_digit(first_word[0]) or is_digit(first_word[:2])
# put those checks together
first_word_is_string = not_variable and not_number and not_boolean and not first_few_characters_are_math and not_math_punctuation
return first_word_is_string
def update_variable_names_list(variable_name):
if variable_name not in variable_names:
variable_names.append(variable_name)
"""
example:
please if true then print this is a one line if statement
please if one equals one then
please print it works
please end if
please if one equals two then
please print it should not print this
please end if
"""
def check_if(sentence):
global num_indents
if sentence.startswith('end if') or sentence.startswith('done if'):
num_indents -= 1
sentence = '\t'*num_indents
return [sentence, True]
# escape early if does not start with 'if '
if not sentence.startswith('if '):
return [sentence, False]
# note: force 'if ' to be first word; DO NOT start regex with '.*'
matches_multiliner = re.match('if (.+) then ?$', sentence) # $ for end of sentence
matches_oneliner = re.match('if (.+) then (.+)', sentence) # space after 'then' WITHOUT $ because sentence continues
if matches_multiliner:
condition = matches_multiliner.group(1).replace('variable ', '')
sentence = '\t'*num_indents + 'if ' + condition + ':'
num_indents += 1 # affect indents for later lines, not current line
return [sentence, True]
if matches_oneliner:
condition = matches_oneliner.group(1).replace('variable ', '')
then = check_print(matches_oneliner.group(2))[0] # because check_print() only activates if 'print ' starts the string
sentence = '\t'*num_indents + 'if ' + condition + ':' + '\n' + '\t'*(num_indents+1) + then + '\n' + '\t'*num_indents
return [sentence, True]
# just in case
return [sentence, False]
"""
example:
please assign variable circle the value of list from negative one to three
please for each index in circle
please print variable index
please end for
"""
def check_for(sentence):
global num_indents
matches_for = re.match('for (.+) in (.+)', sentence)
if matches_for:
for_what = matches_for.group(1).replace('each ', '')
for_in = matches_for.group(2).replace(' ','_') # variable names can't have spaces
sentence = '\t'*num_indents + 'for ' + for_what + ' in ' + for_in + ':'
num_indents += 1 # affect indents for later lines, not current line
return [sentence, True]
if sentence.startswith('end for') or sentence.startswith('done for'):
num_indents -= 1
sentence = '\t'*num_indents
return [sentence, True]
# just in case
return [sentence, False]
def check_class(sentence):
global num_indents
matches_define_class = re.match('(define |create )(a )?class (named )?(.+)', sentence)
if matches_define_class:
class_name = matches_define_class.group(4).replace(' ','_') # class names can't have spaces
sentence = '\t'*num_indents + 'class ' + class_name + ':'
num_indents += 1 # affect indents for later lines, not current line
return [sentence, True]
matches_end_class = re.match('end class', sentence)
matches_end_class2 = re.match('done class', sentence)
if matches_end_class or matches_end_class2:
num_indents -= 1
sentence = '\t'*num_indents
return [sentence, True]
# just in case
return [sentence, False]
"""
example:
please define function test with item
please print variable item
please end function
please assign to other the value it works
please use function test on variable other
"""
def check_function(sentence):
global num_indents
matches_define_function_with_input = re.match('define function (.+) (with|using) (inputs |input )?(.+)$', sentence)
if matches_define_function_with_input:
function_name = matches_define_function_with_input.group(1).replace(' ','_') # function names can't have spaces
input_names = ','.join(matches_define_function_with_input.group(4).split(' and '))
sentence = '\t'*num_indents + 'def ' + function_name + '(' + input_names + '):'
num_indents += 1 # affect indents for later lines, not current line
return [sentence, True]
matches_define_function = re.match('define function (.+)$', sentence)
if matches_define_function:
function_name = matches_define_function.group(1).replace(' ','_') # function names can't have spaces
sentence = '\t'*num_indents + 'def ' + function_name + '():'
num_indents += 1 # affect indents for later lines, not current line
return [sentence, True]
matches_end_function = re.match('end function', sentence)
matches_end_function2 = re.match('done function', sentence)
if matches_end_function or matches_end_function2:
num_indents -= 1
sentence = '\t'*num_indents
return [sentence, True]
matches_return = re.match('return (.+)', sentence)
if matches_return:
output_value = matches_return.group(1) # will either output the literal value "...", or the value of "variable ..."
if output_value.startswith('variable '):
# print(variable_names) # TODO: some variables it has should not have been created
output_value = replace_variables_in_return(output_value)
# for variable_name in variable_names:
# if 'variable ' + variable_name.replace('_',' ') in output_value:
# output_value = output_value.replace('variable ' + variable_name.replace('_',' '), variable_name)
# output_value = output_value.replace('variable ', '') #.replace(' ','_')
output_value = check_math(output_value)[0] # will either output the literal value "...", or the value of "variable ..."
sentence = '\t'*num_indents + 'return ' + str(output_value)
return [sentence, True]
# just in case
return [sentence, False]
def replace_variables_in_return(string):
# add spaces to make it easier to cover all cases (only, start, mid, end) in single search regexes
string = ' ' + string + ' '
if 'variable ' in string:
for variable_name in variable_names:
variable_name_spaced = variable_name.replace('_',' ')
if variable_name_spaced in string:
replace_over = ' variable ' + variable_name_spaced
replace_with = variable_name
# note: add an initial space to replace_with so that words between variables get spaces between them
string = string.replace(replace_over, replace_with)
variables_found = re.findall(r'variable ([\w ]+) ', string) # .findall() = get ALL non-overlapping matches
for variable_found in variables_found:
replace_over = 'variable ' + variable_found
replace_with = variable_found.replace(' ', '_')
# note: add an initial space to replace_with so that words between variables get spaces between them
string = string.replace(replace_over, replace_with)
return string.strip()
def print_debug(string):
if hide_debug_printouts == False:
print(' DEBUG ' + string)
# initialize global variables:
num_indents = 0
code_file_name = 'code.py'
# track variable names
variable_names = []
# recognize words for numbers, math operations, spelling checkphases, etc.
math_words_numbers = {'zero':0,'one':1,'two':2,'three':3,'four':4,'five':5,
'six':6,'seven':7,'eight':8,'nine':9,'ten':10,
'eleven':11,'twelve':12,'thirteen':13,'fourteen':14,'fifteen':15,
'sixteen':16,'seventeen':17,'eighteen':18,'nineteen':19,
'twenty':20,'thirty':30,'forty':40,'fifty':50,
'sixty':60,'seventy':70,'eighty':80,'ninety':90,
'hundred':'00','thousand':'000','million':'000000',
'billion':'000000000','trillion':'000000000000',
'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}
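# Note: 'hundred', 'thousand' etc. map to strings of zeros rather than
# numbers so that a phrase like 'three hundred' becomes '3' + '00' and then
# collapses to 300 once check_assign() strips the spaces.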
math_words_boolean = {'true':True,'false':False}
math_words_operators = {'plus':'+','positive':'+','minus':'-','negative':'-',
'times':'*','divide':'/','divided':'/',
'equals':'==','equal':'==','over':'>','above':'>','under':'<','below':'<',
'not':'!',
'modulus':'%','modulo':'%'} # add more functions later as needed
math_punctuation = '()[]{},.:-+=/*><!%'
spell_checkphrases = ['spell with first letters of',
'spell with first letter of',
'spelled with first letters of',
'spelled with first letter of',
'spell with the first letters of',
'spell with the first letter of',
'spelled with the first letters of',
'spelled with the first letter of',
'spell using the first letters of',
'spell using the first letter of',
'spelled using the first letters of',
'spelled using the first letter of',
'spelt with the first letters of',
'spelt with the first letter of',
'spelt using the first letters of',
'spelt using the first letter of',
]
spell_finish_words = ['to', 'as', 'from', 'then', '$'] # $ for end of line for regex
# True = hide debug prints:
# False = show debug prints:
hide_debug_printouts = True
# (this if statement lets code after it only run if you're running this file directly)
if __name__ == '__main__':
print('\nPLEASE WORK...\n')
# run this interpreter:
transform()
print('\n...THANK YOU!\n')
|
|
from __future__ import absolute_import
import logging
import six
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect
from django.middleware.csrf import CsrfViewMiddleware
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from sudo.views import redirect_to_sudo
from sentry import roles
from sentry.api.serializers import serialize
from sentry.auth import access
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
Authenticator, Organization, OrganizationMember, OrganizationStatus, Project, ProjectStatus,
Team, TeamStatus
)
from sentry.utils import auth
from sentry.utils.audit import create_audit_entry
from sentry.web.helpers import render_to_response
from sentry.web.frontend.generic import FOREVER_CACHE
logger = logging.getLogger(__name__)
audit_logger = logging.getLogger('sentry.audit.ui')
class OrganizationMixin(object):
# TODO(dcramer): move the implicit organization logic into its own class
# as it's only used in a single location and over complicates the rest of
# the code
def get_active_organization(self, request, organization_slug=None):
"""
Returns the currently active organization for the request or None
if no organization.
"""
# TODO(dcramer): this is a huge hack, and we should refactor this
# it is currently needed to handle the is_auth_required check on
# OrganizationBase
active_organization = getattr(self, '_active_org', None)
cached_active_org = (
active_organization and active_organization[0].slug == organization_slug
and active_organization[1] == request.user
)
if cached_active_org:
return active_organization[0]
active_organization = None
is_implicit = organization_slug is None
if is_implicit:
organization_slug = request.session.get('activeorg')
if organization_slug is not None:
if is_active_superuser(request):
try:
active_organization = Organization.objects.get_from_cache(
slug=organization_slug,
)
if active_organization.status != OrganizationStatus.VISIBLE:
raise Organization.DoesNotExist
except Organization.DoesNotExist:
logger.info('Active organization [%s] not found', organization_slug)
if active_organization is None:
organizations = Organization.objects.get_for_user(
user=request.user,
)
if active_organization is None and organization_slug:
try:
active_organization = six.next(
o for o in organizations if o.slug == organization_slug
)
except StopIteration:
logger.info('Active organization [%s] not found in scope', organization_slug)
if is_implicit:
del request.session['activeorg']
active_organization = None
if active_organization is None:
if not is_implicit:
return None
try:
active_organization = organizations[0]
except IndexError:
logger.info('User is not a member of any organizations')
pass
if active_organization and self._is_org_member(request.user, active_organization):
if active_organization.slug != request.session.get('activeorg'):
request.session['activeorg'] = active_organization.slug
self._active_org = (active_organization, request.user)
return active_organization
def _is_org_member(self, user, organization):
return OrganizationMember.objects.filter(
user=user,
organization=organization,
).exists()
def is_not_2fa_compliant(self, user, organization):
return organization.flags.require_2fa and not Authenticator.objects.user_has_2fa(user)
def get_active_team(self, request, organization, team_slug):
"""
Returns the currently selected team for the request or None
if no match.
"""
try:
team = Team.objects.get_from_cache(
slug=team_slug,
organization=organization,
)
except Team.DoesNotExist:
return None
if team.status != TeamStatus.VISIBLE:
return None
return team
def get_active_project(self, request, organization, project_slug):
try:
project = Project.objects.get_from_cache(
slug=project_slug,
organization=organization,
)
except Project.DoesNotExist:
return None
if project.status != ProjectStatus.VISIBLE:
return None
return project
def redirect_to_org(self, request):
from sentry import features
# TODO(dcramer): deal with case when the user cannot create orgs
organization = self.get_active_organization(request)
if organization:
url = reverse('sentry-organization-home', args=[organization.slug])
elif not features.has('organizations:create'):
return self.respond('sentry/no-organization-access.html', status=403)
else:
url = '/organizations/new/'
return HttpResponseRedirect(url)
class BaseView(View, OrganizationMixin):
auth_required = True
# TODO(dcramer): change sudo so it can be required only on POST
sudo_required = False
csrf_protect = True
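# Hedged usage sketch (hypothetical subclass, not part of this codebase):
# subclasses typically override these class attributes and implement handle(),
# which dispatch() calls once auth/sudo/permission checks have passed, e.g.
#
#     class HealthCheckView(BaseView):
#         auth_required = False
#         csrf_protect = False
#
#         def handle(self, request):
#             return HttpResponse('ok')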
def __init__(self, auth_required=None, sudo_required=None, csrf_protect=None,
*args, **kwargs):
if auth_required is not None:
self.auth_required = auth_required
if sudo_required is not None:
self.sudo_required = sudo_required
if csrf_protect is not None:
self.csrf_protect = csrf_protect
super(BaseView, self).__init__(*args, **kwargs)
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
"""
A note on the CSRF protection process.
Because the CSRF decorators don't work well with view subclasses, we
allow them to control whether a CSRF check is done by setting
self.csrf_protect. This has a couple of implications:
1. We need to mark this method as @csrf_exempt so that when the CSRF
middleware checks it as part of the regular middleware sequence, it
always passes.
2. If self.csrf_protect is set, we will re-run the CSRF check ourselves
using CsrfViewMiddleware().process_view()
3. But first we must remove the csrf_exempt attribute that was set by
the decorator so that the middleware doesn't shortcut and pass the
check unconditionally again.
"""
if self.csrf_protect:
if hasattr(self.dispatch.__func__, 'csrf_exempt'):
delattr(self.dispatch.__func__, 'csrf_exempt')
response = self.test_csrf(request)
if response:
return response
if self.is_auth_required(request, *args, **kwargs):
return self.handle_auth_required(request, *args, **kwargs)
if self.is_sudo_required(request, *args, **kwargs):
return self.handle_sudo_required(request, *args, **kwargs)
args, kwargs = self.convert_args(request, *args, **kwargs)
request.access = self.get_access(request, *args, **kwargs)
if not self.has_permission(request, *args, **kwargs):
return self.handle_permission_required(request, *args, **kwargs)
if 'organization' in kwargs and self.is_not_2fa_compliant(
request.user, kwargs['organization']):
return self.handle_not_2fa_compliant(request, *args, **kwargs)
self.request = request
self.default_context = self.get_context_data(request, *args, **kwargs)
return self.handle(request, *args, **kwargs)
def test_csrf(self, request):
middleware = CsrfViewMiddleware()
return middleware.process_view(request, self.dispatch, [request], {})
def get_access(self, request, *args, **kwargs):
return access.DEFAULT
def convert_args(self, request, *args, **kwargs):
return (args, kwargs)
def handle(self, request, *args, **kwargs):
return super(BaseView, self).dispatch(request, *args, **kwargs)
def is_auth_required(self, request, *args, **kwargs):
return (
self.auth_required and not (request.user.is_authenticated() and request.user.is_active)
)
def handle_auth_required(self, request, *args, **kwargs):
auth.initiate_login(request, next_url=request.get_full_path())
if 'organization_slug' in kwargs:
redirect_to = reverse('sentry-auth-organization', args=[kwargs['organization_slug']])
else:
redirect_to = auth.get_login_url()
return self.redirect(redirect_to)
def is_sudo_required(self, request, *args, **kwargs):
return self.sudo_required and not request.is_sudo()
def handle_sudo_required(self, request, *args, **kwargs):
return redirect_to_sudo(request.get_full_path())
def has_permission(self, request, *args, **kwargs):
return True
def handle_permission_required(self, request, *args, **kwargs):
redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
def handle_not_2fa_compliant(self, request, *args, **kwargs):
redirect_uri = self.get_not_2fa_compliant_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
def get_no_permission_url(self, request, *args, **kwargs):
return reverse('sentry-login')
def get_not_2fa_compliant_url(self, request, *args, **kwargs):
return reverse('sentry-account-settings-security')
def get_context_data(self, request, **kwargs):
context = csrf(request)
return context
def respond(self, template, context=None, status=200):
default_context = self.default_context
if context:
default_context.update(context)
return render_to_response(template, default_context, self.request, status=status)
def redirect(self, url):
return HttpResponseRedirect(url)
def get_team_list(self, user, organization):
return Team.objects.get_for_user(
organization=organization,
user=user,
with_projects=True,
)
def create_audit_entry(self, request, transaction_id=None, **kwargs):
return create_audit_entry(request, transaction_id, audit_logger, **kwargs)
class OrganizationView(BaseView):
"""
Any view acting on behalf of an organization should inherit from this base.
The 'organization' keyword argument is automatically injected into the
resulting dispatch.
"""
required_scope = None
valid_sso_required = True
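# Hedged example of a subclass (names are illustrative only); TeamView and
# ProjectView below follow the same pattern with 'team' / 'project' kwargs:
#
#     class OrganizationHomeView(OrganizationView):
#         required_scope = 'org:read'
#
#         def handle(self, request, organization):
#             return self.respond('sentry/organization-home.html')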
def get_access(self, request, organization, *args, **kwargs):
if organization is None:
return access.DEFAULT
return access.from_request(request, organization)
def get_context_data(self, request, organization, **kwargs):
context = super(OrganizationView, self).get_context_data(request)
context['organization'] = organization
context['TEAM_LIST'] = self.get_team_list(request.user, organization)
context['ACCESS'] = request.access.to_django_context()
return context
def has_permission(self, request, organization, *args, **kwargs):
if organization is None:
return False
if self.valid_sso_required:
if request.access.requires_sso and not request.access.sso_is_valid:
return False
if self.needs_sso(request, organization):
return False
if self.required_scope and not request.access.has_scope(self.required_scope):
logger.info(
'User %s does not have %s permission to access organization %s', request.user,
self.required_scope, organization
)
return False
return True
def is_auth_required(self, request, organization_slug=None, *args, **kwargs):
result = super(OrganizationView, self).is_auth_required(request, *args, **kwargs)
if result:
return result
# if the user is attempting to access an organization that *may* be
# accessible if they simply re-authenticate, we want to allow that
# this opens up a privacy hole, but the pros outweigh the cons
if not organization_slug:
return False
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if not active_organization:
try:
Organization.objects.get_from_cache(slug=organization_slug)
except Organization.DoesNotExist:
pass
else:
return True
return False
def handle_permission_required(self, request, organization, *args, **kwargs):
if self.needs_sso(request, organization):
logger.info(
'access.must-sso',
extra={
'organization_id': organization.id,
'user_id': request.user.id,
}
)
auth.initiate_login(request, next_url=request.get_full_path())
redirect_uri = reverse('sentry-auth-organization', args=[organization.slug])
else:
redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
def needs_sso(self, request, organization):
if not organization:
return False
# XXX(dcramer): this branch should really never hit
if not request.user.is_authenticated():
return False
if not self.valid_sso_required:
return False
if not request.access.requires_sso:
return False
if not auth.has_completed_sso(request, organization.id):
return True
if not request.access.sso_is_valid:
return True
return False
def convert_args(self, request, organization_slug=None, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
kwargs['organization'] = active_organization
return (args, kwargs)
def get_allowed_roles(self, request, organization, member=None):
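# Returns a (can_admin, allowed_roles) tuple: whether the requesting user may
# administer the given member, and which roles they are allowed to assign
# (capped at their own role's priority unless they are an active superuser).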
can_admin = request.access.has_scope('member:admin')
allowed_roles = []
if can_admin and not is_active_superuser(request):
acting_member = OrganizationMember.objects.get(
user=request.user,
organization=organization,
)
if member and roles.get(acting_member.role).priority < roles.get(member.role).priority:
can_admin = False
else:
allowed_roles = [
r for r in roles.get_all()
if r.priority <= roles.get(acting_member.role).priority
]
can_admin = bool(allowed_roles)
elif is_active_superuser(request):
allowed_roles = roles.get_all()
return (can_admin, allowed_roles, )
class TeamView(OrganizationView):
"""
Any view acting on behalf of a team should inherit from this base and the
matching URL pattern must pass 'team_slug'.
Two keyword arguments are added to the resulting dispatch:
- organization
- team
"""
def get_context_data(self, request, organization, team, **kwargs):
context = super(TeamView, self).get_context_data(request, organization)
context['team'] = team
return context
def has_permission(self, request, organization, team, *args, **kwargs):
if team is None:
return False
rv = super(TeamView, self).has_permission(request, organization)
if not rv:
return rv
if self.required_scope:
if not request.access.has_team_scope(team, self.required_scope):
logger.info(
'User %s does not have %s permission to access team %s', request.user,
self.required_scope, team
)
return False
elif not request.access.has_team(team):
logger.info('User %s does not have access to team %s', request.user, team)
return False
return True
def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if active_organization:
active_team = self.get_active_team(
request=request,
team_slug=team_slug,
organization=active_organization,
)
else:
active_team = None
kwargs['organization'] = active_organization
kwargs['team'] = active_team
return (args, kwargs)
class ProjectView(OrganizationView):
"""
Any view acting on behalf of a project should inherit from this base and the
matching URL pattern must pass 'organization_slug' as well as 'project_slug'.
Two keyword arguments are added to the resulting dispatch:
- organization
- project
"""
def get_context_data(self, request, organization, project, **kwargs):
context = super(ProjectView, self).get_context_data(request, organization)
context['project'] = project
context['processing_issues'] = serialize(project).get('processingIssues', 0)
return context
def has_permission(self, request, organization, project, *args, **kwargs):
if project is None:
return False
rv = super(ProjectView, self).has_permission(request, organization)
if not rv:
return rv
teams = list(project.teams.all())
if self.required_scope:
if not any(request.access.has_team_scope(team, self.required_scope) for team in teams):
logger.info(
'User %s does not have %s permission to access project %s', request.user,
self.required_scope, project
)
return False
elif not any(request.access.has_team(team) for team in teams):
logger.info('User %s does not have access to project %s', request.user, project)
return False
return True
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if active_organization:
active_project = self.get_active_project(
request=request,
organization=active_organization,
project_slug=project_slug,
)
else:
active_project = None
kwargs['project'] = active_project
kwargs['organization'] = active_organization
return (args, kwargs)
class AvatarPhotoView(View):
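# Serves a stored avatar photo looked up by its 'avatar_id' ident; an optional
# integer 's' query parameter returns a cached, resized copy. Responds with a
# long-lived cacheable PNG, 404 for missing avatars, or 400 for bad size values.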
model = None
def get(self, request, *args, **kwargs):
avatar_id = kwargs['avatar_id']
try:
avatar = self.model.objects.get(ident=avatar_id)
except self.model.DoesNotExist:
return HttpResponseNotFound()
photo = avatar.file
if not photo:
return HttpResponseNotFound()
size = request.GET.get('s')
photo_file = photo.getfile()
if size:
try:
size = int(size)
except ValueError:
return HttpResponseBadRequest()
else:
photo_file = avatar.get_cached_photo(size)
res = HttpResponse(photo_file, content_type='image/png')
res['Cache-Control'] = FOREVER_CACHE
return res
|
|
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 oitel
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2012, 2014 roger
# Copyright (c) 2012 nullzion
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Nathan Hoad
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import collections
import math
import cairocffi
import xcffib.xproto
from . import pangocffi
from . import utils
class TextLayout(object):
def __init__(self, drawer, text, colour, font_family, font_size,
font_shadow, wrap=True, markup=False):
self.drawer, self.colour = drawer, colour
layout = drawer.ctx.create_layout()
layout.set_alignment(pangocffi.ALIGN_CENTER)
if not wrap: # pango wraps by default
layout.set_ellipsize(pangocffi.ELLIPSIZE_END)
desc = pangocffi.FontDescription.from_string(font_family)
desc.set_absolute_size(pangocffi.units_from_double(font_size))
layout.set_font_description(desc)
self.font_shadow = font_shadow
self.layout = layout
self.markup = markup
self.text = text
self._width = None
def finalize(self):
self.layout.finalize()
@property
def text(self):
return self.layout.get_text()
@text.setter
def text(self, value):
if self.markup:
# pangocffi doesn't like None here, so we use "".
if value is None:
value = ''
attrlist, value, accel_char = pangocffi.parse_markup(value)
self.layout.set_attributes(attrlist)
self.layout.set_text(utils.scrub_to_utf8(value))
@property
def width(self):
if self._width is not None:
return self._width
else:
return self.layout.get_pixel_size()[0]
@width.setter
def width(self, value):
self._width = value
self.layout.set_width(pangocffi.units_from_double(value))
@width.deleter
def width(self):
self._width = None
self.layout.set_width(-1)
@property
def height(self):
return self.layout.get_pixel_size()[1]
def fontdescription(self):
return self.layout.get_font_description()
@property
def font_family(self):
d = self.fontdescription()
return d.get_family()
@font_family.setter
def font_family(self, font):
d = self.fontdescription()
d.set_family(font)
self.layout.set_font_description(d)
@property
def font_size(self):
d = self.fontdescription()
return d.get_size()
@font_size.setter
def font_size(self, size):
d = self.fontdescription()
d.set_size(size)
d.set_absolute_size(pangocffi.units_from_double(size))
self.layout.set_font_description(d)
def draw(self, x, y):
if self.font_shadow is not None:
self.drawer.set_source_rgb(self.font_shadow)
self.drawer.ctx.move_to(x + 1, y + 1)
self.drawer.ctx.show_layout(self.layout)
self.drawer.set_source_rgb(self.colour)
self.drawer.ctx.move_to(x, y)
self.drawer.ctx.show_layout(self.layout)
def framed(self, border_width, border_color, pad_x, pad_y, highlight_color=None):
return TextFrame(self, border_width, border_color, pad_x, pad_y, highlight_color=highlight_color)
class TextFrame(object):
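# Wraps a TextLayout with horizontal/vertical padding and an optional border or
# highlight colour, so widgets can draw framed, filled or underlined text.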
def __init__(self, layout, border_width, border_color, pad_x, pad_y, highlight_color=None):
self.layout = layout
self.border_width = border_width
self.border_color = border_color
self.drawer = self.layout.drawer
self.highlight_color = highlight_color
if isinstance(pad_x, collections.Iterable):
self.pad_left = pad_x[0]
self.pad_right = pad_x[1]
else:
self.pad_left = self.pad_right = pad_x
if isinstance(pad_y, collections.Iterable):
self.pad_top = pad_y[0]
self.pad_bottom = pad_y[1]
else:
self.pad_top = self.pad_bottom = pad_y
def draw(self, x, y, rounded=True, fill=False, line=False, highlight=False):
self.drawer.set_source_rgb(self.border_color)
opts = [
x, y,
self.layout.width + self.pad_left + self.pad_right,
self.layout.height + self.pad_top + self.pad_bottom,
self.border_width
]
if line:
if highlight:
self.drawer.set_source_rgb(self.highlight_color)
self.drawer.fillrect(*opts)
self.drawer.set_source_rgb(self.border_color)
# change to only fill in bottom line
opts[1] = self.height - self.border_width # y
opts[3] = self.border_width # height
self.drawer.fillrect(*opts)
elif fill:
if rounded:
self.drawer.rounded_fillrect(*opts)
else:
self.drawer.fillrect(*opts)
else:
if rounded:
self.drawer.rounded_rectangle(*opts)
else:
self.drawer.rectangle(*opts)
self.drawer.ctx.stroke()
self.layout.draw(
x + self.pad_left,
y + self.pad_top
)
def draw_fill(self, x, y, rounded=True):
self.draw(x, y, rounded=rounded, fill=True)
def draw_line(self, x, y, highlighted):
self.draw(x, y, line=True, highlight=highlighted)
@property
def height(self):
return self.layout.height + self.pad_top + self.pad_bottom
@property
def width(self):
return self.layout.width + self.pad_left + self.pad_right
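# Hedged usage sketch for the Drawer class below (illustrative values; assumes
# a configured `qtile` connection object and an X window id `wid`):
#
#     drawer = Drawer(qtile, wid, width=200, height=24)
#     layout = drawer.textlayout("hello", "ffffff", "sans", 12, None)
#     layout.draw(0, 6)
#     drawer.draw(offsetx=0, width=200)   # copy the pixmap onto the window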
class Drawer(object):
""" A helper class for drawing and text layout.
We have a drawer object for each widget in the bar. The underlying surface
is a pixmap with the same size as the bar itself. We draw to the pixmap
starting at offset 0, 0, and when the time comes to display to the window,
we copy the appropriate portion of the pixmap onto the window.
"""
def __init__(self, qtile, wid, width, height):
self.qtile = qtile
self.wid, self.width, self.height = wid, width, height
self.pixmap = self.qtile.conn.conn.generate_id()
self.gc = self.qtile.conn.conn.generate_id()
self.qtile.conn.conn.core.CreatePixmap(
self.qtile.conn.default_screen.root_depth,
self.pixmap,
self.wid,
self.width,
self.height
)
self.qtile.conn.conn.core.CreateGC(
self.gc,
self.wid,
xcffib.xproto.GC.Foreground | xcffib.xproto.GC.Background,
[
self.qtile.conn.default_screen.black_pixel,
self.qtile.conn.default_screen.white_pixel
]
)
self.surface = cairocffi.XCBSurface(
qtile.conn.conn,
self.pixmap,
self.find_root_visual(),
self.width,
self.height,
)
self.ctx = self.new_ctx()
self.clear((0, 0, 1))
def finalize(self):
self.qtile.conn.conn.core.FreeGC(self.gc)
self.qtile.conn.conn.core.FreePixmap(self.pixmap)
self.ctx = None
self.surface = None
def _rounded_rect(self, x, y, width, height, linewidth):
aspect = 1.0
corner_radius = height / 10.0
radius = corner_radius / aspect
degrees = math.pi / 180.0
self.ctx.new_sub_path()
delta = radius + linewidth / 2
self.ctx.arc(x + width - delta, y + delta, radius,
-90 * degrees, 0 * degrees)
self.ctx.arc(x + width - delta, y + height - delta,
radius, 0 * degrees, 90 * degrees)
self.ctx.arc(x + delta, y + height - delta, radius,
90 * degrees, 180 * degrees)
self.ctx.arc(x + delta, y + delta, radius,
180 * degrees, 270 * degrees)
self.ctx.close_path()
def rounded_rectangle(self, x, y, width, height, linewidth):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def rounded_fillrect(self, x, y, width, height, linewidth):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.fill()
def rectangle(self, x, y, width, height, linewidth=2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.stroke()
def fillrect(self, x, y, width, height, linewidth=2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.fill()
self.ctx.stroke()
def draw(self, offsetx=0, offsety=0, width=None, height=None):
"""
Parameters
==========
offsetx :
the X offset to start drawing at.
offsety :
the Y offset to start drawing at.
width :
the X portion of the canvas to draw at the starting point.
height :
the Y portion of the canvas to draw at the starting point.
"""
self.qtile.conn.conn.core.CopyArea(
self.pixmap,
self.wid,
self.gc,
0, 0, # srcx, srcy
offsetx, offsety, # dstx, dsty
self.width if width is None else width,
self.height if height is None else height
)
def find_root_visual(self):
for i in self.qtile.conn.default_screen.allowed_depths:
for v in i.visuals:
if v.visual_id == self.qtile.conn.default_screen.root_visual:
return v
def new_ctx(self):
return pangocffi.CairoContext(cairocffi.Context(self.surface))
def set_source_rgb(self, colour):
if type(colour) == list:
if len(colour) == 0:
# defaults to black
self.ctx.set_source_rgba(*utils.rgb("#000000"))
elif len(colour) == 1:
self.ctx.set_source_rgba(*utils.rgb(colour[0]))
else:
linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height)
step_size = 1.0 / (len(colour) - 1)
step = 0.0
for c in colour:
rgb_col = utils.rgb(c)
if len(rgb_col) < 4:
rgb_col[3] = 1
linear.add_color_stop_rgba(step, *rgb_col)
step += step_size
self.ctx.set_source(linear)
else:
self.ctx.set_source_rgba(*utils.rgb(colour))
def clear(self, colour):
self.set_source_rgb(colour)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
self.ctx.stroke()
def textlayout(self, text, colour, font_family, font_size, font_shadow,
markup=False, **kw):
"""Get a text layout"""
return TextLayout(self, text, colour, font_family, font_size,
font_shadow, markup=markup, **kw)
def max_layout_size(self, texts, font_family, font_size):
sizelayout = self.textlayout(
"", "ffffff", font_family, font_size, None)
widths, heights = [], []
for i in texts:
sizelayout.text = i
widths.append(sizelayout.width)
heights.append(sizelayout.height)
return max(widths), max(heights)
# Old text layout functions, to be deprecated.
def set_font(self, fontface, size, antialias=True):
self.ctx.select_font_face(fontface)
self.ctx.set_font_size(size)
fo = self.ctx.get_font_options()
fo.set_antialias(cairocffi.ANTIALIAS_SUBPIXEL)
def text_extents(self, text):
return self.ctx.text_extents(utils.scrub_to_utf8(text))
def font_extents(self):
return self.ctx.font_extents()
def fit_fontsize(self, heightlimit):
"""Try to find a maximum font size that fits any strings within the height"""
self.ctx.set_font_size(heightlimit)
asc, desc, height, _, _ = self.font_extents()
self.ctx.set_font_size(
int(heightlimit * heightlimit / height))
return self.font_extents()
def fit_text(self, strings, heightlimit):
"""Try to find a maximum font size that fits all strings within the height"""
self.ctx.set_font_size(heightlimit)
_, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings))
if not maxheight:
return 0, 0
self.ctx.set_font_size(
int(heightlimit * heightlimit / maxheight))
maxwidth, maxheight = 0, 0
for i in strings:
_, _, x, y, _, _ = self.ctx.text_extents(i)
maxwidth = max(maxwidth, x)
maxheight = max(maxheight, y)
return maxwidth, maxheight
def draw_vbar(self, color, x, y1, y2, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x, y1)
self.ctx.line_to(x, y2)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def draw_hbar(self, color, x1, x2, y, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x1, y)
self.ctx.line_to(x2, y)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
|
|
from __future__ import division
import csv
import itertools
import json
import random
import tempfile
from django.conf import settings
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse as parse_date
from frontend import bq_schemas as schemas
from gcutils.bigquery import Client
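# Hedged usage sketch for the DataFactory class below (illustrative only;
# uploading requires BQ_NONCE to be configured so a test dataset is targeted):
#
#     factory = DataFactory(seed=1)
#     months = factory.create_months('2018-01-01', 3)
#     practices = factory.create_practices(2)
#     presentations = factory.create_presentations(4)
#     factory.create_practice_statistics(practices, months)
#     factory.create_prescribing(presentations, practices, months)
#     factory.upload_to_bigquery()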
class DataFactory(object):
"""
This class provides methods to generate test fixtures and upload them to
BigQuery
"""
def __init__(self, seed=36):
self.random = random.Random()
self.random.seed(seed)
counter = itertools.count()
self.next_id = lambda: next(counter)
self._reset_caches()
def _reset_caches(self):
self._practices = []
self._practice_statistics = []
self._presentations = []
self._prescribing = []
self._bnf_map = []
def create_months(self, start_date, num_months):
date = parse_date(start_date)
return [
(date + relativedelta(months=i)).strftime('%Y-%m-%d 00:00:00 UTC')
for i in range(0, num_months)
]
def create_practice(self):
practice = {'code': 'ABC{:03}'.format(self.next_id())}
self._practices.append(practice)
return practice
def create_practices(self, num_practices):
return [
self.create_practice()
for i in range(num_practices)
]
def create_statistics_for_one_practice_and_month(self, practice, month):
data = {
'month': month,
'practice': practice['code'],
# We don't care about the PCT at the moment
'pct_id': '00A',
'astro_pu_items': self.random.random() * 1000,
'astro_pu_cost': self.random.random() * 1000,
'star_pu': json.dumps({
# This is just a small selection of available STAR-PU values
'statins_cost': self.random.random() * 1000,
'hypnotics_adq': self.random.random() * 1000,
'laxatives_cost': self.random.random() * 1000,
'analgesics_cost': self.random.random() * 1000,
'oral_nsaids_cost': self.random.random() * 1000,
}),
# We increment this value below
'total_list_size': 0
}
age_bands = (
'0_4', '5_14', '15_24', '25_34', '35_44',
'45_54', '55_64', '65_74', '75_plus'
)
for age_band in age_bands:
for sex in ('male', 'female'):
value = self.random.randint(0, 1000)
data['{}_{}'.format(sex, age_band)] = value
data['total_list_size'] += value
self._practice_statistics.append(data)
return data
def create_practice_statistics(self, practices, months):
return [
self.create_statistics_for_one_practice_and_month(practice, month)
for practice in practices
for month in months
]
def create_presentation(self):
index = self.next_id()
presentation = {
'bnf_code': self.create_bnf_code(index),
'name': 'Foo Tablet {}'.format(index),
'is_generic': self.random.choice([True, False]),
'adq_per_quantity': self.random.choice(
[None, self.random.random() * 30]
)
}
self._presentations.append(presentation)
return presentation
def create_bnf_code(self, index):
return '0123456789ABCD{}'.format(index)
def create_presentations(self, num_presentations):
return [self.create_presentation() for i in range(num_presentations)]
def create_prescription(self, presentation, practice, month):
prescription = {
'month': month,
'practice': practice['code'],
'bnf_code': presentation['bnf_code'],
'bnf_name': presentation['name'],
'items': self.random.randint(1, 100),
'quantity': self.random.randint(1, 100),
# Costs should be in pounds to two decimal places
'net_cost': self.random.randint(1, 10000) / 100,
'actual_cost': self.random.randint(1, 10000) / 100,
'sha': None,
'pct': None,
'stp': None,
'regional_team': None
}
self._prescribing.append(prescription)
return prescription
def create_prescribing(self, presentations, practices, months):
prescribing = []
for practice in practices:
# Make sure each practice prescribes in at least one month, although
# probably not every month
n = self.random.randint(1, len(months))
selected_months = self.random.sample(months, n)
for month in selected_months:
# Make sure the practice prescribes at least one presentation,
# although probably not every one
n = self.random.randint(1, len(presentations))
selected_presentations = self.random.sample(presentations, n)
for presentation in selected_presentations:
prescribing.append(
self.create_prescription(presentation, practice, month)
)
return prescribing
def update_bnf_code(self, presentation):
new_bnf_code = self.create_bnf_code(self.next_id())
self._bnf_map.append({
'former_bnf_code': presentation['bnf_code'],
'current_bnf_code': new_bnf_code
})
new_presentation = dict(presentation, bnf_code=new_bnf_code)
# Update references to the old BNF code, if there are any
indices = [
i for i, other_presentation
in enumerate(self._presentations)
if other_presentation['bnf_code'] == presentation['bnf_code']
]
if indices:
for i in indices:
self._presentations[i] = new_presentation
else:
self._presentations.append(new_presentation)
return new_presentation
def upload_to_bigquery(self):
client = Client('hscic')
assert_is_test_dataset(client)
create_and_populate_bq_table(
client,
'presentation',
schemas.PRESENTATION_SCHEMA,
self._presentations
)
create_and_populate_bq_table(
client,
'prescribing',
schemas.PRESCRIBING_SCHEMA,
self._prescribing
)
create_and_populate_bq_table(
client,
'practice_statistics_all_years',
schemas.PRACTICE_STATISTICS_SCHEMA,
self._practice_statistics
)
create_and_populate_bq_table(
client,
'bnf_map',
schemas.BNF_MAP_SCHEMA,
self._bnf_map
)
self._reset_caches()
def assert_is_test_dataset(client):
bq_nonce = getattr(settings, 'BQ_NONCE', None)
if not bq_nonce or str(bq_nonce) not in client.dataset_id:
raise RuntimeError('BQ_NONCE must be set')
def create_and_populate_bq_table(client, name, schema, table_data):
table = client.get_or_create_table(name, schema)
if not table_data:
return
with tempfile.NamedTemporaryFile() as f:
writer = csv.writer(f)
for item in table_data:
writer.writerow(dict_to_row(item, schema))
f.seek(0)
table.insert_rows_from_csv(f.name)
def dict_to_row(dictionary, schema):
row = [dictionary[field.name] for field in schema]
if len(row) != len(dictionary):
extra = set(dictionary) - set([field.name for field in schema])
raise ValueError(
'Dictionary has keys which are not in BigQuery schema: {}'
.format(', '.join(extra))
)
return row
|
|
import io
from random import randint, choice
from unittest import TestCase
import api.scans.controllers as api_scans
from api.common.errors import HTTPInvalidParam
from mock import MagicMock, patch
from api.scans.models import Scan, ScanEvents
from api.scans.schemas import ScanSchema
from api.files_ext.schemas import FileExtSchema
from irma.common.base.utils import IrmaScanStatus
class TestScansRoutes(TestCase):
def assertIsScan(self, data):
self.assertTrue(type(data) == dict)
self.assertCountEqual(data.keys(), ScanSchema().fields)
def assertIsScanList(self, data):
self.assertTrue(type(data) == list)
for scan in data:
self.assertIsScan(scan)
def assertIsFileExt(self, data):
self.assertTrue(type(data) == dict)
self.assertCountEqual(data.keys(), FileExtSchema().fields)
def assertIsFileWebList(self, data):
self.assertTrue(type(data) == list)
for fw in data:
self.assertIsFileExt(fw)
def setUp(self):
self.db = MagicMock()
self.session = self.db.session
self.old_db = api_scans.db
api_scans.db = self.db
def tearDown(self):
api_scans.db = self.old_db
del self.db
def test_list_error(self):
exception = Exception("test")
self.session.query.side_effect = exception
with self.assertRaises(Exception):
api_scans.list()
self.session.query.assert_called_once_with(Scan)
def test_list_default(self):
default_offset, default_limit = 0, 5
result = api_scans.list()
self.assertEqual(result["offset"], default_offset)
self.assertEqual(result["limit"], default_limit)
self.session.query.assert_called_with(Scan)
self.session.query().limit.assert_called_with(default_limit)
m_offset = self.session.query().limit().offset
m_offset.assert_called_with(default_offset)
self.session.query().count.assert_not_called()
def test_list_custom_request_no_status(self):
offset, limit = randint(1, 100), randint(1, 100)
status = choice(list(IrmaScanStatus.label.values()))
result = api_scans.list(offset=offset, limit=limit)
self.assertEqual(result["offset"], offset)
self.assertEqual(result["limit"], limit)
self.assertIsScanList(result["data"])
self.session.query().count.assert_called()
def test_list_custom_request_status(self):
offset, limit = randint(1, 100), randint(1, 100)
status = choice(list(IrmaScanStatus.label.values()))
for (k, v) in IrmaScanStatus.label.items():
if v == status:
status_code = k
break
result = api_scans.list(status=status, offset=offset, limit=limit)
self.assertEqual(result["offset"], offset)
self.assertEqual(result["limit"], limit)
self.assertIsScanList(result["data"])
self.session.query().filter().count.assert_called()
def test_list_custom_request_status_not_existing(self):
offset, limit = randint(1, 100), randint(1, 100)
with self.assertRaises(api_scans.HTTPInvalidParam):
api_scans.list(status="whatever", offset=offset, limit=limit)
@patch("api.scans.controllers.Scan")
def test_new_ok(self, m_Scan):
m_request = MagicMock()
result = api_scans.new(m_request)
m_Scan.assert_called()
self.assertIsInstance(m_Scan.call_args[0][0], float)
self.assertEqual(m_Scan.call_args[0][1], m_request.remote_addr)
m_Scan().set_status.assert_called_with(IrmaScanStatus.empty)
self.assertIsScan(result)
@patch("api.scans.controllers.Scan")
def test_new_error(self, m_Scan):
exception = Exception("test")
m_Scan.side_effect = exception
m_request = MagicMock()
with self.assertRaises(Exception):
api_scans.new(m_request)
@patch("api.scans.controllers.Scan")
def test_get_ok(self, m_Scan):
m_scan = MagicMock()
m_Scan.load_from_ext_id.return_value = m_scan
scan_id = "whatever"
result = api_scans.get(scan_id)
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
self.assertIsScan(result)
@patch("api.scans.controllers.Scan")
def test_get_error(self, m_Scan):
exception = Exception("test")
m_Scan.load_from_ext_id.side_effect = exception
scan_id = "whatever"
with self.assertRaises(Exception):
api_scans.get(scan_id)
@patch("api.scans.controllers.celery_frontend")
@patch("api.probes.services.check_probe")
@patch("api.scans.controllers.Scan")
def test_launch_v1_ok(self, m_Scan, m_check_probe, m_celery_frontend):
scan_id = "whatever"
probes = ["probe1", "probe2"]
force = "False"
mimetype_filtering = "False"
resubmit_files = "False"
m_check_probe.return_value = []
m_scan = Scan("date", "ip")
m_check_probe.assert_called_once_with(None)
m_scan.force = None
m_scan.mimetype_filtering = None
m_scan.resubmit_files = None
m_scan.events = [ScanEvents(IrmaScanStatus.empty, m_scan)]
m_Scan.load_from_ext_id.return_value = m_scan
result = api_scans.launch_v1(scan_id=scan_id,
probes=probes,
force=force,
mimetype_filtering=mimetype_filtering,
resubmit_files=resubmit_files
)
m_check_probe.assert_called()
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
m_celery_frontend.scan_launch.assert_called_once_with(scan_id)
self.assertIsScan(result)
self.assertEqual(result["force"], force, "force")
self.assertEqual(result["mimetype_filtering"], mimetype_filtering,
"mimetype")
self.assertEqual(result["resubmit_files"], resubmit_files, "resubmit")
@patch("api.scans.controllers.Scan")
def test_launch_v1_error(self, m_Scan):
exception = Exception("test")
m_Scan.load_from_ext_id.side_effect = exception
scan_id = "whatever"
probes = ["probe1", "probe2"]
force = False
mimetype_filtering = False
resubmit_files = False
with self.assertRaises(Exception):
api_scans.launch_v1(scan_id=scan_id,
probes=probes,
force=force,
mimetype_filtering=mimetype_filtering,
resubmit_files=resubmit_files
)
@patch("api.scans.controllers.FileExt")
@patch("api.scans.controllers.celery_frontend")
@patch("api.scans.controllers.probe_ctrl.check_probe")
def test_launch_v2_ok(self, m_check_probe, m_celery_frontend, m_FileExt):
m_request = MagicMock()
force = False
mimetype_filtering = False
resubmit_files = False
probes = ["probe1", "probe2"]
m_body = {
"files": ["file_ext1"],
"options": {
"probes": probes,
"force": force,
"mimetype_filtering": mimetype_filtering,
"resubmit_files": resubmit_files,
}
}
m_file_ext = MagicMock()
m_file_ext.scan = None
m_FileExt.load_from_ext_id.return_value = m_file_ext
result = api_scans.launch_v2(m_request, m_body)
m_check_probe.assert_called_once_with(probes)
m_celery_frontend.scan_launch.assert_called_once()
self.assertIsScan(result)
self.assertEqual(result["force"], str(force),
"force value is wrong")
self.assertEqual(result["mimetype_filtering"], str(mimetype_filtering),
"mimetype_filtering value is wrong")
self.assertEqual(result["resubmit_files"], str(resubmit_files),
"resubmit_files value is wrong")
@patch("api.scans.controllers.FileExt")
@patch("api.scans.controllers.celery_frontend")
@patch("api.scans.controllers.probe_ctrl")
def test_launch_v2_file_deleted(self, m_probe_ctrl, m_celery_frontend,
m_FileExt):
m_request = MagicMock()
force = False
mimetype_filtering = False
resubmit_files = False
probes = ["probe1", "probe2"]
m_body = {
"files": ["file_ext1"],
"options": {
"probes": probes,
"force": force,
"mimetype_filtering": mimetype_filtering,
"resubmit_files": resubmit_files,
}
}
sha256 = "whatever"
m_file_ext, m_file = MagicMock(), MagicMock()
m_file.path = None
m_file_ext.file = m_file
m_file_ext.file.sha256 = sha256
m_file_ext.scan = None
m_FileExt.load_from_ext_id.return_value = m_file_ext
expected = "The \"files\" parameter is invalid. File with hash " \
"%s should be (re)uploaded" % sha256
with self.assertRaises(api_scans.HTTPInvalidParam) as context:
api_scans.launch_v2(m_request, m_body)
m_probe_ctrl.check_probe.assert_not_called()
m_celery_frontend.scan_launch.assert_not_called()
self.assertEqual(context.exception.description, expected)
@patch("api.scans.controllers.FileExt")
@patch("api.scans.controllers.celery_frontend")
@patch("api.scans.controllers.probe_ctrl")
def test_launch_v2_file_not_found(self, m_probe_ctrl, m_celery_frontend,
m_FileExt):
m_request = MagicMock()
force = False
mimetype_filtering = False
resubmit_files = False
probes = ["probe1", "probe2"]
m_body = {
"files": ["file_ext1", "file_ext2"],
"options": {
"probes": probes,
"force": force,
"mimetype_filtering": mimetype_filtering,
"resubmit_files": resubmit_files,
}
}
m_FileExt.load_from_ext_id.side_effect = \
api_scans.IrmaDatabaseResultNotFound
expected = "The \"files\" parameter is invalid. File file_ext1 " \
"not found"
with self.assertRaises(api_scans.HTTPInvalidParam) as context:
api_scans.launch_v2(m_request, m_body)
m_probe_ctrl.check_probe.assert_not_called()
m_celery_frontend.scan_launch.assert_not_called()
self.assertEqual(context.exception.description, expected)
@patch("api.scans.controllers.FileWeb")
@patch("api.scans.controllers.celery_frontend")
@patch("api.scans.controllers.probe_ctrl")
def test_launch_v2_file_already_scanned(self, m_probe_ctrl,
m_celery_frontend, m_FileWeb):
m_request = MagicMock()
force = False
mimetype_filtering = False
resubmit_files = False
probes = ["probe1", "probe2"]
m_body = {
"files": ["fileweb1"],
"options": {
"probes": probes,
"force": force,
"mimetype_filtering": mimetype_filtering,
"resubmit_files": resubmit_files,
}
}
m_file_ext = MagicMock()
m_file_ext.scan = "scanid1"
m_FileWeb.load_from_ext_id.return_value = m_file_ext
expected = "The \"files\" parameter is invalid. File fileweb1 " \
"already scanned"
with self.assertRaises(api_scans.HTTPInvalidParam) as context:
api_scans.launch_v2(m_request, m_body)
m_probe_ctrl.check_probe.assert_not_called()
m_celery_frontend.scan_launch.assert_not_called()
self.assertEqual(context.exception.description, expected)
def test_launch_v2_error(self):
m_body = MagicMock()
m_request = MagicMock()
with self.assertRaises(HTTPInvalidParam):
api_scans.launch_v2(m_request, m_body)
def test_launch_v2_no_body(self):
m_body = None
m_request = MagicMock()
with self.assertRaises(HTTPInvalidParam):
api_scans.launch_v2(m_request, m_body)
def test_launch_v2_force_wrong_type(self):
m_request = MagicMock()
m_body = {"files": [MagicMock()],
"options": {"force": 15,
"mimetype_filtering": True,
"resubmit_files": True}}
with self.assertRaises(HTTPInvalidParam):
api_scans.launch_v2(m_request, m_body)
def test_launch_v2_mimetype_filtering_wrong_type(self):
m_request = MagicMock()
m_body = {"files": [MagicMock()],
"options": {"force": True,
"mimetype_filtering": 42,
"resubmit_files": True}}
with self.assertRaises(HTTPInvalidParam):
api_scans.launch_v2(m_request, m_body)
def test_launch_v2_resubmit_files_wrong_type(self):
m_request = MagicMock()
m_body = {"files": [MagicMock()],
"options": {"force": True,
"mimetype_filtering": True,
"resubmit_files": 17}}
with self.assertRaises(HTTPInvalidParam):
api_scans.launch_v2(m_request, m_body)
@patch("api.scans.controllers.scan_ctrl")
@patch("api.scans.controllers.Scan")
def test_cancel_ok(self, m_Scan, m_scan_ctrl):
scan_id = "whatever"
result = api_scans.cancel(scan_id)
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
self.assertIsScan(result)
m_scan_ctrl.cancel.assert_called_once()
@patch("api.scans.controllers.Scan")
def test_cancel_raises(self, m_Scan):
scan_id = "whatever"
m_Scan.load_from_ext_id.side_effect = Exception()
with self.assertRaises(Exception):
api_scans.cancel(scan_id)
@patch("api.scans.controllers.File")
@patch("api.scans.controllers.FileExt")
@patch("api.scans.controllers.IrmaScanStatus")
@patch("api.scans.controllers.Scan")
def test_add_files_ok(self, m_Scan, m_IrmaScanStatus, m_FileExt, m_File):
m_file = MagicMock()
m_request = MagicMock()
scan_id = "whatever"
data = b"DATA"
filename = "filename"
m_file.filename = filename
m_file.file = io.BytesIO(data)
m_request._params = {'files': m_file}
result = api_scans.add_files(m_request, scan_id)
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
self.assertIsScan(result)
@patch("api.scans.controllers.IrmaScanStatus")
@patch("api.scans.controllers.scan_ctrl")
@patch("api.scans.controllers.Scan")
def test_add_files_no_files(self, m_Scan, m_scan_ctrl, m_IrmaScanStatus):
scan_id = "whatever"
m_request = MagicMock()
m_request.files = {}
expected = "The \"files\" parameter is invalid. Empty list"
with self.assertRaises(HTTPInvalidParam) as context:
api_scans.add_files(m_request, scan_id=scan_id)
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
self.assertEqual(context.exception.description, expected)
m_scan_ctrl.add_files.assert_not_called()
@patch("api.scans.controllers.Scan")
def test_get_results_ok(self, m_Scan):
scan_id = "whatever"
result = api_scans.get_results(scan_id)
m_Scan.load_from_ext_id.assert_called_once_with(scan_id, self.session)
self.assertIsFileWebList(result)
@patch("api.scans.controllers.Scan")
def test_get_results_raises(self, m_Scan):
scan_id = "whatever"
m_Scan.load_from_ext_id.side_effect = Exception()
with self.assertRaises(Exception):
api_scans.get_results(scan_id)
@patch("api.scans.controllers.Scan")
def test_get_report(self, m_Scan):
request = MagicMock()
response = MagicMock()
m_scan = MagicMock()
m_scan.external_id = "whatever"
m_scan.finished.return_value = True
def side_effect(scan_id, session):
if scan_id == m_scan.external_id:
return m_scan
m_Scan.load_from_ext_id.side_effect = side_effect
self.assertEqual(api_scans.get_report(request, response,
m_scan.external_id),
m_scan)
@patch("api.scans.controllers.Scan")
def test_get_report_error(self, m_Scan):
request = MagicMock()
response = MagicMock()
m_scan = MagicMock()
m_scan.external_id = "whatever"
m_scan.finished.return_value = False
def side_effect(scan_id, session):
if scan_id == m_scan.external_id:
return m_scan
m_Scan.load_from_ext_id.side_effect = side_effect
with self.assertRaises(api_scans.HTTPUnauthorized):
api_scans.get_report(request, response, m_scan.external_id)
@patch("api.scans.controllers.Scan")
@patch("api.scans.controllers.FileExt")
@patch("api.scans.controllers.celery_frontend")
@patch("api.scans.controllers.files_ctrl")
def test_quick_scan(self, m_files_ctrl, m_celery_frontend, m_FileExt,
m_Scan):
m_request = MagicMock()
m_file_ext = MagicMock()
m_FileExt.load_from_ext_id.return_value = m_file_ext
m_Scan.return_value.external_id = "extid_HOvDI2"
result = api_scans.quick_scan(m_request)
m_files_ctrl.create.assert_called_once_with(m_request)
m_celery_frontend.scan_launch.assert_called_once_with("extid_HOvDI2")
self.assertIsScan(result)
|
|
__author__ = 'sushil, abdullahS'
import sys
from pprint import pprint, pformat # NOQA
from optparse import OptionParser
import logging
from hydra.lib import util
from hydra.lib.h_analyser import HAnalyser
from hydra.lib.hydrabase import HydraBase
try:
# Python 2.x
from ConfigParser import ConfigParser
except ImportError:
# Python 3.x
from configparser import ConfigParser
l = util.createlogger('runTest', logging.INFO)
# l.setLevel(logging.DEBUG)
tout_60s = 60000
tout_30s = 30000
tout_10s = 10000
class ZMQPubAnalyser(HAnalyser):
def __init__(self, server_ip, server_port, task_id):
HAnalyser.__init__(self, server_ip, server_port, task_id)
class ZMQSubAnalyser(HAnalyser):
def __init__(self, server_ip, server_port, task_id):
HAnalyser.__init__(self, server_ip, server_port, task_id)
class RunTestZMQ(HydraBase):
def __init__(self, options, runtest=True):
# self.options = options
# self.test_duration = options.test_duration
# self.msg_batch = options.msg_batch
# self.msg_rate = options.msg_rate
# self.total_sub_apps = options.total_sub_apps
# self.config_file = options.config_file
# self.keep_running = options.keep_running
self.config = ConfigParser()
HydraBase.__init__(self, 'zmqScale', options, self.config, startappserver=runtest)
self.zstpub = self.format_appname('/zst-pub')
self.zstsub = self.format_appname('/zst-sub')
self.add_appid(self.zstpub)
self.add_appid(self.zstsub)
self.boundary_setup(self.options, 'msg_rate', self.boundary_resultfn)
if runtest:
self.run_test()
self.stop_appserver()
def rerun_test(self, options):
self.set_options(options)
self.boundary_setup(self.options, 'msg_rate', self.boundary_resultfn)
# self.test_duration = options.test_duration
# self.msg_batch = options.msg_batch
# self.msg_rate = options.msg_rate
l.info("Updating test metrics: test_duration=%s, msg_batch=%s, msg_rate=%s",
self.options.test_duration, self.options.msg_batch, self.options.msg_rate)
# Update the PUB server with new metrics
self.ha_pub.update_config(test_duration=self.options.test_duration,
msg_batch=self.options.msg_batch,
msg_requested_rate=self.options.msg_rate)
l.info("PUB server updated")
self.reset_all_app_stats(self.zstsub)
# Select which subs are going to be slow and send them their rate;
# add these properties to the sub app data structure.
acnt = self.get_app_instcnt(self.zstsub)
slow_num = int(acnt * options.slow_clients_percent / 100)
update_sub_config = False
if slow_num:
slow_clients = self.get_app_property(self.zstsub, 'slow_clients')
if not slow_clients or int(slow_num) != len(slow_clients):
# reset all the clients
self.set_app_property(self.zstsub, 'slow_clients',
set(self.random_select_instances(self.zstsub, slow_num)))
update_sub_config = True
rec_num = int(acnt * options.rec_clients_percent / 100)
if rec_num:
rec_clients = self.get_app_property(self.zstsub, 'reconnecting_clients')
if not rec_clients or rec_num != len(rec_clients):
self.set_app_property(self.zstsub, 'reconnecting_clients',
set(self.random_select_instances(self.zstsub, rec_num)))
update_sub_config = True
if update_sub_config:
# Now update all the slow clients
ipm = self.get_app_ipport_map(self.zstsub)
slow_set = self.get_app_property(self.zstsub, 'slow_clients')
rec_set = self.get_app_property(self.zstsub, 'reconnecting_clients')
for key in ipm.keys():
ip = ipm[key][1]
port = ipm[key][0]
ha = HAnalyser(ip, port, key)
recv_rate = 0
reconnect_rate = 0
if slow_set and key in slow_set:
print("Task ID " + key + " is going to be slow")
recv_rate = options.slow_clients_rate
if rec_set and key in rec_set:
print("Task ID " + key + " is going to be reconnecting")
reconnect_rate = options.rec_clients_rate
ha.update_config(recv_rate=recv_rate, reconnect_rate=reconnect_rate)
ha.stop()
# Signal message sending
l.info("Sending signal to PUB to start sending all messages..")
self.ha_pub.start_test()
self.ha_pub.wait_for_testend()
self.fetch_app_stats(self.zstpub)
assert(len(self.apps[self.zstpub]['stats']) == 1)
pub_data = self.apps[self.zstpub]['stats'].values()[0]
l.info("Publisher sent %d packets at a rate of %d pps" % (pub_data['msg_cnt'],
pub_data['rate']))
# Fetch all sub client data
self.fetch_app_stats(self.zstsub)
return self.result_parser()
def run_test(self, first_run=True):
self.start_init()
if hasattr(self, 'sub_app_ip_rep_port_map'):
# If Sub's have been launched Reset first
self.reset_all_app_stats(self.zstsub)
self.launch_zmq_pub()
# Launch zmq sub up to self.total_sub_apps
self.launch_zmq_sub()
res = self.rerun_test(self.options)
return res
def boundary_resultfn(self, options, res):
message_rate = options.msg_rate
l.info("Completed run with message rate = %d and client count=%d/%d " %
(message_rate, options.total_sub_apps * 10, res['valid_client_cnt']) +
"Reported Rate PUB:%.0f SUB:%.0f and Reported Drop Percentage : %.4f" %
(res['average_tx_rate'], res['average_rate'], res['average_packet_loss']))
l.info("\t\tCompleted-2: Pub-CPU:%3f%% PUB-TX:%.2fMbps PUB-RX:%.2fMbps " %
(res['pub_cpu'], res['pub_net_txrate'] / 1e6, res['pub_net_rxrate'] / 1e6))
run_pass = True
if (res['average_tx_rate'] < 0.7 * message_rate):
# if we are unable to get 70% of the tx rate
run_pass = False
return (run_pass, res['average_rate'], res['average_packet_loss'])
def stop_and_delete_all_apps(self):
self.delete_all_launched_apps()
def result_parser(self):
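# Aggregate the per-subscriber stats into overall packet counts, receive
# rates and packet-loss percentages; slow and reconnecting clients are
# reported separately, and publisher CPU / network rates are derived from
# its start/end counters.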
result = {
'client_count': 0,
'average_packets': 0,
'average_rate': 0,
'failing_clients': 0,
'average_packet_loss': 0
}
pub_data = self.apps[self.zstpub]['stats'].values()[0]
msg_cnt_pub_tx = pub_data['msg_cnt']
bad_clients = 0
client_rate = 0
bad_client_rate = 0
clients_packet_count = 0
slow_clients_rate = 0
slow_clients_packet_cnt = 0
slow_clients_cnt = 0
rec_clients_rate = 0
rec_clients_packet_cnt = 0
rec_clients_cnt = 0
rec_cnt = 0
stats = self.get_app_stats(self.zstsub)
num_subs = len(stats)
valid_client_cnt = 0
for client in stats.keys():
# l.info(" CLIENT = " + pformat(client) + " DATA = " + pformat(info))
info = stats[client]
task_id = info['task_id']
slow_clients = self.get_app_property(self.zstsub, 'slow_clients')
rec_clients = self.get_app_property(self.zstsub, 'reconnecting_clients')
# We are seeing some clients completely fail to receive messages.
# They need to be accounted for separately; for now they are grouped
# with the slow clients.
if (slow_clients and task_id in slow_clients) or info['msg_cnt'] == 0:
slow_clients_rate += info['rate']
slow_clients_packet_cnt += info['msg_cnt']
slow_clients_cnt += 1
elif rec_clients and task_id in rec_clients:
rec_clients_rate += info['rate']
rec_clients_packet_cnt += info['msg_cnt']
rec_clients_cnt += 1
rec_cnt += info['reconnect_cnt']
else:
client_rate += info['rate']
clients_packet_count += info['msg_cnt']
valid_client_cnt += 1
if info['msg_cnt'] != msg_cnt_pub_tx:
if (bad_clients < 4):
l.info("[%s] Count Mismatch Info: %s" % (client, pformat(info)))
# else:
# l.info("[%s] Count Mismatch Suppressing details (Use DCOS to get data)." % (client))
bad_clients += 1
bad_client_rate += info['rate']
if bad_clients > 0:
l.info("Total number of clients experiencing packet drop = %d out of %d clients" %
(bad_clients, num_subs))
l.info('Average rate seen at the failing clients %f' % (bad_client_rate / bad_clients))
else:
l.info("No client experienced packet drops out of %d clients" % num_subs)
l.info("Total packets sent by PUB:%d and average packets received by client:%d" %
(msg_cnt_pub_tx, clients_packet_count / valid_client_cnt))
l.info('Average rate seen at the pub %f and at clients %f' %
(pub_data['rate'], (client_rate / valid_client_cnt)))
if slow_clients_cnt:
plos = ((msg_cnt_pub_tx - (1.0 * slow_clients_packet_cnt / slow_clients_cnt)) * 100.0 / msg_cnt_pub_tx)
l.info("Slow Client[%d] :: Average Packets Received:%d and Average Rate: %f average packet loss %f" %
(slow_clients_cnt, slow_clients_packet_cnt / slow_clients_cnt,
slow_clients_rate / slow_clients_cnt, plos))
if rec_clients_cnt:
plos = ((msg_cnt_pub_tx - (1.0 * rec_clients_packet_cnt / rec_clients_cnt)) * 100.0 / msg_cnt_pub_tx)
l.info("RECONNECTING Client"
"[%d] :: Average Packets Received:%d and Average Rate: %f"
" average packet loss %f Total Reconnects %d" %
(rec_clients_cnt, rec_clients_packet_cnt / rec_clients_cnt,
rec_clients_rate / rec_clients_cnt, plos, rec_cnt))
result['client_count'] = num_subs
result['packet_tx'] = msg_cnt_pub_tx
result['average_packets'] = clients_packet_count / valid_client_cnt
result['average_rate'] = client_rate / valid_client_cnt
result['failing_clients'] = bad_clients
result['average_tx_rate'] = pub_data['rate']
result['valid_client_cnt'] = valid_client_cnt
if bad_clients:
result['failing_clients_rate'] = (bad_client_rate / bad_clients)
result['average_packet_loss'] = \
((msg_cnt_pub_tx - (1.0 * clients_packet_count / valid_client_cnt)) * 100.0 / msg_cnt_pub_tx)
if 'cpu:start' in pub_data:
pub_total_cpu = (pub_data['cpu:end'][0] + pub_data['cpu:end'][1] -
(pub_data['cpu:start'][0] + pub_data['cpu:start'][1]))
else:
pub_total_cpu = 0
pub_total_time = pub_data['time:end'] - pub_data['time:start']
if 'net:start' in pub_data:
pub_total_nw_txbytes = pub_data['net:end'][0] - pub_data['net:start'][0]
pub_total_nw_rxbytes = pub_data['net:end'][1] - pub_data['net:start'][1]
else:
pub_total_nw_rxbytes = pub_total_nw_txbytes = 0
result['pub_cpu'] = 100.0 * pub_total_cpu / pub_total_time
result['pub_net_txrate'] = pub_total_nw_txbytes / pub_total_time
result['pub_net_rxrate'] = pub_total_nw_rxbytes / pub_total_time
l.debug(" RESULTS on TEST = " + pformat(result))
return result
def launch_zmq_pub(self):
l.info("Launching the pub app")
constraints = [self.app_constraints(field='hostname', operator='UNIQUE')]
# Use cluster0 for launching the PUB
if 0 in self.mesos_cluster:
constraints.append(self.app_constraints(field=self.mesos_cluster[0]['cat'],
operator='CLUSTER', value=self.mesos_cluster[0]['match']))
if not self.options.c_pub:
self.create_hydra_app(name=self.zstpub, app_path='hydra.zmqtest.zmq_pub.run',
app_args='%s %s %s %s' % (self.options.test_duration,
self.options.msg_batch,
self.options.msg_rate, self.options.total_sub_apps),
cpus=0.01, mem=32,
ports=[0],
constraints=constraints)
else:
self.create_binary_app(name=self.zstpub,
app_script='./src/main/scripts/zmq_pub',
cpus=0.01, mem=32,
ports=[0],
constraints=constraints)
# Find launched pub server's ip, rep PORT
ipm = self.get_app_ipport_map(self.zstpub)
assert(len(ipm) == 1)
self.pub_ip = ipm.values()[0][1]
self.pub_rep_taskport = str(ipm.values()[0][0])
# self.pub_ip = self.find_ip_uniqueapp(self.zstpub)
# tasks = self.get_app_tasks(self.zstpub)
# assert(len(tasks) == 1)
# assert(len(tasks[0].ports) == 1)
# self.pub_rep_taskport = str(tasks[0].ports[0])
l.info("[zmq_pub] ZMQ pub server running at [%s]", self.pub_ip)
l.info("[zmq_pub] ZMQ REP server running at [%s:%s]", self.pub_ip, self.pub_rep_taskport)
# Init ZMQPubAnalyser
self.ha_pub = ZMQPubAnalyser(self.pub_ip, self.pub_rep_taskport, ipm.keys()[0])
def launch_zmq_sub(self):
l.info("Launching the sub app")
constraints = []
t_app_path = 'hydra.zmqtest.zmq_sub.run10'
if self.options.c_sub:
t_app_path = 'hydra.zmqtest.zmq_sub.run10cpp'
# Use cluster 1 for launching the SUB
if 1 in self.mesos_cluster:
constraints.append(self.app_constraints(field=self.mesos_cluster[1]['cat'],
operator='CLUSTER', value=self.mesos_cluster[1]['match']))
self.create_hydra_app(name=self.zstsub, app_path=t_app_path,
app_args='%s 15556' % (self.pub_ip), # pub_ip, pub_port
cpus=0.01, mem=32,
ports=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
constraints=constraints)
# scale
self.scale_sub_app()
def scale_sub_app(self):
self.scale_app(self.zstsub, self.options.total_sub_apps)
self.remove_unresponsive_tasks(self.zstsub)
def delete_all_launched_apps(self):
l.info("Deleting all launched apps")
l.info("Deleting PUB")
self.delete_app(self.zstpub)
l.info("Deleting SUBs")
self.delete_app(self.zstsub)
class RunNegativeTest(object):
def __init__(self, argv):
raise Exception("Negative Test for Exception.")
class RunPositiveTest(object):
def __init__(self, argv):
pass
class RunTest(object):
def __init__(self, argv):
    usage = ('python %prog --test_duration=<time to run test> --msg_batch=<msg burst batch before sleep>'
             ' --msg_rate=<rate in packet per secs> --total_sub_apps=<Total sub apps to launch>'
             ' --config_file=<path_to_config_file> --keep_running')
parser = OptionParser(description='zmq scale test master',
version="0.1", usage=usage)
parser.add_option("--test_duration", dest='test_duration', type='int', default=10)
parser.add_option("--msg_batch", dest='msg_batch', type='int', default=100)
parser.add_option("--msg_rate", dest='msg_rate', type='int', default=10000)
parser.add_option("--total_sub_apps", dest='total_sub_apps', type='int', default=100)
parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini')
parser.add_option("--keep_running", dest='keep_running', action="store_true", default=False)
parser.add_option("--c_pub", dest='c_pub', action="store_true", default=False)
parser.add_option("--c_sub", dest='c_sub', action="store_true", default=False)
parser.add_option("--slow_clients_percent", dest='slow_clients_percent', type='float', default=0)
parser.add_option("--slow_clients_rate", dest='slow_clients_rate', type='int', default=1000)
parser.add_option("--reconnecting_clients_percent", dest='rec_clients_percent', type='float', default=0)
parser.add_option("--reconnecting_clients_rate", dest='rec_clients_rate', type='float', default=0.5)
(options, args) = parser.parse_args()
    if len(args) != 0:
parser.print_help()
sys.exit(1)
r = RunTestZMQ(options, False)
r.start_appserver()
res = r.run_test()
r.delete_all_launched_apps()
print("RES = " + pformat(res))
if not options.keep_running:
r.stop_appserver()
else:
print("Keep running is set: Leaving the app server running")
print(" you can use the marathon gui/cli to scale the app up.")
print(" after you are done press enter on this window")
      raw_input('>')
r.stop_appserver()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ConfigParser import ConfigParser
import textwrap
import string
from swift.common.utils import config_true_value, SWIFT_CONF_FILE
from swift.common.ring import Ring
from swift.common.utils import quorum_size
from swift.common.exceptions import RingValidationError
from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES
LEGACY_POLICY_NAME = 'Policy-0'
VALID_CHARS = '-' + string.letters + string.digits
VALID_HYBRID_TYPES = ['ec_ec', 'ec_rep', 'rep_rep', 'rep_ec']
DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication'
EC_POLICY = 'erasure_coding'
HYBRID_POLICY = 'hybrid'
DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576
CLOUD_EC_OBJECT_SEGMENT_SIZE = 1048576
class PolicyError(ValueError):
def __init__(self, msg, index=None):
if index is not None:
msg += ', for index %r' % index
super(PolicyError, self).__init__(msg)
def _get_policy_string(base, policy_index):
if policy_index == 0 or policy_index is None:
return_string = base
else:
return_string = base + "-%d" % int(policy_index)
return return_string
def get_policy_string(base, policy_or_index):
"""
Helper function to construct a string from a base and the policy.
Used to encode the policy index into either a file name or a
directory name by various modules.
:param base: the base string
:param policy_or_index: StoragePolicy instance, or an index
(string or int), if None the legacy
storage Policy-0 is assumed.
:returns: base name with policy index added
:raises: PolicyError if no policy exists with the given policy_index
"""
if isinstance(policy_or_index, BaseStoragePolicy):
policy = policy_or_index
else:
policy = POLICIES.get_by_index(policy_or_index)
if policy is None:
raise PolicyError("Unknown policy", index=policy_or_index)
return _get_policy_string(base, int(policy))
def split_policy_string(policy_string):
"""
Helper function to convert a string representing a base and a
policy. Used to decode the policy from either a file name or
a directory name by various modules.
:param policy_string: base name with policy index added
:raises: PolicyError if given index does not map to a valid policy
:returns: a tuple, in the form (base, policy) where base is the base
string and policy is the StoragePolicy instance for the
index encoded in the policy_string.
"""
if '-' in policy_string:
base, policy_index = policy_string.rsplit('-', 1)
else:
base, policy_index = policy_string, None
policy = POLICIES.get_by_index(policy_index)
if get_policy_string(base, policy) != policy_string:
raise PolicyError("Unknown policy", index=policy_index)
return base, policy
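# Illustrative round trip of the two helpers above (assumes POLICIES contains
# policies with indexes 0 and 2; the base name is chosen only for illustration):
#
#   get_policy_string('objects', 0)   -> 'objects'      (index 0 keeps the bare base)
#   get_policy_string('objects', 2)   -> 'objects-2'    (index appended as '-<idx>')
#   split_policy_string('objects-2')  -> ('objects', <policy with index 2>)
#   split_policy_string('objects')    -> ('objects', <policy with index 0>)
#
# Both directions raise PolicyError when the encoded index is unknown to POLICIES.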
class BaseStoragePolicy(object):
"""
Represents a storage policy. Not meant to be instantiated directly;
    implement a derived subclass (e.g. StoragePolicy, ECStoragePolicy, etc.)
or use :func:`~swift.common.storage_policy.reload_storage_policies` to
load POLICIES from ``swift.conf``.
The object_ring property is lazy loaded once the service's ``swift_dir``
is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may
be over-ridden via object_ring kwarg at create time for testing or
actively loaded with :meth:`~StoragePolicy.load_ring`.
"""
policy_type_to_policy_cls = {}
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
# do not allow BaseStoragePolicy class to be instantiated directly
if type(self) == BaseStoragePolicy:
raise TypeError("Can't instantiate BaseStoragePolicy directly")
# policy parameter validation
try:
self.idx = int(idx)
except ValueError:
raise PolicyError('Invalid index', idx)
if self.idx < 0:
raise PolicyError('Invalid index', idx)
if not name:
raise PolicyError('Invalid name %r' % name, idx)
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
raise PolicyError('Names are used as HTTP headers, and can not '
'reliably contain any characters not in %r. '
'Invalid name %r' % (VALID_CHARS, name))
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
raise PolicyError(msg, idx)
self.name = name
self.is_deprecated = config_true_value(is_deprecated)
self.is_default = config_true_value(is_default)
if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
raise PolicyError('Invalid type', self.policy_type)
if self.is_deprecated and self.is_default:
raise PolicyError('Deprecated policy can not be default. '
'Invalid config', self.idx)
self.ring_name = _get_policy_string('object', self.idx)
self.object_ring = object_ring
def __int__(self):
return self.idx
def __cmp__(self, other):
return cmp(self.idx, int(other))
def __repr__(self):
return ("%s(%d, %r, is_default=%s, "
"is_deprecated=%s, policy_type=%r)") % \
(self.__class__.__name__, self.idx, self.name,
self.is_default, self.is_deprecated, self.policy_type)
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their StoragePolicy class. This will also set the policy_type
attribute on the registered implementation.
"""
def register_wrapper(policy_cls):
if policy_type in cls.policy_type_to_policy_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_policy_cls[policy_type],
policy_type))
cls.policy_type_to_policy_cls[policy_type] = policy_cls
policy_cls.policy_type = policy_type
return policy_cls
return register_wrapper
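    # Illustrative use of the decorator (hypothetical policy type, shown only to
    # clarify the mechanism; the real registrations appear further below):
    #
    #   @BaseStoragePolicy.register('my_policy_type')
    #   class MyStoragePolicy(BaseStoragePolicy):
    #       ...
    #
    # afterwards policy_type_to_policy_cls['my_policy_type'] is MyStoragePolicy,
    # MyStoragePolicy.policy_type == 'my_policy_type', and registering the same
    # policy_type a second time raises PolicyError.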
@classmethod
def _config_options_map(cls):
"""
Map config option name to StoragePolicy parameter name.
"""
return {
'name': 'name',
'policy_type': 'policy_type',
'default': 'is_default',
'deprecated': 'is_deprecated',
}
@classmethod
def from_config(cls, policy_index, options):
config_to_policy_option_map = cls._config_options_map()
policy_options = {}
for config_option, value in options.items():
try:
policy_option = config_to_policy_option_map[config_option]
except KeyError:
raise PolicyError('Invalid option %r in '
'storage-policy section' % config_option,
index=policy_index)
policy_options[policy_option] = value
return cls(policy_index, **policy_options)
def get_info(self, config=False):
"""
Return the info dict and conf file options for this policy.
:param config: boolean, if True all config options are returned
"""
info = {}
for config_option, policy_attribute in \
self._config_options_map().items():
info[config_option] = getattr(self, policy_attribute)
if not config:
# remove some options for public consumption
if not self.is_default:
info.pop('default')
if not self.is_deprecated:
info.pop('deprecated')
info.pop('policy_type')
return info
def _validate_ring(self):
"""
Hook, called when the ring is loaded. Can be used to
validate the ring against the StoragePolicy configuration.
"""
pass
def load_ring(self, swift_dir):
"""
Load the ring for this policy immediately.
:param swift_dir: path to rings
"""
if self.object_ring:
return
self.object_ring = Ring(swift_dir, ring_name=self.ring_name)
# Validate ring to make sure it conforms to policy requirements
self._validate_ring()
@property
def quorum(self):
"""
Number of successful backend requests needed for the proxy to
consider the client request successful.
"""
raise NotImplementedError()
@BaseStoragePolicy.register(REPL_POLICY)
class StoragePolicy(BaseStoragePolicy):
"""
Represents a storage policy of type 'replication'. Default storage policy
class unless otherwise overridden from swift.conf.
Not meant to be instantiated directly; use
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
@property
def quorum(self):
"""
Quorum concept in the replication case:
floor(number of replica / 2) + 1
"""
if not self.object_ring:
self.load_ring('/etc/swift')
return quorum_size(self.object_ring.replica_count)
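        # Worked example (ring size assumed for illustration): with a 3-replica
        # object ring, quorum_size(3) == 3 // 2 + 1 == 2, so two successful
        # backend responses let the proxy call the client request successful.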
@BaseStoragePolicy.register(HYBRID_POLICY)
class HybridStoragePolicy(BaseStoragePolicy):
"""
Represents a storage policy of type 'hybrid'.
Not meant to be instantiated directly; use
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
    def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None, cloud_object_ring=None,
local_ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
cloud_ec_segment_size=CLOUD_EC_OBJECT_SEGMENT_SIZE,
hybrid_type=None, local_ec_type=None, cloud_ec_type=None,
local_repl_num=None, cloud_repl_num=None,
local_ec_ndata=None, local_ec_nparity=None,
cloud_ec_ndata=None, cloud_ec_nparity=None):
super(HybridStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
# Validate hybrid policy specific members
# hybrid_type must in rep_ec, ec_rep, rep_rep, ec_ec
if hybrid_type is None:
raise PolicyError('Missing hybrid_type')
if hybrid_type not in VALID_HYBRID_TYPES:
raise PolicyError('Wrong hybrid_type %s for policy %s, should be one'
'of "%s"' % (hybrid_type, self.name, ','.join(VALID_HYBRID_TYPES)))
self._hybrid_type = hybrid_type
#Validate erasure_coding policy specific members
#local_ec_type is one of the EC implementations supported by PyEClib
def valid_local_ec_type(local_ec_type):
if local_ec_type is None:
raise PolicyError('Missing local_ec_type')
if local_ec_type not in VALID_EC_TYPES:
raise PolicyError('Wrong local_ec_type %s for policy %s, should be one'
' of "%s"' % (local_ec_type, self.name,
', '.join(VALID_EC_TYPES)))
self._local_ec_type = local_ec_type
#Validate erasure_coding policy specific members
#cloud_ec_type is one of the EC implementations supported by PyEClib
def valid_cloud_ec_type(cloud_ec_type):
if cloud_ec_type is None:
raise PolicyError('Missing cloud_ec_type')
if cloud_ec_type not in VALID_EC_TYPES:
raise PolicyError('Wrong cloud_ec_type %s for policy %s, should be one'
' of "%s"' % (cloud_ec_type, self.name,
', '.join(VALID_EC_TYPES)))
self._cloud_ec_type = cloud_ec_type
#Define _local_repl_num as the number of replicate in local
#Accessible as the property "local_repl_num"
def valid_local_repl_num(local_repl_num):
try:
value = int(local_repl_num)
if value <= 0:
raise PolicyError('Invalid local_repl_num')
self._local_repl_num = value
except (TypeError, ValueError):
raise PolicyError('Invalid local_repl_num %r' %
                                  local_repl_num, index=self.idx)
#Define _cloud_repl_num as the number of replicate in clouds
#Accessible as the property "cloud_repl_num"
def valid_cloud_repl_num(cloud_repl_num):
try:
value = int(cloud_repl_num)
if value <= 0:
raise PolicyError('Invalid cloud_repl_num')
self._cloud_repl_num = value
except (TypeError, ValueError):
raise PolicyError('Invalid cloud_repl_num %r' %
cloud_repl_num, index=self.idx)
#Define _local_ec_ndata as the number of EC data fragments in local
#Accessible as the property "local_ec_ndata"
def valid_local_ec_ndata(local_ec_ndata):
try:
value = int(local_ec_ndata)
if value <= 0:
raise ValueError
self._local_ec_ndata = value
except (TypeError, ValueError):
raise PolicyError('Invalid local_ec_num_data_fragments %r' %
local_ec_ndata, index=self.idx)
#Define _local_ec_nparity as the number of EC parity fragments in local
#Accessible as the property "local_ec_nparity"
def valid_local_ec_nparity(local_ec_nparity):
try:
value = int(local_ec_nparity)
if value <= 0:
raise ValueError
self._local_ec_nparity = value
except (TypeError, ValueError):
raise PolicyError('Invalid local_ec_num_parity_fragments %r' %
local_ec_nparity, index=self.idx)
# Define _cloud_ec_ndata as the number of EC data fragments in clouds
# Accessible as the property "cloud_ec_ndata"
def valid_cloud_ec_ndata(cloud_ec_ndata):
try:
value = int(cloud_ec_ndata)
if value <= 0:
raise ValueError
self._cloud_ec_ndata = value
except (TypeError, ValueError):
raise PolicyError('Invalid cloud_ec_num_data_fragments %r' %
cloud_ec_ndata, index=self.idx)
# Define _cloud_ec_nparity as the number of EC parity fragments in clouds
# Accessible as the property "cloud_ec_nparity"
def valid_cloud_ec_nparity(cloud_ec_nparity):
try:
value = int(cloud_ec_nparity)
if value <= 0:
raise ValueError
self._cloud_ec_nparity = value
except (TypeError, ValueError):
raise PolicyError('Invalid cloud_ec_num_parity_fragments %r' %
cloud_ec_nparity, index=self.idx)
# Define _local_ec_segment_size as the encode segment unit size
# Accessible as the property "local_ec_segment_size"
def valid_local_ec_segment_size(local_ec_segment_size):
try:
value = int(local_ec_segment_size)
if value <= 0:
raise ValueError
self._local_ec_segment_size = value
except (TypeError, ValueError):
raise PolicyError('Invalid local_ec_object_segment_size %r' %
local_ec_segment_size, index=self.idx)
# Define _cloud_ec_segment_size as the encode segment unit size
# Accessible as the property "cloud_ec_segment_size"
def valid_cloud_ec_segment_size(cloud_ec_segment_size):
try:
value = int(cloud_ec_segment_size)
if value <= 0:
raise ValueError
self._cloud_ec_segment_size = value
except (TypeError, ValueError):
                raise PolicyError('Invalid cloud_ec_object_segment_size %r' %
                                  cloud_ec_segment_size, index=self.idx)
#cloud_ring_name, cloud_object_ring is specific for HybridStoragePolicy class
self.cloud_ring_name = self.ring_name + 'c'
self.cloud_object_ring = cloud_object_ring
def valid_and_return_policy():
if hybrid_type == 'rep_ec':
valid_cloud_ec_type(cloud_ec_type)
valid_local_repl_num(local_repl_num)
valid_cloud_ec_ndata(cloud_ec_ndata)
valid_cloud_ec_nparity(cloud_ec_nparity)
valid_cloud_ec_segment_size(cloud_ec_segment_size)
local = StoragePolicy(idx, name, is_default, is_deprecated, object_ring)
cloud = ECStoragePolicy(idx, name, is_default, is_deprecated, cloud_object_ring,
cloud_ec_segment_size, cloud_ec_type, cloud_ec_ndata, cloud_ec_nparity)
return local, cloud
if hybrid_type == 'ec_rep':
valid_local_ec_type(local_ec_type)
valid_local_ec_ndata(local_ec_ndata)
valid_local_ec_nparity(local_ec_nparity)
valid_cloud_repl_num(cloud_repl_num)
valid_local_ec_segment_size(local_ec_segment_size)
local = ECStoragePolicy(idx, name, is_default, is_deprecated, object_ring,
local_ec_segment_size, local_ec_type, local_ec_ndata, local_ec_nparity)
cloud = StoragePolicy(idx, name, is_default, is_deprecated, cloud_object_ring)
return local, cloud
            if hybrid_type == 'rep_rep':
valid_local_repl_num(local_repl_num)
valid_cloud_repl_num(cloud_repl_num)
local = StoragePolicy(idx, name, is_default, is_deprecated, object_ring)
cloud = StoragePolicy(idx, name, is_default, is_deprecated, cloud_object_ring)
return local, cloud
if hybrid_type == 'ec_ec':
valid_local_ec_type(local_ec_type)
valid_cloud_ec_type(cloud_ec_type)
valid_local_ec_ndata(local_ec_ndata)
valid_local_ec_nparity(local_ec_nparity)
valid_cloud_ec_ndata(cloud_ec_ndata)
valid_cloud_ec_nparity(cloud_ec_nparity)
valid_local_ec_segment_size(local_ec_segment_size)
valid_cloud_ec_segment_size(cloud_ec_segment_size)
local = ECStoragePolicy(idx, name, is_default, is_deprecated, object_ring,
local_ec_segment_size, local_ec_type, local_ec_ndata, local_ec_nparity)
cloud = ECStoragePolicy(idx, name, is_default, is_deprecated, cloud_object_ring,
cloud_ec_segment_size, cloud_ec_type, cloud_ec_ndata, cloud_ec_nparity)
return local, cloud
self._local_policy, self._cloud_policy = valid_and_return_policy()
#ring_name, cloud_ring_name is called ring_name in StoragePolicy and ECStoragePolicy
self._cloud_policy.ring_name = self.ring_name + 'c'
@property
def hybrid_type(self):
return self._hybrid_type
@property
def get_local_policy(self):
return self._local_policy
@property
def get_cloud_policy(self):
return self._cloud_policy
@property
def get_local_policy_type(self):
if self.hybrid_type.startswith('rep'):
return REPL_POLICY
elif self.hybrid_type.startswith('ec'):
return EC_POLICY
else:
raise PolicyError('Invalid local type of hybrid')
@property
def get_cloud_policy_type(self):
if self.hybrid_type.endswith('rep'):
return REPL_POLICY
elif self.hybrid_type.endswith('ec'):
return EC_POLICY
else:
raise PolicyError('Invalid cloud type of hybrid')
@property
def local_ec_type(self):
return self._local_ec_type
@property
def cloud_ec_type(self):
return self._cloud_ec_type
@property
def local_repl_num(self):
return self._local_repl_num
@property
def cloud_repl_num(self):
return self._cloud_repl_num
@property
def local_ec_ndata(self):
return self._local_ec_ndata
@property
def cloud_ec_ndata(self):
return self._cloud_ec_ndata
@property
def local_ec_nparity(self):
return self._local_ec_nparity
@property
def cloud_ec_nparity(self):
return self._cloud_ec_nparity
@property
def local_ec_segment_size(self):
return self._local_ec_segment_size
@property
def cloud_ec_segment_size(self):
return self._cloud_ec_segment_size
@property
def local_fragment_size(self):
if self._hybrid_type == 'ec_ec' or self._hybrid_type == 'ec_rep':
return self._local_policy.fragment_size
@property
def cloud_fragment_size(self):
if self._hybrid_type == 'ec_ec' or self._hybrid_type == 'rep_ec':
return self._cloud_policy.fragment_size
@property
def hybrid_scheme_description(self):
if self._hybrid_type == 'rep_ec':
return "%s %d+%d+%d" % (self._hybrid_type, self._local_repl_num,
self._cloud_ec_ndata, self._cloud_ec_nparity)
if self._hybrid_type == 'ec_rep':
return "%s %d+%d+%d" % (self._hybrid_type, self._local_ec_ndata,
self._local_ec_nparity, self._cloud_repl_num)
if self._hybrid_type == 'rep_rep':
return "%s %d+%d" % (self._hybrid_type, self._local_repl_num,
self._cloud_repl_num)
if self._hybrid_type == 'ec_ec':
return "%s %d+%d+%d+%d" % (self._hybrid_type,
self._local_ec_ndata, self._local_ec_nparity,
self._cloud_ec_ndata, self._cloud_ec_nparity)
def __repr__(self):
if self._hybrid_type == 'rep_ec':
return ("%s, HYBRID config(hybrid_type=%s, local_repl_num=%d, cloud_ec_type=%s, "
"cloud_ec_segment_size=%d, cloud_ec_ndata=%d, cloud_ec_nparity=%d)") % (
super(HybridStoragePolicy, self).__repr__(), self.hybrid_type,
self.local_repl_num, self.cloud_ec_type, self.cloud_ec_segment_size,
self.cloud_ec_ndata, self.cloud_ec_nparity)
if self._hybrid_type == 'ec_rep':
return ("%s, HYBRID config(hybrid_type=%s, local_ec_type=%s,local_ec_segment_size=%d, "
"local_ec_ndata=%d, local_ec_nparity=%d, cloud_repl_num=%d)") % (
super(HybridStoragePolicy, self).__repr__(), self.hybrid_type,
self.local_ec_type, self.local_ec_segment_size, self.local_ec_ndata,
self.local_ec_nparity, self.cloud_repl_num)
if self._hybrid_type == 'rep_rep':
return ("%s, HYBRID config(hybrid_type=%s, local_repl_num=%d, cloud_repl_num=%d)") % (
super(HybridStoragePolicy, self).__repr__(), self.hybrid_type,
self.local_repl_num, self.cloud_repl_num)
if self._hybrid_type == 'ec_ec':
return ("%s, HYBRID config(hybrid_type=%s, local_ec_type=%s, local_ec_segment_size=%d, "
"local_ec_ndata=%d, local_ec_nparity=%d, cloud_ec_type=%s, "
"cloud_ec_segment_size=%d, cloud_ec_ndata=%d, cloud_ec_nparity=%d)") % (
super(HybridStoragePolicy, self).__repr__(), self.hybrid_type,
self.local_ec_type, self.local_ec_segment_size, self.local_ec_ndata, self.local_ec_nparity,
self.cloud_ec_type, self.cloud_ec_segment_size, self.cloud_ec_ndata, self.cloud_ec_nparity)
@classmethod
def _config_options_map(cls, hy_type):
options = super(HybridStoragePolicy, cls)._config_options_map()
if hy_type == 'rep_ec':
options.update({
'hybrid_type': 'hybrid_type',
'cloud_ec_object_segment_size': 'cloud_ec_segment_size',
'cloud_ec_type': 'cloud_ec_type',
'local_replica_num': 'local_repl_num',
'cloud_ec_num_data_fragments': 'cloud_ec_ndata',
'cloud_ec_num_parity_fragments': 'cloud_ec_nparity',
})
if hy_type == 'ec_rep':
options.update({
'hybrid_type': 'hybrid_type',
'local_ec_object_segment_size': 'local_ec_segment_size',
'local_ec_type': 'local_ec_type',
'cloud_replica_num': 'cloud_repl_num',
'local_ec_num_data_fragments': 'local_ec_ndata',
'local_ec_num_parity_fragments': 'local_ec_nparity',
})
if hy_type == 'rep_rep':
options.update({
'hybrid_type': 'hybrid_type',
'local_replica_num': 'local_repl_num',
'cloud_replica_num': 'cloud_repl_num',
})
if hy_type == 'ec_ec':
options.update({
'hybrid_type': 'hybrid_type',
'local_ec_object_segment_size': 'local_ec_segment_size',
'cloud_ec_object_segment_size': 'cloud_ec_segment_size',
'local_ec_type': 'local_ec_type',
'cloud_ec_type': 'cloud_ec_type',
'local_ec_num_data_fragments': 'local_ec_ndata',
'local_ec_num_parity_fragments': 'local_ec_nparity',
'cloud_ec_num_data_fragments': 'cloud_ec_ndata',
'cloud_ec_num_parity_fragments': 'cloud_ec_nparity',
})
return options
@classmethod
def from_config(cls, policy_index, options):
hy_type = options['hybrid_type']
config_to_policy_option_map = cls._config_options_map(hy_type)
policy_options = {}
for config_option, value in options.items():
try:
policy_option = config_to_policy_option_map[config_option]
except KeyError:
raise PolicyError('Invalid option %r in '
'storage-policy section' % config_option, index=policy_index)
policy_options[policy_option] = value
return cls(policy_index, **policy_options)
def get_info(self, config=False):
#info = super(HybridStoragePolicy, self).get_info(config=config)
info = {}
for config_option, policy_attribute in self._config_options_map(self._hybrid_type).items():
info[config_option] = getattr(self, policy_attribute)
if not config:
if not self.is_default:
info.pop('default')
if not self.is_deprecated:
info.pop('deprecated')
info.pop('policy_type')
if self._hybrid_type == 'rep_ec':
info.pop('local_replica_num')
info.pop('cloud_ec_object_segment_size')
info.pop('cloud_ec_num_data_fragments')
info.pop('cloud_ec_num_parity_fragments')
info.pop('cloud_ec_type')
info.pop('hybrid_type')
if self._hybrid_type == 'ec_rep':
info.pop('local_ec_object_segment_size')
info.pop('local_ec_num_data_fragments')
info.pop('local_ec_num_parity_fragments')
info.pop('cloud_replica_num')
info.pop('local_ec_type')
info.pop('hybrid_type')
if self._hybrid_type == 'rep_rep':
info.pop('local_replica_num')
info.pop('cloud_replica_num')
info.pop('hybrid_type')
if self._hybrid_type == 'ec_ec':
info.pop('local_ec_object_segment_size')
info.pop('local_ec_num_data_fragments')
info.pop('local_ec_num_parity_fragments')
info.pop('cloud_ec_object_segment_size')
info.pop('cloud_ec_num_data_fragments')
info.pop('cloud_ec_num_parity_fragments')
info.pop('local_ec_type')
info.pop('cloud_ec_type')
info.pop('hybrid_type')
return info
def load_ring(self, swift_dir):
if self.object_ring and self.cloud_object_ring:
return
self.object_ring = Ring(swift_dir, ring_name=self.ring_name)
self.cloud_object_ring = Ring(swift_dir, ring_name=self.cloud_ring_name)
self._local_policy.object_ring = self.object_ring
self._cloud_policy.object_ring = self.cloud_object_ring
# Validate ring to make sure it conforms to policy requirements
self._validate_ring()
def _validate_ring(self):
"""
EC specific Validation
Replica count check - we need _at_least_ (#data + #parity) replicas
configured. Also if the replica count is larger than exactly that
number there's a non-zero risk of error for code that is considering
the num of nodes in the primary list from the ring.
"""
if not self.object_ring or not self.cloud_object_ring:
raise PolicyError('a Ring is not loaded')
local_nodes_configured = self.object_ring.replica_count
cloud_nodes_configured = self.cloud_object_ring.replica_count
if self._hybrid_type == 'rep_ec' and (local_nodes_configured != self.local_repl_num or
cloud_nodes_configured != self.cloud_ec_ndata + self.cloud_ec_nparity):
raise RingValidationError(
'EC ring for policy %s needs to be configured with '
                'exactly (%d %d) nodes. Got (%d %d).' % (self.name, self.local_repl_num,
self.cloud_ec_ndata + self.cloud_ec_nparity, local_nodes_configured, cloud_nodes_configured))
if self._hybrid_type == 'ec_rep' and (local_nodes_configured != self.local_ec_ndata + self.local_ec_nparity or
cloud_nodes_configured != self.cloud_repl_num):
raise RingValidationError(
'EC ring for policy %s needs to be configured with '
'exactly (%d %d) nodes. Got (%d %d).' % (self.name,
self.local_ec_ndata + self.local_ec_nparity, self.cloud_repl_num,
local_nodes_configured, cloud_nodes_configured))
if self._hybrid_type == 'rep_rep' and (local_nodes_configured != self.local_repl_num or
cloud_nodes_configured != self.cloud_repl_num):
raise RingValidationError(
                'EC ring for policy %s needs to be configured with exactly (%d %d) nodes. Got (%d %d).' % (
self.name, self.local_repl_num, self.cloud_repl_num, local_nodes_configured, cloud_nodes_configured))
if self._hybrid_type == 'ec_ec' and (local_nodes_configured != self.local_ec_ndata + self.local_ec_nparity or
cloud_nodes_configured != self.cloud_ec_ndata + self.cloud_ec_nparity):
raise RingValidationError(
                'EC ring for policy %s needs to be configured with exactly (%d %d) nodes. Got (%d %d).' % (
self.name, self.local_ec_ndata + self.local_ec_nparity, self.cloud_ec_ndata + self.cloud_ec_nparity,
local_nodes_configured, cloud_nodes_configured))
@property
def quorum(self):
return self._local_policy.quorum, self._cloud_policy.quorum
@BaseStoragePolicy.register(EC_POLICY)
class ECStoragePolicy(BaseStoragePolicy):
"""
Represents a storage policy of type 'erasure_coding'.
Not meant to be instantiated directly; use
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
def __init__(self, idx, name='', is_default=False,
is_deprecated=False, object_ring=None,
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
ec_type=None, ec_ndata=None, ec_nparity=None):
super(ECStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
# Validate erasure_coding policy specific members
# ec_type is one of the EC implementations supported by PyEClib
if ec_type is None:
raise PolicyError('Missing ec_type')
if ec_type not in VALID_EC_TYPES:
raise PolicyError('Wrong ec_type %s for policy %s, should be one'
' of "%s"' % (ec_type, self.name,
', '.join(VALID_EC_TYPES)))
self._ec_type = ec_type
# Define _ec_ndata as the number of EC data fragments
# Accessible as the property "ec_ndata"
try:
value = int(ec_ndata)
if value <= 0:
raise ValueError
self._ec_ndata = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_num_data_fragments %r' %
ec_ndata, index=self.idx)
# Define _ec_nparity as the number of EC parity fragments
# Accessible as the property "ec_nparity"
try:
value = int(ec_nparity)
if value <= 0:
raise ValueError
self._ec_nparity = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_num_parity_fragments %r'
% ec_nparity, index=self.idx)
# Define _ec_segment_size as the encode segment unit size
# Accessible as the property "ec_segment_size"
try:
value = int(ec_segment_size)
if value <= 0:
raise ValueError
self._ec_segment_size = value
except (TypeError, ValueError):
raise PolicyError('Invalid ec_object_segment_size %r' %
ec_segment_size, index=self.idx)
# Initialize PyECLib EC backend
try:
self.pyeclib_driver = \
ECDriver(k=self._ec_ndata, m=self._ec_nparity,
ec_type=self._ec_type)
except ECDriverError as e:
raise PolicyError("Error creating EC policy (%s)" % e,
index=self.idx)
# quorum size in the EC case depends on the choice of EC scheme.
self._ec_quorum_size = \
self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
@property
def ec_type(self):
return self._ec_type
@property
def ec_ndata(self):
return self._ec_ndata
@property
def ec_nparity(self):
return self._ec_nparity
@property
def ec_segment_size(self):
return self._ec_segment_size
@property
def fragment_size(self):
"""
Maximum length of a fragment, including header.
NB: a fragment archive is a sequence of 0 or more max-length
fragments followed by one possibly-shorter fragment.
"""
# Technically pyeclib's get_segment_info signature calls for
# (data_len, segment_size) but on a ranged GET we don't know the
# ec-content-length header before we need to compute where in the
# object we should request to align with the fragment size. So we
        # tell pyeclib a lie - from its perspective, as long as data_len >=
# segment_size it'll give us the answer we want. From our
# perspective, because we only use this answer to calculate the
# *minimum* size we should read from an object body even if data_len <
# segment_size we'll still only read *the whole one and only last
        # fragment* and pass that into pyeclib who will know what to do with
# it just as it always does when the last fragment is < fragment_size.
return self.pyeclib_driver.get_segment_info(
self.ec_segment_size, self.ec_segment_size)['fragment_size']
@property
def ec_scheme_description(self):
"""
This short hand form of the important parts of the ec schema is stored
in Object System Metadata on the EC Fragment Archives for debugging.
"""
return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity)
def __repr__(self):
return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
"ec_ndata=%d, ec_nparity=%d)") % (
super(ECStoragePolicy, self).__repr__(), self.ec_type,
self.ec_segment_size, self.ec_ndata, self.ec_nparity)
@classmethod
def _config_options_map(cls):
options = super(ECStoragePolicy, cls)._config_options_map()
options.update({
'ec_type': 'ec_type',
'ec_object_segment_size': 'ec_segment_size',
'ec_num_data_fragments': 'ec_ndata',
'ec_num_parity_fragments': 'ec_nparity',
})
return options
def get_info(self, config=False):
info = super(ECStoragePolicy, self).get_info(config=config)
if not config:
info.pop('ec_object_segment_size')
info.pop('ec_num_data_fragments')
info.pop('ec_num_parity_fragments')
info.pop('ec_type')
return info
def _validate_ring(self):
"""
EC specific validation
Replica count check - we need _at_least_ (#data + #parity) replicas
configured. Also if the replica count is larger than exactly that
number there's a non-zero risk of error for code that is considering
the number of nodes in the primary list from the ring.
"""
if not self.object_ring:
raise PolicyError('Ring is not loaded')
nodes_configured = self.object_ring.replica_count
if nodes_configured != (self.ec_ndata + self.ec_nparity):
raise RingValidationError(
'EC ring for policy %s needs to be configured with '
'exactly %d nodes. Got %d.' % (self.name,
self.ec_ndata + self.ec_nparity, nodes_configured))
@property
def quorum(self):
"""
Number of successful backend requests needed for the proxy to consider
the client request successful.
The quorum size for EC policies defines the minimum number
of data + parity elements required to be able to guarantee
the desired fault tolerance, which is the number of data
elements supplemented by the minimum number of parity
elements required by the chosen erasure coding scheme.
        For example, for Reed-Solomon, the minimum number of parity
elements required is 1, and thus the quorum_size requirement
is ec_ndata + 1.
Given the number of parity elements required is not the same
for every erasure coding scheme, consult PyECLib for
min_parity_fragments_needed()
"""
return self._ec_quorum_size
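        # Worked example (scheme assumed for illustration): with ec_ndata=10 and
        # a Reed-Solomon style backend where min_parity_fragments_needed() == 1,
        # the quorum is 10 + 1 == 11 fragment writes, independent of ec_nparity.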
class StoragePolicyCollection(object):
"""
This class represents the collection of valid storage policies for the
cluster and is instantiated as :class:`StoragePolicy` objects are added to
the collection when ``swift.conf`` is parsed by
:func:`parse_storage_policies`.
When a StoragePolicyCollection is created, the following validation
is enforced:
* If a policy with index 0 is not declared and no other policies defined,
Swift will create one
* The policy index must be a non-negative integer
* If no policy is declared as the default and no other policies are
defined, the policy with index 0 is set as the default
* Policy indexes must be unique
* Policy names are required
* Policy names are case insensitive
* Policy names must contain only letters, digits or a dash
* Policy names must be unique
* The policy name 'Policy-0' can only be used for the policy with index 0
* If any policies are defined, exactly one policy must be declared default
* Deprecated policies can not be declared the default
"""
def __init__(self, pols):
self.default = []
self.by_name = {}
self.by_index = {}
self._validate_policies(pols)
def _add_policy(self, policy):
"""
Add pre-validated policies to internal indexes.
"""
self.by_name[policy.name.upper()] = policy
self.by_index[int(policy)] = policy
def __repr__(self):
return (textwrap.dedent("""
StoragePolicyCollection([
%s
])
""") % ',\n '.join(repr(p) for p in self)).strip()
def __len__(self):
return len(self.by_index)
def __getitem__(self, key):
return self.by_index[key]
def __iter__(self):
return iter(self.by_index.values())
def _validate_policies(self, policies):
"""
:param policies: list of policies
"""
for policy in policies:
if int(policy) in self.by_index:
raise PolicyError('Duplicate index %s conflicts with %s' % (
policy, self.get_by_index(int(policy))))
if policy.name.upper() in self.by_name:
raise PolicyError('Duplicate name %s conflicts with %s' % (
policy, self.get_by_name(policy.name)))
if policy.is_default:
if not self.default:
self.default = policy
else:
raise PolicyError(
'Duplicate default %s conflicts with %s' % (
policy, self.default))
self._add_policy(policy)
# If a 0 policy wasn't explicitly given, or nothing was
# provided, create the 0 policy now
if 0 not in self.by_index:
if len(self) != 0:
raise PolicyError('You must specify a storage policy '
'section for policy index 0 in order '
'to define multiple policies')
self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME))
# at least one policy must be enabled
enabled_policies = [p for p in self if not p.is_deprecated]
if not enabled_policies:
raise PolicyError("Unable to find policy that's not deprecated!")
# if needed, specify default
if not self.default:
if len(self) > 1:
raise PolicyError("Unable to find default policy")
self.default = self[0]
self.default.is_default = True
def get_by_name(self, name):
"""
Find a storage policy by its name.
:param name: name of the policy
:returns: storage policy, or None
"""
return self.by_name.get(name.upper())
def get_by_index(self, index):
"""
Find a storage policy by its index.
An index of None will be treated as 0.
:param index: numeric index of the storage policy
:returns: storage policy, or None if no such policy
"""
# makes it easier for callers to just pass in a header value
if index in ('', None):
index = 0
else:
try:
index = int(index)
except ValueError:
return None
return self.by_index.get(index)
@property
def legacy(self):
return self.get_by_index(None)
def get_object_ring(self, policy_idx, swift_dir):
"""
Get the ring object to use to handle a request based on its policy.
An index of None will be treated as 0.
:param policy_idx: policy index as defined in swift.conf
:param swift_dir: swift_dir used by the caller
:returns: appropriate ring object
"""
policy = self.get_by_index(policy_idx)
if not policy:
raise PolicyError("No policy with index %s" % policy_idx)
if not policy.object_ring:
policy.load_ring(swift_dir)
if policy.policy_type != HYBRID_POLICY:
return policy.object_ring
else:
return (policy.object_ring, policy.cloud_object_ring)
def get_policy_info(self):
"""
Build info about policies for the /info endpoint
:returns: list of dicts containing relevant policy information
"""
policy_info = []
for pol in self:
# delete from /info if deprecated
if pol.is_deprecated:
continue
policy_entry = pol.get_info()
policy_info.append(policy_entry)
return policy_info
def parse_storage_policies(conf):
"""
Parse storage policies in ``swift.conf`` - note that validation
is done when the :class:`StoragePolicyCollection` is instantiated.
:param conf: ConfigParser parser object for swift.conf
"""
policies = []
for section in conf.sections():
if not section.startswith('storage-policy:'):
continue
policy_index = section.split(':', 1)[1]
config_options = dict(conf.items(section))
policy_type = config_options.pop('policy_type', DEFAULT_POLICY_TYPE)
policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type]
policy = policy_cls.from_config(policy_index, config_options)
policies.append(policy)
return StoragePolicyCollection(policies)
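# Minimal sketch of the swift.conf sections this parser consumes (section and
# option names follow the maps above; the concrete values are assumptions):
#
#   [storage-policy:0]
#   name = Policy-0
#   default = yes
#
#   [storage-policy:1]
#   name = ec104
#   policy_type = erasure_coding
#   ec_type = jerasure_rs_vand
#   ec_num_data_fragments = 10
#   ec_num_parity_fragments = 4
#
# The text after 'storage-policy:' becomes the policy index, and policy_type
# selects the registered class via BaseStoragePolicy.policy_type_to_policy_cls.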
class StoragePolicySingleton(object):
"""
An instance of this class is the primary interface to storage policies
exposed as a module level global named ``POLICIES``. This global
reference wraps ``_POLICIES`` which is normally instantiated by parsing
``swift.conf`` and will result in an instance of
:class:`StoragePolicyCollection`.
You should never patch this instance directly, instead patch the module
level ``_POLICIES`` instance so that swift code which imported
``POLICIES`` directly will reference the patched
:class:`StoragePolicyCollection`.
"""
def __iter__(self):
return iter(_POLICIES)
def __len__(self):
return len(_POLICIES)
def __getitem__(self, key):
return _POLICIES[key]
def __getattribute__(self, name):
return getattr(_POLICIES, name)
def __repr__(self):
return repr(_POLICIES)
def reload_storage_policies():
"""
Reload POLICIES from ``swift.conf``.
"""
global _POLICIES
policy_conf = ConfigParser()
policy_conf.read(SWIFT_CONF_FILE)
try:
_POLICIES = parse_storage_policies(policy_conf)
except PolicyError as e:
raise SystemExit('ERROR: Invalid Storage Policy Configuration '
'in %s (%s)' % (SWIFT_CONF_FILE, e))
# parse configuration and setup singleton
_POLICIES = None
reload_storage_policies()
POLICIES = StoragePolicySingleton()
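# Typical read-only access goes through the singleton defined above, e.g.
# POLICIES.default, POLICIES.get_by_index(0) or POLICIES.get_by_name('Policy-0');
# per the StoragePolicySingleton docstring, tests should patch the module-level
# _POLICIES rather than POLICIES itself.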
|
|
from datetime import date, datetime
from inspect import isclass
import six
import sqlalchemy as sa
__version__ = '0.4.4'
class Column(sa.Column):
def __init__(self, *args, **kwargs):
kwargs.setdefault('info', {})
kwargs['info'].setdefault('choices', kwargs.pop('choices', None))
kwargs['info'].setdefault('label', kwargs.pop('label', ''))
kwargs['info'].setdefault('description', kwargs.pop('description', ''))
kwargs['info'].setdefault('validators', kwargs.pop('validators', []))
kwargs['info'].setdefault('min', kwargs.pop('min', None))
kwargs['info'].setdefault('max', kwargs.pop('max', None))
kwargs['info'].setdefault('auto_now', kwargs.pop('auto_now', False))
# Make strings and booleans not nullable by default
if args:
if (
any(bool_or_str(arg) for arg in args[0:2]) or
('type' in kwargs and bool_or_str(kwargs['type']))
):
kwargs.setdefault('nullable', False)
sa.Column.__init__(self, *args, **kwargs)
@property
def choices(self):
return self.info['choices'] if 'choices' in self.info else []
@property
def validators(self):
return self.info['validators'] if 'validators' in self.info else []
@property
def description(self):
return self.info['description'] if 'description' in self.info else ''
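# Minimal sketch of how the extra keyword arguments land in Column.info (model
# and column names here are assumptions made up for illustration):
#
#   class Article(Base):
#       __tablename__ = 'article'
#       id = sa.Column(sa.Integer, primary_key=True)
#       status = Column(sa.Integer, choices=[(0, 'draft'), (1, 'published')],
#                       label='Status', min=0, max=1)
#
#   Article.__table__.c.status.info['choices']  -> [(0, 'draft'), (1, 'published')]
#   Article.__table__.c.status.info['min']      -> 0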
class ConfigurationManager(object):
DEFAULT_OPTIONS = {
'auto_now': True,
'numeric_defaults': True,
'string_defaults': True,
'boolean_defaults': True,
'min_max_check_constraints': True,
'enum_names': True,
'index_foreign_keys': True
}
def __call__(self, mapper, class_):
if hasattr(class_, '__lazy_options__'):
configurator = ModelConfigurator(self, class_)
configurator()
class ModelConfigurator(object):
def __init__(self, manager, model):
self.manager = manager
self.model = model
self.table = self.model.__table__
def get_option(self, name):
try:
return self.model.__lazy_options__[name]
except (AttributeError, KeyError):
return self.manager.DEFAULT_OPTIONS[name]
def literal_value(self, value):
return (
value.isoformat()
if isinstance(value, (date, datetime))
else value
)
def append_check_constraints(self, column):
"""
Generate check constraints based on min and max column info arguments
"""
if 'min' in column.info and column.info['min'] is not None:
constraint = sa.schema.CheckConstraint(
column >= self.literal_value(column.info['min'])
)
self.table.append_constraint(constraint)
if 'max' in column.info and column.info['max'] is not None:
constraint = sa.schema.CheckConstraint(
column <= self.literal_value(column.info['max'])
)
self.table.append_constraint(constraint)
def assign_foreign_key_indexes(self, column):
"""
Assign index for column if column has foreign key constraints.
"""
if column.foreign_keys:
column.index = True
def assign_datetime_auto_now(self, column):
"""
Assigns datetime auto now defaults
"""
if column.info.get('auto_now'):
column.default = sa.schema.ColumnDefault(datetime.utcnow)
if not column.server_default:
# Does not support MySQL < 5.6.5
column.server_default = sa.schema.DefaultClause(sa.func.now())
def assign_numeric_defaults(self, column):
"""
Assigns int column server_default based on column default value
"""
if column.default is not None and hasattr(column.default, 'arg'):
if not column.server_default:
column.server_default = sa.schema.DefaultClause(
six.text_type(column.default.arg)
)
def assign_string_defaults(self, column):
"""
Assigns string column server_default based on column default value
"""
if column.default is not None and column.server_default is None and (
isinstance(column.default.arg, six.text_type)
):
column.server_default = sa.schema.DefaultClause(
column.default.arg
)
def assign_boolean_defaults(self, column):
"""
Assigns int column server_default based on column default value
"""
if column.default is None:
column.default = sa.schema.ColumnDefault(False)
if column.default is not None:
if column.default.arg is False:
column.server_default = sa.schema.DefaultClause(
sa.sql.expression.false()
)
else:
column.server_default = sa.schema.DefaultClause(
sa.sql.expression.true()
)
def assign_type_defaults(self, column):
if (isinstance(column.type, sa.Boolean) and
self.get_option('boolean_defaults')):
self.assign_boolean_defaults(column)
elif (is_string(column.type) and self.get_option('string_defaults')):
self.assign_string_defaults(column)
elif (is_numeric(column.type) and self.get_option('numeric_defaults')):
self.assign_numeric_defaults(column)
elif ((isinstance(column.type, sa.Date) or
isinstance(column.type, sa.DateTime))
and self.get_option('auto_now')):
self.assign_datetime_auto_now(column)
elif (isinstance(column.type, sa.Enum) and
self.get_option('enum_names')):
if (not hasattr(column.type, 'name') or not
column.type.name):
column.type.name = '%s_enum' % column.name
def __call__(self):
for column in self.table.columns:
if self.get_option('min_max_check_constraints'):
self.append_check_constraints(column)
if self.get_option('index_foreign_keys'):
self.assign_foreign_key_indexes(column)
self.assign_type_defaults(column)
def bool_or_str(type_):
return is_string(type_) or is_boolean(type_)
def is_string(type_):
return (
isinstance(type_, sa.String) or
(isclass(type_) and issubclass(type_, sa.String))
)
def is_boolean(type_):
return (
isinstance(type_, sa.Boolean) or
(isclass(type_) and issubclass(type_, sa.Boolean))
)
def is_numeric(type_):
return any(
isinstance(type_, type_cls)
for type_cls in (sa.Integer, sa.Float, sa.Numeric)
)
def make_lazy_configured(mapper):
manager = ConfigurationManager()
sa.event.listen(
mapper,
'mapper_configured',
manager
)
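# Minimal sketch of wiring the lazy configuration up (model name and the chosen
# option are assumptions made up for illustration):
#
#   make_lazy_configured(sa.orm.mapper)
#
#   class User(Base):
#       __tablename__ = 'user'
#       __lazy_options__ = {'min_max_check_constraints': True}
#       id = sa.Column(sa.Integer, primary_key=True)
#       age = Column(sa.Integer, min=0, max=200)
#
# When SQLAlchemy fires 'mapper_configured' for User, ModelConfigurator adds the
# min/max CHECK constraints, indexes foreign key columns and fills in
# server_default values according to the options resolved by get_option().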
|
|
from itertools import izip
import numpy as np
import geo2d.geometry as g
# import matplotlib.pyplot as plt
import math
import random
import time
from walker import OdoListener
class RobotPose():
point = g.Point(0, 0)
direction = 0.0
distance = 100
weight = 0.0
def __init__(self, x, y, direct):
self.point = g.Point(x, y)
self.direction = direct
def odoTranslate(self, x, y, theta):
y *= -1
dist = math.hypot(x, y)
angle = math.atan2(y, x)
self.point.translate(math.cos(self.direction - angle) * dist, math.sin(self.direction - angle) * dist)
self.direction += theta
        self.direction %= (math.pi * 2)  # keep the heading wrapped into [0, 2*pi)
def printPose(self):
circle1 = plt.Circle((self.point.x, self.point.y), 100, color = 'r')
plt.gcf().gca().add_artist(circle1)
plt.plot((self.point.x, (self.point.x + math.cos(self.direction) * self.distance)),
(self.point.y, (self.point.y + math.sin(self.direction) * self.distance)))
class Map:
central_line_corner_x = 0
central_line_corner_y = 2000
corner_x = 3000
corner_y = 2000
central_y = 0
penalty_corners_y = 1100
penalty_corner_x = 2400
max_distance = 5000
enemy_point = g.Point(-2800.0, 0.0)
friendly_point = g.Point(2800.0, 0.0)
start_point = g.Point(600.0, 0.0)
def __init__(self):
self.lines = [g.Line(*(line)) for line in [(g.Point(self.central_line_corner_x, self.central_line_corner_y), g.Point(self.central_line_corner_x, -self.central_line_corner_y)),
(g.Point(self.corner_x, self.central_line_corner_y), g.Point(-self.corner_x, self.central_line_corner_y)),
(g.Point(self.corner_x, -self.central_line_corner_y), g.Point(-self.corner_x, -self.central_line_corner_y)),
(g.Point(self.corner_x, self.central_line_corner_y), g.Point(self.corner_x, -self.central_line_corner_y)),
(g.Point(-self.corner_x, self.central_line_corner_y), g.Point(-self.corner_x, -self.central_line_corner_y)),
(g.Point(self.penalty_corner_x, self.penalty_corners_y), g.Point(self.penalty_corner_x, -self.penalty_corners_y)),
(g.Point(-self.penalty_corner_x, self.penalty_corners_y), g.Point(-self.penalty_corner_x, -self.penalty_corners_y)),
(g.Point(self.corner_x, self.penalty_corners_y), g.Point(self.penalty_corner_x, self.penalty_corners_y)),
(g.Point(self.corner_x, -self.penalty_corners_y), g.Point(self.penalty_corner_x, -self.penalty_corners_y)),
(g.Point(-self.corner_x, self.penalty_corners_y), g.Point(-self.penalty_corner_x, self.penalty_corners_y)),
(g.Point(-self.corner_x, -self.penalty_corners_y), g.Point(-self.penalty_corner_x, -self.penalty_corners_y))]]
def get_intersect_point(self, rp, point, distance = None):
start = time.time()
direction2 = math.atan2(point.y, point.x)
int_line = g.Line(rp.point,
g.Point((rp.point.x + math.cos(direction2 + rp.direction) * self.max_distance),
(rp.point.y + math.sin(direction2 + rp.direction) * self.max_distance)))
i_p = g.Point()
found = False
dist = self.max_distance
neededline = None
#
for l in self.lines:
start = time.time()
temp = l.intersection(int_line)
#
start = time.time()
if temp != None and temp != float("inf") and self.check_if_point_in_lines(l, int_line, temp):
#
start = time.time()
tmp_dist = rp.point.distance_to(temp)
#
if distance is None:
if tmp_dist < dist:
i_p = temp
dist = tmp_dist
neededline = l
found = True
else:
t_d = abs(distance - tmp_dist)
if t_d <= dist:
dist = t_d
i_p = temp
neededline = l
found = True
if found:
return i_p, neededline
#
return None, None
def check_if_point_in_line(self, l, p):
return ((p.x >= l.p1.x and p.x <= l.p2.x) or (p.x <= l.p1.x and p.x >= l.p2.x) or (math.fabs(p.x - l.p1.x) <= 0.01)) and \
((p.y >= l.p1.y and p.y <= l.p2.y) or (p.y <= l.p1.y and p.y >= l.p2.y) or (math.fabs(p.y - l.p1.y) <= 0.01))
def check_if_point_in_lines(self, l1, l2, p):
return self.check_if_point_in_line(l1, p) and self.check_if_point_in_line(l2, p)
def lines_eq(self, l1, l2):
return l1.p1.x == l2.p1.x and l1.p1.y == l2.p1.y and l1.p2.x == l2.p2.x and l1.p2.y == l2.p2.y
def print_map(self):
for line in self.lines:
plt.plot((line.p1.x, line.p2.x), (line.p1.y, line.p2.y), 'g-')
plt.axis([-3800, 3800, -2600, 2600])
class LocalizationModule(OdoListener):
map = Map()
particles_number = 50
print_once = True
parsed_lines = []
distances = []
corners = []
side = 1
def __init__(self, robot, cam_geom, goalie = False):
self.robot = robot
self.cam_geom = cam_geom
self.side = math.copysign(self.side, robot.joints.positions([1])["data"][0])
if goalie:
self.position = RobotPose(2400.0, 0.0, math.pi)
else:
self.position = RobotPose(1500.0, -2000.0, math.pi / 2)
def initial_generation(self):
if self.side < 0:
self.particles = [self.get_random_particle() for i in range(self.particles_number)]
else:
self.particles = [self.get_random_particle(min_x=0, min_y=1800, max_x=3000,
max_y=2050, min_dir=math.radians(260),
max_dir=math.radians(280)) for i in range(self.particles_number)]
def get_random_particle(self, min_x = 0, min_y = -2200, max_x = 3000, max_y = -2000, min_dir = math.radians(70), max_dir = math.radians(110)):
return RobotPose(random.uniform(min_x, max_x), random.uniform(min_y, max_y), random.uniform(min_dir, max_dir))
def sort_particles(self):
self.particles.sort(key=lambda rp: rp.weight)
def count_deviations(self):
arr_x = np.array([rp.point.x for rp in self.particles])
arr_y = np.array([rp.point.y for rp in self.particles])
arr_d = np.array([rp.direction for rp in self.particles])
return (np.std(arr_x), np.std(arr_y), np.std(arr_d))
def count_mean(self):
arr_x = np.array([rp.point.x for rp in self.particles])
arr_y = np.array([rp.point.y for rp in self.particles])
arr_d = np.array([rp.direction for rp in self.particles])
return (np.mean(arr_x), np.mean(arr_y), np.mean(arr_d))
def norm_weights(self):
m = max([rp.weight for rp in self.particles])
if m != 0:
for p in self.particles:
p.weight /= m
def get_corners(self):
self.corners = []
if len(self.parsed_lines) > 1:
for l1 in self.parsed_lines:
for l2 in self.parsed_lines:
if l1 != l2:
corner = l1.intersection(l2)
if corner != None and corner != float("inf") and self.map.check_if_point_in_lines(l1, l2, corner):
self.corners.append(corner)
def resample(self, after_fall):
self.particles = [rp for rp in self.particles if rp.weight >= 0.98]
self.sort_particles()
        particles_needed = self.particles_number - len(self.particles)
        if particles_needed == self.particles_number:
if not after_fall:
if self.side < 0:
self.particles = [self.get_random_particle() for i in range(self.particles_number)]
else:
self.particles = [self.get_random_particle(min_x = 0, min_y = 1800, max_x = 3000,
max_y = 2050, min_dir = math.radians(260),
max_dir = math.radians(280)) for i in range(self.particles_number)]
else:
self.generate_after_fall_particles()
return
        if particles_needed == 0:
            particles_needed = self.particles_number / 2
        self.particles = self.particles[:particles_needed]
dev = self.count_deviations()
x = (max(self.particles, key=lambda rp: rp.point.x).point.x + dev[0] * 0.15, min(self.particles, key=lambda rp: rp.point.x).point.x - dev[0] * 0.15)
y = (max(self.particles, key=lambda rp: rp.point.y).point.y + dev[1] * 0.15, min(self.particles, key=lambda rp: rp.point.y).point.y - dev[1] * 0.15)
d = (max(self.particles, key=lambda rp: rp.direction).direction + dev[2] * 0.15, min(self.particles, key=lambda rp: rp.direction).direction - dev[2] * 0.15)
        self.particles.extend([self.get_random_particle(x[1], y[1], x[0], y[0], d[1], d[0]) for i in range(particles_needed)])
def get_sensors(self):
# hui = g.Point(0, 0)
self.robot.vision.updateFrame()
vision_lines = self.robot.vision.lineDetect()
if len(vision_lines) != 0:
self.parsed_lines = []
self.distances = []
for i in vision_lines:
c1 = self.cam_geom.imagePixelToWorld(i["x1"], i["y1"], False)
c2 = self.cam_geom.imagePixelToWorld(i["x2"], i["y2"], False)
if c1[0] > self.map.max_distance or c1[0] < 0 or c2[0] > self.map.max_distance or c2[0] < 0:
continue
self.parsed_lines.append((c1, c2))
self.distances.append((math.hypot(c1[0], c1[1]), math.hypot(c2[0], c2[1])))
# self.parsed_lines.sort(key=lambda e: hui.distance_to(e))
def deb_print(self, rp, lines):
self.map.print_map()
rp.printPose()
for line in lines:
plt.plot((line.p1.x, line.p2.x), (line.p1.y, line.p2.y), 'r-')
plt.plot((rp.point.x, line.p1.x), (rp.point.y, line.p1.y), 'b-')
plt.plot((rp.point.x, line.p2.x), (rp.point.y, line.p2.y), 'b-')
plt.show()
def notify(self, frodo):
self.odo = frodo
#
        self.update_odo_pos(self.odo)
def update_sensors(self, need_get):
if need_get:
self.get_sensors()
if len(self.parsed_lines) > 0:
start = time.time()
for p in self.particles:
for i, (l, d) in enumerate(izip(self.parsed_lines, self.distances)):#range(len(self.parsed_lines)):
start = time.time()
point1, l1 = self.map.get_intersect_point(p, g.Point(l[0][0], l[0][1]), distance=d[0])
start = time.time()
point2, l2 = self.map.get_intersect_point(p, g.Point(l[1][0], l[1][1]), distance=d[1])
if point1 is None or point2 is None or not self.map.lines_eq(l1, l2):
p.weight = 0.0
continue
else:
# deb_lines.append(g.Line(point1, point2))
dist = p.point.distance_to(point1)
w = abs(dist - d[0])
p.weight += (1 - w / self.map.max_distance) / 2
dist = p.point.distance_to(point2)
w = abs(dist - d[1])
p.weight += (1 - w / self.map.max_distance) / 2
def generate_after_fall_particles(self):
self.particles = [self.get_random_particle(min_x=self.position.point.x - 200.0, min_y=self.position.point.y - 200,
max_x=self.position.point.x + 200.0,
max_y=self.position.point.y + 200, min_dir=self.position.direction - math.radians(10),
max_dir=self.position.direction + math.radians(10)) for i in range(self.particles_number)]
def localization(self, after_fall=False):
self.side = math.copysign(self.side, self.robot.joints.positions([1])["data"][0])
self.robot.joints.hardness([0, 1], [0.8, 0.8])
self.robot.kinematics.lookAt(500.0, 500.0 * self.side, 0.0, False)
look_at_points = [(1000.0, 500.0, 0.0), (1000.0, 0.0, 0.0)]
index = 0
sign = -1
if after_fall:
self.generate_after_fall_particles()
else:
self.initial_generation()
count = 0
update = True
while self.count_deviations() > (300.0, 150.0, math.radians(10)):
start = time.time()
self.update_sensors(update)
update = False
start = time.time()
self.norm_weights()
start = time.time()
self.resample(after_fall)
count += 1
if count == 50:
count = 0
update = True
if not after_fall:
self.robot.kinematics.lookAt(look_at_points[index][0], look_at_points[index][1] * self.side, look_at_points[index][2], False)
else:
self.robot.kinematics.lookAt(look_at_points[index][0], look_at_points[index][1] * sign, look_at_points[index][2], False)
sign *= -1
time.sleep(0.5)
index += 1
if index > 1:
index = 0
mean = self.count_mean()
self.position.point = g.Point(mean[0], mean[1])
self.position.direction = mean[2]
def print_plot(self, once=False):
self.map.print_map()
self.position.printPose()
if self.print_once:
if not once:
plt.ion()
self.print_once = False
plt.show()
else:
plt.draw()
time.sleep(0.05)
plt.clf()
def udapte_odo_pos(self, odometry):
self.position.odoTranslate(odometry[0], odometry[1], odometry[2])
def global_to_local(self, x, y):
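"""Convert world coordinates (x, y) into the robot's local frame, returning the forward offset, lateral offset, and bearing from the robot to the point."""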
new_x = x - self.position.point.x
new_y = y - self.position.point.y
rotated_x = new_x * math.cos(self.position.direction) + new_y * math.sin(self.position.direction)
rotated_y = -new_x * math.sin(self.position.direction) + new_y * math.cos(self.position.direction)
return (rotated_x, rotated_y, math.atan2(rotated_y, rotated_x))
# This is only for testing purposes
class LocaTesting:
map = Map()
position = RobotPose(0.0, 0.0, 0.0)
parsed_lines = []
def __init__(self, robot, cam_geom):
self.robot = robot
self.cam_geom = cam_geom
def get_sensors(self):
self.robot.vision.updateFrame()
vision_lines = self.robot.vision.lineDetect()
if len(vision_lines) != 0:
self.parsed_lines = []
for i in vision_lines:
c1 = self.cam_geom.imagePixelToWorld(i["x1"], i["y1"], False)
c2 = self.cam_geom.imagePixelToWorld(i["x2"], i["y2"], False)
if c1[0] > self.map.max_distance or c1[0] < 0 or c2[0] > self.map.max_distance or c2[0] < 0:
continue
else:
p1 = g.Point(self.position.point.x, self.position.point.y)
p2 = g.Point(self.position.point.x, self.position.point.y)
dist = math.hypot(c1[0], c1[1])
angle = math.atan2(c1[1], c1[0])
p1.translate(math.cos(self.position.direction - angle) * dist, math.sin(self.position.direction - angle) * dist)
dist = math.hypot(c2[0], c2[1])
angle = math.atan2(c2[1], c2[0])
p2.translate(math.cos(self.position.direction - angle) * dist, math.sin(self.position.direction - angle) * dist)
self.parsed_lines.append((c1, c2))
def print_plot(self):
self.position.printPose()
for i in self.parsed_lines:
plt.plot((i[0][0], i[1][0]),(i[0][1], i[1][1]))
plt.axis([-200, 4800, -3600, 3600])
plt.show()
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class AuthenticationV1TokenRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1TokenRequestSpec',
'status': 'V1TokenRequestStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""AuthenticationV1TokenRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this AuthenticationV1TokenRequest. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this AuthenticationV1TokenRequest.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this AuthenticationV1TokenRequest. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this AuthenticationV1TokenRequest.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this AuthenticationV1TokenRequest. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this AuthenticationV1TokenRequest. # noqa: E501
:return: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this AuthenticationV1TokenRequest.
:param metadata: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this AuthenticationV1TokenRequest. # noqa: E501
:return: The spec of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1TokenRequestSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this AuthenticationV1TokenRequest.
:param spec: The spec of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1TokenRequestSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this AuthenticationV1TokenRequest. # noqa: E501
:return: The status of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1TokenRequestStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this AuthenticationV1TokenRequest.
:param status: The status of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1TokenRequestStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthenticationV1TokenRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AuthenticationV1TokenRequest):
return True
return self.to_dict() != other.to_dict()
|
|
#!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
from base64 import urlsafe_b64encode
from contextlib import contextmanager
from hashlib import sha256
from itertools import chain
from linecache import getline
from optparse import OptionParser
from os import listdir
from os.path import join, basename
import re
import shlex
from shutil import rmtree
from sys import argv, exit
from tempfile import mkdtemp
from pkg_resources import require, VersionConflict, DistributionNotFound
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
activate('pip>=0.6.2') # Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
import pip
from pip.log import logger
from pip.req import parse_requirements
__version__ = 1, 0, 0
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).rstrip('=')
@contextmanager
def ephemeral_dir():
dir = mkdtemp(prefix='peep-')
try:
yield dir
finally:
rmtree(dir)
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args=initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def pip_download(req, argv, temp_path):
"""Download a package, and return its filename.
:arg req: The InstallRequirement which describes the package
:arg argv: Arguments to be passed along to pip, starting after the
subcommand
:arg temp_path: The path to the directory to download to
"""
# Get the original line out of the reqs file:
line = getline(*requirements_path_and_line(req))
# Remove any requirement file args.
argv = (['install', '--no-deps', '--download', temp_path] +
list(requirement_args(argv, want_other=True)) + # other args
shlex.split(line)) # ['nose==1.3.0']. split() removes trailing \n.
# Remember what was in the dir so we can backtrack and tell what we've
# downloaded (disgusting):
old_contents = set(listdir(temp_path))
# pip downloads the tarball into a second temp dir it creates, then it
# copies it to our specified download dir, then it unpacks it into the
# build dir in the venv (probably to read metadata out of it), then it
# deletes that. Don't be afraid: the tarball we're hashing is the pristine
# one downloaded from PyPI, not a fresh tarring of unpacked files.
run_pip(argv)
return (set(listdir(temp_path)) - old_contents).pop()
def pip_install_archives_from(temp_path):
"""pip install the archives from the ``temp_path`` dir, omitting
dependencies."""
# TODO: Make this preserve any pip options passed in, but strip off -r
# options and other things that don't make sense at this point in the
# process.
for filename in listdir(temp_path):
archive_path = join(temp_path, filename)
run_pip(['install', '--no-deps', archive_path])
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'r') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def version_of_archive(filename, package_name):
"""Deduce the version number of a downloaded package from its filename."""
# Since we know the project_name, we can strip that off the left, strip any
# archive extensions off the right, and take the rest as the version.
# And for Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-name-convention)
# we know the format bits are '-' separated.
if filename.endswith('.whl'):
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, flags=re.UNICODE)
if whl_package_name != our_package_name:
raise RuntimeError("The archive '%s' didn't start with the package name '%s', so I couldn't figure out the version number. My bad; improve me." %
(filename, whl_package_name))
return version
extensions = ['.tar.gz', '.tgz', '.tar', '.zip']
for ext in extensions:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
if not filename.startswith(package_name):
# TODO: What about safe/unsafe names?
raise RuntimeError("The archive '%s' didn't start with the package name '%s', so I couldn't figure out the version number. My bad; improve me." %
(filename, package_name))
return filename[len(package_name) + 1:] # Strip off '-' before version.
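# Illustrative examples (not executed anywhere in peep): for an sdist like
# 'nose-1.3.0.tar.gz' with package_name 'nose' this returns '1.3.0'; for a
# hypothetical wheel name 'peep-1.0.0-py2-none-any.whl' it returns '1.0.0'.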
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
def requirements_path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came."""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_of_requirements(requirements):
"""Return a map of package names to lists of known-good hashes, given
multiple requirements files."""
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line
``line_number``."""
for line_number in xrange(line_number - 1, 0, -1):
# If we hit a non-comment line, abort:
line = getline(path, line_number)
if not line.startswith('#'):
break
# If it's a hash line, add it to the pile:
if line.startswith('# sha256: '):
yield line.split(':', 1)[1].strip()
expected_hashes = {}
missing_hashes = []
for req in requirements: # InstallRequirements
path, line_number = requirements_path_and_line(req)
hashes = list(hashes_above(path, line_number))
if hashes:
hashes.reverse() # because we read them backwards
expected_hashes[req.name] = hashes
else:
missing_hashes.append(req.name)
return expected_hashes, missing_hashes
def hash_mismatches(expected_hash_map, downloaded_hashes):
"""Yield the list of allowed hashes, package name, and download-hash of
each package whose download-hash didn't match one allowed for it in the
requirements file.
If a package is missing from ``downloaded_hashes``, ignore it; that means
it's already installed and we're not risking anything.
"""
for package_name, expected_hashes in expected_hash_map.iteritems():
try:
hash_of_download = downloaded_hashes[package_name]
except KeyError:
pass
else:
if hash_of_download not in expected_hashes:
yield expected_hashes, package_name, hash_of_download
def peep_hash(argv):
"""Return the peep hash of one or more files, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
parser = OptionParser(
usage='usage: %prog hash file [file ...]',
description='Print a peep hash line for one or more files: for '
'example, "# sha256: '
'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
_, paths = parser.parse_args(args=argv)
if paths:
for path in paths:
print '# sha256:', hash_of_file(path)
return ITS_FINE_ITS_FINE
else:
parser.print_usage()
return COMMAND_LINE_ERROR
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
was required. We work around that by passing it a mock object.
"""
default_vcs = None
skip_requirements_regex = None
def peep_install(argv):
"""Perform the ``peep install`` subcommand, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
req_paths = list(requirement_args(argv, want_paths=True))
if not req_paths:
print "You have to specify one or more requirements files with the -r option, because"
print "otherwise there's nowhere for peep to look up the hashes."
return COMMAND_LINE_ERROR
# We're a "peep install" command, and we have some requirement paths.
requirements = list(chain(*(parse_requirements(path,
options=EmptyOptions())
for path in req_paths)))
downloaded_hashes, downloaded_versions, satisfied_reqs = {}, {}, []
with ephemeral_dir() as temp_path:
for req in requirements:
req.check_if_exists()
if req.satisfied_by: # This is already installed.
satisfied_reqs.append(req)
else:
name = req.req.project_name
archive_filename = pip_download(req, argv, temp_path)
downloaded_hashes[name] = hash_of_file(join(temp_path, archive_filename))
downloaded_versions[name] = version_of_archive(archive_filename, name)
expected_hashes, missing_hashes = hashes_of_requirements(requirements)
mismatches = list(hash_mismatches(expected_hashes, downloaded_hashes))
# Remove satisfied_reqs from missing_hashes, preserving order:
satisfied_req_names = set(req.name for req in satisfied_reqs)
missing_hashes = [m for m in missing_hashes if m not in satisfied_req_names]
# Skip a line after pip's "Cleaning up..." so the important stuff
# stands out:
if mismatches or missing_hashes:
print
# Mismatched hashes:
if mismatches:
print "THE FOLLOWING PACKAGES DIDN'T MATCHES THE HASHES SPECIFIED IN THE REQUIREMENTS"
print "FILE. If you have updated the package versions, update the hashes. If not,"
print "freak out, because someone has tampered with the packages.\n"
for expected_hashes, package_name, hash_of_download in mismatches:
hash_of_download = downloaded_hashes[package_name]
preamble = ' %s: expected%s' % (
package_name,
' one of' if len(expected_hashes) > 1 else '')
print preamble,
print ('\n' + ' ' * (len(preamble) + 1)).join(expected_hashes)
print ' ' * (len(preamble) - 4), 'got', hash_of_download
if mismatches:
print # Skip a line before "Not proceeding..."
# Missing hashes:
if missing_hashes:
print 'The following packages had no hashes specified in the requirements file, which'
print 'leaves them open to tampering. Vet these packages to your satisfaction, then'
print 'add these "sha256" lines like so:\n'
for package_name in missing_hashes:
print '# sha256: %s' % downloaded_hashes[package_name]
print '%s==%s\n' % (package_name,
downloaded_versions[package_name])
if mismatches or missing_hashes:
print '-------------------------------'
print 'Not proceeding to installation.'
return SOMETHING_WENT_WRONG
else:
pip_install_archives_from(temp_path)
if satisfied_reqs:
print "These packages were already installed, so we didn't need to download or build"
print "them again. If you installed them with peep in the first place, you should be"
print "safe. If not, uninstall them, then re-attempt your install with peep."
for req in satisfied_reqs:
print ' ', req.req
return ITS_FINE_ITS_FINE
def main():
"""Be the top-level entrypoint. Return a shell status code."""
commands = {'hash': peep_hash,
'install': peep_install}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code
if __name__ == '__main__':
exit(main())
|
|
__author__ = 'jonathan'
import logging
import test.nova._fixtures as models
from lib.rome.core.orm.query import Query
from lib.rome.core.session.session import Session as Session
def test_relationships_single_str(save_instance=True, save_info_cache=True, use_update=False, use_session=False):
print("Ensure that foreign keys are working test_relationships_single_str(save_instance=%s, save_info_cache=%s, use_update=%s, use_session=%s)" % (save_instance, save_info_cache, use_update, use_session))
session = None
if use_session:
session = Session()
instance_count = Query(models.Instance).count()
instance = models.Instance()
instance.uuid = "uuid_%s" % (instance_count)
if save_instance:
if use_session:
session.add(instance)
else:
instance.save()
instance_info_cache = models.InstanceInfoCache()
if not use_update:
instance_info_cache.instance_uuid = instance.uuid
else:
instance_info_cache.update({"instance_uuid": instance.uuid})
if not save_info_cache:
if use_session:
session.add(instance)
else:
instance.save()
else:
if use_session:
session.add(instance_info_cache)
else:
instance_info_cache.save()
if use_session:
session.flush()
instance_from_db = Query(models.Instance, models.Instance.id==instance.id).first()
instance_info_cache_from_db = Query(models.InstanceInfoCache, models.InstanceInfoCache.id==instance_info_cache.id).first()
assert instance_from_db.id == instance.id
assert instance_info_cache_from_db.id == instance_info_cache.id
assert instance_from_db.info_cache is not None
assert instance_from_db.info_cache.id == instance_info_cache.id
assert instance_info_cache_from_db.instance is not None
assert instance_info_cache_from_db.instance.id == instance.id
assert instance_info_cache_from_db.instance_uuid == instance.uuid
def test_relationships_single_object(save_instance=True, save_info_cache=True, use_update=False, update_instance=False, use_session=False):
print("Ensure that foreign keys are working test_relationships_single_object(save_instance=%s, save_info_cache=%s, use_update=%s, update_instance=%s, use_session=%s)" % (save_instance, save_info_cache, use_update, update_instance, use_session))
session = None
if use_session:
session = Session()
instance_count = Query(models.Instance).count()
instance = models.Instance()
instance_uuid = "uuid_%s" % (instance_count)
if save_instance:
if use_session:
session.add(instance)
else:
instance.save()
instance_info_cache = models.InstanceInfoCache()
if update_instance:
if not use_update:
instance.info_cache = instance_info_cache
instance.uuid = instance_uuid
else:
# CLASSIC
# instance.update({"info_cache": instance_info_cache})
# DEBUG
values = {}
values['uuid'] = instance_uuid
# instance['info_cache'] = models.InstanceInfoCache()
instance['info_cache'] = instance_info_cache
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance['info_cache'].update(info_cache)
instance.update(values, do_save=False)
if not save_info_cache:
if use_session:
session.add(instance)
else:
instance.save()
else:
if use_session:
session.add(instance_info_cache)
else:
instance_info_cache.save()
else:
instance.uuid = instance_uuid
if not use_update:
instance_info_cache.instance = instance
else:
instance_info_cache.update({"instance": instance})
if not save_info_cache:
instance.save()
else:
if use_session:
session.add(instance_info_cache)
else:
instance_info_cache.save()
if use_session:
session.flush()
instance_from_db = Query(models.Instance, models.Instance.id==instance.id).first()
instance_info_cache_from_db = Query(models.InstanceInfoCache, models.InstanceInfoCache.id==instance_info_cache.id).first()
assert instance_from_db.id == instance.id
assert instance_info_cache_from_db.id == instance_info_cache.id
assert instance_from_db.info_cache is not None
assert instance_from_db.info_cache.id == instance_info_cache.id
assert instance_info_cache_from_db.instance is not None
assert instance_info_cache_from_db.instance.id == instance.id
assert instance_info_cache_from_db.instance_uuid == instance.uuid
def test_relationships_list_int(save_fixed_ip=True):
print("Ensure that foreign keys are working test_relationships_list_int(save_fixed_ip=%s)" % (save_fixed_ip))
network = models.Network()
network.save()
fixed_ips = []
for i in range(0, 5):
fixed_ip = models.FixedIp()
fixed_ip.network_id = network.id
fixed_ips += [fixed_ip]
if not save_fixed_ip:
fixed_ip.network = network
network.save()
else:
fixed_ip.save()
network_from_db = Query(models.Network, models.Network.id==network.id).first()
for fixed_ip in fixed_ips:
fixed_ip_from_db = Query(models.FixedIp, models.FixedIp.network_id==network.id, models.FixedIp.id==fixed_ip.id).first()
assert network_from_db.id == network.id
assert fixed_ip_from_db.id == fixed_ip.id
network_from_db.load_relationships()
assert network_from_db.fixed_ips is not None and len(network_from_db.fixed_ips) > 0
assert fixed_ip_from_db.id in map(lambda x: x.id, network_from_db.fixed_ips)
assert fixed_ip_from_db.network is not None
assert fixed_ip_from_db.network.id == network_from_db.id
assert fixed_ip_from_db.network_id == network_from_db.id
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test_relationships_single_object(save_instance=True, save_info_cache=True, use_update=True, update_instance=True, use_session=True)
test_relationships_single_object(save_instance=True, save_info_cache=True, use_update=True, update_instance=True, use_session=True)
# sys.exit(0)
######################
# Instance/InfoCache #
######################
test_relationships_single_str(save_instance=True, save_info_cache=True, use_update=False, use_session=True)
test_relationships_single_object(save_instance=True, save_info_cache=True, use_update=True, update_instance=True, use_session=True)
for use_session in [True, False]:
test_relationships_single_str(use_session=use_session)
test_relationships_single_str(use_update=True, use_session=use_session)
for use_session in [True, False]:
for use_update in [True, False]:
for update_instance in [True, False]:
test_relationships_single_object(use_update=use_update, update_instance=update_instance, use_session=use_session)
test_relationships_single_object(save_instance=False, use_update=use_update, update_instance=update_instance, use_session=use_session)
test_relationships_single_object(save_info_cache=False, use_update=use_update, update_instance=update_instance, use_session=use_session)
test_relationships_single_object(save_instance=False, save_info_cache=False, use_update=use_update, update_instance=update_instance, use_session=use_session)
######################
# Network/FixedIp #
######################
test_relationships_list_int()
test_relationships_list_int(save_fixed_ip=False)
|
|
import sympy
import tempfile
import os
from sympy import symbols, Eq
from sympy.external import import_module
from sympy.tensor import IndexedBase, Idx
from sympy.utilities.autowrap import autowrap, ufuncify, CodeWrapError
from sympy.utilities.pytest import skip
numpy = import_module('numpy', min_module_version='1.6.1')
Cython = import_module('Cython', min_module_version='0.15.1')
f2py = import_module('numpy.f2py', __import__kwargs={'fromlist': ['f2py']})
f2pyworks = False
if f2py:
try:
autowrap(symbols('x'), 'f95', 'f2py')
except (CodeWrapError, ImportError, OSError):
f2pyworks = False
else:
f2pyworks = True
a, b, c = symbols('a b c')
n, m, d = symbols('n m d', integer=True)
A, B, C = symbols('A B C', cls=IndexedBase)
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', d)
def has_module(module):
"""
Return True if module exists, otherwise run skip().
module should be a string.
"""
# To give a string of the module name to skip(), this function takes a
# string. So we don't waste time running import_module() more than once,
# just map the three modules tested here in this dict.
modnames = {'numpy': numpy, 'Cython': Cython, 'f2py': f2py}
if modnames[module]:
if module == 'f2py' and not f2pyworks:
skip("Couldn't run f2py.")
return True
skip("Couldn't import %s." % module)
#
# test runners used by several language-backend combinations
#
def runtest_autowrap_twice(language, backend):
f = autowrap((((a + b)/c)**5).expand(), language, backend)
g = autowrap((((a + b)/c)**4).expand(), language, backend)
# check that autowrap updates the module name. Else, g gives the same as f
assert f(1, -2, 1) == -1.0
assert g(1, -2, 1) == 1.0
def runtest_autowrap_trace(language, backend):
has_module('numpy')
trace = autowrap(A[i, i], language, backend)
assert trace(numpy.eye(100)) == 100
def runtest_autowrap_matrix_vector(language, backend):
has_module('numpy')
x, y = symbols('x y', cls=IndexedBase)
expr = Eq(y[i], A[i, j]*x[j])
mv = autowrap(expr, language, backend)
# compare with numpy's dot product
M = numpy.random.rand(10, 20)
x = numpy.random.rand(20)
y = numpy.dot(M, x)
assert numpy.sum(numpy.abs(y - mv(M, x))) < 1e-13
def runtest_autowrap_matrix_matrix(language, backend):
has_module('numpy')
expr = Eq(C[i, j], A[i, k]*B[k, j])
matmat = autowrap(expr, language, backend)
# compare with numpy's dot product
M1 = numpy.random.rand(10, 20)
M2 = numpy.random.rand(20, 15)
M3 = numpy.dot(M1, M2)
assert numpy.sum(numpy.abs(M3 - matmat(M1, M2))) < 1e-13
def runtest_ufuncify(language, backend):
has_module('numpy')
a, b, c = symbols('a b c')
fabc = ufuncify([a, b, c], a*b + c, backend=backend)
facb = ufuncify([a, c, b], a*b + c, backend=backend)
grid = numpy.linspace(-2, 2, 50)
b = numpy.linspace(-5, 4, 50)
c = numpy.linspace(-1, 1, 50)
expected = grid*b + c
numpy.testing.assert_allclose(fabc(grid, b, c), expected)
numpy.testing.assert_allclose(facb(grid, c, b), expected)
def runtest_issue_10274(language, backend):
expr = (a - b + c)**(13)
tmp = tempfile.mkdtemp()
f = autowrap(expr, language, backend, tempdir=tmp, helpers=('helper', a - b + c, (a, b, c)))
assert f(1, 1, 1) == 1
for file in os.listdir(tmp):
if file.startswith("wrapped_code_") and file.endswith(".c"):
fil = open(tmp + '/' + file)
assert fil.read() == ("/******************************************************************************\n"
" * Code generated with sympy "+ sympy.__version__+" *\n"
" * *\n"
" * See http://www.sympy.org/ for more information. *\n"
" * *\n"
" * This file is part of 'autowrap' *\n"
" ******************************************************************************/\n"
"#include " + '"' + file[:-1]+ 'h"' + "\n"
"#include <math.h>\n"
"\n"
"double helper(double a, double b, double c) {\n"
"\n"
" double helper_result;\n"
" helper_result = a - b + c;\n"
" return helper_result;\n"
"\n"
"}\n"
"\n"
"double autofunc(double a, double b, double c) {\n"
"\n"
" double autofunc_result;\n"
" autofunc_result = pow(helper(a, b, c), 13);\n"
" return autofunc_result;\n"
"\n"
"}\n")
#
# tests of language-backend combinations
#
# f2py
def test_wrap_twice_f95_f2py():
has_module('f2py')
runtest_autowrap_twice('f95', 'f2py')
def test_autowrap_trace_f95_f2py():
has_module('f2py')
runtest_autowrap_trace('f95', 'f2py')
def test_autowrap_matrix_vector_f95_f2py():
has_module('f2py')
runtest_autowrap_matrix_vector('f95', 'f2py')
def test_autowrap_matrix_matrix_f95_f2py():
has_module('f2py')
runtest_autowrap_matrix_matrix('f95', 'f2py')
def test_ufuncify_f95_f2py():
has_module('f2py')
runtest_ufuncify('f95', 'f2py')
# Cython
def test_wrap_twice_c_cython():
has_module('Cython')
runtest_autowrap_twice('C', 'cython')
def test_autowrap_trace_C_Cython():
has_module('Cython')
runtest_autowrap_trace('C', 'cython')
def test_autowrap_matrix_vector_C_cython():
has_module('Cython')
runtest_autowrap_matrix_vector('C', 'cython')
def test_autowrap_matrix_matrix_C_cython():
has_module('Cython')
runtest_autowrap_matrix_matrix('C', 'cython')
def test_ufuncify_C_Cython():
has_module('Cython')
runtest_ufuncify('C', 'cython')
def test_issue_10274_C_cython():
has_module('Cython')
runtest_issue_10274('C', 'cython')
# Numpy
def test_ufuncify_numpy():
# This test doesn't use Cython, but if Cython works, then there is a valid
# C compiler, which is needed.
has_module('Cython')
runtest_ufuncify('C', 'numpy')
|
|
"""`Nanduri2012Model`, `Nanduri2012Spatial`, `Nanduri2012Temporal`
[Nanduri2012]_"""
import numpy as np
from .base import Model, SpatialModel, TemporalModel
from ._nanduri2012 import spatial_fast, temporal_fast
from ..implants import ElectrodeArray, DiskElectrode
from ..stimuli import Stimulus
class Nanduri2012Spatial(SpatialModel):
"""Spatial response model of [Nanduri2012]_
Implements the spatial response model described in [Nanduri2012]_, which
assumes that the spatial activation of retinal tissue is equivalent to the
"current spread" :math:`I`, described as a function of distance :math:`r`
from the center of the stimulating electrode:
.. math::
I(r) =
\\begin{cases}
\\frac{\\verb!atten_a!}{\\verb!atten_a! + (r-a)^\\verb!atten_n!}
& r > a \\\\
1 & r \\leq a
\\end{cases}
where :math:`a` is the radius of the electrode (see Eq.2 in the paper).
.. note::
Use this class if you just want the spatial response model.
Use :py:class:`~pulse2percept.models.Nanduri2012Model` if you want both
the spatial and temporal model.
Parameters
----------
atten_a : float, optional
Numerator of the attenuation function
atten_n : float32, optional
Exponent of the attenuation function's denominator
retinotopy : :py:class:`~pulse2percept.utils.VisualFieldMap`, optional
An instance of a :py:class:`~pulse2percept.utils.VisualFieldMap`
object that provides ``ret2dva`` and ``dva2ret`` methods.
By default, :py:class:`~pulse2percept.utils.Curcio1990Map` is
used.
n_gray : int, optional
The number of gray levels to use. If an integer is given, k-means
clustering is used to compress the color space of the percept into
``n_gray`` bins. If None, no compression is performed.
noise : float or int, optional
Adds salt-and-pepper noise to each percept frame. An integer will be
interpreted as the number of pixels to subject to noise in each frame.
A float between 0 and 1 will be interpreted as a ratio of pixels to
subject to noise in each frame.
n_threads : int, optional
Number of CPU threads to use during parallelization using OpenMP.
Defaults to max number of user CPU cores.
"""
def get_default_params(self):
"""Returns all settable parameters of the Nanduri model"""
base_params = super(Nanduri2012Spatial, self).get_default_params()
params = {'atten_a': 14000, 'atten_n': 1.69}
return {**base_params, **params}
def _predict_spatial(self, earray, stim):
"""Predicts the brightness at spatial locations"""
# This does the expansion of a compact stimulus and a list of
# electrodes to activation values at X,Y grid locations:
return spatial_fast(stim.data,
np.array([earray[e].x for e in stim.electrodes],
dtype=np.float32),
np.array([earray[e].y for e in stim.electrodes],
dtype=np.float32),
np.array([earray[e].z for e in stim.electrodes],
dtype=np.float32),
np.array([earray[e].r for e in stim.electrodes],
dtype=np.float32),
self.grid.xret.ravel(),
self.grid.yret.ravel(),
self.atten_a,
self.atten_n,
self.thresh_percept,
self.n_threads)
def predict_percept(self, implant, t_percept=None):
if not np.all([isinstance(e, DiskElectrode)
for e in implant.electrode_objects]):
raise TypeError("The Nanduri2012 spatial model only supports "
"DiskElectrode arrays.")
return super(Nanduri2012Spatial, self).predict_percept(
implant, t_percept=t_percept
)
class Nanduri2012Temporal(TemporalModel):
"""Temporal model of [Nanduri2012]_
Implements the temporal response model described in [Nanduri2012]_, which
assumes that the temporal activation of retinal tissue is the output of a
linear-nonlinear model cascade (see Fig.6 in the paper).
.. note::
Use this class if you just want the temporal response model.
Use :py:class:`~pulse2percept.models.Nanduri2012Model` if you want both
the spatial and temporal model.
Parameters
----------
dt : float, optional
Sampling time step (ms)
tau1: float, optional
Time decay constant for the fast leaky integrator.
tau2: float, optional
Time decay constant for the charge accumulation.
tau3: float, optional
Time decay constant for the slow leaky integrator.
eps: float, optional
Scaling factor applied to charge accumulation.
asymptote: float, optional
Asymptote of the logistic function used in the stationary nonlinearity
stage.
slope: float, optional
Slope of the logistic function in the stationary nonlinearity stage.
shift: float, optional
Shift of the logistic function in the stationary nonlinearity stage.
scale_out : float32, optional
A scaling factor applied to the output of the model
thresh_percept : float, optional
Below threshold, the percept has brightness zero.
n_threads : int, optional
Number of CPU threads to use during parallelization using OpenMP.
Defaults to max number of user CPU cores.
"""
def get_default_params(self):
base_params = super(Nanduri2012Temporal, self).get_default_params()
params = {
# Time decay for the ganglion cell impulse response:
'tau1': 0.42,
# Time decay for the charge accumulation:
'tau2': 45.25,
# Time decay for the slow leaky integrator:
'tau3': 26.25,
# Scaling factor applied to charge accumulation:
'eps': 8.73,
# Asymptote of the sigmoid:
'asymptote': 14.0,
# Slope of the sigmoid:
'slope': 3.0,
# Shift of the sigmoid:
'shift': 16.0,
# Scale the output:
'scale_out': 1.0
}
return {**base_params, **params}
def _predict_temporal(self, stim, t_percept):
"""Predict the temporal response"""
# Pass the stimulus as a 2D NumPy array to the fast Cython function:
stim_data = stim.data.reshape((-1, len(stim.time)))
# Calculate at which simulation time steps we need to output a percept.
# This is basically t_percept/self.dt, but we need to beware of
# floating point rounding errors! 29.999 will be rounded down to 29 by
# np.uint32, so we need to np.round it first:
idx_percept = np.uint32(np.round(t_percept / self.dt))
if np.unique(idx_percept).size < t_percept.size:
raise ValueError(f"All times 't_percept' must be distinct multiples "
f"of `dt`={self.dt:.2e}")
# Cython returns a 2D (space x time) NumPy array:
return temporal_fast(stim_data.astype(np.float32),
stim.time.astype(np.float32),
idx_percept,
self.dt, self.tau1, self.tau2, self.tau3,
self.asymptote, self.shift, self.slope, self.eps,
self.scale_out, self.thresh_percept, self.n_threads)
class Nanduri2012Model(Model):
"""[Nanduri2012]_ Model
Implements the model described in [Nanduri2012]_, where percepts are
circular and their brightness evolves over time.
The model combines two parts:
* :py:class:`~pulse2percept.models.Nanduri2012Spatial` is used to
calculate the spatial activation function, which is assumed to be
equivalent to the "current spread" described as a function of distance
from the center of the stimulating electrode (see Eq.2 in the paper).
* :py:class:`~pulse2percept.models.Nanduri2012Temporal` is used to
calculate the temporal activation function, which is assumed to be the
output of a linear-nonlinear cascade model (see Fig.6 in the paper).
Parameters
----------
atten_a : float, optional
Numerator of the attenuation function (Eq.2 in the paper)
atten_n : float32, optional
Exponent of the attenuation function's denominator (Eq.2 in the paper)
dt : float, optional
Sampling time step (ms)
tau1: float, optional
Time decay constant for the fast leaky integrator.
tau2: float, optional
Time decay constant for the charge accumulation.
tau3: float, optional
Time decay constant for the slow leaky integrator.
eps: float, optional
Scaling factor applied to charge accumulation.
asymptote: float, optional
Asymptote of the logistic function used in the stationary nonlinearity
stage.
slope: float, optional
Slope of the logistic function in the stationary nonlinearity stage.
shift: float, optional
Shift of the logistic function in the stationary nonlinearity stage.
scale_out : float32, optional
A scaling factor applied to the output of the model
thresh_percept: float, optional
Below threshold, the percept has brightness zero.
retinotopy : :py:class:`~pulse2percept.utils.VisualFieldMap`, optional
An instance of a :py:class:`~pulse2percept.utils.VisualFieldMap`
object that provides ``ret2dva`` and ``dva2ret`` methods.
By default, :py:class:`~pulse2percept.utils.Curcio1990Map` is
used.
n_gray : int, optional
The number of gray levels to use. If an integer is given, k-means
clustering is used to compress the color space of the percept into
``n_gray`` bins. If None, no compression is performed.
noise : float or int, optional
Adds salt-and-pepper noise to each percept frame. An integer will be
interpreted as the number of pixels to subject to noise in each frame.
A float between 0 and 1 will be interpreted as a ratio of pixels to
subject to noise in each frame.
n_threads: int, optional
Number of CPU threads to use during parallelization using OpenMP. Defaults to max number of user CPU cores.
"""
def __init__(self, **params):
super(Nanduri2012Model, self).__init__(spatial=Nanduri2012Spatial(),
temporal=Nanduri2012Temporal(),
**params)
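# Illustrative sketch (not part of the pulse2percept API): the current-spread
# attenuation described in the Nanduri2012Spatial docstring, using the default
# parameters atten_a=14000 and atten_n=1.69 from get_default_params. I(r) is 1
# on the electrode (r <= a) and falls off as atten_a / (atten_a + (r-a)**atten_n).
def _example_current_spread(r, a, atten_a=14000.0, atten_n=1.69):
    """Return the [Nanduri2012]_ current spread at distance ``r`` (um) from
    the center of a disk electrode of radius ``a`` (um)."""
    r = np.asarray(r, dtype=float)
    spread = atten_a / (atten_a + np.maximum(r - a, 0.0) ** atten_n)
    # Inside the electrode (r <= a) the activation is 1 by definition:
    return np.where(r <= a, 1.0, spread)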
|
|
# Standard library imports
import string, pkgutil
from xceptions import *
# Third party imports
#### netcdf --- currently supports cdms2, python-netCDF4 and Scientific
l = pkgutil.iter_modules()
ll = map( lambda x: x[1], l )
supportedNetcdf = ['cdms2','netCDF4','Scientific','ncq3']
installedSupportedNetcdf = []
##ll = []
for x in supportedNetcdf:
if x in ll:
if len(installedSupportedNetcdf) == 0:
try:
cmd = 'import %s' % x
exec cmd
installedSupportedNetcdf.append( x )
except:
print 'Failed to import %s' % x
else:
installedSupportedNetcdf.append( x )
if len(installedSupportedNetcdf) > 0:
ncLib = installedSupportedNetcdf[0]
else:
print """No supported netcdf module found.
Supported modules are %s.
Attempting to run with experimental ncq3
Execution may fail, depending on options chosen.
""" % str(supportedNetcdf)
ncLib = 'ncq3'
if ncLib == 'Scientific':
from Scientific.IO import NetCDF as ncdf
## end of netcdf import.
## utility function to convert "type" to string and standardise terminology
def tstr( x ):
x1 = str(x)
return {'real':'float32', 'integer':'int32', 'float':'float32', 'double':'float64' }.get( x1, x1 )
class fileMetadata(object):
def __init__(self,dummy=False,attributeMappingsLog=None,forceLib=None):
self.dummy = dummy
self.atMapLog = attributeMappingsLog
self.forceLib = forceLib
self.ncLib = ncLib
if self.atMapLog == None:
self.atMapLog = open( 'cccc_atMapLog.txt', 'a' )
if self.forceLib == 'ncq3':
import ncq3
self.ncq3 = ncq3
self.ncLib = 'ncq3'
elif self.forceLib == 'cdms2':
import cdms2
self.cdms2 = cdms2
self.ncLib = 'cdms2'
elif self.forceLib == 'netCDF4':
import netCDF4
self.netCDF4 = netCDF4
self.ncLib = 'netCDF4 [%s]' % netCDF4.__version__
elif self.forceLib == 'Scientific':
import Scientific
from Scientific.IO import NetCDF as ncdf
self.ncdf = ncdf
self.ncLib = 'Scientific [%s]' % Scientific.__version__
else:
self.ncLib = ncLib
def close(self):
self.atMapLog.close()
def loadNc(self,fpath):
self.fpath = fpath
self.fn = string.split( fpath, '/' )[-1]
self.fparts = string.split( self.fn[:-3], '_' )
self.ga = {}
self.va = {}
self.da = {}
if self.dummy:
self.makeDummyFileImage()
return
elif self.ncLib == 'cdms2':
import cdms2
self.cdms2 = cdms2
self.loadNc__Cdms(fpath)
elif self.ncLib[:7] == 'netCDF4':
import netCDF4
self.netCDF4 = netCDF4
self.loadNc__Netcdf4(fpath)
elif self.ncLib[:10] == 'Scientific':
from Scientific.IO import NetCDF as ncdf
self.ncdf = ncdf
self.loadNc__Scientific(fpath)
else:
import ncq3
self.ncq3 = ncq3
self.loadNc__ncq(fpath)
##raise baseException( 'No supported netcdf module assigned' )
def loadNc__ncq(self,fpath):
self.nc0 = self.ncq3.open( fpath )
self.nc0.getDigest()
self.ncq3.close( self.nc0 )
self.nc = self.ncq3.Browse( self.nc0.digest )
for a in self.nc._gal:
self.ga[a.name] = a.value
for v in self.nc._vdict.keys():
thisv = self.nc._vdict[v][0]
if v not in self.nc._ddict.keys():
self.va[v] = {}
for a in self.nc._ll[thisv.id]:
self.va[v][a.name] = a.value
self.va[v]['_type'] = tstr( thisv.type )
if v in ['plev','plev_bnds','height']:
x = thisv.data
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
else:
self.da[v] = {}
thisa = self.nc._ddict[v]
for a in self.nc._ll[thisv.id]:
self.da[v][a.name] = a.value
self.da[v]['_type'] = tstr( thisv.type )
self.da[v]['_data'] = thisv.data
def loadNc__Cdms(self,fpath):
self.nc = self.cdms2.open( fpath )
for k in self.nc.attributes.keys():
self.ga[k] = self.nc.attributes[k]
if len( self.ga[k] ) == 1:
self.ga[k] = self.ga[k][0]
## nasty fix to deal with fact that cdms2 does not read the 'id' global attribute
try:
thisid = self.nc.id
self.ga['id'] = thisid
except:
pass
for v in self.nc.variables.keys():
self.va[v] = {}
for k in self.nc.variables[v].attributes.keys():
x = self.nc.variables[v].attributes[k]
## returns a list for some scalar attributes.
if type(x) == type([]) and len(x) == 1:
x = x[0]
self.va[v][k] = x
self.va[v]['_type'] = tstr( self.nc.variables[v].dtype )
if v in ['plev','plev_bnds','height']:
x = self.nc.variables[v].getValue().tolist()
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
### Note: returns a scalar if data has a scalar value.
## remove missing_value == None
if self.va[v].has_key( 'missing_value' ) and self.va[v]['missing_value'] == None:
self.va[v].pop( 'missing_value' )
for v in self.nc.axes.keys():
self.da[v] = {}
for k in self.nc.axes[v].attributes.keys():
self.da[v][k] = self.nc.axes[v].attributes[k]
self.da[v]['_type'] = tstr( self.nc.axes[v].getValue().dtype )
self.da[v]['_data'] = self.nc.axes[v].getValue().tolist()
self.nc.close()
###
### attributes in .__dict__ dictionary
### variables in .variables dictionary
### dimension lengths in .dimensions
### <variable>.getValue() returns an numpy.ndarray
### data type in <variable>.getValue().dtype
### for scalar variables, <variable>.getValue().tolist() returns a scalar.
###
def loadNc__Scientific(self,fpath):
self.nc = self.ncdf.NetCDFFile( fpath, 'r' )
for k in self.nc.__dict__.keys():
self.ga[k] = self.nc.__dict__[k]
if type(self.ga[k]) not in [type('x'),type(1),type(1.)] and len(self.ga[k]) == 1:
self.ga[k] = self.ga[k][0]
for v in self.nc.variables.keys():
if v not in self.nc.dimensions.keys():
self.va[v] = {}
for k in self.nc.variables[v].__dict__.keys():
self.va[v][k] = self.nc.variables[v].__dict__[k]
self.va[v]['_type'] = tstr( self.nc.variables[v].getValue().dtype )
if v in ['plev','plev_bnds','height']:
### Note: returns a scalar if data has a scalar value.
x = self.nc.variables[v].getValue().tolist()
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
for v in self.nc.dimensions.keys():
self.da[v] = {}
if v in self.nc.variables.keys():
for k in self.nc.variables[v].__dict__.keys():
self.da[v][k] = self.nc.variables[v].__dict__[k]
self.da[v]['_type'] = tstr( self.nc.variables[v].getValue().dtype )
self.da[v]['_data'] = self.nc.variables[v].getValue().tolist()
else:
self.da[v]['_type'] = 'index (no data variable)'
self.nc.close()
def loadNc__Netcdf4(self,fpath):
self.nc = self.netCDF4.Dataset(fpath, 'r')
for k in self.nc.ncattrs():
self.ga[k] = self.nc.getncattr(k)
if type( self.ga[k] ) in [ type([]),type(()) ]:
if len( self.ga[k] ) == 1:
self.ga[k] = self.ga[k][0]
for v in self.nc.variables.keys():
if v not in self.nc.dimensions.keys():
self.va[v] = {}
for k in self.nc.variables[v].ncattrs():
self.va[v][k] = self.nc.variables[v].getncattr(k)
try:
self.va[v]['_type'] = tstr( self.nc.variables[v].dtype )
except:
self.va[v]['_type'] = tstr( self.nc.variables[v].datatype )
if v in ['plev','plev_bnds','height']:
self.va[v]['_data'] = self.nc.variables[v][:].tolist()
if type( self.va[v]['_data'] ) != type( [] ):
self.va[v]['_data'] = [self.va[v]['_data'],]
for v in self.nc.dimensions.keys():
self.da[v] = {}
if v in self.nc.variables.keys():
for k in self.nc.variables[v].ncattrs():
self.da[v][k] = self.nc.variables[v].getncattr(k)
try:
self.da[v]['_type'] = tstr( self.nc.variables[v].dtype )
except:
self.da[v]['_type'] = tstr( self.nc.variables[v].datatype )
self.da[v]['_data'] = self.nc.variables[v][:].tolist()
if type( self.da[v]['_data'] ) != type( [] ):
self.da[v]['_data'] = [self.da[v]['_data'],]
else:
self.da[v]['_type'] = 'index (no data variable)'
self.nc.close()
def makeDummyFileImage(self):
for k in range(10):
self.ga['ga%s' % k] = str(k)
for v in [self.fparts[0],]:
self.va[v] = {}
self.va[v]['standard_name'] = 's%s' % v
self.va[v]['long_name'] = v
self.va[v]['cell_methods'] = 'time: point'
self.va[v]['units'] = '1'
self.va[v]['_type'] = 'float32'
for v in ['lat','lon','time']:
self.da[v] = {}
self.da[v]['_type'] = 'float64'
self.da[v]['_data'] = range(5)
dlist = ['lat','lon','time']
svals = lambda p,q: map( lambda y,z: self.da[y].__setitem__(p, z), dlist, q )
svals( 'standard_name', ['latitude', 'longitude','time'] )
svals( 'long_name', ['latitude', 'longitude','time'] )
svals( 'units', ['degrees_north', 'degrees_east','days since 19590101'] )
def applyMap( self, mapList, globalAttributesInFn, log=None ):
for m in mapList:
if m[0] == 'am001':
if m[1][0][0] == "@var":
if m[1][0][1] in self.va.keys():
this = self.va[m[1][0][1]]
apThis = True
for c in m[1][1:]:
if c[0] not in this.keys():
apThis = False
elif c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s:%s to %s' % (m[1][0][1],targ,m[2][1])
thisval = self.va[m[1][0][1]].get( targ, None )
self.va[m[1][0][1]][targ] = m[2][1]
self.atMapLog.write( '@var:"%s","%s","%s","%s","%s"\n' % (self.fpath, m[1][0][1], targ, thisval, m[2][1] ) )
elif m[1][0][0] == "@ax":
##print 'checking dimension ',m[1][0][1], self.da.keys()
if m[1][0][1] in self.da.keys():
##print 'checking dimension [2]',m[1][0][1]
this = self.da[m[1][0][1]]
apThis = True
for c in m[1][1:]:
if c[0] not in this.keys():
apThis = False
elif c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s:%s to %s' % (m[1][0][1],targ,m[2][1])
thisval = self.da[m[1][0][1]].get( targ, None )
self.da[m[1][0][1]][targ] = m[2][1]
self.atMapLog.write( '@ax:"%s","%s","%s","%s","%s"\n' % (self.fpath, m[1][0][1], targ, thisval, m[2][1]) )
elif m[1][0][0] == "@":
this = self.ga
apThis = True
## apply change where attribute absent only
for c in m[1][1:]:
if c[0] not in this.keys():
if c[1] != '__absent__':
apThis = False
elif c[1] == '__absent__' or c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s to %s' % (targ,m[2][1])
thisval = self.ga.get( targ, None )
self.ga[targ] = m[2][1]
self.atMapLog.write( '@:"%s","%s","%s","%s","%s"\n' % (self.fpath, 'ga', targ, thisval, m[2][1]) )
##
if targ in globalAttributesInFn:
i = globalAttributesInFn.index(targ)
thisval = self.fparts[ i ]
self.fparts[ i ] = m[2][1]
self.fn = string.join( self.fparts, '_' ) + '.nc'
self.atMapLog.write( '@fn:"%s","%s","%s"\n' % (self.fpath, thisval, m[2][1]) )
else:
print 'Token %s not recognised' % m[1][0][0]
|