import tarfile
import os
import shelve
import logging
import logging.config
import indexer.callnumber as call_number
from indexer import oai_reader
class Builder(object):
"""
Handles the entire build process
This is a bit of a God object, which is unfortunate.
"""
def __init__(self, categorizer, records, elasticsearch, records_seen):
"""
:type categorizer: indexer.categorizer.Categorizer
:type records: indexer.record_store.RecordStore
:param records: the record store
:type elasticsearch: indexer.elasticsearch_indexer.ElasticSearchIndexer
:param elasticsearch: elasticsearch writer
:type records_seen: shelve.Shelf
:param records_seen: a shelf tracking which tarballs and records have already been indexed
:return:
"""
self.records_seen = records_seen
self.adds = 0
self.deletes = 0
self.categorizer = categorizer
self.records = records
self.elasticsearch = elasticsearch
self.building = False
self.current_tarball = ''
self.current_oai = ''
self.logger = logging.getLogger(__name__)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print("Adds: {}".format(self.adds))
print("Dels: {}".format(self.deletes))
self.elasticsearch.close()
self.records_seen.close()
self.records.close()
def index(self, src_directory, since, until):
"""
:param src_directory: the directory containing the source tarballs
:param since: index only records added after this date
:param until: index only records added before this date
:return:
"""
self.building = True
os.chdir(src_directory)
raw_file_list = os.listdir(src_directory)
all_files = sorted(raw_file_list, key=lambda x: os.path.getmtime(x), reverse=True)
tarballs = list(
filter(lambda x: x.endswith('tar.gz') and until > os.path.getmtime(src_directory + '/' + x) > since,
all_files))
for tarball in tarballs:
self.current_tarball = tarball
full_path = src_directory + '/' + tarball
if full_path not in self.records_seen:
self.read_tarball(full_path)
self.records_seen[full_path] = True
self.records.close()
self.building = False
def reindex(self):
try:
for oai_string in self.records:
self.read_oai(oai_string)
except IndexError:
pass
def read_oai(self, oai_string):
"""
Reads a single OAI record
:param oai_string: str
:return: OAIRecord
"""
oai_record = oai_reader.read(oai_string)
return oai_record
def read_tarball(self, tarball_file):
"""
Reads a tarball containing multiple OAI records
:param tarball_file: str
:return:
"""
tar = tarfile.open(tarball_file, 'r', encoding='utf-8')
for tarinfo in tar:
self.read_tarred_file(tar, tarball_file, tarinfo)
def read_tarred_file(self, tar, tarball_file, tarinfo):
"""
Reads a single file from within a tarfile, containing one OAI record
:param tar:
:param tarball_file:
:param tarinfo:
:return:
"""
self.current_oai = tarinfo.name
try:
(name, extension) = tarinfo.name.split('.')
except ValueError:
name = ''
self.logger.error('No name or extension: ' + tarinfo.name + " in " + tarball_file)
record_id = 'urm_publish-' + name
if record_id not in self.records_seen:
try:
f = tar.extractfile(tarinfo)
contents = f.read()
contents = contents.decode('utf-8')
oai_record = self.read_oai(contents)
if oai_record.status == 'deleted':
self.delete_record(oai_record.id)
elif oai_record.status == 'new' or oai_record.status == 'updated':
self.add_record(oai_record)
else:
self.logger.error('Unrecognized status {0} for record {1}'.format(oai_record.status, oai_record.id))
self.records_seen[oai_record.id] = True
except (ValueError, AttributeError):
self.logger.exception('Error reading {0}'.format(self.current_tarball))
def delete_record(self, id):
self.deletes += 1
self.elasticsearch.delete(id)
self.records.delete(id)
def add_record(self, oai_record):
self.adds += 1
self._write_to_catalog_index(oai_record)
self.records.add(oai_record)
def _only_at_law(self, locations):
"""
Returns true if a record is Law Library-only
:type locations: list
:param locations: a list of locations
:return:
"""
if not locations:
return False
for location in locations:
if not location.startswith('LAW'):
return False
return True
def _write_to_catalog_index(self, oai_record):
"""
Add a record to the catalog index
:type oai_record: indexer.oai_record.OAIRecord
:param oai_record:
:return:
"""
index_record = oai_record.index_record
try:
call_nums_norm = [call_number.normalize(lcc) for lcc in index_record.callnum]
taxonomies = self.categorizer.categorize(collections=index_record.collections,
locations=index_record.location,
lccs_norm=call_nums_norm)
except ValueError:
self.logger.info("Strange callnumber {0} for {1}".format(index_record.callnum, oai_record.id))
taxonomies = []
tax1 = set()
tax2 = set()
tax3 = set()
for taxonomy in taxonomies:
tax1.add(taxonomy[1])
tax2.add(taxonomy[2])
try:
tax3.add(taxonomy[3])
except KeyError:
pass
index_record.tax1 = list(tax1)
index_record.tax2 = list(tax2)
index_record.tax3 = list(tax3)
self.elasticsearch.add_catalog_record(oai_record)
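
# Illustrative usage sketch (not part of the original module): the categorizer,
# record store and Elasticsearch writer are assumed to be constructed elsewhere
# in the indexer package, and build_index() below is a hypothetical helper
# showing how Builder is meant to be driven as a context manager.
def build_index(categorizer, record_store, es_writer, shelf_path, src_directory, since, until):
    records_seen = shelve.open(shelf_path)
    with Builder(categorizer, record_store, es_writer, records_seen) as builder:
        # Builder.__exit__ prints the add/delete counts and closes the
        # Elasticsearch writer, the shelf, and the record store.
        builder.index(src_directory, since, until)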
import argparse
import re
import time
import curses
import bisect
import queue
import can
from argparse_addons import Integer
from .. import database
from .__utils__ import format_message
from .__utils__ import format_multiplexed_name
class QuitError(Exception):
pass
class Monitor(can.Listener):
def __init__(self, stdscr, args):
self._stdscr = stdscr
print(f'Reading bus description file "{args.database}"...\r')
self._dbase = database.load_file(args.database,
encoding=args.encoding,
frame_id_mask=args.frame_id_mask,
strict=not args.no_strict)
self._single_line = args.single_line
self._filtered_sorted_message_names = []
self._filter = ''
self._filter_cursor_pos = 0
self._compiled_filter = None
self._formatted_messages = {}
self._playing = True
self._modified = True
self._show_filter = False
self._queue = queue.Queue()
self._nrows, self._ncols = stdscr.getmaxyx()
self._received = 0
self._discarded = 0
self._basetime = None
self._page_first_row = 0
stdscr.keypad(True)
stdscr.nodelay(True)
curses.use_default_colors()
curses.curs_set(False)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_BLACK)
bus = self.create_bus(args)
self._notifier = can.Notifier(bus, [self])
def create_bus(self, args):
kwargs = {}
if args.bit_rate is not None:
kwargs['bitrate'] = int(args.bit_rate)
if args.fd:
kwargs['fd'] = True
try:
return can.Bus(bustype=args.bus_type,
channel=args.channel,
**kwargs)
except Exception:
raise Exception(
"Failed to create CAN bus with bustype='{}' and "
"channel='{}'.".format(args.bus_type,
args.channel))
def run(self, max_num_keys_per_tick=-1):
while True:
try:
self.tick(max_num_keys_per_tick)
except QuitError:
break
time.sleep(0.05)
def tick(self, max_num_keys=-1):
modified = self.update()
if modified:
self.redraw()
self.process_user_input(max_num_keys)
def redraw(self):
# Clear the screen.
self._stdscr.clear()
# Draw everything.
self.draw_stats(0)
self.draw_title(1)
lines = []
for name in self._filtered_sorted_message_names:
for line in self._formatted_messages[name]:
lines.append(line)
# Only render the visible screen. We only have (self._nrows - 3)
# available rows to draw on, due to the persistent TUI features that
# are drawn:
#
# - line 0: stats
# - line 1: title
# - line (n - 1): menu
num_actual_usable_rows = self._nrows - 2 - 1
row = 2
# make sure that we don't overshoot the last line of
# content. this is a bit of a hack, because manipulation of
# the controls is not supposed to happen within this method
if len(lines) < self._page_first_row + num_actual_usable_rows:
self._page_first_row = max(0, len(lines) - num_actual_usable_rows)
for line in lines[self._page_first_row:self._page_first_row + num_actual_usable_rows]:
self.addstr(row, 0, line)
row += 1
self.draw_menu(self._nrows - 1)
# Refresh the screen.
self._stdscr.refresh()
def draw_stats(self, row):
status_text = \
f'Received: {self._received}, Discarded: {self._discarded}, Errors: 0'
if self._filter:
status_text += f', Filter: {self._filter}'
self.addstr(row, 0, status_text)
def draw_title(self, row):
self.addstr_color(row,
0,
self.stretch(' TIMESTAMP MESSAGE'),
curses.color_pair(1))
def draw_menu(self, row):
if self._show_filter:
col = 0
# text before cursor
text = 'Filter regex: ' + self._filter[:self._filter_cursor_pos]
self.addstr_color(row,
col,
text,
curses.color_pair(2))
col = len(text)
# cursor
if self._filter_cursor_pos >= len(self._filter):
c = " "
else:
c = self._filter[self._filter_cursor_pos]
self.addstr_color(row,
col,
c,
curses.color_pair(3))
col += 1
# text after cursor
text = self._filter[self._filter_cursor_pos + 1:]
if len(text) > 0:
self.addstr_color(row,
col,
text,
curses.color_pair(2))
col += len(text)
# fill rest of line
self.addstr_color(row,
col,
' '*(self._ncols - col),
curses.color_pair(2))
else:
text = 'q: Quit, f: Filter, p: Play/Pause, r: Reset'
self.addstr_color(row,
0,
self.stretch(text),
curses.color_pair(2))
def addstr(self, row, col, text):
try:
self._stdscr.addstr(row, col, text)
except curses.error:
pass
def addstr_color(self, row, col, text, color):
try:
self._stdscr.addstr(row, col, text, color)
except curses.error:
pass
def stretch(self, text):
return text + ' ' * (self._ncols - len(text))
def process_user_input(self, max_num_keys=-1):
while max_num_keys != 0:
max_num_keys -= 1
try:
key = self._stdscr.getkey()
except curses.error:
return
if self._show_filter:
self.process_user_input_filter(key)
else:
self.process_user_input_menu(key)
def process_user_input_menu(self, key):
if key == 'q':
raise QuitError()
elif key == 'p':
self._playing = not self._playing
elif key == 'r':
self._playing = True
self._filtered_sorted_message_names = []
self._formatted_messages = {}
self._received = 0
self._discarded = 0
self._basetime = None
self._filter = ''
self._compiled_filter = None
self._modified = True
self._page_first_row = 0
while not self._queue.empty():
self._queue.get()
elif key in ['f', '/']:
self._old_filter = self._filter
self._show_filter = True
self._filter_cursor_pos = len(self._filter)
self._modified = True
curses.curs_set(True)
elif key in ['KEY_UP']:
self.line_up()
elif key in ['KEY_DOWN']:
self.line_down()
elif key in ['KEY_PPAGE']:
self.page_up()
elif key in ['KEY_NPAGE']:
self.page_down()
def line_down(self):
# Increment line
self._page_first_row += 1
self._modified = True
def line_up(self):
# Decrement line
if self._page_first_row > 0:
self._page_first_row -= 1
else:
self._page_first_row = 0
self._modified = True
def page_down(self):
num_actual_usable_rows = self._nrows - 2 - 1
# Increment page
self._page_first_row += num_actual_usable_rows
self._modified = True
def page_up(self):
num_actual_usable_rows = self._nrows - 2 - 1
# Decrement page
if self._page_first_row > num_actual_usable_rows:
self._page_first_row -= num_actual_usable_rows
else:
self._page_first_row = 0
self._modified = True
def compile_filter(self):
try:
self._compiled_filter = re.compile(self._filter)
except re.error:
self._compiled_filter = None
def process_user_input_filter(self, key):
if key == '\n':
self._show_filter = False
curses.curs_set(False)
elif key == chr(27):
# Escape
self._show_filter = False
self._filter = self._old_filter
del self._old_filter
curses.curs_set(False)
elif key in ['KEY_BACKSPACE', '\b']:
if self._filter_cursor_pos > 0:
self._filter = \
self._filter[:self._filter_cursor_pos - 1] + \
self._filter[self._filter_cursor_pos:]
self._filter_cursor_pos -= 1
elif key == 'KEY_DC':
# delete key
if self._filter_cursor_pos < len(self._filter):
self._filter = \
self._filter[:self._filter_cursor_pos] + \
self._filter[self._filter_cursor_pos + 1:]
elif key == 'KEY_LEFT':
if self._filter_cursor_pos > 0:
self._filter_cursor_pos -= 1
elif key == 'KEY_RIGHT':
if self._filter_cursor_pos < len(self._filter):
self._filter_cursor_pos += 1
elif key in ['KEY_UP']:
self.line_up()
elif key in ['KEY_DOWN']:
self.line_down()
elif key in ['KEY_PPAGE']:
self.page_up()
elif key in ['KEY_NPAGE']:
self.page_down()
else:
# we ignore keys with more than one character here. These
# (mostly?) are control keys like KEY_UP, KEY_DOWN, etc.
if len(key) == 1:
self._filter = \
self._filter[:self._filter_cursor_pos] + \
key + \
self._filter[self._filter_cursor_pos:]
self._filter_cursor_pos += 1
self.compile_filter()
self._filtered_sorted_message_names = []
for name in self._formatted_messages:
self.insort_filtered(name)
self._modified = True
def try_update_message(self):
message = self._queue.get_nowait()
frame_id = message.arbitration_id
data = message.data
timestamp = message.timestamp
if self._basetime is None:
self._basetime = timestamp
timestamp -= self._basetime
self._received += 1
try:
message = self._dbase.get_message_by_frame_id(frame_id)
except KeyError:
self._discarded += 1
return
if len(data) != message.length:
self._discarded += 1
return
name = message.name
if message.is_multiplexed():
# Handle the case where a multiplexer index is used that isn't
# specified in the DBC file (i.e. outside of the range). In this
# case, we just discard the message, like we do when the CAN
# message ID or length doesn't match what's specified in the DBC.
try:
name = format_multiplexed_name(message, data, True)
except database.DecodeError:
self._discarded += 1
return
if self._single_line:
formatted = format_message(message, data, True, True)
self._formatted_messages[name] = [
'{:12.3f} {}'.format(timestamp, formatted)
]
else:
formatted = format_message(message, data, True, False)
lines = formatted.splitlines()
formatted = ['{:12.3f} {}'.format(timestamp, lines[1])]
formatted += [14 * ' ' + line for line in lines[2:]]
self._formatted_messages[name] = formatted
if name not in self._filtered_sorted_message_names:
self.insort_filtered(name)
def update_messages(self):
modified = False
try:
while True:
self.try_update_message()
modified = True
except queue.Empty:
pass
return modified
def update(self):
if self._playing:
modified = self.update_messages()
else:
modified = False
if self._modified:
self._modified = False
modified = True
if curses.is_term_resized(self._nrows, self._ncols):
self._nrows, self._ncols = self._stdscr.getmaxyx()
modified = True
return modified
def insort_filtered(self, name):
if self._compiled_filter is None or self._compiled_filter.search(name):
bisect.insort(self._filtered_sorted_message_names,
name)
def on_message_received(self, msg):
self._queue.put(msg)
def _do_monitor(args):
def monitor(stdscr):
Monitor(stdscr, args).run()
try:
curses.wrapper(monitor)
except KeyboardInterrupt:
pass
def add_subparser(subparsers):
monitor_parser = subparsers.add_parser(
'monitor',
description='Monitor CAN bus traffic in a text based user interface.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
monitor_parser.add_argument(
'-s', '--single-line',
action='store_true',
help='Print the decoded message on a single line.')
monitor_parser.add_argument(
'-e', '--encoding',
help='File encoding.')
monitor_parser.add_argument(
'--no-strict',
action='store_true',
help='Skip database consistency checks.')
monitor_parser.add_argument(
'-m', '--frame-id-mask',
type=Integer(0),
help=('Only compare selected frame id bits to find the message in the '
'database. By default the received and database frame ids must '
'be equal for a match.'))
monitor_parser.add_argument(
'-b', '--bus-type',
default='socketcan',
help='Python CAN bus type.')
monitor_parser.add_argument(
'-c', '--channel',
default='vcan0',
help='Python CAN bus channel.')
monitor_parser.add_argument(
'-B', '--bit-rate',
help='Python CAN bus bit rate.')
monitor_parser.add_argument(
'-f', '--fd',
action='store_true',
help='Python CAN CAN-FD bus.')
monitor_parser.add_argument(
'database',
help='Database file.')
monitor_parser.set_defaults(func=_do_monitor)
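
# Illustrative wiring sketch (the real command-line front end is assumed to live
# elsewhere in the package): it shows how add_subparser() registers the
# 'monitor' subcommand and how the set_defaults(func=_do_monitor) hook is
# dispatched. _example_main is a hypothetical name.
def _example_main(argv=None):
    parser = argparse.ArgumentParser(description='Example front end.')
    subparsers = parser.add_subparsers(dest='subcommand')
    subparsers.required = True
    add_subparser(subparsers)
    args = parser.parse_args(argv)
    args.func(args)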
# pylint: disable=wrong-or-nonexistent-copyright-notice
import numpy as np
import pytest
import cirq
def test_projector_matrix():
q0 = cirq.NamedQubit('q0')
zero_projector = cirq.ProjectorString({q0: 0})
one_projector = cirq.ProjectorString({q0: 1})
coeff_projector = cirq.ProjectorString({q0: 0}, 1.23 + 4.56j)
np.testing.assert_allclose(zero_projector.matrix().toarray(), [[1.0, 0.0], [0.0, 0.0]])
np.testing.assert_allclose(one_projector.matrix().toarray(), [[0.0, 0.0], [0.0, 1.0]])
np.testing.assert_allclose(
coeff_projector.matrix().toarray(), [[1.23 + 4.56j, 0.0], [0.0, 0.0]]
)
def test_projector_repr():
q0 = cirq.NamedQubit('q0')
assert (
repr(cirq.ProjectorString({q0: 0}))
== "cirq.ProjectorString(projector_dict={cirq.NamedQubit('q0'): 0},coefficient=(1+0j))"
)
def test_projector_from_np_array():
q0 = cirq.NamedQubit('q0')
zero_projector = cirq.ProjectorString({q0: 0})
np.testing.assert_allclose(zero_projector.matrix().toarray(), [[1.0, 0.0], [0.0, 0.0]])
def test_projector_matrix_missing_qid():
q0, q1 = cirq.LineQubit.range(2)
proj = cirq.ProjectorString({q0: 0})
proj_with_coefficient = cirq.ProjectorString({q0: 0}, 1.23 + 4.56j)
np.testing.assert_allclose(proj.matrix().toarray(), np.diag([1.0, 0.0]))
np.testing.assert_allclose(proj.matrix([q0]).toarray(), np.diag([1.0, 0.0]))
np.testing.assert_allclose(proj.matrix([q1]).toarray(), np.diag([1.0, 1.0]))
np.testing.assert_allclose(proj.matrix([q0, q1]).toarray(), np.diag([1.0, 1.0, 0.0, 0.0]))
np.testing.assert_allclose(proj.matrix([q1, q0]).toarray(), np.diag([1.0, 0.0, 1.0, 0.0]))
np.testing.assert_allclose(
proj_with_coefficient.matrix([q1, q0]).toarray(),
np.diag([1.23 + 4.56j, 0.0, 1.23 + 4.56j, 0.0]),
)
def test_equality():
q0 = cirq.NamedQubit('q0')
obj1a = cirq.ProjectorString({q0: 0})
obj1b = cirq.ProjectorString({q0: 0})
obj2 = cirq.ProjectorString({q0: 1})
obj3 = cirq.ProjectorString({q0: 1}, coefficient=0.20160913)
eq = cirq.testing.EqualsTester()
eq.add_equality_group(obj1a, obj1b)
eq.add_equality_group(obj2)
eq.add_equality_group(obj3)
def test_get_values():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({q0: 0}, 1.23 + 4.56j)
assert len(d.projector_dict) == 1
assert d.projector_dict[q0] == 0
assert d.coefficient == 1.23 + 4.56j
def test_expectation_from_state_vector_basis_states_empty():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({})
np.testing.assert_allclose(d.expectation_from_state_vector(np.array([1.0, 0.0]), {q0: 0}), 1.0)
def test_expectation_from_state_vector_basis_states_single_qubits():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({q0: 0})
np.testing.assert_allclose(d.expectation_from_state_vector(np.array([1.0, 0.0]), {q0: 0}), 1.0)
np.testing.assert_allclose(d.expectation_from_state_vector(np.array([0.0, 1.0]), {q0: 0}), 0.0)
def test_expectation_from_state_vector_basis_states_three_qubits():
q0 = cirq.NamedQubit('q0')
q1 = cirq.NamedQubit('q1')
q2 = cirq.NamedQubit('q2')
d_1qbit = cirq.ProjectorString({q1: 1})
d_2qbits = cirq.ProjectorString({q0: 0, q1: 1})
state_vector = cirq.testing.random_superposition(8)
# If the mapping of state_vector is {q0: 0, q1: 1, q2: 2}, then the coefficients are:
# 0: (q0, q1, q2) = (0, 0, 0)
# 1: (q0, q1, q2) = (0, 0, 1)
# 2: (q0, q1, q2) = (0, 1, 0) -> Projected on
# 3: (q0, q1, q2) = (0, 1, 1) -> Projected on
# 4: (q0, q1, q2) = (1, 0, 0)
# 5: (q0, q1, q2) = (1, 0, 1)
# 6: (q0, q1, q2) = (1, 1, 0)
# 7: (q0, q1, q2) = (1, 1, 1)
np.testing.assert_allclose(
d_2qbits.expectation_from_state_vector(state_vector, {q0: 0, q1: 1, q2: 2}),
sum(abs(state_vector[i]) ** 2 for i in [2, 3]),
)
# Same as above except it's only for q1=1, which happens for indices 2, 3, 6, and 7:
np.testing.assert_allclose(
d_1qbit.expectation_from_state_vector(state_vector, {q0: 0, q1: 1, q2: 2}),
sum(abs(state_vector[i]) ** 2 for i in [2, 3, 6, 7]),
)
# Here we have a different mapping, but the idea is the same.
# 0: (q0, q2, q1) = (0, 0, 0)
# 1: (q0, q2, q1) = (0, 0, 1) -> Projected on
# 2: (q0, q2, q1) = (0, 1, 0)
# 3: (q0, q2, q1) = (0, 1, 1) -> Projected on
# 4: (q0, q2, q1) = (1, 0, 0)
# 5: (q0, q2, q1) = (1, 0, 1)
# 6: (q0, q2, q1) = (1, 1, 0)
# 7: (q0, q2, q1) = (1, 1, 1)
np.testing.assert_allclose(
d_2qbits.expectation_from_state_vector(state_vector, {q0: 0, q1: 2, q2: 1}),
sum(abs(state_vector[i]) ** 2 for i in [1, 3]),
)
# Same as above except it's only for q1=1, which happens for indices 1, 3, 5, and 7:
np.testing.assert_allclose(
d_1qbit.expectation_from_state_vector(state_vector, {q0: 0, q1: 2, q2: 1}),
sum(abs(state_vector[i]) ** 2 for i in [1, 3, 5, 7]),
)
def test_expectation_from_density_matrix_three_qubits():
q0 = cirq.NamedQubit('q0')
q1 = cirq.NamedQubit('q1')
q2 = cirq.NamedQubit('q2')
d_1qbit = cirq.ProjectorString({q1: 1})
d_2qbits = cirq.ProjectorString({q0: 0, q1: 1})
state = cirq.testing.random_density_matrix(8)
# If the mapping of state is {q0: 0, q1: 1, q2: 2}, then the coefficients are:
# 0: (q0, q1, q2) = (0, 0, 0)
# 1: (q0, q1, q2) = (0, 0, 1)
# 2: (q0, q1, q2) = (0, 1, 0) -> Projected on
# 3: (q0, q1, q2) = (0, 1, 1) -> Projected on
# 4: (q0, q1, q2) = (1, 0, 0)
# 5: (q0, q1, q2) = (1, 0, 1)
# 6: (q0, q1, q2) = (1, 1, 0)
# 7: (q0, q1, q2) = (1, 1, 1)
np.testing.assert_allclose(
d_2qbits.expectation_from_density_matrix(state, {q0: 0, q1: 1, q2: 2}),
sum(state[i][i].real for i in [2, 3]),
)
# Same as above except it's only for q1=1, which happens for indices 2, 3, 6, and 7:
np.testing.assert_allclose(
d_1qbit.expectation_from_density_matrix(state, {q0: 0, q1: 1, q2: 2}),
sum(state[i][i].real for i in [2, 3, 6, 7]),
)
# Here we have a different mapping, but the idea is the same.
# 0: (q0, q2, q1) = (0, 0, 0)
# 1: (q0, q2, q1) = (0, 0, 1) -> Projected on
# 2: (q0, q2, q1) = (0, 1, 0)
# 3: (q0, q2, q1) = (0, 1, 1) -> Projected on
# 4: (q0, q2, q1) = (1, 0, 0)
# 5: (q0, q2, q1) = (1, 0, 1)
# 6: (q0, q2, q1) = (1, 1, 0)
# 7: (q0, q2, q1) = (1, 1, 1)
np.testing.assert_allclose(
d_2qbits.expectation_from_density_matrix(state, {q0: 0, q1: 2, q2: 1}),
sum(state[i][i].real for i in [1, 3]),
)
# Same as above except it's only for q1=1, which happens for indices 1, 3, 5, and 7:
np.testing.assert_allclose(
d_1qbit.expectation_from_density_matrix(state, {q0: 0, q1: 2, q2: 1}),
sum(state[i][i].real for i in [1, 3, 5, 7]),
)
def test_consistency_state_vector_and_density_matrix():
q0 = cirq.NamedQubit('q0')
q1 = cirq.NamedQubit('q1')
q2 = cirq.NamedQubit('q2')
state_vector = cirq.testing.random_superposition(8)
state = np.einsum('i,j->ij', state_vector, np.conj(state_vector))
for proj_qubit in q0, q1, q2:
for proj_idx in [0, 1]:
d = cirq.ProjectorString({proj_qubit: proj_idx})
np.testing.assert_allclose(
d.expectation_from_state_vector(state_vector, {q0: 0, q1: 1, q2: 2}),
d.expectation_from_density_matrix(state, {q0: 0, q1: 1, q2: 2}),
)
def test_expectation_higher_dims():
qubit = cirq.NamedQid('q0', dimension=2)
qutrit = cirq.NamedQid('q1', dimension=3)
with pytest.raises(ValueError, match="Only qubits are supported"):
cirq.ProjectorString({qutrit: 0})
d = cirq.ProjectorString({qubit: 0})
with pytest.raises(ValueError, match="Only qubits are supported"):
_ = (d.expectation_from_state_vector(np.zeros(2 * 3), {qubit: 0, qutrit: 0}),)
def test_expectation_with_coefficient():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({q0: 0}, coefficient=(0.6 + 0.4j))
np.testing.assert_allclose(
d.expectation_from_state_vector(np.array([[1.0, 0.0]]), qid_map={q0: 0}), 0.6 + 0.4j
)
np.testing.assert_allclose(
d.expectation_from_density_matrix(np.array([[1.0, 0.0], [0.0, 0.0]]), {q0: 0}), 0.6 + 0.4j
)
def test_expectation_from_density_matrix_basis_states_empty():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({})
np.testing.assert_allclose(
d.expectation_from_density_matrix(np.array([[1.0, 0.0], [0.0, 0.0]]), {q0: 0}), 1.0
)
def test_expectation_from_density_matrix_basis_states_single_qubits():
q0 = cirq.NamedQubit('q0')
d = cirq.ProjectorString({q0: 0})
np.testing.assert_allclose(
d.expectation_from_density_matrix(np.array([[1.0, 0.0], [0.0, 0.0]]), {q0: 0}), 1.0
)
np.testing.assert_allclose(
d.expectation_from_density_matrix(np.array([[0.0, 0.0], [0.0, 1.0]]), {q0: 0}), 0.0
)
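
def test_expectation_matches_projector_matrix():
    # Illustrative cross-check (not part of the original suite): the expectation
    # value reported by ProjectorString should agree with <psi|P|psi> computed
    # from its explicit matrix when the same qubit ordering is used.
    q0, q1 = cirq.LineQubit.range(2)
    proj = cirq.ProjectorString({q0: 0, q1: 1})
    state_vector = cirq.testing.random_superposition(4)
    expected = np.vdot(state_vector, proj.matrix([q0, q1]).toarray() @ state_vector)
    np.testing.assert_allclose(
        proj.expectation_from_state_vector(state_vector, {q0: 0, q1: 1}), expected
    )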
#! /usr/bin/env python
"""
pyparsing-based grammar for the DCPU-16 0x10c assembler
"""
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
try:
basestring
except NameError:
basestring = str
import logging
log = logging.getLogger("dcpu16_asm")
log.setLevel(logging.DEBUG)
import argparse
import os
import struct
import sys
import pyparsing as P
from collections import defaultdict
# Replace the debug actions so that the results go to the debug log rather
# than stdout, so that the output can be usefully piped.
def _defaultStartDebugAction(instring, loc, expr):
log.debug("Match " + P._ustr(expr) + " at loc " + P._ustr(loc) + "(%d,%d)"
% (P.lineno(loc, instring), P.col(loc, instring)))
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
log.debug("Matched " + P._ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
log.debug("Exception raised:" + P._ustr(exc))
P._defaultStartDebugAction = _defaultStartDebugAction
P._defaultSuccessDebugAction = _defaultSuccessDebugAction
P._defaultExceptionDebugAction = _defaultExceptionDebugAction
# Run with "DEBUG=1 python ./asm_pyparsing.py"
DEBUG = "DEBUG" in os.environ
WORD_MAX = 0xFFFF
# otherwise \n is also treated as ignorable whitespace
P.ParserElement.setDefaultWhitespaceChars(" \t")
identifier = P.Word(P.alphas + "_", P.alphanums + "_")
label = P.Combine(P.Literal(":").suppress() + identifier)
comment = P.Literal(";").suppress() + P.restOfLine
register = (P.Or(P.CaselessKeyword(x) for x in "ABCIJXYZO")
| P.oneOf("PC SP", caseless=True))
stack_op = P.oneOf("PEEK POP PUSH", caseless=True)
hex_literal = P.Combine(P.Literal("0x") + P.Word(P.hexnums))
dec_literal = P.Word(P.nums)
numeric_literal = hex_literal | dec_literal
literal = numeric_literal | identifier
opcode = P.oneOf("SET ADD SUB MUL DIV MOD SHL SHR "
"AND BOR XOR IFE IFN IFG IFB JSR", caseless=True)
basic_operand = P.Group(register("register")
| stack_op("stack_op")
| literal("literal"))
indirect_expr = P.Group(literal("literal")
+ P.Literal("+")
+ register("register"))
hex_literal.setParseAction(lambda s, l, t: int(t[0], 16))
dec_literal.setParseAction(lambda s, l, t: int(t[0]))
register.addParseAction(P.upcaseTokens)
stack_op.addParseAction(P.upcaseTokens)
opcode.addParseAction(P.upcaseTokens)
def sandwich(brackets, expr):
l, r = brackets
return P.Literal(l).suppress() + expr + P.Literal(r).suppress()
indirection_content = indirect_expr("expr") | basic_operand("basic")
indirection = P.Group(sandwich("[]", indirection_content) |
sandwich("()", indirection_content))
operand = basic_operand("basic") | indirection("indirect")
def make_words(data):
return [a << 8 | b for a, b in izip_longest(data[::2], data[1::2], fillvalue=0)]
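# Worked example (illustrative): make_words([0x48, 0x69, 0x21]) pairs the bytes
# big-endian and zero-pads the odd trailing byte, yielding [0x4869, 0x2100].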
def wordize_string(s, l, tokens):
char_codes = [ord(c) for c in tokens.string]
# TODO(pwaller): possibly add syntax for packing string data?
packed = False
return make_words(char_codes) if packed else char_codes
quoted_string = P.quotedString("string").addParseAction(P.removeQuotes).addParseAction(wordize_string)
datum = quoted_string | numeric_literal
def parse_data(string, loc, tokens):
result = []
for token in tokens:
values = datum.parseString(token).asList()
assert all(v < WORD_MAX for v in values), "Datum exceeds word size"
result.extend(values)
return result
# TODO(pwaller): Support for using macro argument values in data statement
datalist = P.commaSeparatedList.copy().setParseAction(parse_data)
data = P.CaselessKeyword("DAT")("opcode") + P.Group(datalist)("data")
line = P.Forward()
macro_definition_args = P.Group(P.delimitedList(P.Optional(identifier("arg"))))("args")
macro_definition = P.Group(
P.CaselessKeyword("#macro").suppress()
+ identifier("name")
+ sandwich("()", macro_definition_args)
+ sandwich("{}", P.Group(P.OneOrMore(line))("lines"))
)("macro_definition")
macro_argument = operand | datum
macro_call_args = P.Group(P.delimitedList(P.Group(macro_argument)("arg")))("args")
macro_call = P.Group(
identifier("name") + sandwich("()", macro_call_args)
)("macro_call")
instruction = (
opcode("opcode")
+ P.Group(operand)("first")
+ P.Optional(P.Literal(",").suppress() + P.Group(operand)("second"))
)
statement = P.Group(
instruction
| data
| macro_definition
| macro_call
)
line << P.Group(
P.Optional(label("label"))
+ P.Optional(statement("statement"), default=None)
+ P.Optional(comment("comment"))
+ P.lineEnd.suppress()
)("line")
full_grammar = (
P.stringStart
+ P.ZeroOrMore(line)
+ (P.stringEnd | P.Literal("#stop").suppress())
)("program")
if DEBUG:
# Turn setdebug on for all parse elements
for name, var in locals().copy().items():
if isinstance(var, P.ParserElement):
var.setName(name).setDebug()
def debug_line(string, location, tokens):
"""
Show the current line number and content being parsed
"""
lineno = string[:location].count("\n")
remaining = string[location:]
line_end = remaining.index("\n") if "\n" in remaining else None
log.debug("====")
log.debug(" Parse line {0}".format(lineno))
log.debug(" '{0}'".format(remaining[:line_end]))
log.debug("====")
line.setDebugActions(debug_line, None, None)
IDENTIFIERS = {"A": 0x0, "B": 0x1, "C": 0x2, "X": 0x3, "Y": 0x4, "Z": 0x5,
"I": 0x6, "J": 0x7,
"POP": 0x18, "PEEK": 0x19, "PUSH": 0x1A,
"SP": 0x1B, "PC": 0x1C,
"O": 0x1D}
OPCODES = {"SET": 0x1, "ADD": 0x2, "SUB": 0x3, "MUL": 0x4, "DIV": 0x5,
"MOD": 0x6, "SHL": 0x7, "SHR": 0x8, "AND": 0x9, "BOR": 0xA,
"XOR": 0xB, "IFE": 0xC, "IFN": 0xD, "IFG": 0xE, "IFB": 0xF}
def process_operand(o, lvalue=False):
"""
Returns (a, x) where a is a value which identifies the nature of the value
and x is either None or a word to be inserted directly into the output stream
(e.g. a literal value >= 0x20)
"""
# TODO(pwaller): Reject invalid lvalues
def invalid_op(reason):
# TODO(pwaller): Need to indicate origin of error
return RuntimeError("Invalid operand, {0}: {1}"
.format(reason, o.asXML()))
def check_indirect_register(register):
if register not in "ABCXYZIJ":
raise invalid_op("only registers A-J can be used for indirection")
if o.basic:
# Literals, stack ops, registers
b = o.basic
if b.register:
return IDENTIFIERS[b.register], None
elif b.stack_op:
return IDENTIFIERS[b.stack_op], None
elif b.literal is not None:
l = b.literal
if not isinstance(l, basestring) and l < 0x20:
return 0x20 | l, None
if l == "":
raise invalid_op("this is a bug")
if isinstance(l, int) and not 0 <= l <= WORD_MAX:
raise invalid_op("literal exceeds word size")
return 0x1F, l
elif o.indirect:
i = o.indirect
if i.basic:
# [register], [literal]
ib = i.basic
if ib.register:
check_indirect_register(ib.register)
return 0x8 + IDENTIFIERS[ib.register], None
elif ib.stack_op:
raise invalid_op("don't use PUSH/POP/PEEK with indirection")
elif ib.literal is not None:
return 0x1E, ib.literal
elif i.expr:
# [register+literal]
ie = i.expr
check_indirect_register(ie.register)
return 0x10 | IDENTIFIERS[ie.register], ie.literal
raise invalid_op("this is a bug")
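# Worked example (illustrative) of how generate() below packs a basic
# instruction word as (b << 10) + (a << 4) + o, where a encodes the first
# operand and b the second:
#   SET A, 0x10
#     o = OPCODES['SET']    = 0x1
#     a = IDENTIFIERS['A']  = 0x00   (register A)
#     b = 0x20 | 0x10       = 0x30   (inline short literal, since 0x10 < 0x20)
#     word = (0x30 << 10) + (0x00 << 4) + 0x1 = 0xC001
# Literals >= 0x20 are encoded as 0x1F and emitted as an extra following word.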
def codegen(source, input_filename="<unknown>"):
try:
parsed = full_grammar.parseString(source)
except P.ParseException as exc:
log.fatal("Parse error:")
log.fatal(" {0}:{1}:{2} HERE {3}"
.format(input_filename, exc.lineno, exc.col,
exc.markInputline()))
return None
log.debug("=====")
log.debug(" Successful parse, XML syntax interpretation:")
log.debug("=====")
log.debug(parsed.asXML())
labels = {}
macros = {}
program = []
# Number of times a given macro has been called so that we can generate
# unique labels
n_macro_calls = defaultdict(int)
def process_macro_definition(statement):
log.debug("Macro definition: {0}".format(statement.asXML()))
macros[statement.name] = statement
def process_macro_call(offset, statement, context=""):
log.debug("--------------")
log.debug("Macro call: {0}".format(statement.asXML()))
log.debug("--------------")
macroname = statement.name
macro = macros.get(macroname, None)
n_macro_calls[macroname] += 1
context = context + macroname + str(n_macro_calls[macroname])
if not macro:
raise RuntimeError("Call to undefined macro: {0}".format(macroname))
assert len(macro.args) == len(statement.args), (
"Wrong number of arguments to macro call {0!r}".format(macroname))
# TODO(pwaller): Check for collisions between argument name and code
# label
args = {}
log.debug("Populated args:")
for name, arg in zip(macro.args, statement.args):
args[name] = arg
log.debug(" - {0}: {1}".format(name, arg))
lines = []
for l in macro.lines:
new_line = l.copy()
s = l.statement
if s:
new_statement = s.copy()
new_line["statement"] = new_statement
#if l.label: new_line["label"] = context + l.label
# Replace literals whose names are macro arguments
# also, substitute labels with (context, label).
# Resolution of a label happens later by first searching for a label
# called `context + label`, and if it doesn't exist `label` is used.
if s and s.first and s.first.basic and s.first.basic.literal:
if s.first.basic.literal in args:
new_statement["first"] = args[s.first.basic.literal]
elif isinstance(s.first.basic.literal, basestring):
new_basic = s.first.basic.copy()
new_basic["literal"] = context, s.first.basic.literal
new_op = new_statement.first.copy()
new_op["basic"] = new_basic
new_statement["first"] = new_op
if s and s.second and s.second.basic and s.second.basic.literal:
if s.second.basic.literal in args:
new_statement["second"] = args[s.second.basic.literal]
elif isinstance(s.second.basic.literal, basestring):
new_basic = s.second.basic.copy()
new_basic["literal"] = context, s.second.basic.literal
new_op = new_statement.second.copy()
new_op["basic"] = new_basic
new_statement["second"] = new_op
# Replace macro call arguments
if s and s.macro_call:
new_macro_call = s.macro_call.copy()
new_statement["macro_call"] = new_macro_call
new_macro_call_args = s.macro_call.args.copy()
new_statement.macro_call["args"] = new_macro_call_args
for i, arg in enumerate(s.macro_call.args):
if arg.basic.literal not in args:
continue
new_macro_call_args[i] = args[arg.basic.literal]
lines.append(new_line)
log.debug("Populated macro: {0}"
.format("\n".join(l.dump() for l in lines)))
# Do code generation
code = []
for l in lines:
a = generate(offset + len(code), l, context)
log.debug("Codegen for statement: {0}".format(l.asXML()))
log.debug(" Code: {0}".format(a))
code.extend(a)
return code
def generate(offset, line, context=""):
log.debug("Interpreting element {0}: {1}".format(i, line))
if line.label:
label = context + line.label
if label in labels:
# TODO(pwaller): Line indications
msg = "Duplicate label definition! {0}".format(label)
log.fatal(msg)
raise RuntimeError(msg)
labels[label] = offset
s = line.statement
if not s:
return []
if s.macro_definition:
process_macro_definition(s.macro_definition)
return []
elif s.macro_call:
return process_macro_call(offset, s.macro_call, context)
log.debug("Generating for {0}".format(s.asXML(formatted=False)))
if s.opcode == "DAT":
return s.data
if s.opcode == "JSR":
o = 0x00
a, x = 0x01, None
b, y = process_operand(s.first)
else:
o = OPCODES[s.opcode]
a, x = process_operand(s.first, lvalue=True)
b, y = process_operand(s.second)
code = []
code.append(((b << 10) + (a << 4) + o))
if x is not None:
code.append(x)
if y is not None:
code.append(y)
return code
for i, line in enumerate(parsed):
program.extend(generate(len(program), line))
log.debug("Labels: {0}".format(labels))
log.debug("program: {0}".format(program))
# Substitute labels
for i, c in enumerate(program):
if isinstance(c, basestring):
if c not in labels:
raise RuntimeError("Undefined label used: {0}".format(c))
program[i] = labels[c]
elif isinstance(c, tuple):
context, label = c
if context + label in labels:
label = context + label
if label not in labels:
raise RuntimeError("Undefined label used: {0}".format(c))
program[i] = labels[label]
# Turn words into bytes
result = bytes()
for word in program:
result += struct.pack(">H", word)
return result
def main():
parser = argparse.ArgumentParser(
description='A simple pyparsing-based DCPU assembly compiler')
parser.add_argument(
'source', metavar='IN', type=str,
help='file path of the file containing the assembly code')
parser.add_argument(
'destination', metavar='OUT', type=str, nargs='?',
help='file path where to store the binary code')
args = parser.parse_args()
if not log.handlers:
from sys import stderr
handler = logging.StreamHandler(stderr)
log.addHandler(handler)
if not DEBUG:
handler.setLevel(logging.INFO)
if args.source == "-":
program = codegen(sys.stdin.read(), "<stdin>")
else:
with open(args.source) as fd:
program = codegen(fd.read(), args.source)
if program is None:
log.fatal("No program produced.")
if not DEBUG:
log.fatal("Run with DEBUG=1 ./asm_pyparsing.py "
"for more information.")
return 1
if not args.destination:
if os.isatty(sys.stdout.fileno()):
log.fatal("stdout is a tty, not writing binary. "
"Specify destination file or pipe output somewhere")
else:
getattr(sys.stdout, "buffer", sys.stdout).write(program)  # program is bytes; use the binary buffer on Python 3
else:
with open(args.destination, "wb") as fd:
fd.write(program)
log.info("Program written to {0} ({1} bytes, hash={2})"
.format(args.destination, len(program),
hex(abs(hash(program)))))
return 0
if __name__ == "__main__":
raise SystemExit(main())
#!/usr/bin/python
import json
import csv
import sys
import argparse
import os
import uuid
import hashlib # convert incident_id to UUID
import copy
import logging
import re
from datetime import datetime
import ConfigParser
# Default Configuration Settings
cfg = {
'log_level': 'warning',
'log_file': None,
'schemafile': "../vcdb/veris.json",
'enumfile': "../vcdb/veris-enum.json",
'vcdb':False,
'version':"1.3",
'countryfile':'all.json',
'output': os.getcwd(),
'quiet': False,
'repositories': ""
}
def reqSchema(v, base="", mykeylist={}):
"given the schema in v, returns a dict mapping each required key to its type"
if 'required' in v:
if v['required']:
if base not in mykeylist:
mykeylist[base] = v['type']
# mykeylist.append(base)
if v['type']=="object":
for k,v2 in v['properties'].items():
if len(base):
callout = base + "." + k
else:
callout = k
reqSchema(v2, callout, mykeylist)
elif v['type']=="array":
reqSchema(v['items'], base, mykeylist)
return mykeylist
def parseSchema(v, base="", mykeylist=[]):
"given the schema in v, returns a list of dotted key paths found in the schema"
if v['type']=="object":
for k,v2 in v['properties'].items():
if len(base):
callout = base + "." + k
else:
callout = k
parseSchema(v2, callout, mykeylist)
elif v['type']=="array":
parseSchema(v['items'], base, mykeylist)
else:
mykeylist.append(base)
return mykeylist
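# Illustrative example: for a schema fragment such as
#   {"type": "object", "properties": {"victim": {"type": "object",
#     "properties": {"country": {"type": "array", "items": {"type": "string"}}}}}}
# parseSchema() returns the flattened key list ['victim.country'].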
def isnum(x):
x = re.sub('[$,]', '', x)
try:
x=int(float(x))
except (ValueError, TypeError):
return None
return x
def isfloat(x):
x = re.sub('[$,]', '', x)
try:
x=float(x)
except (ValueError, TypeError):
return
return x
def addValue(src, enum, dst, val="list"):
"adds the value at src[enum] to dst, nested under the dotted key path enum, if present in src"
if src.has_key(enum):
if len(src[enum]):
allenum = enum.split('.')
saved = dst
for i in range(len(allenum)-1):
if not saved.has_key(allenum[i]):
saved[allenum[i]] = {}
saved = saved[allenum[i]]
if val=="list":
templist = [x.strip() for x in src[enum].split(',') if len(x)>0 ]
saved[allenum[-1]] = [x for x in templist if len(x)>0 ]
elif val=="string":
saved[allenum[-1]] = unicode(src[enum],errors='ignore')
elif val=="numeric":
if isnum(src[enum]):
saved[allenum[-1]] = isnum(src[enum])
elif val=="integer":
if isnum(src[enum]):
saved[allenum[-1]] = isnum(src[enum])
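# Illustrative example: given a flat CSV row src = {'victim.country': 'US, CA'}
# and an empty dict dst, addValue(src, 'victim.country', dst, val="list")
# nests the value so that dst becomes {'victim': {'country': ['US', 'CA']}}.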
def chkDefault(incident, enum, default):
allenum = enum.split('.')
saved = incident
for i in range(len(allenum)-1):
if not saved.has_key(allenum[i]):
saved[allenum[i]] = {}
saved = saved[allenum[i]]
if not saved[allenum[-1]]:
saved[allenum[-1]] = copy.deepcopy(default)
def openJSON(filename):
parsed = {}
rawjson = open(filename).read()
try:
parsed = json.loads(rawjson)
except:
print "Unexpected error while loading", filename, "-", sys.exc_info()[1]
parsed = None
return parsed
def compareFromTo(label, fromArray, toArray):
if isinstance(fromArray, basestring):
if fromArray not in toArray:
logging.warning("%s: %s has invalid enumeration: \"%s\"", iid, label, fromArray)
else:
if len(fromArray) == 0:
logging.warning("%s: %s has no values in enumeration", iid, label)
for item in fromArray:
if item not in toArray:
logging.warning("%s: %s has invalid enumeration: \"%s\"", iid, label, item)
def compareCountryFromTo(label, fromArray, toArray):
if isinstance(fromArray, basestring):
if fromArray not in toArray:
logging.warning("%s: %s has invalid enumeration[1]: \"%s\"", iid, label, fromArray)
else:
if len(fromArray) == 0:
logging.warning("%s: %s has no values in enumeration", iid, label)
for idx, item in enumerate(fromArray):
if item not in toArray:
if item == "USA":
logging.warning("%s: %s was set to 'USA', converting to 'US'", iid, label)
fromArray[idx] = "US"
elif item == "UK":
logging.warning("%s: %s was set to 'UK', converting to 'GB'", iid, label)
fromArray[idx] = "GB"
else:
fromArray[idx] = "Unknown"
logging.warning("%s: %s has invalid enumeration[2]: \"%s\", converting to 'Unknown'", iid, label, item)
if isinstance(fromArray, basestring):
fromArray = [ fromArray ]
return(fromArray)
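# Illustrative example: assuming the schema's country enumeration uses ISO codes
# (so it contains 'GB' and 'US' but not 'UK' or 'Narnia'),
# compareCountryFromTo('victim.country', ['UK', 'Narnia'], country_enum) logs
# warnings and returns ['GB', 'Unknown'].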
def checkIndustry(label, industry):
if not industry.isdigit() and industry not in [ "31-33", "44-45", "48-49" ]:
logging.warning("%s: %s must be numeric or one of 31-33, 44-45, 48-49: \"%s\"", iid, label, industry)
# retString.append("must be numbers or one of 31-33, 44-45, 48-49")
def checkEnum(incident, schema, country_region, cfg=cfg):
if 'security_incident' not in incident:
logging.warning("%s: security_incident not found (required)", iid)
else:
compareFromTo('security_incident', incident['security_incident'], schema['security_incident'])
if 'victim' not in incident:
logging.info("%s: auto-filled Unknown for victim section", iid)
incident['victim'] = { 'employee_count' : 'Unknown', 'industry':"00", 'country': [ "Unknown" ], 'notes':'auto-filled Unknown' }
victim = incident['victim']
if 'employee_count' not in victim:
logging.info("%s: auto-filled Unknown for victim.employee_count", iid)
victim['employee_count'] = "Unknown"
compareFromTo('victim.employee_count', victim['employee_count'], schema['victim']['employee_count'])
if 'industry' not in victim:
logging.info("%s: auto-filled Unknown for victim.industry", iid)
victim['industry'] = "00"
checkIndustry('victim.industry', victim['industry'])
if 'country' not in victim:
logging.info("%s: auto-filled Unknown for victim.country", iid)
victim['country'] = [ "Unknown" ]
# CC
victim['country'] = compareCountryFromTo('victim.country', victim['country'], schema['country'])
if 'actor' not in incident:
logging.info("%s: auto-filled Unknown for entire actor section", iid)
incident['actor'] = { 'unknown' : { 'notes':'auto-filled Unknown' } }
if 'external' in incident['actor']:
actor = incident['actor']['external']
if 'motive' not in actor:
logging.info("%s: auto-filled Unknown for actor.external.motive", iid)
actor['motive'] = [ "Unknown" ]
if len(actor['motive']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.external.motive", iid)
actor['motive'] = [ "Unknown" ]
compareFromTo('actor.external.motive', actor['motive'], schema['actor']['motive'])
if 'variety' not in actor:
logging.info("%s: auto-filled Unknown for actor.external.variety", iid)
actor['variety'] = [ "Unknown" ]
if len(actor['variety']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.external.variety", iid)
actor['variety'] = [ "Unknown" ]
compareFromTo('actor.external.variety', actor['variety'], schema['actor']['external']['variety'])
if 'country' in actor:
if len(actor['country']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.external.country", iid)
actor['country'] = [ "Unknown" ]
else:
if 'plus' not in incident:
incident['plus'] = {}
# only add region if it doesn't exist at all in the incident.
# if 'external_region' not in incident['plus']:
# logging.info("%s: auto-filled plus.external_region from the actor.external.country", iid)
# incident['plus']['external_region'] = [ country_region[c] for c in actor['country'] ]
# elif len(incident['plus']['external_region']) < len(actor['country']):
# logging.info("%s: auto-filled plus.external_region from the actor.external.country (len region < actor.country", iid)
# incident['plus']['external_region'] = [ country_region[c] for c in actor['country'] ]
else:
logging.info("%s: auto-filled Unknown for actor.external.country", iid)
actor['country'] = [ "Unknown" ]
# CC
actor['country'] = compareCountryFromTo('actor.external.country', actor['country'], schema['country'])
if 'internal' in incident['actor']:
actor = incident['actor']['internal']
if 'motive' not in actor:
logging.info("%s: auto-filled Unknown for actor.internal.motive", iid)
actor['motive'] = [ "Unknown" ]
if len(actor['motive']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.internal.motive", iid)
actor['motive'] = [ "Unknown" ]
compareFromTo('actor.internal.motive', actor['motive'], schema['actor']['motive'])
if 'variety' not in actor:
logging.info("%s: auto-filled Unknown for actor.internal.variety", iid)
actor['variety'] = [ "Unknown" ]
if len(actor['variety']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.internal.variety", iid)
actor['variety'] = [ "Unknown" ]
compareFromTo('actor.internal.variety', actor['variety'], schema['actor']['internal']['variety'])
if 'partner' in incident['actor']:
actor = incident['actor']['partner']
if 'motive' not in actor:
logging.info("%s: auto-filled Unknown for actor.partner.motive", iid)
actor['motive'] = [ "Unknown" ]
if len(actor['motive']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.partner.motive", iid)
actor['motive'] = [ "Unknown" ]
compareFromTo('actor.partner.motive', actor['motive'], schema['actor']['motive'])
if 'country' not in actor:
logging.info("%s: auto-filled Unknown for actor.partner.country", iid)
actor['country'] = [ "Unknown" ]
if len(actor['country']) == 0:
logging.info("%s: auto-filled Unknown for empty array in actor.partner.country", iid)
actor['country'] = [ "Unknown" ]
# compareFromTo('actor.partner.variety', actor['variety'], schema['country'])
# CC
actor['country'] = compareCountryFromTo('actor.partner.country', actor['country'], schema['country'])
if 'industry' not in actor:
logging.info("%s: auto-filled Unknown for actor.partner.industry", iid)
actor['industry'] = "00"
checkIndustry('actor.partner.industry', actor['industry'])
if 'action' not in incident:
logging.info("%s: auto-filled Unknown for entire action section", iid)
incident['action'] = { "unknown" : { "notes" : "auto-filled Unknown" } }
for action in ['malware', 'hacking', 'social', 'misuse', 'physical', 'error']:
if action in incident['action']:
for method in ['variety', 'vector']:
if method not in incident['action'][action]:
logging.info("%s: auto-filled Unknown for action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
if len(incident['action'][action][method]) == 0:
logging.info("%s: auto-filled Unknown for empty array in action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
astring = 'action.' + action + '.' + method
compareFromTo(astring, incident['action'][action][method], schema['action'][action][method])
if action == "physical":
method = "vector"
if method not in incident['action'][action]:
logging.info("%s: auto-filled Unknown for action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
if len(incident['action'][action][method]) == 0:
logging.info("%s: auto-filled Unknown for empty array in action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
astring = 'action.' + action + '.' + method
compareFromTo(astring, incident['action'][action][method], schema['action'][action][method])
if action == "social":
method = "target"
if method not in incident['action'][action]:
logging.info("%s: auto-filled Unknown for action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
if len(incident['action'][action][method]) == 0:
logging.info("%s: auto-filled Unknown for empty array in action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
astring = 'action.' + action + '.' + method
compareFromTo(astring, incident['action'][action][method], schema['action'][action][method])
action = 'environmental'
if action in incident['action']:
method = "variety"
if method not in incident['action'][action]:
logging.info("%s: auto-filled Unknown for action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
if len(incident['action'][action][method]) == 0:
logging.info("%s: auto-filled Unknown for empty array in action.%s.%s", iid, action, method)
incident['action'][action][method] = [ 'Unknown' ]
astring = 'action.' + action + '.' + method
compareFromTo(astring, incident['action'][action][method], schema['action'][action][method])
if 'asset' not in incident:
logging.info("%s: auto-filled Unknown for entire asset section", iid)
incident['asset'] = { "assets" : [ { "variety" : "Unknown" } ] }
if 'assets' not in incident['asset']:
logging.info("%s: auto-filled Unknown for asset.assets section", iid)
incident['asset']['assets'] = [ { "variety" : "Unknown" } ]
for index, asset in enumerate(incident['asset']['assets']):
if 'variety' not in asset:
logging.info("%s: auto-filled Unknown for asset.assets.variety", iid)
asset['variety'] = "Unknown"
compareFromTo('asset.assets.' + str(index) + '.variety', asset['variety'], schema['asset']['variety'])
for method in ["cloud", "accessibility", "ownership", "management", "hosting"]:
if method in incident['asset']:
compareFromTo('asset.'+method, incident['asset'][method], schema['asset'][method])
if 'attribute' not in incident:
logging.info("%s: no attribute section is found (not required)", iid)
else:
if 'confidentiality' in incident['attribute']:
if 'data' not in incident['attribute']['confidentiality']:
logging.info("%s: auto-filled Unknown for attribute.confidentiality.data.variety ", iid)
incident['attribute']['confidentiality']['data'] = [ { 'variety' : 'Unknown' } ]
if len(incident['attribute']['confidentiality']['data']) == 0:
logging.info("%s: auto-filled Unknown for empty attribute.confidentiality.data.variety ", iid)
incident['attribute']['confidentiality']['data'] = [ { 'variety' : 'Unknown' } ]
for index, datatype in enumerate(incident['attribute']['confidentiality']['data']):
astring = 'attribute.confidentiality.data.' + str(index) + '.variety'
compareFromTo(astring, datatype['variety'], schema['attribute']['confidentiality']['data']['variety'])
if 'data_disclosure' not in incident['attribute']['confidentiality']:
logging.warning("%s: data_disclosure not present (required if confidentiality present)", iid)
else:
astring = 'attribute.confidentiality.data_disclosure'
compareFromTo(astring, incident['attribute']['confidentiality']['data_disclosure'], schema['attribute']['confidentiality']['data_disclosure'])
if 'state' in incident['attribute']['confidentiality']:
astring = 'attribute.confidentiality.state'
compareFromTo(astring, incident['attribute']['confidentiality']['state'], schema['attribute']['confidentiality']['state'])
for attribute in ['integrity', 'availability']:
if attribute in incident['attribute']:
if 'variety' not in incident['attribute'][attribute]:
logging.info("%s: auto-filled Unknown for attribute.%s.variety", iid, attribute)
incident['attribute'][attribute]['variety'] = [ 'Unknown' ]
if len(incident['attribute'][attribute]['variety']) == 0:
logging.info("%s: auto-filled Unknown for empty attribute.%s.variety", iid, attribute)
incident['attribute'][attribute]['variety'] = [ 'Unknown' ]
astring = 'attribute.' + attribute + '.variety'
compareFromTo(astring, incident['attribute'][attribute]['variety'], schema['attribute'][attribute]['variety'])
# only for availability
if 'duration' in incident['attribute'][attribute]:
if 'unit' not in incident['attribute'][attribute]['duration']:
logging.info("%s: auto-filled Unknown for attribute.%s.duration.unit", iid, attribute)
incident['attribute'][attribute]['duration']['unit'] = "Unknown"
astring = 'attribute.' + attribute + '.duration.unit'
compareFromTo(astring, incident['attribute'][attribute]['duration']['unit'], schema['timeline']['unit'])
if 'timeline' not in incident:
logging.info("{0}: timeline section missing, auto-filling in {1}".format(iid, cfg["year"]-1))
incident['timeline'] = { 'incident' : { 'year' : cfg["year"]-1 } }
if 'incident' not in incident['timeline']:
logging.info("{0}: timeline.incident section missing, auto-filling in {1}".format(iid, cfg["year"]-1))
incident['timeline']['incident'] = { 'year' : cfg["year"]-1 }
# assume that the schema validator will verify number
for timeline in ['compromise', 'exfiltration', 'discovery', 'containment']:
astring = 'timeline.' + timeline + '.unit'
if timeline in incident['timeline']:
if 'unit' in incident['timeline'][timeline]:
compareFromTo(astring, incident['timeline'][timeline]['unit'], schema['timeline']['unit'])
if 'discovery_method' not in incident:
logging.info("%s: auto-filled Unknown for discovery_method", iid)
incident['discovery_method'] = "Unknown"
compareFromTo('discovery_method', incident['discovery_method'], schema['discovery_method'])
if incident.has_key('cost_corrective_action'):
compareFromTo('cost_corrective_action', incident['cost_corrective_action'], schema['cost_corrective_action'])
if incident.has_key('targeted'):
compareFromTo('targeted', incident['targeted'], schema['targeted'])
if incident.has_key('impact'):
if incident['impact'].has_key('overall_rating'):
compareFromTo('impact.overall_rating', incident['impact']['overall_rating'], schema['impact']['overall_rating'])
if incident['impact'].has_key('iso_currency_code'):
compareFromTo('impact.iso_currency_code', incident['impact']['iso_currency_code'], schema['iso_currency_code'])
if incident['impact'].has_key('loss'):
for index, loss in enumerate(incident['impact']['loss']):
if loss.has_key('variety'):
astring = 'impact.loss.' + str(index) + '.variety'
compareFromTo(astring, loss['variety'], schema['impact']['loss']['variety'])
if loss.has_key('rating'):
astring = 'impact.loss.' + str(index) + '.rating' # added g to the end of '.ratin' - GDB
compareFromTo(astring, loss['rating'], schema['impact']['loss']['rating'])
if 'plus' not in incident:
incident['plus'] = {}
for method in ['attack_difficulty_legacy', 'attack_difficulty_initial', 'attack_difficulty_subsequent']:
if incident['plus'].has_key(method):
astring = 'plus.' + method
compareFromTo(astring, incident['plus'][method], schema['plus']['attack_difficulty'])
for method in ['analysis_status', 'public_disclosure', 'security_maturity']:
if incident['plus'].has_key(method):
astring = 'plus.' + method
compareFromTo(astring, incident['plus'][method], schema['plus'][method])
if 'dbir_year' not in incident['plus'] and cfg['vcdb'] != True:
logging.warning("{0}: missing plus.dbir_year, auto-filled {1}".format(iid, cfg["year"]))
incident['plus']['dbir_year'] = cfg["year"]
if ('source_id' not in incident or cfg["force_analyst"]) and 'source' in cfg:
incident['source_id'] = cfg['source']
mydate = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if 'created' not in incident['plus']:
logging.info("%s: auto-filling now() for plus.created", iid)
incident['plus']['created'] = mydate
if 'modified' not in incident['plus']:
logging.info("%s: auto-filling now() for plus.modified", iid)
incident['plus']['modified'] = mydate
if 'master_id' not in incident['plus']:
if 'incident_id' in incident:
master_id = incident['incident_id']
else:
master_id = "notblank"
logging.info("%s: auto-filling plus.master_id to %s", iid, master_id)
incident['plus']['master_id'] = master_id
return incident
def addRules(incident):
"Takes in an incident and applies rules for internal consistency and consistency with previous incidents"
if 'action' not in incident:
incident['action'] = { "Unknown" : {} }
# Malware always has an integrity attribute
if 'malware' in incident['action']:
if 'attribute' not in incident:
logging.info("%s: Added attribute.integrity since malware was involved.",iid)
incident['attribute'] = {}
if 'integrity' not in incident['attribute']:
logging.info("%s: Added integrity since it has a malware action.",iid)
incident['attribute']['integrity'] = {}
if 'variety' not in incident['attribute']['integrity']:
logging.info("%s: Added integrity.variety array since it didn't have one.",iid)
incident['attribute']['integrity']['variety'] = []
if 'Software installation' not in incident['attribute']['integrity']['variety']:
logging.info("%s: Added software installation to attribute.integrity.variety since malware was involved.",iid)
incident['attribute']['integrity']['variety'].append('Software installation')
# Social engineering alters human behavior
if 'social' in incident['action']:
if 'attribute' not in incident:
logging.info("%s: Added attribute.integrity since social engineering was involved.",iid)
incident['attribute'] = {}
incident['attribute']['integrity'] = {}
if 'integrity' not in incident['attribute']:
logging.info("%s: Added attribute.integrity since social engineering was involved.",iid)
incident['attribute']['integrity'] = {}
if 'variety' not in incident['attribute']['integrity']:
logging.info("%s: Added attribute.integrity.variety array since it wasn't there.",iid)
incident['attribute']['integrity']['variety'] = []
if 'Alter behavior' not in incident['attribute']['integrity']['variety']:
logging.info("%s: Added alter behavior to attribute.integrity.variety since social engineering was involved.",iid)
incident['attribute']['integrity']['variety'].append('Alter behavior')
# The target of social engineering is one of the affected assets
if 'social' in incident['action']:
if 'target' not in incident['action']['social']:
logging.info("%s: Added action.social.target since it wasn't there.",iid)
incident['action']['social']['target'] = ['Unknown']
if 'asset' not in incident:
logging.info("%s: Added asset object since it wasn't there.",iid)
incident['asset'] = {}
if 'assets' not in incident['asset']:
logging.info("%s: Added asset.assets list since it wasn't there.",iid)
incident['asset']['assets'] = []
asset_list = list()
for each in incident['asset']['assets']:
asset_list.append(each['variety'])
for each in incident['action']['social']['target']:
if each == "Unknown":
if 'P - Other' not in asset_list:
logging.info("%s: Adding P - Other to asset list since there was social engineering.",iid)
incident['asset']['assets'].append({'variety':'P - Other'})
continue
if 'P - '+each not in asset_list:
if 'P - '+each != 'P - Unknown':
logging.info("%s: Adding P - %s to asset list since there was social engineering.",each,iid)
incident['asset']['assets'].append({'variety':'P - '+each})
# If SQLi was involved then there needs to be misappropriation too
if 'hacking' in incident['action']:
if 'variety' not in incident['action']['hacking']:
logging.info("%s: Adding hacking variety because it wasn't in there.",iid)
incident['action']['hacking']['variety'] = ['Unknown']
if 'SQLi' in incident['action']['hacking']['variety']:
if 'integrity' not in incident['attribute']:
logging.info("%s: Adding attribute.integrity since SQLi was involved.",iid)
incident['attribute']['integrity'] = {'variety': [] }
if 'variety' not in incident['attribute']['integrity']:
logging.info("%s: Adding attribute.integrity.variety array since it was omitted.",iid)
incident['attribute']['integrity']['variety'] = []
if 'Repurpose' not in incident['attribute']['integrity']['variety']:
logging.info("%s: Adding repurpose since SQLi was there.",iid)
incident['attribute']['integrity']['variety'].append('Repurpose')
# If there is a theft or loss then there is an availability loss
if 'physical' in incident['action']:
if 'Theft' in incident['action']['physical']['variety']:
if 'availability' not in incident['attribute']:
logging.info("%s: Added attribute.availability since there was theft.",iid)
incident['attribute']['availability'] = {'variety': ['Loss']}
if 'Loss' not in incident['attribute']['availability']['variety']:
logging.info("%s: Added Loss to attribute.availability.variety in respone %s since there was theft.",iid)
incident['attribute']['availability']['variety'].append('Loss')
if 'error' in incident['action']:
if 'Loss' in incident['action']['error']['variety']:
if 'availability' not in incident['attribute']:
logging.info("%s: Added attribute.availability since there was theft.",iid)
incident['attribute']['availability'] = {'variety': ['Loss']}
if 'Loss' not in incident['attribute']['availability']['variety']:
logging.info("%s: Added Loss to attribute.availability.variety in respone %s since there was theft.",iid)
incident['attribute']['availability']['variety'].append('Loss')
# Commented out, as the discussion concluded these should only be applied to SG short form-entered incidents
'''
# ATM/Gas/POS Skimmer shim rules. From Marc/Jay 2/13/15. Added by gbassett
try:
if 'Skimmer' in incident['action']['physical']['variety']:
logging.info('Adding attribute.confidentiality.data.variety=Payment, '
'attribute.integrity.variety = Hardware tampering and '
'action.misuse.variety.Unapproved hardware')
# ensure attribute, integrity, and variety exist and set them to hardware tampering
if 'attribute' not in incident:
incident['attribute'] = {'integrity':{'variety':['Hardware tampering']}}
elif 'integrity' not in incident['attribute']:
incident['attribute']['integrity'] = {'variety': ['Hardware tampering']}
else:
if 'Hardware tampering' not in incident['attribute']['integrity']['variety']:
incident['attribute']['integrity']['variety'].append('Hardware tampering')
# ensure confidentiality, data, and variety are in the incident and add 'payment' to the list
if 'confidentiality' not in incident['attribute']:
incident['attribute']['confidentiality'] = {'data': [{'variety': 'Payment'}]}
elif 'data' not in incident['attribute']['confidentiality']:
incident['attribute']['confidentiality']['data'] = [{'variety': 'Payment'}]
else:
if 'Payment'.lower().strip() not in [x['variety'].lower().strip() for x in incident['attribute']['confidentiality']['data']]:
incident['attribute']['confidentiality']['data'].append({'variety': 'Payment'})
# ensure action, misuse, and variety are in the incident and add 'Unapproved hardware' to the list
if 'action' not in incident:
incident['action'] = {'misuse':{'variety':['Unapproved hardware']}}
elif 'misuse' not in incident['action']:
incident['action']['misuse'] = {'variety':['Unapproved hardware']}
else:
if 'Unapproved hardware' not in incident['action']['misuse']['variety']:
incident['action']['misuse']['variety'].append('Unapproved hardware')
except KeyError:
logging.info('act.physical.variety not set so Skimmer (ATM/gas station/PoS skimmer shim) rule ignored.')
# Handheld Skimmer rules. From Marc/Jay 2/13/15. Added by gbassett
try:
if 'Possession abuse' in incident['action']['misuse']['variety']:
logging.info('Adding attribute.confidentiality.data.variety=Payment, '
'asset.assets.variety = M - Payment card, and '
'action.misuse.variety.Unapproved hardware')
# ensure asset, assets, and variety are in the dictionary and set it to M - Payment card as it is a string
if 'asset' not in incident:
incident['asset'] = {'assets': [{'variety': 'M - Payment card'}]}
elif 'assets' not in incident['asset']:
incident['asset']['assets'] = [{'variety': 'M - Payment card'}]
else:
if 'M - Payment card'.lower().strip() not in [x['variety'].lower().strip() for x in incident['asset']['assets']]:
incident['asset']['assets'].append({'variety': 'M - Payment card'})
# ensure confidentiality, data, and variety are in the incident and add 'payment' to the list
if 'attribute' not in incident:
incident['attribute'] = {'confidentiality': {'data': [{'variety': 'Payment'}]}}
elif 'confidentiality' not in incident['attribute']:
incident['attribute']['confidentiality'] = {'data': [{'variety': 'Payment'}]}
elif 'data' not in incident['attribute']['confidentiality']:
incident['attribute']['confidentiality']['data'] = [{'variety': 'Payment'}]
else:
if 'Payment'.lower().strip() not in [x['variety'].lower().strip() for x in incident['attribute']['confidentiality']['data']]:
incident['attribute']['confidentiality']['data'].append({'variety': 'Payment'})
# ensure action, misuse, and variety are in the incident and add 'Unapproved hardware' to the list
if 'action' not in incident:
incident['action'] = {'misuse':{'variety':['Unapproved hardware']}}
elif 'misuse' not in incident['action']:
incident['action']['misuse'] = {'variety':['Unapproved hardware']}
else:
if 'Unapproved hardware' not in incident['action']['misuse']['variety']:
incident['action']['misuse']['variety'].append('Unapproved hardware')
except KeyError:
logging.info('act.misuse.variety not set so Possession abuse (handheld skimmer) rule ignored.')
'''
# Unknown victims have NAICS code of "000", not just one zero
if incident['victim']['industry'].lower() in ['0','unknown']:
incident['victim']['industry'] = "000"
# KDT the script sometimes produces incidents with an asset array that has
# no entries. I'm too lazy to figure out where that happens so I'll just
# check for it here and fix it.
if len(incident['asset']['assets']) < 1:
incident['asset']['assets'].append({'variety':'Unknown'})
return incident
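# Editor's note (illustrative sketch, not part of the original script): addRules
# back-fills fields implied by others. For example, an incident whose only action
# is malware ends up with
#     incident['attribute']['integrity']['variety'] containing 'Software installation'
# and an incident with an empty asset.assets list gains a single {'variety': 'Unknown'} entry.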
def parseComplex(field, inline, labels):
regex = re.compile(r',+') # parse on one or more consecutive commas
units = [x.strip() for x in regex.split(inline)]
retval = []
for i in units:
entry = [x.strip() for x in i.split(':')]
out = {}
for index, s in enumerate(entry):
if index >= len(labels):
logging.warning("%s: failed to parse complex field %s, more entries separated by colons than labels, skipping", iid, field)
return
elif len(s):
out[labels[index]] = s
if len(out) > 0:
retval.append(copy.deepcopy(out))
return retval
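# Editor's note (illustrative example with hypothetical input, not from the original script):
# parseComplex splits a raw cell on commas, then on colons, and maps the pieces onto the
# supplied labels, e.g.
#     parseComplex('asset.assets.variety', 'S - Database:2, U - Laptop', ['variety', 'amount'])
# would return
#     [{'variety': 'S - Database', 'amount': '2'}, {'variety': 'U - Laptop'}]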
def cleanValue(incident, enum):
v = re.sub("^[,]+", "", incident[enum])
v = re.sub("[,]+$", "", v)
v = re.sub("[,]+", ",", v)
return(v)
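# Editor's note (illustrative example, not from the original script): cleanValue trims
# leading/trailing commas and collapses runs of commas, so a hypothetical cell
# ',,Fun,,Grudge,' comes back as 'Fun,Grudge'.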
def convertCSV(incident, cfg=cfg):
out = {}
out['schema_version'] = cfg["version"]
if incident.has_key("incident_id"):
if len(incident['incident_id']):
# out['incident_id'] = incident['incident_id']
# Changing incident_id to UUID to prevent de-anonymization of incidents
m = hashlib.md5(incident["incident_id"])
out["incident_id"] = str(uuid.UUID(bytes=m.digest())).upper()
else:
out['incident_id'] = str(uuid.uuid4()).upper()
else:
out['incident_id'] = str(uuid.uuid4()).upper()
tmp = {}
for enum in incident: tmp[enum] = cleanValue(incident, enum)
incident = tmp
for enum in ['source_id', 'reference', 'security_incident', 'confidence', 'summary', 'related_incidents', 'notes']:
addValue(incident, enum, out, "string")
# victim
for enum in ['victim_id', 'industry', 'employee_count', 'state',
'revenue.iso_currency_code', 'secondary.notes', 'notes']:
addValue(incident, 'victim.'+enum, out, "string")
addValue(incident, 'victim.revenue.amount', out, "integer")
addValue(incident, 'victim.secondary.amount', out, "numeric")
addValue(incident, 'victim.secondary.victim_id', out, "list")
addValue(incident, 'victim.locations_affected', out, "numeric")
addValue(incident, 'victim.country', out, "list")
# actor
for enum in ['motive', 'variety', 'country']:
addValue(incident, 'actor.external.'+enum, out, 'list')
addValue(incident, 'actor.external.notes', out, 'string')
for enum in ['motive', 'variety']:
addValue(incident, 'actor.internal.'+enum, out, 'list')
addValue(incident, 'actor.internal.notes', out, 'string')
for enum in ['motive', 'country']:
addValue(incident, 'actor.partner.'+enum, out, 'list')
addValue(incident, 'actor.partner.industry', out, 'string')
addValue(incident, 'actor.partner.notes', out, 'string')
# action
action = "malware."
for enum in ['variety', 'vector']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['cve', 'name', 'notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "hacking."
for enum in ['variety', 'vector']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['cve', 'notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "social."
for enum in ['variety', 'vector', 'target']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "misuse."
for enum in ['variety', 'vector']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "physical."
for enum in ['variety', 'vector']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "error."
for enum in ['variety', 'vector']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
action = "environmental."
for enum in ['variety']:
addValue(incident, 'action.' + action + enum, out, 'list')
for enum in ['notes']:
addValue(incident, 'action.' + action + enum, out, 'string')
# asset
if 'asset.assets.variety' in incident:
if 'asset' not in out:
out['asset'] = {}
if 'assets' not in out['asset']:
out['asset']['assets'] = []
assets = parseComplex("asset.assets.variety", incident['asset.assets.variety'], ['variety', 'amount'])
if len(assets):
for i in assets:
if 'amount' in i:
if isnum(i['amount']) is not None:
i['amount'] = isnum(i['amount'])
else:
del i['amount']
out['asset']['assets'] = copy.deepcopy(assets)
for enum in ['accessibility', 'ownership', 'management', 'hosting', 'cloud', 'notes']:
addValue(incident, 'asset.' + enum, out, 'string')
addValue(incident, 'asset.country', out, 'list')
# attributes
if 'attribute.confidentiality.data.variety' in incident:
data = parseComplex("attribute.confidentiality.data.variety", incident['attribute.confidentiality.data.variety'], ['variety', 'amount'])
if len(data):
if 'attribute' not in out:
out['attribute'] = {}
if 'confidentiality' not in out['attribute']:
out['attribute']['confidentiality'] = {}
if 'data' not in out['attribute']['confidentiality']:
out['attribute']['confidentiality']['data'] = []
for i in data:
if 'amount' in i:
if isnum(i['amount']) is not None:
i['amount'] = isnum(i['amount'])
else:
del i['amount']
out['attribute']['confidentiality']['data'] = copy.deepcopy(data)
addValue(incident, 'attribute.confidentiality.data_disclosure', out, 'string')
addValue(incident, 'attribute.confidentiality.data_total', out, 'numeric')
addValue(incident, 'attribute.confidentiality.state', out, 'list')
addValue(incident, 'attribute.confidentiality.notes', out, 'string')
addValue(incident, 'attribute.integrity.variety', out, 'list')
addValue(incident, 'attribute.integrity.notes', out, 'string')
addValue(incident, 'attribute.availability.variety', out, 'list')
addValue(incident, 'attribute.availability.duration.unit', out, 'string')
addValue(incident, 'attribute.availability.duration.value', out, 'numeric')
addValue(incident, 'attribute.availability.notes', out, 'string')
# timeline
addValue(incident, 'timeline.incident.year', out, 'numeric')
addValue(incident, 'timeline.incident.month', out, 'numeric')
addValue(incident, 'timeline.incident.day', out, 'numeric')
addValue(incident, 'timeline.incident.time', out, 'string')
addValue(incident, 'timeline.compromise.unit', out, 'string')
addValue(incident, 'timeline.compromise.value', out, 'numeric')
addValue(incident, 'timeline.exfiltration.unit', out, 'string')
addValue(incident, 'timeline.exfiltration.value', out, 'numeric')
addValue(incident, 'timeline.discovery.unit', out, 'string')
addValue(incident, 'timeline.discovery.value', out, 'numeric')
addValue(incident, 'timeline.containment.unit', out, 'string')
addValue(incident, 'timeline.containment.value', out, 'numeric')
# trailer values
for enum in ['discovery_method', 'targeted', 'control_failure', 'corrective_action']:
addValue(incident, enum, out, 'string')
if 'ioc.indicator' in incident:
ioc = parseComplex("ioc.indicator", incident['ioc.indicator'], ['indicator', 'comment'])
if len(ioc):
out['ioc'] = copy.deepcopy(ioc)
# impact
for enum in ['overall_min_amount', 'overall_amount', 'overall_max_amount']:
addValue(incident, 'impact.'+enum, out, 'numeric')
# TODO handle impact.loss varieties
for enum in ['overall_rating', 'iso_currency_code', 'notes']:
addValue(incident, 'impact.'+enum, out, 'string')
# plus
plusfields = ['master_id', 'investigator', 'issue_id', 'casename', 'analyst',
'analyst_notes', 'public_disclosure', 'analysis_status',
'attack_difficulty_legacy', 'attack_difficulty_subsequent',
'attack_difficulty_initial', 'security_maturity' ]
if cfg["vcdb"]:
plusfields.append('github')
for enum in plusfields:
addValue(incident, 'plus.'+enum, out, "string")
addValue(incident, 'plus.dbir_year', out, "numeric")
# addValue(incident, 'plus.external_region', out, "list")
if cfg["vcdb"]:
addValue(incident, 'plus.timeline.notification.year', out, "numeric")
addValue(incident, 'plus.timeline.notification.month', out, "numeric")
addValue(incident, 'plus.timeline.notification.day', out, "numeric")
# Skipping: 'unknown_unknowns', useful_evidence', antiforensic_measures, unfollowed_policies,
# countrol_inadequacy_legacy, pci
# TODO dbir_year
return out
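# Editor's note (hedged sketch, not part of the original script): convertCSV turns one
# flattened CSV row into the nested VERIS structure. For a hypothetical row with columns
#     incident_id, victim.country, action.hacking.variety
# it produces out['incident_id'] (a UUID derived from the original id, or a random one),
# out['victim']['country'] (a list) and out['action']['hacking']['variety'] (a list),
# with complex fields such as asset.assets.variety expanded via parseComplex above.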
def getCountryCode(countryfile): # Removed default of 'all.json' - GDB
# Fixed the hard-coded name - GDB
country_codes = json.loads(open(countryfile).read())
country_code_remap = {'Unknown':'000000'}
for eachCountry in country_codes:
try:
country_code_remap[eachCountry['alpha-2']] = eachCountry['region-code']
except:
country_code_remap[eachCountry['alpha-2']] = "000"
try:
country_code_remap[eachCountry['alpha-2']] += eachCountry['sub-region-code']
except:
country_code_remap[eachCountry['alpha-2']] += "000"
return country_code_remap
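# Editor's note (illustrative example with a hypothetical entry, not from the original
# script): getCountryCode expects a JSON list of country records and returns a dict keyed
# on the alpha-2 code, e.g.
#     {"alpha-2": "XX", "region-code": "019", "sub-region-code": "021"}
# maps to country_code_remap['XX'] == '019021'; 'Unknown' is hard-coded to '000000'.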
# jenums = openJSON("verisvz-enum.json")
# jschema = openJSON("verisvz.json")
def main(infile, cfg, reqfields, sfields, jenums, jschema):
for f in infile.fieldnames:
if f not in sfields:
if f != "repeat":
logging.warning("column will not be used: %s. May be inaccurate for 'plus' columns.", f)
if 'plus.analyst' not in infile.fieldnames:
logging.warning("the optional plus.analyst field is not found in the source document")
if 'source_id' not in infile.fieldnames:
logging.warning("the optional source_id field is not found in the source document")
row = 0
for incident in infile:
row += 1
# have to look for white-space only and remove it
try:
incident = { x:incident[x].strip() for x in incident }
except AttributeError as e:
logging.error("Error removing white space from feature {0} on row {1}.".format(x, row))
raise e
if 'incident_id' in incident:
iid = incident['incident_id']
else:
iid = "srcrow_" + str(row)
# logging.warning("This includes the row number")
repeat = 1
logging.info("-----> parsing incident %s", iid)
if incident.has_key('repeat'):
if incident['repeat'].lower()=="ignore" or incident['repeat'] == "0":
logging.info("Skipping row %s", iid)
continue
repeat = isnum(incident['repeat'])
if not repeat:
repeat = 1
if incident.has_key('security_incident'):
if incident['security_incident'].lower()=="no":
logging.info("Skipping row %s", iid)
continue
outjson = convertCSV(incident, cfg)
country_region = getCountryCode(cfg["countryfile"])
checkEnum(outjson, jenums, country_region, cfg)
addRules(outjson)
while repeat > 0:
outjson['plus']['master_id'] = str(uuid.uuid4()).upper()
yield iid, outjson
# outjson['incident_id'] = str(uuid.uuid4()).upper() ### HERE
# outjson['plus']['master_id'] = outjson['incident_id'] ###
repeat -= 1
if repeat > 0:
logging.info("Repeating %s more times on %s", repeat, iid)
iid = "" # setting global
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert Standard Excel (csv) format to VERIS 1.3 schema-compatible JSON files")
parser.add_argument("-i", "--input", help="The csv file containing the data")
parser.add_argument("-l","--log_level",choices=["critical","warning","info","debug"], help="Minimum logging level to display")
parser.add_argument('--log_file', help='Location of log file')
parser.add_argument("-s","--schemafile", help="The JSON schema file")
parser.add_argument("-e","--enumfile", help="The JSON file with VERIS enumerations")
parser.add_argument("--vcdb",help="Convert the data for use in VCDB",action="store_true")
parser.add_argument("--version", help="The version of veris in use")
parser.add_argument('--conf', help='The location of the config file', default="./_checkValidity.cfg")
parser.add_argument('--year', help='The DBIR year to assign to the records.')
parser.add_argument('--countryfile', help='The json file holding the country mapping.')
parser.add_argument('--source', help="Source_id to use for the incidents. Partner pseudonym.")
parser.add_argument("-f", "--force_analyst", help="Override default analyst with --analyst.", action='store_true')
output_group = parser.add_mutually_exclusive_group()
output_group.add_argument("-o", "--output", help="directory where json files will be written")
output_group.add_argument("-q", "--quiet", help="suppress the writing of json files.", action='store_true')
args = parser.parse_args()
args = {k:v for k,v in vars(args).iteritems() if v is not None}
logging_remap = {'warning':logging.WARNING, 'critical':logging.CRITICAL, 'info':logging.INFO, 'debug':logging.DEBUG}
# Parse the config file
try:
config = ConfigParser.SafeConfigParser()
config.readfp(open(args["conf"]))
cfg_key = {
'GENERAL': ['input', 'output'],
'LOGGING': ['level', 'log_file'],
'VERIS': ['version', 'schemafile', 'enumfile', 'vcdb', 'year', 'countryfile']
}
for section in cfg_key.keys():
if config.has_section(section):
for value in cfg_key[section]:
if value.lower() in config.options(section):
cfg[value] = config.get(section, value)
cfg["year"] = int(cfg["year"])
cfg["vcdb"] = {True:True, False:False, "false":False, "true":True}[cfg["vcdb"].lower()]
logging.debug("config import succeeded.")
except Exception as e:
logging.warning("config import failed.")
#raise e
pass
#cfg.update({k:v for k,v in vars(args).iteritems() if k not in cfg.keys()}) # copy command line arguments to the
#cfg.update(vars(args)) # overwrite configuration file variables with
cfg.update(args)
if 'quiet' in args and args['quiet'] == True:
_ = cfg.pop('output')
# if source missing, try and guess it from directory
if 'source' not in cfg or not cfg['source']:
cfg['source'] = cfg['input'].split("/")[-2].lower()
cfg['source'] = ''.join(e for e in cfg['source'] if e.isalnum())
logging.warning("Source not defined. Using the directory of the input file {0} instead.".format(cfg['source']))
# Quick test to replace any placeholders accidentally left in the config
for k, v in cfg.iteritems():
if k not in ["repositories", "source"] and type(v) == str:
cfg[k] = v.format(repositories=cfg["repositories"], partner_name=cfg["source"])
logging.basicConfig(level=logging_remap[cfg["log_level"]],
format='%(asctime)19s %(levelname)8s %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
if cfg["log_file"] is not None:
logging.FileHandler(cfg["log_file"])
# format='%(asctime)s %(levelname)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logging.debug(args)
logging.debug(cfg)
try:
# Added to file read to catch multiple columns with same name which causes second column to overwrite first. - GDB
file_handle = open(cfg["input"], 'rU')
csv_reader = csv.reader(file_handle)
l = csv_reader.next()
if len(l) > len(set(l)):
logging.error(l)
raise KeyError("Input file has multiple columns of the same name. Please create unique columns and rerun.")
exit(1)
else:
file_handle.seek(0)
infile = csv.DictReader(file_handle)
# infile = csv.DictReader(open(args.filename,'rU')) # Old File Read - gdb
except IOError:
logging.critical("ERROR: Input file not found.")
exit(1)
try:
jschema = openJSON(cfg["schemafile"])
except IOError:
logging.critical("ERROR: Schema file not found.")
exit(1)
try:
jenums = openJSON(cfg["enumfile"])
except IOError:
logging.critical("ERROR: Enumeration file not found.")
exit(1)
reqfields = reqSchema(jschema)
sfields = parseSchema(jschema)
# call the main loop which yields json incidents
logging.info("Output files will be written to %s",cfg["output"])
for iid, incident_json in main(infile, cfg, reqfields, sfields, jenums, jschema):
# write the json to a file
if cfg["output"].endswith("/"):
dest = cfg["output"] + incident_json['plus']['master_id'] + '.json'
# dest = args.output + outjson['incident_id'] + '.json'
else:
dest = cfg["output"] + '/' + incident_json['plus']['master_id'] + '.json'
# dest = args.output + '/' + outjson['incident_id'] + '.json'
logging.info("%s: writing file to %s", iid, dest)
try:
fwrite = open(dest, 'w')
fwrite.write(json.dumps(incident_json, indent=2, sort_keys=True))
fwrite.close()
except UnicodeDecodeError:
print incident_json
|
|
"""Support for skip/xfail functions and markers."""
import os
import platform
import sys
import traceback
from collections.abc import Mapping
from typing import Generator
from typing import Optional
from typing import Tuple
from typing import Type
import attr
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.mark.structures import Mark
from _pytest.nodes import Item
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.runner import CallInfo
from _pytest.stash import StashKey
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
group.addoption(
"--runxfail",
action="store_true",
dest="runxfail",
default=False,
help="report the results of xfail tests as if they were not marked",
)
parser.addini(
"xfail_strict",
"default for the strict parameter of xfail "
"markers when not given explicitly (default: False)",
default=False,
type="bool",
)
def pytest_configure(config: Config) -> None:
if config.option.runxfail:
# yay a hack
import pytest
old = pytest.xfail
config.add_cleanup(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception # type: ignore[attr-defined]
setattr(pytest, "xfail", nop)
config.addinivalue_line(
"markers",
"skip(reason=None): skip the given test function with an optional reason. "
'Example: skip(reason="no way of currently testing this") skips the '
"test.",
)
config.addinivalue_line(
"markers",
"skipif(condition, ..., *, reason=...): "
"skip the given test function if any of the conditions evaluate to True. "
"Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
"See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
)
config.addinivalue_line(
"markers",
"xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
"mark the test function as an expected failure if any of the conditions "
"evaluate to True. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
)
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
"""Evaluate a single skipif/xfail condition.
If an old-style string condition is given, it is eval()'d, otherwise the
condition is bool()'d. If this fails, an appropriately formatted pytest.fail
is raised.
Returns (result, reason). The reason is only relevant if the result is True.
"""
# String condition.
if isinstance(condition, str):
globals_ = {
"os": os,
"sys": sys,
"platform": platform,
"config": item.config,
}
for dictionary in reversed(
item.ihook.pytest_markeval_namespace(config=item.config)
):
if not isinstance(dictionary, Mapping):
raise ValueError(
"pytest_markeval_namespace() needs to return a dict, got {!r}".format(
dictionary
)
)
globals_.update(dictionary)
if hasattr(item, "obj"):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
filename = f"<{mark.name} condition>"
condition_code = compile(condition, filename, "eval")
result = eval(condition_code, globals_)
except SyntaxError as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
" " + " " * (exc.offset or 0) + "^",
"SyntaxError: invalid syntax",
]
fail("\n".join(msglines), pytrace=False)
except Exception as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
# Boolean condition.
else:
try:
result = bool(condition)
except Exception as exc:
msglines = [
"Error evaluating %r condition as a boolean" % mark.name,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
reason = mark.kwargs.get("reason", None)
if reason is None:
if isinstance(condition, str):
reason = "condition: " + condition
else:
# XXX better be checked at collection time
msg = (
"Error evaluating %r: " % mark.name
+ "you need to specify reason=STRING when using booleans as conditions."
)
fail(msg, pytrace=False)
return result, reason
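# Editor's note (hedged usage sketch, not part of pytest itself): for a marker such as
#     @pytest.mark.skipif("sys.platform == 'win32'", reason="fails on Windows")
# evaluate_condition(item, mark, "sys.platform == 'win32'") compiles and eval()s the
# string against os/sys/platform/config plus the test module's globals and returns
# (result, "fails on Windows"); a plain boolean condition is simply bool()'d instead.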
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Skip:
"""The result of evaluate_skip_marks()."""
reason: str = "unconditional skip"
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Skip(reason)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
try:
return Skip(*mark.args, **mark.kwargs)
except TypeError as e:
raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
return None
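# Editor's note (hedged usage sketch, not part of pytest itself): a test carrying
# @pytest.mark.skip(reason="flaky") yields Skip(reason="flaky") here, a skipif whose
# conditions all evaluate to False (with no other skip mark) yields None, and a bare
# @pytest.mark.skipif() with no condition skips unconditionally.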
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Xfail:
"""The result of evaluate_xfail_marks()."""
reason: str
run: bool
strict: bool
raises: Optional[Tuple[Type[BaseException], ...]]
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
"""Evaluate xfail marks on item, returning Xfail if triggered."""
for mark in item.iter_markers(name="xfail"):
run = mark.kwargs.get("run", True)
strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
raises = mark.kwargs.get("raises", None)
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Xfail(reason, run, strict, raises)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Xfail(reason, run, strict, raises)
return None
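# Editor's note (hedged usage sketch, not part of pytest itself):
# @pytest.mark.xfail(reason="known bug", strict=True) evaluates here to
# Xfail("known bug", run=True, strict=True, raises=None); with no xfail marks
# (or all conditions False) the function returns None.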
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StashKey[Optional[Xfail]]()
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
skipped = evaluate_skip_marks(item)
if skipped:
raise skip.Exception(skipped.reason, _use_item_location=True)
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)
yield
# The test run may have added an xfail mark dynamically.
xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
outcome = yield
rep = outcome.get_result()
xfailed = item.stash.get(xfailed_key, None)
if item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
assert call.excinfo.value.msg is not None
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif not rep.skipped and xfailed:
if call.excinfo:
raises = xfailed.raises
if raises is not None and not isinstance(call.excinfo.value, raises):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = xfailed.reason
elif call.when == "call":
if xfailed.strict:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] " + xfailed.reason
else:
rep.outcome = "passed"
rep.wasxfail = xfailed.reason
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "XFAIL"
elif report.passed:
return "xpassed", "X", "XPASS"
return None
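# Editor's note (hedged summary, not part of pytest itself): reports carrying ``wasxfail``
# are shown as "x"/XFAIL when skipped and "X"/XPASS when passed; everything else falls
# back to pytest's default status handling.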
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import functools
import pytest
import numpy as np
from ... import units as u
from .. import (PhysicsSphericalRepresentation, CartesianRepresentation,
CylindricalRepresentation, SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential,
CartesianDifferential, UnitSphericalDifferential,
SphericalCosLatDifferential, UnitSphericalCosLatDifferential,
PhysicsSphericalDifferential, CylindricalDifferential,
RadialRepresentation, RadialDifferential, Longitude, Latitude)
from ..representation import DIFFERENTIAL_CLASSES
from ..angle_utilities import angular_separation
from ...utils.compat.numpy import broadcast_arrays
from ...tests.helper import assert_quantity_allclose
def assert_representation_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1)
desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1)
actual_xyz, desired_xyz = broadcast_arrays(actual_xyz, desired_xyz,
subok=True)
assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs)
def assert_differential_allclose(actual, desired, rtol=1.e-7, **kwargs):
assert actual.components == desired.components
for component in actual.components:
actual_c = getattr(actual, component)
atol = 1.e-10 * actual_c.unit
assert_quantity_allclose(actual_c, getattr(desired, component),
rtol, atol, **kwargs)
def representation_equal(first, second):
return functools.reduce(np.logical_and,
(getattr(first, component) ==
getattr(second, component)
for component in first.components))
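# Editor's note (hedged clarification, not part of the original test module):
# representation_equal AND-reduces the element-wise equality of every component, so for
# two CartesianRepresentation objects it returns a boolean array that is True only where
# x, y and z all agree.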
class TestArithmetic():
def setup(self):
# Choose some specific coordinates, for which ``sum`` and ``dot``
# work out nicely.
self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle)
self.lat = Latitude(np.arange(-90, 91, 30), u.deg)
self.distance = [5., 12., 4., 2., 4., 12., 5.] * u.kpc
self.spherical = SphericalRepresentation(self.lon, self.lat,
self.distance)
self.unit_spherical = self.spherical.represent_as(
UnitSphericalRepresentation)
self.cartesian = self.spherical.to_cartesian()
def test_norm_spherical(self):
norm_s = self.spherical.norm()
assert isinstance(norm_s, u.Quantity)
# Just to be sure, test against getting object arrays.
assert norm_s.dtype.kind == 'f'
assert np.all(norm_s == self.distance)
@pytest.mark.parametrize('representation',
(PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation))
def test_norm(self, representation):
in_rep = self.spherical.represent_as(representation)
norm_rep = in_rep.norm()
assert isinstance(norm_rep, u.Quantity)
assert_quantity_allclose(norm_rep, self.distance)
def test_norm_unitspherical(self):
norm_rep = self.unit_spherical.norm()
assert norm_rep.unit == u.dimensionless_unscaled
assert np.all(norm_rep == 1. * u.dimensionless_unscaled)
@pytest.mark.parametrize('representation',
(SphericalRepresentation,
PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
UnitSphericalRepresentation))
def test_neg_pos(self, representation):
in_rep = self.cartesian.represent_as(representation)
pos_rep = +in_rep
assert type(pos_rep) is type(in_rep)
assert pos_rep is not in_rep
assert np.all(representation_equal(pos_rep, in_rep))
neg_rep = -in_rep
assert type(neg_rep) is type(in_rep)
assert np.all(neg_rep.norm() == in_rep.norm())
in_rep_xyz = in_rep.to_cartesian().xyz
assert_quantity_allclose(neg_rep.to_cartesian().xyz,
-in_rep_xyz, atol=1.e-10*in_rep_xyz.unit)
def test_mul_div_spherical(self):
s0 = self.spherical / (1. * u.Myr)
assert isinstance(s0, SphericalRepresentation)
assert s0.distance.dtype.kind == 'f'
assert np.all(s0.lon == self.spherical.lon)
assert np.all(s0.lat == self.spherical.lat)
assert np.all(s0.distance == self.distance / (1. * u.Myr))
s1 = (1./u.Myr) * self.spherical
assert isinstance(s1, SphericalRepresentation)
assert np.all(representation_equal(s1, s0))
s2 = self.spherical * np.array([[1.], [2.]])
assert isinstance(s2, SphericalRepresentation)
assert s2.shape == (2, self.spherical.shape[0])
assert np.all(s2.lon == self.spherical.lon)
assert np.all(s2.lat == self.spherical.lat)
assert np.all(s2.distance ==
self.spherical.distance * np.array([[1.], [2.]]))
s3 = np.array([[1.], [2.]]) * self.spherical
assert isinstance(s3, SphericalRepresentation)
assert np.all(representation_equal(s3, s2))
s4 = -self.spherical
assert isinstance(s4, SphericalRepresentation)
assert np.all(s4.lon == self.spherical.lon)
assert np.all(s4.lat == self.spherical.lat)
assert np.all(s4.distance == -self.spherical.distance)
s5 = +self.spherical
assert s5 is not self.spherical
assert np.all(representation_equal(s5, self.spherical))
@pytest.mark.parametrize('representation',
(PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation))
def test_mul_div(self, representation):
in_rep = self.spherical.represent_as(representation)
r1 = in_rep / (1. * u.Myr)
assert isinstance(r1, representation)
for component in in_rep.components:
in_rep_comp = getattr(in_rep, component)
r1_comp = getattr(r1, component)
if in_rep_comp.unit == self.distance.unit:
assert np.all(r1_comp == in_rep_comp / (1.*u.Myr))
else:
assert np.all(r1_comp == in_rep_comp)
r2 = np.array([[1.], [2.]]) * in_rep
assert isinstance(r2, representation)
assert r2.shape == (2, in_rep.shape[0])
assert_quantity_allclose(r2.norm(),
self.distance * np.array([[1.], [2.]]))
r3 = -in_rep
assert np.all(representation_equal(r3, in_rep * -1.))
with pytest.raises(TypeError):
in_rep * in_rep
with pytest.raises(TypeError):
dict() * in_rep
def test_mul_div_unit_spherical(self):
s1 = self.unit_spherical * self.distance
assert isinstance(s1, SphericalRepresentation)
assert np.all(s1.lon == self.unit_spherical.lon)
assert np.all(s1.lat == self.unit_spherical.lat)
assert np.all(s1.distance == self.spherical.distance)
s2 = self.unit_spherical / u.s
assert isinstance(s2, SphericalRepresentation)
assert np.all(s2.lon == self.unit_spherical.lon)
assert np.all(s2.lat == self.unit_spherical.lat)
assert np.all(s2.distance == 1./u.s)
u3 = -self.unit_spherical
assert isinstance(u3, UnitSphericalRepresentation)
assert_quantity_allclose(u3.lon, self.unit_spherical.lon + 180.*u.deg)
assert np.all(u3.lat == -self.unit_spherical.lat)
assert_quantity_allclose(u3.to_cartesian().xyz,
-self.unit_spherical.to_cartesian().xyz,
atol=1.e-10*u.dimensionless_unscaled)
u4 = +self.unit_spherical
assert isinstance(u4, UnitSphericalRepresentation)
assert u4 is not self.unit_spherical
assert np.all(representation_equal(u4, self.unit_spherical))
def test_add_sub_cartesian(self):
c1 = self.cartesian + self.cartesian
assert isinstance(c1, CartesianRepresentation)
assert c1.x.dtype.kind == 'f'
assert np.all(representation_equal(c1, 2. * self.cartesian))
with pytest.raises(TypeError):
self.cartesian + 10.*u.m
with pytest.raises(u.UnitsError):
self.cartesian + (self.cartesian / u.s)
c2 = self.cartesian - self.cartesian
assert isinstance(c2, CartesianRepresentation)
assert np.all(representation_equal(
c2, CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m)))
c3 = self.cartesian - self.cartesian / 2.
assert isinstance(c3, CartesianRepresentation)
assert np.all(representation_equal(c3, self.cartesian / 2.))
@pytest.mark.parametrize('representation',
(PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation))
def test_add_sub(self, representation):
in_rep = self.cartesian.represent_as(representation)
r1 = in_rep + in_rep
assert isinstance(r1, representation)
expected = 2. * in_rep
for component in in_rep.components:
assert_quantity_allclose(getattr(r1, component),
getattr(expected, component))
with pytest.raises(TypeError):
10.*u.m + in_rep
with pytest.raises(u.UnitsError):
in_rep + (in_rep / u.s)
r2 = in_rep - in_rep
assert isinstance(r2, representation)
assert np.all(representation_equal(
r2.to_cartesian(), CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m)))
r3 = in_rep - in_rep / 2.
assert isinstance(r3, representation)
expected = in_rep / 2.
assert_representation_allclose(r3, expected)
def test_add_sub_unit_spherical(self):
s1 = self.unit_spherical + self.unit_spherical
assert isinstance(s1, SphericalRepresentation)
expected = 2. * self.unit_spherical
for component in s1.components:
assert_quantity_allclose(getattr(s1, component),
getattr(expected, component))
with pytest.raises(TypeError):
10.*u.m - self.unit_spherical
with pytest.raises(u.UnitsError):
self.unit_spherical + (self.unit_spherical / u.s)
s2 = self.unit_spherical - self.unit_spherical / 2.
assert isinstance(s2, SphericalRepresentation)
expected = self.unit_spherical / 2.
for component in s2.components:
assert_quantity_allclose(getattr(s2, component),
getattr(expected, component))
@pytest.mark.parametrize('representation',
(CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation))
def test_sum_mean(self, representation):
in_rep = self.spherical.represent_as(representation)
r_sum = in_rep.sum()
assert isinstance(r_sum, representation)
expected = SphericalRepresentation(
90. * u.deg, 0. * u.deg, 14. * u.kpc).represent_as(representation)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(getattr(r_sum, component),
exp_component,
atol=1e-10*exp_component.unit)
r_mean = in_rep.mean()
assert isinstance(r_mean, representation)
expected = expected / len(in_rep)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(getattr(r_mean, component),
exp_component,
atol=1e-10*exp_component.unit)
def test_sum_mean_unit_spherical(self):
s_sum = self.unit_spherical.sum()
assert isinstance(s_sum, SphericalRepresentation)
expected = SphericalRepresentation(
90. * u.deg, 0. * u.deg, 3. * u.dimensionless_unscaled)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(getattr(s_sum, component),
exp_component,
atol=1e-10*exp_component.unit)
s_mean = self.unit_spherical.mean()
assert isinstance(s_mean, SphericalRepresentation)
expected = expected / len(self.unit_spherical)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(getattr(s_mean, component),
exp_component,
atol=1e-10*exp_component.unit)
@pytest.mark.parametrize('representation',
(CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation))
def test_dot(self, representation):
in_rep = self.cartesian.represent_as(representation)
r_dot_r = in_rep.dot(in_rep)
assert isinstance(r_dot_r, u.Quantity)
assert r_dot_r.shape == in_rep.shape
assert_quantity_allclose(np.sqrt(r_dot_r), self.distance)
r_dot_r_rev = in_rep.dot(in_rep[::-1])
assert isinstance(r_dot_r_rev, u.Quantity)
assert r_dot_r_rev.shape == in_rep.shape
expected = [-25., -126., 2., 4., 2., -126., -25.] * u.kpc**2
assert_quantity_allclose(r_dot_r_rev, expected)
for axis in 'xyz':
project = CartesianRepresentation(*(
(1. if axis == _axis else 0.) * u.dimensionless_unscaled
for _axis in 'xyz'))
assert_quantity_allclose(in_rep.dot(project),
getattr(self.cartesian, axis),
atol=1.*u.upc)
with pytest.raises(TypeError):
in_rep.dot(self.cartesian.xyz)
def test_dot_unit_spherical(self):
u_dot_u = self.unit_spherical.dot(self.unit_spherical)
assert isinstance(u_dot_u, u.Quantity)
assert u_dot_u.shape == self.unit_spherical.shape
assert_quantity_allclose(u_dot_u, 1.*u.dimensionless_unscaled)
cartesian = self.unit_spherical.to_cartesian()
for axis in 'xyz':
project = CartesianRepresentation(*(
(1. if axis == _axis else 0.) * u.dimensionless_unscaled
for _axis in 'xyz'))
assert_quantity_allclose(self.unit_spherical.dot(project),
getattr(cartesian, axis), atol=1.e-10)
@pytest.mark.parametrize('representation',
(CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation))
def test_cross(self, representation):
in_rep = self.cartesian.represent_as(representation)
r_cross_r = in_rep.cross(in_rep)
assert isinstance(r_cross_r, representation)
assert_quantity_allclose(r_cross_r.norm(), 0.*u.kpc**2,
atol=1.*u.mpc**2)
r_cross_r_rev = in_rep.cross(in_rep[::-1])
sep = angular_separation(self.lon, self.lat,
self.lon[::-1], self.lat[::-1])
expected = self.distance * self.distance[::-1] * np.sin(sep)
assert_quantity_allclose(r_cross_r_rev.norm(), expected,
atol=1.*u.mpc**2)
unit_vectors = CartesianRepresentation(
[1., 0., 0.]*u.one,
[0., 1., 0.]*u.one,
[0., 0., 1.]*u.one)[:, np.newaxis]
r_cross_uv = in_rep.cross(unit_vectors)
assert r_cross_uv.shape == (3, 7)
assert_quantity_allclose(r_cross_uv.dot(unit_vectors), 0.*u.kpc,
atol=1.*u.upc)
assert_quantity_allclose(r_cross_uv.dot(in_rep), 0.*u.kpc**2,
atol=1.*u.mpc**2)
zeros = np.zeros(len(in_rep)) * u.kpc
expected = CartesianRepresentation(
u.Quantity((zeros, -self.cartesian.z, self.cartesian.y)),
u.Quantity((self.cartesian.z, zeros, -self.cartesian.x)),
u.Quantity((-self.cartesian.y, self.cartesian.x, zeros)))
# Comparison with spherical is hard since some distances are zero,
# implying the angles are undefined.
r_cross_uv_cartesian = r_cross_uv.to_cartesian()
assert_representation_allclose(r_cross_uv_cartesian,
expected, atol=1.*u.upc)
# A final check, with the side benefit of ensuring __div__ and norm
# work on multi-D representations.
r_cross_uv_by_distance = r_cross_uv / self.distance
uv_sph = unit_vectors.represent_as(UnitSphericalRepresentation)
sep = angular_separation(self.lon, self.lat, uv_sph.lon, uv_sph.lat)
assert_quantity_allclose(r_cross_uv_by_distance.norm(), np.sin(sep),
atol=1e-9)
with pytest.raises(TypeError):
in_rep.cross(self.cartesian.xyz)
def test_cross_unit_spherical(self):
u_cross_u = self.unit_spherical.cross(self.unit_spherical)
assert isinstance(u_cross_u, SphericalRepresentation)
assert_quantity_allclose(u_cross_u.norm(), 0.*u.one, atol=1.e-10*u.one)
u_cross_u_rev = self.unit_spherical.cross(self.unit_spherical[::-1])
assert isinstance(u_cross_u_rev, SphericalRepresentation)
sep = angular_separation(self.lon, self.lat,
self.lon[::-1], self.lat[::-1])
expected = np.sin(sep)
assert_quantity_allclose(u_cross_u_rev.norm(), expected,
atol=1.e-10*u.one)
class TestUnitVectorsAndScales():
@staticmethod
def check_unit_vectors(e):
for v in e.values():
assert type(v) is CartesianRepresentation
assert_quantity_allclose(v.norm(), 1. * u.one)
return e
@staticmethod
def check_scale_factors(sf, rep):
unit = rep.norm().unit
for c, f in sf.items():
assert type(f) is u.Quantity
assert (f.unit * getattr(rep, c).unit).is_equivalent(unit)
def test_spherical(self):
s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg,
distance=[1, 2, 3] * u.kpc)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e['lon']
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,
atol=1e-10*u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + s.distance * 1e-5 * e['lat']
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,
atol=1e-10*u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']
assert_representation_allclose(s_lat2, s_lat)
s_distance = s + 1. * u.pc * e['distance']
assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10*u.rad)
assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10*u.rad)
assert_quantity_allclose(s_distance.distance, s.distance + 1.*u.pc)
s_distance2 = s + 1. * u.pc * sf['distance'] * e['distance']
assert_representation_allclose(s_distance2, s_distance)
def test_unit_spherical(self):
s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + 1e-5 * np.cos(s.lat) * e['lon']
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,
atol=1e-10*u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)
s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + 1e-5 * e['lat']
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,
atol=1e-10*u.rad)
s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']
assert_representation_allclose(s_lat2, s_lat)
def test_radial(self):
r = RadialRepresentation(10.*u.kpc)
with pytest.raises(NotImplementedError):
r.unit_vectors()
sf = r.scale_factors()
assert np.all(sf['distance'] == 1.*u.one)
assert np.all(r.norm() == r.distance)
with pytest.raises(TypeError):
r + r
def test_physical_spherical(self):
s = PhysicsSphericalRepresentation(phi=[0., 6., 21.] * u.hourangle,
theta=[90., 120., 5.] * u.deg,
r=[1, 2, 3] * u.kpc)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e['phi']
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad,
atol=1e-10*u.rad)
assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10*u.rad)
assert_quantity_allclose(s_phi.r, s.r)
s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi']
assert_representation_allclose(s_phi2, s_phi)
s_theta = s + s.r * 1e-5 * e['theta']
assert_quantity_allclose(s_theta.phi, s.phi)
assert_quantity_allclose(s_theta.theta, s.theta + 1e-5*u.rad,
atol=1e-10*u.rad)
assert_quantity_allclose(s_theta.r, s.r)
s_theta2 = s + 1.e-5 * u.radian * sf['theta'] * e['theta']
assert_representation_allclose(s_theta2, s_theta)
s_r = s + 1. * u.pc * e['r']
assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10*u.rad)
assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10*u.rad)
assert_quantity_allclose(s_r.r, s.r + 1.*u.pc)
s_r2 = s + 1. * u.pc * sf['r'] * e['r']
assert_representation_allclose(s_r2, s_r)
def test_cartesian(self):
s = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
e = s.unit_vectors()
sf = s.scale_factors()
for v, expected in zip(e.values(), ([1., 0., 0.] * u.one,
[0., 1., 0.] * u.one,
[0., 0., 1.] * u.one)):
assert np.all(v.get_xyz(xyz_axis=-1) == expected)
for f in sf.values():
assert np.all(f == 1.*u.one)
def test_cylindrical(self):
s = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[0., 90., -45.] * u.deg,
z=[3, 4, 5] * u.kpc)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_rho = s + 1. * u.pc * e['rho']
assert_quantity_allclose(s_rho.rho, s.rho + 1.*u.pc)
assert_quantity_allclose(s_rho.phi, s.phi)
assert_quantity_allclose(s_rho.z, s.z)
s_rho2 = s + 1. * u.pc * sf['rho'] * e['rho']
assert_representation_allclose(s_rho2, s_rho)
s_phi = s + s.rho * 1e-5 * e['phi']
assert_quantity_allclose(s_phi.rho, s.rho)
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad)
assert_quantity_allclose(s_phi.z, s.z)
s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi']
assert_representation_allclose(s_phi2, s_phi)
s_z = s + 1. * u.pc * e['z']
assert_quantity_allclose(s_z.rho, s.rho)
assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10*u.rad)
assert_quantity_allclose(s_z.z, s.z + 1.*u.pc)
s_z2 = s + 1. * u.pc * sf['z'] * e['z']
assert_representation_allclose(s_z2, s_z)
@pytest.mark.parametrize('omit_coslat', [False, True], scope='class')
class TestSphericalDifferential():
# these test cases are subclassed for SphericalCosLatDifferential,
# hence some tests depend on omit_coslat.
def _setup(self, omit_coslat):
if omit_coslat:
self.SD_cls = SphericalCosLatDifferential
else:
self.SD_cls = SphericalDifferential
s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg,
distance=[1, 2, 3] * u.kpc)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors(omit_coslat=omit_coslat)
def test_name_coslat(self, omit_coslat):
self._setup(omit_coslat)
if omit_coslat:
assert self.SD_cls is SphericalCosLatDifferential
assert self.SD_cls.get_name() == 'sphericalcoslat'
else:
assert self.SD_cls is SphericalDifferential
assert self.SD_cls.get_name() == 'spherical'
assert self.SD_cls.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self, omit_coslat):
self._setup(omit_coslat)
s, e, sf = self.s, self.e, self.sf
o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc)
o_lonc = o_lon.to_cartesian(base=s)
o_lon2 = self.SD_cls.from_cartesian(o_lonc, base=s)
assert_differential_allclose(o_lon, o_lon2)
# simple check by hand for first element.
# lat[0] is 0, so cos(lat) term doesn't matter.
assert_quantity_allclose(o_lonc[0].xyz,
[0., np.pi/180./3600., 0.]*u.kpc)
# check all using unit vectors and scale factors.
s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon']
assert_representation_allclose(o_lonc, s_lon - s, atol=1*u.npc)
s_lon2 = s + o_lon
assert_representation_allclose(s_lon2, s_lon, atol=1*u.npc)
o_lat = self.SD_cls(0.*u.arcsec, 1.*u.arcsec, 0.*u.kpc)
o_latc = o_lat.to_cartesian(base=s)
assert_quantity_allclose(o_latc[0].xyz,
[0., 0., np.pi/180./3600.]*u.kpc,
atol=1.*u.npc)
s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat']
assert_representation_allclose(o_latc, s_lat - s, atol=1*u.npc)
s_lat2 = s + o_lat
assert_representation_allclose(s_lat2, s_lat, atol=1*u.npc)
o_distance = self.SD_cls(0.*u.arcsec, 0.*u.arcsec, 1.*u.mpc)
o_distancec = o_distance.to_cartesian(base=s)
assert_quantity_allclose(o_distancec[0].xyz,
[1e-6, 0., 0.]*u.kpc, atol=1.*u.npc)
s_distance = s + 1.*u.mpc * sf['distance'] * e['distance']
assert_representation_allclose(o_distancec, s_distance - s,
atol=1*u.npc)
s_distance2 = s + o_distance
assert_representation_allclose(s_distance2, s_distance)
def test_differential_arithmetic(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc)
o_lon_by_2 = o_lon / 2.
assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2.,
o_lon.to_cartesian(s), atol=1e-10*u.kpc)
assert_representation_allclose(s + o_lon, s + 2 * o_lon_by_2,
atol=1e-10*u.kpc)
o_lon_rec = o_lon_by_2 + o_lon_by_2
assert_representation_allclose(s + o_lon, s + o_lon_rec,
atol=1e-10*u.kpc)
o_lon_0 = o_lon - o_lon
for c in o_lon_0.components:
assert np.all(getattr(o_lon_0, c) == 0.)
o_lon2 = self.SD_cls(1*u.mas/u.yr, 0*u.mas/u.yr, 0*u.km/u.s)
assert_quantity_allclose(o_lon2.norm(s)[0], 4.74*u.km/u.s,
atol=0.01*u.km/u.s)
assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr,
o_lon.to_cartesian(s), atol=1e-10*u.kpc)
s_off = s + o_lon
s_off2 = s + o_lon2 * 1000.*u.yr
assert_representation_allclose(s_off, s_off2, atol=1e-10*u.kpc)
factor = 1e5 * u.radian/u.arcsec
if not omit_coslat:
factor = factor / np.cos(s.lat)
s_off_big = s + o_lon * factor
assert_representation_allclose(
s_off_big, SphericalRepresentation(s.lon + 90.*u.deg, 0.*u.deg,
1e5*s.distance),
atol=5.*u.kpc)
o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=u.km/u.s)
o_lon3 = self.SD_cls.from_cartesian(o_lon3c, base=s)
expected0 = self.SD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr, 0.*u.km/u.s)
assert_differential_allclose(o_lon3[0], expected0)
s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas
assert_representation_allclose(
s_off_big2, SphericalRepresentation(90.*u.deg, 0.*u.deg,
1e5*u.kpc), atol=5.*u.kpc)
with pytest.raises(TypeError):
o_lon - s
with pytest.raises(TypeError):
s.to_cartesian() + o_lon
def test_differential_init_errors(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
with pytest.raises(u.UnitsError):
self.SD_cls(1.*u.arcsec, 0., 0.)
with pytest.raises(TypeError):
self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,
False, False)
with pytest.raises(TypeError):
self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,
copy=False, d_lat=0.*u.arcsec)
with pytest.raises(TypeError):
self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,
copy=False, flying='circus')
with pytest.raises(ValueError):
self.SD_cls(np.ones(2)*u.arcsec,
np.zeros(3)*u.arcsec, np.zeros(2)*u.kpc)
with pytest.raises(u.UnitsError):
self.SD_cls(1.*u.arcsec, 1.*u.s, 0.*u.kpc)
with pytest.raises(u.UnitsError):
self.SD_cls(1.*u.kpc, 1.*u.arcsec, 0.*u.kpc)
o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km/u.s)
with pytest.raises(u.UnitsError):
o.to_cartesian(s)
with pytest.raises(AttributeError):
o.d_lat = 0.*u.arcsec
with pytest.raises(AttributeError):
del o.d_lat
o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km)
with pytest.raises(TypeError):
o.to_cartesian()
c = CartesianRepresentation(10., 0., 0., unit=u.km)
with pytest.raises(TypeError):
self.SD_cls.to_cartesian(c)
with pytest.raises(TypeError):
self.SD_cls.from_cartesian(c)
with pytest.raises(TypeError):
self.SD_cls.from_cartesian(c, SphericalRepresentation)
with pytest.raises(TypeError):
self.SD_cls.from_cartesian(c, c)
@pytest.mark.parametrize('omit_coslat', [False, True], scope='class')
class TestUnitSphericalDifferential():
def _setup(self, omit_coslat):
if omit_coslat:
self.USD_cls = UnitSphericalCosLatDifferential
else:
self.USD_cls = UnitSphericalDifferential
s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors(omit_coslat=omit_coslat)
def test_name_coslat(self, omit_coslat):
self._setup(omit_coslat)
if omit_coslat:
assert self.USD_cls is UnitSphericalCosLatDifferential
assert self.USD_cls.get_name() == 'unitsphericalcoslat'
else:
assert self.USD_cls is UnitSphericalDifferential
assert self.USD_cls.get_name() == 'unitspherical'
assert self.USD_cls.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self, omit_coslat):
self._setup(omit_coslat)
s, e, sf = self.s, self.e, self.sf
o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)
o_lonc = o_lon.to_cartesian(base=s)
o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s)
assert_differential_allclose(o_lon, o_lon2)
# simple check by hand for first element
# (lat[0]=0, so works for both normal and CosLat differential)
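        # 1 arcsec = pi/(180*3600) rad ~= 4.85e-6; at lon = lat = 0 the
        # longitude unit vector points along +y.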
assert_quantity_allclose(o_lonc[0].xyz,
[0., np.pi/180./3600., 0.]*u.one)
# check all using unit vectors and scale factors.
s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon']
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10*u.one)
s_lon2 = s + o_lon
assert_representation_allclose(s_lon2, s_lon, atol=1e-10*u.one)
o_lat = self.USD_cls(0.*u.arcsec, 1.*u.arcsec)
o_latc = o_lat.to_cartesian(base=s)
assert_quantity_allclose(o_latc[0].xyz,
[0., 0., np.pi/180./3600.]*u.one,
atol=1e-10*u.one)
s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat']
assert type(s_lat) is SphericalRepresentation
assert_representation_allclose(o_latc, s_lat - s, atol=1e-10*u.one)
s_lat2 = s + o_lat
assert_representation_allclose(s_lat2, s_lat, atol=1e-10*u.one)
def test_differential_arithmetic(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)
o_lon_by_2 = o_lon / 2.
assert type(o_lon_by_2) is self.USD_cls
assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2.,
o_lon.to_cartesian(s), atol=1e-10*u.one)
s_lon = s + o_lon
s_lon2 = s + 2 * o_lon_by_2
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(s_lon, s_lon2, atol=1e-10*u.one)
o_lon_rec = o_lon_by_2 + o_lon_by_2
assert type(o_lon_rec) is self.USD_cls
assert representation_equal(o_lon, o_lon_rec)
assert_representation_allclose(s + o_lon, s + o_lon_rec,
atol=1e-10*u.one)
o_lon_0 = o_lon - o_lon
assert type(o_lon_0) is self.USD_cls
for c in o_lon_0.components:
assert np.all(getattr(o_lon_0, c) == 0.)
o_lon2 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)
kks = u.km/u.kpc/u.s
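        # A unit-spherical base carries no distance, so the norm is a rate
        # per unit distance: 1 mas/yr ~= 4.74047 km/s per kpc of distance.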
assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047*kks, atol=1e-4*kks)
assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr,
o_lon.to_cartesian(s), atol=1e-10*u.one)
s_off = s + o_lon
s_off2 = s + o_lon2 * 1000.*u.yr
assert_representation_allclose(s_off, s_off2, atol=1e-10*u.one)
factor = 1e5 * u.radian/u.arcsec
if not omit_coslat:
factor = factor / np.cos(s.lat)
s_off_big = s + o_lon * factor
assert_representation_allclose(
s_off_big, SphericalRepresentation(s.lon + 90.*u.deg,
0.*u.deg, 1e5),
atol=5.*u.one)
o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=kks)
        # This loses information!!
o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s)
expected0 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)
assert_differential_allclose(o_lon3[0], expected0)
# Part of motion kept.
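        # |s x y_hat| is the sine of the angle between the line of sight and
        # the y-axis, i.e. the fraction of the y-velocity transverse to the
        # radial direction, which is all a unit-spherical differential keeps.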
part_kept = s.cross(CartesianRepresentation(0,1,0, unit=u.one)).norm()
assert_quantity_allclose(o_lon3.norm(s), 4.74047*part_kept*kks,
atol=1e-10*kks)
# (lat[0]=0, so works for both normal and CosLat differential)
s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas
expected0 = SphericalRepresentation(90.*u.deg, 0.*u.deg,
1e5*u.one)
assert_representation_allclose(s_off_big2[0], expected0, atol=5.*u.one)
def test_differential_init_errors(self, omit_coslat):
self._setup(omit_coslat)
with pytest.raises(u.UnitsError):
self.USD_cls(0.*u.deg, 10.*u.deg/u.yr)
class TestRadialDifferential():
def setup(self):
s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg,
distance=[1, 2, 3] * u.kpc)
self.s = s
self.r = s.represent_as(RadialRepresentation)
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert RadialDifferential.get_name() == 'radial'
assert RadialDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
r, s, e, sf = self.r, self.s, self.e, self.sf
o_distance = RadialDifferential(1.*u.mpc)
# Can be applied to RadialRepresentation, though not most useful.
r_distance = r + o_distance
assert_quantity_allclose(r_distance.distance,
r.distance + o_distance.d_distance)
r_distance2 = o_distance + r
assert_quantity_allclose(r_distance2.distance,
r.distance + o_distance.d_distance)
# More sense to apply it relative to spherical representation.
o_distancec = o_distance.to_cartesian(base=s)
assert_quantity_allclose(o_distancec[0].xyz,
[1e-6, 0., 0.]*u.kpc, atol=1.*u.npc)
o_recover = RadialDifferential.from_cartesian(o_distancec, base=s)
assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance)
s_distance = s + 1.*u.mpc * sf['distance'] * e['distance']
assert_representation_allclose(o_distancec, s_distance - s,
atol=1*u.npc)
s_distance2 = s + o_distance
assert_representation_allclose(s_distance2, s_distance)
class TestPhysicsSphericalDifferential():
"""Test copied from SphericalDifferential, so less extensive."""
def setup(self):
s = PhysicsSphericalRepresentation(phi=[0., 90., 315.] * u.deg,
theta=[90., 120., 5.] * u.deg,
r=[1, 2, 3] * u.kpc)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert PhysicsSphericalDifferential.get_name() == 'physicsspherical'
assert PhysicsSphericalDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
o_phi = PhysicsSphericalDifferential(1*u.arcsec, 0*u.arcsec, 0*u.kpc)
o_phic = o_phi.to_cartesian(base=s)
o_phi2 = PhysicsSphericalDifferential.from_cartesian(o_phic, base=s)
assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec)
assert_quantity_allclose(o_phi.d_theta, o_phi2.d_theta,
atol=1.*u.narcsec)
assert_quantity_allclose(o_phi.d_r, o_phi2.d_r, atol=1.*u.npc)
# simple check by hand for first element.
assert_quantity_allclose(o_phic[0].xyz,
[0., np.pi/180./3600., 0.]*u.kpc,
atol=1.*u.npc)
# check all using unit vectors and scale factors.
s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi']
assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc)
o_theta = PhysicsSphericalDifferential(0*u.arcsec, 1*u.arcsec, 0*u.kpc)
o_thetac = o_theta.to_cartesian(base=s)
assert_quantity_allclose(o_thetac[0].xyz,
[0., 0., -np.pi/180./3600.]*u.kpc,
atol=1.*u.npc)
s_theta = s + 1.*u.arcsec * sf['theta'] * e['theta']
assert_representation_allclose(o_thetac, s_theta - s, atol=1e-10*u.kpc)
s_theta2 = s + o_theta
assert_representation_allclose(s_theta2, s_theta, atol=1e-10*u.kpc)
o_r = PhysicsSphericalDifferential(0*u.arcsec, 0*u.arcsec, 1*u.mpc)
o_rc = o_r.to_cartesian(base=s)
assert_quantity_allclose(o_rc[0].xyz, [1e-6, 0., 0.]*u.kpc,
atol=1.*u.npc)
s_r = s + 1.*u.mpc * sf['r'] * e['r']
assert_representation_allclose(o_rc, s_r - s, atol=1e-10*u.kpc)
s_r2 = s + o_r
assert_representation_allclose(s_r2, s_r)
def test_differential_init_errors(self):
with pytest.raises(u.UnitsError):
PhysicsSphericalDifferential(1.*u.arcsec, 0., 0.)
class TestCylindricalDifferential():
"""Test copied from SphericalDifferential, so less extensive."""
def setup(self):
s = CylindricalRepresentation(rho=[1, 2, 3] * u.kpc,
phi=[0., 90., 315.] * u.deg,
z=[3, 2, 1] * u.kpc)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CylindricalDifferential.get_name() == 'cylindrical'
assert CylindricalDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
o_rho = CylindricalDifferential(1.*u.mpc, 0.*u.arcsec, 0.*u.kpc)
o_rhoc = o_rho.to_cartesian(base=s)
assert_quantity_allclose(o_rhoc[0].xyz, [1.e-6, 0., 0.]*u.kpc)
s_rho = s + 1.*u.mpc * sf['rho'] * e['rho']
assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10*u.kpc)
s_rho2 = s + o_rho
assert_representation_allclose(s_rho2, s_rho)
o_phi = CylindricalDifferential(0.*u.kpc, 1.*u.arcsec, 0.*u.kpc)
o_phic = o_phi.to_cartesian(base=s)
o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s)
assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.*u.npc)
assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec)
assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.*u.npc)
# simple check by hand for first element.
assert_quantity_allclose(o_phic[0].xyz,
[0., np.pi/180./3600., 0.]*u.kpc)
# check all using unit vectors and scale factors.
s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi']
assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc)
o_z = CylindricalDifferential(0.*u.kpc, 0.*u.arcsec, 1.*u.mpc)
o_zc = o_z.to_cartesian(base=s)
assert_quantity_allclose(o_zc[0].xyz, [0., 0., 1.e-6]*u.kpc)
s_z = s + 1.*u.mpc * sf['z'] * e['z']
assert_representation_allclose(o_zc, s_z - s, atol=1e-10*u.kpc)
s_z2 = s + o_z
assert_representation_allclose(s_z2, s_z)
def test_differential_init_errors(self):
with pytest.raises(u.UnitsError):
CylindricalDifferential(1.*u.pc, 1.*u.arcsec, 3.*u.km/u.s)
class TestCartesianDifferential():
"""Test copied from SphericalDifferential, so less extensive."""
def setup(self):
s = CartesianRepresentation(x=[1, 2, 3] * u.kpc,
y=[2, 3, 1] * u.kpc,
z=[3, 1, 2] * u.kpc)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CartesianDifferential.get_name() == 'cartesian'
assert CartesianDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
for d, differential in ( # test different inits while we're at it.
('x', CartesianDifferential(1.*u.pc, 0.*u.pc, 0.*u.pc)),
('y', CartesianDifferential([0., 1., 0.], unit=u.pc)),
('z', CartesianDifferential(np.array([[0., 0., 1.]]) * u.pc,
xyz_axis=1))):
o_c = differential.to_cartesian(base=s)
o_c2 = differential.to_cartesian()
assert np.all(representation_equal(o_c, o_c2))
assert all(np.all(getattr(differential, 'd_'+c) == getattr(o_c, c))
for c in ('x', 'y', 'z'))
differential2 = CartesianDifferential.from_cartesian(o_c)
assert np.all(representation_equal(differential2, differential))
differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c)
assert np.all(representation_equal(differential3, differential))
s_off = s + 1.*u.pc * sf[d] * e[d]
assert_representation_allclose(o_c, s_off - s, atol=1e-10*u.kpc)
s_off2 = s + differential
assert_representation_allclose(s_off2, s_off)
def test_init_failures(self):
with pytest.raises(ValueError):
CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc)
with pytest.raises(u.UnitsError):
CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc, 3.*u.kpc)
with pytest.raises(ValueError):
CartesianDifferential(1.*u.kpc, 2.*u.kpc, 3.*u.kpc, xyz_axis=1)
class TestDifferentialConversion():
def setup(self):
self.s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
lat=[0., -30., 85.] * u.deg,
distance=[1, 2, 3] * u.kpc)
@pytest.mark.parametrize('sd_cls', [SphericalDifferential,
SphericalCosLatDifferential])
def test_represent_as_own_class(self, sd_cls):
so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
so2 = so.represent_as(sd_cls)
assert so2 is so
def test_represent_other_coslat(self):
s = self.s
coslat = np.cos(s.lat)
so = SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
so_coslat = so.represent_as(SphericalCosLatDifferential, base=s)
assert_quantity_allclose(so.d_lon * coslat,
so_coslat.d_lon_coslat)
so2 = so_coslat.represent_as(SphericalDifferential, base=s)
assert np.all(representation_equal(so2, so))
so3 = SphericalDifferential.from_representation(so_coslat, base=s)
assert np.all(representation_equal(so3, so))
so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s)
assert np.all(representation_equal(so_coslat2, so_coslat))
# Also test UnitSpherical
us = s.represent_as(UnitSphericalRepresentation)
uo = so.represent_as(UnitSphericalDifferential)
uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s)
assert_quantity_allclose(uo.d_lon * coslat,
uo_coslat.d_lon_coslat)
uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us)
assert np.all(representation_equal(uo2, uo))
uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us)
assert np.all(representation_equal(uo3, uo))
uo_coslat2 = UnitSphericalCosLatDifferential.from_representation(
uo, base=us)
assert np.all(representation_equal(uo_coslat2, uo_coslat))
uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us)
assert np.all(representation_equal(uo_coslat3, uo_coslat))
@pytest.mark.parametrize('sd_cls', [SphericalDifferential,
SphericalCosLatDifferential])
@pytest.mark.parametrize('r_cls', (SphericalRepresentation,
UnitSphericalRepresentation,
PhysicsSphericalRepresentation,
CylindricalRepresentation))
def test_represent_regular_class(self, sd_cls, r_cls):
so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
r = so.represent_as(r_cls, base=self.s)
c = so.to_cartesian(self.s)
r_check = c.represent_as(r_cls)
assert np.all(representation_equal(r, r_check))
so2 = sd_cls.from_representation(r, base=self.s)
so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s)
assert np.all(representation_equal(so2, so3))
@pytest.mark.parametrize('sd_cls', [SphericalDifferential,
SphericalCosLatDifferential])
def test_convert_physics(self, sd_cls):
# Conversion needs no base for SphericalDifferential, but does
# need one (to get the latitude) for SphericalCosLatDifferential.
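        # (The CosLat classes store d_lon_coslat = d_lon * cos(lat), so the
        # base latitude is needed to convert to or from the plain classes.)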
if sd_cls is SphericalDifferential:
usd_cls = UnitSphericalDifferential
base_s = base_u = base_p = None
else:
usd_cls = UnitSphericalCosLatDifferential
base_s = self.s[1]
base_u = base_s.represent_as(UnitSphericalRepresentation)
base_p = base_s.represent_as(PhysicsSphericalRepresentation)
so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
po = so.represent_as(PhysicsSphericalDifferential, base=base_s)
so2 = sd_cls.from_representation(po, base=base_s)
assert_differential_allclose(so, so2)
po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p)
assert_differential_allclose(po, po2)
so3 = po.represent_as(sd_cls, base=base_p)
assert_differential_allclose(so, so3)
s = self.s
p = s.represent_as(PhysicsSphericalRepresentation)
cso = so.to_cartesian(s[1])
cpo = po.to_cartesian(p[1])
assert_representation_allclose(cso, cpo)
assert_representation_allclose(s[1] + so, p[1] + po)
po2 = so.represent_as(PhysicsSphericalDifferential,
base=None if base_s is None else s)
assert_representation_allclose(s + so, p + po2)
suo = usd_cls.from_representation(so)
puo = usd_cls.from_representation(po, base=base_u)
assert_differential_allclose(suo, puo)
suo2 = so.represent_as(usd_cls)
puo2 = po.represent_as(usd_cls, base=base_p)
assert_differential_allclose(suo2, puo2)
assert_differential_allclose(puo, puo2)
sro = RadialDifferential.from_representation(so)
pro = RadialDifferential.from_representation(po)
assert representation_equal(sro, pro)
sro2 = so.represent_as(RadialDifferential)
pro2 = po.represent_as(RadialDifferential)
assert representation_equal(sro2, pro2)
assert representation_equal(pro, pro2)
@pytest.mark.parametrize(
('sd_cls', 'usd_cls'),
[(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential)])
def test_convert_unit_spherical_radial(self, sd_cls, usd_cls):
s = self.s
us = s.represent_as(UnitSphericalRepresentation)
rs = s.represent_as(RadialRepresentation)
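        # A spherical representation factors into its radial part times its
        # unit-sphere direction.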
assert_representation_allclose(rs * us, s)
uo = usd_cls(2.*u.deg, 1.*u.deg)
so = uo.represent_as(sd_cls, base=s)
assert_quantity_allclose(so.d_distance, 0.*u.kpc, atol=1.*u.npc)
uo2 = so.represent_as(usd_cls)
assert_representation_allclose(uo.to_cartesian(us),
uo2.to_cartesian(us))
so1 = sd_cls(2.*u.deg, 1.*u.deg, 5.*u.pc)
uo_r = so1.represent_as(usd_cls)
ro_r = so1.represent_as(RadialDifferential)
assert np.all(representation_equal(uo_r, uo))
assert np.all(representation_equal(ro_r, RadialDifferential(5.*u.pc)))
@pytest.mark.parametrize('sd_cls', [SphericalDifferential,
SphericalCosLatDifferential])
    def test_convert_cylindrical(self, sd_cls):
s = self.s
so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
cyo = so.represent_as(CylindricalDifferential, base=s)
cy = s.represent_as(CylindricalRepresentation)
so1 = cyo.represent_as(sd_cls, base=cy)
assert_representation_allclose(so.to_cartesian(s),
so1.to_cartesian(s))
cyo2 = CylindricalDifferential.from_representation(so, base=cy)
assert_representation_allclose(cyo2.to_cartesian(base=cy),
cyo.to_cartesian(base=cy))
so2 = sd_cls.from_representation(cyo2, base=s)
assert_representation_allclose(so.to_cartesian(s),
so2.to_cartesian(s))
@pytest.mark.parametrize('sd_cls', [SphericalDifferential,
SphericalCosLatDifferential])
def test_combinations(self, sd_cls):
if sd_cls is SphericalDifferential:
uo = UnitSphericalDifferential(2.*u.deg, 1.*u.deg)
uo_d_lon = uo.d_lon
else:
uo = UnitSphericalCosLatDifferential(2.*u.deg, 1.*u.deg)
uo_d_lon = uo.d_lon_coslat
ro = RadialDifferential(1.*u.mpc)
so1 = uo + ro
so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance)
assert np.all(representation_equal(so1, so1c))
so2 = uo - ro
so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance)
assert np.all(representation_equal(so2, so2c))
so3 = so2 + ro
so3c = sd_cls(uo_d_lon, uo.d_lat, 0.*u.kpc)
assert np.all(representation_equal(so3, so3c))
so4 = so1 + ro
so4c = sd_cls(uo_d_lon, uo.d_lat, 2*ro.d_distance)
assert np.all(representation_equal(so4, so4c))
so5 = so1 - uo
so5c = sd_cls(0*u.deg, 0.*u.deg, ro.d_distance)
assert np.all(representation_equal(so5, so5c))
assert_representation_allclose(self.s + (uo+ro), self.s+so1)
@pytest.mark.parametrize('rep,dif', [
[CartesianRepresentation([1, 2, 3]*u.kpc),
CartesianDifferential([.1, .2, .3]*u.km/u.s)],
[SphericalRepresentation(90*u.deg, 0.*u.deg, 14.*u.kpc),
SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)]
])
def test_arithmetic_with_differentials_fail(rep, dif):
rep = rep.with_differentials(dif)
with pytest.raises(TypeError):
rep + rep
with pytest.raises(TypeError):
rep - rep
with pytest.raises(TypeError):
rep * rep
with pytest.raises(TypeError):
rep / rep
with pytest.raises(TypeError):
10. * rep
with pytest.raises(TypeError):
rep / 10.
with pytest.raises(TypeError):
-rep
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import http.client
from testtools import matchers
from keystone.common import provider_api
import keystone.conf
from keystone.credential.providers import fernet as credential_fernet
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import test_v3
from keystone.tests.unit import utils as test_utils
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class ResourceTestCase(test_v3.RestfulTestCase,
test_v3.AssignmentTestMixin):
"""Test domains and projects."""
def setUp(self):
super(ResourceTestCase, self).setUp()
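        # Provision a Fernet key repository for the credential provider;
        # the credential-creating tests below (e.g. test_delete_domain)
        # depend on it.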
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'credential',
credential_fernet.MAX_ACTIVE_KEYS
)
)
# Domain CRUD tests
def test_create_domain(self):
"""Call ``POST /domains``."""
ref = unit.new_domain_ref()
r = self.post(
'/domains',
body={'domain': ref})
return self.assertValidDomainResponse(r, ref)
def test_create_domain_case_sensitivity(self):
"""Call `POST /domains`` twice with upper() and lower() cased name."""
ref = unit.new_domain_ref()
# ensure the name is lowercase
ref['name'] = ref['name'].lower()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
# ensure the name is uppercase
ref['name'] = ref['name'].upper()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_create_domain_bad_request(self):
"""Call ``POST /domains``."""
self.post('/domains', body={'domain': {}},
expected_status=http.client.BAD_REQUEST)
def test_create_domain_unsafe(self):
"""Call ``POST /domains with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
domain_name_url_safe='off')
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref})
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
domain_name_url_safe=config_setting)
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref},
expected_status=http.client.BAD_REQUEST)
def test_create_domain_unsafe_default(self):
"""Check default for unsafe names for ``POST /domains``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref})
def test_create_domain_creates_is_domain_project(self):
"""Check a project that acts as a domain is created.
Call ``POST /domains``.
"""
# Create a new domain
domain_ref = unit.new_domain_ref()
r = self.post('/domains', body={'domain': domain_ref})
self.assertValidDomainResponse(r, domain_ref)
# Retrieve its correspondent project
r = self.get('/projects/%(project_id)s' % {
'project_id': r.result['domain']['id']})
self.assertValidProjectResponse(r)
# The created project has is_domain flag as True
self.assertTrue(r.result['project']['is_domain'])
        # And its parent_id and domain_id attributes are both None
self.assertIsNone(r.result['project']['parent_id'])
self.assertIsNone(r.result['project']['domain_id'])
def test_create_is_domain_project_creates_domain(self):
"""Call ``POST /projects`` is_domain and check a domain is created."""
# Create a new project that acts as a domain
project_ref = unit.new_project_ref(domain_id=None, is_domain=True)
r = self.post('/projects', body={'project': project_ref})
self.assertValidProjectResponse(r)
# Retrieve its correspondent domain
r = self.get('/domains/%(domain_id)s' % {
'domain_id': r.result['project']['id']})
self.assertValidDomainResponse(r)
self.assertIsNotNone(r.result['domain'])
def test_create_domain_valid_explicit_id(self):
"""Call ``POST /domains`` with a valid `explicit_domain_id` set."""
ref = unit.new_domain_ref()
explicit_domain_id = '9aea63518f0040c6b4518d8d2242911c'
ref['explicit_domain_id'] = explicit_domain_id
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
r = self.get('/domains/%(domain_id)s' % {
'domain_id': explicit_domain_id})
self.assertValidDomainResponse(r)
self.assertIsNotNone(r.result['domain'])
def test_create_second_domain_valid_explicit_id_fails(self):
"""Call ``POST /domains`` with a valid `explicit_domain_id` set."""
ref = unit.new_domain_ref()
explicit_domain_id = '9aea63518f0040c6b4518d8d2242911c'
ref['explicit_domain_id'] = explicit_domain_id
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
# second one should fail
r = self.post(
'/domains',
body={'domain': ref},
expected_status=http.client.CONFLICT)
def test_create_domain_invalid_explicit_ids(self):
"""Call ``POST /domains`` with various invalid explicit_domain_ids."""
ref = unit.new_domain_ref()
bad_ids = ['bad!',
'',
'9aea63518f0040c',
'1234567890123456789012345678901234567890',
'9aea63518f0040c6b4518d8d2242911c9aea63518f0040c6b45']
for explicit_domain_id in bad_ids:
ref['explicit_domain_id'] = explicit_domain_id
            self.post('/domains', body={'domain': ref},
expected_status=http.client.BAD_REQUEST)
def test_list_head_domains(self):
"""Call ``GET & HEAD /domains``."""
resource_url = '/domains'
r = self.get(resource_url)
self.assertValidDomainListResponse(r, ref=self.domain,
resource_url=resource_url)
self.head(resource_url, expected_status=http.client.OK)
def test_list_limit_for_domains(self):
for x in range(6):
domain = {'domain': unit.new_domain_ref()}
self.post('/domains', body=domain)
for expected_length in range(1, 6):
self.config_fixture.config(
group='resource', list_limit=expected_length
)
response = self.get('/domains')
domain_list = response.json_body['domains']
self.assertEqual(expected_length, len(domain_list))
def test_get_head_domain(self):
"""Call ``GET /domains/{domain_id}``."""
resource_url = '/domains/%(domain_id)s' % {
'domain_id': self.domain_id}
r = self.get(resource_url)
self.assertValidDomainResponse(r, self.domain)
self.head(resource_url, expected_status=http.client.OK)
def test_update_domain(self):
"""Call ``PATCH /domains/{domain_id}``."""
ref = unit.new_domain_ref()
del ref['id']
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_update_domain_unsafe(self):
"""Call ``POST /domains/{domain_id} with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
domain_name_url_safe='off')
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
unsafe_name = 'i am still not / safe'
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
domain_name_url_safe=config_setting)
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref},
expected_status=http.client.BAD_REQUEST)
def test_update_domain_unsafe_default(self):
"""Check default for unsafe names for ``POST /domains``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
def test_update_domain_updates_is_domain_project(self):
"""Check the project that acts as a domain is updated.
Call ``PATCH /domains``.
"""
# Create a new domain
domain_ref = unit.new_domain_ref()
r = self.post('/domains', body={'domain': domain_ref})
self.assertValidDomainResponse(r, domain_ref)
# Disable it
self.patch('/domains/%s' % r.result['domain']['id'],
body={'domain': {'enabled': False}})
# Retrieve its correspondent project
r = self.get('/projects/%(project_id)s' % {
'project_id': r.result['domain']['id']})
self.assertValidProjectResponse(r)
# The created project is disabled as well
self.assertFalse(r.result['project']['enabled'])
def test_disable_domain(self):
"""Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
# Create a 2nd set of entities in a 2nd domain
domain2 = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
project2 = unit.new_project_ref(domain_id=domain2['id'])
PROVIDERS.resource_api.create_project(project2['id'], project2)
user2 = unit.create_user(PROVIDERS.identity_api,
domain_id=domain2['id'],
project_id=project2['id'])
role_member = unit.new_role_ref()
PROVIDERS.role_api.create_role(role_member['id'], role_member)
PROVIDERS.assignment_api.add_role_to_user_and_project(
user2['id'], project2['id'], role_member['id']
)
# First check a user in that domain can authenticate..
auth_data = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data)
# Now disable the domain
domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, domain2)
# Try looking up in v3 by name and id
auth_data = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data,
expected_status=http.client.UNAUTHORIZED)
auth_data = self.build_authentication_request(
username=user2['name'],
user_domain_id=domain2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data,
expected_status=http.client.UNAUTHORIZED)
def test_delete_enabled_domain_fails(self):
"""Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
# Try deleting an enabled domain, which should fail
self.delete('/domains/%(domain_id)s' % {
'domain_id': self.domain['id']},
expected_status=exception.ForbiddenAction.code)
def test_delete_domain(self):
"""Call ``DELETE /domains/{domain_id}``.
The sample data set up already has a user and project that is part of
self.domain. Additionally we will create a group and a credential
within it. Since we will authenticate in this domain,
we create another set of entities in a second domain. Deleting this
second domain should delete all these new entities. In addition,
all the entities in the regular self.domain should be unaffected
by the delete.
Test Plan:
- Create domain2 and a 2nd set of entities
- Disable domain2
- Delete domain2
- Check entities in domain2 have been deleted
- Check entities in self.domain are unaffected
"""
# Create a group and a credential in the main domain
group = unit.new_group_ref(domain_id=self.domain_id)
group = PROVIDERS.identity_api.create_group(group)
credential = unit.new_credential_ref(user_id=self.user['id'],
project_id=self.project_id)
PROVIDERS.credential_api.create_credential(
credential['id'], credential
)
# Create a 2nd set of entities in a 2nd domain
domain2 = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
project2 = unit.new_project_ref(domain_id=domain2['id'])
project2 = PROVIDERS.resource_api.create_project(
project2['id'], project2
)
user2 = unit.new_user_ref(domain_id=domain2['id'],
project_id=project2['id'])
user2 = PROVIDERS.identity_api.create_user(user2)
group2 = unit.new_group_ref(domain_id=domain2['id'])
group2 = PROVIDERS.identity_api.create_group(group2)
credential2 = unit.new_credential_ref(user_id=user2['id'],
project_id=project2['id'])
PROVIDERS.credential_api.create_credential(
credential2['id'], credential2
)
# Now disable the new domain and delete it
domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, domain2)
self.delete('/domains/%(domain_id)s' % {'domain_id': domain2['id']})
# Check all the domain2 relevant entities are gone
self.assertRaises(exception.DomainNotFound,
PROVIDERS.resource_api.get_domain,
domain2['id'])
self.assertRaises(exception.ProjectNotFound,
PROVIDERS.resource_api.get_project,
project2['id'])
self.assertRaises(exception.GroupNotFound,
PROVIDERS.identity_api.get_group,
group2['id'])
self.assertRaises(exception.UserNotFound,
PROVIDERS.identity_api.get_user,
user2['id'])
self.assertRaises(exception.CredentialNotFound,
PROVIDERS.credential_api.get_credential,
credential2['id'])
# ...and that all self.domain entities are still here
r = PROVIDERS.resource_api.get_domain(self.domain['id'])
self.assertDictEqual(self.domain, r)
r = PROVIDERS.resource_api.get_project(self.project['id'])
self.assertDictEqual(self.project, r)
r = PROVIDERS.identity_api.get_group(group['id'])
self.assertDictEqual(group, r)
r = PROVIDERS.identity_api.get_user(self.user['id'])
self.user.pop('password')
self.assertDictEqual(self.user, r)
r = PROVIDERS.credential_api.get_credential(credential['id'])
self.assertDictEqual(credential, r)
def test_delete_domain_with_idp(self):
# Create a new domain
domain_ref = unit.new_domain_ref()
r = self.post('/domains', body={'domain': domain_ref})
self.assertValidDomainResponse(r, domain_ref)
domain_id = r.result['domain']['id']
# Create a Idp in the domain
self.put('/OS-FEDERATION/identity_providers/test_idp',
body={"identity_provider": {
"domain_id": domain_id}},
expected_status=http.client.CREATED)
# Disable and delete the domain with no error.
self.patch('/domains/%(domain_id)s' % {
'domain_id': domain_id},
body={'domain': {'enabled': False}})
self.delete('/domains/%s' % domain_id)
# The Idp is deleted as well
self.get('/OS-FEDERATION/identity_providers/test_idp',
expected_status=http.client.NOT_FOUND)
def test_delete_domain_deletes_is_domain_project(self):
"""Check the project that acts as a domain is deleted.
Call ``DELETE /domains``.
"""
# Create a new domain
domain_ref = unit.new_domain_ref()
r = self.post('/domains', body={'domain': domain_ref})
self.assertValidDomainResponse(r, domain_ref)
# Retrieve its correspondent project
self.get('/projects/%(project_id)s' % {
'project_id': r.result['domain']['id']})
# Delete the domain
self.patch('/domains/%s' % r.result['domain']['id'],
body={'domain': {'enabled': False}})
self.delete('/domains/%s' % r.result['domain']['id'])
# The created project is deleted as well
self.get('/projects/%(project_id)s' % {
            'project_id': r.result['domain']['id']},
            expected_status=http.client.NOT_FOUND)
def test_delete_default_domain(self):
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id},
body={'domain': {'enabled': False}})
self.delete(
'/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id})
def test_token_revoked_once_domain_disabled(self):
"""Test token from a disabled domain has been invalidated.
Test that a token that was valid for an enabled domain
becomes invalid once that domain is disabled.
"""
domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain['id'], domain)
user2 = unit.create_user(PROVIDERS.identity_api,
domain_id=domain['id'])
# build a request body
auth_body = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'])
# sends a request for the user's token
token_resp = self.post('/auth/tokens', body=auth_body)
subject_token = token_resp.headers.get('x-subject-token')
# validates the returned token and it should be valid.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=http.client.OK)
# now disable the domain
domain['enabled'] = False
url = "/domains/%(domain_id)s" % {'domain_id': domain['id']}
self.patch(url,
body={'domain': {'enabled': False}})
# validates the same token again and it should be 'not found'
# as the domain has already been disabled.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=http.client.NOT_FOUND)
def test_delete_domain_hierarchy(self):
"""Call ``DELETE /domains/{domain_id}``."""
domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain['id'], domain)
root_project = unit.new_project_ref(domain_id=domain['id'])
root_project = PROVIDERS.resource_api.create_project(
root_project['id'], root_project
)
leaf_project = unit.new_project_ref(
domain_id=domain['id'],
parent_id=root_project['id'])
PROVIDERS.resource_api.create_project(leaf_project['id'], leaf_project)
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': domain['id']},
body={'domain': {'enabled': False}})
self.delete(
'/domains/%(domain_id)s' % {
'domain_id': domain['id']})
self.assertRaises(exception.DomainNotFound,
PROVIDERS.resource_api.get_domain,
domain['id'])
self.assertRaises(exception.ProjectNotFound,
PROVIDERS.resource_api.get_project,
root_project['id'])
self.assertRaises(exception.ProjectNotFound,
PROVIDERS.resource_api.get_project,
leaf_project['id'])
def test_forbid_operations_on_federated_domain(self):
"""Make sure one cannot operate on federated domain.
        This includes operations like create, update and delete
        on a domain identified by id and by name, where different case
        variations of the id 'Federated' are used.
"""
def create_domains():
for variation in ('Federated', 'FEDERATED',
'federated', 'fEderated'):
domain = unit.new_domain_ref()
domain['id'] = variation
yield domain
for domain in create_domains():
self.assertRaises(
AssertionError, PROVIDERS.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
AssertionError, PROVIDERS.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
exception.DomainNotFound, PROVIDERS.resource_api.delete_domain,
domain['id'])
# swap 'name' with 'id' and try again, expecting the request to
# gracefully fail
domain['id'], domain['name'] = domain['name'], domain['id']
self.assertRaises(
AssertionError, PROVIDERS.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
AssertionError, PROVIDERS.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
exception.DomainNotFound, PROVIDERS.resource_api.delete_domain,
domain['id'])
def test_forbid_operations_on_defined_federated_domain(self):
"""Make sure one cannot operate on a user-defined federated domain.
This includes operations like create, update, delete.
"""
non_default_name = 'beta_federated_domain'
self.config_fixture.config(group='federation',
federated_domain_name=non_default_name)
domain = unit.new_domain_ref(name=non_default_name)
self.assertRaises(AssertionError,
PROVIDERS.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(exception.DomainNotFound,
PROVIDERS.resource_api.delete_domain,
domain['id'])
self.assertRaises(AssertionError,
PROVIDERS.resource_api.update_domain,
domain['id'], domain)
# Project CRUD tests
def test_list_head_projects(self):
"""Call ``GET & HEAD /projects``."""
resource_url = '/projects'
r = self.get(resource_url)
self.assertValidProjectListResponse(r, ref=self.project,
resource_url=resource_url)
self.head(resource_url, expected_status=http.client.OK)
def test_create_project(self):
"""Call ``POST /projects``."""
ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post(
'/projects',
body={'project': ref})
self.assertValidProjectResponse(r, ref)
def test_create_project_bad_request(self):
"""Call ``POST /projects``."""
self.post('/projects', body={'project': {}},
expected_status=http.client.BAD_REQUEST)
def test_create_project_invalid_domain_id(self):
"""Call ``POST /projects``."""
ref = unit.new_project_ref(domain_id=uuid.uuid4().hex)
self.post('/projects', body={'project': ref},
expected_status=http.client.BAD_REQUEST)
def test_create_project_unsafe(self):
"""Call ``POST /projects with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
project_name_url_safe='off')
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref})
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
project_name_url_safe=config_setting)
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref},
expected_status=http.client.BAD_REQUEST)
def test_create_project_unsafe_default(self):
"""Check default for unsafe names for ``POST /projects``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref})
def test_create_project_with_parent_id_none_and_domain_id_none(self):
"""Call ``POST /projects``."""
# Grant a domain role for the user
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domain_id,
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id}
self.put(member_url)
# Create an authentication request for a domain scoped token
auth = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain_id)
# Without parent_id and domain_id passed as None, the domain_id should
# be normalized to the domain on the token, when using a domain
# scoped token.
ref = unit.new_project_ref()
r = self.post(
'/projects',
auth=auth,
body={'project': ref})
ref['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref)
def test_create_project_without_parent_id_and_without_domain_id(self):
"""Call ``POST /projects``."""
# Grant a domain role for the user
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domain_id,
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id}
self.put(member_url)
# Create an authentication request for a domain scoped token
auth = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain_id)
# Without domain_id and parent_id, the domain_id should be
# normalized to the domain on the token, when using a domain
# scoped token.
ref = unit.new_project_ref()
r = self.post(
'/projects',
auth=auth,
body={'project': ref})
ref['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref)
@test_utils.wip('waiting for support for parent_id to imply domain_id')
def test_create_project_with_parent_id_and_no_domain_id(self):
"""Call ``POST /projects``."""
# With only the parent_id, the domain_id should be
# normalized to the parent's domain_id
ref_child = unit.new_project_ref(parent_id=self.project['id'])
r = self.post(
'/projects',
body={'project': ref_child})
self.assertEqual(self.project['domain_id'],
r.result['project']['domain_id'])
ref_child['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref_child)
def _create_projects_hierarchy(self, hierarchy_size=1):
"""Create a single-branched project hierarchy with the specified size.
:param hierarchy_size: the desired hierarchy size, default is 1 -
a project with one child.
:returns projects: a list of the projects in the created hierarchy.
"""
new_ref = unit.new_project_ref(domain_id=self.domain_id)
resp = self.post('/projects', body={'project': new_ref})
projects = [resp.result]
for i in range(hierarchy_size):
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[i]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
return projects
def _create_project_and_tags(self, num_of_tags=1):
"""Create a project and a number of tags attached to that project.
        :param num_of_tags: the number of tags to create and attach to the
            project.
:returns: A tuple containing a new project and a list of
random tags
"""
tags = [uuid.uuid4().hex for i in range(num_of_tags)]
ref = unit.new_project_ref(
domain_id=self.domain_id,
tags=tags)
resp = self.post('/projects', body={'project': ref})
return resp.result['project'], tags
def test_list_project_response_returns_tags(self):
"""Call ``GET /projects`` should always return tag attributes."""
tagged_project, tags = self._create_project_and_tags()
self.get('/projects')
ref = unit.new_project_ref(domain_id=self.domain_id)
untagged_project = self.post(
'/projects', body={'project': ref}
).json_body['project']
resp = self.get('/projects')
for project in resp.json_body['projects']:
if project['id'] == tagged_project['id']:
self.assertIsNotNone(project['tags'])
self.assertEqual(project['tags'], tags)
if project['id'] == untagged_project['id']:
self.assertEqual(project['tags'], [])
def test_list_projects_filtering_by_tags(self):
"""Call ``GET /projects?tags={tags}``."""
project, tags = self._create_project_and_tags(num_of_tags=2)
tag_string = ','.join(tags)
resp = self.get('/projects?tags=%(values)s' % {
'values': tag_string})
self.assertValidProjectListResponse(resp)
self.assertEqual(project['id'], resp.result['projects'][0]['id'])
def test_list_projects_filtering_by_tags_any(self):
"""Call ``GET /projects?tags-any={tags}``."""
project, tags = self._create_project_and_tags(num_of_tags=2)
project1, tags1 = self._create_project_and_tags(num_of_tags=2)
tag_string = tags[0] + ',' + tags1[0]
resp = self.get('/projects?tags-any=%(values)s' % {
'values': tag_string})
pids = [p['id'] for p in resp.result['projects']]
self.assertValidProjectListResponse(resp)
self.assertIn(project['id'], pids)
self.assertIn(project1['id'], pids)
def test_list_projects_filtering_by_not_tags(self):
"""Call ``GET /projects?not-tags={tags}``."""
project1, tags1 = self._create_project_and_tags(num_of_tags=2)
project2, tags2 = self._create_project_and_tags(num_of_tags=2)
tag_string = ','.join(tags1)
resp = self.get('/projects?not-tags=%(values)s' % {
'values': tag_string})
self.assertValidProjectListResponse(resp)
pids = [p['id'] for p in resp.result['projects']]
self.assertNotIn(project1['id'], pids)
self.assertIn(project2['id'], pids)
def test_list_projects_filtering_by_not_tags_any(self):
"""Call ``GET /projects?not-tags-any={tags}``."""
project1, tags1 = self._create_project_and_tags(num_of_tags=2)
project2, tags2 = self._create_project_and_tags(num_of_tags=2)
project3, tags3 = self._create_project_and_tags(num_of_tags=2)
tag_string = tags1[0] + ',' + tags2[0]
resp = self.get('/projects?not-tags-any=%(values)s' % {
'values': tag_string})
self.assertValidProjectListResponse(resp)
pids = [p['id'] for p in resp.result['projects']]
self.assertNotIn(project1['id'], pids)
self.assertNotIn(project2['id'], pids)
self.assertIn(project3['id'], pids)
def test_list_projects_filtering_multiple_tag_filters(self):
"""Call ``GET /projects?tags={tags}&tags-any={tags}``."""
project1, tags1 = self._create_project_and_tags(num_of_tags=2)
project2, tags2 = self._create_project_and_tags(num_of_tags=2)
project3, tags3 = self._create_project_and_tags(num_of_tags=2)
tags1_query = ','.join(tags1)
resp = self.patch('/projects/%(project_id)s' %
{'project_id': project3['id']},
body={'project': {'tags': tags1}})
tags1.append(tags2[0])
resp = self.patch('/projects/%(project_id)s' %
{'project_id': project1['id']},
body={'project': {'tags': tags1}})
url = '/projects?tags=%(value1)s&tags-any=%(value2)s'
resp = self.get(url % {'value1': tags1_query,
'value2': ','.join(tags2)})
self.assertValidProjectListResponse(resp)
self.assertEqual(len(resp.result['projects']), 1)
self.assertIn(project1['id'], resp.result['projects'][0]['id'])
def test_list_projects_filtering_multiple_any_tag_filters(self):
"""Call ``GET /projects?tags-any={tags}¬-tags-any={tags}``."""
project1, tags1 = self._create_project_and_tags()
project2, tags2 = self._create_project_and_tags(num_of_tags=2)
        url = '/projects?tags-any=%(value1)s&not-tags-any=%(value2)s'
resp = self.get(url % {'value1': tags1[0],
'value2': tags2[0]})
self.assertValidProjectListResponse(resp)
pids = [p['id'] for p in resp.result['projects']]
self.assertIn(project1['id'], pids)
self.assertNotIn(project2['id'], pids)
def test_list_projects_filtering_conflict_tag_filters(self):
"""Call ``GET /projects?tags={tags}¬-tags={tags}``."""
project, tags = self._create_project_and_tags(num_of_tags=2)
tag_string = ','.join(tags)
        url = '/projects?tags=%(values)s&not-tags=%(values)s'
resp = self.get(url % {'values': tag_string})
self.assertValidProjectListResponse(resp)
self.assertEqual(len(resp.result['projects']), 0)
def test_list_projects_filtering_conflict_any_tag_filters(self):
"""Call ``GET /projects?tags-any={tags}¬-tags-any={tags}``."""
project, tags = self._create_project_and_tags(num_of_tags=2)
tag_string = ','.join(tags)
        url = '/projects?tags-any=%(values)s&not-tags-any=%(values)s'
resp = self.get(url % {'values': tag_string})
self.assertValidProjectListResponse(resp)
self.assertEqual(len(resp.result['projects']), 0)
def test_list_projects_by_tags_and_name(self):
"""Call ``GET /projects?tags-any={tags}&name={name}``."""
project, tags = self._create_project_and_tags(num_of_tags=2)
ref = {'project': {'name': 'tags and name'}}
resp = self.patch('/projects/%(project_id)s' %
{'project_id': project['id']},
body=ref)
url = '/projects?tags-any=%(values)s&name=%(name)s'
resp = self.get(url % {'values': tags[0],
'name': 'tags and name'})
self.assertValidProjectListResponse(resp)
pids = [p['id'] for p in resp.result['projects']]
self.assertIn(project['id'], pids)
resp = self.get(url % {'values': tags[0],
'name': 'foo'})
self.assertValidProjectListResponse(resp)
self.assertEqual(len(resp.result['projects']), 0)
def test_list_projects_filtering_by_parent_id(self):
"""Call ``GET /projects?parent_id={project_id}``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Add another child to projects[1] - it will be projects[3]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[1]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Query for projects[0] immediate children - it will
# be only projects[1]
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = [projects[1]['project']]
# projects[0] has projects[1] as child
self.assertEqual(expected_list, projects_result)
# Query for projects[1] immediate children - it will
# be projects[2] and projects[3]
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[1]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = [projects[2]['project'], projects[3]['project']]
# projects[1] has projects[2] and projects[3] as children
self.assertEqual(expected_list, projects_result)
# Query for projects[2] immediate children - it will be an empty list
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[2]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = []
# projects[2] has no child, projects_result must be an empty list
self.assertEqual(expected_list, projects_result)
def test_create_hierarchical_project(self):
"""Call ``POST /projects``."""
self._create_projects_hierarchy()
def test_get_head_project(self):
"""Call ``GET & HEAD /projects/{project_id}``."""
resource_url = '/projects/%(project_id)s' % {
'project_id': self.project_id}
r = self.get(resource_url)
self.assertValidProjectResponse(r, self.project)
self.head(resource_url, expected_status=http.client.OK)
def test_get_project_with_parents_as_list_with_invalid_id(self):
"""Call ``GET /projects/{project_id}?parents_as_list``."""
self.get('/projects/%(project_id)s?parents_as_list' % {
'project_id': None}, expected_status=http.client.NOT_FOUND)
self.get('/projects/%(project_id)s?parents_as_list' % {
'project_id': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_get_project_with_subtree_as_list_with_invalid_id(self):
"""Call ``GET /projects/{project_id}?subtree_as_list``."""
self.get('/projects/%(project_id)s?subtree_as_list' % {
'project_id': None}, expected_status=http.client.NOT_FOUND)
self.get('/projects/%(project_id)s?subtree_as_list' % {
'project_id': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_get_project_with_parents_as_ids(self):
"""Call ``GET /projects/{project_id}?parents_as_ids``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Query for projects[2] parents_as_ids
r = self.get(
'/projects/%(project_id)s?parents_as_ids' % {
'project_id': projects[2]['project']['id']})
self.assertValidProjectResponse(r, projects[2]['project'])
parents_as_ids = r.result['project']['parents']
# Assert parents_as_ids is a structured dictionary correctly
# representing the hierarchy. The request was made using projects[2]
# id, hence its parents should be projects[1], projects[0] and the
# is_domain_project, which is the root of the hierarchy. It should
# have the following structure:
# {
# projects[1]: {
# projects[0]: {
# is_domain_project: None
# }
# }
# }
is_domain_project_id = projects[0]['project']['domain_id']
expected_dict = {
projects[1]['project']['id']: {
projects[0]['project']['id']: {is_domain_project_id: None}
}
}
self.assertDictEqual(expected_dict, parents_as_ids)
# Query for projects[0] parents_as_ids
r = self.get(
'/projects/%(project_id)s?parents_as_ids' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectResponse(r, projects[0]['project'])
parents_as_ids = r.result['project']['parents']
# projects[0] has only the project that acts as a domain as parent
expected_dict = {
is_domain_project_id: None
}
self.assertDictEqual(expected_dict, parents_as_ids)
# Query for is_domain_project parents_as_ids
r = self.get(
'/projects/%(project_id)s?parents_as_ids' % {
'project_id': is_domain_project_id})
parents_as_ids = r.result['project']['parents']
# the project that acts as a domain has no parents, parents_as_ids
# must be None
self.assertIsNone(parents_as_ids)
def test_get_project_with_parents_as_list_with_full_access(self):
"""``GET /projects/{project_id}?parents_as_list`` with full access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on each one of those projects;
- Check that calling parents_as_list on 'subproject' returns both
'project' and 'parent'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on all the created projects
for proj in (parent, project, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?parents_as_list' %
{'project_id': subproject['project']['id']})
self.assertValidProjectResponse(r, subproject['project'])
# Assert only 'project' and 'parent' are in the parents list
self.assertIn(project, r.result['project']['parents'])
self.assertIn(parent, r.result['project']['parents'])
self.assertEqual(2, len(r.result['project']['parents']))
def test_get_project_with_parents_as_list_with_partial_access(self):
"""``GET /projects/{project_id}?parents_as_list`` with partial access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on 'parent' and 'subproject';
- Check that calling parents_as_list on 'subproject' only returns
'parent'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on parent and subproject
for proj in (parent, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?parents_as_list' %
{'project_id': subproject['project']['id']})
self.assertValidProjectResponse(r, subproject['project'])
# Assert only 'parent' is in the parents list
self.assertIn(parent, r.result['project']['parents'])
self.assertEqual(1, len(r.result['project']['parents']))
def test_get_project_with_parents_as_list_and_parents_as_ids(self):
"""Attempt to list a project's parents as both a list and as IDs.
This uses ``GET /projects/{project_id}?parents_as_list&parents_as_ids``
which should fail with a Bad Request due to the conflicting query
strings.
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
self.get(
'/projects/%(project_id)s?parents_as_list&parents_as_ids' % {
'project_id': projects[1]['project']['id']},
expected_status=http.client.BAD_REQUEST)
def test_get_project_with_include_limits(self):
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.role_id
)
system_admin_token = self.get_system_scoped_token()
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on all the created projects
for proj in (parent, project, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# create a registered limit and three limits for each project.
reg_limit = unit.new_registered_limit_ref(service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
self.post(
'/registered_limits',
body={'registered_limits': [reg_limit]},
token=system_admin_token,
expected_status=http.client.CREATED)
limit1 = unit.new_limit_ref(project_id=parent['project']['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
limit2 = unit.new_limit_ref(project_id=project['project']['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
limit3 = unit.new_limit_ref(project_id=subproject['project']['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
self.post(
'/limits',
body={'limits': [limit1, limit2, limit3]},
token=system_admin_token,
expected_status=http.client.CREATED)
# "include_limits" should work together with "parents_as_list" or
# "subtree_as_list". Only using "include_limits" really does nothing.
r = self.get('/projects/%(project_id)s?include_limits' %
{'project_id': subproject['project']['id']})
self.assertNotIn('parents', r.result['project'])
self.assertNotIn('subtree', r.result['project'])
self.assertNotIn('limits', r.result['project'])
# using "include_limits" with "parents_as_list"
r = self.get('/projects/%(project_id)s?include_limits&parents_as_list'
% {'project_id': subproject['project']['id']})
self.assertEqual(2, len(r.result['project']['parents']))
for parent in r.result['project']['parents']:
self.assertEqual(1, len(parent['project']['limits']))
self.assertEqual(parent['project']['id'],
parent['project']['limits'][0]['project_id'])
self.assertEqual(10,
parent['project']['limits'][0]['resource_limit'])
# using "include_limits" with "subtree_as_list"
r = self.get('/projects/%(project_id)s?include_limits&subtree_as_list'
% {'project_id': parent['project']['id']})
self.assertEqual(2, len(r.result['project']['subtree']))
for child in r.result['project']['subtree']:
self.assertEqual(1, len(child['project']['limits']))
self.assertEqual(child['project']['id'],
child['project']['limits'][0]['project_id'])
self.assertEqual(10,
child['project']['limits'][0]['resource_limit'])
def test_list_project_is_domain_filter(self):
"""Call ``GET /projects?is_domain=True/False``."""
# Get the initial number of projects, both those acting as domains and
# regular ones.
r = self.get('/projects?is_domain=True', expected_status=200)
initial_number_is_domain_true = len(r.result['projects'])
r = self.get('/projects?is_domain=False', expected_status=200)
initial_number_is_domain_false = len(r.result['projects'])
# Add some more projects acting as domains
new_is_domain_project = unit.new_project_ref(is_domain=True)
new_is_domain_project = PROVIDERS.resource_api.create_project(
new_is_domain_project['id'], new_is_domain_project)
new_is_domain_project2 = unit.new_project_ref(is_domain=True)
new_is_domain_project2 = PROVIDERS.resource_api.create_project(
new_is_domain_project2['id'], new_is_domain_project2)
number_is_domain_true = initial_number_is_domain_true + 2
r = self.get('/projects?is_domain=True', expected_status=200)
self.assertThat(r.result['projects'],
matchers.HasLength(number_is_domain_true))
self.assertIn(new_is_domain_project['id'],
[p['id'] for p in r.result['projects']])
self.assertIn(new_is_domain_project2['id'],
[p['id'] for p in r.result['projects']])
# Now add a regular project
new_regular_project = unit.new_project_ref(domain_id=self.domain_id)
new_regular_project = PROVIDERS.resource_api.create_project(
new_regular_project['id'], new_regular_project)
number_is_domain_false = initial_number_is_domain_false + 1
# Check we still have the same number of projects acting as domains
r = self.get('/projects?is_domain=True', expected_status=200)
self.assertThat(r.result['projects'],
matchers.HasLength(number_is_domain_true))
# Check the number of regular projects is correct
r = self.get('/projects?is_domain=False', expected_status=200)
self.assertThat(r.result['projects'],
matchers.HasLength(number_is_domain_false))
self.assertIn(new_regular_project['id'],
[p['id'] for p in r.result['projects']])
def test_list_project_is_domain_filter_default(self):
"""Default project list should not see projects acting as domains."""
# Get the initial count of regular projects
r = self.get('/projects?is_domain=False', expected_status=200)
number_is_domain_false = len(r.result['projects'])
# Make sure we have at least one project acting as a domain
new_is_domain_project = unit.new_project_ref(is_domain=True)
new_is_domain_project = PROVIDERS.resource_api.create_project(
new_is_domain_project['id'], new_is_domain_project)
r = self.get('/projects', expected_status=200)
self.assertThat(r.result['projects'],
matchers.HasLength(number_is_domain_false))
self.assertNotIn(new_is_domain_project, r.result['projects'])
def test_get_project_with_subtree_as_ids(self):
"""Call ``GET /projects/{project_id}?subtree_as_ids``.
This test creates a more complex hierarchy to test if the structured
dictionary returned by using the ``subtree_as_ids`` query param
correctly represents the hierarchy.
The hierarchy contains 5 projects with the following structure::
+--A--+
| |
+--B--+ C
| |
D E
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Add another child to projects[0] - it will be projects[3]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[0]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Add another child to projects[1] - it will be projects[4]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[1]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Query for projects[0] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectResponse(r, projects[0]['project'])
subtree_as_ids = r.result['project']['subtree']
# The subtree hierarchy from projects[0] should have the following
# structure:
# {
# projects[1]: {
# projects[2]: None,
# projects[4]: None
# },
# projects[3]: None
# }
expected_dict = {
projects[1]['project']['id']: {
projects[2]['project']['id']: None,
projects[4]['project']['id']: None
},
projects[3]['project']['id']: None
}
self.assertDictEqual(expected_dict, subtree_as_ids)
# Now query for projects[1] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[1]['project']['id']})
self.assertValidProjectResponse(r, projects[1]['project'])
subtree_as_ids = r.result['project']['subtree']
# The subtree hierarchy from projects[1] should have the following
# structure:
# {
# projects[2]: None,
# projects[4]: None
# }
expected_dict = {
projects[2]['project']['id']: None,
projects[4]['project']['id']: None
}
self.assertDictEqual(expected_dict, subtree_as_ids)
# Now query for projects[3] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[3]['project']['id']})
self.assertValidProjectResponse(r, projects[3]['project'])
subtree_as_ids = r.result['project']['subtree']
# projects[3] has no subtree, subtree_as_ids must be None
self.assertIsNone(subtree_as_ids)
def test_get_project_with_subtree_as_list_with_full_access(self):
"""``GET /projects/{project_id}?subtree_as_list`` with full access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on each one of those projects;
- Check that calling subtree_as_list on 'parent' returns both 'project'
and 'subproject'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on all the created projects
for proj in (parent, project, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?subtree_as_list' %
{'project_id': parent['project']['id']})
self.assertValidProjectResponse(r, parent['project'])
# Assert only 'project' and 'subproject' are in the subtree
self.assertIn(project, r.result['project']['subtree'])
self.assertIn(subproject, r.result['project']['subtree'])
self.assertEqual(2, len(r.result['project']['subtree']))
def test_get_project_with_subtree_as_list_with_partial_access(self):
"""``GET /projects/{project_id}?subtree_as_list`` with partial access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on 'parent' and 'subproject';
- Check that calling subtree_as_list on 'parent' returns 'subproject'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on parent and subproject
for proj in (parent, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?subtree_as_list' %
{'project_id': parent['project']['id']})
self.assertValidProjectResponse(r, parent['project'])
# Assert only 'subproject' is in the subtree
self.assertIn(subproject, r.result['project']['subtree'])
self.assertEqual(1, len(r.result['project']['subtree']))
def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
"""Attempt to get a project subtree as both a list and as IDs.
This uses ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``
which should fail with a Bad Request due to the conflicting query
strings.
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
self.get(
'/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {
'project_id': projects[1]['project']['id']},
expected_status=http.client.BAD_REQUEST)
def test_update_project(self):
"""Call ``PATCH /projects/{project_id}``."""
ref = unit.new_project_ref(domain_id=self.domain_id,
parent_id=self.project['parent_id'])
del ref['id']
r = self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
self.assertValidProjectResponse(r, ref)
def test_update_project_unsafe(self):
"""Call ``POST /projects/{project_id} with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
project_name_url_safe='off')
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id,
parent_id=self.project['parent_id'])
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
unsafe_name = 'i am still not / safe'
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
project_name_url_safe=config_setting)
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id,
parent_id=self.project['parent_id'])
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref},
expected_status=http.client.BAD_REQUEST)
def test_update_project_unsafe_default(self):
"""Check default for unsafe names for ``POST /projects``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id,
parent_id=self.project['parent_id'])
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
def test_update_project_domain_id(self):
"""Call ``PATCH /projects/{project_id}`` with domain_id.
A project's `domain_id` is immutable. Ensure that any attempt to
update the `domain_id` of a project fails.
"""
project = unit.new_project_ref(domain_id=self.domain['id'])
project = PROVIDERS.resource_api.create_project(project['id'], project)
project['domain_id'] = CONF.identity.default_domain_id
self.patch('/projects/%(project_id)s' % {
'project_id': project['id']},
body={'project': project},
expected_status=exception.ValidationError.code)
def test_update_project_parent_id(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
leaf_project = projects[1]['project']
leaf_project['parent_id'] = None
self.patch(
'/projects/%(project_id)s' % {
'project_id': leaf_project['id']},
body={'project': leaf_project},
expected_status=http.client.FORBIDDEN)
def test_update_project_is_domain_not_allowed(self):
"""Call ``PATCH /projects/{project_id}`` with is_domain.
The is_domain flag is immutable.
"""
project = unit.new_project_ref(domain_id=self.domain['id'])
resp = self.post('/projects',
body={'project': project})
self.assertFalse(resp.result['project']['is_domain'])
project['parent_id'] = resp.result['project']['parent_id']
project['is_domain'] = True
self.patch('/projects/%(project_id)s' % {
'project_id': resp.result['project']['id']},
body={'project': project},
expected_status=http.client.BAD_REQUEST)
def test_disable_leaf_project(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
leaf_project = projects[1]['project']
leaf_project['enabled'] = False
r = self.patch(
'/projects/%(project_id)s' % {
'project_id': leaf_project['id']},
body={'project': leaf_project})
self.assertEqual(
leaf_project['enabled'], r.result['project']['enabled'])
def test_disable_not_leaf_project(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
root_project = projects[0]['project']
root_project['enabled'] = False
self.patch(
'/projects/%(project_id)s' % {
'project_id': root_project['id']},
body={'project': root_project},
expected_status=http.client.FORBIDDEN)
def test_delete_project(self):
"""Call ``DELETE /projects/{project_id}``.
As well as making sure the delete succeeds, we ensure
that any credentials that reference this project are
also deleted, while other credentials are unaffected.
"""
credential = unit.new_credential_ref(user_id=self.user['id'],
project_id=self.project_id)
PROVIDERS.credential_api.create_credential(
credential['id'], credential
)
# First check the credential for this project is present
r = PROVIDERS.credential_api.get_credential(credential['id'])
self.assertDictEqual(credential, r)
# Create a second credential with a different project
project2 = unit.new_project_ref(domain_id=self.domain['id'])
PROVIDERS.resource_api.create_project(project2['id'], project2)
credential2 = unit.new_credential_ref(user_id=self.user['id'],
project_id=project2['id'])
PROVIDERS.credential_api.create_credential(
credential2['id'], credential2
)
# Now delete the project
self.delete(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
# Deleting the project should have deleted any credentials
# that reference this project
self.assertRaises(exception.CredentialNotFound,
PROVIDERS.credential_api.get_credential,
credential_id=credential['id'])
# But the credential for project2 is unaffected
r = PROVIDERS.credential_api.get_credential(credential2['id'])
self.assertDictEqual(credential2, r)
def test_delete_not_leaf_project(self):
"""Call ``DELETE /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
self.delete(
'/projects/%(project_id)s' % {
'project_id': projects[0]['project']['id']},
expected_status=http.client.FORBIDDEN)
def test_create_project_with_tags(self):
project, tags = self._create_project_and_tags(num_of_tags=10)
ref = self.get(
'/projects/%(project_id)s' % {
'project_id': project['id']},
expected_status=http.client.OK)
self.assertIn('tags', ref.result['project'])
for tag in tags:
self.assertIn(tag, ref.result['project']['tags'])
def test_update_project_with_tags(self):
project, tags = self._create_project_and_tags(num_of_tags=9)
tag = uuid.uuid4().hex
project['tags'].append(tag)
ref = self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': {'tags': project['tags']}})
self.assertIn(tag, ref.result['project']['tags'])
def test_create_project_tag(self):
tag = uuid.uuid4().hex
url = '/projects/%(project_id)s/tags/%(value)s'
self.put(url % {'project_id': self.project_id, 'value': tag},
expected_status=http.client.CREATED)
self.get(url % {'project_id': self.project_id, 'value': tag},
expected_status=http.client.NO_CONTENT)
def test_create_project_tag_is_case_insensitive(self):
case_tags = ['case', 'CASE']
for tag in case_tags:
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': tag},
expected_status=http.client.CREATED)
resp = self.get('/projects/%(project_id)s' %
{'project_id': self.project_id},
expected_status=http.client.OK)
for tag in case_tags:
self.assertIn(tag, resp.result['project']['tags'])
def test_get_single_project_tag(self):
project, tags = self._create_project_and_tags()
self.get(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tags[0]},
expected_status=http.client.NO_CONTENT)
self.head(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tags[0]},
expected_status=http.client.NO_CONTENT)
def test_get_project_tag_that_does_not_exist(self):
project, _ = self._create_project_and_tags()
self.get(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_delete_project_tag(self):
project, tags = self._create_project_and_tags()
self.delete(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tags[0]},
expected_status=http.client.NO_CONTENT)
self.get(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': tags[0]},
expected_status=http.client.NOT_FOUND)
def test_delete_project_tags(self):
project, tags = self._create_project_and_tags(num_of_tags=5)
self.delete(
'/projects/%(project_id)s/tags/' % {
'project_id': project['id']},
expected_status=http.client.NO_CONTENT)
self.get(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': tags[0]},
expected_status=http.client.NOT_FOUND)
resp = self.get(
'/projects/%(project_id)s/tags/' % {
'project_id': self.project_id},
expected_status=http.client.OK)
self.assertEqual(len(resp.result['tags']), 0)
def test_create_project_tag_invalid_project_id(self):
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': uuid.uuid4().hex,
'value': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_create_project_tag_unsafe_name(self):
tag = uuid.uuid4().hex + ','
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': tag},
expected_status=http.client.BAD_REQUEST)
def test_create_project_tag_already_exists(self):
project, tags = self._create_project_and_tags()
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tags[0]},
expected_status=http.client.BAD_REQUEST)
def test_create_project_tag_over_tag_limit(self):
project, _ = self._create_project_and_tags(num_of_tags=80)
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': uuid.uuid4().hex},
expected_status=http.client.BAD_REQUEST)
def test_create_project_tag_name_over_character_limit(self):
tag = 'a' * 256
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': tag},
expected_status=http.client.BAD_REQUEST)
def test_delete_tag_invalid_project_id(self):
self.delete(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': uuid.uuid4().hex,
'value': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_delete_project_tag_not_found(self):
self.delete(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_list_project_tags(self):
project, tags = self._create_project_and_tags(num_of_tags=5)
resp = self.get(
'/projects/%(project_id)s/tags' % {
'project_id': project['id']},
expected_status=http.client.OK)
for tag in tags:
self.assertIn(tag, resp.result['tags'])
def test_check_if_project_tag_exists(self):
project, tags = self._create_project_and_tags(num_of_tags=5)
self.head(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tags[0]},
expected_status=http.client.NO_CONTENT)
def test_list_project_tags_for_project_with_no_tags(self):
resp = self.get(
'/projects/%(project_id)s/tags' % {
'project_id': self.project_id},
expected_status=http.client.OK)
self.assertEqual([], resp.result['tags'])
def test_check_project_with_no_tags(self):
self.head(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': self.project_id,
'value': uuid.uuid4().hex},
expected_status=http.client.NOT_FOUND)
def test_update_project_tags(self):
project, tags = self._create_project_and_tags(num_of_tags=5)
resp = self.put(
'/projects/%(project_id)s/tags' % {
'project_id': project['id']},
body={'tags': tags},
expected_status=http.client.OK)
self.assertIn(tags[1], resp.result['tags'])
def test_update_project_tags_removes_previous_tags(self):
tag = uuid.uuid4().hex
project, tags = self._create_project_and_tags(num_of_tags=5)
self.put(
'/projects/%(project_id)s/tags/%(value)s' % {
'project_id': project['id'],
'value': tag},
expected_status=http.client.CREATED)
resp = self.put(
'/projects/%(project_id)s/tags' % {
'project_id': project['id']},
body={'tags': tags},
expected_status=http.client.OK)
self.assertNotIn(tag, resp.result['tags'])
self.assertIn(tags[1], resp.result['tags'])
def test_update_project_tags_unsafe_names(self):
project, tags = self._create_project_and_tags(num_of_tags=5)
invalid_chars = [',', '/']
for char in invalid_chars:
tags[0] = uuid.uuid4().hex + char
self.put(
'/projects/%(project_id)s/tags' % {
'project_id': project['id']},
body={'tags': tags},
expected_status=http.client.BAD_REQUEST)
def test_update_project_tags_with_too_many_tags(self):
project, _ = self._create_project_and_tags()
tags = [uuid.uuid4().hex for i in range(81)]
tags.append(uuid.uuid4().hex)
self.put(
'/projects/%(project_id)s/tags' % {'project_id': project['id']},
body={'tags': tags},
expected_status=http.client.BAD_REQUEST)
def test_list_projects_by_user_with_inherited_role(self):
"""Ensure the cache is invalidated when creating/deleting a project."""
domain_ref = unit.new_domain_ref()
resp = self.post('/domains', body={'domain': domain_ref})
domain = resp.result['domain']
user_ref = unit.new_user_ref(domain_id=self.domain_id)
resp = self.post('/users', body={'user': user_ref})
user = resp.result['user']
role_ref = unit.new_role_ref()
resp = self.post('/roles', body={'role': role_ref})
role = resp.result['role']
self.put('/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles/'
'%(role_id)s/inherited_to_projects' % {
'domain_id': domain['id'],
'user_id': user['id'],
'role_id': role['id']})
resp = self.get('/users/%(user)s/projects' % {'user': user['id']})
self.assertValidProjectListResponse(resp)
self.assertEqual([], resp.result['projects'])
project_ref = unit.new_project_ref(domain_id=domain['id'])
resp = self.post('/projects', body={'project': project_ref})
project = resp.result['project']
resp = self.get('/users/%(user)s/projects' % {'user': user['id']})
self.assertValidProjectListResponse(resp)
self.assertEqual(project['id'], resp.result['projects'][0]['id'])
class StrictTwoLevelLimitsResourceTestCase(ResourceTestCase):
def setUp(self):
super(StrictTwoLevelLimitsResourceTestCase, self).setUp()
def config_overrides(self):
super(StrictTwoLevelLimitsResourceTestCase, self).config_overrides()
self.config_fixture.config(group='unified_limit',
enforcement_model='strict_two_level')
def _create_projects_hierarchy(self, hierarchy_size=1):
if hierarchy_size > 1:
self.skip_test_overrides(
"Strict two level limit enforcement model doesn't allow the"
"project tree depth > 2")
return super(StrictTwoLevelLimitsResourceTestCase,
self)._create_projects_hierarchy(hierarchy_size)
def test_create_hierarchical_project(self):
projects = self._create_projects_hierarchy()
# Creating a grandchild project will fail.
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[1]['project']['id'])
self.post('/projects',
body={'project': new_ref},
expected_status=http.client.FORBIDDEN)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_check_existence_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="HEAD",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
resource_group_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
subscription_id: str,
*,
force_deletion_types: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if force_deletion_types is not None:
query_parameters['forceDeletionTypes'] = _SERIALIZER.query("force_deletion_types", force_deletion_types, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_export_template_request_initial(
subscription_id: str,
resource_group_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_request(
subscription_id: str,
*,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ResourceGroupsOperations(object):
"""ResourceGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def check_existence(
self,
resource_group_name: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource group exists.
:param resource_group_name: The name of the resource group to check. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_existence_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
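    # A minimal usage sketch for check_existence (hedged): this operations group
    # is normally reached through a service client attribute rather than being
    # constructed directly (see the class docstring). The client and credential
    # names below are illustrative assumptions, not part of this module.
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.resource import ResourceManagementClient
    #
    #     client = ResourceManagementClient(DefaultAzureCredential(), subscription_id)
    #     exists = client.resource_groups.check_existence("example-rg")
    #
    # The allowed responses are 204 (group exists) and 404 (group does not
    # exist), which the method folds into a boolean by testing for a 2xx status.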
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroup",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters
that match the allowed characters.
:type resource_group_name: str
:param parameters: Parameters supplied to the create or update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroup')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
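    # Hedged sketch of create_or_update: a resource group only requires a
    # location; tags are optional. The ResourceGroup model keyword arguments are
    # assumed from the models package referenced in the docstring above.
    #
    #     from azure.mgmt.resource.resources.v2021_01_01 import models
    #
    #     rg = client.resource_groups.create_or_update(
    #         "example-rg",
    #         models.ResourceGroup(location="westus", tags={"env": "dev"}),
    #     )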
def _delete_initial(
self,
resource_group_name: str,
force_deletion_types: Optional[str] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
force_deletion_types=force_deletion_types,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
force_deletion_types: Optional[str] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a resource group.
When you delete a resource group, all of its resources are also deleted. Deleting a resource
group deletes all of its template deployments and currently stored operations.
:param resource_group_name: The name of the resource group to delete. The name is case
insensitive.
:type resource_group_name: str
:param force_deletion_types: The resource types you want to force delete. Currently, only the
following is supported:
forceDeletionTypes=Microsoft.Compute/virtualMachines,Microsoft.Compute/virtualMachineScaleSets.
:type force_deletion_types: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
force_deletion_types=force_deletion_types,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
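    # Hedged sketch of the long-running delete: begin_delete returns an
    # LROPoller, so callers typically block on .result() (or poll with .done()).
    # The group name is illustrative; force_deletion_types is forwarded as-is.
    #
    #     poller = client.resource_groups.begin_delete(
    #         "example-rg",
    #         force_deletion_types="Microsoft.Compute/virtualMachines")
    #     poller.result()  # blocks until the deletion completes or raises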
@distributed_trace
def get(
self,
resource_group_name: str,
**kwargs: Any
) -> "_models.ResourceGroup":
"""Gets a resource group.
:param resource_group_name: The name of the resource group to get. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroupPatchable",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroupPatchable
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroupPatchable')
request = build_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def _export_template_initial(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> Optional["_models.ResourceGroupExportResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ResourceGroupExportResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ExportTemplateRequest')
request = build_export_template_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
content_type=content_type,
json=_json,
template_url=self._export_template_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_template_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate'} # type: ignore
@distributed_trace
def begin_export_template(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> LROPoller["_models.ResourceGroupExportResult"]:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: Parameters for exporting the template.
:type parameters: ~azure.mgmt.resource.resources.v2021_01_01.models.ExportTemplateRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ResourceGroupExportResult or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroupExportResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupExportResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._export_template_initial(
resource_group_name=resource_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_template.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate'} # type: ignore
@distributed_trace
def list(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> Iterable["_models.ResourceGroupListResult"]:
"""Gets all the resource groups for a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`You can filter by
tag names and values. For example, to filter for a tag name and value, use $filter=tagName eq
'tag1' and tagValue eq 'Value1'.
:type filter: str
:param top: The number of results to return. If null is passed, returns all resource groups.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceGroupListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2021_01_01.models.ResourceGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups'} # type: ignore
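    # Hedged sketch of list: the returned ItemPaged iterates lazily across pages
    # of resource groups. The filter string follows the $filter syntax from the
    # docstring; attribute names on the deserialized ResourceGroup model (name,
    # location) are assumed from the models package.
    #
    #     for rg in client.resource_groups.list(
    #             filter="tagName eq 'env' and tagValue eq 'dev'", top=10):
    #         print(rg.name, rg.location)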
|
|
# Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Named entity chunker
"""
from __future__ import print_function
import os, re, pickle
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
try:
from nltk.classify import MaxentClassifier
except ImportError:
pass
from nltk.tree import Tree
from nltk.tokenize import word_tokenize
from nltk.data import find
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
class NEChunkParserTagger(ClassifierBasedTagger):
"""
The IOB tagger used by the chunk parser.
"""
def __init__(self, train):
ClassifierBasedTagger.__init__(
self, train=train,
classifier_builder=self._classifier_builder)
def _classifier_builder(self, train):
return MaxentClassifier.train(train, algorithm='megam',
gaussian_prior_sigma=1,
trace=2)
def _english_wordlist(self):
try:
wl = self._en_wordlist
except AttributeError:
from nltk.corpus import words
self._en_wordlist = set(words.words('en-basic'))
wl = self._en_wordlist
return wl
def _feature_detector(self, tokens, index, history):
word = tokens[index][0]
pos = simplify_pos(tokens[index][1])
if index == 0:
prevword = prevprevword = None
prevpos = prevprevpos = None
prevshape = prevtag = prevprevtag = None
elif index == 1:
prevword = tokens[index-1][0].lower()
prevprevword = None
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = None
prevtag = history[index-1][0]
prevshape = prevprevtag = None
else:
prevword = tokens[index-1][0].lower()
prevprevword = tokens[index-2][0].lower()
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = simplify_pos(tokens[index-2][1])
prevtag = history[index-1]
prevprevtag = history[index-2]
prevshape = shape(prevword)
if index == len(tokens)-1:
nextword = nextnextword = None
nextpos = nextnextpos = None
elif index == len(tokens)-2:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = None
nextnextpos = None
else:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = tokens[index+2][0].lower()
nextnextpos = tokens[index+2][1].lower()
# 89.6
features = {
'bias': True,
'shape': shape(word),
'wordlen': len(word),
'prefix3': word[:3].lower(),
'suffix3': word[-3:].lower(),
'pos': pos,
'word': word,
'en-wordlist': (word in self._english_wordlist()),
'prevtag': prevtag,
'prevpos': prevpos,
'nextpos': nextpos,
'prevword': prevword,
'nextword': nextword,
'word+nextpos': '%s+%s' % (word.lower(), nextpos),
'pos+prevtag': '%s+%s' % (pos, prevtag),
'shape+prevtag': '%s+%s' % (prevshape, prevtag),
}
return features
class NEChunkParser(ChunkParserI):
"""
Expected input: list of pos-tagged words
"""
def __init__(self, train):
self._train(train)
def parse(self, tokens):
"""
Each token should be a pos-tagged word
"""
tagged = self._tagger.tag(tokens)
tree = self._tagged_to_parse(tagged)
return tree
def _train(self, corpus):
# Convert to tagged sequence
corpus = [self._parse_to_tagged(s) for s in corpus]
self._tagger = NEChunkParserTagger(train=corpus)
def _tagged_to_parse(self, tagged_tokens):
"""
Convert a list of tagged tokens to a chunk-parse tree.
"""
sent = Tree('S', [])
for (tok,tag) in tagged_tokens:
if tag == 'O':
sent.append(tok)
elif tag.startswith('B-'):
sent.append(Tree(tag[2:], [tok]))
elif tag.startswith('I-'):
if (sent and isinstance(sent[-1], Tree) and
sent[-1].node == tag[2:]):
sent[-1].append(tok)
else:
sent.append(Tree(tag[2:], [tok]))
return sent
@staticmethod
def _parse_to_tagged(sent):
"""
Convert a chunk-parse tree to a list of tagged tokens.
"""
toks = []
for child in sent:
if isinstance(child, Tree):
if len(child) == 0:
print("Warning -- empty chunk in sentence")
continue
toks.append((child[0], 'B-%s' % child.node))
for tok in child[1:]:
toks.append((tok, 'I-%s' % child.node))
else:
toks.append((child, 'O'))
return toks
def shape(word):
if re.match(r'[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word):
return 'number'
elif re.match(r'\W+$', word):
return 'punct'
elif re.match(r'[A-Z][a-z]+$', word):
return 'upcase'
elif re.match(r'[a-z]+$', word):
return 'downcase'
elif re.match(r'\w+$', word):
return 'mixedcase'
else:
return 'other'
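# Illustrative shape() values: shape('2021') -> 'number', shape('--') -> 'punct',
# shape('Twain') -> 'upcase', shape('wrote') -> 'downcase',
# shape('iPhone') -> 'mixedcase', shape('R2-D2') -> 'other'.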
def simplify_pos(s):
if s.startswith('V'): return "V"
else: return s.split('-')[0]
def postag_tree(tree):
# Part-of-speech tagging.
words = tree.leaves()
tag_iter = (pos for (word, pos) in pos_tag(words))
newtree = Tree('S', [])
for child in tree:
if isinstance(child, Tree):
newtree.append(Tree(child.node, []))
for subchild in child:
newtree[-1].append( (subchild, next(tag_iter)) )
else:
newtree.append( (child, next(tag_iter)) )
return newtree
def load_ace_data(roots, fmt='binary', skip_bnews=True):
for root in roots:
for root, dirs, files in os.walk(root):
if root.endswith('bnews') and skip_bnews:
continue
for f in files:
if f.endswith('.sgm'):
for sent in load_ace_file(os.path.join(root, f), fmt):
yield sent
def load_ace_file(textfile, fmt):
print(' - %s' % os.path.split(textfile)[1])
annfile = textfile+'.tmx.rdc.xml'
# Read the xml file, and get a list of entities
entities = []
xml = ET.parse(open(annfile)).getroot()
for entity in xml.findall('document/entity'):
typ = entity.find('entity_type').text
for mention in entity.findall('entity_mention'):
if mention.get('TYPE') != 'NAME': continue # only NEs
s = int(mention.find('head/charseq/start').text)
e = int(mention.find('head/charseq/end').text)+1
entities.append( (s, e, typ) )
# Read the text file, and mark the entities.
with open(textfile) as fp:
text = fp.read()
# Strip XML tags, since they don't count towards the indices
text = re.sub('<(?!/?TEXT)[^>]+>', '', text)
# Blank out anything before/after <TEXT>
def subfunc(m): return ' '*(m.end()-m.start()-6)
text = re.sub('[\s\S]*<TEXT>', subfunc, text)
text = re.sub('</TEXT>[\s\S]*', '', text)
# Simplify quotes
text = re.sub("``", ' "', text)
text = re.sub("''", '" ', text)
entity_types = set(typ for (s,e,typ) in entities)
# Binary distinction (NE or not NE)
if fmt == 'binary':
i = 0
toks = Tree('S', [])
for (s,e,typ) in sorted(entities):
if s < i: s = i # Overlapping! Deal with this better?
if e <= s: continue
toks.extend(word_tokenize(text[i:s]))
toks.append(Tree('NE', text[s:e].split()))
i = e
toks.extend(word_tokenize(text[i:]))
yield toks
# Multiclass distinction (NE type)
elif fmt == 'multiclass':
i = 0
toks = Tree('S', [])
for (s,e,typ) in sorted(entities):
if s < i: s = i # Overlapping! Deal with this better?
if e <= s: continue
toks.extend(word_tokenize(text[i:s]))
toks.append(Tree(typ, text[s:e].split()))
i = e
toks.extend(word_tokenize(text[i:]))
yield toks
else:
raise ValueError('bad fmt value')
# This probably belongs in a more general-purpose location (as does
# the parse_to_tagged function).
def cmp_chunks(correct, guessed):
correct = NEChunkParser._parse_to_tagged(correct)
guessed = NEChunkParser._parse_to_tagged(guessed)
ellipsis = False
for (w, ct), (w, gt) in zip(correct, guessed):
if ct == gt == 'O':
if not ellipsis:
print(" %-15s %-15s %s" % (ct, gt, w))
print(' %-15s %-15s %s' % ('...', '...', '...'))
ellipsis = True
else:
ellipsis = False
print(" %-15s %-15s %s" % (ct, gt, w))
def build_model(fmt='binary'):
print('Loading training data...')
train_paths = [find('corpora/ace_data/ace.dev'),
find('corpora/ace_data/ace.heldout'),
find('corpora/ace_data/bbn.dev'),
find('corpora/ace_data/muc.dev')]
train_trees = load_ace_data(train_paths, fmt)
train_data = [postag_tree(t) for t in train_trees]
print('Training...')
cp = NEChunkParser(train_data)
del train_data
print('Loading eval data...')
eval_paths = [find('corpora/ace_data/ace.eval')]
eval_trees = load_ace_data(eval_paths, fmt)
eval_data = [postag_tree(t) for t in eval_trees]
print('Evaluating...')
chunkscore = ChunkScore()
for i, correct in enumerate(eval_data):
guess = cp.parse(correct.leaves())
chunkscore.score(correct, guess)
if i < 3: cmp_chunks(correct, guess)
print(chunkscore)
outfilename = '/tmp/ne_chunker_%s.pickle' % fmt
print('Saving chunker to %s...' % outfilename)
with open(outfilename, 'wb') as out:
pickle.dump(cp, out, -1)
return cp
if __name__ == '__main__':
# Make sure that the pickled object has the right class name:
from nltk.chunk.named_entity import build_model
build_model('binary')
build_model('multiclass')
|
|
"""Kea Control Channel - socket"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_msg
import srv_control
from forge_cfg import world
@pytest.mark.v6
@pytest.mark.controlchannel
@pytest.mark.kea_only
def test_control_channel_socket_config_get():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.open_control_channel()
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_msg.send_ctrl_cmd_via_socket('{"command": "config-get","arguments": {}}')
# Using UNIX socket on server in path control_socket send {"command": "list-commands","arguments": {}}
# compare json result with config file
@pytest.mark.v6
@pytest.mark.controlchannel
@pytest.mark.kea_only
def test_control_channel_socket_config_test():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.open_control_channel()
# To global section of the config add file line: "expired-leases-processing":{"flush-reclaimed-timer-wait-time": 0,"hold-reclaimed-time": 0,"max-reclaim-leases": 100,"max-reclaim-time": 0,"reclaim-timer-wait-time": 0,"unwarned-reclaim-cycles": 5}
# To global section of the config add file line: "expired-leases-processing":{"flush-reclaimed-timer-wait-time": 0,"hold-reclaimed-time": 0,"max-reclaim-leases": 100,"max-reclaim-time": 0,"reclaim-timer-wait-time": 0,"unwarned-reclaim-cycles": 5}
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 96)
srv_control.open_control_channel('control_socket_ANOTHER_ONE')
srv_control.config_srv_id('LLT', '00:01:00:02:52:7b:a8:f0:08:00:27:58:f1:e8')
srv_control.config_srv_opt('sip-server-addr', '2001:db8::1,2001:db8::2')
srv_control.config_srv_opt('new-posix-timezone', 'EST5EDT4\\,M3.2.0/02:00\\,M11.1.0/02:00')
srv_control.host_reservation_in_subnet('ip-address',
'3000::1',
0,
'duid',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.build_config_files()
srv_msg.send_ctrl_cmd_via_socket('{"command": "config-test","arguments": $(DHCP_CONFIG) }')
# should be ok
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 96)
srv_control.open_control_channel('control_socket_ANOTHER_ONE')
srv_control.config_srv_id('LLT', '00:01:00:02:52:7b:a8:f0:08:00:27:58:f1:e8')
srv_control.config_srv_opt('sip-server-addr', '2001:db8::1,2001:db8::2')
srv_control.config_srv_opt('new-posix-timezone', 'EST5EDT4\\,M3.2.0/02:00\\,M11.1.0/02:00')
# WRONG ADDRESS RESERVATION
srv_control.host_reservation_in_subnet('ip-address',
'192.168.0.5',
0,
'duid',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.build_config_files()
srv_msg.send_ctrl_cmd_via_socket('{"command": "config-test","arguments": $(DHCP_CONFIG) }', exp_result=1)
# should NOT be ok
@pytest.mark.v6
@pytest.mark.controlchannel
@pytest.mark.kea_only
def test_control_channel_socket_config_write():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.open_control_channel()
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
srv_control.build_config_files()
srv_msg.send_ctrl_cmd_via_socket({"command": "config-set", "arguments": world.dhcp_cfg})
srv_msg.send_ctrl_cmd_via_socket({"command": "config-write",
"arguments": {"filename": world.f_cfg.get_dhcp_conf_path()}})
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
# Using UNIX socket on server in path control_socket send {"command": "list-commands","arguments": {}}
# Using UNIX socket on server in path control_socket send {"command": "config-write","parameters": { "filename": "abc"} }
# Using UNIX socket on server in path control_socket send {"command": "config-write","arguments": { "filename": "whatever"} }
# Using UNIX socket on server in path control_socket send {"command": "config-write","arguments": { "filename": "installed/git/etc/kea/kea.conf"} }
# Pause the Test.
srv_control.start_srv('DHCP', 'restarted')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:33')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
@pytest.mark.v6
@pytest.mark.controlchannel
@pytest.mark.kea_only
def test_control_channel_socket_config_reload():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::f')
srv_control.open_control_channel()
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
srv_msg.send_ctrl_cmd_via_socket('{"command": "list-commands","arguments": {}}')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:1::/64', '2001:db8:1::1-2001:db8:1::1')
srv_control.open_control_channel()
# Generate server configuration file.
srv_control.build_and_send_config_files()
srv_msg.send_ctrl_cmd_via_socket('{"command": "config-reload","arguments": {} }')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
srv_control.start_srv('DHCP', 'restarted')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:66:55:44:33:22:11')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::1')
|
|
from datetime import datetime
import logging
import re
from google.appengine.api import memcache
from google.appengine.api.labs.taskqueue import TaskAlreadyExistsError
from google.appengine.api.urlfetch import DownloadError
from google.appengine.ext import db
from google.appengine.ext import deferred
import simplejson as json
from twimonial.models import Data, Twimonial, TQI, User
from twimonial.util import fetch
import config
TWITTER_SEARCH_BASE_URI = config.TWITTER_SEARCH_BASE_URI
SEARCH_TWIMONIAL_URI = TWITTER_SEARCH_BASE_URI + '?rpp=100&q=%s' % config.TRACKING_HASHTAG.replace('#', '%23')
TWITTER_SHOW_URI = config.TWITTER_SHOW_URI
RE_TWIMONIAL = re.compile('(.*)%s ?@([_a-zA-Z0-9]+)$' % config.TRACKING_HASHTAG)
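# Example, assuming config.TRACKING_HASHTAG is '#twimonial' (as the comments below
# suggest): the tweet "Great mentor! #twimonial @alice" matches with
# group(1) == 'Great mentor! ' and group(2) == 'alice'.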
def get_twimonials():
# Uses Twitter Search API to get the testimonials
since_id = Data.read('since_id')
search_twimonial_uri = SEARCH_TWIMONIAL_URI
continue_search = memcache.get('continue_search')
if continue_search:
# next page
continue_search['page'] = continue_search['page'] + 1
search_twimonial_uri += '&page=%d&max_id=%d' % (continue_search['page'], continue_search['max_id'])
logging.debug('Continuing search: Page %d, max_id=%d' % (continue_search['page'] + 1, continue_search['max_id']))
else:
search_twimonial_uri += '&since_id=%s' % since_id
# Searching
try:
f = fetch(search_twimonial_uri, config.TWITTER_ID, config.TWITTER_PW)
except DownloadError:
logging.info('Caught Download Error on searching')
return
if f.status_code == 200:
# Parsing
logging.debug(f.content)
p_json = json.loads(f.content)
results = p_json['results']
if not results:
logging.debug('No twimonials')
return
# Starting processing
tqis = []
for t in results:
if t['text'].find('http') > -1:
# Possibly a link, skip
continue
# A twimonial? It must have to_user, and #twimonial must be at the end, or be in
# this form: 'blah blah #twimonial @user'
# Twitter would not supply to_user, identi.ca would set to_user to null
if 'to_user' not in t or t['to_user'] is None:
# Doesn't have to_user, so it is not an @reply
m = RE_TWIMONIAL.match(t['text'])
if not m:
continue
t['to_user'] = m.group(2)
t['to_user_id'] = 0
text = m.group(1).strip()
else:
if t['to_user_id'] == t['from_user_id']:
# Should not write a twimonial about oneself
continue
if not t['text'].lower().strip().endswith(config.TRACKING_HASHTAG):
# No #twimonial at the end of tweet
continue
# Remove @to_user and #twimonial
# 1+len(t['to_user']) => strips the leading '@to_user'
# -len(config.TRACKING_HASHTAG) => strips the trailing hashtag (e.g. -10 for '#twimonial')
text = t['text'].strip()[1+len(t['to_user']):-len(config.TRACKING_HASHTAG)].strip()
# For identi.ca, the ids are strings, e.g. '2009-12-15T20:24:28+0800'
t['from_user_id'] = int(t['from_user_id'])
t['to_user_id'] = int(t['to_user_id'])
# XXX from/to_user_id is not the real Twitter ID
# http://code.google.com/p/twitter-api/issues/detail?id=214
new_tqi = TQI(key_name=str(t['id']), # Just to prevent duplicates, in case
to_user=t['to_user'], to_user_id=t['to_user_id'],
from_user=t['from_user'], from_user_id=t['from_user_id'],
profile_image_url=t['profile_image_url'],
created_at=datetime.strptime(t['created_at'], '%a, %d %b %Y %H:%M:%S +0000'),
text=text, tweet_id=int(t['id']),
)
tqis.append(new_tqi)
db.put(tqis)
if continue_search:
if len(results) < 100 or continue_search['page'] >= 15:
# No more tweets
memcache.delete('continue_search')
logging.debug('%d twimonials stored, finished continuing searching' % len(tqis))
# Update since_id
Data.write('since_id', continue_search['max_id'])
else:
# Update continue search
memcache.set('continue_search', continue_search)
deferred.defer(get_twimonials, _countdown=5)
logging.debug('%d twimonials stored, rescheduled continuing searching' % len(tqis))
else:
if len(results) >= 100:
# Need to continue search
memcache.set('continue_search', {'page': 1, 'max_id': int(p_json['max_id'])})
deferred.defer(get_twimonials, _countdown=5)
logging.debug('%d twimonials stored, need to continue searching' % len(tqis))
else:
# Update since_id
Data.write('since_id', p_json['max_id'])
# elif f.status_code == 404:
# # since_id is too old
else:
if continue_search:
deferred.defer(get_twimonials, _countdown=5)
logging.error('Unable to continue searching, retry in 5 seconds, status_code: %d, content: %s'\
% (f.status_code, f.content))
else:
logging.error('Unable to search, status_code: %d, content: %s'\
% (f.status_code, f.content))
def process_TQI():
# Get oldest TQI
q = TQI.all()
q.order('created_at')
if q.count() == 0:
# Nothing to process
# deferred.defer(process_TQI, _countdown=config.TASK_PROCESS_TQI_INTERVAL)
# logging.debug('No TQIs, rescheduled')
logging.debug('No TQIs')
return
tqi = q.get()
# Check if the twimonial writer follows
logging.debug('Checking if %s follows %s...' % (tqi.from_user, tqi.to_user))
# Using IDs results in a 403
f = fetch(TWITTER_SHOW_URI % (tqi.from_user, tqi.to_user), config.TWITTER_ID, config.TWITTER_PW)
if f.status_code == 200:
p_json = json.loads(f.content)
if p_json['relationship']['source']['following']:
logging.debug('%s follows %s' % (tqi.from_user, tqi.to_user))
# XXX from/to_user_id is not the real Twitter ID
# http://code.google.com/p/twitter-api/issues/detail?id=214
# So must override with correct IDs from friends/show API
tqi.from_user_id = int(p_json['relationship']['source']['id'])
tqi.to_user_id = int(p_json['relationship']['target']['id'])
# Does follow
from_user, to_user = User.get_by_key_name([str(tqi.from_user_id),
str(tqi.to_user_id)])
if from_user:
if from_user.normalized_screen_name != tqi.from_user.lower() or \
from_user.profile_image_url != tqi.profile_image_url:
# screen_name and/or profile_image_url changes
from_user.screen_name = tqi.from_user
from_user.normalized_screen_name = tqi.from_user.lower()
from_user.profile_image_url = tqi.profile_image_url
from_user.put()
else:
from_user = User.add(tqi.from_user_id, tqi.from_user, tqi.profile_image_url)
if to_user:
to_user.check_profile_image()
if to_user.normalized_screen_name != tqi.to_user.lower():
# screen_name changes
to_user.screen_name = tqi.to_user
to_user.normalized_screen_name = tqi.to_user.lower()
to_user.put()
else:
to_user = User.add(tqi.to_user_id, tqi.to_user)
# Add or update twimonial
q = Twimonial.all()
q.filter('from_user =', from_user)
q.filter('to_user =', to_user)
t = q.get()
if t:
t.created_at = tqi.created_at
t.text = tqi.text
t.agrees = 0
t.scores = 0.0
t.tweet_id = tqi.tweet_id
else:
t = Twimonial(from_user=from_user, to_user=to_user, created_at=tqi.created_at, text=tqi.text, tweet_id=tqi.tweet_id)
to_user.incr_recvs()
t.put()
logging.debug('Twimonial saved')
else:
logging.debug('%s does not follow %s' % (tqi.from_user, tqi.to_user))
tqi.delete()
deferred.defer(process_TQI)
logging.debug('rescheduled')
elif f.status_code == 403:
# One or both are protected accounts, or do not exist; drop it
tqi.delete()
deferred.defer(process_TQI)
logging.debug('Got 403, TQI deleted, rescheduled')
else:
# Something goes wrong
logging.error('Unable to check follow, status_code: %d'\
% f.status_code)
# deferred.defer(process_TQI, _countdown=config.TASK_PROCESS_TQI_INTERVAL)
def queue_profile_image(key_name):
try:
# XXX see if _countdown=30 helps solve the recvs = 0 problem
deferred.defer(update_profile_image_url, key_name, _countdown=30)
#deferred.defer(update_profile_image_url, key_name, _name='update-profile-image-%s' % key_name)
except TaskAlreadyExistsError:
pass
def update_profile_image_url(key_name):
logging.debug('Task update_profile_image_url')
user = User.get_by_key_name(key_name)
if user:
user.update_profile_image_url()
def recount_recvs(key_name=None):
q = User.all()
if key_name:
q.filter('__key__ >', User.get_by_key_name(key_name).key())
u = q.get()
if not u:
# There is no user to recount
return
# TODO count more than 1000
q = Twimonial.all().filter('to_user =', u.key())
recvs = q.count()
if recvs != u.recvs:
u.recvs = recvs
u.put()
deferred.defer(recount_recvs, u.key().name())
|
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.common import utils as commonutils
from neutron.api.v2 import attributes
LOG = logging.getLogger(__name__)
class DhcpAgentNotifyAPI(n_rpc.RpcProxy):
"""API for plugin to notify DHCP agent."""
BASE_RPC_API_VERSION = '1.0'
# It seems the DHCP agent does not support bulk operations
VALID_RESOURCES = ['network', 'subnet', 'port']
VALID_METHOD_NAMES = ['network.create.end',
'network.update.end',
'network.delete.end',
'subnet.create.end',
'subnet.update.end',
'subnet.delete.end',
'port.create.end',
'port.update.end',
'port.delete.end']
def __init__(self, topic=topics.DHCP_AGENT, plugin=None):
super(DhcpAgentNotifyAPI, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self._plugin = plugin
self.conf = cfg.CONF
@property
def plugin(self):
if self._plugin is None:
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
def _schedule_network(self, context, network, existing_agents):
"""Schedule the network to new agents
:return: all agents associated with the network
"""
new_agents = self.plugin.schedule_network(context, network) or []
if new_agents:
for agent in new_agents:
self._cast_message(
context, 'network_create_end',
{'network': {'id': network['id']}}, agent['host'])
elif not existing_agents:
LOG.warn(_('Unable to schedule network %s: no agents available; '
'will retry on subsequent port creation events.'),
network['id'])
return new_agents + existing_agents
def _get_enabled_agents(self, context, network, agents, method, payload):
"""Get the list of agents whose admin_state is UP."""
network_id = network['id']
enabled_agents = [x for x in agents if x.admin_state_up]
active_agents = [x for x in agents if x.is_active]
len_enabled_agents = len(enabled_agents)
len_active_agents = len(active_agents)
if len_active_agents < len_enabled_agents:
LOG.warn(_("Only %(active)d of %(total)d DHCP agents associated "
"with network '%(net_id)s' are marked as active, so "
" notifications may be sent to inactive agents.")
% {'active': len_active_agents,
'total': len_enabled_agents,
'net_id': network_id})
if not enabled_agents:
num_ports = self.plugin.get_ports_count(
context, {'network_id': [network_id]})
notification_required = (
num_ports > 0 and len(network['subnets']) >= 1)
if notification_required:
LOG.error(_("Will not send event %(method)s for network "
"%(net_id)s: no agent available. Payload: "
"%(payload)s")
% {'method': method,
'net_id': network_id,
'payload': payload})
return enabled_agents
def _is_reserved_dhcp_port(self, port):
return port.get('device_id') == constants.DEVICE_ID_RESERVED_DHCP_PORT
def _is_dhcp_port(self, port):
return port.get('device_owner') == constants.DEVICE_OWNER_DHCP
def _notify_agents(self, context, method, payload, network_id):
"""Notify all the agents that are hosting the network."""
# fanout is required as we do not know who is "listening"
no_agents = not utils.is_extension_supported(
self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
fanout_required = method == 'network_delete_end' or no_agents
# we do nothing on network creation because we want to give the
# admin the chance to associate an agent to the network manually
cast_required = method != 'network_create_end'
if fanout_required:
self._fanout_message(context, method, payload)
elif cast_required:
admin_ctx = (context if context.is_admin else context.elevated())
network = self.plugin.get_network(admin_ctx, network_id)
agents = self.plugin.get_dhcp_agents_hosting_networks(
context, [network_id])
# schedule the network first, if needed
schedule_required = (
method == 'port_create_end' and
not self._is_reserved_dhcp_port(payload['port']))
if schedule_required:
agents = self._schedule_network(admin_ctx, network, agents)
enabled_agents = self._get_enabled_agents(
context, network, agents, method, payload)
for agent in enabled_agents:
self._cast_message(
context, method, payload, agent.host, agent.topic)
def _notify_port_create_or_update(self, context, port, method, payload, network_id):
host_id = port.get('binding:host_id', '')
if self.plugin.distributed_add_network_to_host(context, network_id, host_id):
self._cast_message(context, method, payload, host_id, topics.DHCP_AGENT)
def _notify_port_delete(self, context, port, method, payload, network_id):
host_id = port.get('binding:host_id', '')
if host_id:
self._cast_message(context, 'port_delete_end', {'port_id': port['id']}, host_id)
self._remove_network_from_host(context, network_id, host_id)
def _distributed_notify_agents(self, context, method, payload, network_id):
LOG.debug('_distributed_notify_agents_method:%s, payload:%s, network_id:%s',
method, payload, network_id)
if method == 'port_create_end':
port = payload['port']
self._notify_port_create_or_update(context, port, method, payload, network_id)
elif method == 'port_update_end':
port = payload['port']
self._notify_port_create_or_update(context, port, method, payload, network_id)
"""for migration scene"""
if payload.has_key('old_port'):
old_port = payload['old_port']
self._notify_port_delete(context, old_port, 'port_delete_end', {'port_id': old_port['id']}, network_id)
elif method == 'port_delete_end':
port = payload['port']
self._notify_port_delete(context, port, 'port_delete_end', payload, network_id)
elif method == 'subnet_update_end':
self._fanout_message(context, method, payload)
elif method == 'subnet_delete_end':
self._fanout_message(context, method, {'subnet_id': payload['subnet']['id']})
elif method == 'network_delete_end':
network = payload['network']
host = network.get('host', '')
if host:
self._cast_message(context, 'network_delete_end', {'network_id': payload['network']['id']}, host)
else:
self._fanout_message(context, method, {'network_id': payload['network']['id']})
def _remove_network_from_host(self, context, network_id, host_id):
port_filters = {
'network_id': [network_id],
'binding:host_id': [host_id]
}
if self.plugin.get_ports_count(context, port_filters) == 0:
self.plugin.distributed_remove_network_from_host(context, network_id, host_id)
def _cast_message(self, context, method, payload, host,
topic=topics.DHCP_AGENT):
"""Cast the payload to the dhcp agent running on the host."""
self.cast(
context, self.make_msg(method,
payload=payload),
topic='%s.%s' % (topic, host))
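# For example, a 'port_create_end' cast to host 'compute-1' goes out on the topic
# '%s.compute-1' % topics.DHCP_AGENT (typically 'dhcp_agent.compute-1').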
def _fanout_message(self, context, method, payload):
"""Fanout the payload to all dhcp agents."""
self.fanout_cast(
context, self.make_msg(method,
payload=payload),
topic=topics.DHCP_AGENT)
def network_removed_from_agent(self, context, network_id, host):
self._cast_message(context, 'network_delete_end',
{'network_id': network_id}, host)
def network_added_to_agent(self, context, network_id, host):
self._cast_message(context, 'network_create_end',
{'network': {'id': network_id}}, host)
def agent_updated(self, context, admin_state_up, host):
self._cast_message(context, 'agent_updated',
{'admin_state_up': admin_state_up}, host)
def _distributed_notify(self, context, data, method_name):
if 'network' in data:
network_id = data['network']['id']
elif 'port' in data:
network_id = data['port']['network_id']
elif 'subnet' in data:
network_id = data['subnet']['network_id']
else:
return
method_name = method_name.replace(".", "_")
self._distributed_notify_agents(context, method_name, data, network_id)
def notify(self, context, data, method_name):
# data is {'key' : 'value'} with only one key
if method_name not in self.VALID_METHOD_NAMES:
return
if self.conf.dhcp_distributed:
self._distributed_notify(context, data, method_name)
return
obj_type = data.keys()[0]
if obj_type not in self.VALID_RESOURCES:
return
obj_value = data[obj_type]
network_id = None
if obj_type == 'network' and 'id' in obj_value:
network_id = obj_value['id']
elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value:
network_id = obj_value['network_id']
if not network_id:
return
method_name = method_name.replace(".", "_")
if method_name.endswith("_delete_end"):
if 'id' in obj_value:
self._notify_agents(context, method_name,
{obj_type + '_id': obj_value['id']},
network_id)
else:
self._notify_agents(context, method_name, data, network_id)
|
|
# getTypeOf(scope,fqn) and getTypeOfExpr(scope,ast)
from bike.parsing.fastparserast import Class, Function, Module, Root, getRoot, Package, Instance, getModule
from bike.parsing.parserutils import generateLogicalLines, makeLineParseable,splitLogicalLines, makeLineParseable
from bike.parsing import visitor
from bike import log
from bike.parsing.newstuff import getModuleOrPackageUsingFQN
from bike.parsing.pathutils import getPackageBaseDirectory
from bike.parsing.load import Cache
import os
import re
import compiler
# used if an assignment exists, but the type can't be found
# e.g. a = SomeFunctionNotLoaded()
# (as opposed to 'None' if no assignment can be found)
class UnfoundType: pass
getTypeOfStack = []
# name is the fqn of the reference; scope is the scope ast object from
# which the question is being asked.
# Returns a fastparser-ast object representing the type,
# or None if the type is not found.
def getTypeOf(scope, fqn):
if isinstance(scope, Root):
assert False, "Can't use getTypeOf to resolve from Root. Use getModuleOrPackageUsingFQN instead"
#print "getTypeOf:"+fqn+" -- "+str(scope)
#print
#print str(getTypeOfStack)
#print
if (fqn,scope) in getTypeOfStack: # loop protection
return None
# this is crap!
hashcode = str(scope)+fqn
try:
getTypeOfStack.append((fqn,scope))
try:
type = Cache.instance.typecache[hashcode]
except KeyError:
type = getTypeOf_impl(scope, fqn)
Cache.instance.typecache[hashcode] = type
return type
finally:
del getTypeOfStack[-1]
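# Illustrative (hypothetical) usage: given the Module ast node for a file defining
# 'class Foo' with a method 'bar', getTypeOf(module, "Foo") returns the Class node,
# and getTypeOf(module, "Foo.bar") recurses through that Class scope to the Function.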
def getTypeOf_impl(scope, fqn):
#print "getTypeOf_impl",scope,fqn
if fqn == "None":
return None
if "."in fqn:
rcdr = ".".join(fqn.split(".")[:-1])
rcar = fqn.split(".")[-1]
newscope = getTypeOf(scope,rcdr)
if newscope is not None:
return getTypeOf(newscope, rcar)
else:
#print "couldnt find "+rcdr+" in "+str(scope)
pass
assert scope is not None
#assert not ("." in fqn)
if isinstance(scope,UnfoundType):
return UnfoundType()
if isinstance(scope, Package):
#assert 0,scope
return handlePackageScope(scope, fqn)
elif isinstance(scope,Instance):
return handleClassInstanceAttribute(scope, fqn)
else:
return handleModuleClassOrFunctionScope(scope,fqn)
def handleModuleClassOrFunctionScope(scope,name):
if name == "self" and isinstance(scope,Function) and \
isinstance(scope.getParent(),Class):
return Instance(scope.getParent())
matches = [c for c in scope.getChildNodes() if c.name == name]
if matches != []:
return matches[0]
type = scanScopeSourceForType(scope, name)
if type is not None:
return type
#print "name = ",name,"scope = ",scope
type = getImportedType(scope, name) # try imported types
#print "type=",type
if type is not None:
return type
parentScope = scope.getParent()
while isinstance(parentScope,Class):
# don't search class scope, since this is not accessible except
# through self (is this true?)
parentScope = parentScope.getParent()
if not (isinstance(parentScope,Package) or isinstance(parentScope,Root)):
return getTypeOf(parentScope, name)
def handleClassInstanceAttribute(instance, attrname):
theClass = instance.getType()
# search methods and inner classes
match = theClass.getChild(attrname)
if match:
return match
#search methods for assignments with self.foo getattrs
for child in theClass.getChildNodes():
if not isinstance(child,Function):
continue
res = scanScopeAST(child,attrname,
SelfAttributeAssignmentVisitor(child,attrname))
if res is not None:
return res
def handlePackageScope(package, fqn):
#print "handlePackageScope",package,fqn
child = package.getChild(fqn)
if child:
return child
if isinstance(package,Root):
return getModuleOrPackageUsingFQN(fqn)
# try searching the fs
node = getModuleOrPackageUsingFQN(fqn,package.path)
if node:
return node
# try the package init module
initmod = package.getChild("__init__")
if initmod is not None:
type = getImportedType(initmod, fqn)
if type:
return type
# maybe fqn is absolute
return getTypeOf(getRoot(), fqn)
wordRE = re.compile("\w+")
def isWordInLine(word, line):
if line.find(word) != -1:
words = wordRE.findall(line)
if word in words:
return 1
return 0
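# For example, isWordInLine("foo", "import foo.bar\n") == 1, whereas
# isWordInLine("foo", "import foobar\n") == 0 (a substring match alone is not enough).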
def getImportedType(scope, fqn):
lines = scope.module.getSourceNode().getLines()
for lineno in scope.getImportLineNumbers():
logicalline = generateLogicalLines(lines[lineno-1:]).next()
logicalline = makeLineParseable(logicalline)
ast = compiler.parse(logicalline)
match = visitor.walk(ast, ImportVisitor(scope,fqn)).match
if match:
return match
#else loop
class ImportVisitor:
def __init__(self,scope,fqn):
self.match = None
self.targetfqn = fqn
self.scope = scope
def visitImport(self, node):
# if target fqn is an import, then it must be a module or package
for name, alias in node.names:
if name == self.targetfqn:
self.match = resolveImportedModuleOrPackage(self.scope,name)
elif alias is not None and alias == self.targetfqn:
self.match = resolveImportedModuleOrPackage(self.scope,name)
def visitFrom(self, node):
if node.names[0][0] == '*': # e.g. from foo import *
if not "."in self.targetfqn:
module = resolveImportedModuleOrPackage(self.scope,
node.modname)
if module:
self.match = getTypeOf(module, self.targetfqn)
else:
for name, alias in node.names:
if alias == self.targetfqn or \
(alias is None and name == self.targetfqn):
scope = resolveImportedModuleOrPackage(self.scope,
node.modname)
if scope is not None:
if isinstance(scope,Package):
self.match = getModuleOrPackageUsingFQN(name,scope.path)
else:
assert isinstance(scope,Module)
self.match = getTypeOf(scope, name)
class TypeNotSupportedException:
def __init__(self,msg):
self.msg = msg
def __str__(self):
return self.msg
# attempts to evaluate the type of the expression
def getTypeOfExpr(scope, ast):
if isinstance(ast, compiler.ast.Name):
return getTypeOf(scope, ast.name)
elif isinstance(ast, compiler.ast.Getattr) or \
isinstance(ast, compiler.ast.AssAttr):
# need to do this in order to match foo.bah.baz as
# a string in import statements
fqn = attemptToConvertGetattrToFqn(ast)
if fqn is not None:
return getTypeOf(scope,fqn)
expr = getTypeOfExpr(scope, ast.expr)
if expr is not None:
attrnametype = getTypeOf(expr, ast.attrname)
return attrnametype
return None
elif isinstance(ast, compiler.ast.CallFunc):
node = getTypeOfExpr(scope,ast.node)
if isinstance(node,Class):
return Instance(node)
elif isinstance(node,Function):
return getReturnTypeOfFunction(node)
else:
#raise TypeNotSupportedException, \
# "Evaluation of "+str(ast)+" not supported. scope="+str(scope)
print >> log.warning, "Evaluation of "+str(ast)+" not supported. scope="+str(scope)
return None
def attemptToConvertGetattrToFqn(ast):
fqn = ast.attrname
ast = ast.expr
while isinstance(ast,compiler.ast.Getattr):
fqn = ast.attrname + "." + fqn
ast = ast.expr
if isinstance(ast,compiler.ast.Name):
return ast.name + "." + fqn
else:
return None
getReturnTypeOfFunction_stack = []
def getReturnTypeOfFunction(function):
if function in getReturnTypeOfFunction_stack: # loop protection
return None
try:
getReturnTypeOfFunction_stack.append(function)
return getReturnTypeOfFunction_impl(function)
finally:
del getReturnTypeOfFunction_stack[-1]
def getReturnTypeOfFunction_impl(function):
return scanScopeAST(function,"return",ReturnTypeVisitor(function))
# does parse of scope sourcecode to deduce type
def scanScopeSourceForType(scope, name):
return scanScopeAST(scope,name,AssignmentVisitor(scope,name))
# scans for lines containing keyword, and then runs the visitor over
# the parsed AST for that line
def scanScopeAST(scope,keyword,astvisitor):
lines = scope.getLinesNotIncludingThoseBelongingToChildScopes()
src = ''.join(lines)
match = None
#print "scanScopeAST:"+str(scope)
for line in splitLogicalLines(src):
if isWordInLine(keyword, line):
#print "scanning for "+keyword+" in line:"+line[:-1]
doctoredline = makeLineParseable(line)
ast = compiler.parse(doctoredline)
match = visitor.walk(ast,astvisitor).getMatch()
if match:
return match
return match
class AssignmentVisitor:
def __init__(self,scope,targetName):
self.match=None
self.scope = scope
self.targetName = targetName
def getMatch(self):
return self.match
def visitAssign(self,node):
if isinstance(node.expr,compiler.ast.CallFunc):
for assnode in node.nodes:
if isinstance(assnode,compiler.ast.AssName) and \
assnode.name == self.targetName:
self.match = getTypeOfExpr(self.scope,node.expr)
if self.match is None:
self.match = UnfoundType()
class SelfAttributeAssignmentVisitor:
def __init__(self,scope,targetName):
self.match=None
self.scope = scope
self.targetName = targetName
def getMatch(self):
return self.match
def visitAssign(self,node):
if isinstance(node.expr,compiler.ast.CallFunc):
for assnode in node.nodes:
if isinstance(assnode,compiler.ast.AssAttr) and \
isinstance(assnode.expr,compiler.ast.Name) and \
assnode.expr.name == "self" and \
assnode.attrname == self.targetName:
self.match = getTypeOfExpr(self.scope,node.expr)
#print "here!",self.match.getType().fqn
class ReturnTypeVisitor:
def __init__(self,fn):
self.match=None
self.fn = fn
def getMatch(self):
return self.match
def visitReturn(self,node):
try:
self.match = getTypeOfExpr(self.fn,node.value)
except TypeNotSupportedException, ex:
pass
def resolveImportedModuleOrPackage(scope,fqn):
# try searching from directory containing scope module
path = os.path.dirname(scope.module.filename)
node = getModuleOrPackageUsingFQN(fqn,path)
if node is not None:
return node
# try searching in same package hierarchy
basedir = getPackageBaseDirectory(scope.module.filename)
if fqn.split('.')[0] == os.path.split(basedir)[-1]:
# base package in fqn matches base directory
restOfFqn = ".".join(fqn.split('.')[1:])
node = getModuleOrPackageUsingFQN(restOfFqn,basedir)
if node is not None:
return node
# try searching the python path
node = getModuleOrPackageUsingFQN(fqn)
if node is not None:
return node
|
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
"""
Java support
Javac is one of the few compilers that behaves very badly:
* it outputs files where it wants to (-d is only for the package root)
* it recompiles files silently behind your back
* it outputs an undefined number of files (inner classes)
Fortunately, the convention makes it possible to use the build dir without
too many problems for the moment.
Inner classes must be located and cleaned up when a problem arises;
for the moment waf does not track the production of inner classes.
Adding all the files to a task and executing it whenever any of the input files
change would only hurt compilation times.
Compilation can be run using Jython[1] rather than regular Python. Instead of
running one of the following commands:
./waf configure
python waf configure
You would have to run:
java -jar /path/to/jython.jar waf configure
[1] http://www.jython.org/
"""
import os, re
from Configure import conf
import TaskGen, Task, Utils, Options, Build
from TaskGen import feature, before, taskgen
class_check_source = '''
public class Test {
public static void main(String[] argv) {
Class lib;
if (argv.length < 1) {
System.err.println("Missing argument");
System.exit(77);
}
try {
lib = Class.forName(argv[0]);
} catch (ClassNotFoundException e) {
System.err.println("ClassNotFoundException");
System.exit(1);
}
lib = null;
System.exit(0);
}
}
'''
@feature('jar')
@before('apply_core')
def jar_files(self):
basedir = getattr(self, 'basedir', '.')
destfile = getattr(self, 'destfile', 'test.jar')
jaropts = getattr(self, 'jaropts', [])
jarcreate = getattr(self, 'jarcreate', 'cf')
dir = self.path.find_dir(basedir)
if not dir: raise Utils.WafError('basedir %r not found' % basedir)
jaropts.append('-C')
jaropts.append(dir.abspath(self.env))
jaropts.append('.')
out = self.path.find_or_declare(destfile)
tsk = self.create_task('jar_create')
tsk.set_outputs(out)
tsk.inputs = [x for x in dir.find_iter(src=0, bld=1) if x.id != out.id]
tsk.env['JAROPTS'] = jaropts
tsk.env['JARCREATE'] = jarcreate
@feature('javac')
@before('apply_core')
def apply_java(self):
Utils.def_attrs(self, jarname='', jaropts='', classpath='',
sourcepath='.', srcdir='.', source_re='**/*.java',
jar_mf_attributes={}, jar_mf_classpath=[])
if getattr(self, 'source_root', None):
# old stuff
self.srcdir = self.source_root
nodes_lst = []
if not self.classpath:
if not self.env['CLASSPATH']:
self.env['CLASSPATH'] = '..' + os.pathsep + '.'
else:
self.env['CLASSPATH'] = self.classpath
srcdir_node = self.path.find_dir(self.srcdir)
if not srcdir_node:
raise Utils.WafError('could not find srcdir %r' % self.srcdir)
src_nodes = [x for x in srcdir_node.ant_glob(self.source_re, flat=False)]
bld_nodes = [x.change_ext('.class') for x in src_nodes]
self.env['OUTDIR'] = [srcdir_node.abspath(self.env)]
tsk = self.create_task('javac')
tsk.set_inputs(src_nodes)
tsk.set_outputs(bld_nodes)
if getattr(self, 'compat', None):
tsk.env.append_value('JAVACFLAGS', ['-source', self.compat])
if hasattr(self, 'sourcepath'):
fold = [self.path.find_dir(x) for x in self.to_list(self.sourcepath)]
names = os.pathsep.join([x.srcpath() for x in fold])
else:
names = srcdir_node.srcpath()
if names:
tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names])
if self.jarname:
jtsk = self.create_task('jar_create', bld_nodes, self.path.find_or_declare(self.jarname))
jtsk.set_run_after(tsk)
if not self.env.JAROPTS:
if self.jaropts:
self.env.JAROPTS = self.jaropts
else:
dirs = '.'
self.env.JAROPTS = ['-C', ''.join(self.env['OUTDIR']), dirs]
Task.simple_task_type('jar_create', '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}', color='GREEN')
cls = Task.simple_task_type('javac', '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}')
cls.color = 'BLUE'
def post_run_javac(self):
"""this is for cleaning the folder
javac creates single files for inner classes
but it is not possible to know which inner classes in advance"""
par = {}
for x in self.inputs:
par[x.parent.id] = x.parent
inner = {}
for k in par.values():
path = k.abspath(self.env)
lst = os.listdir(path)
for u in lst:
if u.find('$') >= 0:
inner_class_node = k.find_or_declare(u)
inner[inner_class_node.id] = inner_class_node
to_add = set(inner.keys()) - set([x.id for x in self.outputs])
for x in to_add:
self.outputs.append(inner[x])
return Task.Task.post_run(self)
cls.post_run = post_run_javac
def detect(conf):
# If JAVA_HOME is set, we prepend its bin directory to the path list
java_path = conf.environ['PATH'].split(os.pathsep)
v = conf.env
if 'JAVA_HOME' in conf.environ:
java_path = [os.path.join(conf.environ['JAVA_HOME'], 'bin')] + java_path
conf.env['JAVA_HOME'] = [conf.environ['JAVA_HOME']]
for x in 'javac java jar'.split():
conf.find_program(x, var=x.upper(), path_list=java_path)
conf.env[x.upper()] = conf.cmd_to_list(conf.env[x.upper()])
v['JAVA_EXT'] = ['.java']
if 'CLASSPATH' in conf.environ:
v['CLASSPATH'] = conf.environ['CLASSPATH']
if not v['JAR']: conf.fatal('jar is required for making java packages')
if not v['JAVAC']: conf.fatal('javac is required for compiling java classes')
v['JARCREATE'] = 'cf' # can use cvf
@conf
def check_java_class(self, classname, with_classpath=None):
"""Check if the specified java class is installed"""
import shutil
javatestdir = '.waf-javatest'
classpath = javatestdir
if self.env['CLASSPATH']:
classpath += os.pathsep + self.env['CLASSPATH']
if isinstance(with_classpath, str):
classpath += os.pathsep + with_classpath
shutil.rmtree(javatestdir, True)
os.mkdir(javatestdir)
java_file = open(os.path.join(javatestdir, 'Test.java'), 'w')
java_file.write(class_check_source)
java_file.close()
# Compile the source
Utils.exec_command(self.env['JAVAC'] + [os.path.join(javatestdir, 'Test.java')], shell=False)
# Try to run the app
cmd = self.env['JAVA'] + ['-cp', classpath, 'Test', classname]
self.log.write("%s\n" % str(cmd))
found = Utils.exec_command(cmd, shell=False, log=self.log)
self.check_message('Java class %s' % classname, "", not found)
shutil.rmtree(javatestdir, True)
return found
@conf
def check_jni_headers(conf):
"""
Check for jni headers and libraries
On success the environment variable xxx_JAVA is added for uselib
"""
if not conf.env.CC_NAME and not conf.env.CXX_NAME:
conf.fatal('load a compiler first (gcc, g++, ..)')
if not conf.env.JAVA_HOME:
conf.fatal('set JAVA_HOME in the system environment')
# jni requires the jvm
javaHome = conf.env['JAVA_HOME'][0]
b = Build.BuildContext()
b.load_dirs(conf.srcdir, conf.blddir)
dir = b.root.find_dir(conf.env.JAVA_HOME[0] + '/include')
f = dir.ant_glob('**/(jni|jni_md).h', flat=False)
incDirs = [x.parent.abspath() for x in f]
dir = b.root.find_dir(conf.env.JAVA_HOME[0])
f = dir.ant_glob('**/*jvm.(so|dll)', flat=False)
libDirs = [x.parent.abspath() for x in f] or [javaHome]
for i, d in enumerate(libDirs):
if conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm',
libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA'):
break
else:
conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
|
|
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from .general_feature_format import general_feature_format
from python_statistics.calculate_interface import calculate_interface
from .genome_annotations import genome_annotations
import json
class gff_coverage(general_feature_format):
def __init__(self,gff_file_I = None,plus_I = None,minus_I = None,
plus_high_regions_I = None, minus_high_regions_I = None,
coverage_I = None,coverageStats_I = None,
amplifications_I = None,amplificationStats_I = None,
amplificationAnnotations_I = None):
if gff_file_I:
self.gff_file = gff_file_I;
else:
self.gff_file = None;
if plus_I:
self.plus = plus_I;
else:
self.plus = None;
if minus_I:
self.minus = minus_I;
else:
self.minus = None;
if plus_high_regions_I:
self.plus_high_regions = plus_high_regions_I;
else:
self.plus_high_regions = None;
if minus_high_regions_I:
self.minus_high_regions = minus_high_regions_I;
else:
self.minus_high_regions = None;
if coverage_I:
self.coverage = coverage_I;
else:
self.coverage = [];
if coverageStats_I:
self.coverageStats = coverageStats_I;
else:
self.coverageStats = [];
if amplifications_I:
self.amplifications = amplifications_I;
else:
self.amplifications = [];
if amplificationStats_I:
self.amplificationStats = amplificationStats_I;
else:
self.amplificationStats = [];
if amplificationAnnotations_I:
self.amplificationAnnotations = amplificationAnnotations_I;
else:
self.amplificationAnnotations = [];
def find_amplifications_fromGff(self,gff_file,
strand_start, strand_stop,
experiment_id_I = None,
sample_name_I = None,
scale_factor=True, downsample_factor=0,
reads_min=1.5,reads_max=5.0,
indices_min=200,consecutive_tol=10):
"""find amplifications from the gff file
INPUT:
strand_start = index of the start position
strand_stop = index of the stop position
scale_factor = boolean; if True, reads will be normalized to a maximum of 100
downsample_factor = integer; factor by which to downsample the points
reads_min = minimum number of reads to identify an amplification
reads_max = maximum number of reads to identify an amplification
indices_min = minimum number of points in a high coverage region
consecutive_tol = maximum number of consecutive points that do not meet the reads_min/max criteria but can still be included in a high coverage region
OPTIONAL INPUT:
experiment_id_I = tag for the experiment from which the sample came
sample_name_I = tag for the sample name
"""
data_O=[];
experiment_id = experiment_id_I;
sn = sample_name_I;
# get the data_dir
self.set_gffFile(gff_file);
# extract the strands
self.extract_strandsFromGff(strand_start, strand_stop, scale=scale_factor, downsample=downsample_factor)
# find high coverage regions
plus_high_region_indices,minus_high_region_indices = self.find_highCoverageRegions(coverage_min=reads_min,coverage_max=reads_max,points_min=indices_min,consecutive_tol=consecutive_tol);
# record high coverage regions
# + strand
iter = 0;
for index,reads in self.plus_high_regions.iteritems():
if index > plus_high_region_indices[iter]['stop']:
iter+=1;
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'+',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':int(plus_high_region_indices[iter]['start']),
'amplification_stop':int(plus_high_region_indices[iter]['stop']),
'used_':True,
'comment_':None
});
# - strand
iter = 0;
for index,reads in self.minus_high_regions.iteritems():
if index > minus_high_region_indices[iter]['stop']:
iter+=1;
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'-',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':int(minus_high_region_indices[iter]['start']),
'amplification_stop':int(minus_high_region_indices[iter]['stop']),
'used_':True,
'comment_':None
});
self.amplifications = data_O;
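# Minimal usage sketch (hypothetical file name and strand coordinates):
#   gff = gff_coverage()
#   gff.find_amplifications_fromGff('sample.gff', 0, 4639675,
#                                   experiment_id_I='exp01', sample_name_I='sample01')
#   gff.export_amplifications('amplifications.csv')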
def _get_strandsByStrand(self,data_I,strand_I):
"""return all data for the given strand"""
data_O = [];
for d in data_I:
if d['genome_strand'] == strand_I:
data_O.append(d);
return data_O;
def _get_strandsByChromosomeAndStrand(self,data_I,chromosome_I,strand_I):
"""return all data for the given chromosome and strand"""
data_O = [];
for d in data_I:
if d['genome_chromosome'] == chromosome_I and d['genome_strand'] == strand_I:
data_O.append(d);
return data_O;
def import_coverage(self,filename):
'''import coverage from csv'''
io = base_importData();
io.read_csv(filename);
self.coverage=io.data;
def import_coverageStats(self,filename):
'''import coverageStats from csv'''
io = base_importData();
io.read_csv(filename);
self.coverageStats=io.data;
def import_amplifications(self,filename):
'''import amplifications from csv'''
io = base_importData();
io.read_csv(filename);
self.amplifications=io.data;
def import_amplificationStats(self,filename):
'''import amplificationStats from csv'''
io = base_importData();
io.read_csv(filename);
self.amplificationStats=io.data;
def import_amplificationAnnotations(self,filename):
'''import amplificationAnnotations from csv'''
io = base_importData();
io.read_csv(filename);
self.amplificationAnnotations=io.data;
def export_coverage(self,filename_O):
"""export coverage"""
io = base_exportData(self.coverage);
io.write_dict2csv(filename_O);
def export_coverageStats(self,filename_O):
"""export coverageStats"""
io = base_exportData(self.coverageStats);
io.write_dict2csv(filename_O);
def export_amplifications(self,filename_O):
"""export amplifications"""
io = base_exportData(self.amplifications);
io.write_dict2csv(filename_O);
def export_amplificationStats(self,filename_O):
"""export amplificationStats"""
io = base_exportData(self.amplificationStats);
io.write_dict2csv(filename_O);
def export_amplificationAnnotations(self,filename_O):
"""export amplificationAnnotations"""
io = base_exportData(self.amplificationAnnotations);
io.write_dict2csv(filename_O);
def clear_data(self):
self.gff_file = None;
self.minus = None;
self.plus = None;
self.plus_high_regions = None;
self.minus_high_regions = None;
del self.coverage[:];
del self.coverageStats[:];
del self.amplifications[:];
del self.amplificationStats[:];
del self.amplificationAnnotations[:];
def findAndCalculate_amplificationStats_fromGff(self,gff_file,
strand_start, strand_stop,
experiment_id_I = None,
sample_name_I = None,
scale_factor=True, downsample_factor=0,
reads_min=1.5,reads_max=5.0,
indices_min=200,consecutive_tol=10):
"""find amplifications from the gff file and calculate their statistics
INPUT:
strand_start = index of the start position
strand_stop = index of the stop position
scale_factor = boolean; if True, reads will be normalized to a maximum of 100
downsample_factor = integer; factor by which to downsample the points
reads_min = minimum number of reads to identify an amplification
reads_max = maximum number of reads to identify an amplification
indices_min = minimum number of points in a high coverage region
consecutive_tol = maximum number of consecutive points that do not meet the reads_min/max criteria but can still be included in a high coverage region
OPTIONAL INPUT:
experiment_id_I = tag for the experiment from which the sample came
sample_name_I = tag for the sample name
"""
data_O=[];
stats_O=[];
experiment_id = experiment_id_I;
sn = sample_name_I;
calculate = calculate_interface();
# get the data_dir
self.set_gffFile(gff_file);
# extract the strands
self.extract_strandsFromGff(strand_start, strand_stop, scale=scale_factor, downsample=0)
# find high coverage regions
plus_high_region_indices,minus_high_region_indices = self.find_highCoverageRegions(coverage_min=reads_min,coverage_max=reads_max,points_min=indices_min,consecutive_tol=consecutive_tol);
# record the means for later use
plus_mean,minus_mean = self.plus.mean(),self.minus.mean();
plus_min,minus_min = self.plus.min(),self.minus.min();
plus_max,minus_max = self.plus.max(),self.minus.max();
# calculate stats on the high coverage regions
# + strand
for row_cnt,row in enumerate(plus_high_region_indices):
plus_region = self.plus_high_regions[(self.plus_high_regions.index>=row['start']) & (self.plus_high_regions.index<=row['stop'])]
# calculate using scipy
data_ave_O, data_var_O, data_lb_O, data_ub_O = None, None, None, None;
data_ave_O, data_var_O, data_lb_O, data_ub_O = calculate.calculate_ave_var(plus_region.values,confidence_I = 0.95);
# calculate the interquartile range
min_O, max_O, median_O, iq_1_O, iq_3_O = None, None, None, None, None;
min_O, max_O, median_O, iq_1_O, iq_3_O=calculate.calculate_interquartiles(plus_region.values);
# record data
stats_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1,
'genome_strand':'plus',
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads_min':min_O,
'reads_max':max_O,
'reads_lb':data_lb_O,
'reads_ub':data_ub_O,
'reads_iq1':iq_1_O,
'reads_iq3':iq_3_O,
'reads_median':median_O,
'reads_mean':data_ave_O,
'reads_var':data_var_O,
'reads_n':len(plus_region.values),
'amplification_start':int(row['start']),
'amplification_stop':int(row['stop']),
'used_':True,
'comment_':None
})
# downsample
collapse_factor = None;
if downsample_factor > 1:
collapse_factor = int((row['stop'] - row['start']) / downsample_factor)
if collapse_factor and collapse_factor > 1:
plus_region = plus_region.groupby(lambda x: x // collapse_factor).mean()
plus_region.index *= collapse_factor
# add mean to index before and after the amplification start and stop, respectively (for visualization)
if downsample_factor > 1 and row_cnt==0:
#plus_region[strand_start]=plus_mean;
#plus_region[strand_stop]=plus_mean;
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'plus_mean',
#'genome_index':int(strand_start),
'genome_index':int(row['start']-1),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':plus_mean,
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':strand_start,
'amplification_stop':strand_stop,
'used_':True,
'comment_':'mean reads of the plus strand'
});
if downsample_factor > 1 and row_cnt==len(plus_high_region_indices)-1:
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'plus_mean',
#'genome_index':int(strand_stop),
'genome_index':int(row['stop']+1),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':plus_mean,
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':strand_start,
'amplification_stop':strand_stop,
'used_':True,
'comment_':'mean reads of the plus strand'
});
## add zeros to strand start and stop, respectively (for visualization)
#if downsample_factor > 1:
# plus_region[row['start']-1]=plus_mean;
# plus_region[row['stop']+1]=plus_mean;
# record high coverage regions
for index,reads in plus_region.iteritems():
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'plus',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':int(row['start']),
'amplification_stop':int(row['stop']),
'used_':True,
'comment_':None
});
# - strand
for row_cnt,row in enumerate(minus_high_region_indices):
minus_region = self.minus_high_regions[(self.minus_high_regions.index>=row['start']) & (self.minus_high_regions.index<=row['stop'])]
# calculate using scipy
data_ave_O, data_var_O, data_lb_O, data_ub_O = None, None, None, None;
data_ave_O, data_var_O, data_lb_O, data_ub_O = calculate.calculate_ave_var(minus_region.values,confidence_I = 0.95);
# calculate the interquartile range
min_O, max_O, median_O, iq_1_O, iq_3_O = None, None, None, None, None;
min_O, max_O, median_O, iq_1_O, iq_3_O=calculate.calculate_interquartiles(minus_region.values);
# record data
stats_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1,
'genome_strand':'minus',
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads_min':min_O,
'reads_max':max_O,
'reads_lb':data_lb_O,
'reads_ub':data_ub_O,
'reads_iq1':iq_1_O,
'reads_iq3':iq_3_O,
'reads_median':median_O,
'reads_mean':data_ave_O,
'reads_var':data_var_O,
'reads_n':len(minus_region.values),
'amplification_start':int(row['start']),
'amplification_stop':int(row['stop']),
'used_':True,
'comment_':None
})
# downsample
collapse_factor = None;
if downsample_factor > 1:
collapse_factor = int((row['stop'] - row['start']) / downsample_factor)
if collapse_factor and collapse_factor > 1:
minus_region = minus_region.groupby(lambda x: x // collapse_factor).mean()
minus_region.index *= collapse_factor
# add mean to index before and after the amplification start and stop, respectively (for visualization)
if downsample_factor > 1 and row_cnt==0:
#minus_region[strand_start]=minus_mean;
#minus_region[strand_stop]=minus_mean;
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'minus_mean',
#'genome_index':int(strand_start),
'genome_index':int(row['start']-1),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':minus_mean,
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':strand_start,
'amplification_stop':strand_stop,
'used_':True,
'comment_':'mean reads of the minus strand'
});
if downsample_factor > 1 and row_cnt==len(minus_high_region_indices)-1:
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'minus_mean',
#'genome_index':int(strand_stop),
'genome_index':int(row['stop']+1),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':minus_mean,
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':strand_start,
'amplification_stop':strand_stop,
'used_':True,
'comment_':'mean reads of the minus strand'
});
            ## add means to the positions flanking the amplification region (for visualization)
#if downsample_factor > 1:
# minus_region[row['start']-1]=minus_mean;
# minus_region[row['stop']+1]=minus_mean;
# record high coverage regions
for index,reads in minus_region.iteritems():
data_O.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1, #default
'genome_strand':'minus',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'reads_min':reads_min,
'reads_max':reads_max,
'indices_min':indices_min,
'consecutive_tol':consecutive_tol,
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'amplification_start':int(row['start']),
'amplification_stop':int(row['stop']),
'used_':True,
'comment_':None});
#record the data
self.amplifications = data_O;
self.amplificationStats = stats_O;
def annotate_amplifications(self,ref_genome_I='U00096.2.gb',
ref_I = 'genbank',geneReference_I=None,biologicalmaterial_id_I='MG1655'):
"""annotate amplificaitons from reference
ref_genome_I = reference genome to use for the annotation
ref_I = reference database
geneReference_I = filename for the gene reference table
        biologicalmaterial_id_I = biologicalmaterial_id for the geneReference to use for the annotation (required to generate an ecogene link)
"""
genomeannotation = genome_annotations(ref_genome_I,ref_I,geneReference_I);
data_O = [];
# get amplification regions
amplificationStats = self.amplificationStats;
# annotate each region
for row in amplificationStats:
# annotate each mutation based on the position
annotations = [];
            annotations = genomeannotation._find_genesInRegion(row['amplification_start'],row['amplification_stop'])
for annotation in annotations:
# record the data
tmp = {
'experiment_id':row['experiment_id'],
'sample_name':row['sample_name'],
'genome_chromosome':row['genome_chromosome'],
'genome_strand':row['genome_strand'],
'strand_start':row['strand_start'],
'strand_stop':row['strand_stop'],
'amplification_start':row['amplification_start'],
'amplification_stop':row['amplification_stop'],
'used_':True,
'comment_':None};
tmp['feature_genes'] = annotation['gene']
tmp['feature_locations'] = annotation['location']
tmp['feature_annotations'] = annotation['product']
tmp['feature_start'] = annotation['start'];
tmp['feature_stop'] = annotation['stop'];
tmp['feature_types'] = annotation['type']
# generate a link to ecogene for the genes
tmp['feature_links'] = [];
for bnumber in annotation['locus_tag']:
if bnumber:
ecogenes = [];
ecogenes = genomeannotation._get_ecogenesByBiologicalmaterialIDAndOrderedLocusName(biologicalmaterial_id_I,bnumber);
if ecogenes:
ecogene = ecogenes[0];
ecogene_link = genomeannotation._generate_httplink2gene_ecogene(ecogene['ecogene_accession_number']);
tmp['feature_links'].append(ecogene_link)
else: print('no ecogene_accession_number found for ordered_locus_location ' + bnumber);
data_O.append(tmp);
self.amplificationAnnotations = data_O;
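    # Usage sketch (not called anywhere in this module): assuming an earlier method in
    # this class has already populated self.amplifications and self.amplificationStats,
    # the annotations and the ddt-style js export could be produced as follows. The
    # instance name `analysis` and the geneReference filename are hypothetical.
    #
    #   analysis.annotate_amplifications(ref_genome_I='U00096.2.gb', ref_I='genbank',
    #                                    geneReference_I='geneReference.csv',
    #                                    biologicalmaterial_id_I='MG1655')
    #   js_str = analysis.export_amplifications_js(data_dir_I='data_json')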
def _get_chromosomes(self,data_I,experiment_id_I=None,sample_name_I=None):
"""return all chromosomes"""
data_O = [];
for d in data_I:
if experiment_id_I and sample_name_I:
if d['experiment_id'] == experiment_id_I and d['sample_name'] == sample_name_I:
data_O.append(d['genome_chromosome']);
else:
data_O.append(d['genome_chromosome']);
return data_O;
def _get_strandsByChromosome(self,data_I,chromosome_I,experiment_id_I=None,sample_name_I=None):
"""return strands for the given chromosome"""
data_O = [];
for d in data_I:
if experiment_id_I and sample_name_I:
if d['experiment_id'] == experiment_id_I and d['sample_name'] == sample_name_I and d['genome_chromosome'] == chromosome_I:
data_O.append(d['genome_strand']);
elif d['genome_chromosome'] == chromosome_I:
data_O.append(d['genome_strand']);
return data_O;
def _get_startAndStopsByChromosomeAndStrand(self,data_I,chromosome_I,strand_I,experiment_id_I=None,sample_name_I=None):
"""return strand start and stop positions for the given chromosome and strand"""
genomic_starts,genomic_stops = [],[]
for d in data_I:
if experiment_id_I and sample_name_I:
if d['experiment_id'] == experiment_id_I and d['sample_name'] == sample_name_I and d['genome_chromosome'] == chromosome_I and d['genome_strand'] == strand_I:
genomic_starts.append(d['strand_start']);
genomic_stops.append(d['strand_stop']);
elif d['genome_chromosome'] == chromosome_I and d['genome_strand'] == strand_I:
genomic_starts.append(d['strand_start']);
genomic_stops.append(d['strand_stop']);
return genomic_starts,genomic_stops;
def _get_amplificationRegionsByChromosomeAndStrand(self,data_I,chromosome_I,strand_I,experiment_id_I=None,sample_name_I=None):
"""return strand start and stop positions for the given chromosome and strand"""
genomic_starts,genomic_stops = [],[]
for d in data_I:
if experiment_id_I and sample_name_I:
if d['experiment_id'] == experiment_id_I and d['sample_name'] == sample_name_I and d['genome_chromosome'] == chromosome_I and d['genome_strand'] == strand_I:
genomic_starts.append(d['amplification_start']);
genomic_stops.append(d['amplification_stop']);
elif d['genome_chromosome'] == chromosome_I and d['genome_strand'] == strand_I:
genomic_starts.append(d['amplification_start']);
genomic_stops.append(d['amplification_stop']);
return genomic_starts,genomic_stops;
def _get_amplificationRegions(self,data_I,experiment_id_I=None,sample_name_I=None):
"""return strand start and stop positions"""
genomic_starts,genomic_stops = [],[]
for d in data_I:
if experiment_id_I and sample_name_I:
if d['experiment_id'] == experiment_id_I and d['sample_name'] == sample_name_I:
genomic_starts.append(d['amplification_start']);
genomic_stops.append(d['amplification_stop']);
else:
genomic_starts.append(d['amplification_start']);
genomic_stops.append(d['amplification_stop']);
return genomic_starts,genomic_stops;
def extract_coverage_fromGff(self,gff_file,
strand_start,strand_stop,scale_factor=True,downsample_factor=2000,
experiment_id_I=None, sample_name_I=None):
"""extract coverage (genome position and reads) from .gff
INPUT:
strand_start = index of the start position
strand_stop = index of the stop position
        scale_factor = boolean, if True, reads will be normalized to a maximum of 100
        downsample_factor = integer, factor to downsample the points to
        OPTIONAL INPUT:
experiment_id_I = tag for the experiment from which the sample came
sample_name_I = tag for the sample name
"""
self.set_gffFile(gff_file);
filename = self.gff_file;
experiment_id = experiment_id_I;
sample_name = sample_name_I;
# parse the gff file into pandas dataframes
self.extract_strandsFromGff(strand_start, strand_stop, scale=scale_factor, downsample=downsample_factor)
        # split into separate data structures based on the destination table
coverage_data = [];
if not self.plus.empty:
for index,reads in self.plus.iteritems():
coverage_data.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sample_name,
'data_dir':filename,
'genome_chromosome':1, #default
'genome_strand':'plus',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'used_':True,
'comment_':None});
if not self.minus.empty:
for index,reads in self.minus.iteritems():
coverage_data.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sample_name,
'data_dir':filename,
'genome_chromosome':1, #default
'genome_strand':'minus',
'genome_index':int(index),
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads':float(reads),
'scale_factor':scale_factor,
'downsample_factor':downsample_factor,
'used_':True,
'comment_':None});
# add data to the database:
self.coverage = coverage_data;
def calculate_coverageStats_fromGff(self,gff_file,
strand_start,strand_stop,scale_factor=True,downsample_factor=2000,
experiment_id_I=None, sample_name_I=None):
"""extract coverage (genome position and reads) from .gff
INPUT:
strand_start = index of the start position
strand_stop = index of the stop position
        scale_factor = boolean, if True, reads will be normalized to a maximum of 100
        downsample_factor = integer, factor to downsample the points to
        OPTIONAL INPUT:
experiment_id_I = tag for the experiment from which the sample came
sample_name_I = tag for the sample name
"""
calculate = calculate_interface();
self.set_gffFile(gff_file);
filename = self.gff_file;
experiment_id = experiment_id_I;
sn = sample_name_I;
# parse the gff file into pandas dataframes
self.extract_strandsFromGff(strand_start, strand_stop, scale=scale_factor, downsample=downsample_factor)
        # split into separate data structures based on the destination table
coverageStats_data = [];
# plus strand
# calculate using scipy
data_ave_O, data_var_O, data_lb_O, data_ub_O = None, None, None, None;
data_ave_O, data_var_O, data_lb_O, data_ub_O = calculate.calculate_ave_var(self.plus.values,confidence_I = 0.95);
# calculate the interquartile range
min_O, max_O, median_O, iq_1_O, iq_3_O = None, None, None, None, None;
min_O, max_O, median_O, iq_1_O, iq_3_O=calculate.calculate_interquartiles(self.plus.values);
# record data
coverageStats_data.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1,
'genome_strand':'plus',
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads_min':int(min_O),
'reads_max':int(max_O),
'reads_lb':data_lb_O,
'reads_ub':data_ub_O,
'reads_iq1':iq_1_O,
'reads_iq3':iq_3_O,
'reads_median':median_O,
'reads_mean':data_ave_O,
'reads_var':data_var_O,
'reads_n':len(self.plus.values),
'used_':True,
'comment_':None});
# minus strand
# calculate using scipy
data_ave_O, data_var_O, data_lb_O, data_ub_O = None, None, None, None;
data_ave_O, data_var_O, data_lb_O, data_ub_O = calculate.calculate_ave_var(self.minus.values,confidence_I = 0.95);
# calculate the interquartile range
min_O, max_O, median_O, iq_1_O, iq_3_O = None, None, None, None, None;
min_O, max_O, median_O, iq_1_O, iq_3_O=calculate.calculate_interquartiles(self.minus.values);
# record data
coverageStats_data.append({
#'analysis_id':analysis_id,
'experiment_id':experiment_id,
'sample_name':sn,
'genome_chromosome':1,
'genome_strand':'minus',
'strand_start':strand_start,
'strand_stop':strand_stop,
'reads_min':int(min_O),
'reads_max':int(max_O),
'reads_lb':data_lb_O,
'reads_ub':data_ub_O,
'reads_iq1':iq_1_O,
'reads_iq3':iq_3_O,
'reads_median':median_O,
'reads_mean':data_ave_O,
'reads_var':data_var_O,
'reads_n':len(self.minus.values),
'used_':True,
'comment_':None});
# record the data
self.coverageStats = coverageStats_data;
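    # Usage sketch for the coverage path (not called anywhere in this module): the
    # instance name `analysis` and the .gff filename are hypothetical; both calls
    # re-parse the same file with the same window and scaling arguments.
    #
    #   analysis.extract_coverage_fromGff('sample.gff', 0, 4639675,
    #                                     scale_factor=True, downsample_factor=2000,
    #                                     experiment_id_I='exp01', sample_name_I='sample01')
    #   analysis.calculate_coverageStats_fromGff('sample.gff', 0, 4639675,
    #                                            scale_factor=True, downsample_factor=2000,
    #                                            experiment_id_I='exp01', sample_name_I='sample01')
    #   js_str = analysis.export_coverage_js(data_dir_I='data_json')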
def export_amplifications_js(self,data_dir_I="tmp"):
"""export amplifications and statistics to js file"""
#get the data for the analysis
data1_O = [];
data2_O = [];
data3_O = [];
data1_O = self._make_sampleNameStrand(self.amplifications);
data2_O = self._make_sampleNameStrand(self.amplificationStats);
data3_O = self._make_sampleNameStrand(self.amplificationAnnotations);
# dump chart parameters to a js files
data1_keys = ['experiment_id',
'sample_name',
'genome_chromosome',
'genome_strand',
'amplification_start',
'amplification_stop',
'sample_name_strand',
]
data1_nestkeys = [
#'sample_name',
'genome_strand'
];
data1_keymap = {'xdata':'genome_index',
'ydata':'reads',
'serieslabel':'sample_name_strand',#custom for vis
#'serieslabel':'genome_strand',
'featureslabel':'reads'};
data2_keys = ['experiment_id',
'sample_name',
'genome_chromosome',
'genome_strand',
#'reads_min',
#'reads_max',
#'reads_lb',
#'reads_ub',
#'reads_iq1',
#'reads_iq3',
#'reads_median',
#'reads_mean',
#'reads_var',
#'reads_n',
'amplification_start',
'amplification_stop',
]
data2_nestkeys = ['sample_name'];
data2_keymap = {'xdata':'genome_index',
'ydata':'reads',
'serieslabel':'genome_strand',
'featureslabel':'reads'};
data3_keys = ['experiment_id',
'sample_name',
'genome_chromosome',
'genome_strand',
'feature_annotations',
'feature_genes',
'feature_locations',
'feature_links',
'feature_start',
'feature_stop',
'feature_types',
'amplification_start',
'amplification_stop',
]
data3_nestkeys = ['sample_name'];
data3_keymap = {'xdata':'genome_index',
'ydata':'reads',
'serieslabel':'genome_strand',
'featureslabel':'reads'};
# make the data object
dataobject_O = [{"data":data1_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
{"data":data2_O,"datakeys":data2_keys,"datanestkeys":data2_nestkeys},
{"data":data3_O,"datakeys":data3_keys,"datanestkeys":data3_nestkeys}
];
# make the tile parameter objects
# linked set #1
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"index","svgy1axislabel":"reads",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1',
"svgx1axistickformat":".2e",
"svgx1axisticktextattr":{"transform":"matrix(0,1,-1,0,16,6)",
#"transform":'rotate(90)',"transform":'translate(0,10)'
},
"svgx1axisticktextstyle":{"text-anchor":"start"}
};
svgtileparameters_O = {'tileheader':'Amplifications','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
# linked set #2
formtileparameters2_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu2",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters2_O = {'htmlid':'filtermenuform2',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit2','text':'submit'},"formresetbuttonidtext":{'id':'reset2','text':'reset'},"formupdatebuttonidtext":{'id':'update2','text':'update'}};
formtileparameters2_O.update(formparameters2_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu2','tableresetbuttonid':'reset2','tablesubmitbuttonid':'submit2'};
tabletileparameters_O = {'tileheader':'Amplification statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
# linked set #3
formtileparameters3_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu3",'rowid':"row3",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters3_O = {'htmlid':'filtermenuform3',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit3','text':'submit'},"formresetbuttonidtext":{'id':'reset3','text':'reset'},"formupdatebuttonidtext":{'id':'update3','text':'update'}};
formtileparameters3_O.update(formparameters3_O);
tableparameters2_O = {"tabletype":'responsivetable_01',
'tableid':'table2',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu3','tableresetbuttonid':'reset3','tablesubmitbuttonid':'submit3'};
tabletileparameters2_O = {'tileheader':'Amplification annotations','tiletype':'table','tileid':"tile4",'rowid':"row3",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters2_O.update(tableparameters2_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,formtileparameters2_O,tabletileparameters_O,formtileparameters3_O,tabletileparameters2_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,0],"tile3":[1],"tile4":[2],"filtermenu2":[1],"filtermenu3":[2]};
filtermenuobject_O = [{"filtermenuid":"filtermenu1","filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1","filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"},{"filtermenuid":"filtermenu2","filtermenuhtmlid":"filtermenuform2",
"filtermenusubmitbuttonid":"submit2","filtermenuresetbuttonid":"reset2",
"filtermenuupdatebuttonid":"update2"},{"filtermenuid":"filtermenu3","filtermenuhtmlid":"filtermenuform3",
"filtermenusubmitbuttonid":"submit3","filtermenuresetbuttonid":"reset3",
"filtermenuupdatebuttonid":"update3"}];
# dump the data to a json file
data_str = 'var ' + 'data' + ' = ' + json.dumps(dataobject_O) + ';';
parameters_str = 'var ' + 'parameters' + ' = ' + json.dumps(parametersobject_O) + ';';
tile2datamap_str = 'var ' + 'tile2datamap' + ' = ' + json.dumps(tile2datamap_O) + ';';
filtermenu_str = 'var ' + 'filtermenu' + ' = ' + json.dumps(filtermenuobject_O) + ';';
if data_dir_I=='tmp':
filename_str = 'ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = data_str + '\n' + parameters_str + '\n' + tile2datamap_str + '\n' + filtermenu_str;
return data_json_O;
with open(filename_str,'w') as file:
file.write(data_str);
file.write(parameters_str);
file.write(tile2datamap_str);
file.write(filtermenu_str);
def export_coverage_js(self,data_dir_I="tmp"):
"""exportcoverage data to js file"""
#get the data for the analysis
data1_O = [];
data2_O = [];
data1_O = self._make_sampleNameStrand(self.coverage);
data2_O = self._make_sampleNameStrand(self.coverageStats);
# dump chart parameters to a js files
data1_keys = ['experiment_id',
'sample_name',
'genome_chromosome',
'genome_strand',
'sample_name_strand'
]
data1_nestkeys = [
#'sample_name',
'genome_strand'
];
data1_keymap = {'xdata':'genome_index',
'ydata':'reads',
'serieslabel':'sample_name_strand',#custom for vis
'featureslabel':'reads'};
data2_keys = ['experiment_id',
'sample_name',
'genome_chromosome',
'genome_strand',
#'strand_start',
#'strand_stop',
#'reads_min',
#'reads_max',
#'reads_lb',
#'reads_ub',
#'reads_iq1',
#'reads_iq3',
#'reads_median',
#'reads_mean',
#'reads_var',
#'reads_n',
                      'strand_start',
                      'strand_stop',
'used_',
'comment_'
]
data2_nestkeys = ['sample_name'];
data2_keymap = {'xdata':'genome_index',
'ydata':'reads',
'serieslabel':'genome_strand',
'featureslabel':'reads'};
# make the data object
dataobject_O = [{"data":data1_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},{"data":data2_O,"datakeys":data2_keys,"datanestkeys":data2_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'scatterplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"index","svgy1axislabel":"reads",
'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1',
"svgx1axistickformat":".2e",
"svgx1axisticktextattr":{"transform":"matrix(0,1,-1,0,16,6)",
#"transform":'rotate(90)',"transform":'translate(0,10)'
},
"svgx1axisticktextstyle":{"text-anchor":"start"}
};
svgtileparameters_O = {'tileheader':'Resequencing coverage','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'Resequencing coverage statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0,0],"tile3":[1]};
# dump the data to a json file
data_str = 'var ' + 'data' + ' = ' + json.dumps(dataobject_O) + ';';
parameters_str = 'var ' + 'parameters' + ' = ' + json.dumps(parametersobject_O) + ';';
tile2datamap_str = 'var ' + 'tile2datamap' + ' = ' + json.dumps(tile2datamap_O) + ';';
if data_dir_I=='tmp':
filename_str = 'ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = data_str + '\n' + parameters_str + '\n' + tile2datamap_str;
return data_json_O;
with open(filename_str,'w') as file:
file.write(data_str);
file.write(parameters_str);
file.write(tile2datamap_str);
def _make_sampleNameStrand(self,coverage_I):
"""generate a unique sample/strand name for visualization"""
for d in coverage_I:
d['sample_name_strand']="_".join([d['sample_name'],d['genome_strand']]);
return coverage_I;
|
|
from django.views.generic import View
from django.shortcuts import render_to_response, get_object_or_404 ,render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse , HttpResponseRedirect
from django.core import serializers
from django.template import RequestContext
from django.conf import settings
import json
import os
from tinydb import TinyDB, where
from forms import *
tinydb_pathto_file = "{0}/db/tinydb.json"
tinydb_path = tinydb_pathto_file.format(settings.PROJECT_ROOT)
db = TinyDB(tinydb_path)
def project_info(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"cust_code": request.POST.get("cust_code", "cust_code"),
"cust_addr": request.POST.get("cust_addr", "cust_addr"),
"project_info": request.POST.get("project_info", "project_info"),
"database": request.POST.get("database", "database"),
"file_name": request.POST.get("file_name", "file_name"),
"cust_name": request.POST.get("cust_name", "cust_name"),
"project_code": request.POST.get("project_code", "project_code"),
"architecture": request.POST.get("architecture", "architecture"),
"prog_lang": request.POST.get("prog_lang", "prog_lang"),
"process_model": request.POST.get("process_model", "process_model")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/project_info.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/project_info.html',
context_data,
RequestContext(request)
)
def new_project(request):
if request.session.get('eid', None) != None:
del request.session['eid']
return HttpResponseRedirect("/projectspecs/project-info/")
def open_project(request ,eid = None):
if request.session.get('eid', None) != None:
del request.session['eid']
if eid:
request.session['eid'] = int(eid)
#return HttpResponse(request.session['eid'])
return HttpResponseRedirect("/projectspecs/project-info/")
else:
return HttpResponseRedirect("/projectspecs/project-info/")
def introduction(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"purpose": request.POST.get("purpose", "purpose"),
"scope": request.POST.get("scope", "scope"),
"reference": request.POST.get("reference", "reference"),
"standards": request.POST.get("standards", "standards")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/introduction.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/introduction.html',
context_data,
RequestContext(request)
)
def background(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"the_problem_of": request.POST.get("the_problem_of", "the_problem_of"),
"affects": request.POST.get("affects", "affects"),
"the_impact_of_which": request.POST.get("the_impact_of_which", "the_impact_of_which"),
"success_soln": request.POST.get("success_soln", "success_soln"),
"input_for": request.POST.get("input_for", "input_for"),
"input_who": request.POST.get("input_who", "input_who"),
"prod_sys_name": request.POST.get("prod_sys_name", "prod_sys_name"),
"input_that": request.POST.get("input_that", "input_that"),
"input_unlike": request.POST.get("input_unlike", "input_unlike"),
"our_product": request.POST.get("our_product", "our_product")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/background.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/background.html',
context_data,
RequestContext(request)
)
def prod_info(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"prod_info": request.POST.get("prod_info", "prod_info"),
"prod_viewpoints": request.POST.get("prod_viewpoints", "prod_viewpoints"),
"major_prod_constraints": request.POST.get("major_prod_constraints", "major_prod_constraints")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/prod_info.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/prod_info.html',
context_data,
RequestContext(request)
)
def features(request):
return render(request, 'workflow/features.html')
def non_func_1(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"system_req": request.POST.get("system_req", "system_req"),
"tech_req": request.POST.get("tech_req", "tech_req"),
"startup_req": request.POST.get("startup_req", "startup_req"),
"shutdown_req": request.POST.get("shutdown_req", "shutdown_req"),
"interface_req": request.POST.get("interface_req", "interface_req"),
"prob_req": request.POST.get("prob_req", "prob_req"),
"performance_req": request.POST.get("performance_req", "performance_req")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_1.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_1.html',
context_data,
RequestContext(request)
)
def non_func_2(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"reliability_req": request.POST.get("reliability_req", "reliability_req"),
"supp_req": request.POST.get("supp_req", "supp_req"),
"impl_req": request.POST.get("impl_req", "impl_req"),
"op_env_req": request.POST.get("op_env_req", "op_env_req"),
"usablity_req": request.POST.get("usablity_req", "usablity_req"),
"sec_req": request.POST.get("sec_req", "sec_req"),
"qual_req": request.POST.get("qual_req", "qual_req")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_2.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_2.html',
context_data,
RequestContext(request)
)
def non_func_3(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"trace_req": request.POST.get("trace_req", "trace_req"),
"config_req": request.POST.get("config_req", "config_req"),
"err_handling_req": request.POST.get("err_handling_req", "err_handling_req"),
"localization_req": request.POST.get("localization_req", "localization_req"),
"online_help_req": request.POST.get("online_help_req", "online_help_req"),
"reporting_req": request.POST.get("reporting_req", "reporting_req"),
"assumptions": request.POST.get("assumptions", "assumptions")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_3.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/non_func_3.html',
context_data,
RequestContext(request)
)
def environment(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"dev_hardware_req": request.POST.get("dev_hardware_req", "dev_hardware_req"),
"dev_software_req": request.POST.get("dev_software_req", "dev_software_req"),
"dev_deviations": request.POST.get("dev_deviations", "dev_deviations"),
"target_hardware_req": request.POST.get("target_hardware_req", "target_hardware_req"),
"target_software_req": request.POST.get("target_software_req", "target_software_req"),
"target_deviations": request.POST.get("target_deviations", "target_deviations")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/environment.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/environment.html',
context_data,
RequestContext(request)
)
def add_dev_consideration(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"cust_part_req": request.POST.get("cust_part_req", "cust_part_req"),
"commn_req": request.POST.get("commn_req", "commn_req"),
"infrastructure_req": request.POST.get("infrastructure_req", "infrastructure_req")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/add_dev_consideration.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/add_dev_consideration.html',
context_data,
RequestContext(request)
)
def post_dev(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"tech_transfer_req": request.POST.get("tech_transfer_req", "tech_transfer_req"),
"maintenance_req": request.POST.get("maintenance_req", "maintenance_req")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/post_dev.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/post_dev.html',
context_data,
RequestContext(request)
)
def use_case(request,puid = None):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"usecase":{"usercase_id": request.POST.get("usecase_id", "usecase_id"),
"usecase_name": request.POST.get("usecase_name", "usecase_name"),
"usercase_code_priority": request.POST.get("usecase_code_priority", "usecase_code_priority"),
"usecase_author": request.POST.get("usecase_author", "usecase_author"),
"usecase_date": request.POST.get("usecase_date", "usecase_date"),
"usecase_version": request.POST.get("usecase_version", "usecase_version"),
"usecase_actions": request.POST.get("usecase_actions", "usecase_actions"),
"usecase_frequency": request.POST.get("usecase_frequency", "usecase_frequency"),
"usecase_breif_desc": request.POST.get("usecase_breif_desc", "usecase_breif_desc"),
"usecase_pre_cond": request.POST.get("usecase_pre_cond", "usecase_pre_cond"),
"usecase_post_cond": request.POST.get("usecase_post_cond", "usecase_post_cond"),
"usecase_basic_flow": request.POST.get("usecase_basic_flow", "usecase_basic_flow"),
"usecase_alt_flow": request.POST.get("usecase_alt_flow", "usecase_alt_flow"),
"usecase_incl": request.POST.get("usecase_incl", "usecase_incl"),
"usecase_ext_point": request.POST.get("usecase_ext_point", "usecase_ext_point"),
"usecase_business_rules": request.POST.get("usecase_business_rules", "usecase_business_rules"),
"usecase_spl_req": request.POST.get("usecase_spl_req", "usecase_spl_req")}}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/use_case.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
#request.session['eid'] = 5
#return HttpResponse(request.session.get('eid',None))
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
#return HttpResponse(el['usecase'])
json_data = el
file_name = el['file_name']
all_data = db.all()
            if el.get('usecase'):
usecase_data = el['usecase']
else:
usecase_data = ""
#opening new session
else:
all_data = db.all()
json_data =""
file_name = ""
usecase_data = ""
#for usecase_data in jason_data:
#return HttpResponse(json.dumps(json_data), content_type="application/json")
#return HttpResponse(all_data)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {
'all_data': all_data,
'json_data':json_data,
'file_name':file_name,
'open_el_array':open_el_array,
'usecase_data': usecase_data # get usecase information
}
return render_to_response(
'workflow/use_case.html',
context_data,
RequestContext(request)
)
def io_config(request):
if request.method == 'POST':
# check cookie set
eid = request.session.get('eid',len(db)+1)
el = db.get(None,eid)
# if new data
if(el == None):
insert_data = request.POST
db.insert(insert_data)
request.session['eid'] = eid
el = db.get(None, eid)
# if update
else:
eid = request.session.get('eid',len(db)+1)
update_data = request.POST
update_data = {"cust_code": request.POST.get("cust_code", "cust_code")}
db.update(update_data,eids = [eid])
el = db.get(None, eid)
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
file_name = el['file_name']
context_data = {'json_data': el,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/io_config.html',
context_data,
RequestContext(request)
)
else:
# opening existing session
if (request.session.get('eid',None) != None):
eid = request.session.get('eid',len(db)+1)
el = db.get(cond=None, eid = int(eid))
json_data = el
file_name = el['file_name']
#opening new session
else:
json_data =""
file_name = ''
open_el = db.search(where('file_name'))
open_el_array = {}
for open_el_mem in open_el:
open_el_array.update({open_el_mem.eid:open_el_mem['file_name']})
context_data = {'json_data': json_data,
'file_name':file_name,
'open_el_array':open_el_array}
return render_to_response(
'workflow/io_config.html',
context_data,
RequestContext(request)
)
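# All of the views above repeat the same session / TinyDB boilerplate. The helper below
# is a refactoring sketch only: it is not referenced by any of the existing views, and
# the name `_render_workflow_page` and its `field_names` argument are hypothetical.
def _render_workflow_page(request, template, field_names):
    """Handle the shared insert/update/open-session pattern for a workflow page."""
    if request.method == 'POST':
        eid = request.session.get('eid', len(db) + 1)
        el = db.get(None, eid)
        if el is None:
            # first submission for this session: store the raw POST data
            db.insert(request.POST)
            request.session['eid'] = eid
        else:
            # later submissions: update only the fields owned by this page
            update_data = dict((name, request.POST.get(name, name)) for name in field_names)
            db.update(update_data, eids=[eid])
        el = db.get(None, eid)
        json_data, file_name = el, el['file_name']
    elif request.session.get('eid', None) is not None:
        eid = request.session.get('eid', len(db) + 1)
        el = db.get(cond=None, eid=int(eid))
        json_data, file_name = el, el['file_name']
    else:
        json_data, file_name = "", ''
    open_el_array = {}
    for open_el_mem in db.search(where('file_name')):
        open_el_array.update({open_el_mem.eid: open_el_mem['file_name']})
    context_data = {'json_data': json_data,
                    'file_name': file_name,
                    'open_el_array': open_el_array}
    return render_to_response(template, context_data, RequestContext(request))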
|
|
#!/usr/bin/env python
import argparse
import copy
import dateutil.parser
import dateutil.tz
import datetime
import json
import logging
import time
import urllib
import urllib2
import threading
NEXT_ACTION_LABEL = u'next_action'
args = None
class TraversalState(object):
"""Simple class to contain the state of the item tree traversal."""
def __init__(self, next_action_label_id):
self.remove_labels = []
self.add_labels = []
self.found_next_action = False
self.next_action_label_id = next_action_label_id
def clone(self):
"""Perform a simple clone of this state object.
For parallel traversals it's necessary to produce copies so that every
traversal to a lower node has the same found_next_action status.
"""
t = TraversalState(self.next_action_label_id)
t.found_next_action = self.found_next_action
return t
def merge(self, other):
"""Merge clones back together.
After parallel traversals, merge the results back into the parent state.
"""
if other.found_next_action:
self.found_next_action = True
self.remove_labels += other.remove_labels
self.add_labels += other.add_labels
class Item(object):
def __init__(self, initial_data):
self.parent = None
self.children = []
self.checked = initial_data['checked'] == 1
self.content = initial_data['content']
self.indent = initial_data['indent']
self.item_id = initial_data['id']
self.labels = initial_data['labels']
self.priority = initial_data['priority']
if 'due_date_utc' in initial_data and initial_data['due_date_utc'] != None:
p = dateutil.parser.parser()
self.due_date_utc = p.parse(initial_data['due_date_utc'])
else:
# Arbitrary time in the future to always sort last
self.due_date_utc = datetime.datetime(2100, 1, 1, tzinfo=dateutil.tz.tzutc())
def GetItemMods(self, state):
if self.IsSequential():
self._SequentialItemMods(state)
elif self.IsParallel():
self._ParallelItemMods(state)
if not state.found_next_action and not self.checked:
state.found_next_action = True
if args.use_priority and self.priority != 4:
state.add_labels.append(self)
elif not args.use_priority and not state.next_action_label_id in self.labels:
state.add_labels.append(self)
else:
if args.use_priority and self.priority == 4:
state.remove_labels.append(self)
elif not args.use_priority and state.next_action_label_id in self.labels:
state.remove_labels.append(self)
def SortChildren(self):
# Sorting by priority and date seemed like a good idea at some point, but
# that has proven wrong. Don't sort.
pass
def GetLabelRemovalMods(self, state):
if args.use_priority:
return
if state.next_action_label_id in self.labels:
state.remove_labels.append(self)
for item in self.children:
item.GetLabelRemovalMods(state)
def _SequentialItemMods(self, state):
"""
Iterate over every child, walking down the tree.
If none of our children are the next action, check if we are.
"""
for item in self.children:
item.GetItemMods(state)
def _ParallelItemMods(self, state):
"""
Iterate over every child, walking down the tree.
If none of our children are the next action, check if we are.
Clone the state each time we descend down to a child.
"""
frozen_state = state.clone()
for item in self.children:
temp_state = frozen_state.clone()
item.GetItemMods(temp_state)
state.merge(temp_state)
  def IsSequential(self):
    return not self.content.endswith('.')
    # alternative convention: return self.content.endswith('--')
  def IsParallel(self):
    return self.content.endswith('.')
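  # Naming convention inferred from the two predicates above: an item whose content ends
  # with '.' (for example "Errands.") is treated as parallel, so each child is evaluated
  # independently; any other item is sequential, so only the first unchecked task in
  # depth-first order picks up the next-action label.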
class Project(object):
def __init__(self, initial_data):
self.unsorted_items = dict()
self.children = []
self.indent = 0
self.is_archived = initial_data['is_archived'] == 1
self.is_deleted = initial_data['is_deleted'] == 1
self.name = initial_data['name']
# Project should act like an item, so it should have content.
self.content = initial_data['name']
self.project_id = initial_data['id']
def UpdateChangedData(self, changed_data):
self.name = changed_data['name']
  def IsSequential(self):
    return self.name.endswith('--')
    # alternative conventions: self.content.endswith('--') or not self.content.endswith('=')
  def IsParallel(self):
    return self.name.endswith('.')
    # alternative convention: self.content.endswith('=')
  # Reuse Item's (no-op) SortChildren so a Project can be treated like an item.
  SortChildren = Item.__dict__['SortChildren']
def GetItemMods(self, state):
if self.IsSequential():
for item in self.children:
item.GetItemMods(state)
elif self.IsParallel():
frozen_state = state.clone()
for item in self.children:
temp_state = frozen_state.clone()
item.GetItemMods(temp_state)
state.merge(temp_state)
else: # Remove all next_action labels in this project.
for item in self.children:
item.GetLabelRemovalMods(state)
def AddItem(self, item):
'''Collect unsorted child items
All child items for all projects are bundled up into an 'Items' list in the
v5 api. They must be normalized and then sorted to make use of them.'''
self.unsorted_items[item['id']] = item
def DelItem(self, item):
'''Delete unsorted child items'''
del self.unsorted_items[item['id']]
def BuildItemTree(self):
    '''Build a tree of items from the unsorted list
Sort the unsorted children first so that indentation levels mean something.
'''
self.children = []
sortfunc = lambda item: item['item_order']
sorted_items = sorted(self.unsorted_items.values(), key=sortfunc)
parent_item = self
previous_item = self
for item_dict in sorted_items:
item = Item(item_dict)
if item.indent > previous_item.indent:
logging.debug('pushing "%s" on the parent stack beneath "%s"',
previous_item.content, parent_item.content)
parent_item = previous_item
# walk up the tree until we reach our parent
while item.indent <= parent_item.indent:
logging.debug('walking up the tree from "%s" to "%s"',
parent_item.content, parent_item.parent.content)
parent_item = parent_item.parent
logging.debug('adding item "%s" with parent "%s"', item.content,
parent_item.content)
parent_item.children.append(item)
item.parent = parent_item
previous_item = item
#self.SortChildren()
class TodoistData(object):
'''Construct an object based on a full Todoist /Get request's data'''
def __init__(self, initial_data):
self._next_action_id = None
self._SetLabelData(initial_data)
self._projects = dict()
self._seq_no = initial_data['seq_no']
for project in initial_data['Projects']:
if project['is_deleted'] == 0:
self._projects[project['id']] = Project(project)
for item in initial_data['Items']:
self._projects[item['project_id']].AddItem(item)
for project in self._projects.itervalues():
project.BuildItemTree()
def _SetLabelData(self, label_data):
if args.use_priority:
return
if 'Labels' not in label_data:
logging.debug("Label data not found, wasn't updated.")
return
# Store label data - we need this to set the next_action label.
for label in label_data['Labels']:
if label['name'] == NEXT_ACTION_LABEL:
self._next_action_id = label['id']
logging.info('Found next_action label, id: %s', label['id'])
if self._next_action_id == None:
logging.warning('Failed to find next_action label, need to create it.')
def GetSyncState(self):
return {'seq_no': self._seq_no}
def UpdateChangedData(self, changed_data):
if 'seq_no' in changed_data:
self._seq_no = changed_data['seq_no']
if 'TempIdMapping' in changed_data:
if self._next_action_id in changed_data['TempIdMapping']:
logging.info('Discovered temp->real next_action mapping ID')
self._next_action_id = changed_data['TempIdMapping'][self._next_action_id]
if 'Projects' in changed_data:
for project in changed_data['Projects']:
# delete missing projects
        if project['is_deleted'] == 1:
          logging.info('forgetting deleted project %s' % project['name'])
          # projects are keyed by their own id (items use 'project_id')
          if project['id'] in self._projects:
            del self._projects[project['id']]
          continue
        if project['id'] in self._projects:
          self._projects[project['id']].UpdateChangedData(project)
        else:
          logging.info('found new project: %s' % project['name'])
          self._projects[project['id']] = Project(project)
if 'Items' in changed_data:
for item in changed_data['Items']:
if item['is_deleted'] == 1:
logging.info('removing deleted item %d from project %d' % (item['id'], item['project_id']))
self._projects[item['project_id']].DelItem(item)
else:
self._projects[item['project_id']].AddItem(item)
for project in self._projects.itervalues():
project.BuildItemTree()
def GetProjectMods(self):
mods = []
# We need to create the next_action label
if self._next_action_id == None and not args.use_priority:
self._next_action_id = '$%d' % int(time.time())
mods.append({'type': 'label_register',
'timestamp': int(time.time()),
'temp_id': self._next_action_id,
'args': {
'name': NEXT_ACTION_LABEL
}})
# Exit early so that we can receive the real ID for the label.
# Otherwise we end up applying the label two different times, once with
# the temporary ID and once with the real one.
# This makes adding the label take an extra round through the sync
# process, but that's fine since this only happens on the first ever run.
logging.info("Adding next_action label")
return mods
for project in self._projects.itervalues():
state = TraversalState(self._next_action_id)
project.GetItemMods(state)
if len(state.add_labels) > 0 or len(state.remove_labels) > 0:
logging.info("For project %s, the following mods:", project.name)
for item in state.add_labels:
# Intentionally add the next_action label to the item.
# This prevents us from applying the label twice since the sync
# interface does not return our changes back to us on GetAndSync.
# Apply these changes to both the item in the tree and the unsorted
# data.
# I really don't like this aspect of the API - return me a full copy of
# changed items please.
#
          # Note: "Priority 1" in the UI is actually priority 4 via the API.
          # The API's lowest priority value is 1, even though "Priority 1" is
          # the highest-priority label on the website.
m = self.MakeNewMod(item)
mods.append(m)
if args.use_priority:
item.priority = 4
project.unsorted_items[item.item_id]['priority'] = 4
m['args']['priority'] = item.priority
else:
item.labels.append(self._next_action_id)
m['args']['labels'] = item.labels
logging.info("add next_action to: %s", item.content)
for item in state.remove_labels:
m = self.MakeNewMod(item)
mods.append(m)
if args.use_priority:
item.priority = 1
project.unsorted_items[item.item_id]['priority'] = 1
m['args']['priority'] = item.priority
else:
item.labels.remove(self._next_action_id)
m['args']['labels'] = item.labels
logging.info("remove next_action from: %s", item.content)
return mods
@staticmethod
def MakeNewMod(item):
return {'type': 'item_update',
'timestamp': int(time.time()),
'args': {
'id': item.item_id,
}
}
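
# A minimal sketch (illustrative only, not part of the original script) of the
# mod structure that GetProjectMods() builds and sends through the sync API: an
# 'item_update' op whose args carry either the label list or the priority.
def ExampleNextActionMod(item, next_action_id, use_priority=False):
  mod = TodoistData.MakeNewMod(item)
  if use_priority:
    # API priority 4 corresponds to "Priority 1" in the UI.
    mod['args']['priority'] = 4
  else:
    mod['args']['labels'] = item.labels + [next_action_id]
  return mod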
def GetResponse(api_token):
values = {'api_token': api_token, 'seq_no': '0'}
data = urllib.urlencode(values)
req = urllib2.Request('https://api.todoist.com/TodoistSync/v5.3/get', data)
return urllib2.urlopen(req)
def DoSyncAndGetUpdated(api_token, items_to_sync, sync_state):
values = {'api_token': api_token,
'items_to_sync': json.dumps(items_to_sync)}
for key, value in sync_state.iteritems():
values[key] = json.dumps(value)
logging.debug("posting %s", values)
data = urllib.urlencode(values)
req = urllib2.Request('https://api.todoist.com/TodoistSync/v5.3/syncAndGetUpdated', data)
return urllib2.urlopen(req)
def main():
parser = argparse.ArgumentParser(description='Add NextAction labels to Todoist.')
  parser.add_argument('--api_token', required=True, help='Your API key')
parser.add_argument('--use_priority', required=False,
action="store_true", help='Use priority 1 rather than a label to indicate the next actions.')
global args
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
response = GetResponse(args.api_token)
initial_data = response.read()
logging.debug("Got initial data: %s", initial_data)
initial_data = json.loads(initial_data)
a = TodoistData(initial_data)
while True:
try:
mods = a.GetProjectMods()
if len(mods) == 0:
time.sleep(5)
else:
logging.info("* Modifications necessary - skipping sleep cycle.")
logging.info("** Beginning sync")
sync_state = a.GetSyncState()
        changed_data = DoSyncAndGetUpdated(args.api_token, mods, sync_state).read()
logging.debug("Got sync data %s", changed_data)
changed_data = json.loads(changed_data)
logging.info("* Updating model after receiving sync data")
a.UpdateChangedData(changed_data)
logging.info("* Finished updating model")
logging.info("** Finished sync")
    except Exception:
      logging.exception('Network error, trying again...')
if __name__ == '__main__':
main()
|
|
"""
Collection of physical constants and conversion factors.
Most constants are in SI units, so you can do
print('10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots')
The list is not meant to be comprehensive, but just convenient for everyday use.
"""
"""
BasSw 2006
physical constants: imported from CODATA
unit conversion: see e.g., NIST special publication 811
Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
Some constants exist in a few variants, which are marked with suffixes.
The ones without any suffix should be the most common ones.
"""
import math as _math
from .codata import value as _cd
import numpy as _np
# mathematical constants
pi = _math.pi
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
# SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
# binary prefixes
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
# physical constants
c = speed_of_light = _cd('speed of light in vacuum')
mu_0 = _cd('vacuum mag. permeability')
epsilon_0 = _cd('vacuum electric permittivity')
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Boltzmann = _cd('Boltzmann constant')
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
Wien = _cd('Wien wavelength displacement law constant')
Rydberg = _cd('Rydberg constant')
# mass in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain # avoirdupois
blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound
troy_ounce = 480 * grain # only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
# angle in rad
degree = pi / 180
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
# time in second
minute = 60.0
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
Julian_year = 365.25 * day
# length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 # typography
survey_foot = 1200.0 / 3937
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870700.0
light_year = Julian_year * c
parsec = au / arcsec
# pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)
# area in meter**2
hectare = 1e4
acre = 43560 * foot**2
# volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch**3 # US
# pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US # for oil
gallon_imp = 4.54609e-3 # UK
fluid_ounce_imp = gallon_imp / 160
# speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5  # approximate value at 15 degrees Celsius and 1 atm
knot = nautical_mile / hour
# temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1/1.8 # only for differences
# energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
# Wh = watt_hour
# power in watt
hp = horsepower = 550 * foot * pound * g
# force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
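
# A small usage sketch (illustrative, not part of the original module): since
# every constant above is a plain float in SI units, unit conversion is just
# arithmetic, as in the docstring example at the top of the file.
def _example_unit_conversions():
    speed_m_per_s = 10 * mile / minute          # 10 miles per minute in m/s
    speed_knots = 10 * mile / (minute * knot)   # the same speed in knots
    one_atm_in_psi = atm / psi                  # roughly 14.7
    return speed_m_per_s, speed_knots, one_atm_in_psi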
# functions for conversions that are not linear
def convert_temperature(val, old_scale, new_scale):
"""
Convert from a temperature scale to another one among Celsius, Kelvin,
Fahrenheit, and Rankine scales.
Parameters
----------
val : array_like
Value(s) of the temperature(s) to be converted expressed in the
original scale.
old_scale: str
Specifies as a string the original scale from which the temperature
value(s) will be converted. Supported scales are Celsius ('Celsius',
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
('Rankine', 'rankine', 'R', 'r').
new_scale: str
Specifies as a string the new scale to which the temperature
value(s) will be converted. Supported scales are Celsius ('Celsius',
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
('Rankine', 'rankine', 'R', 'r').
Returns
-------
res : float or array of floats
Value(s) of the converted temperature(s) expressed in the new scale.
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.constants import convert_temperature
>>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin')
array([ 233.15, 313.15])
"""
# Convert from `old_scale` to Kelvin
if old_scale.lower() in ['celsius', 'c']:
tempo = _np.asanyarray(val) + zero_Celsius
elif old_scale.lower() in ['kelvin', 'k']:
tempo = _np.asanyarray(val)
elif old_scale.lower() in ['fahrenheit', 'f']:
tempo = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius
elif old_scale.lower() in ['rankine', 'r']:
tempo = _np.asanyarray(val) * 5 / 9
else:
raise NotImplementedError("%s scale is unsupported: supported scales "
"are Celsius, Kelvin, Fahrenheit, and "
"Rankine" % old_scale)
# and from Kelvin to `new_scale`.
if new_scale.lower() in ['celsius', 'c']:
res = tempo - zero_Celsius
elif new_scale.lower() in ['kelvin', 'k']:
res = tempo
elif new_scale.lower() in ['fahrenheit', 'f']:
res = (tempo - zero_Celsius) * 9 / 5 + 32
elif new_scale.lower() in ['rankine', 'r']:
res = tempo * 9 / 5
else:
raise NotImplementedError("'%s' scale is unsupported: supported "
"scales are 'Celsius', 'Kelvin', "
"'Fahrenheit', and 'Rankine'" % new_scale)
return res
# optics
def lambda2nu(lambda_):
"""
Convert wavelength to optical frequency
Parameters
----------
lambda_ : array_like
Wavelength(s) to be converted.
Returns
-------
nu : float or array of floats
Equivalent optical frequency.
Notes
-----
Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants import lambda2nu, speed_of_light
>>> lambda2nu(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return c / _np.asanyarray(lambda_)
def nu2lambda(nu):
"""
Convert optical frequency to wavelength.
Parameters
----------
nu : array_like
Optical frequency to be converted.
Returns
-------
lambda : float or array of floats
Equivalent wavelength(s).
Notes
-----
Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants import nu2lambda, speed_of_light
>>> nu2lambda(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return c / _np.asanyarray(nu)
|
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import itertools
import string
import traceback
import netifaces
from oslo_config import cfg
from oslo_utils import encodeutils
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.i18n import _LW
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log
from nova import rpc
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
def exception_to_dict(fault):
"""Converts exceptions to a dict for use in notifications."""
# TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
code = 500
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
# get the message from the exception that was thrown
# if that does not exist, use the name of the exception class itself
try:
message = fault.format_message()
# These exception handlers are broad so we don't fail to log the fault
# just because there is an unexpected error retrieving the message
except Exception:
try:
message = unicode(fault)
except Exception:
message = None
if not message:
message = fault.__class__.__name__
# NOTE(dripton) The message field in the database is limited to 255 chars.
# MySQL silently truncates overly long messages, but PostgreSQL throws an
# error if we don't truncate it.
b_message = encodeutils.safe_encode(message)[:255]
# NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
# truncating a long byte string to 255, the last character may be
# cut in the middle, so that UnicodeDecodeError will occur when
# converting it back to unicode.
decode_ok = False
while not decode_ok:
try:
u_message = encodeutils.safe_decode(b_message)
decode_ok = True
except UnicodeDecodeError:
b_message = b_message[:-1]
fault_dict = dict(exception=fault)
fault_dict["message"] = u_message
fault_dict["code"] = code
return fault_dict
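
def _safe_truncate_example(message, limit=255):
    """Illustrative sketch (hypothetical helper, not part of this module) of the
    truncation loop used in exception_to_dict above: clip the encoded bytes to
    the column limit, then drop trailing bytes until the result decodes cleanly,
    so a multi-byte UTF-8 character is never cut in half.
    """
    b_message = encodeutils.safe_encode(message)[:limit]
    while True:
        try:
            return encodeutils.safe_decode(b_message)
        except UnicodeDecodeError:
            b_message = b_message[:-1]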
def _get_fault_details(exc_info, error_code):
details = ''
if exc_info and error_code == 500:
tb = exc_info[2]
if tb:
details = ''.join(traceback.format_tb(tb))
return unicode(details)
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
fault_obj = objects.InstanceFault(context=context)
fault_obj.host = CONF.host
fault_obj.instance_uuid = instance['uuid']
fault_obj.update(exception_to_dict(fault))
code = fault_obj.code
fault_obj.details = _get_fault_details(exc_info, code)
fault_obj.create()
def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
This method is a wrapper for get_next_device_name that gets the list
of used devices and the root device from a block device mapping.
"""
mappings = block_device.instance_block_mapping(instance, bdms)
return get_next_device_name(instance, mappings.values(),
mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
*block_device_lists):
"""Generate missing device names for an instance."""
dev_list = [bdm.device_name
for bdm in itertools.chain(*block_device_lists)
if bdm.device_name]
if root_device_name not in dev_list:
dev_list.append(root_device_name)
for bdm in itertools.chain(*block_device_lists):
dev = bdm.device_name
if not dev:
dev = get_next_device_name(instance, dev_list,
root_device_name)
bdm.device_name = dev
bdm.save()
dev_list.append(dev)
def get_next_device_name(instance, device_name_list,
root_device_name=None, device=None):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
for the instance. It uses the root_device_name (if provided) and
the list of used devices to find valid device names. If the device
name is valid but applicable to a different backend (for example
/dev/vdc is specified but the backend uses /dev/xvdc), the device
name will be converted to the appropriate format.
"""
is_xen = driver.compute_driver_matches('xenapi.XenAPIDriver')
req_prefix = None
req_letter = None
if device:
try:
req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
if not root_device_name:
root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
try:
prefix = block_device.match_device(
block_device.prepend_dev(root_device_name))[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=root_device_name)
# NOTE(vish): remove this when xenapi is setting default_root_device
if is_xen:
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
{'prefix': prefix, 'req_prefix': req_prefix})
used_letters = set()
for device_path in device_name_list:
letter = block_device.get_device_letter(device_path)
used_letters.add(letter)
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
if is_xen:
flavor = instance.get_flavor()
if flavor.ephemeral_gb:
used_letters.add('b')
if flavor.swap:
used_letters.add('c')
if not req_letter:
req_letter = _get_unused_letter(used_letters)
if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
return prefix + req_letter
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
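
def _device_letter_sort_example():
    # Illustrative sketch (not part of this module): the backtick padding used
    # in _get_unused_letter() sorts single letters before doubles because '`'
    # precedes 'a' in ASCII, e.g. 'z'.rjust(2, '`') == '`z' < 'aa'.
    samples = ['aa', 'b', 'ab', 'z']
    return sorted(samples, key=lambda x: x.rjust(2, '`'))  # ['b', 'z', 'aa', 'ab']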
def get_image_metadata(context, image_api, image_id_or_uri, instance):
image_system_meta = {}
# In case of boot from volume, image_id_or_uri may be None or ''
if image_id_or_uri is not None and image_id_or_uri != '':
# If the base image is still available, get its metadata
try:
image = image_api.get(context, image_id_or_uri)
except (exception.ImageNotAuthorized,
exception.ImageNotFound,
exception.Invalid) as e:
LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
{"image_id": image_id_or_uri, "error": e},
instance=instance)
else:
flavor = instance.get_flavor()
image_system_meta = utils.get_system_metadata_from_image(image,
flavor)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Merge the metadata from the instance with the image's, if any
system_meta.update(image_system_meta)
# Convert the system metadata to image metadata
return utils.get_image_from_system_metadata(system_meta)
def get_value_from_system_metadata(instance, key, type, default):
"""Get a value of a specified type from image metadata.
@param instance: The instance object
@param key: The name of the property to get
    @param type: The Python type the value is to be returned as
@param default: The value to return if key is not set or not the right type
"""
value = instance.system_metadata.get(key, default)
try:
return type(value)
except ValueError:
LOG.warning(_LW("Metadata value %(value)s for %(key)s is not of "
"type %(type)s. Using default value %(default)s."),
{'value': value, 'key': key, 'type': type,
'default': default}, instance=instance)
return default
def notify_usage_exists(notifier, context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' notification for an instance for usage auditing
purposes.
:param notifier: a messaging.Notifier
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
bw = notifications.bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data)
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
bandwidth=bw, image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(notifier, context, instance_ref, 'exists',
system_metadata=system_metadata, extra_usage_info=extra_info)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
"""Send a notification about an instance.
:param notifier: a messaging.Notifier
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param system_metadata: system_metadata DB entries for the instance,
if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
"""
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.info_from_instance(context, instance,
network_info, system_metadata, **extra_usage_info)
if fault:
# NOTE(johngarbutt) mirrors the format in wrap_exception
fault_payload = exception_to_dict(fault)
LOG.debug(fault_payload["message"], instance=instance)
usage_info.update(fault_payload)
if event_suffix.endswith("error"):
method = notifier.error
else:
method = notifier.info
method(context, 'compute.instance.%s' % event_suffix, usage_info)
def notify_about_server_group_update(context, event_suffix, sg_payload):
"""Send a notification about server group update.
:param event_suffix: Event type like "create.start" or "create.end"
:param sg_payload: payload for server group update
"""
notifier = rpc.get_notifier(service='servergroup')
notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
"""Send a notification about aggregate update.
:param event_suffix: Event type like "create.start" or "create.end"
:param aggregate_payload: payload for aggregate update
"""
aggregate_identifier = aggregate_payload.get('aggregate_id', None)
if not aggregate_identifier:
aggregate_identifier = aggregate_payload.get('name', None)
if not aggregate_identifier:
LOG.debug("No aggregate id or name specified for this "
"notification and it will be ignored")
return
notifier = rpc.get_notifier(service='aggregate',
host=aggregate_identifier)
notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
def notify_about_host_update(context, event_suffix, host_payload):
"""Send a notification about host update.
:param event_suffix: Event type like "create.start" or "create.end"
:param host_payload: payload for host update. It is a dict and there
should be at least the 'host_name' key in this
dict.
"""
host_identifier = host_payload.get('host_name')
if not host_identifier:
LOG.warning(_LW("No host name specified for the notification of "
"HostAPI.%s and it will be ignored"), event_suffix)
return
notifier = rpc.get_notifier(service='api', host=host_identifier)
notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)
def get_nw_info_for_instance(instance):
if isinstance(instance, obj_base.NovaObject):
if instance.info_cache is None:
return network_model.NetworkInfo.hydrate([])
return instance.info_cache.network_info
# FIXME(comstud): Transitional while we convert to objects.
info_cache = instance['info_cache'] or {}
nw_info = info_cache.get('network_info') or []
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = network_model.NetworkInfo.hydrate(nw_info)
return nw_info
def has_audit_been_run(context, conductor, host, timestamp=None):
begin, end = utils.last_completed_audit_period(before=timestamp)
task_log = conductor.task_log_get(context, "instance_usage_audit",
begin, end, host)
if task_log:
return True
else:
return False
def start_instance_usage_audit(context, conductor, begin, end, host,
num_instances):
conductor.task_log_begin_task(context, "instance_usage_audit", begin,
end, host, num_instances,
"Instance usage audit started...")
def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
message):
conductor.task_log_end_task(context, "instance_usage_audit", begin, end,
host, errors, message)
def usage_volume_info(vol_usage):
def null_safe_str(s):
return str(s) if s else ''
tot_refreshed = vol_usage['tot_last_refreshed']
curr_refreshed = vol_usage['curr_last_refreshed']
if tot_refreshed and curr_refreshed:
last_refreshed_time = max(tot_refreshed, curr_refreshed)
elif tot_refreshed:
last_refreshed_time = tot_refreshed
else:
# curr_refreshed must be set
last_refreshed_time = curr_refreshed
usage_info = dict(
volume_id=vol_usage['volume_id'],
tenant_id=vol_usage['project_id'],
user_id=vol_usage['user_id'],
availability_zone=vol_usage['availability_zone'],
instance_id=vol_usage['instance_uuid'],
last_refreshed=null_safe_str(last_refreshed_time),
reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
read_bytes=vol_usage['tot_read_bytes'] +
vol_usage['curr_read_bytes'],
writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
write_bytes=vol_usage['tot_write_bytes'] +
vol_usage['curr_write_bytes'])
return usage_info
def get_reboot_type(task_state, current_power_state):
"""Checks if the current instance state requires a HARD reboot."""
if current_power_state != power_state.RUNNING:
return 'HARD'
soft_types = [task_states.REBOOT_STARTED, task_states.REBOOT_PENDING,
task_states.REBOOTING]
reboot_type = 'SOFT' if task_state in soft_types else 'HARD'
return reboot_type
def get_machine_ips():
"""Get the machine's ip addresses
:returns: list of Strings of ip addresses
"""
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = addr.split('%')[0]
addresses.append(addr)
except ValueError:
pass
return addresses
class EventReporter(object):
"""Context manager to report instance action events."""
def __init__(self, context, event_name, *instance_uuids):
self.context = context
self.event_name = event_name
self.instance_uuids = instance_uuids
def __enter__(self):
for uuid in self.instance_uuids:
objects.InstanceActionEvent.event_start(
self.context, uuid, self.event_name, want_result=False)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for uuid in self.instance_uuids:
objects.InstanceActionEvent.event_finish_with_failure(
self.context, uuid, self.event_name, exc_val=exc_val,
exc_tb=exc_tb, want_result=False)
return False
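
def _event_reporter_example(context, instance_uuid):
    """Illustrative usage sketch (hypothetical event name, not part of this
    module): EventReporter is a context manager, so the instance action event
    is started on entry and finished, with any failure recorded, on exit.
    """
    with EventReporter(context, 'example_compute_operation', instance_uuid):
        pass  # the wrapped compute operation would run here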
class UnlimitedSemaphore(object):
def __enter__(self):
pass
    def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def balance(self):
return 0
|
|
from __future__ import print_function, unicode_literals
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.utils import six
from djblets.siteconfig.models import SiteConfiguration
from djblets.webapi.testing.testcases import WebAPITestCaseMixin
from reviewboard.notifications.tests import EmailTestHelper
from reviewboard.reviews.models import Review
from reviewboard.testing import TestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
error_mimetype,
file_attachment_comment_item_mimetype,
review_diff_comment_item_mimetype)
from reviewboard.webapi.tests.urls import (
get_review_diff_comment_list_url,
get_review_file_attachment_comment_list_url,
get_screenshot_comment_list_url,
get_screenshot_list_url)
class BaseWebAPITestCase(WebAPITestCaseMixin, TestCase, EmailTestHelper):
error_mimetype = error_mimetype
def setUp(self):
super(BaseWebAPITestCase, self).setUp()
self.siteconfig = SiteConfiguration.objects.get_current()
self.siteconfig.set("mail_send_review_mail", False)
self.siteconfig.set("auth_require_sitewide_login", False)
self.siteconfig.save()
self._saved_siteconfig_settings = self.siteconfig.settings.copy()
mail.outbox = []
fixtures = getattr(self, 'fixtures', [])
if 'test_users' in fixtures:
self.client.login(username="grumpy", password="grumpy")
self.user = User.objects.get(username="grumpy")
self.base_url = 'http://testserver'
def tearDown(self):
super(BaseWebAPITestCase, self).tearDown()
self.client.logout()
if self.siteconfig.settings != self._saved_siteconfig_settings:
self.siteconfig.settings = self._saved_siteconfig_settings
self.siteconfig.save()
def _testHttpCaching(self, url, check_etags=False,
check_last_modified=False):
response = self.client.get(url)
self.assertHttpOK(response, check_etag=check_etags,
check_last_modified=check_last_modified)
headers = {}
if check_etags:
headers['HTTP_IF_NONE_MATCH'] = response['ETag']
if check_last_modified:
headers['HTTP_IF_MODIFIED_SINCE'] = response['Last-Modified']
response = self.client.get(url, **headers)
self.assertHttpNotModified(response)
#
# Some utility functions shared across test suites.
#
def _login_user(self, local_site=False, admin=False):
"""Creates a user for a test.
The proper user will be created based on whether a valid LocalSite
user is needed, and/or an admin user is needed.
"""
self.client.logout()
# doc is a member of the default LocalSite.
username = 'doc'
if admin:
if local_site:
user = User.objects.get(username=username)
local_site = self.get_local_site(name=self.local_site_name)
local_site.admins.add(user)
else:
username = 'admin'
elif not local_site:
# Pick a user that's not part of the default LocalSite.
username = 'grumpy'
self.assertTrue(self.client.login(username=username,
password=username))
return User.objects.get(username=username)
def _postNewDiffComment(self, review_request, review_id, comment_text,
filediff_id=None, interfilediff_id=None,
first_line=10, num_lines=5, issue_opened=None,
issue_status=None):
"""Creates a diff comment and returns the payload response."""
if filediff_id is None:
diffset = review_request.diffset_history.diffsets.latest()
filediff = diffset.files.all()[0]
filediff_id = filediff.id
data = {
'filediff_id': filediff_id,
'text': comment_text,
'first_line': first_line,
'num_lines': num_lines,
}
if interfilediff_id is not None:
data['interfilediff_id'] = interfilediff_id
if issue_opened is not None:
data['issue_opened'] = issue_opened
if issue_status is not None:
data['issue_status'] = issue_status
if review_request.local_site:
local_site_name = review_request.local_site.name
else:
local_site_name = None
review = Review.objects.get(pk=review_id)
rsp = self.api_post(
get_review_diff_comment_list_url(review, local_site_name),
data,
expected_mimetype=review_diff_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
return rsp
def _postNewScreenshotComment(self, review_request, review_id, screenshot,
comment_text, x, y, w, h, issue_opened=None,
issue_status=None):
"""Creates a screenshot comment and returns the payload response."""
if review_request.local_site:
local_site_name = review_request.local_site.name
else:
local_site_name = None
post_data = {
'screenshot_id': screenshot.id,
'text': comment_text,
'x': x,
'y': y,
'w': w,
'h': h,
}
if issue_opened is not None:
post_data['issue_opened'] = issue_opened
if issue_status is not None:
post_data['issue_status'] = issue_status
review = Review.objects.get(pk=review_id)
rsp = self.api_post(
get_screenshot_comment_list_url(review, local_site_name),
post_data,
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
return rsp
def _delete_screenshot(self, review_request, screenshot):
"""Deletes a screenshot.
This does not return anything, because DELETE requests don't return a
response with a payload.
"""
if review_request.local_site:
local_site_name = review_request.local_site.name
else:
local_site_name = None
self.api_delete(
get_screenshot_list_url(review_request, local_site_name) +
six.text_type(screenshot.id) + '/')
def _postNewFileAttachmentComment(self, review_request, review_id,
file_attachment, comment_text,
issue_opened=None,
issue_status=None,
extra_fields={}):
"""Creates a file attachment comment.
This returns the response from the API call to create the comment."""
if review_request.local_site:
local_site_name = review_request.local_site.name
else:
local_site_name = None
post_data = {
'file_attachment_id': file_attachment.id,
'text': comment_text,
}
post_data.update(extra_fields)
if issue_opened is not None:
post_data['issue_opened'] = issue_opened
if issue_status is not None:
post_data['issue_status'] = issue_status
review = Review.objects.get(pk=review_id)
rsp = self.api_post(
get_review_file_attachment_comment_list_url(review,
local_site_name),
post_data,
expected_mimetype=file_attachment_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
return rsp
def _getTrophyFilename(self):
return os.path.join(settings.STATIC_ROOT, "rb", "images", "trophy.png")
|
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""CherryPy Server to provide recommendations of semantic similarity."""
import os
import json
import codecs
import cherrypy
import argparse
import configparser
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize
from jinja2 import Environment, FileSystemLoader
from cherrypy.lib.static import serve_file
from functools import reduce
from tvecs.preprocessor import yandex_api as yandex
from tvecs.vector_space_mapper.vector_space_mapper import VectorSpaceMapper
class Server(object):
"""
Server Configuration for t-vex.
.. seealso::
* :mod:`cherrypy`
"""
def __init__(self):
"""Initialization the Language and Model."""
self.model = {
"english": Server._load_model("english"),
"hindi": Server._load_model("hindi"),
}
self.cross_lang_vm = {
("english", "hindi"): self._create_vector_space_mapper("english", "hindi"),
("hindi", "english"): self._create_vector_space_mapper("hindi", "english"),
}
self.cache_file_path = os.path.join(
"tvecs", "visualization", "cached_dictionary"
)
if not os.path.exists(self.cache_file_path):
json.dump({}, codecs.open(self.cache_file_path, "w", encoding="utf-8"))
self.cached_dictionary = {}
with codecs.open(self.cache_file_path, "r", encoding="utf-8") as f:
self.cached_dictionary = json.load(f)
@cherrypy.expose
def index(self):
"""Semantic spac visualization html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "index.html")
)
)
@cherrypy.expose
def multivariate_analysis(self):
"""Parallel Coordinates for multivariate analysis html page return."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "multivariate.html")
)
)
@cherrypy.expose
def cross_lingual(self):
"""Cross Lingual recommender html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "cross_lingual.html")
)
)
@cherrypy.expose
def distances(self):
"""Visualization with distances html returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "distances.html")
)
)
@cherrypy.expose
def lingual_semantics(self):
"""Semantically related words in same language returned."""
return serve_file(
os.path.abspath(
os.path.join("tvecs", "visualization", "static", "intra_language.html")
)
)
def retrieve_meaning(self, language, word):
"""
Optional: Translate the word.
Retrieve Eng definition(s) of a word from cached file or PyDictionary.
API Documentation
:param language: Language for which definition needed
:param word: Word whose definition needs to be retrieved
:type language: String
:type word: String
:return: word and definition
:rtype: :class:`String`
"""
from PyDictionary import PyDictionary
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word = word.lower()
trword = word
if word in self.cached_dictionary:
return json.dumps(self.cached_dictionary[word])
else:
if language == "hindi":
trword = yandex.get_translation(word, "hi-en")
dictionary = PyDictionary(trword)
meanings = [trword, dictionary.meaning(trword)]
if meanings[1]:
self.cached_dictionary[word] = meanings
with codecs.open(self.cache_file_path, "w", encoding="utf-8") as f:
f.write(json.dumps(self.cached_dictionary))
            return json.dumps(meanings)
@cherrypy.expose
def get_distance(self, word1, word2, language1, language2):
"""
Retrieve cosine distance between word1 and word2.
- word1 and word2 have to be in the vocabulary
of language1 and language2, respectively.
API Documentation
:param word1: A word in language1's vocabulary
:param language1: Language of word1
:param word2: A word in language2's vocabulary
:param language2: Language of word2
:type word1: String
:type language1: String
:type word2: String
:type language2: String
:return: Dictionary with keys 'word1', 'word2', and 'distance'
:rtype: :class:`Dictionary`
.. py:currentmodule:: tvecs.vector_space_mapper.vector_space_mapper
.. seealso::
* :func:`VectorSpaceMapper.obtain_cosine_similarity`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
word1 = word1.lower()
word2 = word2.lower()
vm = self.cross_lang_vm.get((language1, language2))
similarity = None
if vm is not None:
similarity = vm.obtain_cosine_similarity(word1, word2)
distance = 1 - similarity if similarity is not None else None
return json.dumps({"word1": word1, "word2": word2, "distance": distance})
@cherrypy.expose
def retrieve_recommendations(self, language, word, limit=10):
"""
Retrieve number of semantically similar recommendations.
- For specified word in the given lang retrieve limit recommendations
API Documentation
:param language: Language for which recommendations required
:param word: Semantic similar words provided for given word
:param limit: No of words to be recommended [ Default 10 ]
:type language: String
:type word: String
:type limit: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :class:`gensim.models.Word2Vec`
"""
word = word.lower()
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
model = self.model.get(language)
if model is not None:
data = Server._recommend(word, int(limit), fn=model.most_similar)
else:
data = json.dumps(None)
return data
@cherrypy.expose
def get_cross_lingual_recommendations(self, lang1, lang2, word, topn=10):
"""
Provide cross lingual recommendations.
API Documentation
:param lang1: Language 1 for cross lingual recommendations.
:param lang2: Language 2 for cross lingual recommendations.
:param word: Word utilised for cross lingual recommendations.
:param topn: No of recommendations provided.
:type lang1: String
:type lang2: String
:type word: String
:type topn: Integer
:return: List of recommendations
:rtype: :class:`List`
.. seealso::
* :mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
sentence = word_tokenize(word.lower())
vm = self.cross_lang_vm.get((lang1, lang2))
data = None
if vm is not None:
result_vec = reduce(
lambda x, y: x + y, [self.model[lang1][word] for word in sentence]
)
data = Server._recommend(
result_vec, int(topn), fn=vm.get_recommendations_from_vec
)
return data
    def _create_vector_space_mapper(self, lang1, lang2):
"""
Create Vector Space Mapper between Languages.
API Documentation
:param lang1: Language 1 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
:param lang2: Language 2 used for building
:class:`tvecs.vector_space_mapper.vector_space_mapper.VectorSpaceMapper`
object
        :return: VectorSpaceMapper instance, or None if either model is missing
        :rtype: :class:`VectorSpaceMapper`
.. seealso::
:mod:`tvecs.vector_space_mapper.vector_space_mapper`
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
vm = None
with codecs.open(
os.path.join(
"data", "bilingual_dictionary", "%s_%s_train_bd" % (lang1, lang2)
),
"r",
encoding="utf-8",
) as file:
data = file.read().split("\n")
            bilingual_dict = [
                (line.split(" ")[0], line.split(" ")[1])
                for line in data if line.strip()
            ]
if (self.model.get(lang1) is not None) and (
self.model.get(lang2) is not None
):
vm = VectorSpaceMapper(
self.model[lang1], self.model[lang2], bilingual_dict
)
vm.map_vector_spaces()
return vm
@staticmethod
def _recommend(word, limit, fn):
"""Vector Space Mapper recommend functionality."""
try:
vec_list = fn(word, topn=limit)
except KeyError:
vec_list = None
if vec_list is not None:
data = json.dumps([{"word": tup[0], "weight": tup[1]} for tup in vec_list])
else:
data = json.dumps(None)
return data
@staticmethod
def _load_model(language):
"""Used to load Word2Vec Model."""
return Word2Vec.load(
os.path.join("data", "models", "t-vex-%s-model" % language)
)
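
def _example_queries(server):
    """Illustrative sketch (not part of the original module; the words used are
    made up): the exposed methods above can be called directly on a Server
    instance, and under CherryPy's default dispatcher they also map to URL paths
    such as /get_distance?word1=...&word2=...&language1=english&language2=hindi.
    """
    recommendations = server.retrieve_recommendations("english", "example", limit=5)
    cross_lingual = server.get_cross_lingual_recommendations("english", "hindi", "example")
    distance = server.get_distance("example", "udaharan", "english", "hindi")
    return recommendations, cross_lingual, distance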
if __name__ == "__main__":
"""Setting up the Server with Specified Configuration"""
parser = argparse.ArgumentParser(description="Obtain Server Configuration")
parser.add_argument(
"-c",
"--config",
dest="config",
help="Config File Path",
action="store",
type=str,
default=os.path.join("tvecs", "visualization", "server.conf"),
)
parser.add_argument(
"-p", "--port", dest="port", help="Port", action="store", type=int, default=None
)
parser.add_argument(
"-s",
"--host",
dest="host",
help="Host Name",
action="store",
type=str,
default=None,
)
args = parser.parse_args()
server_config = configparser.RawConfigParser()
env = Environment(loader=FileSystemLoader("static"))
conf = {
"/": {"tools.staticdir.root": os.path.abspath(os.getcwd())},
"/js": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "js"
),
},
"/css": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "css"
),
},
"/images": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "images"
),
},
"/resources": {
"tools.staticdir.on": True,
"tools.staticdir.dir": os.path.join(
"tvecs", "visualization", "static", "resources"
),
},
}
server_port = args.port
server_host = args.host
server_config.read(args.config)
if args.port is None:
server_port = server_config.get("Server", "port")
if args.host is None:
server_host = server_config.get("Server", "host")
thread_pool = server_config.get("Server", "thread_pool")
queue_size = server_config.get("Server", "queue_size")
cherrypy.config.update({"server.socket_host": server_host})
cherrypy.config.update({"server.thread_pool": int(thread_pool)})
cherrypy.config.update({"server.socket_queue_size": int(queue_size)})
cherrypy.config.update(
{"server.socket_port": int(os.environ.get("PORT", server_port))}
)
cherrypy.quickstart(Server(), "/", conf)
|
|
# -*- coding: utf-8; -*-
from __future__ import unicode_literals
from datetime import datetime
from multiprocessing import Process, Queue
import time
import sys
import six
import signal
import dateutil.parser
import docker
from docker.errors import APIError
from ..utils import parse_stream, normalize_keys, capitalize_keys, logger
from .config import Config as ContainerConfig
from .host_config import HostConfig
class Container(object):
""" This is a domain object that represents a docker container.
"""
def __init__(self, client, name=None, image=None, id=None, container_config={}, host_config={}):
if not isinstance(container_config, dict):
            raise TypeError('container_config needs to be of type dict.')
if not isinstance(host_config, dict):
raise TypeError('host_config needs to be of type dict.')
if not isinstance(client, docker.Client):
raise TypeError("client needs to be of type docker.Client, not: {0}".format(client))
if not id and (not name or not image):
raise AttributeError("Must provide name and image or id when instantiating the Container class.")
self.client = client
self._transcribe = False
self._transcribe_queue = None
self._transcribe_proc = None
self.config = ContainerConfig(container_config)
self.host_config = HostConfig(host_config)
if id:
self._find_by_id(id)
else:
self._create_container(name, image)
def __del__(self):
if hasattr(self, '_transcribe_proc'):
if self._transcribe_proc:
self._transcribe_proc.terminate()
@property
def config(self):
"""
"""
return self._config
@config.setter
def config(self, container_config):
"""
"""
if not isinstance(container_config, ContainerConfig):
raise TypeError("container_config must be an instance of Container.Config.")
self._config = container_config
@property
def host_config(self):
"""
"""
return self._host_config
@host_config.setter
def host_config(self, host_config):
"""
"""
if not isinstance(host_config, HostConfig):
raise TypeError("host_config must be an instance of HostConfig.")
if host_config.log_config.get('type') != 'json-file':
self._transcribe = True
self._host_config = host_config
def attach(self, stdout=True, stderr=True, stream=True, logs=False):
"""
Keeping this simple until we need to extend later.
"""
try:
data = parse_stream(self.client.attach(self.id, stdout, stderr, stream, logs))
except KeyboardInterrupt:
logger.warning(
"service container: {0} has been interrupted. "
"The container will be stopped but will not be deleted.".format(self.name)
)
data = None
self.stop()
return data
def commit(self, config, image_name, tag):
"""
"""
logger.info('Committing changes from {0}.'.format(image_name), extra={'formatter': 'container', 'container': self.name})
# TODO : Need to build some validation around this
response = normalize_keys(self.client.commit(self.id, repository=image_name, conf=capitalize_keys(config), tag=tag))
return response.get('id', False)
def delete(self, remove_volumes=False, links=False, force=False):
"""
"""
response = None
if self.state()["running"]:
self.stop()
logger.info('is being deleted.', extra={'formatter': 'container', 'container': self.name})
try:
response = self.client.remove_container(self.id, remove_volumes, links, force)
except APIError as e:
if e.response.status_code == 404:
                logger.info('could not be located.', extra={'formatter': 'container', 'container': self.name})
else:
raise APIError("Docker Error: {0}".format(e.explanation), e.response)
return response
def inspect(self):
"""
"""
# TODO: build object and return self converted.
return self.client.inspect_container(self.id)
def output(self):
output = self.client.logs(self.id, stdout=True, stderr=True, stream=False, timestamps=False, tail='all')
return output
def start(self, attach=False):
"""
Start a container. If the container is running it will return itself.
returns a running Container.
"""
if self.state()['running']:
logger.info('is already running.', extra={'formatter': 'container', 'container': self.name})
return True
else:
try:
logger.info(
'is being started.', extra={'formatter': 'container', 'container': self.name}
)
# returns None on success
self.client.start(self.id)
if self._transcribe:
self.start_transcribing()
except APIError as e:
                #
                # Workaround: retry once on a 500. This appears to be needed because
                # of changes in the Docker 1.18 API; revisit once we stop passing
                # properties to start().
if e.response.status_code == 500:
self.client.start(self.id)
else:
raise RuntimeError("Docker Error: {0}".format(e.explanation))
if attach:
self.attach()
exit_code = self.wait()
else:
exit_code = self._wait_for_exit_code()
return True if exit_code == 0 else False
def start_transcribing(self):
"""
:return:
"""
if not self._transcribe:
self._transcribe = True
if self._transcribe_queue is None:
self._transcribe_queue = Queue()
if self._transcribe_proc is None:
# add for debugging
# print "Starting to record container output for {0}.".format(self.name)
self._transcribe_proc = Process(target=self._start_recording, args=(self._transcribe_queue,))
self._transcribe_proc.daemon = True
self._transcribe_proc.start()
def state(self):
"""
{
"State": {
"ExitCode": 0,
"FinishedAt": "2014-10-20T16:45:35.908823764Z",
"Paused": false,
"Pid": 774,
"Restarting": false,
"Running": true,
"StartedAt": "2014-10-20T16:47:02.804735752Z"
}
}
"""
response = normalize_keys(self.client.inspect_container(self.id))
return response['state']
def running(self):
state = self.state()
return state.get('running', False) if state else False
def stop(self):
"""
stop the container
"""
logger.info('is being stopped', extra={'formatter': 'container', 'container': self.name})
response = self.client.stop(self.id)
while self.state()['running']:
time.sleep(1)
return response
def wait(self):
"""
        Block until the container stops and return its exit status (TODO: revisit this).
"""
response = self.client.wait(self.id)
return response
def dump_logs(self):
"""dump entirety of the container logs to stdout
:returns None
"""
msg = "log dump: \n"
if self._transcribe:
if self._transcribe_queue:
while not self._transcribe_queue.empty():
logs = self._transcribe_queue.get()
if isinstance(logs, six.binary_type):
logs = logs.decode(encoding='UTF-8', errors="ignore")
msg = '{0} {1}'.format(msg, logs)
else:
logs = self.client.logs(self.id, stdout=True, stderr=True, stream=False, timestamps=False, tail='all')
if isinstance(logs, six.binary_type):
logs = logs.decode(encoding='UTF-8', errors="ignore")
msg = '{0}{1}'.format(msg, logs)
logger.error(msg)
###
# Static methods
###
@staticmethod
def find_by_name(client, name):
"""
"""
if not isinstance(client, docker.Client):
raise TypeError("client needs to be of type docker.Client.")
containers = {}
try:
response = client.containers(all=True)
for container in response:
container = normalize_keys(container)
if 'names' in container and container["names"]:
# TODO: kind of a hack to fix the way name is coming back. look into patching the docker-py lib
for container_name in container['names']:
                        if container_name.count('/') != 1:
continue
if name in container_name:
containers[container_name.replace('/', '')] = Container(client, id=container['id'])
except Exception as e:
raise e
return containers
###
# Private methods
##
def _create_container(self, name, image):
if name is None or not isinstance(name, six.string_types):
raise TypeError("name cant be none and must be a string")
if image is None or not isinstance(image, six.string_types):
raise TypeError("image cant be none and must be a string")
self.name = name
self._config.image = image
self.image = image
logger.info("is being created.", extra={'formatter': 'container', 'container': self.name})
host_config = self.client.create_host_config(**self._host_config.docker_py_dict())
try:
# docker.errors.APIError: 500 Server Error: Internal Server Error ("No command specified")
response = normalize_keys(self.client.create_container(
host_config=host_config,
name=self.name,
**self._config.docker_py_dict()
))
if 'id' in response:
self.id = response['id']
self.created_at = datetime.utcnow()
if 'warnings' in response and response['warnings']:
self.warnings = response['warnings']
for warning in self.warnings:
logger.warning(warning)
self.client.close()
except Exception as e:
# docker.errors.APIError: 500 Server Error: Internal Server Error ("b'Could not get container for something'")
# python 2.7
# docker.errors.APIError: 409 Client Error: Conflict ("Conflict. The name
# "itops-cia-couchdb-injector-builder" is already in use by container 035b9e1cdd7f.
# You have to delete (or rename) that container to be able to reuse that name.")
# python 3
# docker.errors.APIError: 409 Client Error: Conflict ("b'Conflict. The name
# "tune_platform-freight-forwarder-wheel-01" is already in use by container 4fa07acc188f. You have to delete
# (or rename) that container to be able to reuse that name.'")
raise e
def _find_by_id(self, id):
"""
Expected response:
{
"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
"Created": "2013-05-07T14:51:42.041847+02:00",
"Path": "date",
"Args": [],
"Config": {
"Hostname": "4fa6e0f0c678",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": [
"date"
],
"Dns": null,
"Image": "base",
"Volumes": {},
"VolumesFrom": "",
"WorkingDir":""
},
"State": {
"Running": false,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
"Ghost": false
},
"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"NetworkSettings": {
"IpAddress": "",
"IpPrefixLen": 0,
"Gateway": "",
"Bridge": "",
"PortMapping": null
},
"SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
"ResolvConfPath": "/etc/resolv.conf",
"Volumes": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "49153"
}
]
},
"Links": ["/name:alias"],
"PublishAllPorts": false,
"CapAdd: ["NET_ADMIN"],
"CapDrop: ["MKNOD"]
}
}
"""
if not isinstance(id, six.string_types):
raise TypeError('must supply a string as the id')
# TODO: We should probably catch container not found error and return out own errors.
response = normalize_keys(self.client.inspect_container(id))
# TODO: normalize response to change - to _
self.id = response['id']
self.name = response['name'].replace('/', '')
self.image = response['image']
# come back and figure the timezone stuff out later.
self.created_at = dateutil.parser.parse(response['created'], ignoretz=True)
self.config = ContainerConfig(response['config'])
self.host_config = HostConfig(response['host_config'])
if self._transcribe:
self.start_transcribing()
def _handler(self, signum=None, frame=None):
# add for debugging
# print 'Transcriber is being terminated with signum: {1}.\n'.format(self.name, signum)
sys.exit(0)
def _start_recording(self, queue):
"""
"""
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, self._handler)
client = None
try:
if isinstance(self.client.verify, bool):
tls_config = docker.tls.TLSConfig(
client_cert=self.client.cert,
verify=self.client.verify
)
else:
tls_config = docker.tls.TLSConfig(
client_cert=self.client.cert,
ca_cert=self.client.verify
)
client = docker.Client(self.client.base_url, tls=tls_config, timeout=self.client.timeout, version=self.client.api_version)
for line in client.attach(self.id, True, True, True, False):
queue.put(line)
finally:
if isinstance(client, docker.Client):
client.close()
def _wait_for_exit_code(self, timer=10):
"""
"""
exit_code = None
# wait up to ten seconds for an exit code.
for i in range(0, timer):
time.sleep(1)
container_state = self.state()
exit_code = container_state['exit_code']
if exit_code is None or exit_code == 0:
                if exit_code == 0 and i == timer - 1:
break
continue
else:
break
return exit_code
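
def _example_usage(client):
    """Illustrative sketch (hypothetical names, not part of the original module):
    look up existing containers by name, or create and start a new one.
    """
    # find_by_name returns a dict mapping container names to Container objects.
    existing = Container.find_by_name(client, 'example-service')
    # Creating a Container requires a name and an image (or an existing id).
    container = Container(client, name='example-service-01', image='busybox:latest')
    started = container.start()  # True when the container's exit code is 0
    return existing, container, started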
|
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import functools
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import FilterRegistry, ValueFilter
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.manager import resources, ResourceManager
from c7n import query, utils
from c7n.utils import generate_arn, type_schema
ANNOTATION_KEY_MATCHED_METHODS = 'c7n:matched-resource-methods'
ANNOTATION_KEY_MATCHED_INTEGRATIONS = 'c7n:matched-method-integrations'
@resources.register('rest-account')
class RestAccount(ResourceManager):
# note this is not using a regular resource manager or type info
    # it's a pseudo resource, like an aws account
filter_registry = FilterRegistry('rest-account.filters')
action_registry = ActionRegistry('rest-account.actions')
class resource_type(query.TypeInfo):
service = 'apigateway'
name = id = 'account_id'
dimension = None
arn = False
@classmethod
def get_permissions(cls):
        # this resource is not query-manager based as it's a pseudo
        # resource: it always exists and represents the service's
        # account settings.
return ('apigateway:GET',)
@classmethod
    def has_arn(cls):
return False
def get_model(self):
return self.resource_type
def _get_account(self):
client = utils.local_session(self.session_factory).client('apigateway')
try:
account = client.get_account()
except ClientError as e:
if e.response['Error']['Code'] == 'NotFoundException':
                return []
            raise
account.pop('ResponseMetadata', None)
account['account_id'] = 'apigw-settings'
return [account]
def resources(self):
return self.filter_resources(self._get_account())
def get_resources(self, resource_ids):
return self._get_account()
OP_SCHEMA = {
'type': 'object',
'required': ['op', 'path'],
    'additionalProperties': False,
'properties': {
'op': {'enum': ['add', 'remove', 'update', 'copy', 'replace', 'test']},
'path': {'type': 'string'},
'value': {'type': 'string'},
'from': {'type': 'string'}
}
}
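
# A minimal sketch (illustrative only): a patch operation conforming to
# OP_SCHEMA, in the shape consumed by the 'update' actions below.
EXAMPLE_PATCH_OP = {
    'op': 'replace',
    'path': '/cloudwatchRoleArn',
    'value': 'arn:aws:iam::000000000000:role/GatewayLogger',
}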
@RestAccount.action_registry.register('update')
class UpdateAccount(BaseAction):
"""Update the cloudwatch role associated to a rest account
:example:
.. code-block:: yaml
policies:
- name: correct-rest-account-log-role
resource: rest-account
filters:
- cloudwatchRoleArn: arn:aws:iam::000000000000:role/GatewayLogger
actions:
- type: update
patch:
- op: replace
path: /cloudwatchRoleArn
value: arn:aws:iam::000000000000:role/BetterGatewayLogger
"""
permissions = ('apigateway:PATCH',)
schema = utils.type_schema(
'update',
patch={'type': 'array', 'items': OP_SCHEMA},
required=['patch'])
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
client.update_account(patchOperations=self.data['patch'])
class ApiDescribeSource(query.DescribeSource):
def augment(self, resources):
for r in resources:
tags = r.setdefault('Tags', [])
for k, v in r.pop('tags', {}).items():
tags.append({
'Key': k,
'Value': v})
return resources
@resources.register('rest-api')
class RestApi(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'apigateway'
arn_type = '/restapis'
enum_spec = ('get_rest_apis', 'items', None)
id = 'id'
name = 'name'
date = 'createdDate'
dimension = 'GatewayName'
cfn_type = config_type = "AWS::ApiGateway::RestApi"
universal_taggable = object()
permissions_enum = ('apigateway:GET',)
source_mapping = {
'config': query.ConfigSource,
'describe': ApiDescribeSource
}
@property
def generate_arn(self):
"""
Sample arn: arn:aws:apigateway:us-east-1::/restapis/rest-api-id
This method overrides c7n.utils.generate_arn and drops
account id from the generic arn.
"""
if self._generate_arn is None:
self._generate_arn = functools.partial(
generate_arn,
self.resource_type.service,
region=self.config.region,
resource_type=self.resource_type.arn_type)
return self._generate_arn
@RestApi.filter_registry.register('cross-account')
class RestApiCrossAccount(CrossAccountAccessFilter):
policy_attribute = 'policy'
permissions = ('apigateway:GET',)
@RestApi.action_registry.register('update')
class UpdateApi(BaseAction):
"""Update configuration of a REST API.
Non-exhaustive list of updateable attributes.
https://docs.aws.amazon.com/apigateway/api-reference/link-relation/restapi-update/#remarks
:example:
contrived example to update description on api gateways
.. code-block:: yaml
policies:
- name: apigw-description
resource: rest-api
filters:
- description: empty
actions:
- type: update
patch:
- op: replace
path: /description
value: "not empty :-)"
"""
permissions = ('apigateway:PATCH',)
schema = utils.type_schema(
'update',
patch={'type': 'array', 'items': OP_SCHEMA},
required=['patch'])
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
for r in resources:
client.update_rest_api(
restApiId=r['id'],
patchOperations=self.data['patch'])
@RestApi.action_registry.register('delete')
class DeleteApi(BaseAction):
"""Delete a REST API.
:example:
contrived example to delete rest api
.. code-block:: yaml
policies:
- name: apigw-delete
resource: rest-api
filters:
- description: empty
actions:
- type: delete
"""
permissions = ('apigateway:DELETE',)
schema = type_schema('delete')
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
for r in resources:
try:
client.delete_rest_api(restApiId=r['id'])
except client.exceptions.NotFoundException:
continue
@query.sources.register('describe-rest-stage')
class DescribeRestStage(query.ChildDescribeSource):
def get_query(self):
query = super(DescribeRestStage, self).get_query()
query.capture_parent_id = True
return query
def augment(self, resources):
results = []
# Using capture_parent_id changes the item format to (parent_id, resource)
for parent_id, r in resources:
r['restApiId'] = parent_id
tags = r.setdefault('Tags', [])
for k, v in r.pop('tags', {}).items():
tags.append({
'Key': k,
'Value': v})
results.append(r)
return results
@resources.register('rest-stage')
class RestStage(query.ChildResourceManager):
class resource_type(query.TypeInfo):
service = 'apigateway'
parent_spec = ('rest-api', 'restApiId', None)
enum_spec = ('get_stages', 'item', None)
name = id = 'stageName'
date = 'createdDate'
universal_taggable = True
cfn_type = config_type = "AWS::ApiGateway::Stage"
arn_type = 'stages'
permissions_enum = ('apigateway:GET',)
child_source = 'describe'
source_mapping = {
'describe': DescribeRestStage,
'config': query.ConfigSource
}
@property
def generate_arn(self):
self._generate_arn = functools.partial(
generate_arn,
self.resource_type.service,
region=self.config.region)
return self._generate_arn
def get_arns(self, resources):
arns = []
for r in resources:
arns.append(self.generate_arn('/restapis/' + r['restApiId'] +
'/stages/' + r[self.get_model().id]))
return arns
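# The resulting stage ARNs have the form (region shown is illustrative,
# matching the sample in the RestApi docstring above):
#   arn:aws:apigateway:us-east-1::/restapis/{rest-api-id}/stages/{stage-name}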
@RestStage.action_registry.register('update')
class UpdateStage(BaseAction):
"""Update/remove values of an api stage
:example:
.. code-block:: yaml
policies:
- name: disable-stage-caching
resource: rest-stage
filters:
- methodSettings."*/*".cachingEnabled: true
actions:
- type: update
patch:
- op: replace
path: /*/*/caching/enabled
value: 'false'
"""
permissions = ('apigateway:PATCH',)
schema = utils.type_schema(
'update',
patch={'type': 'array', 'items': OP_SCHEMA},
required=['patch'])
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
for r in resources:
self.manager.retry(
client.update_stage,
restApiId=r['restApiId'],
stageName=r['stageName'],
patchOperations=self.data['patch'])
@RestStage.action_registry.register('delete')
class DeleteStage(BaseAction):
"""Delete an api stage
:example:
.. code-block:: yaml
policies:
- name: delete-rest-stage
resource: rest-stage
filters:
- methodSettings."*/*".cachingEnabled: true
actions:
- type: delete
"""
permissions = ('apigateway:DELETE',)
schema = utils.type_schema('delete')
def process(self, resources):
client = utils.local_session(self.manager.session_factory).client('apigateway')
for r in resources:
try:
self.manager.retry(
client.delete_stage,
restApiId=r['restApiId'],
stageName=r['stageName'])
except client.exceptions.NotFoundException:
pass
@resources.register('rest-resource')
class RestResource(query.ChildResourceManager):
child_source = 'describe-rest-resource'
class resource_type(query.TypeInfo):
service = 'apigateway'
parent_spec = ('rest-api', 'restApiId', None)
enum_spec = ('get_resources', 'items', None)
id = 'id'
name = 'path'
permissions_enum = ('apigateway:GET',)
cfn_type = 'AWS::ApiGateway::Resource'
@query.sources.register('describe-rest-resource')
class DescribeRestResource(query.ChildDescribeSource):
def get_query(self):
query = super(DescribeRestResource, self).get_query()
query.capture_parent_id = True
return query
def augment(self, resources):
results = []
# Using capture_parent_id changes the item format to (parent_id, resource)
for parent_id, r in resources:
r['restApiId'] = parent_id
results.append(r)
return results
@resources.register('rest-vpclink')
class RestApiVpcLink(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'apigateway'
enum_spec = ('get_vpc_links', 'items', None)
id = 'id'
name = 'name'
permissions_enum = ('apigateway:GET',)
cfn_type = 'AWS::ApiGateway::VpcLink'
@RestResource.filter_registry.register('rest-integration')
class FilterRestIntegration(ValueFilter):
"""Filter rest resources based on a key value for the rest method integration of the api
:example:
.. code-block:: yaml
policies:
- name: api-method-integrations-with-type-aws
resource: rest-resource
filters:
- type: rest-integration
key: type
value: AWS
"""
schema = utils.type_schema(
'rest-integration',
method={'type': 'string', 'enum': [
'all', 'ANY', 'PUT', 'GET', "POST",
"DELETE", "OPTIONS", "HEAD", "PATCH"]},
rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('apigateway:GET',)
def process(self, resources, event=None):
method_set = self.data.get('method', 'all')
# 10 req/s with burst to 40
client = utils.local_session(
self.manager.session_factory).client('apigateway')
# uniqueness constraint validity across apis?
resource_map = {r['id']: r for r in resources}
futures = {}
results = set()
with self.executor_factory(max_workers=2) as w:
tasks = []
for r in resources:
r_method_set = method_set
if method_set == 'all':
r_method_set = r.get('resourceMethods', {}).keys()
for m in r_method_set:
tasks.append((r, m))
for task_set in utils.chunks(tasks, 20):
futures[w.submit(
self.process_task_set, client, task_set)] = task_set
for f in as_completed(futures):
task_set = futures[f]
if f.exception():
self.manager.log.warning(
"Error retrieving integrations on resources %s",
["%s:%s" % (r['restApiId'], r['path'])
for r, mt in task_set])
continue
for i in f.result():
if self.match(i):
results.add(i['resourceId'])
resource_map[i['resourceId']].setdefault(
ANNOTATION_KEY_MATCHED_INTEGRATIONS, []).append(i)
return [resource_map[rid] for rid in results]
def process_task_set(self, client, task_set):
results = []
for r, m in task_set:
try:
integration = client.get_integration(
restApiId=r['restApiId'],
resourceId=r['id'],
httpMethod=m)
integration.pop('ResponseMetadata', None)
integration['restApiId'] = r['restApiId']
integration['resourceId'] = r['id']
integration['resourceHttpMethod'] = m
results.append(integration)
except ClientError as e:
if e.response['Error']['Code'] == 'NotFoundException':
pass
return results
@RestResource.action_registry.register('update-integration')
class UpdateRestIntegration(BaseAction):
"""Change or remove api integration properties based on key value
:example:
.. code-block:: yaml
policies:
- name: enforce-timeout-on-api-integration
resource: rest-resource
filters:
- type: rest-integration
key: timeoutInMillis
value: 29000
actions:
- type: update-integration
patch:
- op: replace
path: /timeoutInMillis
value: "3000"
"""
schema = utils.type_schema(
'update-integration',
patch={'type': 'array', 'items': OP_SCHEMA},
required=['patch'])
permissions = ('apigateway:PATCH',)
def validate(self):
found = False
for f in self.manager.iter_filters():
if isinstance(f, FilterRestIntegration):
found = True
break
if not found:
raise ValueError(
("update-integration action requires ",
"rest-integration filter usage in policy"))
return self
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
ops = self.data['patch']
for r in resources:
for i in r.get(ANNOTATION_KEY_MATCHED_INTEGRATIONS, []):
client.update_integration(
restApiId=i['restApiId'],
resourceId=i['resourceId'],
httpMethod=i['resourceHttpMethod'],
patchOperations=ops)
@RestResource.action_registry.register('delete-integration')
class DeleteRestIntegration(BaseAction):
"""Delete an api integration. Useful if the integration type is a security risk.
:example:
.. code-block:: yaml
policies:
- name: enforce-no-resource-integration-with-type-aws
resource: rest-resource
filters:
- type: rest-integration
key: type
value: AWS
actions:
- type: delete-integration
"""
permissions = ('apigateway:DELETE',)
schema = utils.type_schema('delete-integration')
def process(self, resources):
client = utils.local_session(self.manager.session_factory).client('apigateway')
for r in resources:
for i in r.get(ANNOTATION_KEY_MATCHED_INTEGRATIONS, []):
try:
client.delete_integration(
restApiId=i['restApiId'],
resourceId=i['resourceId'],
httpMethod=i['resourceHttpMethod'])
except client.exceptions.NotFoundException:
continue
@RestResource.filter_registry.register('rest-method')
class FilterRestMethod(ValueFilter):
"""Filter rest resources based on a key value for the rest method of the api
:example:
.. code-block:: yaml
policies:
- name: api-without-key-required
resource: rest-resource
filters:
- type: rest-method
key: apiKeyRequired
value: false
"""
schema = utils.type_schema(
'rest-method',
method={'type': 'string', 'enum': [
'all', 'ANY', 'PUT', 'GET', "POST",
"DELETE", "OPTIONS", "HEAD", "PATCH"]},
rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('apigateway:GET',)
def process(self, resources, event=None):
method_set = self.data.get('method', 'all')
# 10 req/s with burst to 40
client = utils.local_session(
self.manager.session_factory).client('apigateway')
# uniqueness constraint validity across apis?
resource_map = {r['id']: r for r in resources}
futures = {}
results = set()
with self.executor_factory(max_workers=2) as w:
tasks = []
for r in resources:
r_method_set = method_set
if method_set == 'all':
r_method_set = r.get('resourceMethods', {}).keys()
for m in r_method_set:
tasks.append((r, m))
for task_set in utils.chunks(tasks, 20):
futures[w.submit(
self.process_task_set, client, task_set)] = task_set
for f in as_completed(futures):
task_set = futures[f]
if f.exception():
self.manager.log.warning(
"Error retrieving methods on resources %s",
["%s:%s" % (r['restApiId'], r['path'])
for r, mt in task_set])
continue
for m in f.result():
if self.match(m):
results.add(m['resourceId'])
resource_map[m['resourceId']].setdefault(
ANNOTATION_KEY_MATCHED_METHODS, []).append(m)
return [resource_map[rid] for rid in results]
def process_task_set(self, client, task_set):
results = []
for r, m in task_set:
method = client.get_method(
restApiId=r['restApiId'],
resourceId=r['id'],
httpMethod=m)
method.pop('ResponseMetadata', None)
method['restApiId'] = r['restApiId']
method['resourceId'] = r['id']
results.append(method)
return results
@RestResource.action_registry.register('update-method')
class UpdateRestMethod(BaseAction):
"""Change or remove api method behaviors based on key value
:example:
.. code-block:: yaml
policies:
- name: enforce-iam-permissions-on-api
resource: rest-resource
filters:
- type: rest-method
key: authorizationType
value: NONE
op: eq
actions:
- type: update-method
patch:
- op: replace
path: /authorizationType
value: AWS_IAM
"""
schema = utils.type_schema(
'update-method',
patch={'type': 'array', 'items': OP_SCHEMA},
required=['patch'])
permissions = ('apigateway:GET',)
def validate(self):
found = False
for f in self.manager.iter_filters():
if isinstance(f, FilterRestMethod):
found = True
break
if not found:
raise ValueError(
("update-method action requires ",
"rest-method filter usage in policy"))
return self
def process(self, resources):
client = utils.local_session(
self.manager.session_factory).client('apigateway')
ops = self.data['patch']
for r in resources:
for m in r.get(ANNOTATION_KEY_MATCHED_METHODS, []):
client.update_method(
restApiId=m['restApiId'],
resourceId=m['resourceId'],
httpMethod=m['httpMethod'],
patchOperations=ops)
|
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import pandas as pd
import numpy as np
from pandas import (Series, date_range, isna, Index, Timestamp)
from pandas.compat import lrange, range
from pandas.core.dtypes.common import is_integer
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
def test_getitem_boolean(test_data):
s = test_data.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty():
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isna()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
pytest.raises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
pytest.raises(IndexingError, f)
def test_getitem_boolean_object(test_data):
# using column from DataFrame
s = test_data.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(test_data):
ts = test_data.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
# similar indexed series
result = test_data.series.copy()
result[mask] = test_data.series * 2
expected = test_data.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = test_data.series.copy()
result[mask] = (test_data.series * 2)[0:5]
expected = (test_data.series * 2)[0:5].reindex_like(test_data.series)
expected[-mask] = test_data.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_get_set_boolean_different_order(test_data):
ordered = test_data.series.sort_values()
# setting
copy = test_data.series.copy()
copy[ordered > 0] = 0
expected = test_data.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = test_data.series[ordered > 0]
exp = test_data.series[test_data.series > 0]
assert_series_equal(sel, exp)
def test_where_unsafe():
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot safely represent
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
pytest.raises(ValueError, f)
def f():
s[mask] = [0] * 5
pytest.raises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_raise_on_error_deprecation():
# gh-14968
# deprecation of raise_on_error
s = Series(np.random.randn(5))
cond = s > 0
with tm.assert_produces_warning(FutureWarning):
s.where(cond, raise_on_error=True)
with tm.assert_produces_warning(FutureWarning):
s.mask(cond, raise_on_error=True)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
assert_series_equal(result, expected)
@pytest.mark.parametrize('cond', [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")]
])
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with tm.assert_raises_regex(ValueError, msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
pytest.raises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
pytest.raises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
pytest.raises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
@pytest.mark.parametrize('size', range(2, 6))
@pytest.mark.parametrize('mask', [
[True, False, False, False, False],
[True, False],
[False]
])
@pytest.mark.parametrize('item', [
2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min
])
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize('box', [
lambda x: np.array([x]),
lambda x: [x],
lambda x: (x,)
])
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
s = Series(data)
s[selection] = box(item)
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_numeric_with_string():
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_where_timedelta_coerce():
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='object')
assert_series_equal(rs, expected)
def test_where_datetime_conversion():
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='object')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_mask():
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An inference engine that performs object detection.
.. note::
``DetectionEngine`` only supports SSD models with a post-processing op.
To perform object detection with other types of model architectures, instead
`use the TensorFlow Lite API
</docs/edgetpu/tflite-python/>`_.
"""
from edgetpu.basic.basic_engine import BasicEngine
from edgetpu.utils.warning import deprecated
from edgetpu.utils import image_processing
import numpy as np
from PIL import Image
class DetectionCandidate(object):
"""A data structure that represents one detection candidate (label id, score, and bounding box).
This is returned by methods :func:`~DetectionEngine.detect_with_image` and
:func:`~DetectionEngine.detect_with_input_tensor`."""
__slots__ = ['label_id', 'score', 'bounding_box']
def __init__(self, label_id, score, x1, y1, x2, y2):
#: An :obj:`int` for the label id.
self.label_id = label_id
#: A :obj:`float` for the confidence score.
self.score = score
#: A 2-D array (:obj:`numpy.ndarray`) that describes the bounding box for the detected object.
#:
#: The format is [[x1, y1], [x2, y2]], where [x1, y1] is the top-left corner and [x2, y2]
#: is the bottom-right corner of the bounding box. The values can be either floats (relative
#: coordinates) or integers (pixel coordinates), depending on the ``relative_coord`` bool you
#: pass to the :func:`~DetectionEngine.detect_with_image` or
#: :func:`~DetectionEngine.detect_with_input_tensor` method. [0, 0] is always the top-left corner.
self.bounding_box = np.array([[x1, y1], [x2, y2]])
class DetectionEngine(BasicEngine):
"""Extends :class:`~edgetpu.basic.basic_engine.BasicEngine` to perform object detection
with a given model.
This API assumes the given model is trained for object detection. Currently, this
engine only supports SSD models with a post-processing operator.
"""
def __init__(self, model_path, device_path=None):
"""
Args:
model_path (str): Path to a TensorFlow Lite (``.tflite``) file.
This model must be `compiled for the Edge TPU
<https://coral.ai/docs/edgetpu/compiler/>`_; otherwise, it simply executes
on the host CPU.
device_path (str): The device path for the Edge TPU this engine should use. This argument
is needed only when you have multiple Edge TPUs and more inference engines than
available Edge TPUs. For details, read `how to use multiple Edge TPUs
</docs/edgetpu/multiple-edgetpu/>`_.
Raises:
ValueError: If the model's output tensor size is not 4.
"""
if device_path:
super().__init__(model_path, device_path)
else:
super().__init__(model_path)
output_tensors_sizes = self.get_all_output_tensors_sizes()
if output_tensors_sizes.size != 4:
raise ValueError(
('Detection model should have 4 output tensors! '
'This model has {}.'.format(output_tensors_sizes.size)))
self._tensor_start_index = [0]
offset = 0
for i in range(3):
offset = offset + output_tensors_sizes[i]
self._tensor_start_index.append(int(offset))
def detect_with_image(self, img, threshold=0.1, top_k=3,
keep_aspect_ratio=False, relative_coord=True,
resample=Image.NEAREST):
"""Performs object detection with an image.
Args:
img (:obj:`PIL.Image`): The image you want to process.
threshold (float): Minimum confidence threshold for detected objects. For example,
use ``0.5`` to receive only detected objects with a confidence equal-to or higher-than 0.5.
top_k (int): The maximum number of detected objects to return.
keep_aspect_ratio (bool): If True, keep the image aspect ratio the same when down-sampling
the image (by adding black pixel padding so it fits the input tensor's dimensions, via the
:func:`~edgetpu.utils.image_processing.resampling_with_original_ratio()` function).
If False, resize and reshape the image (without cropping) to match the input
tensor's dimensions.
(Note: This option should be the same as what is applied on input images
during model training. Otherwise, the accuracy might be affected and the
bounding box of detection result might be stretched.)
relative_coord (bool): If True, provide coordinates as float values between 0 and 1,
representing each position relative to the total image width/height. If False, provide
coordinates as integers, representing pixel positions in the original image. [0, 0] is
always the top-left corner.
resample (int): A resampling filter for image resizing.
This can be one of :attr:`PIL.Image.NEAREST`, :attr:`PIL.Image.BOX`,
:attr:`PIL.Image.BILINEAR`, :attr:`PIL.Image.HAMMING`, :attr:`PIL.Image.BICUBIC`,
or :attr:`PIL.Image.LANCZOS`. Default is :attr:`PIL.Image.NEAREST`. See `Pillow filters
<https://pillow.readthedocs.io/en/stable/handbook/concepts.html#filters>`_.
(Note: A complex filter such as :attr:`PIL.Image.BICUBIC` may create slightly better
accuracy but it also causes higher latency.)
Returns:
A :obj:`list` of detected objects as :class:`DetectionCandidate` objects.
Raises:
RuntimeError: If the model's input tensor shape doesn't match the shape expected for an
object detection model, which is [1, height, width, channel].
ValueError: If the input tensor channel is not 1 (grayscale) or 3 (RGB)
ValueError: If argument values are invalid.
"""
input_tensor_shape = self.get_input_tensor_shape()
if (input_tensor_shape.size != 4 or
input_tensor_shape[0] != 1):
raise RuntimeError(
'Invalid input tensor shape! Expected: [1, height, width, channel]')
_, height, width, channel = input_tensor_shape
if keep_aspect_ratio:
resized_img, ratio = image_processing.resampling_with_original_ratio(
img, (width, height), resample)
else:
resized_img = img.resize((width, height), resample)
# Handle color space conversion.
if channel == 1:
resized_img = resized_img.convert('L')
elif channel == 3:
resized_img = resized_img.convert('RGB')
else:
raise ValueError(
'Invalid input tensor channel! Expected: 1 or 3. Actual: %d' % channel)
input_tensor = np.asarray(resized_img).flatten()
candidates = self.detect_with_input_tensor(input_tensor, threshold, top_k)
for c in candidates:
if keep_aspect_ratio:
c.bounding_box = c.bounding_box / ratio
c.bounding_box[0] = np.maximum([0.0, 0.0], c.bounding_box[0])
c.bounding_box[1] = np.minimum([1.0, 1.0], c.bounding_box[1])
if relative_coord is False:
c.bounding_box = c.bounding_box * [img.size]
return candidates
def detect_with_input_tensor(self, input_tensor, threshold=0.1, top_k=3):
"""Performs object detection with a raw input tensor.
This requires you to process the input data (the image) and convert
it to the appropriately formatted input tensor for your model.
Args:
input_tensor (:obj:`numpy.ndarray`): A 1-D array as the input tensor.
threshold (float): Minimum confidence threshold for detected objects. For example,
use ``0.5`` to receive only detected objects with a confidence equal-to or higher-than 0.5.
top_k (int): The maximum number of detected objects to return.
Returns:
A :obj:`list` of detected objects as :class:`DetectionCandidate` objects.
Raises:
ValueError: If argument values are invalid.
"""
if top_k <= 0:
raise ValueError('top_k must be positive!')
_, raw_result = self.run_inference(input_tensor)
result = []
num_candidates = raw_result[self._tensor_start_index[3]]
for i in range(int(round(num_candidates))):
score = raw_result[self._tensor_start_index[2] + i]
if score > threshold:
label_id = int(round(raw_result[self._tensor_start_index[1] + i]))
y1 = max(0.0, raw_result[self._tensor_start_index[0] + 4 * i])
x1 = max(0.0, raw_result[self._tensor_start_index[0] + 4 * i + 1])
y2 = min(1.0, raw_result[self._tensor_start_index[0] + 4 * i + 2])
x2 = min(1.0, raw_result[self._tensor_start_index[0] + 4 * i + 3])
result.append(DetectionCandidate(label_id, score, x1, y1, x2, y2))
result.sort(key=lambda x: -x.score)
return result[:top_k]
@deprecated
def DetectWithImage(self, img, threshold=0.1, top_k=3,
keep_aspect_ratio=False, relative_coord=True,
resample=Image.NEAREST):
return self.detect_with_image(img, threshold, top_k, keep_aspect_ratio,
relative_coord, resample)
@deprecated
def DetectWithInputTensor(self, input_tensor, threshold=0.1, top_k=3):
return self.detect_with_input_tensor(input_tensor, threshold, top_k)
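# A minimal usage sketch (not part of this module). The model and image paths
# below are hypothetical placeholders; any SSD detection model compiled for
# the Edge TPU should work:
#
#   from PIL import Image
#   from edgetpu.detection.engine import DetectionEngine
#
#   engine = DetectionEngine('ssd_mobilenet_edgetpu.tflite')  # hypothetical path
#   img = Image.open('photo.jpg')                             # hypothetical path
#   for candidate in engine.detect_with_image(img, threshold=0.5, top_k=5):
#       print(candidate.label_id, candidate.score, candidate.bounding_box)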
|
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from openpyxl.xml.constants import (
ARC_CONTENT_TYPES,
ARC_WORKBOOK,
CONTYPES_NS,
XLSM,
XLSX,
XLTM,
XLTX,
)
@pytest.fixture
def FileExtension():
from ..manifest import FileExtension
return FileExtension
class TestFileExtension:
def test_ctor(self, FileExtension):
ext = FileExtension(
ContentType="application/xml",
Extension="xml"
)
xml = tostring(ext.to_tree())
expected = """
<Default ContentType="application/xml" Extension="xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, FileExtension):
src = """
<Default ContentType="application/xml" Extension="xml"/>
"""
node = fromstring(src)
ext = FileExtension.from_tree(node)
assert ext == FileExtension(ContentType="application/xml", Extension="xml")
@pytest.fixture
def Override():
from ..manifest import Override
return Override
class TestOverride:
def test_ctor(self, Override):
override = Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
xml = tostring(override.to_tree())
expected = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Override):
src = """
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
"""
node = fromstring(src)
override = Override.from_tree(node)
assert override == Override(
ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
PartName="/xl/workbook.xml"
)
@pytest.fixture
def Manifest():
from ..manifest import Manifest
return Manifest
class TestManifest:
def test_ctor(self, Manifest):
manifest = Manifest()
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" />
<Default ContentType="application/xml" Extension="xml" />
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"
PartName="/xl/sharedStrings.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml"
PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml"
PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"
PartName="/docProps/app.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert len(manifest.Default) == 2
assert len(manifest.Override) == 10
def test_filenames(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.filenames == [
'/xl/workbook.xml',
'/xl/worksheets/sheet1.xml',
'/xl/chartsheets/sheet1.xml',
'/xl/theme/theme1.xml',
'/xl/styles.xml',
'/xl/sharedStrings.xml',
'/xl/drawings/drawing1.xml',
'/xl/charts/chart1.xml',
'/docProps/core.xml',
'/docProps/app.xml',
]
def test_exts(self, datadir, Manifest):
datadir.chdir()
with open("manifest.xml") as src:
node = fromstring(src.read())
manifest = Manifest.from_tree(node)
assert manifest.extensions == [
('xml', 'application/xml'),
]
class TestContentTypes:
def test_workbook(self):
from openpyxl import Workbook
wb = Workbook()
from ..manifest import write_content_types
manifest = write_content_types(wb)
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" />
<Default ContentType="application/xml" Extension="xml" />
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"
PartName="/xl/sharedStrings.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml"
PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml"
PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"
PartName="/docProps/app.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"
PartName="/xl/worksheets/sheet1.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_chartsheet(self):
from openpyxl import Workbook
wb = Workbook()
wb.create_chartsheet()
from ..manifest import write_content_types
manifest = write_content_types(wb)
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" />
<Default ContentType="application/xml" Extension="xml" />
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
PartName="/xl/workbook.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"
PartName="/xl/sharedStrings.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml"
PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml"
PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"
PartName="/docProps/app.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"
PartName="/xl/worksheets/sheet1.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml"
PartName="/xl/chartsheets/sheet1.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required # for XPATH lookup
@pytest.mark.parametrize("has_vba, as_template, content_type",
[
(None, False, XLSX),
(None, True, XLTX),
(True, False, XLSM),
(True, True, XLTM)
]
)
def test_templates(self, has_vba, as_template, content_type, Manifest, Override):
from openpyxl import Workbook
from ..manifest import write_content_types
wb = Workbook()
if has_vba:
archive = ZipFile(BytesIO(), "w")
parts = [Override("/xl/workbook.xml", "")]
m = Manifest(Override=parts)
archive.writestr(ARC_CONTENT_TYPES, tostring(m.to_tree()))
wb.vba_archive = archive
manifest = write_content_types(wb, as_template=as_template)
xml = tostring(manifest.to_tree())
root = fromstring(xml)
node = root.find('{%s}Override[@PartName="/xl/workbook.xml"]'% CONTYPES_NS)
assert node.get("ContentType") == content_type
def test_comments(self, Manifest):
from openpyxl import Workbook
from ..manifest import write_content_types
wb = Workbook()
ws = wb.active
ws._comment_count = 1
manifest = write_content_types(wb)
xml = tostring(manifest.to_tree())
expected = """
<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
<Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels"/>
<Default ContentType="application/xml" Extension="xml"/>
<Default ContentType="application/vnd.openxmlformats-officedocument.vmlDrawing" Extension="vml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml" PartName="/xl/workbook.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml" PartName="/xl/sharedStrings.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml" PartName="/xl/styles.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml" PartName="/xl/theme/theme1.xml"/>
<Override ContentType="application/vnd.openxmlformats-package.core-properties+xml" PartName="/docProps/core.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml" PartName="/docProps/app.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml" PartName="/xl/worksheets/sheet1.xml"/>
<Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml" PartName="/xl/comments1.xml"/>
</Types>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
|
|
"""
.. module: lemur.sources.service
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import arrow
from flask import current_app
from lemur import database
from lemur.sources.models import Source
from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service
from lemur.endpoints import service as endpoint_service
from lemur.destinations import service as destination_service
from lemur.certificates.schemas import CertificateUploadInputSchema
from lemur.plugins.base import plugins
def certificate_create(certificate, source):
data, errors = CertificateUploadInputSchema().load(certificate)
if errors:
raise Exception("Unable to import certificate: {reasons}".format(reasons=errors))
data['creator'] = certificate['creator']
cert = certificate_service.import_certificate(**data)
cert.description = "This certificate was automatically discovered by Lemur"
cert.sources.append(source)
sync_update_destination(cert, source)
database.update(cert)
return cert
def certificate_update(certificate, source):
for s in certificate.sources:
if s.label == source.label:
break
else:
certificate.sources.append(source)
sync_update_destination(certificate, source)
database.update(certificate)
def sync_update_destination(certificate, source):
dest = destination_service.get_by_label(source.label)
if dest:
for d in certificate.destinations:
if d.label == source.label:
break
else:
certificate.destinations.append(dest)
def sync_endpoints(source):
new, updated = 0, 0
current_app.logger.debug("Retrieving endpoints from {0}".format(source.label))
s = plugins.get(source.plugin_name)
try:
endpoints = s.get_endpoints(source.options)
except NotImplementedError:
current_app.logger.warning("Unable to sync endpoints for source {0}: plugin has not implemented 'get_endpoints'".format(source.label))
return
for endpoint in endpoints:
exists = endpoint_service.get_by_dnsname(endpoint['dnsname'])
certificate_name = endpoint.pop('certificate_name')
endpoint['certificate'] = certificate_service.get_by_name(certificate_name)
if not endpoint['certificate']:
current_app.logger.error(
"Certificate Not Found. Name: {0} Endpoint: {1}".format(certificate_name, endpoint['name']))
continue
policy = endpoint.pop('policy')
policy_ciphers = []
for nc in policy['ciphers']:
policy_ciphers.append(endpoint_service.get_or_create_cipher(name=nc))
policy['ciphers'] = policy_ciphers
endpoint['policy'] = endpoint_service.get_or_create_policy(**policy)
endpoint['source'] = source
if not exists:
current_app.logger.debug("Endpoint Created: Name: {name}".format(name=endpoint['name']))
endpoint_service.create(**endpoint)
new += 1
else:
current_app.logger.debug("Endpoint Updated: Name: {name}".format(name=endpoint['name']))
endpoint_service.update(exists.id, **endpoint)
updated += 1
return new, updated
def sync_certificates(source, user):
new, updated = 0, 0
current_app.logger.debug("Retrieving certificates from {0}".format(source.label))
s = plugins.get(source.plugin_name)
certificates = s.get_certificates(source.options)
for certificate in certificates:
exists = certificate_service.get_by_name(certificate['name'])
certificate['owner'] = user.email
certificate['creator'] = user
if not exists:
current_app.logger.debug("Creating Certificate. Name: {name}".format(name=certificate['name']))
certificate_create(certificate, source)
new += 1
else:
current_app.logger.debug("Updating Certificate. Name: {name}".format(name=certificate['name']))
certificate_update(exists, source)
updated += 1
assert len(certificates) == new + updated
return new, updated
def sync(source, user):
new_certs, updated_certs = sync_certificates(source, user)
new_endpoints, updated_endpoints = sync_endpoints(source)
source.last_run = arrow.utcnow()
database.update(source)
return {'endpoints': (new_endpoints, updated_endpoints), 'certificates': (new_certs, updated_certs)}
def create(label, plugin_name, options, description=None):
"""
Creates a new source, that can then be used as a source for certificates.
:param label: Source common name
:param plugin_name:
:param options:
:param description:
:rtype : Source
:return: New source
"""
source = Source(label=label, options=options, plugin_name=plugin_name, description=description)
return database.create(source)
def update(source_id, label, options, description):
"""
Updates an existing source.
:param source_id: Lemur assigned ID
:param label: Source common name
:param options:
:param description:
:rtype : Source
:return:
"""
source = get(source_id)
source.label = label
source.options = options
source.description = description
return database.update(source)
def delete(source_id):
"""
Deletes a source.
:param source_id: Lemur assigned ID
"""
database.delete(get(source_id))
def get(source_id):
"""
Retrieves a source by its Lemur assigned ID.
:param source_id: Lemur assigned ID
:rtype : Source
:return:
"""
return database.get(Source, source_id)
def get_by_label(label):
"""
Retrieves a source by its label
:param label:
:return:
"""
return database.get(Source, label, field='label')
def get_all():
"""
Retrieves all sources currently known by Lemur.
:return:
"""
query = database.session_query(Source)
return database.find_all(query, Source, {}).all()
def render(args):
filt = args.pop('filter')
certificate_id = args.pop('certificate_id', None)
if certificate_id:
query = database.session_query(Source).join(Certificate, Source.certificate)
query = query.filter(Certificate.id == certificate_id)
else:
query = database.session_query(Source)
if filt:
terms = filt.split(';')
query = database.filter(query, Source, terms)
return database.sort_and_page(query, Source, args)
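# A minimal usage sketch (not part of this module). The plugin name and
# options below are hypothetical; a source plugin registered with Lemur and an
# authenticated user object are required:
#
#   source = create(label='prod-elb', plugin_name='aws-source', options=[],
#                   description='Discovers certificates attached to ELBs')
#   counts = sync(source, user)
#   # -> {'endpoints': (new, updated), 'certificates': (new, updated)}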
|
|
#!/usr/bin/env python
import sys
import getopt
import time
import os
import subprocess
# "solver" should point to the binary built.
# e.g. "/home/z3-str/str"
solver = os.path.join(os.path.abspath(os.path.dirname(__file__)), "str_static")
#===================================================================
def encodeConstStr(constStr):
constStr = constStr.replace(' ', '_aScIi_040')
constStr = constStr.replace('\\\"', '_aScIi_042')
constStr = constStr.replace('#', '_aScIi_043')
constStr = constStr.replace('$', '_aScIi_044')
constStr = constStr.replace('\'', '_aScIi_047')
constStr = constStr.replace('(', '_aScIi_050')
constStr = constStr.replace(')', '_aScIi_051')
constStr = constStr.replace(',', '_aScIi_054')
constStr = constStr.replace(':', '_aScIi_072')
constStr = constStr.replace(';', '_aScIi_073')
constStr = constStr.replace('[', '_aScIi_133')
constStr = constStr.replace(']', '_aScIi_135')
constStr = constStr.replace('\\\\', '_aScIi_134')
constStr = constStr.replace('{', '_aScIi_173')
constStr = constStr.replace('}', '_aScIi_175')
constStr = constStr.replace('|', '_aScIi_174')
constStr = constStr.replace('`', '_aScIi_140')
constStr = constStr.replace('\\t', '_aScIi_011')
constStr = constStr.replace('\\n', '_aScIi_012')
return constStr
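# Illustrative example of the encoding above (derived from the replacement
# table): a constant string such as 'a b;c' becomes
# 'a_aScIi_040b_aScIi_073c' after substitution.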
def convert(org_file):
absPath = os.path.dirname(os.path.abspath(org_file));
convertDir = absPath + "/convert";
if not os.path.exists(convertDir):
os.makedirs(convertDir)
fileName = os.path.basename(org_file);
new_file = os.path.join(convertDir, fileName)
f_o = open(org_file, 'r')
f_n = open(new_file, 'w')
declared_string_var = []
declared_string_const = []
converted_cstr = ""
linesInFile = f_o.readlines()
output_str = ""
for line in linesInFile:
line = line.strip();
if line == "":
continue
if line.startswith(';'):
output_str += line + "\n"
continue
if line.startswith('%'):
output_str += line + "\n"
continue
if line.startswith('//'):
output_str += line + "\n"
continue
if line.find("get-model") != -1:
# output_str += line + "\n"
continue
if line.find("get-value") != -1:
# output_str += line + "\n"
continue
if line.find("set-option") != -1:
output_str += line + "\n"
continue
if line.find("declare-variable") != -1:
declared_string_var.append(line.replace('declare-variable', 'declare-const'))
continue
# -----------------------------
# start: processing const string
p1 = -1
while True:
p1 = line.find('\"', p1 + 1);
if p1 == -1:
break;
# exclude the case "str\"str\"str"
p2 = line.find('\"', p1 + 1)
while (not (p2 == -1)) and (not line[p2 - 2] == '\\') and line[p2 - 1] == '\\' and line[p2] == '\"':
p2 = line.find('\"', p2 + 1)
if p2 == -1:
print('input format error!\n')
return "eRrOr"
old_s = line[p1: p2 + 1]
encoded_s = encodeConstStr( old_s[1 : len(old_s) - 1] )
line = line.replace(old_s, '__cOnStStR_' + encoded_s)
if encoded_s not in declared_string_const:
declared_string_const.append(encoded_s)
p1 = p2
# -----------------------------
# end: processing const string
converted_cstr = converted_cstr + line + '\n'
for strv in declared_string_var:
output_str = output_str + strv + "\n"
output_str = output_str + '\n'
for str_const in declared_string_const:
output_str = output_str + '(declare-const __cOnStStR_' + str_const + ' String)\n'
output_str = output_str + '\n'
output_str = output_str + converted_cstr
print output_str
f_n.write(output_str)
f_n.close()
f_o.close()
return new_file
def processOutput(output):
if output.find("(error \"line") >= 0:
res = ''
lines = output.split("\n")
for line in lines:
if line.startswith('(error '):
res = res + line + "\n"
return res
output = output.replace("\t", "\\t")
lines = output.split("\n")
result = ""
for line in lines:
line = line.lstrip(' ');
line = line.replace("\n", "\\n")
# skip intermediate variable solutions
if line.startswith('_t_'):
continue
result = result + line + "\n"
return result
def printUseage():
print 'USAGE: '
print ' Z3-str.py -f <inputfile> [OPTIONS]\n'
print 'OPTIONS:'
print ' -l <freeVarMaxLen> Define length upper bound for free variables'
print ' A free variable refers to a variable whose value'
print ' is not bounded, e.g, Z = X Y "abc" /\ X = M "efg"'
print ' Only Y and M are free variables.'
print ' is not bounded, e.g., Z = X Y "abc" /\ X = M "efg"'
print ' If not provided, DEFAULT value is 7'
print ''
print ' -p [Experimental]'
print ' Allow self-cut (or loop inducing cut).'
print ' ** WARNING: it may not terminate if self-cut is allowed'
print ' Avoiding self-cut is the DEFAULT behavior (no "-p")'
print '\n'
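# Example invocation (illustrative; the input file name is a placeholder):
#   ./Z3-str.py -f constraints.smt2 -l 10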
if __name__ == '__main__':
if not os.path.exists(solver):
print "Error: No Z3-str binary found @ \"" + solver + "\"."
exit(0)
argv = sys.argv[1:]
inputFile = '';
freeVarMaxLen = 7;
allowLoopCut = 0;
try:
opts, args = getopt.getopt(argv,"hpf:l:")
except getopt.GetoptError:
printUseage()
sys.exit()
for opt, arg in opts:
if opt == '-h':
printUseage()
sys.exit()
elif opt == '-p':
allowLoopCut = 1
elif opt in ("-f"):
inputFile = arg
elif opt in ("-l"):
try:
freeVarMaxLen = int(arg)
if freeVarMaxLen < 1:
print 'Error: "-l <freeVarMaxLen>"'
print ' <freeVarMaxLen> should be a positive integer'
sys.exit()
except ValueError:
print 'Error: "-l <freeVarMaxLen>"'
print ' <freeVarMaxLen> should be a positive integer'
sys.exit()
if inputFile == '':
printUsage()
sys.exit()
if not os.path.exists(inputFile):
print "Error: Input file does not exist: \"" + inputFile + "\""
exit(0)
convertedFile = convert(inputFile)
if convertedFile == "eRrOr":
exit(0)
try:
start = time.time()
freeVarMaxLenStr = "%d"%freeVarMaxLen
paras = []
if allowLoopCut == 0:
paras = [solver, "-f", convertedFile, "-l", freeVarMaxLenStr]
else:
paras = [solver, "-f", convertedFile, "-l", freeVarMaxLenStr, "-p"]
err = subprocess.check_output(paras)
elapsed = time.time() - start
outStr = processOutput(err)
sys.stdout.write(outStr)
except KeyboardInterrupt:
print "Interrupted by keyborad";
os.remove(convertedFile)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def assert_close(
x, y, data=None, summarize=None, message=None, name="assert_close"):
"""Assert that x and y are within machine epsilon of each other.
Args:
x: Floating-point `Tensor`
y: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
"""
message = message or ""
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
if data is None:
data = [
message,
"Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
y.name, y
]
if x.dtype.is_integer:
return check_ops.assert_equal(
x, y, data=data, summarize=summarize, message=message, name=name)
with ops.name_scope(name, "assert_close", [x, y, data]):
tol = np.finfo(x.dtype.as_numpy_dtype).eps
condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
return control_flow_ops.Assert(
condition, data, summarize=summarize)
def assert_integer_form(
x, data=None, summarize=None, message=None,
int_dtype=None, name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x.op.name)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data, summarize=summarize, message=message, name=name)
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x.op.name)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x, message="'{}' cannot contain fractional components.".format(
x.op.name)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(math_ops.equal(
array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal,
lambda: constant_op.constant(False))
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs"):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
multidimensional: Python `bool`, default `False`.
If `True`, the last dimension of `logits` or `probs`, a `[N1, N2, ..., k]`
dimensional tensor, represents the logits or probabilities of `shape[-1]`
classes.
validate_args: Python `bool`, default `False`. When `True`, either assert
`0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
of `probs` sums to one.
name: A name for this operation (optional).
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits")
if not logits.dtype.is_floating:
raise TypeError("logits must having floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs")
if not probs.dtype.is_floating:
raise TypeError("probs must having floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,
message="probs does not sum to 1.")]
else:
dependencies += [check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("logits"):
if multidimensional:
# Here we don't compute the multidimensional case in a manner
# consistent with the unidimensional case; instead we follow the TF
# convention. Typically, you might expect to see
# logits = log(probs) - log(probs[pivot]). A side-effect of
# being consistent with the TF approach is that the unidimensional case
# implicitly handles the second dimension but the multidimensional case
# explicitly keeps the pivot dimension.
return math_ops.log(probs), probs
return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
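# Worked example for the binary (non-multidimensional) branch above: for
# probs = 0.75 the returned logit is log(0.75) - log1p(-0.75) = log(3)
# ~= 1.0986, and sigmoid(1.0986) ~= 0.75, so the two parameterizations
# round-trip up to floating-point error.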
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
dtypes.bool: True,
dtypes.uint8: True,
dtypes.uint16: True,
}.get(dt.base_dtype, False)
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
dtypes.float16: True,
dtypes.float32: True,
dtypes.float64: True,
dtypes.int8: True,
dtypes.int16: True,
dtypes.int32: True,
dtypes.int64: True,
}.get(dt.base_dtype, False)
def _is_known_dtype(dt):
"""Helper returning True if dtype is known."""
return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == dtypes.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_interger or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == dtypes.bool
def embed_check_categorical_event_shape(
categorical_param,
name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest int32 index.
{
dtypes.float16: int(2**11), # Largest int as a float16.
dtypes.float32: int(2**24),
dtypes.float64: int(2**53),
}.get(categorical_param.dtype.base_dtype, 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with ops.name_scope(name, values=[categorical_param]):
x = ops.convert_to_tensor(categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = x.dtype.base_dtype
max_event_size = (_largest_integer_by_dtype(x_dtype)
if x_dtype.is_floating else 0)
if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(x_dtype.name))
try:
x_shape_static = x.get_shape().with_rank_at_least(1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
if x_shape_static[-1].value is not None:
event_size = x_shape_static[-1].value
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError(
"Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
x_dtype.name, event_size, max_event_size))
return x
else:
event_size = array_ops.shape(x, name="x_shape")[-1]
return control_flow_ops.with_dependencies([
check_ops.assert_rank_at_least(
x, 1, message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
check_ops.assert_greater_equal(
array_ops.shape(x)[-1], 2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
check_ops.assert_less_equal(
event_size, max_event_size,
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
x_dtype.name, max_event_size)),
], x)
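# Per the bound in the docstring above: a float16-parameterized categorical can
# have at most 2**11 = 2048 classes, float32 at most 2**24, and float64 is
# limited by the int32 indexing cap of 2**31 - 1 rather than its own 2**53.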
def embed_check_integer_casting_closed(
x,
target_dtype,
assert_nonnegative=True,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (not _is_integer_like_by_dtype(x.dtype)
and not x.dtype.is_floating):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(x.dtype.name))
if (not _is_integer_like_by_dtype(target_dtype)
and not target_dtype.is_floating):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(target_dtype.name))
if (not _is_integer_like_by_dtype(x.dtype)
and not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(
x.op.name, x.dtype.name, target_dtype.name))
assertions = []
if assert_nonnegative:
assertions += [
check_ops.assert_non_negative(
x, message="Elements must be non-negative."),
]
if x.dtype.is_floating:
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check implies the magnitude check below, we need only this one.
assertions += [
assert_integer_form(
x, int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
target_dtype.name)),
]
else:
if (_largest_integer_by_dtype(x.dtype)
> _largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
check_ops.assert_less_equal(
x, _largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and
(_smallest_integer_by_dtype(x.dtype)
< _smallest_integer_by_dtype(target_dtype))):
assertions += [
check_ops.assert_greater_equal(
x, _smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return control_flow_ops.with_dependencies(assertions, x)
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / sum(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
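# Worked example: n = 3., counts = [1., 2.] gives
# lgamma(4) - (lgamma(2) + lgamma(3)) = log(6) - log(2) = log(3) ~= 1.0986,
# i.e. the log of 3!/(1! * 2!) = 3, the number of distinct orderings.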
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorTriL ignores the upper triangle.
operator = LinearOperatorTriL(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tf.contrib.distributions.MVNCholesky(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To
be applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops.
Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = array_ops.matrix_diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
return transformed_mat
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False` additional graph-runtime checks are
performed. These checks entail moving data from GPU to CPU.
Example:
```python
x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with ops.name_scope(name, values=[x, shift]):
x = ops.convert_to_tensor(x, name="x")
shift = ops.convert_to_tensor(shift, name="shift")
# We do not assign back to preserve constant-ness.
check_ops.assert_integer(shift)
shift_value_static = tensor_util.constant_value(shift)
ndims = x.get_shape().ndims
if ndims is not None and shift_value_static is not None:
if ndims < 2: return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0: return x
perm = np.roll(np.arange(ndims), shift_value_static)
return array_ops.transpose(x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = array_ops.rank(x)
shift = array_ops.where(math_ops.less(shift, 0),
math_ops.mod(-shift, ndims),
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
perm = array_ops.concat([last, first], 0)
return array_ops.transpose(x, perm=perm)
def pick_vector(cond,
true_vector,
false_vector,
name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
Example:
```python
pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11]
pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17]
```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond.name, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s"
% (true_vector.name, true_vector.dtype,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
array_ops.concat([true_vector, false_vector], 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
"""Creates a (batch of) lower triangular matrix from a vector of inputs.
If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,
b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))`.
Although the non-batch complexity is O(n**2), large constants and sub-optimal
vectorization mean the complexity of this function is 5x slower than zeroing
out the upper triangular, i.e., `tf.matrix_band_part(X, -1, 0)`. This
function becomes competitive only when several matmul/cholesky/etc ops can be
elided in constructing the input. Example: wiring a fully connected layer as
a covariance matrix; this function reduces the final layer by 2x and possibly
reduces the network arch complexity considerably. In most cases it is better
to simply build a full matrix and zero out the upper triangular elements,
e.g., `tril = tf.matrix_band_part(full, -1, 0)`, rather than directly
construct a lower triangular.
Warning: This Op is intended for convenience, not efficiency.
Example:
```python
fill_lower_triangular([1, 2, 3, 4, 5, 6]) # [[1, 0, 0],
# [2, 3, 0],
# [4, 5, 6]]
```
For comparison, a pure numpy version of this function can be found in
`distribution_util_test.py`, function `_fill_lower_triangular`.
Args:
x: `Tensor` representing lower triangular elements.
validate_args: Python `bool`, default `False`. Whether to ensure the shape
of `x` can be mapped to a lower triangular matrix (controls non-static
checks only).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower triangular elements filled from `x`.
Raises:
ValueError: if shape of `x` has static shape which cannot be mapped to a
lower triangular matrix.
"""
# TODO(jvdillon): Replace this code with dedicated op when it exists.
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (x.get_shape().ndims is not None and
x.get_shape()[-1].value is not None):
d = x.get_shape()[-1].value
# d = n(n+1)/2 implies n is:
n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
d_inferred = n * (n + 1) / 2
if d != d_inferred:
raise ValueError("Input cannot be mapped to a lower triangular; "
"n*(n+1)/2 = %d != %d" % (d_inferred, d))
final_shape = x.get_shape()[:-1].concatenate(
tensor_shape.TensorShape([n, n]))
else:
d = math_ops.cast(array_ops.shape(x)[-1], dtype=dtypes.float32)
# d = n(n+1)/2 implies n is:
n = math_ops.cast(0.5 * (math_ops.sqrt(1. + 8. * d) - 1.),
dtype=dtypes.int32)
if validate_args:
is_valid_input_shape = check_ops.assert_equal(
n * (n + 1) / 2, d,
message="Input cannot be mapped to a lower triangular.")
n = control_flow_ops.with_dependencies([is_valid_input_shape], n)
final_shape = x.get_shape()[:-1].concatenate(
tensor_shape.TensorShape([None, None]))
def tril_ids(n):
"""Internal helper to create vector of linear indices into y."""
# Build the ids statically; chose 512 because it implies 1MiB.
if not tensor_util.is_tensor(n) and n <= 512:
ids = np.arange(n**2, dtype=np.int32)
rows = (ids / n).astype(np.int32) # Implicit floor.
# We need to stop incrementing the index when we encounter
# upper-triangular elements. The idea here is to compute the
# lower-right number of zeros then by "symmetry" subtract this from the
# total number of zeros, n(n-1)/2.
# Then we note that: n(n-1)/2 - (n-r)*(n-r-1)/2 = r(2n-r-1)/2
offset = (rows * (2 * n - rows - 1) / 2).astype(np.int32)
# We could also zero out when (rows < cols) == (rows < ids-n*rows).
# mask = (ids <= (n + 1) * rows).astype(np.int32)
else:
ids = math_ops.range(n**2)
rows = math_ops.cast(ids / n, dtype=dtypes.int32)
offset = math_ops.cast(rows * (2 * n - rows - 1) / 2,
dtype=dtypes.int32)
return ids - offset
# Special-case non-batch case.
if x.get_shape().ndims == 1:
y = array_ops.gather(x, array_ops.reshape(tril_ids(n), [n, n]))
y = array_ops.matrix_band_part(y, -1, 0)
y.set_shape(y.get_shape().merge_with(final_shape))
return y
# Make ids for each batch dim.
if (x.get_shape().ndims is not None and
x.get_shape()[:-1].is_fully_defined()):
batch_shape = np.asarray(x.get_shape()[:-1].as_list(), dtype=np.int32)
m = np.prod(batch_shape).astype(np.int32)
else:
batch_shape = array_ops.shape(x)[:-1]
m = math_ops.reduce_prod(array_ops.shape(x)[:-1])
batch_ids = math_ops.range(m)
# Assemble the tril_ids into batch,tril_id pairs.
idx = array_ops.stack([
array_ops.tile(array_ops.expand_dims(batch_ids, 1), [1, n * n]),
array_ops.tile(array_ops.expand_dims(tril_ids(n), 0), [m, 1])
])
idx = array_ops.transpose(idx, [1, 2, 0])
# Gather up, reshape, and return.
y = array_ops.reshape(x, [-1, d])
y = array_ops.gather_nd(y, idx)
y = array_ops.reshape(y, array_ops.concat([batch_shape, [n, n]], 0))
y = array_ops.matrix_band_part(y, -1, 0)
y.set_shape(y.get_shape().merge_with(final_shape))
return y
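# Shape bookkeeping sketch: d = 6 inputs imply n = int(0.5 * (sqrt(49) - 1)) = 3,
# and tril_ids(3) maps the 9 output positions to input indices
# [0, 1, 2, 1, 2, 3, 3, 4, 5] (row-major); matrix_band_part(-1, 0) then zeroes
# the duplicated upper-triangular entries, giving the 3x3 docstring example.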
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
array_ops.ones_like(x), x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where(is_too_small, too_small_value,
array_ops.where(is_too_large, too_large_value, y))
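# Numeric sanity check of the identity above: softplus(2.0) = log(1 + e**2)
# ~= 2.12693, and softplus_inverse(2.12693) = 2.12693 + log(-expm1(-2.12693))
# ~= 2.12693 - 0.12693 = 2.0, recovering the original input.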
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
num_rows = (None if x.get_shape().ndims is None
else x.get_shape()[axis].value)
if num_rows is not None:
return num_rows
return array_ops.shape(x)[axis]
class AppendDocstring(object):
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing
specific kwargs expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError(
"Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" +
"\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
|
|
# sql/crud.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Functions used by compiler.py to determine the parameters rendered
within INSERT and UPDATE statements.
"""
from .. import util
from .. import exc
from . import elements
import operator
REQUIRED = util.symbol('REQUIRED', """
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`.Connection.execute`.
This symbol is typically used when a :func:`.expression.insert`
or :func:`.expression.update` statement is compiled without parameter
values present.
""")
ISINSERT = util.symbol('ISINSERT')
ISUPDATE = util.symbol('ISUPDATE')
ISDELETE = util.symbol('ISDELETE')
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (
restore_isinsert or restore_isupdate or restore_isdelete
) or len(compiler.stack) > 1
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
def _get_crud_params(compiler, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
compiler.postfetch = []
compiler.prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [
(c, _create_bind_param(
compiler, c, None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
_column_as_key, _getattr_col_key, _col_bind_name = \
_key_getters_for_crud_column(compiler, stmt)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
_get_stmt_parameters_params(
compiler,
parameters, stmt_parameters, _column_as_key, values, kw)
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if compiler.isupdate and stmt._extra_froms and stmt_parameters:
_get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw)
if compiler.isinsert and stmt.select_names:
_scan_insert_from_select_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
else:
_scan_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
if parameters and stmt_parameters:
check = set(parameters).intersection(
_column_as_key(k) for k in stmt_parameters
).difference(check_columns)
if check:
raise exc.CompileError(
"Unconsumed column names: %s" %
(", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values = _extend_values_for_multiparams(compiler, stmt, values, kw)
return values
def _create_bind_param(
compiler, col, value, process=True,
required=False, name=None, **kw):
if name is None:
name = col.key
bindparam = elements.BindParameter(
name, value, type_=col.type, required=required)
bindparam._is_crud = True
if process:
bindparam = bindparam._compiler_dispatch(compiler, **kw)
return bindparam
def _key_getters_for_crud_column(compiler, stmt):
if compiler.isupdate and stmt._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(stmt._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, 'table') and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
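# Illustrative sketch (hypothetical tables): in a multi-table UPDATE such as
#   users.update().values({users.c.name: 'x', addresses.c.email: 'y'})
# columns of the extra table key as ('addresses', 'email') and bind as
# 'addresses_email', while columns of the statement's own table keep their
# plain .key, matching single-table behavior.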
def _scan_insert_from_select_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
cols = [stmt.table.c[_column_as_key(name)]
for name in stmt.select_names]
compiler._insert_from_select = stmt.select
add_select_cols = []
if stmt.include_insert_from_select_defaults:
col_set = set(cols)
for col in stmt.table.columns:
if col not in col_set and col.default:
cols.append(col)
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
parameters.pop(col_key)
values.append((c, None))
else:
_append_param_insert_select_hasdefault(
compiler, stmt, c, add_select_cols, kw)
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
compiler._insert_from_select._raw_columns = \
tuple(compiler._insert_from_select._raw_columns) + tuple(
expr for col, expr in add_select_cols)
def _scan_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
if stmt._parameter_ordering:
parameter_ordering = [
_column_as_key(key) for key in stmt._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
stmt.table.c[key] for key in parameter_ordering
] + [
c for c in stmt.table.c if c.key not in ordered_keys
]
else:
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw)
elif compiler.isinsert:
if c.primary_key and \
need_pks and \
(
implicit_returning or
not postfetch_lastrowid or
c is not stmt.table._autoincrement_column
):
if implicit_returning:
_append_param_insert_pk_returning(
compiler, stmt, c, values, kw)
else:
_append_param_insert_pk(compiler, stmt, c, values, kw)
elif c.default is not None:
_append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults,
values, kw)
elif c.server_default is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif c.primary_key and \
c is not stmt.table._autoincrement_column and \
not c.nullable:
_raise_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw)
def _append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw):
value = parameters.pop(col_key)
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_0" % _col_bind_name(c),
**kw
)
else:
if isinstance(value, elements.BindParameter) and \
value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
"""Create a primary key expression in the INSERT statement and
possibly a RETURNING clause for it.
If the column has a Python-side default, we will create a bound
parameter for it and "pre-execute" the Python function. If
the column has a SQL expression default, or is a sequence,
we will add it directly into the INSERT statement and add a
RETURNING element to get the new value. If the column has a
server side default or is marked as the "autoincrement" column,
we will add a RETURNING element to get at the value.
If all the above tests fail, that indicates a primary key column with no
noted default generation capabilities that has no parameter passed;
raise an exception.
"""
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
compiler.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, compiler.process(
c.default.arg.self_group(), **kw))
)
compiler.returning.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
elif not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_raise_pk_with_no_anticipated_value(c)
def _create_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.prefetch.append(c)
return param
class _multiparam_column(elements.ColumnElement):
def __init__(self, original, index):
self.key = "%s_%d" % (original.key, index + 1)
self.original = original
self.default = original.default
self.type = original.type
def __eq__(self, other):
return isinstance(other, _multiparam_column) and \
other.key == self.key and \
other.original == self.original
def _process_multiparam_default_bind(compiler, c, index, kw):
if not c.default:
raise exc.CompileError(
"INSERT value for column %s is explicitly rendered as a bound"
"parameter in the VALUES clause; "
"a Python-side value or SQL expression is required" % c)
elif c.default.is_clause_element:
return compiler.process(c.default.arg.self_group(), **kw)
else:
col = _multiparam_column(c, index)
return _create_prefetch_bind_param(compiler, col)
def _append_param_insert_pk(compiler, stmt, c, values, kw):
"""Create a bound parameter in the INSERT statement to receive a
'prefetched' default value.
The 'prefetched' value indicates that we are to invoke a Python-side
default function or explicit SQL expression before the INSERT statement
proceeds, so that we have a primary key value available.
If the column has no noted default generation capabilities, it has
no value passed in either; raise an exception.
"""
if (
(
# column has a Python-side default
c.default is not None and
(
# and it won't be a Sequence
not c.default.is_sequence or
compiler.dialect.supports_sequences
)
)
or
(
# column is the "autoincrement column"
c is stmt.table._autoincrement_column and
(
# and it's either a "sequence" or a
# pre-executable "autoincrement" sequence
compiler.dialect.supports_sequences or
compiler.dialect.preexecute_autoincrement_sequences
)
)
):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
elif c.default is None and c.server_default is None and not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_raise_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif c.default.is_clause_element:
proc = compiler.process(c.default.arg.self_group(), **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
def _append_param_insert_select_hasdefault(
compiler, stmt, c, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = c.default
values.append((c, proc))
elif c.default.is_clause_element:
proc = c.default.arg.self_group()
values.append((c, proc))
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c, process=False))
)
def _append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
elif c.server_onupdate is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
stmt._return_defaults is not True and \
c in implicit_return_defaults:
compiler.returning.append(c)
def _get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw):
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c))
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually to be updated - process onupdate
# and server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif (c.onupdate is not None and not
c.onupdate.is_sequence):
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(),
**kw)
)
)
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(
compiler, c, name=_col_bind_name(c)))
)
elif c.server_onupdate is not None:
compiler.postfetch.append(c)
def _extend_values_for_multiparams(compiler, stmt, values, kw):
values_0 = values
values = [values]
values.extend(
[
(
c,
(_create_bind_param(
compiler, c, row[c.key],
name="%s_%d" % (c.key, i + 1)
) if elements._is_literal(row[c.key])
else compiler.process(
row[c.key].self_group(), **kw))
if c.key in row else
_process_multiparam_default_bind(compiler, c, i, kw)
)
for (c, param) in values_0
]
for i, row in enumerate(stmt.parameters[1:])
)
return values
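# Illustrative sketch (hypothetical statement): with
#   stmt.parameters == [{'x': 1}, {'x': 2}, {'x': 3}]
# the first parameter set is bound as 'x_0' by _append_param_parameter, and the
# rows extended here are bound as 'x_1' and 'x_2', so every VALUES tuple gets
# distinct bind names.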
def _get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw):
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = compiler.process(
elements.BindParameter(None, v, type_=k.type),
**kw)
else:
v = compiler.process(v.self_group(), **kw)
values.append((k, v))
def _get_returning_modifiers(compiler, stmt):
need_pks = compiler.isinsert and \
not compiler.inline and \
not stmt._returning and \
not stmt._has_multi_parameters
implicit_returning = need_pks and \
compiler.dialect.implicit_returning and \
stmt.table.implicit_returning
if compiler.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
elif compiler.isupdate:
implicit_return_defaults = (compiler.dialect.implicit_returning and
stmt.table.implicit_returning and
stmt._return_defaults)
else:
# this line is unused, currently we are always
# isinsert or isupdate
implicit_return_defaults = False # pragma: no cover
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid
return need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid
def _raise_pk_with_no_anticipated_value(c):
msg = (
"Column '%s.%s' is marked as a member of the "
"primary key for table '%s', "
"but has no Python-side or server-side default generator indicated, "
"nor does it indicate 'autoincrement=True' or 'nullable=True', "
"and no explicit value is passed. "
"Primary key columns typically may not store NULL."
%
(c.table.fullname, c.name, c.table.fullname))
if len(c.table.primary_key.columns) > 1:
msg += (
" Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
"indicated explicitly for composite (e.g. multicolumn) primary "
"keys if AUTO_INCREMENT/SERIAL/IDENTITY "
"behavior is expected for one of the columns in the primary key. "
"CREATE TABLE statements are impacted by this change as well on "
"most backends.")
raise exc.CompileError(msg)
|
|
import fnmatch
import itertools
import json
import random
import re
import string
import traceback
import urllib
import botconfig
import src.settings as var
from src import proxy, debuglog
from src.events import Event
from src.messages import messages
# message either privmsg or notice, depending on user settings
def pm(cli, target, message):
if is_fake_nick(target) and botconfig.DEBUG_MODE:
debuglog("Would message fake nick {0}: {1!r}".format(target, message))
return
if is_user_notice(target):
cli.notice(target, message)
return
cli.msg(target, message)
is_fake_nick = re.compile(r"^[0-9]+$").search
def mass_mode(cli, md_param, md_plain):
""" Example: mass_mode(cli, [('+v', 'asdf'), ('-v','wobosd')], ['-m']) """
lmd = len(md_param) # store how many mode changes to do
if md_param:
for start_i in range(0, lmd, var.MODELIMIT): # 4 mode-changes at a time
if start_i + var.MODELIMIT > lmd: # If this is a remainder (mode-changes < 4)
z = list(zip(*md_param[start_i:])) # zip this remainder
ei = lmd % var.MODELIMIT # len(z)
else:
z = list(zip(*md_param[start_i:start_i+var.MODELIMIT])) # zip four
ei = var.MODELIMIT # len(z)
# Now z equals something like [('+v', '-v'), ('asdf', 'wobosd')]
arg1 = "".join(md_plain) + "".join(z[0])
arg2 = " ".join(z[1]) # + " " + " ".join([x+"!*@*" for x in z[1]])
cli.mode(botconfig.CHANNEL, arg1, arg2)
elif md_plain:
cli.mode(botconfig.CHANNEL, "".join(md_plain))
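# Batching sketch (assuming var.MODELIMIT == 4): a call such as
#   mass_mode(cli, [('+v', 'a'), ('+v', 'b'), ('+v', 'c'), ('+v', 'd'), ('-v', 'e')], ['-m'])
# issues two MODE commands, "-m+v+v+v+v a b c d" for the first four entries and
# "-m-v e" for the remainder (the plain modes are re-joined into every batch).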
def mass_privmsg(cli, targets, msg, notice=False, privmsg=False):
if not targets:
return
if not notice and not privmsg:
msg_targs = []
not_targs = []
for target in targets:
if is_fake_nick(target):
debuglog("Would message fake nick {0}: {1!r}".format(target, msg))
elif is_user_notice(target):
not_targs.append(target)
else:
msg_targs.append(target)
while msg_targs:
if len(msg_targs) <= var.MAX_PRIVMSG_TARGETS:
bgs = ",".join(msg_targs)
msg_targs = None
else:
bgs = ",".join(msg_targs[:var.MAX_PRIVMSG_TARGETS])
msg_targs = msg_targs[var.MAX_PRIVMSG_TARGETS:]
cli.msg(bgs, msg)
while not_targs:
if len(not_targs) <= var.MAX_PRIVMSG_TARGETS:
bgs = ",".join(not_targs)
not_targs = None
else:
bgs = ",".join(not_targs[:var.MAX_PRIVMSG_TARGETS])
not_targs = not_targs[var.MAX_PRIVMSG_TARGETS:]
cli.notice(bgs, msg)
else:
while targets:
if len(targets) <= var.MAX_PRIVMSG_TARGETS:
bgs = ",".join(targets)
targets = None
else:
bgs = ",".join(targets[:var.MAX_PRIVMSG_TARGETS])
                targets = targets[var.MAX_PRIVMSG_TARGETS:]
if notice:
cli.notice(bgs, msg)
else:
cli.msg(bgs, msg)
# Decide how to reply to a user, depending on the channel / query it was called in, and whether a game is running and they are playing
def reply(cli, nick, chan, msg, private=False, prefix_nick=False):
if chan == nick:
pm(cli, nick, msg)
elif private or (nick not in list_players() and var.PHASE in var.GAME_PHASES and chan == botconfig.CHANNEL):
cli.notice(nick, msg)
else:
if prefix_nick:
cli.msg(chan, "{0}: {1}".format(nick, msg))
else:
cli.msg(chan, msg)
def is_user_simple(nick):
if nick in var.USERS:
ident = irc_lower(var.USERS[nick]["ident"])
host = var.USERS[nick]["host"].lower()
acc = irc_lower(var.USERS[nick]["account"])
else:
return False
if acc and acc != "*" and not var.DISABLE_ACCOUNTS:
if acc in var.SIMPLE_NOTIFY_ACCS:
return True
return False
elif not var.ACCOUNTS_ONLY:
for hostmask in var.SIMPLE_NOTIFY:
if match_hostmask(hostmask, nick, ident, host):
return True
return False
def is_user_notice(nick):
if nick in var.USERS and var.USERS[nick]["account"] and var.USERS[nick]["account"] != "*" and not var.DISABLE_ACCOUNTS:
if irc_lower(var.USERS[nick]["account"]) in var.PREFER_NOTICE_ACCS:
return True
if nick in var.USERS and not var.ACCOUNTS_ONLY:
ident = irc_lower(var.USERS[nick]["ident"])
host = var.USERS[nick]["host"].lower()
for hostmask in var.PREFER_NOTICE:
if match_hostmask(hostmask, nick, ident, host):
return True
return False
def in_wolflist(nick, who):
myrole = get_role(nick)
role = get_role(who)
wolves = var.WOLFCHAT_ROLES
if var.RESTRICT_WOLFCHAT & var.RW_REM_NON_WOLVES:
if var.RESTRICT_WOLFCHAT & var.RW_TRAITOR_NON_WOLF:
wolves = var.WOLF_ROLES
else:
wolves = var.WOLF_ROLES | {"traitor"}
return myrole in wolves and role in wolves
def relay_wolfchat_command(cli, nick, message, roles, is_wolf_command=False, is_kill_command=False):
if not is_wolf_command and var.RESTRICT_WOLFCHAT & var.RW_NO_INTERACTION:
return
if not is_kill_command and var.RESTRICT_WOLFCHAT & var.RW_ONLY_KILL_CMD:
if var.PHASE == "night" and var.RESTRICT_WOLFCHAT & var.RW_DISABLE_NIGHT:
return
if var.PHASE == "day" and var.RESTRICT_WOLFCHAT & var.RW_DISABLE_DAY:
return
if not in_wolflist(nick, nick):
return
wcroles = var.WOLFCHAT_ROLES
if var.RESTRICT_WOLFCHAT & var.RW_ONLY_SAME_CMD:
if var.PHASE == "night" and var.RESTRICT_WOLFCHAT & var.RW_DISABLE_NIGHT:
wcroles = roles
if var.PHASE == "day" and var.RESTRICT_WOLFCHAT & var.RW_DISABLE_DAY:
wcroles = roles
elif var.RESTRICT_WOLFCHAT & var.RW_REM_NON_WOLVES:
if var.RESTRICT_WOLFCHAT & var.RW_TRAITOR_NON_WOLF:
wcroles = var.WOLF_ROLES
else:
wcroles = var.WOLF_ROLES | {"traitor"}
wcwolves = list_players(wcroles)
wcwolves.remove(nick)
mass_privmsg(cli, wcwolves, message)
mass_privmsg(cli, var.SPECTATING_WOLFCHAT, "[wolfchat] " + message)
@proxy.stub
def chk_nightdone(cli):
pass
@proxy.stub
def chk_decision(cli, force=""):
pass
@proxy.stub
def chk_win(cli, end_game=True, winner=None):
pass
def irc_lower(nick):
if nick is None:
return None
mapping = {
"[": "{",
"]": "}",
"\\": "|",
"^": "~",
}
# var.CASEMAPPING may not be defined yet in some circumstances (like database upgrades)
# if so, default to rfc1459
if hasattr(var, "CASEMAPPING"):
if var.CASEMAPPING == "strict-rfc1459":
mapping.pop("^")
elif var.CASEMAPPING == "ascii":
mapping = {}
return nick.lower().translate(str.maketrans(mapping))
def irc_equals(nick1, nick2):
return irc_lower(nick1) == irc_lower(nick2)
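# Hedged illustrative sketch (not part of the original module): under plain
# rfc1459 casemapping (the default used above when var.CASEMAPPING is unset),
# the bracket and caret characters fold as shown.
def _irc_lower_example():
    lowered = irc_lower("Nick[1]^")        # "nick{1}~" under rfc1459
    same = irc_equals("Foo[]", "foo{}")    # True under rfc1459
    return lowered, same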
is_role = lambda plyr, rol: rol in var.ROLES and plyr in var.ROLES[rol]
def match_hostmask(hostmask, nick, ident, host):
# support n!u@h, u@h, or just h by itself
matches = re.match('(?:(?:(.*?)!)?(.*?)@)?(.*)', hostmask)
if ((not matches.group(1) or fnmatch.fnmatch(irc_lower(nick), irc_lower(matches.group(1)))) and
(not matches.group(2) or fnmatch.fnmatch(irc_lower(ident), irc_lower(matches.group(2)))) and
fnmatch.fnmatch(host.lower(), matches.group(3).lower())):
return True
return False
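# Hedged illustrative sketch (not part of the original module; the user below
# is hypothetical): the three hostmask forms supported by match_hostmask().
def _match_hostmask_example():
    nick, ident, host = "wolfbot", "lykos", "services.example.net"
    return (match_hostmask("wolfbot!lykos@services.example.net", nick, ident, host),  # n!u@h
            match_hostmask("lykos@*.example.net", nick, ident, host),                 # u@h
            match_hostmask("*.example.net", nick, ident, host))                       # h only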
def is_owner(nick, ident=None, host=None, acc=None):
hosts = set(botconfig.OWNERS)
accounts = set(botconfig.OWNERS_ACCOUNTS)
if nick in var.USERS:
if not ident:
ident = var.USERS[nick]["ident"]
if not host:
host = var.USERS[nick]["host"]
if not acc:
acc = var.USERS[nick]["account"]
if not var.DISABLE_ACCOUNTS and acc and acc != "*":
for pattern in accounts:
if fnmatch.fnmatch(irc_lower(acc), irc_lower(pattern)):
return True
if host:
for hostmask in hosts:
if match_hostmask(hostmask, nick, ident, host):
return True
return False
def is_admin(nick, ident=None, host=None, acc=None):
if nick in var.USERS:
if not ident:
ident = var.USERS[nick]["ident"]
if not host:
host = var.USERS[nick]["host"]
if not acc:
acc = var.USERS[nick]["account"]
acc = irc_lower(acc)
hostmask = irc_lower(nick) + "!" + irc_lower(ident) + "@" + host.lower()
flags = var.FLAGS[hostmask] + var.FLAGS_ACCS[acc]
if not "F" in flags:
try:
hosts = set(botconfig.ADMINS)
accounts = set(botconfig.ADMINS_ACCOUNTS)
if not var.DISABLE_ACCOUNTS and acc and acc != "*":
for pattern in accounts:
if fnmatch.fnmatch(irc_lower(acc), irc_lower(pattern)):
return True
if host:
for hostmask in hosts:
if match_hostmask(hostmask, nick, ident, host):
return True
except AttributeError:
pass
return is_owner(nick, ident, host, acc)
return True
def plural(role, count=2):
if count == 1:
return role
bits = role.split()
if bits[-1][-2:] == "'s":
bits[-1] = plural(bits[-1][:-2], count)
bits[-1] += "'" if bits[-1][-1] == "s" else "'s"
else:
bits[-1] = {"person": "people",
"wolf": "wolves",
"has": "have",
"succubus": "succubi",
"child": "children"}.get(bits[-1], bits[-1] + "s")
return " ".join(bits)
def singular(plural):
# converse of plural above (kinda)
# this is used to map plural team names back to singular,
# so we don't need to worry about stuff like possessives
# Note that this is currently only ever called on team names,
# and will require adjustment if one wishes to use it on roles.
conv = {"wolves": "wolf",
"succubi": "succubus"}
if plural in conv:
return conv[plural]
# otherwise we just added an s on the end
return plural[:-1]
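# Hedged illustrative sketch (not part of the original module): irregular and
# possessive forms handled by plural(), and the team-name-only inverse.
def _plural_singular_example():
    return (plural("wolf"),         # "wolves"
            plural("wolf", 1),      # unchanged when count == 1
            plural("wolf's"),       # "wolves'"
            singular("succubi"))    # "succubus"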
def list_players(roles = None):
if roles is None:
roles = var.ROLES.keys()
pl = set()
for x in roles:
if x in var.TEMPLATE_RESTRICTIONS.keys():
continue
for p in var.ROLES.get(x, ()):
pl.add(p)
return [p for p in var.ALL_PLAYERS if p in pl]
def list_players_and_roles():
plr = {}
for x in var.ROLES.keys():
if x in var.TEMPLATE_RESTRICTIONS.keys():
continue # only get actual roles
for p in var.ROLES[x]:
plr[p] = x
return plr
def get_role(p):
for role, pl in var.ROLES.items():
if role in var.TEMPLATE_RESTRICTIONS.keys():
continue # only get actual roles
if p in pl:
return role
def get_roles(*roles):
all_roles = []
for role in roles:
all_roles.append(var.ROLES[role])
return list(itertools.chain(*all_roles))
def get_reveal_role(nick):
if var.HIDDEN_TRAITOR and get_role(nick) == "traitor":
role = var.DEFAULT_ROLE
elif var.HIDDEN_AMNESIAC and nick in var.ORIGINAL_ROLES["amnesiac"]:
role = "amnesiac"
elif var.HIDDEN_CLONE and nick in var.ORIGINAL_ROLES["clone"]:
role = "clone"
else:
role = get_role(nick)
evt = Event("get_reveal_role", {"role": role})
evt.dispatch(var, nick)
role = evt.data["role"]
if var.ROLE_REVEAL != "team":
return role
if role in var.WOLFTEAM_ROLES:
return "wolf"
elif role in var.TRUE_NEUTRAL_ROLES:
return "neutral player"
else:
return "villager"
def get_templates(nick):
tpl = []
for x in var.TEMPLATE_RESTRICTIONS.keys():
try:
if nick in var.ROLES[x]:
tpl.append(x)
except KeyError:
pass
return tpl
role_order = lambda: var.ROLE_GUIDE
def break_long_message(phrases, joinstr = " "):
message = []
count = 0
for phrase in phrases:
# IRC max is 512, but freenode splits around 380ish, make 300 to have plenty of wiggle room
if count + len(joinstr) + len(phrase) > 300:
message.append("\n" + phrase)
count = len(phrase)
else:
if not message:
count = len(phrase)
else:
count += len(joinstr) + len(phrase)
message.append(phrase)
return joinstr.join(message)
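# Hedged illustrative sketch (not part of the original module): phrases are
# joined with joinstr until a line would exceed 300 characters, at which point
# a newline is started and the running count resets to that phrase's length.
def _break_long_message_example():
    phrases = ["a" * 200, "b" * 150, "c" * 10]
    # the second phrase would push the first line past 300 characters, so it
    # starts a new line; the third phrase then fits on that second line
    return break_long_message(phrases)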
#completes a partial nickname or string from a list
def complete_match(string, matches):
num_matches = 0
bestmatch = string
for possible in matches:
if string == possible:
return string, 1
if possible.startswith(string) or possible.lstrip("[{\\^_`|}]").startswith(string):
bestmatch = possible
num_matches += 1
if num_matches != 1:
return None, num_matches
else:
return bestmatch, 1
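# Hedged illustrative sketch (not part of the original module): a unique
# prefix completes to the full entry, an ambiguous one returns (None, n).
def _complete_match_example():
    return (complete_match("wo", ["wolfbot", "villager"]),   # ("wolfbot", 1)
            complete_match("v", ["villager", "vigilante"]))  # (None, 2)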
#wrapper around complete_match() used for roles
def get_victim(cli, nick, victim, in_chan, self_in_list=False, bot_in_list=False):
chan = botconfig.CHANNEL if in_chan else nick
if not victim:
reply(cli, nick, chan, messages["not_enough_parameters"], private=True)
return
pl = [x for x in list_players() if x != nick or self_in_list]
pll = [x.lower() for x in pl]
if bot_in_list: # for villagergame
pl.append(botconfig.NICK)
pll.append(botconfig.NICK.lower())
tempvictim, num_matches = complete_match(victim.lower(), pll)
if not tempvictim:
#ensure messages about not being able to act on yourself work
if num_matches == 0 and nick.lower().startswith(victim.lower()):
return nick
reply(cli, nick, chan, messages["not_playing"].format(victim), private=True)
return
return pl[pll.index(tempvictim)] #convert back to normal casing
# wrapper around complete_match() used for any nick on the channel
def get_nick(cli, nick):
ul = [x for x in var.USERS]
ull = [x.lower() for x in var.USERS]
lnick, num_matches = complete_match(nick.lower(), ull)
if not lnick:
return None
return ul[ull.index(lnick)]
def pastebin_tb(cli, msg, exc):
try:
bot_id = re.sub(r"[^A-Za-z0-9-]", "-", botconfig.NICK)
bot_id = re.sub(r"--+", "-", bot_id)
bot_id = re.sub(r"^-+|-+$", "", bot_id)
rand_id = "".join(random.sample(string.ascii_letters + string.digits, 8))
api_url = "https://ptpb.pw/~{0}-error-{1}".format(bot_id, rand_id)
req = urllib.request.Request(api_url, urllib.parse.urlencode({
"c": traceback.format_exc(), # contents
"s": 86400 # expiry (seconds)
}).encode("utf-8", "replace"))
req.add_header("Accept", "application/json")
resp = urllib.request.urlopen(req)
data = json.loads(resp.read().decode("utf-8"))
url = data["url"] + "/py3tb"
except urllib.error.HTTPError as e:
if e.code == 409: # paste ID conflict
            pastebin_tb(cli, msg, exc) # retry
else:
# Make sure we print the exception anyway
traceback.print_exc()
cli.msg(botconfig.DEV_CHANNEL, msg + " (Unable to pastebin traceback; please check the console.)")
except Exception:
traceback.print_exc()
cli.msg(botconfig.DEV_CHANNEL, msg + " (Unable to pastebin traceback; please check the console.)")
else:
cli.msg(botconfig.DEV_CHANNEL, " ".join((msg, url)))
class InvalidModeException(Exception): pass
# vim: set sw=4 expandtab:
|
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import logging
import re
from scipy.spatial import Delaunay, ConvexHull
from scipy.special import factorial
from scipy.spatial.qhull import QhullError
from lmatools.lasso import empirical_charge_density as cd
# logger = logging.getLogger('FlashAutorunLogger')
class Flash(object):
def __init__(self, points):
self.points = points
class FlashMetadata(object):
def __init__(self, headerText):
#Search the header for info on how the data is written
self.header = headerText
isColumnHeaderLine = r"^Data:(.*)"
matchDataFormatLine = re.compile(isColumnHeaderLine, re.IGNORECASE | re.MULTILINE)
isDataStartTime = r"^Data start time:(.*)"
matchDataStartTimeLine = re.compile(isDataStartTime, re.IGNORECASE | re.MULTILINE)
secAnalyzedLine = r"^Number of seconds analyzed:(.*)"
matchSecAnalyzedLine = re.compile(secAnalyzedLine, re.IGNORECASE | re.MULTILINE)
startTimeMatch = matchDataStartTimeLine.search(headerText)
if startTimeMatch:
#Looking to deal with something like: " 06/28/04 23:50:00"
dateAndTime = startTimeMatch.group(1).split()
self.startmonth, self.startday, self.startyear = [ int(datePart) for datePart in dateAndTime[0].split('/') ]
self.starthour, self.startminute, self.startsecond = [ int(timePart) for timePart in dateAndTime[1].split(':') ]
            if self.startyear < 100:  # two-digit year in the header
                if self.startyear > 70:
                    self.startyear += 1900
                else:
                    self.startyear += 2000
secAnalyzedMatch=matchSecAnalyzedLine.search(headerText)
if secAnalyzedMatch:
self.sec_analyzed = int(secAnalyzedMatch.group(1))
# formatMatch=matchDataFormatLine.search(headerText)
# if formatMatch:
# columns = formatMatch.group(1).split(',')
# self.columns = [columnName.strip() for columnName in columns]
def barotropic_rho(z):
rho = 1.225e9 #kg/km^3
H = 8. #km
return rho*np.exp(-z/H)
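# Hedged illustrative sketch (not part of the original module): the density
# above decays with an 8 km scale height, so at z = 8 km it is 1/e of the
# surface value of 1.225e9 kg/km^3.
def _barotropic_rho_example():
    return barotropic_rho(0.), barotropic_rho(8.)  # (1.225e9, ~1.225e9 / e)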
def poly_area(x,y):
""" Calculate the area of a non-self-intersecting planar polygon.
        x0y1 - x1y0 + x1y2 - x2y1 + x2y3 - x3y2 + ... + xny0 - x0yn
"""
det = x[:-1]*y[1:] - x[1:]*y[:-1] # determinant
area = det.sum()
area += x[-1]*y[0] - x[0]*y[-1] # wrap-around terms in determinant
area *= 0.5
return area
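# Hedged illustrative sketch (not part of the original module): the shoelace
# sum above applied to a unit square given counter-clockwise; the sign of the
# result follows the winding direction of the vertices.
def _poly_area_example():
    x = np.array([0., 1., 1., 0.])
    y = np.array([0., 0., 1., 1.])
    return poly_area(x, y)  # 1.0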
def hull_volume(xyz):
""" Calculate the volume of the convex hull of 3D (X,Y,Z) LMA data.
xyz is a (N_points, 3) array of point locations in space. """
assert xyz.shape[1] == 3
tri = Delaunay(xyz[:,0:3])
vertices = tri.points[tri.vertices]
# This is the volume formula in
# https://github.com/scipy/scipy/blob/master/scipy/spatial/tests/test_qhull.py#L106
# Except the formula needs to be divided by ndim! to get the volume, cf.,
# http://en.wikipedia.org/wiki/Simplex#Geometric_properties
# Credit Pauli Virtanen, Oct 14, 2012, scipy-user list
q = vertices[:,:-1,:] - vertices[:,-1,None,:]
simplex_volumes = (1.0 / factorial(q.shape[-1])) * np.fromiter(
(np.linalg.det(q[k,:,:]) for k in range(tri.nsimplex)) , dtype=float)
# print vertices.shape # number of simplices, points per simplex, coords
# print q.shape
# The simplex volumes have negative values since they are oriented
    # (think surface normal direction for a triangle)
volume=np.sum(np.abs(simplex_volumes))
return volume, vertices, simplex_volumes
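# Hedged illustrative sketch (not part of the original module): the convex
# hull of a unit cube's eight corners decomposes into tetrahedra whose
# absolute volumes sum to 1.
def _hull_volume_example():
    corners = np.array([[x, y, z] for x in (0., 1.)
                                  for y in (0., 1.)
                                  for z in (0., 1.)])
    volume, vertices, simplex_volumes = hull_volume(corners)
    return volume  # ~1.0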
##############ADDED 01/05/2017 ###############
def energy(area, separation, zinit, constant, eta):
#Charge separation computed from 27th and 73rd percentiles of
#flash altitude source locations - marks where the most sources are typically
#found from synthetic flashes generated in the NSSL COMMAS.
#
    #eta = 0.01 is recommended and is a ballpark neutralization efficiency as found in Salinas et al. [In Progress - 060220]
distance = separation #np.abs(random)
density = cd.rho_retrieve(area, distance, zinit, separation, False, None) #None - No constant charge density specified
rho,w = density.calculate()
return(eta*w)
##############################################
def calculate_flash_stats(flash, min_pts=2):
logger = logging.getLogger('FlashAutorunLogger')
Re = 6378.137*1000; #Earth's radius in m
pi = np.pi
flash.pointCount = flash.points.shape[0]
fl_id = np.unique(flash.points['flash_id'])
assert (fl_id.shape[0] == 1)
flash.id = fl_id[0]
lat = np.asarray(flash.points['lat'],dtype=float)
lon = np.asarray(flash.points['lon'],dtype=float)
alt = np.asarray(flash.points['alt'], dtype=float)
#
# # mean location of all points
latavg, lonavg, altavg = lat.mean(), lon.mean(), alt.mean()
x = Re * (np.radians(lonavg) - np.radians(lon)) * np.cos(np.radians(latavg))
y = Re * (np.radians(latavg) - np.radians(lat))
z = altavg - alt
# r_sq = x**2.0 + y**2.0 + z**2.0
# sigma_sq = r_sq.sum()/r_sq.shape[0]
# sigma = np.std(r_sq)
separation = np.abs(np.percentile(alt,73) - np.percentile(alt,27))
flash_init_idx = np.argmin(flash.points['time'])
zinit = alt[flash_init_idx] #in meters
area = 0.0
eta = 0.01
if flash.pointCount > 2:
try:
# find the convex hull and calculate its area
cvh = ConvexHull(np.vstack((x,y)).T)
# NOT cvh.area - it is the perimeter in 2D.
# cvh.area is the surface area in 3D.
area = cvh.volume
except IndexError:
# tends to happen when a duplicate point causes the point count to
# drop to 2, leading to a degenerate polygon with no area
logger.warning('Setting area to 0 for flash with points %s, %s' % (x, y))
area=0.0
except KeyError:
# hull indexing has problems here
logger.warning('Setting area to 0 for flash with points %s, %s' % (x, y))
area=0.0
if area == 0.0:
energy_estimate = 0.
else:
energy_estimate = energy(area, separation, zinit, False, eta)
volume = 0.0
if flash.pointCount > 3:
# Need four points to make at least one tetrahedron.
try:
volume, vertices, simplex_volumes = hull_volume(np.vstack((x,y,z)).T)
except QhullError:
# this can happen with a degenerate first simplex - all points are
# coplanar to machine precision. Try again, after adding a tiny amount
# to the first point.
print("Perturbing one source to help triangulation for flash with {0} points".format(flash.pointCount))
# we can tolerate perturbing by no more than 1 m
machine_eps = 1.0 # np.finfo(x.dtype).eps
perturb = 2*machine_eps*np.random.random(size=3)-machine_eps
x[0] += perturb[0]
y[0] += perturb[1]
z[0] += perturb[2]
volume, vertices, simplex_volumes = hull_volume(np.vstack((x,y,z)).T)
flash_init_idx = np.argmin(flash.points['time'])
###ROUGH APPROXIMATION FOR NOW: #######################
air_density = barotropic_rho(alt[flash_init_idx]*1e-3)
if volume == 0.:
specific_energy = 0.
else:
specific_energy = energy_estimate / ((volume / 1.0e9) * air_density)
#######################################################
flash.start = flash.points[flash_init_idx]['time']
flash.end = flash.points['time'].max()
flash.duration = flash.end - flash.start
flash.area = area / 1.0e6 # km^2, 1000x1000
flash.initLat = lat[flash_init_idx]
flash.initLon = lon[flash_init_idx]
flash.initStd = 0.0
flash.initAlt = alt[flash_init_idx]
flash.initPts = (int(flash_init_idx),)
flash.ctralt = altavg
flash.ctrlat = latavg
flash.ctrlon = lonavg
flash.volume = volume / 1.0e9 # km^3, 1000x1000x1000 m
#CHANGED 03-20-17
flash.total_energy = energy_estimate #flash.energy ---> flash.tot_energy
flash.specific_energy = specific_energy #flash.tot_energy ---> flash.specific_energy
|
|
"""Service manager for agilefant
"""
import os
import os.path
import shutil
import sys
import time
import tempfile
import stat
import urllib2
import engage.drivers.service_manager as service_manager
import engage.drivers.resource_metadata as resource_metadata
import engage.utils
import engage.utils.path as iupath
import engage_utils.process as iuprocess
import engage.utils.log_setup
import engage.utils.file as iufile
import engage.utils.http as iuhttp
import engage.utils.timeout as iutimeout
from engage.utils.user_error import ScriptErrInf, UserError
import xml.etree.ElementTree as et
import engage.engine.install_context as install_context
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
global errors
error_info = ScriptErrInf("Agilefant", error_code, msg)
errors[error_info.error_code] = error_info
# error codes
ERR_CALL_MYSQL = 1
ERR_NO_DBSCHEMA = 2
ERR_RUNTIME_PROPS = 3
ERR_DEPLOY_RSP = 4
ERR_NO_INSTALL_DIR = 5
ERR_NO_WAR_FILE = 6
ERR_TOMCAT_STARTUP = 7
ERR_TOMCAT_STARTRSP = 8
ERR_TOMCAT_STOPRSP = 9
ERR_TOMCAT_STOPREQ = 10
define_error(ERR_CALL_MYSQL,
_("Unexpected error in running query against MySQL."))
define_error(ERR_NO_DBSCHEMA,
_("It appears that the agilefant database schema was not created."))
define_error(ERR_RUNTIME_PROPS,
_("File '%(file)s' did not contain the expected configuration properties."))
define_error(ERR_DEPLOY_RSP,
_("Unexpected deployment response from Tomcat."))
define_error(ERR_NO_INSTALL_DIR,
_("Install directory '%(dir)s' does not exist."))
define_error(ERR_NO_WAR_FILE,
_("WAR file for OpenMRS has not been deployed to Tomcat server."))
define_error(ERR_TOMCAT_STARTRSP,
_("Unexpected startup response from Tomcat."))
define_error(ERR_TOMCAT_STARTUP,
_("Error in making startup request to Tomcat."))
define_error(ERR_TOMCAT_STOPRSP,
_("Unexpected shutdown response from Tomcat manager."))
define_error(ERR_TOMCAT_STOPREQ,
_("Error in making shutdown request to Tomcat."))
class AgilefantError(UserError):
def __init__(self, error_id, action, config, msg_args=None, developer_msg=None):
context = ["%s of %s, instance %s" %
(action, config.package_name, config.id)]
UserError.__init__(self, errors[error_id], msg_args, developer_msg, context)
self.config = config # keep this around just in case we want it later
logger = engage.utils.log_setup.setup_script_logger("Agilefant")
_deploy_req_uri = "http://%s:%d/manager/deploy?path=/agilefant&war=file:%s/agilefant.war"
_deploy_rsp = "OK - Deployed application at context path /agilefant"
_start_req_uri = "http://%s:%d/manager/start?path=/agilefant"
_start_rsp = "OK - Started application at context path /agilefant"
_stop_req_uri = "http://%s:%d/manager/stop?path=/agilefant"
_stop_rsp = "OK - Stopped application at context path /agilefant"
_tomcat_mgr_realm = "Tomcat Manager Application"
TIMEOUT_TRIES = 10
TIME_BETWEEN_TRIES = 2.0
_config_type = {
"config_port": {
"database_user": unicode,
"database_password": unicode,
"home": unicode
},
"input_ports": {
"java": {
"type": unicode,
"home": unicode
},
"tomcat": {
"admin_user": unicode,
"admin_password": unicode,
"hostname": unicode,
"manager_port": int,
"home": unicode
},
"mysql": {
"host": unicode,
"port": int
},
"mysql_admin": {
"root_password": unicode,
"install_dir": unicode
}
}
}
class Config(resource_metadata.Config):
def __init__(self, props_in, types, id, package_name):
resource_metadata.Config.__init__(self, props_in, types)
self._add_computed_prop("id", id)
self._add_computed_prop("package_name", package_name)
self._add_computed_prop("home_path",
os.path.abspath(self.config_port.home))
self._add_computed_prop("home_dir_parent",
os.path.dirname(self.home_path))
self._add_computed_prop("home_dir",
os.path.basename(self.home_path))
self._add_computed_prop("mysql_path",
os.path.join(
os.path.join(
self.input_ports.mysql_admin.install_dir,
"bin"), "mysql"))
self._add_computed_prop("socket_file",
os.path.join(
self.input_ports.mysql_admin.install_dir,
"mysql.sock"))
self._add_computed_prop("deployment_target_path",
os.path.join(
os.path.join(os.path.abspath(self.input_ports.tomcat.home),
"webapps"),
"agilefant"))
def call_mysql(config, user, pwd, input, continue_on_error=False):
cfg_filename = \
iufile.make_temp_config_file(
"[mysql]\nuser=%s\npassword=%s\nport=%d\n" %
(user, pwd,
config.input_ports.mysql.port),
dir=config.home_path)
defaults_file = "--defaults-file=%s" % cfg_filename
socket_file = "--socket=%s" % config.socket_file
try:
rc = iuprocess.run_and_log_program([config.mysql_path, defaults_file,
socket_file],
{},
logger,
cwd=config.home_path,
input=input)
finally:
os.remove(cfg_filename)
if rc!=0 and not continue_on_error:
raise AgilefantError(ERR_CALL_MYSQL, "Install", config,
developer_msg="Return code: '%d', Input: '%s'" % (rc, input))
return rc
def check_for_agilefant_db(config):
root_password = install_context.password_repository.get_value(config.input_ports.mysql_admin.root_password)
rc = call_mysql(config,
"root", root_password,
"use agilefant;\n", continue_on_error=True)
if rc!=0:
raise AgilefantError(ERR_NO_DBSCHEMA,
"Validate install", config,
developer_msg="mysql 'use agilefant' failed")
def check_status(config):
return iuhttp.check_url(config.input_ports.tomcat.hostname,
config.input_ports.tomcat.manager_port,
"/agilefant/", logger)
_mysql_cmds = \
"""CREATE DATABASE agilefant;
GRANT ALL ON agilefant.* TO '{0}'@'%' IDENTIFIED BY '{1}';
exit
"""
_mysql_createdb = \
"""use agilefant;
source {0}/create-db.ddl;
/*source {0}/insert-users.sql;*/
exit
"""
#TODO: set up apache forwarding
_apache_forward = \
"""ProxyPass /agilefant http://{0}:{1}/agilefant
ProxyPassReverse /agilefant http://{0}:{1}/agilefant
"""
_apache_connector = \
"""<Connector port="{1}"
protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443"
proxyName="{0}" proxyPort="80"/>
"""
class Manager(service_manager.Manager):
def __init__(self, metadata):
package_name = "%s %s" % (metadata.key["name"],
metadata.key["version"])
service_manager.Manager.__init__(self, metadata, package_name)
self.config = metadata.get_config(_config_type, Config, self.id,
package_name)
def validate_pre_install(self):
# iupath.check_installable_to_target_dir(self.config.home_path,
# self.config.package_name)
logger.debug("%s instance %s passed pre-install checks." %
(self.config.package_name, self.id))
def install(self, package):
extracted_dir = package.extract(self.config.home_dir_parent, desired_common_dirname=self.config.home_dir)
# initialize the database
root_password = install_context.password_repository.get_value(self.config.input_ports.mysql_admin.root_password)
db_password = install_context.password_repository.get_value(self.config.config_port.database_password)
call_mysql(self.config,
"root", root_password,
_mysql_cmds.format(self.config.config_port.database_user,
db_password))
call_mysql(self.config,
self.config.config_port.database_user,
db_password,
_mysql_createdb.format(self.config.home_path))
# deploy the war file
uri = _deploy_req_uri % (self.config.input_ports.tomcat.hostname,
self.config.input_ports.tomcat.manager_port,
self.config.home_path)
logger.debug("Attempting to deploy agilefant:\nuri=%s" % uri)
tomcat_password = install_context.password_repository.get_value(self.config.input_ports.tomcat.admin_password)
result = iuhttp.make_request_with_basic_authentication(uri,
_tomcat_mgr_realm,
self.config.input_ports.tomcat.admin_user,
tomcat_password)
if result.find(_deploy_rsp)==-1:
raise AgilefantError(ERR_DEPLOY_RSP, "Install", self.config,
developer_msg="Response was: '%s'" % result)
# write out the init.d startup script
# we just stick it in the install directory for now and leave it to
# the user to manually copy it to /etc/init.d and enable it.
agilefant_initd_file = iufile.get_data_file_contents(__file__, "agilefant.sh")
startup_script = agilefant_initd_file % {
"mysql_install_dir":self.config.input_ports.mysql_admin.install_dir,
"tomcat_install_dir":self.config.input_ports.tomcat.home,
"os_user":self.config.input_ports.tomcat.os_user_name
}
start_script_filepath = os.path.join(self.config.home_path, "agilefant.sh")
start_script_file = open(start_script_filepath, "wb")
start_script_file.write(startup_script)
start_script_file.close()
os.chmod(start_script_filepath, 0755)
# check that everything is now in place
self.validate_post_install()
def is_installed(self):
#return os.path.exists(self.config.home_path)
return False
def validate_post_install(self):
logger.debug('validate post install')
if not os.path.exists(self.config.home_path):
raise AgilefantError(ERR_NO_INSTALL_DIR, "Validate post install",
self.config, {"dir":self.config.home_path})
check_for_agilefant_db(self.config)
if not os.path.exists(self.config.deployment_target_path):
raise AgilefantError(ERR_NO_WAR_FILE, "Validate post install",
self.config,
developer_msg="Expected file at '%s'" %
self.config.deployment_target_path)
def start(self):
uri = _start_req_uri % \
(self.config.input_ports.tomcat.hostname,
self.config.input_ports.tomcat.manager_port)
tomcat_password = install_context.password_repository.get_value(self.config.input_ports.tomcat.admin_password)
try:
result = iuhttp.make_request_with_basic_authentication(uri,
_tomcat_mgr_realm,
self.config.input_ports.tomcat.admin_user,
tomcat_password)
if result.find(_start_rsp)==-1:
raise AgilefantError(ERR_TOMCAT_STARTRSP, "Startup",
self.config,
developer_msg="Response was '%s'" % result)
except urllib2.URLError, msg:
raise AgilefantError(ERR_TOMCAT_STARTUP, "Startup", self.config,
developer_msg="Tomcat error was '%s'" % msg)
def is_running(self):
return check_status(self.config)
def stop(self):
uri = _stop_req_uri % \
(self.config.input_ports.tomcat.hostname,
self.config.input_ports.tomcat.manager_port)
try:
result = iuhttp.make_request_with_basic_authentication(uri,
_tomcat_mgr_realm,
self.config.input_ports.tomcat.admin_user,
self.config.input_ports.tomcat.admin_password)
if result.find(_stop_rsp)==-1:
raise AgilefantError(ERR_TOMCAT_STOPRSP, "Stop",
self.config,
developer_msg="Response was '%s'" % result)
except urllib2.URLError, msg:
raise AgilefantError(ERR_TOMCAT_STOPREQ, "Stop",
self.config,
developer_msg="URL error was: '%s'" % msg)
|
|
from flask import current_app as app, render_template, request, redirect, abort, jsonify, json as json_mod, url_for, session, Blueprint
from CTFd.utils import ctftime, view_after_ctf, authed, unix_time, get_kpm, user_can_view_challenges, is_admin, get_config, get_ip, is_verified, ctf_started, ctf_ended, ctf_name, is_on_team
from CTFd.models import db, Challenges, Files, Solves, WrongKeys, Keys, Tags, Users, Awards, Teams
from sqlalchemy.sql import and_, or_, not_
import time
import re
import logging
import json
challenges = Blueprint('challenges', __name__)
@challenges.route('/challenges', methods=['GET'])
def challenges_view():
errors = []
start = get_config('start') or 0
end = get_config('end') or 0
if not is_admin(): # User is not an admin
if not ctftime():
# It is not CTF time
            if start > time.time(): # The CTF has not started yet
errors.append('{} challenges will be posted soon!'.format(ctf_name()))
elif not view_after_ctf():
errors.append('{} has ended.'.format(ctf_name()))
return render_template('chals.html', errors=errors, start=int(start), end=int(end))
if get_config('verify_emails') and not is_verified(): # User is not confirmed
return redirect(url_for('auth.confirm_user'))
if user_can_view_challenges(): # Do we allow unauthenticated users?
if get_config('start') and not ctf_started():
errors.append('{} has not started yet'.format(ctf_name()))
if (get_config('end') and ctf_ended()) and not view_after_ctf():
errors.append('{} has ended'.format(ctf_name()))
if not is_on_team():
errors.append('You are not on a team!')
return render_template('chals.html', errors=errors, start=int(start), end=int(end))
else:
if not is_on_team():
errors.append('You must create or join a team before you can start playing')
return render_template('chals.html', errors=errors, start=int(start), end=int(end))
return redirect(url_for('auth.login', next='challenges'))
@challenges.route('/chals', methods=['GET'])
def chals():
if not is_admin():
if not ctftime():
if view_after_ctf():
pass
else:
return redirect(url_for('views.static_html'))
if user_can_view_challenges():
chals = Challenges.query.filter(or_(Challenges.hidden != True, Challenges.hidden == None)).add_columns('id', 'name', 'value', 'description', 'category').order_by(Challenges.value).all()
json = {'game':[]}
for x in chals:
tags = [tag.tag for tag in Tags.query.add_columns('tag').filter_by(chal=x[1]).all()]
files = [ str(f.location) for f in Files.query.filter_by(chal=x.id).all() ]
json['game'].append({'id':x[1], 'name':x[2], 'value':x[3], 'description':x[4], 'category':x[5], 'files':files, 'tags':tags})
db.session.close()
return jsonify(json)
else:
db.session.close()
return redirect(url_for('auth.login', next='chals'))
@challenges.route('/chals/solves')
def chals_per_solves():
if not user_can_view_challenges():
return redirect(url_for('auth.login', next=request.path))
solves_sub = db.session.query(Solves.chalid, db.func.count(Solves.chalid).label('solves'))\
.join(Users, Solves.userid == Users.id)\
.filter(Users.banned == False)\
.group_by(Solves.chalid).subquery()
solves = db.session.query(solves_sub.columns.chalid, solves_sub.columns.solves, Challenges.name) \
.join(Challenges, solves_sub.columns.chalid == Challenges.id)\
.all()
json = {}
for chal, count, name in solves:
json[name] = count
db.session.close()
return jsonify(json)
@challenges.route('/team/solves')
@challenges.route('/team/solves/<teamid>')
def team_solves_view(teamid=None):
solves = None
awards = None
if teamid is None:
if is_admin():
solves = Solves.query.filter_by(userid=session['id']).all()
elif authed():
user = Users.query.filter_by(id=session.get('id')).first_or_404()
user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=user.teamid)]
solves = Solves.query.filter(Solves.userid.in_(user_ids)).all()
else:
return redirect(url_for('auth.login', next='solves'))
else:
team = Teams.query.filter_by(id=teamid).first_or_404()
user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=team.id)]
solves = Solves.query.filter(Solves.userid.in_(user_ids)).all()
awards = Awards.query.filter(Awards.userid.in_(user_ids)).all()
db.session.close()
json = {'solves': []}
for solve in solves:
json['solves'].append({
'chal': solve.chal.name,
'chalid': solve.chalid,
'team': solve.userid,
'value': solve.chal.value,
'category': solve.chal.category,
'time': unix_time(solve.date)
})
if awards:
for award in awards:
json['solves'].append({
'chal': award.name,
'chalid': None,
'team': award.userid,
'value': award.value,
'category': award.category,
'time': unix_time(award.date)
})
json['solves'].sort(key=lambda k: k['time'])
return jsonify(json)
@challenges.route('/solves')
@challenges.route('/solves/<userid>')
def solves_view(userid=None):
solves = None
awards = None
if userid is None:
if is_admin():
solves = Solves.query.filter_by(userid=session['id']).all()
elif authed():
user = Users.query.filter_by(id=session.get('id')).first_or_404()
user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=user.teamid)]
solves = Solves.query.filter(Solves.userid.in_(user_ids)).all()
else:
return redirect(url_for('auth.login', next='solves'))
else:
# team = Teams.query.filter_by(id=teamid).first_or_404()
# user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=team.id)]
# solves = Solves.query.filter(Solves.userid.in_(user_ids)).all()
# awards = Awards.query.filter(Awards.userid.in_(user_ids)).all()
solves = Solves.query.filter_by(userid=userid).all()
awards = Awards.query.filter_by(userid=userid).all()
db.session.close()
json = {'solves':[]}
for solve in solves:
json['solves'].append({
'chal': solve.chal.name,
'chalid': solve.chalid,
'team': solve.userid,
'value': solve.chal.value,
'category': solve.chal.category,
'time': unix_time(solve.date)
})
if awards:
for award in awards:
json['solves'].append({
'chal': award.name,
'chalid': None,
'team': award.userid,
'value': award.value,
'category': award.category,
'time': unix_time(award.date)
})
json['solves'].sort(key=lambda k: k['time'])
return jsonify(json)
@challenges.route('/maxattempts')
def attempts():
if not user_can_view_challenges():
return redirect(url_for('auth.login', next=request.path))
chals = Challenges.query.add_columns('id').all()
json = {'maxattempts':[]}
for chal, chalid in chals:
fails = WrongKeys.query.filter_by(userid=session['id'], chalid=chalid).count()
if fails >= int(get_config("max_tries")) and int(get_config("max_tries")) > 0:
json['maxattempts'].append({'chalid':chalid})
return jsonify(json)
@challenges.route('/fails/<userid>', methods=['GET'])
def fails(userid):
fails = WrongKeys.query.filter_by(userid=userid).count()
solves = Solves.query.filter_by(userid=userid).count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
@challenges.route('/chal/<chalid>/solves', methods=['GET'])
def who_solved(chalid):
if not user_can_view_challenges():
return redirect(url_for('auth.login', next=request.path))
solves = Solves.query\
.join(Users, Solves.userid == Users.id)\
.join(Teams, Users.teamid == Teams.id) \
.with_entities(Teams.id, Teams.name.label('teamname'), Solves.date.label('date')) \
.filter(Solves.chalid == chalid, Users.banned == False)\
.order_by(Solves.date.asc())
json = {'teams':[]}
for solve in solves:
json['teams'].append({'id':solve.id, 'name':solve.teamname, 'date':solve.date})
return jsonify(json)
@challenges.route('/chal/<chalid>', methods=['POST'])
def chal(chalid):
if ctf_ended() and not view_after_ctf():
return redirect(url_for('challenges.challenges_view'))
if not user_can_view_challenges():
return redirect(url_for('auth.login', next=request.path))
if authed() and is_verified() and (ctf_started() or view_after_ctf()):
fails = WrongKeys.query.filter_by(userid=session['id'], chalid=chalid).count()
logger = logging.getLogger('keys')
data = (time.strftime("%m/%d/%Y %X"), session['username'].encode('utf-8'), request.form['key'].encode('utf-8'), get_kpm(session['id']))
print("[{0}] {1} submitted {2} with kpm {3}".format(*data))
# Anti-bruteforce / submitting keys too quickly
if get_kpm(session['id']) > 10:
if ctftime():
wrong = WrongKeys(session['id'], chalid, request.form['key'])
db.session.add(wrong)
db.session.commit()
db.session.close()
logger.warn("[{0}] {1} submitted {2} with kpm {3} [TOO FAST]".format(*data))
# return "3" # Submitting too fast
return jsonify({'status': '3', 'message': "You're submitting keys too fast. Slow down."})
if not is_on_team():
logger.info("[{0}] {1} submitted {2} with kpm {3} [NOT ON TEAM]".format(*data))
return jsonify({'status':'3', 'message':'You are not on a team.'})
user = Users.query.filter_by(id=session.get('id')).first_or_404()
user_ids = [u.id for u in Users.query.with_entities(Users.id).filter_by(teamid=user.teamid)]
# solves = Solves.query.filter_by(userid=session['id'], chalid=chalid).first()
solves = Solves.query.filter(Solves.userid.in_(user_ids)).filter(Solves.chalid == chalid).first()
        # Challenge not solved yet
if not solves:
chal = Challenges.query.filter_by(id=chalid).first()
key = str(request.form['key'].strip().lower())
keys = json.loads(chal.flags)
# Hit max attempts
max_tries = int(get_config("max_tries"))
if fails >= max_tries > 0:
return jsonify({
'status': '0',
'message': "You have 0 tries remaining"
})
for x in keys:
if x['type'] == 0: #static key
print(x['flag'], key.strip().lower())
if x['flag'] and x['flag'].strip().lower() == key.strip().lower():
if ctftime():
solve = Solves(chalid=chalid, userid=session['id'], ip=get_ip(), flag=key)
db.session.add(solve)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [CORRECT]".format(*data))
# return "1" # key was correct
return jsonify({'status':'1', 'message':'Correct'})
elif x['type'] == 1: #regex
res = re.match(str(x['flag']), key, re.IGNORECASE)
if res and res.group() == key:
if ctftime():
solve = Solves(chalid=chalid, userid=session['id'], ip=get_ip(), flag=key)
db.session.add(solve)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [CORRECT]".format(*data))
# return "1" # key was correct
return jsonify({'status': '1', 'message': 'Correct'})
if ctftime():
wrong = WrongKeys(session['id'], chalid, request.form['key'])
db.session.add(wrong)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [WRONG]".format(*data))
# return '0' # key was wrong
if max_tries:
attempts_left = max_tries - fails
tries_str = 'tries'
if attempts_left == 1:
tries_str = 'try'
return jsonify({'status': '0', 'message': 'Incorrect. You have {} {} remaining.'.format(attempts_left, tries_str)})
else:
return jsonify({'status': '0', 'message': 'Incorrect'})
# Challenge already solved
else:
logger.info("{0} submitted {1} with kpm {2} [ALREADY SOLVED]".format(*data))
# return "2" # challenge was already solved
return jsonify({'status': '2', 'message': 'You already solved this'})
else:
return "-1"
|
|
# progress reporting
# Author:: Sam Steingold (<sds@magnetic.com>)
# Copyright:: Copyright (c) 2014, 2015, 2016 Magnetic Media Online, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import datetime
import re
import util
def difftime2string (x):
ax = abs(x)
if ax < 1: return "%.2fms" % (x*1000.0)
if ax < 100: return "%.2fsec" % (x)
if ax < 6000: return "%.2fmin" % (x/60.0)
if ax < 108000: return "%.2fhrs" % (x/3600.0)
if ax < 400*24*3600: return "%.2fdays" % (x/(24*3600.0))
return "%.2fyrs" % (x/(365.25*24*3600))
def elapsed (start):
return difftime2string(time.time()-start)
def processed (start,count,unit):
spent = time.time() - start
return "%d new %ss in %s%s" % (
count,unit,difftime2string(spent),
(" (%s/%s)" % (difftime2string(spent/count),unit)) if count else "")
def timing (func, logger = None):
start = time.time()
ret = func()
util.info("Ran %s in %s" % (func, elapsed(start)),logger=logger)
return ret
difftime_rex = re.compile('^(-)?([0-9.]+)(ms|sec|min|hrs|days|yrs)$')
def parse_difftime (s):
if s is None:
return None
if isinstance(s,int):
return s
if not isinstance(s,str):
raise TypeError("parse_difftime",s)
m = difftime_rex.match(s)
if m is None:
raise ValueError("parse_difftime",s)
sign,num,units = m.groups()
num = float(num) * (1 if sign is None else -1)
if units == "ms": return num / 1000.0
if units == "sec": return num
if units == "min": return num * 60
if units == "hrs": return num * 3600
if units == "days": return num * 3600 * 24
if units == "yrs": return num * 3600 * 24 * 365.25
raise ValueError("parse_difftime",s,units)
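# Hedged illustrative sketch (not part of the original module): parse_difftime
# is the rough inverse of difftime2string; values come back in seconds.
def _difftime_example():
    return (parse_difftime("-45min"),                  # -2700.0
            parse_difftime("2.00hrs"),                 # 7200.0
            difftime2string(parse_difftime("3days")))  # "3.00days"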
def parse_ymdh (s):
return datetime.datetime.strptime(s,"%Y/%m/%d/%H")
def time2string (t = None):
return time.strftime("%F %T",time.localtime(t))
def test ():
print difftime2string(100)
print parse_difftime("-45min")
print time2string()
class Done (Exception):
pass
class Progress (object):
@staticmethod
def get_parser (max_ticks = None, tick_report = None,
max_time = None, time_report = None,
flow_report = None):
aparse = argparse.ArgumentParser(add_help=False)
aparse.add_argument('-max-ticks',type=int, default=max_ticks,
                            help='Iterate at most this many times')
aparse.add_argument('-tick-report',type=int, default=tick_report, metavar='N',
help='Report progress every N ticks')
aparse.add_argument('-max-time',default=max_time,
help='Iterate for at most this long (e.g., 4hrs)')
aparse.add_argument('-time-report',type=int, default=time_report, metavar='S',
help='Report progress every S seconds')
aparse.add_argument('-flow-report', default=flow_report,
help='Report progress based on data flow time interval, e.g., every 20min of data')
return aparse
def __init__ (self, logger, status, opts, max_possible = None):
self.logger = logger
self.status = status
self.start = time.time()
self.ticks = 0
self.last_report_ticks = self.ticks
self.last_report_time = self.start
self.max_ticks = min(opts.max_ticks or max_possible,
max_possible or opts.max_ticks)
self.tick_report = opts.tick_report
self.max_time = parse_difftime(opts.max_time)
self.time_report = opts.time_report
try:
self.date_beg = opts.beg
self.date_end = opts.end
self.flow_beg = datetime.datetime.combine(opts.beg, datetime.time.min)
self.flow_end = datetime.datetime.combine(opts.end, datetime.time.max)
except AttributeError:
self.date_beg = self.date_end = self.flow_beg = self.flow_end = None
self.flow_now = self.flow_beg
self.flow_report = None if opts.flow_report is None else parse_difftime(opts.flow_report)
self.last_report_flow = self.flow_now
def completed_ticks (self):
if self.max_ticks is None:
return None
return float(self.ticks) / self.max_ticks
def completed_flow (self):
if self.flow_now is None:
return None
return (float((self.flow_now - self.flow_beg).total_seconds()) /
(self.flow_end - self.flow_beg).total_seconds())
def completed (self):
completed_ticks = self.completed_ticks()
completed_flow = self.completed_flow()
if completed_flow:
if completed_ticks:
return (completed_flow + completed_ticks) / 2
return completed_flow
if completed_ticks:
return completed_ticks
return None
def __str__ (self):
return ("<start=" + time2string(self.start) +
('' if self.max_ticks is None else
" max_ticks={m:,d}".format(m=self.max_ticks)) +
('' if self.max_time is None else
" max_time=" + difftime2string(self.max_time)) +
('' if self.tick_report is None else
" tick_report={t:,d}".format(t=self.tick_report)) +
('' if self.time_report is None else
" time_report=" + difftime2string(self.time_report)) +
('' if self.flow_report is None else
" flow_report=" + difftime2string(self.flow_report)) +
" ticks={t:,d}>".format(t=self.ticks))
# return (remaining-time, expected-time-at-end)
def eta (self):
completed = self.completed()
if completed is None:
if self.max_time is None:
return (None, None)
end = self.start + self.max_time
return (end - time.time(), end)
now = time.time()
remains = (now - self.start) * (1-completed) / completed
if self.max_time is None:
return (remains, now + remains)
end = self.start + self.max_time
return (min(remains, end - now), min(now + remains, end))
# flow_now is the timestamp of the current record
def tick (self, flow_now = None):
now = time.time()
if ((self.max_ticks is not None and self.ticks == self.max_ticks) or
(self.max_time is not None and now > self.start + self.max_time)):
raise Done()
self.ticks += 1
if flow_now is not None:
self.flow_now = flow_now
if ((self.tick_report is not None and
self.ticks - self.last_report_ticks >= self.tick_report) or
(self.flow_report is not None and self.flow_now is not None and
((self.flow_now - self.last_report_flow).total_seconds()
>= self.flow_report)) or
(self.time_report is not None and
now - self.last_report_time >= self.time_report)):
self.logger.info("%s",self.report())
self.last_report_time = now
self.last_report_ticks = self.ticks
self.last_report_flow = self.flow_now
def report (self):
remains, eta = self.eta()
s = "" if self.flow_now is None else self.flow_now.strftime(
"%Y-%m-%d %H:%M:%S ")
s += "" if self.status is None else self.status()
if remains is None or remains <= 0:
return s + "{t:,d}".format(t=self.ticks)
return s + "{t:,d} ({c:.2%}) ETA: {e:s} ({r:s})".format(
t=self.ticks,c=self.completed() or 0,e=time2string(eta),
r=difftime2string(remains))
@staticmethod
def test ():
p = Progress(None, None, Progress.get_parser().parse_args())
p.max_ticks = 1000
p.ticks = 100
p.start -= 100
print p
print p.report()
p.tick()
print p
print p.report()
if __name__ == '__main__':
test()
Progress.test()
|
|
# -*- coding: utf-8 -*-
from itertools import repeat
import six
from django.db import models, connections
from django.db.models.query import QuerySet
from django.utils.encoding import smart_text
from djorm_pgfulltext.utils import adapt
# Compatibility import and fixes section.
try:
from django.db.transaction import atomic
except ImportError:
    # This encapsulates pre-Django 1.6 transaction behavior under the same
    # abstraction as the Django 1.6 atomic decorator. It does not try to
    # fully emulate the 1.6 atomic behavior; it only provides a partially
    # compatible interface for ease of use.
from django.db import transaction
class atomic(object):
def __init__(self, using=None):
self.using = using
def __enter__(self):
if not transaction.is_managed(using=self.using):
transaction.enter_transaction_management(using=self.using)
self.forced_managed = True
else:
self.forced_managed = False
def __exit__(self, *args, **kwargs):
try:
if self.forced_managed:
transaction.commit(using=self.using)
else:
transaction.commit_unless_managed(using=self.using)
finally:
if self.forced_managed:
transaction.leave_transaction_management(using=self.using)
def auto_update_search_field_handler(sender, instance, *args, **kwargs):
instance.update_search_field()
class SearchManagerMixIn(object):
"""
A mixin to create a Manager with a 'search' method that may do a full text search
on the model.
    The manager is set up with a list of one or more of the model's fields that will be
    searched. It can be a list of field names, or a list of tuples (field_name, weight).
    It can also be None, in which case every CharField and TextField in the model will be
    searched.
    You can also give a 'search_field', a VectorField into which the values of the searched
    fields are copied and normalized. If you give it, searches will be made on this
    field; if not, they will be made directly against the searched fields.
    When using search_field, if auto_update = True, Django signals will be used to
    automatically synchronize the search_field with the searched fields every time instances
    are saved. If not, you can call the 'update_search_field' method on model instances to do this.
    If search_field is not used, both auto_update and update_search_field do nothing. Alternatively,
    you can create a PostgreSQL trigger to do the synchronization at the database level, see this:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-features.html#TEXTSEARCH-UPDATE-TRIGGERS
    In both cases, you should create a text search index, on either the searched fields or
    the compound search_field, as explained here:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-tables.html#TEXTSEARCH-TABLES-INDEX
    Finally, you can give a 'config', the Postgres text search configuration that will be used
    to normalize the search_field and the queries. How you can create a configuration:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-configuration.html
    Note that 'config' can be a tuple as in ('pg_catalog.english', 'pg_catalog.simple').
    In this case, fields are tokenized using each of the configurations specified in 'config'
    and the results are concatenated. This allows you to create a tsvector with multiple configs.
    To do all of those actions in the database, create a setup SQL script for Django:
https://docs.djangoproject.com/en/1.4/howto/initial-data/#providing-initial-sql-data
"""
def __init__(self,
fields=None,
search_field='search_index',
config='pg_catalog.english',
auto_update_search_field=False):
self.search_field = search_field
self.default_weight = 'D'
self.config = config
self.auto_update_search_field = auto_update_search_field
self._fields = fields
super(SearchManagerMixIn, self).__init__()
def contribute_to_class(self, cls, name):
'''
Called automatically by Django when setting up the model class.
'''
if not cls._meta.abstract:
# Attach this manager as _fts_manager in the model class.
if not getattr(cls, '_fts_manager', None):
cls._fts_manager = self
# Add 'update_search_field' instance method, that calls manager's update_search_field.
if not getattr(cls, 'update_search_field', None):
def update_search_field(self, search_field=None, fields=None, using=None, config=None, extra=None):
self._fts_manager.update_search_field(
pk=self.pk, search_field=search_field, fields=fields, using=using, config=config, extra=extra
)
setattr(cls, 'update_search_field', update_search_field)
if self.auto_update_search_field:
models.signals.post_save.connect(auto_update_search_field_handler, sender=cls)
super(SearchManagerMixIn, self).contribute_to_class(cls, name)
def get_queryset(self):
return SearchQuerySet(model=self.model, using=self._db)
def search(self, *args, **kwargs):
return self.get_queryset().search(*args, **kwargs)
def update_search_field(self, pk=None, search_field=None, fields=None, config=None, using=None, extra=None):
"""
Update the search_field of one instance, or a list of instances, or
all instances in the table (pk is one key, a list of keys or none).
If there is no search_field, this function does nothing.
:param pk: Primary key of instance
:param search_field: search_field which will be updated
:param fields: fields from which we update the search_field
:param config: config of full text search
:param using: DB we are using
"""
if not search_field:
search_field = self.search_field
if not search_field:
return
if fields is None:
fields = self._fields
if not config:
config = self.config
if using is None:
using = self.db
connection = connections[using]
qn = connection.ops.quote_name
where_sql = ''
params = []
if pk is not None:
if isinstance(pk, (list, tuple)):
params = pk
else:
params = [pk]
where_sql = "WHERE %s IN (%s)" % (
qn(self.model._meta.pk.column),
','.join(repeat("%s", len(params)))
)
search_vector = self._get_search_vector(config, using, fields=fields, extra=extra)
sql = "UPDATE %s SET %s = %s %s;" % (
qn(self.model._meta.db_table),
qn(search_field),
search_vector or "''",
where_sql
)
with atomic():
cursor = connection.cursor()
cursor.execute(sql, params)
def _find_text_fields(self):
fields = [f for f in self.model._meta.fields
if isinstance(f, (models.CharField, models.TextField))]
return [(f.name, None) for f in fields]
def _parse_fields(self, fields):
"""
Parse fields list into a correct format needed by this manager.
If any field does not exist, raise ValueError.
"""
parsed_fields = set()
if fields is not None and isinstance(fields, (list, tuple)):
if len(fields) > 0 and isinstance(fields[0], (list, tuple)):
parsed_fields.update(fields)
else:
parsed_fields.update([(x, None) for x in fields])
# Does not support field.attname.
field_names = set(field.name for field in self.model._meta.fields if not field.primary_key)
non_model_fields = set(x[0] for x in parsed_fields).difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this"
" model: {0}".format(", ".join(x for x in non_model_fields)))
else:
parsed_fields.update(self._find_text_fields())
return parsed_fields
def _get_search_vector(self, configs, using, fields=None, extra=None):
if fields is None:
vector_fields = self._parse_fields(self._fields)
else:
vector_fields = self._parse_fields(fields)
if isinstance(configs, six.string_types[0]):
configs = [configs]
search_vector = []
for config in configs:
for field_name, weight in vector_fields:
search_vector.append(
self._get_vector_for_field(field_name, weight=weight, config=config, using=using, extra=extra)
)
return ' || '.join(search_vector)
def _get_vector_for_field(self, field_name, weight=None, config=None, using=None, extra=None):
if not weight:
weight = self.default_weight
if not config:
config = self.config
if using is None:
using = self.db
field = self.model._meta.get_field(field_name)
ret = None
if hasattr(self.model, '_convert_field_to_db'):
ret = self.model._convert_field_to_db(field, weight, config, using, extra=extra)
if ret is None:
ret = self._convert_field_to_db(field, weight, config, using, extra=extra)
return ret
@staticmethod
def _convert_field_to_db(field, weight, config, using, extra=None):
connection = connections[using]
qn = connection.ops.quote_name
return "setweight(to_tsvector('%s', coalesce(%s.%s, '')), '%s')" % \
(config, qn(field.model._meta.db_table), qn(field.column), weight)
class SearchQuerySet(QuerySet):
@property
def manager(self):
return self.model._fts_manager
@property
def db(self):
return self._db or self.manager.db
def search(self, query, rank_field=None, rank_function='ts_rank', config=None,
rank_normalization=32, raw=False, using=None, fields=None,
headline_field=None, headline_document=None):
'''
        Convert the query with to_tsquery or plainto_tsquery, depending on whether
        raw is `True` or `False`, and return a QuerySet with the filter applied.
If `rank_field` is not `None`, a field with this name will be added
containing the search rank of the instances, and the queryset will be
ordered by it. The rank_function and normalization are explained here:
http://www.postgresql.org/docs/9.1/interactive/textsearch-controls.html#TEXTSEARCH-RANKING
If an empty query is given, no filter is made so the QuerySet will
return all model instances.
        If `fields` is not `None`, the filter is made with these fields instead
        of the ones defined on the manager's constructor.
        If `headline_field` and `headline_document` are not `None`, a field with
        the `headline_field` name will be added containing the headline of the
instances, which will be searched inside `headline_document`.
Search headlines are explained here:
http://www.postgresql.org/docs/9.1/static/textsearch-controls.html#TEXTSEARCH-HEADLINE
'''
if not config:
config = self.manager.config
db_alias = using if using is not None else self.db
connection = connections[db_alias]
qn = connection.ops.quote_name
qs = self
if using is not None:
qs = qs.using(using)
if query:
function = "to_tsquery" if raw else "plainto_tsquery"
ts_query = smart_text(
"%s('%s', %s)" % (function, config, adapt(query))
)
full_search_field = "%s.%s" % (
qn(self.model._meta.db_table),
qn(self.manager.search_field)
)
            # If fields is passed, obtain a vector expression from those
            # fields. Otherwise, fall back to the manager's search_field if
            # one is defined.
if fields:
search_vector = self.manager._get_search_vector(config, using, fields=fields)
else:
if not self.manager.search_field:
raise ValueError("search_field is not specified")
search_vector = full_search_field
where = " (%s) @@ (%s)" % (search_vector, ts_query)
select_dict, order = {}, []
if rank_field:
select_dict[rank_field] = '%s(%s, %s, %d)' % (
rank_function,
search_vector,
ts_query,
rank_normalization
)
order = ['-%s' % (rank_field,)]
if headline_field is not None and headline_document is not None:
select_dict[headline_field] = "ts_headline('%s', %s, %s)" % (
config,
headline_document,
ts_query
)
qs = qs.extra(select=select_dict, where=[where], order_by=order)
return qs
class SearchManager(SearchManagerMixIn, models.Manager):
pass
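# A minimal usage sketch (hypothetical model; the 'fields'/'config' keyword
# arguments are assumptions about how SearchManagerMixIn is configured, shown
# only to illustrate how search() is typically wired up):
#
#   class Article(models.Model):
#       title = models.CharField(max_length=200)
#       body = models.TextField()
#       objects = SearchManager(fields=(('title', 'A'), ('body', 'B')),
#                               config='pg_catalog.english')
#
#   Article.objects.search('full text', rank_field='rank')       # ranked results
#   Article.objects.search('full text', fields=(('title', 'A'),))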
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import re
import time
import shutil
import os
import sys
import subprocess
import textwrap
LOG = '/tmp/release.log'
def log(msg):
f = open(LOG, mode='ab')
f.write(msg.encode('utf-8'))
f.close()
def run(command):
log('\n\n%s: RUN: %s\n' % (datetime.datetime.now(), command))
if os.system('%s >> %s 2>&1' % (command, LOG)):
msg = ' FAILED: %s [see log %s]' % (command, LOG)
print(msg)
raise RuntimeError(msg)
def runAndSendGPGPassword(command, password):
p = subprocess.Popen(command, shell=True, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
f = open(LOG, 'ab')
while True:
p.stdout.flush()
line = p.stdout.readline()
if len(line) == 0:
break
f.write(line)
if line.find(b'Enter GPG keystore password:') != -1:
time.sleep(1.0)
p.stdin.write((password + '\n').encode('UTF-8'))
p.stdin.write('\n'.encode('UTF-8'))
result = p.poll()
if result != 0:
msg = ' FAILED: %s [see log %s]' % (command, LOG)
print(msg)
raise RuntimeError(msg)
def scrubCheckout():
# removes any files not checked into svn
  unversionedRex = re.compile(r'^ ?[\?ID] *[1-9 ]*[a-zA-Z]* +(.*)')
for l in os.popen('svn status --no-ignore -v').readlines():
match = unversionedRex.match(l)
if match:
s = match.group(1)
if os.path.exists(s):
print(' delete %s' % s)
if os.path.isdir(s) and not os.path.islink(s):
shutil.rmtree(s)
else:
os.remove(s)
def getSVNRev():
rev = os.popen('svnversion').read().strip()
try:
int(rev)
except (TypeError, ValueError):
raise RuntimeError('svn version is not clean: %s' % rev)
return rev
def prepare(root, version, gpgKeyID, gpgPassword):
print()
print('Prepare release...')
if os.path.exists(LOG):
os.remove(LOG)
os.chdir(root)
print(' svn up...')
run('svn up')
rev = getSVNRev()
print(' svn rev: %s' % rev)
log('\nSVN rev: %s\n' % rev)
print(' ant clean test')
run('ant clean test')
print(' clean checkout')
scrubCheckout()
open('rev.txt', mode='wb').write(rev.encode('UTF-8'))
print(' lucene prepare-release')
os.chdir('lucene')
cmd = 'ant -Dversion=%s' % version
if gpgKeyID is not None:
cmd += ' -Dgpg.key=%s prepare-release' % gpgKeyID
else:
cmd += ' prepare-release-no-sign'
if gpgPassword is not None:
runAndSendGPGPassword(cmd, gpgPassword)
else:
run(cmd)
print(' solr prepare-release')
os.chdir('../solr')
cmd = 'ant -Dversion=%s' % version
if gpgKeyID is not None:
cmd += ' -Dgpg.key=%s prepare-release' % gpgKeyID
else:
cmd += ' prepare-release-no-sign'
if gpgPassword is not None:
runAndSendGPGPassword(cmd, gpgPassword)
else:
run(cmd)
print(' done!')
print()
return rev
def push(version, root, rev, rcNum, username):
print('Push...')
dir = 'lucene-solr-%s-RC%d-rev%s' % (version, rcNum, rev)
s = os.popen('ssh %s@people.apache.org "ls -ld public_html/staging_area/%s" 2>&1' % (username, dir)).read()
if 'no such file or directory' not in s.lower():
print(' Remove old dir...')
run('ssh %s@people.apache.org "chmod -R u+rwX public_html/staging_area/%s; rm -rf public_html/staging_area/%s"' %
(username, dir, dir))
run('ssh %s@people.apache.org "mkdir -p public_html/staging_area/%s/lucene public_html/staging_area/%s/solr"' % \
(username, dir, dir))
print(' Lucene')
os.chdir('%s/lucene/dist' % root)
print(' zip...')
if os.path.exists('lucene.tar.bz2'):
os.remove('lucene.tar.bz2')
run('tar cjf lucene.tar.bz2 *')
print(' copy...')
run('scp lucene.tar.bz2 %s@people.apache.org:public_html/staging_area/%s/lucene' % (username, dir))
print(' unzip...')
run('ssh %s@people.apache.org "cd public_html/staging_area/%s/lucene; tar xjf lucene.tar.bz2; rm -f lucene.tar.bz2"' % (username, dir))
os.remove('lucene.tar.bz2')
print(' Solr')
os.chdir('%s/solr/package' % root)
print(' zip...')
if os.path.exists('solr.tar.bz2'):
os.remove('solr.tar.bz2')
run('tar cjf solr.tar.bz2 *')
print(' copy...')
run('scp solr.tar.bz2 %s@people.apache.org:public_html/staging_area/%s/solr' % (username, dir))
print(' unzip...')
run('ssh %s@people.apache.org "cd public_html/staging_area/%s/solr; tar xjf solr.tar.bz2; rm -f solr.tar.bz2"' % (username, dir))
os.remove('solr.tar.bz2')
print(' chmod...')
run('ssh %s@people.apache.org "chmod -R a+rX-w public_html/staging_area/%s"' % (username, dir))
print(' done!')
url = 'http://people.apache.org/~%s/staging_area/%s' % (username, dir)
return url
def pushLocal(version, root, rev, rcNum, localDir):
print('Push local [%s]...' % localDir)
os.makedirs(localDir)
dir = 'lucene-solr-%s-RC%d-rev%s' % (version, rcNum, rev)
os.makedirs('%s/%s/lucene' % (localDir, dir))
os.makedirs('%s/%s/solr' % (localDir, dir))
print(' Lucene')
os.chdir('%s/lucene/dist' % root)
print(' zip...')
if os.path.exists('lucene.tar.bz2'):
os.remove('lucene.tar.bz2')
run('tar cjf lucene.tar.bz2 *')
os.chdir('%s/%s/lucene' % (localDir, dir))
print(' unzip...')
run('tar xjf "%s/lucene/dist/lucene.tar.bz2"' % root)
os.remove('%s/lucene/dist/lucene.tar.bz2' % root)
print(' Solr')
os.chdir('%s/solr/package' % root)
print(' zip...')
if os.path.exists('solr.tar.bz2'):
os.remove('solr.tar.bz2')
run('tar cjf solr.tar.bz2 *')
print(' unzip...')
os.chdir('%s/%s/solr' % (localDir, dir))
run('tar xjf "%s/solr/package/solr.tar.bz2"' % root)
os.remove('%s/solr/package/solr.tar.bz2' % root)
print(' KEYS')
run('wget http://people.apache.org/keys/group/lucene.asc')
os.rename('lucene.asc', 'KEYS')
run('chmod a+r-w KEYS')
run('cp KEYS ../lucene')
print(' chmod...')
os.chdir('..')
run('chmod -R a+rX-w .')
print(' done!')
return 'file://%s/%s' % (os.path.abspath(localDir), dir)
def read_version(path):
version_props_file = os.path.join(path, 'lucene', 'version.properties')
return re.search(r'version\.base=(.*)', open(version_props_file).read()).group(1)
def parse_config():
epilogue = textwrap.dedent('''
Example usage for a Release Manager:
  python3.2 -u buildAndPushRelease.py --push-remote mikemccand --sign 6E68DA61 --rc-num 1 /path/to/lucene_solr_4_7
''')
description = 'Utility to build, push, and test a release.'
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--no-prepare', dest='prepare', default=True, action='store_false',
help='Use the already built release in the provided checkout')
parser.add_argument('--push-remote', metavar='USERNAME',
help='Push the release to people.apache.org for the given user')
parser.add_argument('--push-local', metavar='PATH',
help='Push the release to the local path')
parser.add_argument('--sign', metavar='KEYID',
help='Sign the release with the given gpg key')
  parser.add_argument('--rc-num', metavar='NUM', type=int, default=1,
                      help='Release Candidate number (default: 1)')
parser.add_argument('--smoke-test', metavar='PATH',
help='Run the smoker tester on the release in the given directory')
parser.add_argument('root', metavar='checkout_path',
help='Root of SVN checkout for lucene-solr')
config = parser.parse_args()
if config.push_remote is not None and config.push_local is not None:
parser.error('Cannot specify --push-remote and --push-local together')
if not config.prepare and config.sign:
parser.error('Cannot sign already built release')
if config.push_local is not None and os.path.exists(config.push_local):
parser.error('Cannot push to local path that already exists')
if config.rc_num <= 0:
parser.error('Release Candidate number must be a positive integer')
if not os.path.isdir(config.root):
# TODO: add additional svn check to ensure dir is a real lucene-solr checkout
parser.error('Root path is not a valid lucene-solr checkout')
config.version = read_version(config.root)
print('Building version: %s' % config.version)
  if config.sign:
    sys.stdout.flush()
    import getpass
    config.key_id = config.sign
    config.key_password = getpass.getpass('Enter GPG keystore password: ')
  else:
    config.key_id = None
    config.key_password = None
return config
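# A second, unsigned example (hypothetical paths; complements the epilogue above):
#   python3 -u buildAndPushRelease.py --push-local /tmp/releases --rc-num 1 /path/to/lucene_solr_4_7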
def main():
c = parse_config()
if c.prepare:
rev = prepare(c.root, c.version, c.key_id, c.key_password)
else:
    os.chdir(c.root)
rev = open('rev.txt', encoding='UTF-8').read()
if c.push_remote:
url = push(c.version, c.root, rev, c.rc_num, c.push_remote)
elif c.push_local:
url = pushLocal(c.version, c.root, rev, c.rc_num, c.push_local)
else:
url = None
if url is not None:
print(' URL: %s' % url)
print('Next set the PYTHON_EXEC env var and you can run the smoker tester:')
    m = re.match(r'(.*/)', sys.argv[0])
    prefix = m.group(1) if m else ''
    print(' $PYTHON_EXEC %ssmokeTestRelease.py %s' % (prefix, url))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Keyboard interrupt...exiting')
|
|
#!/usr/bin/env python
"""Defines the main menu module for a program that inputs a MemberHub directory dump,
a school roster, and a hub map to perform analyses on the MemberHub directory.
"""
import directory_tools
import roster_tools
import hub_map_tools
import import_file_tools
import roster
import actions
STUDENT_INDICATOR = "+SA"
STDOUT_SEPERATOR = "-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-"
def PrintToScreenFileOrNeither(prompt):
"""menu.PrintToScreenFileOrNeither
INPUTS:
- prompt -- string prompting the user to answer
OUTPUTS:
- 'y', 'f', or '' -- will ask over and over until one of these inputs is given
ASSUMPTIONS:
- None.
"""
answer = " "
while answer not in (None, '', 'y', 'Y', 'f', 'F'):
answer = input(prompt + "? ('y' for 'screen', 'f' for file, <enter> for neither) ")
return answer.lower()
def PrintToScreenOrNot():
"""menu.PrintToScreenOrNot
INPUTS:
- none
OUTPUTS:
- 'y' or '' -- will ask over and over until one of these inputs is given
ASSUMPTIONS:
- None.
"""
answer = " "
while answer not in (None, '', 'y', 'Y'):
answer = input("Print list to screen? (<enter> for 'no' and 'y' for 'to screen') ")
return answer.lower()
def FindMissingEmail(arg_list):
"""menu.FindMissingEmail
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output statistics about families with and without
email addresses, and give the option to display the lists.
ASSUMPTIONS:
None.
"""
##
## perform the action
total_adult_count, no_email_person, no_email_family, partial_family, map_d = \
actions.FindMissingEmail(arg_list)
##
## extract copies of the arguments so they are not accidentally modified
directory = arg_list[0].copy()
hub_map_d = arg_list[1].copy()
##
## print some of the counts to the screen for the user to review
print(STDOUT_SEPERATOR)
print("The directory has %d families and %d adults." % \
(len(directory), total_adult_count))
print("%d out of the %d adults have no email address." % \
(len(no_email_person), total_adult_count))
print("%d out of %d families have no adult with an email address." % \
(len(no_email_family), len(directory)))
print("%d out of %d families have some adults without and some with email addresses." % \
(len(partial_family), len(directory)))
print("%d out of %d families have all adults with email addresses." % \
((len(directory)-len(no_email_family)-len(partial_family)), len(directory)))
print("%d out of %d families have at least one adult with an email address." % \
((len(directory)-len(no_email_family)), len(directory)))
##
## create a list of people in each hub who do not have an email
action = PrintToScreenFileOrNeither("Print list of adults without email")
if action == 'y':
for this_list in map_d.keys():
print('Hub ID = ', this_list)
for this_person in map_d[this_list]:
this_person.PrintWithHubs()
print('\n-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n')
elif action == 'f':
import_file_tools.CreateByHubFile(map_d, hub_map_d, "emailless_by_hub")
def FindOrphans(directory):
"""menu.FindOrphans
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
Prints to standard output the children in the directory who do not have an
parent associated with their entry.
ASSUMPTIONS:
None.
"""
##
## perform the action
orphan_families = actions.FindOrphans(directory)
##
## show user how many were found
print("Found %d families without adults out of %d families" % \
(len(orphan_families), len(directory)))
##
## if any orphans were found, prompt user whether to show on screen
if len(orphan_families) > 0:
if PrintToScreenOrNot() == 'y':
for entry_family in orphan_families:
entry_family.Print()
print(STDOUT_SEPERATOR)
def FindChildless(directory):
"""menu.FindChildless
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
Prints to standard output the adults in the directory who do not have a
child associated with their entry.
ASSUMPTIONS:
None.
"""
##
## perform the action
childless_families = actions.FindChildless(directory)
##
## show user how many were found
print("Found %d families without children out of %d families" % \
(len(childless_families), len(directory)))
##
    ## if any childless families were found, prompt user whether to show on screen
if len(childless_families) > 0:
if PrintToScreenOrNot() == 'y':
for entry_family in childless_families:
entry_family.Print()
print(STDOUT_SEPERATOR)
def FindHubless(arg_list):
"""menu.FindHubless
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output the names in the directory who are not members of
at least one classroom hub.
ASSUMPTIONS:
None.
"""
##
## perform the action
hubless_adults, hubless_children = actions.FindHubless(arg_list)
##
## show user number of adults not in hubs, and prompt whether to show on screen
print("Found %d adults who are not in at least one classroom hub." % len(hubless_adults))
if len(hubless_adults) > 0:
if PrintToScreenOrNot() == "y":
for this_person in hubless_adults:
print("%s %s <%s>" % (this_person.first_name, this_person.last_name, this_person.hubs))
##
## show user number of children not in hubs, and prompt whether to show on screen
print("Found %d children who are not in a classroom hub." % len(hubless_children))
if len(hubless_children) > 0:
if PrintToScreenOrNot() == "y":
for this_person in hubless_children:
print("%s %s <%s>" % (this_person.first_name, this_person.last_name, this_person.hubs))
def FindChildrenInMultipleClassroom(arg_list):
"""menu.FindChildrenInMultipleClassroom
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output the students in the directory who are members of
more than one classroom hub.
ASSUMPTIONS:
None.
"""
##
## perform the action
hubful_children = actions.FindChildrenInMultipleClassroom(arg_list)
##
## show user the number of students who are in multiple classroom hubs,
## and prompt whether to show them on the screen.
print("Found %d students who are not in more than one classroom hub." % len(hubful_children))
if len(hubful_children) > 0:
if PrintToScreenOrNot() == "y":
for this_person in hubful_children:
print("%s %s <%s>" % (this_person.first_name, this_person.last_name, this_person.hubs))
def ListShowAndAct(this_list, statement, file_name, hide_email=True):
print(len(this_list), statement)
if len(this_list) > 0:
action = PrintToScreenFileOrNeither('Print list to screen or file')
if action == "y":
for this_person in this_list:
if hide_email:
this_person.Print()
else:
this_person.PrintWithEmail()
elif action == "f":
import_file_tools.CreateFileFromPeople(this_list, file_name)
def FindAdultsWithoutAccounts(arg_list):
"""menu.FindAdultsWithoutAccounts
INPUTS:
- directory -- list of families from a MemberHub directory dump.
OUTPUTS:
Provides the option to write to standard output or to a file the list of adults who
do not have accounts, separated by whether their profile has an email address or not.
"""
##
## perform the action
teacher_without_email, no_account_without_email, teacher_with_no_account, no_account_with_email, without_email_map, with_email_map = \
actions.FindAdultsWithoutAccounts(arg_list)
##
## show the user the number of adults with neither account nor email, and prompt
## whether to print to the screen or save to a file.
ListShowAndAct(this_list = teacher_without_email,
statement = "people found who work for the school without accounts or emails.",
file_name = "teachers_without_email")
ListShowAndAct(this_list = no_account_without_email,
statement = "adults found without accounts or emails.",
file_name = "no_account_without_email")
##
## show the user the number of adults with no account but with email, and prompt
## whether to print to the screen or save to a file.
ListShowAndAct(this_list = teacher_with_no_account,
statement = "people found who work for the school without accounts, but with emails.",
file_name = "teachers_without_account",
hide_email = False)
ListShowAndAct(this_list = no_account_with_email,
statement = "adults found without accounts, but with emails.",
file_name = "no_account_with_email",
hide_email = False)
def PrintNotInDirectory(arg_list):
"""menu.PrintNotInDirectory
INPUTS:
- directory -- list containing the MemberHub directory families
- roster -- list containing the school roster families
OUTPUTS:
- entriless -- returns list of families in the school roster that could not
be found in the directory
Also prints to standard output the names in the school roster who are not in the
MemberHub directory.
ASSUMPTIONS:
None.
"""
##
## perform the action
entriless = actions.PrintNotInDirectory(arg_list)
##
## tell the user how many entriless families were found
print("Found %d people on the roster who were not in the directory" % len(entriless))
if len(entriless) == 0:
return
##
## ask the user how to output the list of entriless families
action = PrintToScreenFileOrNeither('Print list to screen or file')
##
## output to the screen
if action == 'y':
for entry in entriless:
print(STDOUT_SEPERATOR)
print("Did not find this family from the roster in the directory: ")
entry.Print()
##
## output to a file
elif action == 'f':
import_file_tools.CreateFileFromFamily(entriless)
def FindParentChildrenHubMismatches(directory):
"""menu.FindParentChildrenHubMismatches
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
- at user prompt, prints to standard output the family members and their
hubs that have adults who are not members of all their children's hubs
ASSUMPTIONS:
- None.
"""
##
## perform the action
mismatches = actions.FindParentChildrenHubMismatches(directory)
##
## show user the number of families with adults who are not in all their
## children's hubs, and prompt whether to show them on the screen.
print("Found %d families that have at least one adult who is not in all thier children's classroom hubs." % \
len(mismatches))
if len(mismatches) > 0:
if PrintToScreenOrNot() == "y":
for this_family in mismatches:
this_family.PrintWithHubs()
print(STDOUT_SEPERATOR)
def FindUnsedErrata(arg_list):
"""menu.FindUnsedErrata
INPUTS:
    - arg_list -- the menu template requires each action to take an input, but it
                  is not used by this function
OUTPUTS:
Prints the roster errata entries that are no longer found in the roster, and can be
removed.
ASSUMPTIONS:
- none
"""
##
## perform the action
unused_errata, all_errata = actions.FindUnsedErrata()
##
    ## show the user the number of unused errata entries, and prompt whether
    ## to show them on the screen.
print("Found %d unused errata." % len(unused_errata))
if len(unused_errata) > 0:
if PrintToScreenOrNot() == "y":
for entry in unused_errata:
print(entry, '|', all_errata[entry])
print(STDOUT_SEPERATOR)
def MakePrompt(choices):
guts = '\n'.join(['(%s) - %s' % (choice, choices[choice]['Description'])
for choice in sorted(choices.keys())])
return '\n' + STDOUT_SEPERATOR + '\nChoose:\n' + guts + '\nOr press <enter> to quit. Your selection --> '
def RunMenu(master_directory, master_roster, master_map):
"""Runs the user interface for dictionary manipulation."""
##
## The choices dictionary has function names for values.
choices = {'a': {'Description':'Find Missing Email',
'Function' :FindMissingEmail,
'Arg' :[master_directory, master_map]},
'b': {'Description':'Find Orphans',
'Function' :FindOrphans,
'Arg' :master_directory},
'c': {'Description':'Find Childless',
'Function' :FindChildless,
'Arg' :master_directory},
'd': {'Description':'Find Not In Classroom Hub',
'Function' :FindHubless,
'Arg' :[master_directory, master_map]},
'e': {'Description':'Find Adults without Accounts',
'Function' :FindAdultsWithoutAccounts,
'Arg' :[master_directory, master_map]},
'f': {'Description':'Find Not in Directory',
'Function' :PrintNotInDirectory,
'Arg' :[master_directory, master_roster]},
'g': {'Description':'Find Adults/Children Hub Mismatches',
'Function' :FindParentChildrenHubMismatches,
'Arg' :master_directory},
'h': {'Description':'Find Unused Errata',
'Function' :FindUnsedErrata,
'Arg' :'Unused String'},
               'i': {'Description':'Find students who are in multiple classroom hubs',
'Function' :FindChildrenInMultipleClassroom,
'Arg' :[master_directory, master_map]}}
prompt = MakePrompt(choices)
##
## repeat until exit condition breaks the loop
while True:
##
## get the user's selection
this_choice = input(prompt).lower()
##
## if the selection is empty (<enter>), the break out of the loop and
## terminate the program
if not this_choice:
break
##
## otherwise, perform the selected action if the selection is recognized
elif this_choice in choices.keys():
# The appropriate function is called
# using the dictionary value for the name
# of the function.
choices[this_choice]['Function']( choices[this_choice]['Arg'] )
##
## the selection was not recognized, so tell the user to retry
else:
print("%s is not an acceptible choice. Try again." % this_choice)
def main():
master_map = hub_map_tools.ReadHubMap()
print(STDOUT_SEPERATOR)
master_directory = directory_tools.ReadDirectory(master_map)
print(STDOUT_SEPERATOR)
master_roster = roster_tools.ReadRoster(master_map)
RunMenu(master_directory, master_roster, master_map)
if __name__ == '__main__':
main()
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import os
import sys
import shutil
import subprocess
import tempfile
CHROMIUM_PATH = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
TOOLS_PERF_PATH = os.path.join(CHROMIUM_PATH, 'tools', 'perf')
sys.path.insert(1, TOOLS_PERF_PATH)
from core.external_modules import pandas
RUNS_USED_FOR_LIMIT_UPDATE = 30
CHANGE_PERCENTAGE_LIMIT = 0.01
SWARMING_PATH = os.path.join(CHROMIUM_PATH, 'tools', 'luci-go', 'swarming')
UPPER_LIMITS_DATA_DIR = os.path.join(
CHROMIUM_PATH, 'testing', 'scripts', 'representative_perf_test_data')
def FetchItemIds(tags, limit):
"""Fetches the item id of tasks described by the tags.
Args:
tags: The tags which describe the task such as OS and buildername.
limit: The number of runs to look at.
Returns:
A list containing the item Id of the tasks.
"""
query = [
SWARMING_PATH, 'tasks', '-S', 'chromium-swarm.appspot.com', '-limit',
str(limit), '-state=COMPLETED', '-field', 'items(task_id)', '-tag',
'master:chromium.gpu.fyi', '-tag', 'os:{os}'.format(**tags), '-tag',
'name:rendering_representative_perf_tests', '-tag',
'buildername:{buildername}'.format(**tags)
]
return json.loads(subprocess.check_output(query))
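# Illustrative shape of the parsed swarming output consumed below
# (the task ids are placeholders):
#   [{"task_id": "4f1a3b..."}, {"task_id": "52bbc0..."}, ...]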
def FetchItemData(task_id, benchmark, index, temp_dir):
"""Fetches the performance values (AVG & CI ranges) of tasks.
Args:
    task_id: The item Id of the task to fetch data for.
    benchmark: The benchmark these tasks are on (desktop/mobile).
index: The index field of the data_frame
temp_dir: The temp directory to store task data in.
Returns:
A data_frame containing the averages and confidence interval ranges.
"""
query = [
SWARMING_PATH, 'collect', '-S', 'chromium-swarm.appspot.com',
'-output-dir', temp_dir, '-perf', task_id
]
try:
subprocess.check_output(query)
except Exception as e:
print(e)
result_file_path = os.path.join(temp_dir, task_id, 'rendering.' + benchmark,
'perf_results.csv')
try:
df = pandas.read_csv(result_file_path)
df = df.loc[df['name'] == 'frame_times']
df = df[['stories', 'avg', 'ci_095']]
df['index'] = index
return df
  except Exception:
print("CSV results were not produced!")
def GetPercentileValues(benchmark, tags, limit, percentile):
"""Get the percentile value of recent runs described by given tags.
  Given the tags and benchmark, this function fetches the data of the last
  {limit} runs and finds the percentile value for each story.
Args:
    benchmark: The benchmark these tasks are on (desktop/mobile).
tags: The tags which describe the tasks such as OS and buildername.
limit: The number of runs to look at.
percentile: the percentile to return.
Returns:
A dictionary with averages and confidence interval ranges calculated
from the percentile of recent runs.
"""
items = []
for tag_set in tags:
items.extend(FetchItemIds(tag_set, limit))
dfs = []
try:
temp_dir = tempfile.mkdtemp('perf_csvs')
for idx, item in enumerate(items):
      dfs.append(FetchItemData(item['task_id'], benchmark, idx, temp_dir))
finally:
shutil.rmtree(temp_dir)
data_frame = pandas.concat(dfs, ignore_index=True)
if not data_frame.empty:
avg_df = data_frame.pivot(index='stories', columns='index', values='avg')
upper_limit = avg_df.quantile(percentile, axis = 1)
ci_df = data_frame.pivot(index='stories', columns='index', values='ci_095')
upper_limit_ci = ci_df.quantile(percentile, axis = 1)
results = {}
for index in avg_df.index:
results[index] = {
'avg': round(upper_limit[index], 3),
'ci_095': round(upper_limit_ci[index], 3)
}
return results
def MeasureNewUpperLimit(old_value, new_value, att_name, max_change):
# There has been an improvement.
if new_value < old_value:
# Decrease the limit gradually in case of improvements.
new_value = (old_value + new_value) / 2.0
change_pct = 0.0
if old_value > 0:
change_pct = (new_value - old_value) / old_value
print(
' {}:\t\t {} -> {} \t({:.2f}%)'.format(
att_name, old_value, new_value, change_pct * 100))
if new_value < 0.01:
print('WARNING: New selected value is close to 0.')
return (
round(new_value, 3),
max(max_change, abs(change_pct))
)
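# Worked example (illustrative numbers): with old_value=10.0 and new_value=8.0
# the limit is lowered gradually to (10.0 + 8.0) / 2 = 9.0, a change of
# (9.0 - 10.0) / 10.0 = -10%, so max_change becomes max(max_change, 0.10).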
def RecalculateUpperLimits(data_point_count):
"""Recalculates the upper limits using the data of recent runs.
This method replaces the existing JSON file which contains the upper limits
used by representative perf tests if the changes of upper limits are
significant.
Args:
data_point_count: The number of runs to use for recalculation.
"""
with open(os.path.join(UPPER_LIMITS_DATA_DIR,
'platform_specific_tags.json')) as tags_data:
platform_specific_tags = json.load(tags_data)
with open(
os.path.join(
UPPER_LIMITS_DATA_DIR,
'representatives_frame_times_upper_limit.json')) as current_data:
current_upper_limits = json.load(current_data)
max_change = 0.0
results = {}
for platform in platform_specific_tags:
platform_data = platform_specific_tags[platform]
print('\n- Processing data ({})'.format(platform))
results[platform] = GetPercentileValues(
platform_data['benchmark'], platform_data['tags'],
data_point_count, 0.95)
# Loop over results and adjust base on current values.
for story in results[platform]:
if story in current_upper_limits[platform]:
print(story, ':')
new_avg, max_change = MeasureNewUpperLimit(
current_upper_limits[platform][story]['avg'],
results[platform][story]['avg'], 'AVG', max_change)
results[platform][story]['avg'] = new_avg
new_ci, max_change = MeasureNewUpperLimit(
current_upper_limits[platform][story]['ci_095'],
results[platform][story]['ci_095'], 'CI', max_change)
results[platform][story]['ci_095'] = new_ci
if current_upper_limits[platform][story].get('control', False):
results[platform][story]['control'] = True
if max_change > CHANGE_PERCENTAGE_LIMIT:
with open(
os.path.join(
UPPER_LIMITS_DATA_DIR,
'representatives_frame_times_upper_limit.json'
), 'w') as outfile:
json.dump(results, outfile, separators=(',', ': '), indent=2)
print(
'Upper limits were updated on '
'representatives_frame_times_upper_limit.json')
else:
print('Changes are small, no need for new limits')
if __name__ == '__main__':
sys.exit(RecalculateUpperLimits(RUNS_USED_FOR_LIMIT_UPDATE))
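# Illustrative shape of representatives_frame_times_upper_limit.json, based on
# the keys read above (platform and story names are placeholders):
#   {
#     "some_platform": {
#       "some_story": {"avg": 16.4, "ci_095": 2.1, "control": true}
#     }
#   }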
|
|
"""
The MIT License (MIT)
Copyright (c) 2016 Stratos Goudelis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import hashlib
import signal
import time
import mybitbank.libs.jsonrpc
from httplib import CannotSendRequest
from django.utils.timezone import utc
from mybitbank.libs import events
from mybitbank.libs import misc
from mybitbank.libs.bitcoinrpc.authproxy import JSONRPCException
from mybitbank.libs.jsonrpc import ServiceProxy
#from mybitbank.libs.entities.cacher import Cacher
measure_time = False
def timeit(method):
if measure_time is not True:
return method
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '%r() (%r, %r) %2.2f sec' % (method.__name__, args, kw, te - ts)
return result
return timed
class ExecuteCommandTimeoutException(Exception):
'''
WIP! Do not use!
'''
pass
class Connector(object):
# signal timeout
signal_timeout = 3
# how long to disable a failing service
disable_time = 10
# currency providers config
config = {}
# ServiceProxies objects
services = {}
# errors
errors = []
# alerts shown on the UI as alerts
alerts = {}
# the WSGIRequest object we are serving
request = None
# cache object
cache = []
@timeit
def __init__(self):
'''
Constructor, load config
'''
mybitbank.libs.jsonrpc.HTTP_TIMEOUT = 2
        currency_configs = []
        try:
            import walletconfig
            currency_configs = walletconfig.config
        except (AttributeError, ImportError) as e:
            self.errors.append({'message': 'Error occurred while loading the wallet configuration file (%s)' % (e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
for currency_config in currency_configs:
if currency_config.get('enabled', True):
self.config[currency_config['id']] = currency_config
self.config[currency_config['id']]['enabled'] = True
self.services[currency_config['id']] = ServiceProxy("http://%s:%s@%s:%s" %
(currency_config['rpcusername'],
currency_config['rpcpassword'],
currency_config['rpchost'],
currency_config['rpcport']))
def executeCommand(self, provider_id, command, *args):
'''
WIP! Do not use. This does not work when outside main thread!
Call the command from the currency provider (xxxcoinds) with timeout signals
since the xxxcoinds may accept the connection but will not respond because they are busy.
They can be busy for many reasons. Some calls block the RCP threads or they could be downloading
blocks. This make the httplib timeout useless. Using signals we can timeout any function regardless
of the reason it is delaying.
'''
print "provider_id: %s" % provider_id
print "command: %s" % command
print args
def timeout_handler(signum, frame):
raise ExecuteCommandTimeoutException()
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(self.signal_timeout)
try:
rpc_method = getattr(self.services[provider_id], command)
rpc_response = rpc_method(*args)
print rpc_response
except ExecuteCommandTimeoutException:
print "timeout"
self.errors.append({'message': 'Signal timeout occurred while doing %s (provider id: %s)' % (command, provider_id), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return None
finally:
print "finally"
signal.signal(signal.SIGALRM, old_handler)
print "returning"
signal.alarm(0)
return rpc_response
def addAlert(self, category, alert):
'''
Add an alert for the UI
'''
        if category not in self.alerts:
self.alerts[category] = []
self.alerts[category].append(alert)
return True
@timeit
def removeCurrencyService(self, provider_id):
'''
Remove the ServiceProxy object from the list of service in case of a xxxcoind daemon not responding in time
'''
if self.config.get(provider_id, False):
currency_provider_config = self.config.get(provider_id, {})
if currency_provider_config.get('enabled', False) is True:
                self.addAlert('currencybackend', {'provider_id': provider_id, 'message': 'Currency service provider %s named %s is disabled for %s seconds due to an error communicating.' % (provider_id, currency_provider_config['name'], self.disable_time), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
                currency_provider_config['enabled'] = datetime.datetime.utcnow().replace(tzinfo=utc) + datetime.timedelta(0, self.disable_time)
                events.addEvent(self.request, "Currency service %s has been disabled for %s seconds due to an error communicating" % (currency_provider_config['currency'], self.disable_time), 'error')
if self.services.get(provider_id, None):
del self.services[provider_id]
def longNumber(self, x):
'''
Convert number coming from the JSON-RPC to a human readable format with 8 decimal
'''
if type(x) is str:
return x
else:
return "{:.8f}".format(x)
def getParamHash(self, param=""):
'''
This function takes a string and calculates a sha224 hash out of it.
It is used to hash the input parameters of functions/method in order to
uniquely identify a cached result based only on the input parameters of
the function/method call.
'''
cache_hash = hashlib.sha224(param).hexdigest()
return cache_hash
@timeit
def getInfo(self, provider_id):
'''
Get xxxcoind info
'''
if provider_id not in self.services.keys():
return {'message': 'Non-existing currency provider id %s' % provider_id, 'code':-100}
peerinfo = {}
try:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
peerinfo = self.services[provider_id].getinfo()
except (JSONRPCException, Exception), e:
self.errors.append({'message': 'Error occurred while doing getinfo (provider id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return peerinfo
@timeit
def getPeerInfo(self, provider_id):
'''
Get peer info from the connector (xxxcoind)
'''
peers = []
try:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
peers = self.services[provider_id].getpeerinfo()
except JSONRPCException:
            # in case the coind does not support the getpeerinfo command
return {'error'}
except Exception, e:
# in case of an error, store the error, disabled the service and move on
self.errors.append({'message': 'Error occurred while doing getpeerinfo (provider id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return peers
@timeit
def listAccounts(self, gethidden=False, getarchived=False, selected_provider_id=-1):
'''
Get a list of accounts. This method also supports filtering, fetches address for each account etc.
'''
# get data from the connector (xxxcoind)
fresh_accounts = {}
if selected_provider_id > 0:
provider_ids = [int(selected_provider_id)]
else:
provider_ids = self.config.keys()
for provider_id in provider_ids:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
try:
fresh_accounts[provider_id] = self.services[provider_id].listaccounts()
for fresh_account_name, fresh_account_balance in fresh_accounts[provider_id].items():
fresh_accounts[provider_id][fresh_account_name] = self.longNumber(fresh_account_balance)
except (Exception, CannotSendRequest) as e:
# in case of an error, store the error, remove the service and move on
self.errors.append({'message': 'Error occurred while doing listaccounts (provider id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return fresh_accounts
@timeit
def getAddressesByAccount(self, account, provider_id):
'''
Get the address of an account name
'''
if type(account) in [str, unicode]:
name = account
elif account.get('name', False):
name = account['name']
else:
return []
addresses = []
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
try:
addresses = self.services[provider_id].getaddressesbyaccount(name)
except Exception, e:
self.errors.append({'message': 'Error occurred while doing getaddressesbyaccount (provider id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return addresses
@timeit
def listTransactionsByAccount(self, account_name, provider_id, limit=100000, start=0):
'''
Get a list of transactions by account name and provider_id
'''
transactions = []
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
try:
transactions = self.services[provider_id].listtransactions(account_name, limit, start)
except Exception as e:
self.errors.append({'message': 'Error occurred while doing listtransactions (provider_id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return transactions
@timeit
def getNewAddress(self, provider_id, account_name):
'''
Create a new address
'''
new_address = None
if provider_id not in self.config.keys():
return False
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
if self.services.get(provider_id, False) and type(account_name) in [str, unicode]:
new_address = self.services[provider_id].getnewaddress(account_name)
return new_address
else:
return False
@timeit
def getBalance(self, provider_id=0, account_name="*"):
'''
Get balance for each provider
'''
balances = {}
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
try:
balances[provider_id] = self.services[provider_id].getbalance(account_name)
except Exception as e:
# in case of an Exception continue on to the next currency service (xxxcoind)
self.errors.append({'message': 'Error occurred while doing getbalance (provider id: %s, error: %s)' % (provider_id, e), 'when': datetime.datetime.utcnow().replace(tzinfo=utc)})
self.removeCurrencyService(provider_id)
return balances
@timeit
def moveAmount(self, from_account, to_account, provider_id, amount, minconf=1, comment=""):
'''
Move amount from local to local accounts
        Note: from_account may be an empty string
'''
if provider_id not in self.services.keys():
return {'message': 'Non-existing currency provider id %s' % provider_id, 'code':-100}
if self.config[provider_id]['enabled'] is not True:
return {'message': 'Currency service with id %s disabled for now' % provider_id, 'code':-150}
if not misc.isFloat(amount) or type(amount) is bool:
return {'message': 'Amount is not a number', 'code':-102}
if type(comment) not in [str, unicode]:
return {'message': 'Comment is not valid', 'code':-104}
try:
minconf = int(minconf)
except:
return {'message': 'Invalid minconf value', 'code':-105}
account_list = self.services[provider_id].listaccounts()
account_names = []
for account_name in account_list:
account_names.append(account_name)
if from_account in account_names and to_account in account_names:
            # both accounts have been found, perform the move
try:
reply = self.services[provider_id].move(from_account, to_account, amount, minconf, comment)
except JSONRPCException, e:
return e.error
except ValueError, e:
return {'message': e, 'code':-1}
return reply
else:
# account not found
return {'message': 'source or destination account not found', 'code':-103}
@timeit
def sendFrom(self, from_account, to_address, amount, provider_id, minconf=1, comment="", comment_to=""):
if type(from_account) not in [str, unicode]:
return {'message': 'Invalid input from account', 'code':-156}
if not to_address or not provider_id:
return {'message': 'Invalid input to account or address', 'code':-101}
if provider_id not in self.services.keys():
return {'message': 'Non-existing currency provider id %s' % provider_id, 'code':-100}
if not misc.isFloat(amount) or type(amount) is bool:
return {'message': 'Amount is not a number', 'code':-102}
if type(comment) not in [str, unicode] or type(comment_to) not in [str, unicode]:
return {'message': 'Comment is not valid', 'code':-104}
account_list = self.services[provider_id].listaccounts()
account_names = []
for account_name in account_list:
account_names.append(account_name)
if from_account in account_names:
# account given exists, continue
try:
reply = self.services[provider_id].sendfrom(from_account, to_address, amount, minconf, comment, comment_to)
except JSONRPCException, e:
return e.error
except ValueError, e:
return {'message': e, 'code':-1}
except Exception, e:
return e
return reply
else:
# account not found
return {'message': 'Source account not found', 'code':-106}
@timeit
def getRawTransaction(self, txid, provider_id):
'''
Return transaction details, like sender address
'''
if provider_id not in self.config.keys():
return {'message': 'Non-existing currency provider id %s' % provider_id, 'code':-121}
if self.config[provider_id]['enabled'] is not True:
return {'message': 'Currency service %s disabled for now' % provider_id, 'code':-150}
if type(txid) not in [str, unicode] or not len(txid):
return {'message': 'Transaction ID is not valid', 'code':-127}
transaction_details = None
try:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
transaction_details = self.services[provider_id].getrawtransaction(txid, 1)
except JSONRPCException:
return {}
except Exception:
return {}
return transaction_details
@timeit
def decodeRawTransaction(self, transaction, provider_id):
'''
Decode raw transaction
'''
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
return self.services[provider_id].decoderawtransaction(transaction)
@timeit
def getTransaction(self, txid, provider_id):
'''
Return a transaction
'''
if provider_id not in self.config.keys():
return {'message': 'Non-existing currency provider id %s' % provider_id, 'code':-121}
if self.config[provider_id]['enabled'] is not True:
return {'message': 'Currency service provider id %s disabled for now' % provider_id, 'code':-150}
if type(txid) not in [str, unicode] or not len(txid):
return {'message': 'Transaction ID is not valid', 'code':-127}
transaction_details = None
try:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
transaction_details = self.services[provider_id].gettransaction(txid)
except JSONRPCException:
return {}
except Exception:
return {}
return transaction_details
@timeit
def walletPassphrase(self, passphrase, provider_id):
'''
Unlock the wallet
'''
if type(passphrase) not in [str, unicode]:
return {'message': 'Incorrect data type for passphrase', 'code':-110}
if len(passphrase) < 1:
return {'message': 'No passphrase given', 'code':-111}
if provider_id not in self.services.keys():
return {'message': 'Invalid non-existing or disabled currency', 'code':-112}
if self.config[provider_id]['enabled'] is not True:
return {'message': 'Currency service provider id %s disabled for now' % provider_id, 'code':-150}
try:
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
unload_exit = self.services[provider_id].walletpassphrase(passphrase, 30)
else:
return False
except JSONRPCException, e:
return e.error
        except Exception, e:
            return {'message': str(e), 'code': -1}
if type(unload_exit) is dict and unload_exit.get('code', None) and unload_exit['code'] < 0:
# error occurred
return unload_exit
else:
return True
@timeit
def walletLock(self, provider_id):
'''
Lock wallet
'''
if provider_id not in self.services.keys():
return {'message': 'Invalid non-existing or disabled currency', 'code':-112}
if self.config.get(provider_id, False) and self.config[provider_id]['enabled'] is True:
self.services[provider_id].walletlock()
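# Illustrative walletconfig.config structure expected by Connector.__init__
# (the key names are taken from the lookups above; the values are placeholders):
#
#   config = [
#       {
#           'id': 1,
#           'enabled': True,
#           'name': 'Bitcoin wallet',
#           'currency': 'btc',
#           'rpcusername': 'rpcuser',
#           'rpcpassword': 'rpcpass',
#           'rpchost': '127.0.0.1',
#           'rpcport': '8332',
#       },
#   ]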
|
|
import os.path
import os
import logging
import socket
from base64 import b64encode
import sys
from urllib3 import PoolManager, proxy_from_url, Timeout
from urllib3.util.retry import Retry
from urllib3.util.ssl_ import (
ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
PROTOCOL_TLS, DEFAULT_CIPHERS,
)
from urllib3.exceptions import SSLError as URLLib3SSLError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from urllib3.exceptions import ConnectTimeoutError as URLLib3ConnectTimeoutError
from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError
try:
from urllib3.util.ssl_ import PROTOCOL_TLS_CLIENT, OP_NO_TICKET
except ImportError:
    # Fallback directly to ssl for versions of urllib3 before 1.26.
    # These constants are available in the standard library starting in Python 3.6.
from ssl import PROTOCOL_TLS_CLIENT, OP_NO_TICKET
try:
# Always import the original SSLContext, even if it has been patched
from urllib3.contrib.pyopenssl import orig_util_SSLContext as SSLContext
except ImportError:
from urllib3.util.ssl_ import SSLContext
import botocore.awsrequest
from botocore.vendored.six.moves.urllib_parse import unquote
from botocore.compat import filter_ssl_warnings, urlparse, ensure_bytes
from botocore.exceptions import (
ConnectionClosedError, EndpointConnectionError, HTTPClientError,
ReadTimeoutError, ProxyConnectionError, ConnectTimeoutError, SSLError,
InvalidProxiesConfigError
)
filter_ssl_warnings()
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem')
try:
from certifi import where
except ImportError:
def where():
return DEFAULT_CA_BUNDLE
def get_cert_path(verify):
if verify is not True:
return verify
cert_path = where()
logger.debug("Certificate path: {0}".format(cert_path))
return cert_path
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
""" This function is a vendored version of the same function in urllib3
We vendor this function to ensure that the SSL contexts we construct
always use the std lib SSLContext instead of pyopenssl.
"""
# PROTOCOL_TLS is deprecated in Python 3.10
if not ssl_version or ssl_version == PROTOCOL_TLS:
ssl_version = PROTOCOL_TLS_CLIENT
context = SSLContext(ssl_version)
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue urllib3#309)
options |= OP_NO_COMPRESSION
# TLSv1.2 only. Unless set explicitly, do not request tickets.
# This may save some bandwidth on wire, and although the ticket is encrypted,
# there is a risk associated with it being on wire,
# if the server is not rotating its ticketing keys properly.
options |= OP_NO_TICKET
context.options |= options
# Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
# necessary for conditional client cert authentication with TLS 1.3.
# The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
# versions of Python. We only enable on Python 3.7.4+ or if certificate
# verification is enabled to work around Python issue #37428
# See: https://bugs.python.org/issue37428
if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
context, "post_handshake_auth", None
) is not None:
context.post_handshake_auth = True
def disable_check_hostname():
if (
getattr(context, "check_hostname", None) is not None
): # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
# The order of the below lines setting verify_mode and check_hostname
# matter due to safe-guards SSLContext has to prevent an SSLContext with
# check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
# complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
# or not so we don't know the initial state of the freshly created SSLContext.
if cert_reqs == ssl.CERT_REQUIRED:
context.verify_mode = cert_reqs
disable_check_hostname()
else:
disable_check_hostname()
context.verify_mode = cert_reqs
# Enable logging of TLS session keys via defacto standard environment variable
# 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
if hasattr(context, "keylog_filename"):
sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
if sslkeylogfile and not sys.flags.ignore_environment:
context.keylog_filename = sslkeylogfile
return context
def ensure_boolean(val):
"""Ensures a boolean value if a string or boolean is provided
For strings, the value for True/False is case insensitive
"""
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
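# For example: ensure_boolean(True) -> True, ensure_boolean('TRUE') -> True,
# ensure_boolean('false') -> False.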
def mask_proxy_url(proxy_url):
"""
Mask proxy url credentials.
:type proxy_url: str
:param proxy_url: The proxy url, i.e. https://username:password@proxy.com
:return: Masked proxy url, i.e. https://***:***@proxy.com
"""
mask = '*' * 3
parsed_url = urlparse(proxy_url)
if parsed_url.username:
proxy_url = proxy_url.replace(parsed_url.username, mask, 1)
if parsed_url.password:
proxy_url = proxy_url.replace(parsed_url.password, mask, 1)
return proxy_url
class ProxyConfiguration(object):
"""Represents a proxy configuration dictionary and additional settings.
This class represents a proxy configuration dictionary and provides utility
    functions to retrieve well structured proxy urls and proxy headers from the
proxy configuration dictionary.
"""
def __init__(self, proxies=None, proxies_settings=None):
if proxies is None:
proxies = {}
if proxies_settings is None:
proxies_settings = {}
self._proxies = proxies
self._proxies_settings = proxies_settings
def proxy_url_for(self, url):
"""Retrieves the corresponding proxy url for a given url. """
parsed_url = urlparse(url)
proxy = self._proxies.get(parsed_url.scheme)
if proxy:
proxy = self._fix_proxy_url(proxy)
return proxy
def proxy_headers_for(self, proxy_url):
"""Retrieves the corresponding proxy headers for a given proxy url. """
headers = {}
username, password = self._get_auth_from_url(proxy_url)
if username and password:
basic_auth = self._construct_basic_auth(username, password)
headers['Proxy-Authorization'] = basic_auth
return headers
@property
def settings(self):
return self._proxies_settings
def _fix_proxy_url(self, proxy_url):
if proxy_url.startswith('http:') or proxy_url.startswith('https:'):
return proxy_url
elif proxy_url.startswith('//'):
return 'http:' + proxy_url
else:
return 'http://' + proxy_url
def _construct_basic_auth(self, username, password):
auth_str = '{0}:{1}'.format(username, password)
encoded_str = b64encode(auth_str.encode('ascii')).strip().decode()
return 'Basic {0}'.format(encoded_str)
def _get_auth_from_url(self, url):
parsed_url = urlparse(url)
try:
return unquote(parsed_url.username), unquote(parsed_url.password)
except (AttributeError, TypeError):
return None, None
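# A minimal usage sketch (the proxy host and credentials are placeholder values):
#
#   cfg = ProxyConfiguration(proxies={'https': 'user:pass@proxy.example.com:8080'})
#   cfg.proxy_url_for('https://s3.amazonaws.com/')
#   # -> 'http://user:pass@proxy.example.com:8080'
#   cfg.proxy_headers_for('http://user:pass@proxy.example.com:8080')
#   # -> {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}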
class URLLib3Session(object):
"""A basic HTTP client that supports connection pooling and proxies.
This class is inspired by requests.adapters.HTTPAdapter, but has been
boiled down to meet the use cases needed by botocore. For the most part
    this class matches the functionality of HTTPAdapter in requests v2.7.0
    (the same as our vendored version). The only major difference of note is
    that we currently do not support sending chunked requests. While requests
    v2.7.0 implemented this themselves, later versions of urllib3 support this
directly via a flag to urlopen so enabling it if needed should be trivial.
"""
def __init__(
self,
verify=True,
proxies=None,
timeout=None,
max_pool_connections=MAX_POOL_CONNECTIONS,
socket_options=None,
client_cert=None,
proxies_config=None,
):
self._verify = verify
self._proxy_config = ProxyConfiguration(
proxies=proxies, proxies_settings=proxies_config
)
self._pool_classes_by_scheme = {
'http': botocore.awsrequest.AWSHTTPConnectionPool,
'https': botocore.awsrequest.AWSHTTPSConnectionPool,
}
if timeout is None:
timeout = DEFAULT_TIMEOUT
if not isinstance(timeout, (int, float)):
timeout = Timeout(connect=timeout[0], read=timeout[1])
self._cert_file = None
self._key_file = None
if isinstance(client_cert, str):
self._cert_file = client_cert
elif isinstance(client_cert, tuple):
self._cert_file, self._key_file = client_cert
self._timeout = timeout
self._max_pool_connections = max_pool_connections
self._socket_options = socket_options
if socket_options is None:
self._socket_options = []
self._proxy_managers = {}
self._manager = PoolManager(**self._get_pool_manager_kwargs())
self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme
@property
def _proxies_kwargs(self):
proxies_settings = self._proxy_config.settings
proxy_ssl_context = self._setup_proxy_ssl_context(proxies_settings)
proxies_kwargs = {
'proxy_ssl_context': proxy_ssl_context,
'use_forwarding_for_https': proxies_settings.get(
'proxy_use_forwarding_for_https'),
}
return {k: v for k, v in proxies_kwargs.items() if v is not None}
def _get_pool_manager_kwargs(self, **extra_kwargs):
pool_manager_kwargs = {
'strict': True,
'timeout': self._timeout,
'maxsize': self._max_pool_connections,
'ssl_context': self._get_ssl_context(),
'socket_options': self._socket_options,
'cert_file': self._cert_file,
'key_file': self._key_file,
}
pool_manager_kwargs.update(**extra_kwargs)
return pool_manager_kwargs
def _get_ssl_context(self):
return create_urllib3_context()
def _get_proxy_manager(self, proxy_url):
if proxy_url not in self._proxy_managers:
proxy_headers = self._proxy_config.proxy_headers_for(proxy_url)
proxy_manager_kwargs = self._get_pool_manager_kwargs(
proxy_headers=proxy_headers)
proxy_manager_kwargs.update(**self._proxies_kwargs)
proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs)
proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme
self._proxy_managers[proxy_url] = proxy_manager
return self._proxy_managers[proxy_url]
def _path_url(self, url):
parsed_url = urlparse(url)
path = parsed_url.path
if not path:
path = '/'
if parsed_url.query:
path = path + '?' + parsed_url.query
return path
def _setup_ssl_cert(self, conn, url, verify):
if url.lower().startswith('https') and verify:
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = get_cert_path(verify)
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
def _setup_proxy_ssl_context(self, proxies_settings):
proxy_ca_bundle = proxies_settings.get('proxy_ca_bundle')
proxy_cert = proxies_settings.get('proxy_client_cert')
if proxy_ca_bundle is None and proxy_cert is None:
return None
context = self._get_ssl_context()
try:
# urllib3 disables this by default but we need
# it for proper proxy tls negotiation.
context.check_hostname = True
if proxy_ca_bundle is not None:
context.load_verify_locations(cafile=proxy_ca_bundle)
if isinstance(proxy_cert, tuple):
context.load_cert_chain(proxy_cert[0], keyfile=proxy_cert[1])
elif isinstance(proxy_cert, str):
context.load_cert_chain(proxy_cert)
return context
except (IOError, URLLib3SSLError) as e:
raise InvalidProxiesConfigError(error=e)
def _get_connection_manager(self, url, proxy_url=None):
if proxy_url:
manager = self._get_proxy_manager(proxy_url)
else:
manager = self._manager
return manager
def _get_request_target(self, url, proxy_url):
has_proxy = proxy_url is not None
if not has_proxy:
return self._path_url(url)
# HTTP proxies expect the request_target to be the absolute url to know
# which host to establish a connection to. urllib3 also supports
# forwarding for HTTPS through the 'use_forwarding_for_https' parameter.
proxy_scheme = urlparse(proxy_url).scheme
using_https_forwarding_proxy = (
proxy_scheme == 'https' and
self._proxies_kwargs.get('use_forwarding_for_https', False)
)
if using_https_forwarding_proxy or url.startswith('http:'):
return url
else:
return self._path_url(url)
def _chunked(self, headers):
transfer_encoding = headers.get('Transfer-Encoding', b'')
transfer_encoding = ensure_bytes(transfer_encoding)
return transfer_encoding.lower() == b'chunked'
def send(self, request):
try:
proxy_url = self._proxy_config.proxy_url_for(request.url)
manager = self._get_connection_manager(request.url, proxy_url)
conn = manager.connection_from_url(request.url)
self._setup_ssl_cert(conn, request.url, self._verify)
if ensure_boolean(
os.environ.get('BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER', '')
):
# This is currently an "experimental" feature which provides
# no guarantees of backwards compatibility. It may be subject
# to change or removal in any patch version. Anyone opting in
# to this feature should strictly pin botocore.
host = urlparse(request.url).hostname
conn.proxy_headers['host'] = host
request_target = self._get_request_target(request.url, proxy_url)
urllib_response = conn.urlopen(
method=request.method,
url=request_target,
body=request.body,
headers=request.headers,
retries=Retry(False),
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=self._chunked(request.headers),
)
http_response = botocore.awsrequest.AWSResponse(
request.url,
urllib_response.status,
urllib_response.headers,
urllib_response,
)
if not request.stream_output:
# Cause the raw stream to be exhausted immediately. We do it
# this way instead of using preload_content because
# preload_content will never buffer chunked responses
http_response.content
return http_response
except URLLib3SSLError as e:
raise SSLError(endpoint_url=request.url, error=e)
except (NewConnectionError, socket.gaierror) as e:
raise EndpointConnectionError(endpoint_url=request.url, error=e)
except ProxyError as e:
raise ProxyConnectionError(proxy_url=mask_proxy_url(proxy_url), error=e)
except URLLib3ConnectTimeoutError as e:
raise ConnectTimeoutError(endpoint_url=request.url, error=e)
except URLLib3ReadTimeoutError as e:
raise ReadTimeoutError(endpoint_url=request.url, error=e)
except ProtocolError as e:
raise ConnectionClosedError(
error=e,
request=request,
endpoint_url=request.url
)
except Exception as e:
message = 'Exception received when sending urllib3 HTTP request'
logger.debug(message, exc_info=True)
raise HTTPClientError(error=e)
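

# A minimal usage sketch, not part of botocore itself: it assumes this class
# is the one exposed as botocore.httpsession.URLLib3Session and that send()
# accepts a prepared botocore.awsrequest.AWSRequest; the URL and headers
# below are purely illustrative.
if __name__ == '__main__':
    from botocore.awsrequest import AWSRequest

    session = URLLib3Session(timeout=(5, 10))  # (connect, read) timeouts
    request = AWSRequest(
        method='GET',
        url='https://example.com/',
        headers={'Accept': 'text/html'},
    )
    # send() takes the prepared request and returns an AWSResponse.
    response = session.send(request.prepare())
    print(response.status_code, len(response.content))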
|
|
# -*- coding: utf-8 -*-
#
# field.py
# The collection of field definitions for coregeo
#
# Copyright 2010 Atsushi Shibata
#
"""
The collection of field definitions for aha
$Id: field.py 654 2010-08-23 02:02:08Z ats $
"""
__author__ = 'Atsushi Shibata <shibata@webcore.co.jp>'
__docformat__ = 'plaintext'
__licence__ = 'BSD'
__all__ = ('BaseField', 'TextField', 'HiddenField', 'RadioField',
'CheckboxGroup', 'PasswordField', 'ButtonField',
'CheckboxField', 'SelectField', 'TextArea', 'RichText',
'DescriptionField', 'FileField', 'ImageField')
import os
from lib import formencode
v = formencode.validators
from handler import MediaHandler, templatehandler
th = templatehandler
BASE_PATH = os.path.dirname(__file__)
class FieldMixin(object):
"""
A mixin class for the Field.
"""
def get_title(self):
"""
A method to get title of the field.
"""
return self.title
def get_desc(self):
"""
A method to get description of the field.
"""
return self.desc
def set_name(self, name):
"""
A method to set the name of the field.
        :param name: A name for the field.
"""
self.name = name
def get_name(self):
"""
A method to get the name of the field.
"""
if not self.name:
raise AttributeError('The field(%s) has no name' % self)
return self.name
def set_id(self, in_id):
"""
A method to set id of the field.
        :param in_id: An id for the field.
"""
self.id = in_id
def get_id(self):
"""
A method to get id of the field.
"""
if not self.id:
raise AttributeError('The field(%s) has no id' % self)
return self.id
def keyvalue2str(k, v):
"""
    A function to convert a key-value combination to a string.
"""
body = ''
if isinstance(v, int):
body = "%s = %s " % (k, v)
else:
body = """%s = "%s" """ % (k, v)
return body
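# For example (illustrative): keyvalue2str('class', 'foo') returns
# 'class = "foo" ' while keyvalue2str('size', 3) returns 'size = 3 ',
# i.e. integer values are emitted unquoted and everything else is quoted.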
class BaseField(FieldMixin, MediaHandler):
"""
    A base class of fields, handling basic functionality shared by all fields.
    The class has some attributes::
    :DEFAULT_ENGINE: A template engine used to render results for fields.
    :USE_FIELD_TITLE: A flag to determine whether to write the title
    in the rendering result.
    :RENDER_WRAPPER: A flag to determine whether to write a wrapper
    (label, description, etc.) around the rendering result.
"""
DEFAULT_ENGINE = 'mako'
USE_FIELD_TITLE = True
RENDER_WRAPPER = True
counter = 0
def __init__(self):
"""
Initialization method.
"""
self._counter = BaseField.counter
self.parent = None
BaseField.counter += 1
def __repr__(self):
"""
A method to return standard class representation.
"""
return "<%s name = '%s'>" % (self.__class__.__name__, self.name)
def set_parent(self, parent):
"""
A method to set parent form.
"""
self.parent = parent
def get_parent(self):
"""
A method to get parent form.
"""
return self.parent
def render_body(self, value = None, engine = '', translate = unicode):
"""
        An abstract method to render the field and return the rendered string.
        Subclasses are expected to override this; the base implementation
        simply returns an empty string.
        """
        return ''
class TextField(BaseField):
"""
A field class representing simple text field.
Initialization takes following arguments.
:param name: A name of the field
:param enginename: A template engine to render result.
:param title: A title of the field.
:param desc: A description of the field.
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
    They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use)Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
"""
TYPE = 'text'
# a flag to show the field requires whole posted value on validation
REQUIRE_VALUES_ON_VALIDATE = False
FIELD_TEMPLATE = """<input type = '%(TYPE)s' %(args)s />"""
def __init__(self, name = None, enginename = '', title = '', desc = '',
args = {}, objects = [], required = False, default = '',
validator = None, generate_id = False, collapsable = False):
"""
Initialization function.
"""
self.name = name
self.title = title
self.desc = desc
self.args = args
self.objects = objects
self.validator = validator
self.collapsable = collapsable
if required:
if isinstance(self.validator, (list, tuple)):
self.validator+= (v.NotEmpty(), )
elif not self.validator:
self.validator = v.NotEmpty()
else:
self.validator = [self.validator]+[v.NotEmpty()]
self.required = required
self.default = default
if enginename:
self.enginename = enginename
else:
self.enginename = self.DEFAULT_ENGINE
if hasattr(self, 'OBJECTS'):
objects = self.OBJECTS[:]
MediaHandler.__init__(self, objects)
self.id = None
if generate_id:
# TBD
pass
BaseField.__init__(self)
def expand_args(self, value = None,
except_value = False, except_name = False):
"""
A method to expand attributes in HTML.
        The args dict {'class': 'foo', 'style': 'float: right;'} is expanded as
        "class='foo' style='float: right;'".
        The attributes self.name and self.id are also expanded, unless excluded.
"""
argstr = ''
if self.name and not except_name:
argstr+= keyvalue2str('name', self.name)
if self.id:
argstr+= keyvalue2str('id', self.id)
for k in sorted(self.args):
if k != 'value' and self.args[k]:
argstr+= keyvalue2str(k, self.args[k])
if not except_value:
if value:
argstr+= keyvalue2str('value', value)
elif self.default:
argstr+= keyvalue2str('value', self.default)
return argstr
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return result as a string.
"""
context = {
'args':self.expand_args(value = value),
'title':self.title,
'TYPE':self.TYPE
}
return self.FIELD_TEMPLATE % context
def validate(self, input_value = None):
"""
        A method to validate the input value.
        Returns the validated (cast) value and an error, if any.
"""
value = input_value
try:
if not self.validator:
return input_value, None
v = self.validator
if isinstance(v, (list, tuple)):
iv = input_value
for i in self.validator:
iv = i.to_python(iv)
value = iv
else:
value = v.to_python(input_value)
except formencode.Invalid, e:
return None, e
return value, None
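# A minimal usage sketch (illustrative only; it assumes the formencode
# validators imported above as ``v`` are available and that the posted value
# comes from a form):
#
#     field = TextField(name='email', title='E-mail', required=True,
#                       validator=v.Email())
#     html = field.render_body(value='someone@example.com')
#     value, error = field.validate('someone@example.com')
#
# render_body() returns the bare <input> markup, and validate() returns the
# cast value together with a formencode error (or None on success).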
class PasswordField(TextField):
"""
A field class representing password field.
"""
TYPE = 'password'
# a flag to show the field requires whole posted value on validation
REQUIRE_VALUES_ON_VALIDATE = False
class ButtonField(TextField):
"""
A field class representing button field.
"""
TYPE = 'button'
USE_FIELD_TITLE = False
# a flag to show the field requires whole posted value on validation
REQUIRE_VALUES_ON_VALIDATE = False
FIELD_TEMPLATE = """<input type = "%(TYPE)s" %(args)s value = "%(title)s"/>"""
class CheckboxField(TextField):
"""
A field class representing checkbox field.
Initialization method takes following arguments.
:param name: A name of the field
:param enginename: A template engine to render result.
:param title: A title of the field.
:param desc: A description of the field.
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
    They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use)Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
"""
TYPE = 'checkbox'
FIELD_TEMPLATE = ("""<input type = "%(TYPE)s" %(args)s /> %(field_desc)s""")
def __init__(self, name = None, enginename = '', title = '', desc = '',
field_desc = '', value = '', args = {}, objects = [],
required = False, default = '',
validator = None, generate_id = False, collapsable = False):
"""
        An initialization method.
"""
self.value = value
self.field_desc = field_desc
TextField.__init__(self, name, enginename, title, desc,
                args, objects, required, default, validator, generate_id,
collapsable)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string.
"""
context = {}
context['TYPE'] = self.TYPE
context['args'] = self.expand_args(except_value = True)
if self.value:
context['args'] += ' '+keyvalue2str('value', self.value)
if value:
context['args'] += ' '+keyvalue2str('checked', 'checked')
context['field_desc'] = self.field_desc
tbody = self.FIELD_TEMPLATE
return tbody % context
class HiddenField(TextField):
"""
A field class representing hidden field.
"""
RENDER_WRAPPER = False
TYPE = 'hidden'
FIELD_TEMPLATE = """<input type = "hidden" %(args)s />"""
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string.
"""
context = {}
context['args'] = self.expand_args(value = value or self.default)
return self.FIELD_TEMPLATE % context
class RadioField(TextField):
"""
A field class representing radio button field.
Initialization takes following arguments.
:param name: A name of the field
:param enginename: A template engine to render result.
:param title: A title of the field.
:param desc: A description of the field.
    :param values: Values used to make the radio buttons; must be a
        sequence of (title, value) pairs, such as (('Female', 1), ('Male', 2), ('Gay', 3))
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
    They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use)Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
    :param vertical: A flag to determine whether the buttons are laid out vertically.
"""
TYPE = 'radio'
FIELD_TEMPLATE = ("""%for t, v in values:\n"""
"""<%if v == value:\n"""
""" checked = 'checked'\n"""
"""else:\n"""
""" checked = ''\n"""
"""%>\n"""
"""<input type = 'radio' ${args} value = '${v}'"""
""" ${checked}>"""
"""<div class = 'multi-title'>${t}</div>\n"""
""" %if vertical:\n"""
""" <br />\n"""
""" %endif\n"""
"""%endfor""")
SELECT_ATTR = 'checked'
FLID = 'RadioFieldFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def __init__(self, name = None, enginename = '', title = '', desc = '',
values = [], args = {}, objects = [], required = False, default = '',
validator = None, generate_id = False, collapsable = False,
vertical = False):
"""
Initialization function.
"""
self.vertical = vertical
if not values:
raise ValueError("The argument 'values' must be given")
self.values = values
TextField.__init__(self, name, enginename, title, desc,
args, objects, required, default,
validator, generate_id, collapsable)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string.
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['values'] = self.values
context['value'] = value or self.default
context['vertical'] = self.vertical
return templatehandler.render(context, self.enginename, tid = self.FLID)
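# A minimal usage sketch (illustrative only): ``values`` is a sequence of
# (title, value) pairs, and the button whose value equals the rendered value
# comes out checked.
#
#     gender = RadioField(name='gender', title='Gender',
#                         values=(('Female', 1), ('Male', 2)))
#     html = gender.render_body(value=1)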
class CheckboxGroup(TextField):
"""
    A field class representing a group of checkbox fields.
Initialization takes following arguments.
:param name: A name of the field
:param enginename: A template engine to render result.
:param title: A title of the field.
:param desc: A description of the field.
    :param values: Values used to make the checkboxes; must be a
        sequence of (title, value) pairs, such as (('Female', 1), ('Male', 2), ('Gay', 3))
:param args: Arguments to be rendered in response.
:param objects: Files such as css, js to be used for the field.
    They are rendered along with the field.
:param required: A flag to determine the field is required or not.
:param default: A default value of the field.
:param validator: A validator function to be used for the input.
:param generate_id: (Not in use)Flag to determine if the id
is to be generated automatically.
:param collapsable: A flag to determine
if the field is collapsable or not.
    :param vertical: A flag to determine whether the checkboxes are laid out vertically.
"""
    TYPE = 'checkbox'
REQUIRE_VALUES_ON_VALIDATE = True
FIELD_TEMPLATE = ("""%for t, v in values:\n"""
"""<%if v in value:\n"""
""" selected = 'checked'\n"""
"""else:\n"""
""" selected = ''\n"""
"""%>\n"""
"""<input type = "checkbox" ${args} value = "${v}" """
""" name = "${name}_${v}" ${selected}>"""
"""<span class = "multi-title">${t}</span>\n"""
""" %if vertical:\n"""
""" <br />\n"""
""" %endif\n"""
"""%endfor""")
SELECT_ATTR = 'checked'
FLID = 'CheckboxGroupFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def __init__(self, name = None, enginename = '', title = '', desc = '',
values = [], args = {}, objects = [], required = False, default = '',
validator = None, generate_id = False, vertical = False,
collapsable = False):
"""
Initialization function.
"""
self.vertical = vertical
if not values:
raise ValueError("The argument 'values' must be given")
self.values = values
TextField.__init__(self, name, enginename, title, desc,
                args, objects, required, default, validator, generate_id,
collapsable)
def validate(self, input_value = None):
"""
        A method to validate the input values.
        Returns a tuple containing a single (name, values, error) tuple.
"""
values = []
pv = ['%s_%s' % (self.name, x[1]) for x in self.values]
for k in input_value:
if k in pv:
values.append(input_value[k])
if input_value.get(self.name, None):
values.extend(input_value[self.name])
if not self.validator:
return ((self.name, values, None), )
try:
v_v = []
for ov in values:
v = self.validator
if isinstance(v, (list, tuple)):
iv = ov
for i in self.validator:
iv = i.to_python(iv)
value = iv
else:
value = v.to_python(ov)
v_v.append(value)
except formencode.Invalid, e:
return ((self.name, None, e), )
return ((self.name, v_v, None), )
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True, except_name = True)
context['values'] = [(x, unicode(y)) for x, y in self.values]
if value:
context['value'] = [unicode(x) for x in value]
else:
context['value'] = []
context['name'] = self.name
context['vertical'] = self.vertical
return templatehandler.render(context, self.enginename, tid = self.FLID)
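# A minimal usage sketch (illustrative only). Because
# REQUIRE_VALUES_ON_VALIDATE is True, validate() is handed the whole posted
# mapping and collects the keys named '<name>_<value>':
#
#     colors = CheckboxGroup(name='colors', title='Colors',
#                            values=(('Red', 1), ('Blue', 2)))
#     results = colors.validate({'colors_1': 1})
#     # -> (('colors', [1], None),)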
class SelectField(RadioField):
"""
A field class representing select field.
"""
SELECT_TEMPLATE = ("""<select ${args}>\n"""
"""% for t, v in values:\n"""
"""<%if v == value:\n"""
""" selected = 'selected'\n"""
"""else:\n"""
""" selected = ''\n"""
"""%>\n"""
""" <option value = "${v}" ${selected}>"""
""" ${t} </option>\n"""
"""% endfor\n"""
"""</select>""")
FLID = 'SelectFieldSELECT_TEMPLATE'
th.get_template(string = SELECT_TEMPLATE, tid = FLID)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['values'] = self.values
context['value'] = value or self.default
return templatehandler.render(context, self.enginename, tid = self.FLID)
class TextArea(TextField):
"""
A field class representing text area field.
"""
FIELD_TEMPLATE = """<textarea ${args}>${value | h}</textarea>"""
FLID = 'TextAreaFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
if value:
context['value'] = value
else:
context['value'] = ''
tbody = self.FIELD_TEMPLATE
return templatehandler.render(context, self.enginename, tid = self.FLID)
class RichText(TextField):
"""
A field class representing text area field that has WYSIWYG editor.
"""
FIELD_TEMPLATE = """
<script type = "text/javascript">
tinyMCE.init({
mode : %(mode)s ,
theme : "advanced",
plugins : "table,inlinepopups",
theme_advanced_buttons1 : "formatselect,styleselect, |,bold,italic,underline,separator,strikethrough,justifyleft,justifycenter,justifyright, justifyfull,blockquote,bullist,numlist,table,|,undo,redo,link,unlink,image,|,code",
theme_advanced_buttons2 : "",
theme_advanced_buttons3 : "",
theme_advanced_toolbar_location : "top",
theme_advanced_toolbar_align : "left",
theme_advanced_statusbar_location : "bottom",
theme_advanced_resizing : true,
theme_advanced_styles : "code=code;float-right=floatright;float-left=floatleft",
theme_advanced_blockformats : "p,h1,h2,h3,h4,blockquote,div",
relative_urls : false,
remove_script_host : false,
extended_valid_elements : "iframe[*]",
});
</script>
<textarea %(args)s >%(value)s</textarea>
"""
OBJECTS = (('/js/tiny_mce/tiny_mce.js', 'text/javascript'),)
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
id = self.args.get('id', '')
if id:
context['mode'] = '"exact", "elements" : "%s"' % id
else:
context['mode'] = '"textareas"'
if value:
context['value'] = value
else:
context['value'] = ''
tbody = self.FIELD_TEMPLATE
return self.FIELD_TEMPLATE % context
class DescriptionField(TextField):
"""
A field class representing description field
"""
FIELD_TEMPLATE = """<p %(args)s >%(message)s</p>"""
USE_FIELD_TITLE = False
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(value = value, except_name = True)
context['message'] = self.title
return self.FIELD_TEMPLATE % context
class FileField(TextField):
"""
A field class representing file field, used for uploading file.
"""
TYPE = 'file'
FIELD_TEMPLATE = ("""<input type = "%(TYPE)s" %(args)s />\n"""
"""%(disable)s"""
)
REPLACE_PREFIX = '__replace_field_'
def get_desc(self):
"""
a method to return description.
"""
return self.desc
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['title'] = self.title
context['TYPE'] = self.TYPE
if value is None:
context['disable'] = ''
else:
a = {'name':self.REPLACE_PREFIX+self.name,
}
astr = ''
for k in a:
astr+= keyvalue2str(k, a[k])
t = '<input type = "checkbox" %s />replace\n'
context['disable'] = t % astr
return self.FIELD_TEMPLATE % context
def validate(self, input_value = None):
"""
        A method to validate the input value.
        Returns the validated value and an error, if any.
"""
value = input_value
try:
v = self.validator
if v:
if isinstance(v, (list, tuple)):
iv = input_value
for i in self.validator:
iv = i.to_python(iv)
value = iv
else:
value = v.to_python(input_value)
except formencode.Invalid, e:
return None, e
return value, None
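# A minimal usage sketch (illustrative only): when render_body() receives an
# existing value, it appends a checkbox named '__replace_field_<name>' so the
# user can ask for the already-uploaded file to be replaced:
#
#     upload = FileField(name='avatar', title='Avatar')
#     html = upload.render_body(value='avatar.png')
#     # html now ends with a checkbox input named "__replace_field_avatar"
#     # followed by the word 'replace'.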
class ImageField(FileField):
"""
A field class representing image field
It displays image using value as path.
"""
TYPE = 'file'
FIELD_TEMPLATE = ("""%if value:\n"""
"""<img src = "${value}" height = ${height} /><br />\n"""
"""%endif:\n"""
"""<input type = '${TYPE}' ${args} />\n"""
"""%if cbargs != 'disabled':\n"""
"""<input type = "checkbox" ${cbargs} />Delete Image\n"""
"""%endif\n"""
)
FLID = 'ImageFieldFIELD_TEMPLATE'
th.get_template(string = FIELD_TEMPLATE, tid = FLID)
ERASE_PREFIX = '__replace_field_'
def render_body(self, value = None, engine = '', translate = unicode):
"""
A method to render field and return rendered string
"""
context = {}
context['args'] = self.expand_args(except_value = True)
context['title'] = self.title
context['TYPE'] = self.TYPE
if value is None:
context['cbargs'] = 'disabled'
else:
a = {'name':self.ERASE_PREFIX+self.name,
}
astr = ''
for k in a:
astr+= keyvalue2str(k, a[k])
context['cbargs'] = astr
context['value'] = str(value)
context['height'] = 48
tbody = self.FIELD_TEMPLATE
return templatehandler.render(context, self.enginename, tid = self.FLID)
|
|
"""
OptMAGE Unit Tests
"""
import os
import unittest
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from optmage.oligo_designer import DEFAULT_GENOME
from optmage.oligo_designer import DEFAULT_OLIGO_SIZE
from optmage.oligo_designer import DEFAULT_MIN_SS_DG
from optmage.oligo_designer import DEFAULT_MUT_LOC_MAX
from optmage.oligo_designer import DEFAULT_NUM_PHOSPHOROTHIOATE
from optmage.oligo_designer import DEFAULT_REPLICATION_ORIGIN
from optmage.oligo_designer import DEFAULT_REPLICATION_TERMINUS
from optmage.oligo_designer import OligoGenerator
from optmage.oligo_designer import OligoTarget
from optmage.oligo_designer import OptMAGEConfig
PWD = os.path.dirname(os.path.realpath(__file__))
TEST_DATA = os.path.join(PWD, 'test_data')
DEFAULT_REF_GENOME = os.path.join(TEST_DATA, 'mg1655.fasta')
class MockObject(object):
"""Generic test object for passing to unit tests.
"""
pass
class TestOptMAGE(unittest.TestCase):
"""Tests for optMAGE.
"""
def setUp(self):
"""General setUp routines.
"""
# Get the reference genome.
with open(DEFAULT_REF_GENOME) as fh:
self.ref_genome = SeqIO.read(fh, 'fasta')
# Create a default config.
mock_args = MockObject()
mock_args.oligo_size = DEFAULT_OLIGO_SIZE
mock_args.min_ss_dG = DEFAULT_MIN_SS_DG
mock_args.mut_loc_max = DEFAULT_MUT_LOC_MAX
mock_args.num_thio = DEFAULT_NUM_PHOSPHOROTHIOATE
mock_args.manually_calc_replichore = False
mock_args.ref_genome = DEFAULT_GENOME
mock_args.replication_origin = DEFAULT_REPLICATION_ORIGIN
mock_args.replication_terminus = DEFAULT_REPLICATION_TERMINUS
self.config = OptMAGEConfig.build_from_args(mock_args)
def test_oligo_generator__get_candidate_block_seq(self):
"""Tests the part of the mage generator that determines the candidate
block seq.
"""
OLIGO_SIZE = 90
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 20
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
# Set this arbitrarily low so we pretty much guarantee we get the
# oligo sequence centered around the mutation.
self.config.min_ss_dG = -200
# Ignore phosphorothioates for now.
self.config.num_phosphorothioate_bonds = 0
oligo_generator = OligoGenerator(self.config)
# Replichore 1 and negative strand means the anti-sense strand will
# be targeted, so the oligo will have a positive sense.
REF = 'C'
params = {
'target_id': 'test',
'replichore': 'NA',
'strand': '+1',
'start': 2216229,
'end': 2216230,
'mutation_type': 'R'
}
oligo_target = OligoTarget(self.config, params)
# Test getting the candidate block sequence.
formatted_block_seq = str(
oligo_generator.get_candidate_block_seq(oligo_target)).upper()
UPSTREAM_OF_MUT = 'CAACAACCAGCGCCACAGCGGATGCGTGGAGATTCGGCGGATGGCATCGCTACAGGCCAGCAATGCCAG'
DOWNSTREAM_OF_MUT = 'GCCGCAGCCAGCCAGAAACCACTGCCGAGGCTGGTACGCGCCAGCGCACTGCCATTTTGCGCCAGTTG'
EXPECTED_BLOCK_SEQ = UPSTREAM_OF_MUT + REF + DOWNSTREAM_OF_MUT
self.assertEqual(len(EXPECTED_BLOCK_SEQ), len(formatted_block_seq))
self.assertEqual(EXPECTED_BLOCK_SEQ, formatted_block_seq)
def test_oligo_generator__determine_oligo_from_block(self):
"""Tests that getting the oligo from block seq works.
"""
OLIGO_SIZE = 90
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 20
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
# Ensures full space is explored
self.config.min_ss_dG = 100
# Ignore phosphorothioates for now.
self.config.num_phosphorothioate_bonds = 0
oligo_generator = OligoGenerator(self.config)
        # Use seq centered around position 2216229.
REF = 'C'
UPSTREAM_OF_MUT = 'AACAACCAGCGCCACAGCGGATGCGTGGAGATTCGGCGGATGGCATCGCTACAGGCCAGCAATGCCAG'
DOWNSTREAM_OF_MUT = 'GCCGCAGCCAGCCAGAAACCACTGCCGAGGCTGGTACGCGCCAGCGCACTGCCATTTTGCGCCAGTTGG'
BLOCK_SEQ = UPSTREAM_OF_MUT + REF + DOWNSTREAM_OF_MUT
best_oligo_candidate, midpoint_range_explored = (
oligo_generator.determine_oligo_from_block(BLOCK_SEQ))
# Assert that we explored the full space (100 min dG ensures this.)
# NOTE: We are aware we miss one, but okay with it for simplicity.
EXPECTED_MIDPOINT_RANGE = [45, len(BLOCK_SEQ) - 44]
self.assertEqual(EXPECTED_MIDPOINT_RANGE, midpoint_range_explored)
def test_oligo_generator__from_reference__target_forward_strand(self):
"""Test for synthesizing an oligo that targets the forward strand,
meaning a reverse-sense oligo.
"""
OLIGO_SIZE = 90
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 20
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
# Set this arbitrarily low so we pretty much guarantee we get the
# oligo sequence centered around the mutation.
self.config.min_ss_dG = -200
# Ignore phosphorothioates for now.
self.config.num_phosphorothioate_bonds = 0
oligo_generator = OligoGenerator(self.config)
# Replichore 1 and negative strand means the anti-sense strand will
# be targeted, so the oligo will have a positive sense.
params = {
'target_id': 'r_3_set1_der_2635317',
'replichore': 'NA',
'strand': -1,
'start': 2635317,
'end': 2635318,
'mutation_type': 'R'
}
oligo_target = OligoTarget(self.config, params)
# Test getting the candidate block sequence.
formatted_block_seq = str(
oligo_generator.get_candidate_block_seq(oligo_target)).upper()
EXPECTED_BLOCK_SEQ = 'CGACCGTACTTACGGTCACGAGTCAGACCCGGGAAATCCGCAACCAGCGCATCTCGGGTGCGAGTTAGACGGTTAAATAACGTGGATTTTCCTACGTTAGGGCGCCCGACAAGCGCGACCACAGGTACCATGTTTAAA'
self.assertEqual(EXPECTED_BLOCK_SEQ, formatted_block_seq)
# Test getting the actual oligo seq.
formatted_oligo_seq = str(
oligo_generator.generate_oligo(oligo_target).oligo_seq).upper()
EXPECTED_OLIGO_SEQ = 'AGACCCGGGAAATCCGCAACCAGCGCATCTCGGGTGCGAGTTAGACGGTTAAATAACGTGGATTTTCCTACGTTAGGGCGCCCGACAAGC'
self.assertEqual(OLIGO_SIZE, len(formatted_oligo_seq))
self.assertEqual(EXPECTED_OLIGO_SEQ, formatted_oligo_seq)
def test_oligo_generator__from_reference__target_reverse_strand(self):
"""Test for synthesizing an oligo that targets the reverse strand,
meaning a forward-sense oligo.
"""
OLIGO_SIZE = 90
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 20
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
# Set this arbitrarily low so we pretty much guarantee we get the
# oligo sequence centered around the mutation.
self.config.min_ss_dG = -200
# Ignore phosphorothioates for now.
self.config.num_phosphorothioate_bonds = 0
oligo_generator = OligoGenerator(self.config)
# Replichore 1 and positive strand means the anti-sense strand will
# be targeted, so the oligo will have a positive sense.
params = {
'target_id': 'r_1_set1_ftsA_104352',
'replichore': 'NA',
'strand': '+1',
'start': 104352,
'end': 104353,
'mutation_type': 'R'
}
oligo_target = OligoTarget(self.config, params)
# Test getting the candidate block sequence.
formatted_block_seq = str(
oligo_generator.get_candidate_block_seq(oligo_target)).upper()
EXPECTED_BLOCK_SEQ = 'CCGGATTCTTGATCCCTTCCTGATAGTCAATCGCATACTCTTGCGGGATCACATGCAGCACACGATGCTCATCGCGCACACGCACCGATTTCGCGGTATGGACGACGTTTTCCACATCTTCTTGCGTCACTTCTTCTT'
self.assertEqual(EXPECTED_BLOCK_SEQ, formatted_block_seq)
# Test getting the actual oligo seq.
formatted_oligo_seq = str(
oligo_generator.generate_oligo(oligo_target).oligo_seq).upper()
EXPECTED_OLIGO_SEQ = 'AGTCAATCGCATACTCTTGCGGGATCACATGCAGCACACGATGCTCATCGCGCACACGCACCGATTTCGCGGTATGGACGACGTTTTCCA'
self.assertEqual(OLIGO_SIZE, len(formatted_oligo_seq))
self.assertEqual(EXPECTED_OLIGO_SEQ, formatted_oligo_seq)
def test_determine_oligo_sense(self):
"""Tests for OligoGenerator.determine_oligo_sense().
"""
OLIGO_GENERATOR = OligoGenerator(self.config)
# Replichore = 1, strand = +1.
self.assertEqual(-1, OLIGO_GENERATOR.determine_oligo_sense(
OligoTarget(self.config, {
'target_id': 'r_1_set1_ftsA_104352',
'replichore': 'NA',
'strand': '+1',
'start': 104352,
'end': 104353,
'mutation_type': 'R'
})))
# Replichore = 1, strand = -1.
self.assertEqual(-1, OLIGO_GENERATOR.determine_oligo_sense(
OligoTarget(self.config, {
'target_id': 'r_8_set1_surA_53597',
'replichore': 'NA',
'strand': '-1',
'start': 53597,
'end': 53598,
'mutation_type': 'R'
})))
# Replichore = 2, strand = +1.
self.assertEqual(1, OLIGO_GENERATOR.determine_oligo_sense(
OligoTarget(self.config, {
'target_id': 'r_102_set5_csiR_2794168',
'replichore': 'NA',
'strand': +1,
'start': 2794168,
'end': 2794169,
'mutation_type': 'R'
})))
# Replichore = 2, strand = -1.
self.assertEqual(1, OLIGO_GENERATOR.determine_oligo_sense(
OligoTarget(self.config, {
'target_id': 'r_3_set1_der_2635317',
'replichore': 'NA',
'strand': -1,
'start': 2635317,
'end': 2635318,
'mutation_type': 'R'
})))
    def test_free_energy_optimization(self):
"""Tests that the oligo search optimizes the free energy and scans
both left and right of the mutation midpoint.
"""
self.config.should_calc_replichore = False
OLIGO_SIZE = 20
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 2
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
OLIGO_GENERATOR = OligoGenerator(self.config)
### Test that the window slides downstream.
RAW_SEQ_1 = 'GGGGGGGGGGCCCCCCCCCCCCCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
SEQ_OBJ_1 = Seq(RAW_SEQ_1, generic_dna)
GENOME_RECORD_1 = SeqRecord(SEQ_OBJ_1)
self.config.genome_record = GENOME_RECORD_1
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2,
'strand': -1,
'start': 35,
'end': 36,
'mutation_type': 'R'
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertTrue(oligo_result.ss_dG > DEFAULT_MIN_SS_DG)
EXPECTED_SEQ = 'A*A*AAAAAAAAAAAAAAAAAA'
self.assertEqual(EXPECTED_SEQ, str(oligo_result.oligo_seq).upper())
### Test that the window slides upstream.
RAW_SEQ_2 = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGGGGGGGGGGCCCCCCCCCCCCC'
SEQ_OBJ_2 = Seq(RAW_SEQ_2, generic_dna)
GENOME_RECORD_2 = SeqRecord(SEQ_OBJ_2)
self.config.genome_record = GENOME_RECORD_2
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2,
'strand': -1,
'start': 22,
'end': 23,
'mutation_type': 'R'
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertTrue(oligo_result.ss_dG > DEFAULT_MIN_SS_DG)
EXPECTED_SEQ = 'A*A*AAAAAAAAAAAAAAAAAA'
self.assertEqual(EXPECTED_SEQ, str(oligo_result.oligo_seq).upper())
def test_mutation__positive_strand(self):
"""Test making a mutation relative to the positive strand.
"""
self.config.should_calc_replichore = False
OLIGO_SIZE = 7
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 0
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
self.config.num_phosphorothioate_bonds = 0
self.config.min_ss_dG = -200 # just want the centered window
OLIGO_GENERATOR = OligoGenerator(self.config)
RAW_SEQ_1 = 'CGCTAGCCC'
SEQ_OBJ_1 = Seq(RAW_SEQ_1, generic_dna)
GENOME_RECORD_1 = SeqRecord(SEQ_OBJ_1)
self.config.genome_record = GENOME_RECORD_1
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': 1,
'start': 4,
'end': 7,
'mutation_type': 'M',
'mutation_seq': 'TAA'
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
self.assertEqual('TAG', str(oligo_result.original_seq))
self.assertEqual('TAA', str(oligo_result.mutation_seq))
EXPECTED_OLIGO_SEQ = 'GCTAACC'
self.assertEqual(EXPECTED_OLIGO_SEQ,
str(oligo_result.oligo_seq).upper())
# Try similar with oligo size 8.
OLIGO_SIZE = 8
self.config.oligo_size = OLIGO_SIZE
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'CGCTAACC'
self.assertEqual(EXPECTED_OLIGO_SEQ,
str(oligo_result.oligo_seq).upper())
def test_mutation__negative_strand(self):
"""Test making a mutation relative to the negative strand.
"""
self.config.should_calc_replichore = False
OLIGO_SIZE = 7
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 0
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
self.config.num_phosphorothioate_bonds = 0
self.config.min_ss_dG = -200 # just want the centered window
OLIGO_GENERATOR = OligoGenerator(self.config)
RAW_SEQ_1 = 'CGCTAGCCC'
SEQ_OBJ_1 = Seq(RAW_SEQ_1, generic_dna)
GENOME_RECORD_1 = SeqRecord(SEQ_OBJ_1)
self.config.genome_record = GENOME_RECORD_1
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': -1,
'start': 4,
'end': 7,
'mutation_type': 'M',
'mutation_seq': 'TTA'
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'GCTAACC'
self.assertEqual(EXPECTED_OLIGO_SEQ,
str(oligo_result.oligo_seq).upper())
# Try similar with oligo size 8.
OLIGO_SIZE = 8
self.config.oligo_size = OLIGO_SIZE
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'CGCTAACC'
self.assertEqual(EXPECTED_OLIGO_SEQ,
str(oligo_result.oligo_seq).upper())
def test_deletion(self):
"""Test making a deletion.
"""
self.config.should_calc_replichore = False
OLIGO_SIZE = 7
self.config.oligo_size = OLIGO_SIZE
OLIGO_END_BUFFER_DISTANCE = 2
self.config.oligo_end_buffer_distance = OLIGO_END_BUFFER_DISTANCE
self.config.num_phosphorothioate_bonds = 0
self.config.min_ss_dG = -200 # just want the centered window
OLIGO_GENERATOR = OligoGenerator(self.config)
# TCGC AGC
RAW_SEQ_1 = 'TTTTTTTCGCTAGCCCTTTTTTTTTTTTTTTT'
SEQ_OBJ_1 = Seq(RAW_SEQ_1, generic_dna)
GENOME_RECORD_1 = SeqRecord(SEQ_OBJ_1)
self.config.genome_record = GENOME_RECORD_1
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': 1,
'start': 11,
'end': 12,
'mutation_type': 'D',
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'TCGCAGC'
EXPECTED_OLIGO_SEQ_ALTERNATE = 'CGCAGCC'
self.assertTrue(str(oligo_result.oligo_seq).upper() in
[EXPECTED_OLIGO_SEQ, EXPECTED_OLIGO_SEQ_ALTERNATE],
'Got: ' + str(oligo_result.oligo_seq).upper())
# Try similar with oligo size 8.
OLIGO_SIZE = 8
self.config.oligo_size = OLIGO_SIZE
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'TCGCAGCC'
EXPECTED_OLIGO_SEQ_ALTERNATE = 'TTCGCAGC'
self.assertTrue(str(oligo_result.oligo_seq).upper() in
[EXPECTED_OLIGO_SEQ, EXPECTED_OLIGO_SEQ_ALTERNATE])
### Test bigger deletion.
OLIGO_SIZE = 7
self.config.oligo_size = OLIGO_SIZE
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': 1,
'start': 11,
'end': 14,
'mutation_type': 'D',
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'TCGCCCC'
EXPECTED_OLIGO_SEQ_ALTERNATE = 'TTCGCCC'
self.assertTrue(str(oligo_result.oligo_seq).upper() in
[EXPECTED_OLIGO_SEQ, EXPECTED_OLIGO_SEQ_ALTERNATE])
# Try similar with oligo size 8.
OLIGO_SIZE = 8
self.config.oligo_size = OLIGO_SIZE
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(OLIGO_SIZE, len(oligo_result.oligo_seq))
EXPECTED_OLIGO_SEQ = 'TTCGCCCC'
EXPECTED_OLIGO_SEQ_ALTERNATE = 'TCGCCCCT'
self.assertTrue(str(oligo_result.oligo_seq).upper() in
[EXPECTED_OLIGO_SEQ, EXPECTED_OLIGO_SEQ_ALTERNATE])
def test_mutation__full_genome__positive_strand(self):
self.config.num_phosphorothioate_bonds = 0
OLIGO_GENERATOR = OligoGenerator(self.config)
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': 1,
'start': 2216229,
'end': 2216230,
'mutation_type': 'M',
'mutation_seq': 'T'
})
oligo_result = OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
self.assertEqual(DEFAULT_OLIGO_SIZE, len(oligo_result.oligo_seq))
self.assertEqual('C', str(oligo_result.original_seq).upper())
self.assertEqual('T', str(oligo_result.mutation_seq).upper())
self.assertEqual(2216229, oligo_result.start)
self.assertEqual(2216230, oligo_result.end)
def test_input_accepts_strings_or_numbers(self):
"""Input might be parsed from file so should handle numbers as
strings.
"""
self.config.num_phosphorothioate_bonds = 0
OLIGO_GENERATOR = OligoGenerator(self.config)
OLIGO_TARGET = OligoTarget(self.config, {
'target_id': '1',
'replichore': 2, # so we get an oligo in the positive sense.
'strand': 1,
'start': '2216229', # Testing this.
'end': 2216230,
'mutation_type': 'M',
'mutation_seq': 'T'
})
OLIGO_GENERATOR.generate_oligo(OLIGO_TARGET)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'EncounterInitiative'
db.delete_table(u'dm_encounterinitiative')
# Adding model 'NPCTypeSkill'
db.create_table(u'dm_npctypeskill', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='skills', to=orm['dm.NPCType'])),
('skill', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.Skill'])),
('value', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('dm', ['NPCTypeSkill'])
# Adding model 'NPCTypePower'
db.create_table(u'dm_npctypepower', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('npc_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='powers', to=orm['dm.NPCType'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('attack_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.PowerRange'], null=True, blank=True)),
('action_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.ActionType'], null=True, blank=True)),
('recharge_text', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('usage', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['character_builder.PowerUsage'], null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('dm', ['NPCTypePower'])
# Adding M2M table for field keywords on 'NPCTypePower'
db.create_table(u'dm_npctypepower_keywords', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('npctypepower', models.ForeignKey(orm['dm.npctypepower'], null=False)),
('powerkeyword', models.ForeignKey(orm['character_builder.powerkeyword'], null=False))
))
db.create_unique(u'dm_npctypepower_keywords', ['npctypepower_id', 'powerkeyword_id'])
# Adding field 'EncounterParticipant.encounter'
db.add_column(u'dm_encounterparticipant', 'encounter',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['dm.Encounter']),
keep_default=False)
# Adding field 'EncounterParticipant.initiative'
db.add_column(u'dm_encounterparticipant', 'initiative',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'EncounterParticipant.symbol'
db.add_column(u'dm_encounterparticipant', 'symbol',
self.gf('django.db.models.fields.CharField')(default='', max_length=3, blank=True),
keep_default=False)
# Deleting field 'Encounter.encountertemplate_ptr'
db.delete_column(u'dm_encounter', 'encountertemplate_ptr_id')
# Adding field 'Encounter.id'
db.add_column(u'dm_encounter', u'id',
self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
keep_default=False)
# Adding field 'Encounter.template'
db.add_column(u'dm_encounter', 'template',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['dm.EncounterTemplate']),
keep_default=False)
# Adding field 'Encounter.is_completed'
db.add_column(u'dm_encounter', 'is_completed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding M2M table for field conditions on 'NPC'
db.create_table(u'dm_npc_conditions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('npc', models.ForeignKey(orm['dm.npc'], null=False)),
('condition', models.ForeignKey(orm['character_builder.condition'], null=False))
))
db.create_unique(u'dm_npc_conditions', ['npc_id', 'condition_id'])
def backwards(self, orm):
# Adding model 'EncounterInitiative'
db.create_table(u'dm_encounterinitiative', (
('participant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.EncounterParticipant'])),
('initiative', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('encounter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dm.Encounter'])),
))
db.send_create_signal('dm', ['EncounterInitiative'])
# Deleting model 'NPCTypeSkill'
db.delete_table(u'dm_npctypeskill')
# Deleting model 'NPCTypePower'
db.delete_table(u'dm_npctypepower')
# Removing M2M table for field keywords on 'NPCTypePower'
db.delete_table('dm_npctypepower_keywords')
# Deleting field 'EncounterParticipant.encounter'
db.delete_column(u'dm_encounterparticipant', 'encounter_id')
# Deleting field 'EncounterParticipant.initiative'
db.delete_column(u'dm_encounterparticipant', 'initiative')
# Deleting field 'EncounterParticipant.symbol'
db.delete_column(u'dm_encounterparticipant', 'symbol')
# Adding field 'Encounter.encountertemplate_ptr'
db.add_column(u'dm_encounter', 'encountertemplate_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(default=1, to=orm['dm.EncounterTemplate'], unique=True, primary_key=True),
keep_default=False)
# Deleting field 'Encounter.id'
db.delete_column(u'dm_encounter', u'id')
# Deleting field 'Encounter.template'
db.delete_column(u'dm_encounter', 'template_id')
# Deleting field 'Encounter.is_completed'
db.delete_column(u'dm_encounter', 'is_completed')
# Removing M2M table for field conditions on 'NPC'
db.delete_table('dm_npc_conditions')
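    # Usage note (assuming a standard South setup with this file under the
    # app's migrations/ package): apply the migration with
    # `./manage.py migrate dm`, and roll it back by migrating to the
    # previous migration number, e.g. `./manage.py migrate dm <previous_migration>`.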
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'character_builder.ability': {
'Meta': {'object_name': 'Ability'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'help_text': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.actiontype': {
'Meta': {'object_name': 'ActionType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.alignment': {
'Meta': {'object_name': 'Alignment'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'character_builder.armorclass': {
'Meta': {'object_name': 'ArmorClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.armortype': {
'Meta': {'object_name': 'ArmorType'},
'armor_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ArmorClass']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.character': {
'Meta': {'object_name': 'Character'},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
'class_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ClassType']"}),
'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Condition']", 'symmetrical': 'False', 'blank': 'True'}),
'deity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Deity']"}),
'gender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Gender']"}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
'slug_name': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'xp': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'character_builder.classtype': {
'Meta': {'ordering': "['name']", 'object_name': 'ClassType'},
'armor_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.ArmorType']", 'symmetrical': 'False'}),
'base_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {}),
'favored_abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
'hit_points_per_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Role']"}),
'role_flavor': ('django.db.models.fields.TextField', [], {}),
'skill_choices': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
'trained_skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['character_builder.Skill']", 'null': 'True', 'blank': 'True'}),
'weapon_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponProficiencyGroup']", 'symmetrical': 'False'})
},
'character_builder.condition': {
'Meta': {'object_name': 'Condition'},
'effect': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'character_builder.defense': {
'Meta': {'object_name': 'Defense'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'character_builder.deity': {
'Meta': {'object_name': 'Deity'},
'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'character_builder.gender': {
'Meta': {'object_name': 'Gender'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'character_builder.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'script': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.modifier': {
'Meta': {'object_name': 'Modifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'character_builder.powerkeyword': {
'Meta': {'object_name': 'PowerKeyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.powerrange': {
'Meta': {'object_name': 'PowerRange'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.powerusage': {
'Meta': {'object_name': 'PowerUsage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.race': {
'Meta': {'ordering': "['name']", 'object_name': 'Race'},
'average_height_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'average_weight_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Language']", 'symmetrical': 'False'}),
'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'playable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Size']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
'speed': ('django.db.models.fields.IntegerField', [], {}),
'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"})
},
'character_builder.role': {
'Meta': {'object_name': 'Role'},
'flavor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.size': {
'Meta': {'object_name': 'Size'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'reach': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'space': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.skill': {
'Meta': {'object_name': 'Skill'},
'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.source': {
'Meta': {'ordering': "['name']", 'object_name': 'Source'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.vision': {
'Meta': {'object_name': 'Vision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.weaponcategory': {
'Meta': {'object_name': 'WeaponCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'character_builder.weaponproficiencygroup': {
'Meta': {'object_name': 'WeaponProficiencyGroup'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponCategory']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dm.basicstorynpc': {
'Meta': {'object_name': 'BasicStoryNPC', '_ormbases': ['dm.NPC']},
'description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'})
},
'dm.campaign': {
'Meta': {'object_name': 'Campaign'},
'dm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dm.encounter': {
'Meta': {'object_name': 'Encounter'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.EncounterTemplate']"})
},
'dm.encounterparticipant': {
'Meta': {'object_name': 'EncounterParticipant'},
'encounter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Encounter']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initiative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'})
},
'dm.encountertemplate': {
'Meta': {'object_name': 'EncounterTemplate'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'npcs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dm.NPC']", 'symmetrical': 'False'})
},
'dm.historyline': {
'Meta': {'object_name': 'HistoryLine'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Session']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'dm.monsternpc': {
'Meta': {'object_name': 'MonsterNPC', '_ormbases': ['dm.NPC']},
'hit_points': ('django.db.models.fields.IntegerField', [], {}),
u'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
},
'dm.npc': {
'Meta': {'object_name': 'NPC'},
'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Condition']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_alive': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'dm.npcencounterparticipant': {
'Meta': {'object_name': 'NPCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
u'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'}),
'npc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPC']"})
},
'dm.npctype': {
'Meta': {'object_name': 'NPCType'},
'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'max_hit_points': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Role']", 'symmetrical': 'False'}),
'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"}),
'xp_reward': ('django.db.models.fields.IntegerField', [], {})
},
'dm.npctypeability': {
'Meta': {'object_name': 'NPCTypeAbility'},
'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'abilities'", 'to': "orm['dm.NPCType']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'dm.npctypedefense': {
'Meta': {'object_name': 'NPCTypeDefense'},
'defense': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Defense']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defenses'", 'to': "orm['dm.NPCType']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'dm.npctypepower': {
'Meta': {'object_name': 'NPCTypePower'},
'action_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ActionType']", 'null': 'True', 'blank': 'True'}),
'attack_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.PowerRange']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['character_builder.PowerKeyword']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'powers'", 'to': "orm['dm.NPCType']"}),
'recharge_text': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'usage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.PowerUsage']", 'null': 'True', 'blank': 'True'})
},
'dm.npctypeskill': {
'Meta': {'object_name': 'NPCTypeSkill'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skills'", 'to': "orm['dm.NPCType']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Skill']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'dm.party': {
'Meta': {'object_name': 'Party'},
'background': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'characters': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Character']", 'symmetrical': 'False'}),
'formed_on': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dm.pcencounterparticipant': {
'Meta': {'object_name': 'PCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Character']"}),
u'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'})
},
'dm.session': {
'Meta': {'object_name': 'Session'},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Campaign']"}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
'dm.storynpc': {
'Meta': {'object_name': 'StoryNPC', '_ormbases': ['dm.BasicStoryNPC']},
u'basicstorynpc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.BasicStoryNPC']", 'unique': 'True', 'primary_key': 'True'}),
'hit_points': ('django.db.models.fields.IntegerField', [], {}),
'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
}
}
complete_apps = ['dm']
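# Hedged note (not part of the generated migration): in a South migration the
# frozen `models` dict above is what the `orm` argument resolves against, so a
# forwards() step written elsewhere in this file could look roughly like:
#
#     def forwards(self, orm):
#         # orm['dm.Party'] is reconstructed from the frozen definition above,
#         # not imported from the live models module.
#         for party in orm['dm.Party'].objects.all():
#             pass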
|
|
import inspect
import traceback
from pythonscriptcffi import ffi, lib
@ffi.def_extern()
def pybind_init_sys_path_and_argv(pythonpath, res_path, data_path):
pythonpath = ffi.string(pythonpath)
res_path = ffi.string(res_path)
data_path = ffi.string(data_path)
import sys
from godot.bindings import OS
sys.argv = ["godot"] + OS.get_cmdline_args()
for p in pythonpath.split(';'):
if p.startswith("res://"):
p = p.replace("res:/", res_path, 1)
elif p.startswith("user://"):
p = p.replace("user:/", data_path, 1)
sys.path.append(p)
print('PYTHON_PATH: %s' % sys.path)
return True
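# Worked example of the prefix rewriting above (paths are illustrative only):
# with res_path="/home/user/project", an entry "res://lib" becomes
# "/home/user/project/lib"; with data_path="/home/user/.godot/app_userdata/project",
# "user://scripts" becomes "/home/user/.godot/app_userdata/project/scripts".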
# Protect python objects passed to C from being garbage collected
class ProtectFromGC:
def __init__(self):
self._data = {}
def register(self, value):
self._data[id(value)] = value
def unregister(self, value):
del self._data[id(value)]
def unregister_by_id(self, id):
del self._data[id]
protect_from_gc = ProtectFromGC()
def connect_handle(obj):
handle = obj.__dict__.get('_cffi_handle')
if not handle:
handle = ffi.new_handle(obj)
obj._cffi_handle = handle
return handle
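# Hedged sketch of the round trip connect_handle() relies on (standard cffi API;
# the Foo class is illustrative only):
#
#   class Foo: pass
#   obj = Foo()
#   handle = connect_handle(obj)          # creates and caches an ffi handle on obj
#   assert ffi.from_handle(handle) is obj
#   assert connect_handle(obj) is handle  # a second call reuses the cached handle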
@ffi.def_extern()
def pybind_load_exposed_class_per_module(modname):
modname = ffi.string(modname)
__import__(modname) # Force lazy loading of the module
cls = get_exposed_class_per_module(modname)
return connect_handle(cls)
@ffi.def_extern()
def pybind_wrap_gdobj_with_class(cls_handle, gdobj):
instance = ffi.from_handle(cls_handle)(gdobj)
protect_from_gc.register(instance)
return connect_handle(instance)
@ffi.def_extern()
def pybind_instanciate_from_classname(classname):
cls = get_exposed_class_per_name(ffi.string(classname))
instance = cls()
protect_from_gc.register(instance)
return connect_handle(instance)
@ffi.def_extern()
def pybind_release_instance(handle):
instance = ffi.from_handle(handle)
protect_from_gc.unregister(instance)
CALL_METH_OK = 0
CALL_METH_ERROR_INVALID_METHOD = 1
CALL_METH_ERROR_INVALID_ARGUMENT = 2
CALL_METH_ERROR_TOO_MANY_ARGUMENTS = 3
CALL_METH_ERROR_TOO_FEW_ARGUMENTS = 4
CALL_METH_ERROR_INSTANCE_IS_NULL = 5
CALL_METH_TYPE_NIL = 0 << 4
CALL_METH_TYPE_BOOL = 1 << 4
CALL_METH_TYPE_INT = 2 << 4
CALL_METH_TYPE_REAL = 3 << 4
CALL_METH_TYPE_STRING = 4 << 4
CALL_METH_TYPE_VECTOR2 = 5 << 4
CALL_METH_TYPE_RECT2 = 6 << 4
CALL_METH_TYPE_VECTOR3 = 7 << 4
CALL_METH_TYPE_TRANSFORM2D = 8 << 4
CALL_METH_TYPE_PLANE = 9 << 4
CALL_METH_TYPE_QUAT = 10 << 4
CALL_METH_TYPE_RECT3 = 11 << 4
CALL_METH_TYPE_BASIS = 12 << 4
CALL_METH_TYPE_TRANSFORM = 13 << 4
CALL_METH_TYPE_COLOR = 14 << 4
CALL_METH_TYPE_NODE_PATH = 15 << 4
CALL_METH_TYPE__RID = 16 << 4
CALL_METH_TYPE_OBJECT = 17 << 4
CALL_METH_TYPE_DICTIONARY = 18 << 4
CALL_METH_TYPE_ARRAY = 19 << 4
CALL_METH_TYPE_POOL_BYTE_ARRAY = 20 << 4
CALL_METH_TYPE_POOL_INT_ARRAY = 21 << 4
CALL_METH_TYPE_POOL_REAL_ARRAY = 22 << 4
CALL_METH_TYPE_POOL_STRING_ARRAY = 23 << 4
CALL_METH_TYPE_POOL_VECTOR2_ARRAY = 24 << 4
CALL_METH_TYPE_POOL_VECTOR3_ARRAY = 25 << 4
CALL_METH_TYPE_POOL_COLOR_ARRAY = 26 << 4
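# Hedged reading of the constants above: the low 4 bits carry the CallError code
# and the bits from position 4 upward carry the expected Variant type, so a
# packed value decomposes as:
#
#   packed = CALL_METH_ERROR_INVALID_ARGUMENT | CALL_METH_TYPE_INT  # == 0x22
#   call_error = packed & 0xF     # -> 2 (invalid argument)
#   expected_type = packed >> 4   # -> 2 (int)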
@ffi.def_extern()
def pybind_call_meth(handle, methname, args, argcount, ret, error):
instance = ffi.from_handle(handle)
try:
meth = getattr(instance, ffi.string(methname))
except AttributeError:
error[0] = CALL_METH_ERROR_INVALID_METHOD
return
# print('[GD->PY] Calling %s on %s ==> %s' % (ffi.string(methname), instance, meth))
pyargs = [variant_to_pyobj(args[i]) for i in range(argcount)]
# error is a hacky int compressing Variant::CallError values
try:
pyret = meth(*pyargs)
pyobj_to_variant(pyret, ret)
error[0] = CALL_METH_OK
except NotImplementedError:
error[0] = CALL_METH_ERROR_INVALID_METHOD
except TypeError:
traceback.print_exc()
error[0] = 1 | CALL_METH_ERROR_INVALID_ARGUMENT | CALL_METH_TYPE_NIL
# TODO: handle errors here
@ffi.def_extern()
def pybind_set_prop(handle, propname, val):
instance = ffi.from_handle(handle)
try:
pyval = variant_to_pyobj(val)
setattr(instance, ffi.string(propname), pyval)
return True
except Exception:
traceback.print_exc()
return False
@ffi.def_extern()
def pybind_get_prop(handle, propname, ret):
instance = ffi.from_handle(handle)
try:
pyret = getattr(instance, ffi.string(propname))
pyobj_to_variant(pyret, ret)
return True
except Exception:
traceback.print_exc()
return False
@ffi.def_extern()
def pybind_get_prop_type(handle, propname, prop_type):
instance = ffi.from_handle(handle)
prop = instance._exported.get(ffi.string(propname), None)
if not prop:
return False
else:
prop_type[0] = prop.gd_type
return True
@ffi.def_extern()
def pybind_get_prop_default_value(handle, propname, r_val):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
prop = cls.__exported.get(ffi.string(propname), None)
if not prop:
return False
pyobj_to_variant(prop.default, r_val)
return True
@ffi.def_extern()
def pybind_get_prop_info(handle, propname, r_prop_info):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
prop = cls.__exported.get(ffi.string(propname), None)
if not prop:
return False
r_prop_info.type = prop.gd_type
r_prop_info.hint = prop.gd_hint
r_prop_info.name = prop.gd_name[0]
r_prop_info.hint_string = prop.gd_hint_string[0]
r_prop_info.usage = prop.gd_usage
return True
@ffi.def_extern()
def pybind_get_prop_list(handle):
# Lazily generate the list of exported properties' names
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
# Need to store the cached list with a per-class name to avoid shadowing
# from a parent class
field = '_%s__exported_raw_list' % cls.__name__
raw_list = getattr(cls, field, None)
exported = getattr(cls, '__exported')
if not raw_list:
# Build the list of exported fields' names, ready to be accessed by godot
raw_list = ffi.new('godot_string[]', len(exported) + 1)
for i, name in enumerate(exported.keys()):
lib.godot_string_new_unicode_data(ffi.addressof(raw_list[i]), name, -1)
# Last entry is an empty string
lib.godot_string_new(ffi.addressof(raw_list[len(exported)]))
setattr(cls, field, raw_list)
return raw_list
@ffi.def_extern()
def pybind_get_meth_list(handle):
# Lazily generate the list of methods' names
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
# Need to store the cached list with a per-class name to avoid shadowing
# from a parent class
field = '_%s__meth_raw_list' % cls.__name__
raw_list = getattr(cls, field, None)
if not raw_list:
meths = [k for k in dir(cls) if not k.startswith('__') and callable(getattr(cls, k))]
raw_list = ffi.new('godot_string[]', len(meths) + 1)
for i, name in enumerate(meths):
lib.godot_string_new_unicode_data(ffi.addressof(raw_list[i]), name, -1)
# Last entry is an empty string
lib.godot_string_new(ffi.addressof(raw_list[len(meths)]))
setattr(cls, field, raw_list)
return raw_list
@ffi.def_extern()
def pybind_get_meth_info(handle, methname, r_argcount):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
meth = getattr(cls, ffi.string(methname), None)
if not meth:
return False
spec = inspect.getfullargspec(meth)
# Cannot pass keyword only arguments through godot
r_argcount[0] = len(spec.args)
return True
@ffi.def_extern()
def pybind_has_meth(handle, methname):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
meth = getattr(cls, ffi.string(methname), None)
return callable(meth)
@ffi.def_extern()
def pybind_is_tool(handle):
instance = ffi.from_handle(handle)
return getattr(instance, '__tool', False)
@ffi.def_extern()
def pybind_notification(handle, notification):
# Godot's notification should call all parent `_notification`
# methods (better not use `super()._notification` in those methods...)
instance = ffi.from_handle(handle)
cls = type(instance)
# TODO: cache the methods to call ?
for parentcls in inspect.getmro(cls):
try:
parentcls.__dict__['_notification'](instance, notification)
except (KeyError, NotImplementedError):
pass
@ffi.def_extern()
def pybind_get_rpc_mode(handle, methname):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
# TODO: it seems that if gdscript finds a method with RPC_MODE_DISABLED, it tries
# to find a parent with rpc enabled...
for parentcls in inspect.getmro(cls):
try:
mode = parentcls.__dict__[ffi.string(methname)].__rpc
if mode != lib.GODOT_METHOD_RPC_MODE_DISABLED:
return mode
except (KeyError, AttributeError):
pass
return lib.GODOT_METHOD_RPC_MODE_DISABLED
@ffi.def_extern()
def pybind_get_rset_mode(handle, varname):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
# TODO: it seems that if gdscript finds a method with RPC_MODE_DISABLED, it tries
# to find a parent with rpc enabled...
for parentcls in inspect.getmro(cls):
try:
mode = parentcls._exported[varname].rpc
if mode != lib.GODOT_METHOD_RPC_MODE_DISABLED:
return mode
except (ValueError, KeyError):
pass
return lib.GODOT_METHOD_RPC_MODE_DISABLED
@ffi.def_extern()
def pybind_get_signal_list(handle):
# Lazily generate the list of signals' names
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
# Need to store the cached list with a per-class name to avoid shadowing
# from a parent class
field = '_%s__signal_raw_list' % cls.__name__
raw_list = getattr(cls, field, None)
if not raw_list:
# Build the list of signals, ready to be accessed by godot
raw_list = ffi.new('godot_string[]', len(cls.__signals) + 1)
for i, name in enumerate(cls.__signals.keys()):
lib.godot_string_new_unicode_data(ffi.addressof(raw_list[i]), name, -1)
# Last entry is an empty string
lib.godot_string_new(ffi.addressof(raw_list[len(cls.__signals)]))
setattr(cls, field, raw_list)
return raw_list
@ffi.def_extern()
def pybind_has_signal(handle, signalname):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
return ffi.string(signalname) in cls.__signals
@ffi.def_extern()
def pybind_get_signal_info(handle, signalname, r_argcount):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
signal = cls.__signals.get(signalname, None)
if not signal:
return False
# TODO: finish this
r_argcount[0] = 0
# spec = inspect.getfullargspec(signal)
# # Cannot pass keyword only arguments through godot
# r_argcount[0] = len(spec.args)
return True
@ffi.def_extern()
def pybind_get_class_name(handle, r_name):
cls_or_instance = ffi.from_handle(handle)
cls = cls_or_instance if isinstance(cls_or_instance, type) else type(cls_or_instance)
lib.godot_string_new_unicode_data(r_name, cls.__name__, -1)
|
|
from dtreeviz.utils import *
import numpy as np
import pandas as pd
import graphviz
from pathlib import Path
from sklearn import tree
from graphviz.backend import run, view
import matplotlib.pyplot as plt
from dtreeviz.shadow import *
from numbers import Number
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
import tempfile
from os import getpid, makedirs
from sys import platform as PLATFORM
from colour import Color
YELLOW = "#fefecd" # "#fbfbd0" # "#FBFEB0"
GREEN = "#cfe2d4"
DARKBLUE = '#313695'
BLUE = '#4575b4'
DARKGREEN = '#006400'
LIGHTORANGE = '#fee090'
LIGHTBLUE = '#a6bddb'
GREY = '#444443'
WEDGE_COLOR = GREY #'orange'
HIGHLIGHT_COLOR = '#D67C03'
# How many bins should we have based upon number of classes
NUM_BINS = [0, 0, 10, 9, 8, 6, 6, 6, 5, 5, 5]
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
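# For example, NUM_BINS[2] == 10 bins for a binary problem and NUM_BINS[4] == 8
# for four classes; get_num_bins() below doubles the count when histtype is
# 'barstacked'.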
color_blind_friendly_colors = [
None, # 0 classes
None, # 1 class
["#FEFEBB","#a1dab4"], # 2 classes
["#FEFEBB","#D9E6F5",'#a1dab4'], # 3 classes
["#FEFEBB","#D9E6F5",'#a1dab4','#fee090'], # 4
["#FEFEBB","#D9E6F5",'#a1dab4','#41b6c4','#fee090'], # 5
["#FEFEBB",'#c7e9b4','#41b6c4','#2c7fb8','#fee090','#f46d43'], # 6
["#FEFEBB",'#c7e9b4','#7fcdbb','#41b6c4','#225ea8','#fdae61','#f46d43'], # 7
["#FEFEBB",'#edf8b1','#c7e9b4','#7fcdbb','#1d91c0','#225ea8','#fdae61','#f46d43'], # 8
["#FEFEBB",'#c7e9b4','#41b6c4','#74add1','#4575b4','#313695','#fee090','#fdae61','#f46d43'], # 9
["#FEFEBB",'#c7e9b4','#41b6c4','#74add1','#4575b4','#313695','#fee090','#fdae61','#f46d43','#d73027'] # 10
]
class DTreeViz:
def __init__(self,dot):
self.dot = dot
def _repr_svg_(self):
return self.svg()
def svg(self):
"""Render tree as svg and return svg text."""
tmp = tempfile.gettempdir()
svgfilename = f"{tmp}/DTreeViz_{getpid()}.svg"
self.save(svgfilename)
with open(svgfilename, encoding='UTF-8') as f:
svg = f.read()
return svg
def view(self):
tmp = tempfile.gettempdir()
svgfilename = f"{tmp}/DTreeViz_{getpid()}.svg"
self.save(svgfilename)
view(svgfilename)
def save(self, filename):
"""
Save the svg of this tree visualization into the filename argument.
On the Mac platform any file type (.pdf, .png, .svg) can be saved; other
platforms can only save .svg and raise an error otherwise.
See https://github.com/parrt/dtreeviz/issues/4
"""
path = Path(filename)
if not path.parent.exists():
makedirs(path.parent)
g = graphviz.Source(self.dot, format='svg')
dotfilename = g.save(directory=path.parent.as_posix(), filename=path.stem)
if PLATFORM=='darwin':
# dot seems broken in terms of fonts if we use -Tsvg. Force users to
# brew install graphviz with librsvg (else metrics are off) and
# use -Tsvg:cairo which fixes bug and also automatically embeds images
format = path.suffix[1:] # ".svg" -> "svg" etc...
cmd = ["dot", f"-T{format}:cairo", "-o", filename, dotfilename]
# print(' '.join(cmd))
stdout, stderr = run(cmd, capture_output=True, check=True, quiet=False)
else:
if not filename.endswith(".svg"):
raise (Exception(f"{PLATFORM} can only save .svg files: {filename}"))
# Gen .svg file from .dot but output .svg has image refs to other files
#orig_svgfilename = filename.replace('.svg', '-orig.svg')
cmd = ["dot", "-Tsvg", "-o", filename, dotfilename]
# print(' '.join(cmd))
stdout, stderr = run(cmd, capture_output=True, check=True, quiet=False)
# now merge in referenced SVG images to make all-in-one file
with open(filename, encoding='UTF-8') as f:
svg = f.read()
svg = inline_svg_images(svg)
with open(filename, "w", encoding='UTF-8') as f:
f.write(svg)
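# Hedged usage note: a DTreeViz instance renders itself in Jupyter via
# _repr_svg_(), or can be rendered and saved explicitly (dot_source below is an
# illustrative name for a graphviz DOT string):
#
#   viz = DTreeViz(dot_source)
#   svg_text = viz.svg()          # render to SVG text
#   viz.save("/tmp/tree.svg")     # or write straight to disk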
def rtreeviz_univar(ax,
x_train: (pd.Series, np.ndarray), # 1 vector of X data
y_train: (pd.Series, np.ndarray),
max_depth,
feature_name: str,
target_name: str,
fontsize: int = 14,
show={'title','splits'}):
if isinstance(x_train, pd.Series):
x_train = x_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
y_range = (min(y_train), max(y_train)) # same y axis for all
overall_feature_range = (np.min(x_train), np.max(x_train))
t = tree.DecisionTreeRegressor(max_depth=max_depth)
t.fit(x_train.reshape(-1,1), y_train)
shadow_tree = ShadowDecTree(t, x_train.reshape(-1,1), y_train, feature_names=[feature_name])
splits = []
for node in shadow_tree.internal:
splits.append(node.split())
splits = sorted(splits)
bins = [overall_feature_range[0]] + splits + [overall_feature_range[1]]
means = []
for i in range(len(bins) - 1):
left = bins[i]
right = bins[i + 1]
inrange = y_train[(x_train >= left) & (x_train < right)]
means.append(np.mean(inrange))
ax.scatter(x_train, y_train, marker='o', alpha=.4, c=BLUE,
edgecolor=GREY, lw=.3)
if 'splits' in show:
for split in splits:
ax.plot([split, split], [*y_range], '--', color='grey', linewidth=.7)
prevX = overall_feature_range[0]
for i, m in enumerate(means):
split = overall_feature_range[1]
if i < len(splits):
split = splits[i]
ax.plot([prevX, split], [m, m], '-', color='#f46d43', linewidth=2)
prevX = split
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=fontsize)
if 'title' in show:
title = f"Regression tree depth {max_depth}, training $R^2$={t.score(x_train.reshape(-1,1),y_train):.3f}"
plt.title(title, fontsize=fontsize, color=GREY)
plt.xlabel(feature_name, fontsize=fontsize)
plt.ylabel(target_name, fontsize=fontsize)
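# Hedged usage sketch for rtreeviz_univar (the df DataFrame and its columns are
# illustrative only):
#
#   fig, ax = plt.subplots(figsize=(6, 2))
#   rtreeviz_univar(ax, df['age'], df['price'], max_depth=3,
#                   feature_name='age', target_name='price')
#   plt.show()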
def rtreeviz_bivar_heatmap(ax, X_train, y_train, max_depth, feature_names,
fontsize=14, ticks_fontsize=12,
show={'title'}
) -> None:
"""
Show the tessellated 2D feature space for a bivariate regression tree. X_train
is expected to hold the two feature columns named in feature_names, which are
used to train the tree.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
rt = tree.DecisionTreeRegressor(max_depth=max_depth)
rt.fit(X_train, y_train)
n_colors_in_map = 100
y_lim = np.min(y_train), np.max(y_train)
y_range = y_lim[1] - y_lim[0]
color_map = list(str(c) for c in Color("#c7e9b4").range_to(Color("#081d58"), n_colors_in_map))
shadow_tree = ShadowDecTree(rt, X_train, y_train, feature_names=feature_names)
tesselation = shadow_tree.tesselation()
for node,bbox in tesselation:
pred = node.prediction()
color = color_map[int(((pred - y_lim[0]) / y_range) * (n_colors_in_map-1))]
x = bbox[0]
y = bbox[1]
w = bbox[2]-bbox[0]
h = bbox[3]-bbox[1]
rect = patches.Rectangle((x, y), w, h, 0, linewidth=.3, alpha=.5,
edgecolor=GREY, facecolor=color)
ax.add_patch(rect)
colors = [color_map[int(((y-y_lim[0])/y_range)*(n_colors_in_map-1))] for y in y_train]
x, y, z = X_train[:,0], X_train[:,1], y_train
ax.scatter(x, y, marker='o', alpha=.95, c=colors, edgecolor=GREY, lw=.3)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
if 'title' in show:
accur = rt.score(X_train, y_train)
title = f"Regression tree depth {max_depth}, training $R^2$={accur:.3f}"
plt.title(title, fontsize=fontsize, color=GREY)
return None
def rtreeviz_bivar_3D(ax, X_train, y_train, max_depth, feature_names, target_name,
fontsize=14, ticks_fontsize=10,
azim=0, elev=0, dist=7,
show={'title'}
) -> None:
"""
Show the 3D feature space for a bivariate regression tree. X_train is expected
to hold the two feature columns named in feature_names, which are used to train
the tree.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
n_colors_in_map = 100
ax.view_init(elev=elev, azim=azim)
ax.dist=dist
def plane(node, bbox):
x = np.linspace(bbox[0], bbox[2], 2)
y = np.linspace(bbox[1], bbox[3], 2)
xx, yy = np.meshgrid(x, y)
z = np.full(xx.shape, node.prediction())
# print(f"{node.prediction()}->{int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))}, lim {y_lim}")
# print(f"{color_map[int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))]}")
ax.plot_surface(xx, yy, z, alpha=.85, shade=False,
color=color_map[int(((node.prediction()-y_lim[0])/y_range)*(n_colors_in_map-1))],
edgecolor=GREY, lw=.3)
rt = tree.DecisionTreeRegressor(max_depth=max_depth)
rt.fit(X_train, y_train)
y_lim = np.min(y_train), np.max(y_train)
y_range = y_lim[1] - y_lim[0]
color_map = list(str(c) for c in Color("#c7e9b4").range_to(Color("#081d58"), n_colors_in_map))
colors = [color_map[int(((y-y_lim[0])/y_range)*(n_colors_in_map-1))] for y in y_train]
shadow_tree = ShadowDecTree(rt, X_train, y_train, feature_names=feature_names)
tesselation = shadow_tree.tesselation()
for node, bbox in tesselation:
plane(node, bbox)
x, y, z = X_train[:, 0], X_train[:, 1], y_train
ax.scatter(x, y, z, marker='o', alpha=.7, edgecolor=GREY, lw=.3, c=colors)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_zlabel(f"{target_name}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
if 'title' in show:
accur = rt.score(X_train, y_train)
title = f"Regression tree depth {max_depth}, training $R^2$={accur:.3f}"
plt.title(title, fontsize=fontsize)
return None
def ctreeviz_univar(ax, x_train, y_train, max_depth, feature_name, class_names,
target_name,
fontsize=14, nbins=25, gtype='strip',
show={'title','legend','splits'}):
if isinstance(x_train, pd.Series):
x_train = x_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
# ax.set_facecolor('#F9F9F9')
ct = tree.DecisionTreeClassifier(max_depth=max_depth)
ct.fit(x_train.reshape(-1, 1), y_train)
shadow_tree = ShadowDecTree(ct, x_train.reshape(-1, 1), y_train,
feature_names=[feature_name], class_names=class_names)
n_classes = shadow_tree.nclasses()
overall_feature_range = (np.min(x_train), np.max(x_train))
class_values = shadow_tree.unique_target_values
color_values = color_blind_friendly_colors[n_classes]
colors = {v: color_values[i] for i, v in enumerate(class_values)}
X_colors = [colors[cl] for cl in class_values]
ax.set_xlabel(f"{feature_name}", fontsize=fontsize, fontname="Arial",
color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_linewidth(.3)
r = overall_feature_range[1] - overall_feature_range[0]
dot_w = 25
X_hist = [x_train[y_train == cl] for cl in class_values]
binwidth = r / nbins
if gtype == 'barstacked':
hist, bins, barcontainers = ax.hist(X_hist,
color=X_colors,
align='mid',
histtype='barstacked',
bins=np.arange(overall_feature_range[0],
overall_feature_range[
1] + binwidth, binwidth),
label=class_names)
for patch in barcontainers:
for rect in patch.patches:
rect.set_linewidth(.5)
rect.set_edgecolor(GREY)
ax.set_xlim(*overall_feature_range)
ax.set_xticks(overall_feature_range)
ax.set_yticks([0, max([max(h) for h in hist])])
elif gtype == 'strip':
# user should pass in short and wide fig
sigma = .013
mu = .08
class_step = .08
dot_w = 20
ax.set_ylim(0, mu + n_classes*class_step)
for i, bucket in enumerate(X_hist):
y_noise = np.random.normal(mu+i*class_step, sigma, size=len(bucket))
ax.scatter(bucket, y_noise, alpha=.7, marker='o', s=dot_w, c=colors[class_values[i]],
edgecolors=GREY, lw=.3)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY,
labelsize=fontsize)
splits = []
for node in shadow_tree.internal:
splits.append(node.split())
splits = sorted(splits)
bins = [ax.get_xlim()[0]] + splits + [ax.get_xlim()[1]]
pred_box_height = .07 * ax.get_ylim()[1]
preds = []
for i in range(len(bins) - 1):
left = bins[i]
right = bins[i + 1]
inrange = y_train[(x_train >= left) & (x_train < right)]
values, counts = np.unique(inrange, return_counts=True)
pred = values[np.argmax(counts)]
rect = patches.Rectangle((left, 0), (right - left), pred_box_height, linewidth=.3,
edgecolor=GREY, facecolor=colors[pred])
ax.add_patch(rect)
preds.append(pred)
if 'legend' in show:
add_classifier_legend(ax, class_names, class_values, colors, target_name)
if 'title' in show:
accur = ct.score(x_train.reshape(-1, 1), y_train)
title = f"Classifier tree depth {max_depth}, training accuracy={accur*100:.2f}%"
plt.title(title, fontsize=fontsize, color=GREY)
if 'splits' in show:
for split in splits:
plt.plot([split, split], [*ax.get_ylim()], '--', color='grey', linewidth=1)
def ctreeviz_bivar(ax, X_train, y_train, max_depth, feature_names, class_names,
target_name,
fontsize=14,
show={'title','legend','splits'}):
"""
Show the tessellated 2D feature space for a bivariate classification tree. X_train
is expected to hold the two feature columns named in feature_names, which are
used to train the tree.
"""
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train, pd.Series):
y_train = y_train.values
ct = tree.DecisionTreeClassifier(max_depth=max_depth)
ct.fit(X_train, y_train)
shadow_tree = ShadowDecTree(ct, X_train, y_train,
feature_names=feature_names, class_names=class_names)
tesselation = shadow_tree.tesselation()
n_classes = shadow_tree.nclasses()
class_values = shadow_tree.unique_target_values
color_values = color_blind_friendly_colors[n_classes]
colors = {v: color_values[i] for i, v in enumerate(class_values)}
if 'splits' in show:
for node,bbox in tesselation:
x = bbox[0]
y = bbox[1]
w = bbox[2]-bbox[0]
h = bbox[3]-bbox[1]
rect = patches.Rectangle((x, y), w, h, 0, linewidth=.3, alpha=.4,
edgecolor=GREY, facecolor=colors[node.prediction()])
ax.add_patch(rect)
dot_w = 25
X_hist = [X_train[y_train == cl] for cl in class_values]
for i, h in enumerate(X_hist):
ax.scatter(h[:,0], h[:,1], alpha=1, marker='o', s=dot_w, c=colors[class_values[i]],
edgecolors=GREY, lw=.3)
ax.set_xlabel(f"{feature_names[0]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.set_ylabel(f"{feature_names[1]}", fontsize=fontsize, fontname="Arial", color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(.3)
if 'legend' in show:
add_classifier_legend(ax, class_names, class_values, colors, target_name)
if 'title' in show:
accur = ct.score(X_train, y_train)
title = f"Classifier tree depth {max_depth}, training accuracy={accur*100:.2f}%"
plt.title(title, fontsize=fontsize, color=GREY)
return None
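# Hedged usage sketch for ctreeviz_bivar (the wine-style column names and class
# names are illustrative only):
#
#   fig, ax = plt.subplots()
#   ctreeviz_bivar(ax, X[['alcohol', 'flavanoids']], y, max_depth=2,
#                  feature_names=['alcohol', 'flavanoids'],
#                  class_names=['class0', 'class1', 'class2'],
#                  target_name='wine')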
def add_classifier_legend(ax, class_names, class_values, colors, target_name):
# add boxes for legend
boxes = []
for i, c in enumerate(class_values):
box = patches.Rectangle((0, 0), 20, 10, linewidth=.4, edgecolor=GREY,
facecolor=colors[c], label=class_names[c])
boxes.append(box)
leg = ax.legend(handles=boxes,
frameon=True,
shadow=False,
fancybox=True,
title=target_name,
handletextpad=.35,
borderpad=.8,
bbox_to_anchor=(1.0, 1.0),
edgecolor=GREY)
leg.get_frame().set_linewidth(.5)
leg.get_title().set_color(GREY)
leg.get_title().set_fontsize(10)
leg.get_title().set_fontweight('bold')
for text in leg.get_texts():
text.set_color(GREY)
text.set_fontsize(10)
def dtreeviz(tree_model: (tree.DecisionTreeRegressor, tree.DecisionTreeClassifier),
X_train: (pd.DataFrame, np.ndarray),
y_train: (pd.Series, np.ndarray),
feature_names: List[str],
target_name: str,
class_names: (Mapping[Number, str], List[str]) = None, # required if classifier
precision: int = 2,
orientation: ('TD', 'LR') = "TD",
show_root_edge_labels: bool = True,
show_node_labels: bool = False,
fancy: bool = True,
histtype: ('bar', 'barstacked', 'strip') = 'barstacked',
highlight_path: List[int] = [],
X: np.ndarray = None,
max_X_features_LR: int = 10,
max_X_features_TD: int = 20) \
-> DTreeViz:
"""
Given a decision tree regressor or classifier, create and return a tree visualization
using the graphviz (DOT) language.
:param tree_model: A DecisionTreeRegressor or DecisionTreeClassifier that has been
fit to X_train, y_train.
:param X_train: A data frame or 2-D matrix of feature vectors used to train the model.
:param y_train: A pandas Series or 1-D vector with target values or classes.
:param feature_names: A list of the feature names.
:param target_name: The name of the target variable.
:param class_names: [For classifiers] A dictionary or list of strings mapping class
value to class name.
:param precision: When displaying floating-point numbers, how many digits to display
after the decimal point. Default is 2.
:param orientation: Is the tree top down, "TD", or left to right, "LR"?
:param show_root_edge_labels: Include < and >= on the edges emanating from the root?
:param show_node_labels: Add "Node id" to top of each node in graph for educational purposes
:param fancy: If True (the default), render the in-node distribution plots;
if False, show compact text-only internal nodes.
:param histtype: [For classifiers] Either 'bar' or 'barstacked' to indicate
histogram type. We find that 'barstacked' looks great up to about
four classes.
:param highlight_path: A list of node IDs to highlight, default is [].
Useful for emphasizing node(s) in tree for discussion.
If X argument given then this is ignored.
:type highlight_path: List[int]
:param X: Instance to run down the tree; derived path to highlight from this vector.
Show feature vector with labels underneath leaf reached. highlight_path
is ignored if X is not None.
:type X: np.ndarray
:param max_X_features_LR: If len(X) exceeds this limit for LR layout,
display only those features
used to guide X vector down tree. Helps when len(X) is large.
Default is 10.
:param max_X_features_TD: If len(X) exceeds this limit for TD layout,
display only those features
used to guide X vector down tree. Helps when len(X) is large.
Default is 20.
:return: A DTreeViz object wrapping the graphviz DOT source that describes the decision tree.
"""
def node_name(node : ShadowDecTreeNode) -> str:
return f"node{node.id}"
def split_node(name, node_name, split):
if fancy:
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0">
{labelgraph}
<tr>
<td><img src="{tmp}/node{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
else:
html = f"""<font face="Helvetica" color="#444443" point-size="12">{name}@{split}</font>"""
if node.id in highlight_path:
gr_node = f'{node_name} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
gr_node = f'{node_name} [margin="0" shape=none label=<{html}>]'
return gr_node
def regr_leaf_node(node, label_fontsize: int = 12):
# always generate fancy regr leaves for now but shrink a bit for nonfancy.
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0">
{labelgraph}
<tr>
<td><img src="{tmp}/leaf{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
if node.id in highlight_path:
return f'leaf{node.id} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
return f'leaf{node.id} [margin="0" shape=box penwidth="0" label=<{html}>]'
def class_leaf_node(node, label_fontsize: int = 12):
labelgraph = node_label(node) if show_node_labels else ''
html = f"""<table border="0" CELLBORDER="0">
{labelgraph}
<tr>
<td><img src="{tmp}/leaf{node.id}_{getpid()}.svg"/></td>
</tr>
</table>"""
if node.id in highlight_path:
return f'leaf{node.id} [margin="0" shape=box penwidth=".5" color="{HIGHLIGHT_COLOR}" style="dashed" label=<{html}>]'
else:
return f'leaf{node.id} [margin="0" shape=box penwidth="0" label=<{html}>]'
def node_label(node):
return f'<tr><td CELLPADDING="0" CELLSPACING="0"><font face="Helvetica" color="{GREY}" point-size="14"><i>Node {node.id}</i></font></td></tr>'
def class_legend_html():
return f"""
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td border="0" cellspacing="0" cellpadding="0"><img src="{tmp}/legend_{getpid()}.svg"/></td>
</tr>
</table>
"""
def class_legend_gr():
if not shadow_tree.isclassifier():
return ""
return f"""
subgraph cluster_legend {{
style=invis;
legend [penwidth="0" margin="0" shape=box margin="0.03" width=.1, height=.1 label=<
{class_legend_html()}
>]
}}
"""
def instance_html(path, label_fontsize: int = 11):
headers = []
features_used = [node.feature() for node in path[:-1]] # don't include leaf
display_X = X
display_feature_names = feature_names
highlight_feature_indexes = features_used
if (orientation=='TD' and len(X)>max_X_features_TD) or\
(orientation == 'LR' and len(X) > max_X_features_LR):
# squash all features down to just those used
display_X = [X[i] for i in features_used] + ['...']
display_feature_names = [node.feature_name() for node in path[:-1]] + ['...']
highlight_feature_indexes = range(0,len(features_used))
for i,name in enumerate(display_feature_names):
color = GREY
if i in highlight_feature_indexes:
color = HIGHLIGHT_COLOR
headers.append(f'<td cellpadding="1" align="right" bgcolor="white"><font face="Helvetica" color="{color}" point-size="{label_fontsize}"><b>{name}</b></font></td>')
values = []
for i,v in enumerate(display_X):
color = GREY
if i in highlight_feature_indexes:
color = HIGHLIGHT_COLOR
if isinstance(v,int) or isinstance(v, str):
disp_v = v
else:
disp_v = myround(v, precision)
values.append(f'<td cellpadding="1" align="right" bgcolor="white"><font face="Helvetica" color="{color}" point-size="{label_fontsize}">{disp_v}</font></td>')
return f"""
<table border="0" cellspacing="0" cellpadding="0">
<tr>
{''.join(headers)}
</tr>
<tr>
{''.join(values)}
</tr>
</table>
"""
def instance_gr():
if X is None:
return ""
pred, path = shadow_tree.predict(X)
leaf = f"leaf{path[-1].id}"
if shadow_tree.isclassifier():
edge_label = f" Prediction<br/> {path[-1].prediction_name()}"
else:
edge_label = f" Prediction<br/> {myround(path[-1].prediction(), precision)}"
return f"""
subgraph cluster_instance {{
style=invis;
X_y [penwidth="0.3" margin="0" shape=box margin="0.03" width=.1, height=.1 label=<
{instance_html(path)}
>]
}}
{leaf} -> X_y [dir=back; penwidth="1.2" color="{HIGHLIGHT_COLOR}" label=<<font face="Helvetica" color="{GREY}" point-size="{11}">{edge_label}</font>>]
"""
if orientation=="TD":
ranksep = ".2"
nodesep = "0.1"
else:
if fancy:
ranksep = ".22"
nodesep = "0.1"
else:
ranksep = ".05"
nodesep = "0.09"
tmp = tempfile.gettempdir()
# tmp = "/tmp"
shadow_tree = ShadowDecTree(tree_model, X_train, y_train,
feature_names=feature_names, class_names=class_names)
if X is not None:
pred, path = shadow_tree.predict(X)
highlight_path = [n.id for n in path]
n_classes = shadow_tree.nclasses()
color_values = color_blind_friendly_colors[n_classes]
# Fix the mapping from target value to color for entire tree
colors = None
if shadow_tree.isclassifier():
class_values = shadow_tree.unique_target_values
colors = {v:color_values[i] for i,v in enumerate(class_values)}
y_range = (min(y_train)*1.03, max(y_train)*1.03) # same y axis for all
if shadow_tree.isclassifier():
# draw_legend_boxes(shadow_tree, f"{tmp}/legend")
draw_legend(shadow_tree, target_name, f"{tmp}/legend_{getpid()}.svg")
if isinstance(X_train,pd.DataFrame):
X_train = X_train.values
if isinstance(y_train,pd.Series):
y_train = y_train.values
# Find max height (count) for any bar in any node
if shadow_tree.isclassifier():
nbins = get_num_bins(histtype, n_classes)
node_heights = shadow_tree.get_split_node_heights(X_train, y_train, nbins=nbins)
internal = []
for node in shadow_tree.internal:
if fancy:
if shadow_tree.isclassifier():
class_split_viz(node, X_train, y_train,
filename=f"{tmp}/node{node.id}_{getpid()}.svg",
precision=precision,
colors=colors,
histtype=histtype,
node_heights=node_heights,
X = X,
highlight_node=node.id in highlight_path)
else:
regr_split_viz(node, X_train, y_train,
filename=f"{tmp}/node{node.id}_{getpid()}.svg",
target_name=target_name,
y_range=y_range,
precision=precision,
X=X,
highlight_node=node.id in highlight_path)
nname = node_name(node)
gr_node = split_node(node.feature_name(), nname, split=myround(node.split(), precision))
internal.append(gr_node)
leaves = []
for node in shadow_tree.leaves:
if shadow_tree.isclassifier():
class_leaf_viz(node, colors=color_values,
filename=f"{tmp}/leaf{node.id}_{getpid()}.svg")
leaves.append( class_leaf_node(node) )
else:
# for now, always gen leaf
regr_leaf_viz(node, y_train, target_name=target_name,
filename=f"{tmp}/leaf{node.id}_{getpid()}.svg",
y_range=y_range, precision=precision)
leaves.append( regr_leaf_node(node) )
show_edge_labels = False
all_llabel = '<' if show_edge_labels else ''
all_rlabel = '≥' if show_edge_labels else ''
root_llabel = '<' if show_root_edge_labels else ''
root_rlabel = '≥' if show_root_edge_labels else ''
edges = []
# non-leaf edges, labelled with < and ≥
for node in shadow_tree.internal:
nname = node_name(node)
if node.left.isleaf():
left_node_name ='leaf%d' % node.left.id
else:
left_node_name = node_name(node.left)
if node.right.isleaf():
right_node_name ='leaf%d' % node.right.id
else:
right_node_name = node_name(node.right)
llabel = all_llabel
rlabel = all_rlabel
if node==shadow_tree.root:
llabel = root_llabel
rlabel = root_rlabel
lcolor = rcolor = GREY
lpw = rpw = "0.3"
if node.left.id in highlight_path:
lcolor = HIGHLIGHT_COLOR
lpw = "1.2"
if node.right.id in highlight_path:
rcolor = HIGHLIGHT_COLOR
rpw = "1.2"
edges.append( f'{nname} -> {left_node_name} [penwidth={lpw} color="{lcolor}" label=<{llabel}>]' )
edges.append( f'{nname} -> {right_node_name} [penwidth={rpw} color="{rcolor}" label=<{rlabel}>]' )
edges.append(f"""
{{
rank=same;
{left_node_name} -> {right_node_name} [style=invis]
}}
""")
newline = "\n\t"
dot = f"""
digraph G {{
splines=line;
nodesep={nodesep};
ranksep={ranksep};
rankdir={orientation};
margin=0.0;
node [margin="0.03" penwidth="0.5" width=.1, height=.1];
edge [arrowsize=.4 penwidth="0.3"]
{newline.join(internal)}
{newline.join(edges)}
{newline.join(leaves)}
{class_legend_gr()}
{instance_gr()}
}}
"""
return DTreeViz(dot)
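# Hedged end-to-end example for dtreeviz() (uses scikit-learn's iris data; the
# output path and target name are illustrative only):
#
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   clf = tree.DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)
#   viz = dtreeviz(clf, iris.data, iris.target,
#                  feature_names=iris.feature_names,
#                  target_name='variety',
#                  class_names=list(iris.target_names))
#   viz.save("/tmp/iris.svg")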
def class_split_viz(node: ShadowDecTreeNode,
X_train: np.ndarray,
y_train: np.ndarray,
colors: Mapping[int, str],
node_heights,
filename: str = None,
ticks_fontsize: int = 8,
label_fontsize: int = 9,
precision=1,
histtype: ('bar', 'barstacked', 'strip') = 'barstacked',
X : np.array = None,
highlight_node : bool = False
):
height_range = (.5, 1.5)
h = prop_size(n=node_heights[node.id], counts=node_heights.values(), output_range=height_range)
figsize=(3.3, h)
fig, ax = plt.subplots(1, 1, figsize=figsize)
feature_name = node.feature_name()
# Get X, y data for all samples associated with this node.
X_feature = X_train[:, node.feature()]
X_feature, y_train = X_feature[node.samples()], y_train[node.samples()]
n_classes = node.shadow_tree.nclasses()
nbins = get_num_bins(histtype, n_classes)
overall_feature_range = (np.min(X_train[:, node.feature()]), np.max(X_train[:, node.feature()]))
overall_feature_range_wide = (overall_feature_range[0]-overall_feature_range[0]*.08,
overall_feature_range[1]+overall_feature_range[1]*.05)
ax.set_xlabel(f"{feature_name}", fontsize=label_fontsize, fontname="Arial",
color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
class_names = node.shadow_tree.class_names
r = overall_feature_range[1]-overall_feature_range[0]
class_values = node.shadow_tree.unique_target_values
X_hist = [X_feature[y_train == cl] for cl in class_values]
if histtype=='strip':
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
sigma = .013
mu = .05
class_step = .08
dot_w = 20
ax.set_ylim(0, mu + n_classes * class_step)
for i, bucket in enumerate(X_hist):
alpha = .6 if len(bucket) > 10 else 1
y_noise = np.random.normal(mu + i * class_step, sigma, size=len(bucket))
ax.scatter(bucket, y_noise, alpha=alpha, marker='o', s=dot_w, c=colors[class_values[i]],
edgecolors=GREY, lw=.3)
else:
X_colors = [colors[cl] for cl in class_values]
binwidth = r / nbins
hist, bins, barcontainers = ax.hist(X_hist,
color=X_colors,
align='mid',
histtype=histtype,
bins=np.arange(overall_feature_range[0],overall_feature_range[1] + binwidth, binwidth),
label=class_names)
# Alter appearance of each bar
for patch in barcontainers:
for rect in patch.patches:
rect.set_linewidth(.5)
rect.set_edgecolor(GREY)
ax.set_yticks([0,max([max(h) for h in hist])])
ax.set_xlim(*overall_feature_range_wide)
ax.set_xticks(overall_feature_range)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
def wedge(ax,x,color):
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
hr = h / (height_range[1] - height_range[0])
th = yr * .15 * 1 / hr # convert to graph coordinates (ugh)
tw = xr * .018
tipy = -0.1 * yr * .15 * 1 / hr
tria = np.array(
[[x, tipy], [x - tw, -th], [x + tw, -th]])
t = patches.Polygon(tria, facecolor=color)
t.set_clip_on(False)
ax.add_patch(t)
ax.text(node.split(), -2 * th,
f"{myround(node.split(),precision)}",
horizontalalignment='center',
fontsize=ticks_fontsize, color=GREY)
wedge(ax, node.split(), color=WEDGE_COLOR)
if highlight_node:
wedge(ax, X[node.feature()], color=HIGHLIGHT_COLOR)
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def class_leaf_viz(node : ShadowDecTreeNode,
colors : List[str],
filename: str):
size = prop_size(node.nsamples(), counts=node.shadow_tree.leaf_sample_counts(),
output_range=(1.01, 1.5))
# we visually need n=1 and n=9 to appear different but diff between 300 and 400 is no big deal
size = np.sqrt(np.log(size))
counts = node.class_counts()
draw_piechart(counts, size=size, colors=colors, filename=filename, label=f"n={node.nsamples()}")
def regr_split_viz(node: ShadowDecTreeNode,
X_train: np.ndarray,
y_train: np.ndarray,
target_name: str,
filename: str = None,
y_range=None,
ticks_fontsize: int = 8,
label_fontsize: int = 9,
precision=1,
X : np.array = None,
highlight_node : bool = False):
figsize = (2.5, 1.1)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.tick_params(colors=GREY)
feature_name = node.feature_name()
ax.set_xlabel(f"{feature_name}", fontsize=label_fontsize, fontname="Arial", color=GREY)
ax.set_ylim(y_range)
if node==node.shadow_tree.root:
ax.set_ylabel(target_name, fontsize=label_fontsize, fontname="Arial", color=GREY)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
ax.tick_params(axis='both', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
# Get X, y data for all samples associated with this node.
X_feature = X_train[:,node.feature()]
X_feature, y_train = X_feature[node.samples()], y_train[node.samples()]
overall_feature_range = (np.min(X_train[:,node.feature()]), np.max(X_train[:,node.feature()]))
ax.set_xlim(*overall_feature_range)
xmin, xmax = overall_feature_range
xr = xmax - xmin
xticks = list(overall_feature_range)
if node.split()>xmin+.10*xr and node.split()<xmax-.1*xr: # don't show split if too close to axis ends
xticks += [node.split()]
ax.set_xticks(xticks)
ax.scatter(X_feature, y_train, s=5, c=BLUE, alpha=.4, lw=.3)
left, right = node.split_samples()
left = y_train[left]
right = y_train[right]
split = node.split()
ax.plot([overall_feature_range[0],split],[np.mean(left),np.mean(left)],'--', color='k', linewidth=1)
ax.plot([split,split],[*y_range],'--', color='k', linewidth=1)
ax.plot([split,overall_feature_range[1]],[np.mean(right),np.mean(right)],'--', color='k', linewidth=1)
def wedge(ax,x,color):
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
hr = figsize[1]
th = yr * .1
tw = xr * .018
tipy = ymin
tria = np.array([[x, tipy], [x - tw, ymin-th], [x + tw, ymin-th]])
t = patches.Polygon(tria, facecolor=color)
t.set_clip_on(False)
ax.add_patch(t)
wedge(ax, node.split(), color=WEDGE_COLOR)
if highlight_node:
wedge(ax, X[node.feature()], color=HIGHLIGHT_COLOR)
plt.tight_layout()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def regr_leaf_viz(node : ShadowDecTreeNode,
y : (pd.Series,np.ndarray),
target_name,
filename:str=None,
y_range=None,
precision=1,
label_fontsize: int = 9,
ticks_fontsize: int = 8):
samples = node.samples()
y = y[samples]
figsize = (.75, .8)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.tick_params(colors=GREY)
m = np.mean(y)
ax.set_ylim(y_range)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_linewidth(.3)
ax.set_xticks([])
# ax.set_yticks(y_range)
ticklabelpad = plt.rcParams['xtick.major.pad']
ax.annotate(f"{target_name}={myround(m,precision)}\nn={len(y)}",
xy=(.5, 0), xytext=(.5, -.5*ticklabelpad), ha='center', va='top',
xycoords='axes fraction', textcoords='offset points',
fontsize = label_fontsize, fontname = "Arial", color = GREY)
ax.tick_params(axis='y', which='major', width=.3, labelcolor=GREY, labelsize=ticks_fontsize)
mu = .5
sigma = .08
X = np.random.normal(mu, sigma, size=len(y))
ax.set_xlim(0, 1)
alpha = .25
ax.scatter(X, y, s=5, c='#225ea8', alpha=alpha, lw=.3)
ax.plot([0,len(node.samples())],[m,m],'--', color=GREY, linewidth=1)
plt.tight_layout()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def draw_legend(shadow_tree, target_name, filename):
n_classes = shadow_tree.nclasses()
class_values = shadow_tree.unique_target_values
class_names = shadow_tree.class_names
color_values = color_blind_friendly_colors[n_classes]
colors = {v:color_values[i] for i,v in enumerate(class_values)}
boxes = []
for i, c in enumerate(class_values):
box = patches.Rectangle((0, 0), 20, 10, linewidth=.4, edgecolor=GREY,
facecolor=colors[c], label=class_names[c])
boxes.append(box)
fig, ax = plt.subplots(1, 1, figsize=(1,1))
leg = ax.legend(handles=boxes,
frameon=True,
shadow=False,
fancybox=True,
loc='center',
title=target_name,
handletextpad=.35,
borderpad=.8,
edgecolor=GREY)
leg.get_frame().set_linewidth(.5)
leg.get_title().set_color(GREY)
leg.get_title().set_fontsize(10)
leg.get_title().set_fontweight('bold')
for text in leg.get_texts():
text.set_color(GREY)
text.set_fontsize(10)
ax.set_xlim(0,20)
ax.set_ylim(0,10)
ax.axis('off')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def draw_piechart(counts,size,colors,filename,label=None):
n_nonzero = np.count_nonzero(counts)
i = np.nonzero(counts)[0][0]
if n_nonzero==1:
counts = [counts[i]]
colors = [colors[i]]
tweak = size * .01
fig, ax = plt.subplots(1, 1, figsize=(size, size))
ax.axis('equal')
# ax.set_xlim(0 - tweak, size + tweak)
# ax.set_ylim(0 - tweak, size + tweak)
ax.set_xlim(0, size-10*tweak)
ax.set_ylim(0, size-10*tweak)
# frame=True needed for some reason to fit pie properly (ugh)
# had to tweak the crap out of this to get tight box around piechart :(
wedges, _ = ax.pie(counts, center=(size/2-6*tweak,size/2-6*tweak), radius=size/2, colors=colors, shadow=False, frame=True)
for w in wedges:
w.set_linewidth(.5)
w.set_edgecolor(GREY)
ax.axis('off')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if label is not None:
ax.text(size/2-6*tweak, -10*tweak, label,
horizontalalignment='center',
verticalalignment='top',
fontsize=9, color=GREY, fontname="Arial")
# plt.tight_layout()
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.close()
def prop_size(n, counts, output_range = (0.00, 0.3)):
min_samples = min(counts)
max_samples = max(counts)
sample_count_range = max_samples - min_samples
if sample_count_range>0:
zero_to_one = (n - min_samples) / sample_count_range
return zero_to_one * (output_range[1] - output_range[0]) + output_range[0]
else:
return output_range[0]
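# Worked example: prop_size(5, counts=[1, 10], output_range=(0.0, 0.3)) maps
# (5 - 1) / 9 ≈ 0.44 of the sample-count range onto the output range, i.e.
# roughly 0.13.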
def get_num_bins(histtype, n_classes):
bins = NUM_BINS[n_classes]
if histtype == 'barstacked':
bins *= 2
return bins
dot_already_tested = False
def check_dot_cairo_support():
    # One-shot check that the local `dot` binary supports the -Tsvg:cairo
    # renderer (the function name is a reconstruction; the original def line is
    # missing from this fragment).
    global dot_already_tested
    if dot_already_tested:
        return
    dot_already_tested = True
    tmp = tempfile.gettempdir()
    dotfilename = f"{tmp}/testing_svg_{getpid()}.dot"
    with open(dotfilename, "w") as f:
        f.write("digraph G { A -> B }\n")
    svgfilename = f"{tmp}/testing_svg_{getpid()}.svg"
    cmd = ["dot", "-Tsvg:cairo", "-o", svgfilename, dotfilename]
    print(' '.join(cmd))
    ok = True
    try:
        # Use graphviz.backend.run (imported above) rather than replacing the process.
        run(cmd, capture_output=False, check=False, quiet=True)
    except Exception:
        ok = False
    return ok
|
|
from __future__ import unicode_literals
from ..predicate import PredicateRegistry, match_instance, match_key
from ..cache import DictCachingKeyLookup, LruCachingKeyLookup
from ..error import RegistrationError
from ..dispatch import dispatch
import pytest
def register_value(generic, key, value):
"""Low-level function that directly uses the internal registry of the
generic function to register an implementation.
"""
generic.register.__self__.registry.register(key, value)
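# Note: register_value() deliberately bypasses the public registration
# machinery; e.g. register_value(view, (Foo, '', 'GET'), foo_default) writes
# foo_default straight into the underlying PredicateRegistry under that key,
# which is exactly what the tests below rely on.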
def test_registry():
class Foo(object):
pass
class FooSub(Foo):
pass
@dispatch()
def view(self, request):
raise NotImplementedError()
def get_model(self, request):
return self
def get_name(self, request):
return request.name
def get_request_method(self, request):
return request.request_method
def model_fallback(self, request):
return "Model fallback"
def name_fallback(self, request):
return "Name fallback"
def request_method_fallback(self, request):
return "Request method fallback"
view.add_predicates([
match_instance('model', get_model, model_fallback),
match_key('name', get_name, name_fallback),
match_key('request_method', get_request_method,
request_method_fallback)])
def foo_default(self, request):
return "foo default"
def foo_post(self, request):
return "foo default post"
def foo_edit(self, request):
return "foo edit"
register_value(view, (Foo, '', 'GET'), foo_default)
register_value(view, (Foo, '', 'POST'), foo_post)
register_value(view, (Foo, 'edit', 'POST'), foo_edit)
key_lookup = view.key_lookup
assert key_lookup.component((Foo, '', 'GET')) is foo_default
assert key_lookup.component((Foo, '', 'POST')) is foo_post
assert key_lookup.component((Foo, 'edit', 'POST')) is foo_edit
assert key_lookup.component((FooSub, '', 'GET')) is foo_default
assert key_lookup.component((FooSub, '', 'POST')) is foo_post
class Request(object):
def __init__(self, name, request_method):
self.name = name
self.request_method = request_method
assert view(
Foo(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('edit', 'POST')) == 'foo edit'
class Bar(object):
pass
assert view(
Bar(), Request('', 'GET')) == 'Model fallback'
assert view(
Foo(), Request('dummy', 'GET')) == 'Name fallback'
assert view(
Foo(), Request('', 'PUT')) == 'Request method fallback'
assert view(
FooSub(), Request('dummy', 'GET')) == 'Name fallback'
def test_predicate_registry_class_lookup():
reg = PredicateRegistry(match_instance('obj'))
class Document(object):
pass
class SpecialDocument(Document):
pass
reg.register((Document,), 'document line count')
reg.register((SpecialDocument,),
'special document line count')
assert (reg.component((Document,)) ==
'document line count')
assert (reg.component((SpecialDocument,)) ==
'special document line count')
class AnotherDocument(Document):
pass
assert (reg.component((AnotherDocument,)) ==
'document line count')
class Other(object):
pass
assert reg.component((Other,)) is None
def test_predicate_registry_target_find_specific():
reg = PredicateRegistry(match_instance('obj'))
reg2 = PredicateRegistry(match_instance('obj'))
class Document(object):
pass
class SpecialDocument(Document):
pass
def linecount(obj):
pass
def special_linecount(obj):
pass
reg.register((Document,), 'line count')
reg2.register((Document,), 'special line count')
assert reg.component((Document,)) == 'line count'
assert (reg2.component((Document,)) ==
'special line count')
assert reg.component((SpecialDocument,)) == 'line count'
assert (reg2.component((SpecialDocument,)) ==
'special line count')
def test_registry_no_sources():
reg = PredicateRegistry()
class Animal(object):
pass
reg.register((), 'elephant')
assert reg.component(()) == 'elephant'
def test_register_twice_with_predicate():
reg = PredicateRegistry(match_instance('obj'))
class Document(object):
pass
reg.register((Document,), 'document line count')
with pytest.raises(RegistrationError):
reg.register((Document,), 'another line count')
def test_register_twice_without_predicates():
reg = PredicateRegistry()
reg.register((), 'once')
with pytest.raises(RegistrationError):
reg.register((), 'twice')
def test_dict_caching_registry():
class Foo(object):
pass
class FooSub(Foo):
pass
def get_model(self, request):
return self
def get_name(self, request):
return request.name
def get_request_method(self, request):
return request.request_method
def model_fallback(self, request):
return "Model fallback"
def name_fallback(self, request):
return "Name fallback"
def request_method_fallback(self, request):
return "Request method fallback"
def get_caching_key_lookup(r):
return DictCachingKeyLookup(r)
@dispatch(
match_instance('model', get_model, model_fallback),
match_key('name', get_name, name_fallback),
match_key('request_method', get_request_method,
request_method_fallback),
get_key_lookup=get_caching_key_lookup)
def view(self, request):
raise NotImplementedError()
def foo_default(self, request):
return "foo default"
def foo_post(self, request):
return "foo default post"
def foo_edit(self, request):
return "foo edit"
register_value(view, (Foo, '', 'GET'), foo_default)
register_value(view, (Foo, '', 'POST'), foo_post)
register_value(view, (Foo, 'edit', 'POST'), foo_edit)
class Request(object):
def __init__(self, name, request_method):
self.name = name
self.request_method = request_method
assert view(Foo(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('edit', 'POST')) == 'foo edit'
assert view.by_predicates(
model=Foo, name='', request_method='GET').key == (Foo, '', 'GET')
# use a bit of inside knowledge to check the cache is filled
assert view.key_lookup.component.__self__.get(
(Foo, '', 'GET')) is not None
assert view.key_lookup.component.__self__.get(
(FooSub, '', 'GET')) is not None
assert view.key_lookup.component.__self__.get(
(FooSub, 'edit', 'POST')) is not None
# now let's do this again. this time things come from the component cache
assert view(Foo(), Request('', 'GET')) == 'foo default'
assert view(FooSub(), Request('', 'GET')) == 'foo default'
assert view(FooSub(), Request('edit', 'POST')) == 'foo edit'
key_lookup = view.key_lookup
# prime and check the all cache
assert view.by_args(Foo(), Request('', 'GET')).all_matches == [foo_default]
assert key_lookup.all.__self__.get((Foo, '', 'GET')) is not None
# should be coming from cache now
assert view.by_args(Foo(), Request('', 'GET')).all_matches == [foo_default]
class Bar(object):
pass
assert view(Bar(), Request('', 'GET')) == 'Model fallback'
assert view(Foo(), Request('dummy', 'GET')) == 'Name fallback'
assert view(Foo(), Request('', 'PUT')) == 'Request method fallback'
assert view(FooSub(), Request('dummy', 'GET')) == 'Name fallback'
# fallbacks get cached too
assert key_lookup.fallback.__self__.get((Bar, '', 'GET')) is model_fallback
# these come from the fallback cache now
assert view(Bar(), Request('', 'GET')) == 'Model fallback'
assert view(Foo(), Request('dummy', 'GET')) == 'Name fallback'
assert view(Foo(), Request('', 'PUT')) == 'Request method fallback'
assert view(FooSub(), Request('dummy', 'GET')) == 'Name fallback'
def test_lru_caching_registry():
class Foo(object):
pass
class FooSub(Foo):
pass
def get_model(self, request):
return self
def get_name(self, request):
return request.name
def get_request_method(self, request):
return request.request_method
def model_fallback(self, request):
return "Model fallback"
def name_fallback(self, request):
return "Name fallback"
def request_method_fallback(self, request):
return "Request method fallback"
def get_caching_key_lookup(r):
return LruCachingKeyLookup(r, 100, 100, 100)
@dispatch(
match_instance('model', get_model, model_fallback),
match_key('name', get_name, name_fallback),
match_key('request_method', get_request_method,
request_method_fallback),
get_key_lookup=get_caching_key_lookup)
def view(self, request):
raise NotImplementedError()
def foo_default(self, request):
return "foo default"
def foo_post(self, request):
return "foo default post"
def foo_edit(self, request):
return "foo edit"
register_value(view, (Foo, '', 'GET'), foo_default)
register_value(view, (Foo, '', 'POST'), foo_post)
register_value(view, (Foo, 'edit', 'POST'), foo_edit)
class Request(object):
def __init__(self, name, request_method):
self.name = name
self.request_method = request_method
assert view(Foo(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('', 'GET')) == 'foo default'
assert view(
FooSub(), Request('edit', 'POST')) == 'foo edit'
assert view.by_predicates(
model=Foo, name='', request_method='GET').key == (Foo, '', 'GET')
# use a bit of inside knowledge to check the cache is filled
component_cache = view.key_lookup.component.__closure__[0].cell_contents
assert component_cache.get(((Foo, '', 'GET'),)) is not None
assert component_cache.get(((FooSub, '', 'GET'),)) is not None
assert component_cache.get(((FooSub, 'edit', 'POST'),)) is not None
# now let's do this again. this time things come from the component cache
assert view(Foo(), Request('', 'GET')) == 'foo default'
assert view(FooSub(), Request('', 'GET')) == 'foo default'
assert view(FooSub(), Request('edit', 'POST')) == 'foo edit'
all_cache = view.key_lookup.all.__closure__[0].cell_contents
# prime and check the all cache
assert view.by_args(Foo(), Request('', 'GET')).all_matches == [foo_default]
assert all_cache.get(((Foo, '', 'GET'),)) is not None
# should be coming from cache now
assert view.by_args(Foo(), Request('', 'GET')).all_matches == [foo_default]
class Bar(object):
pass
assert view(Bar(), Request('', 'GET')) == 'Model fallback'
assert view(Foo(), Request('dummy', 'GET')) == 'Name fallback'
assert view(Foo(), Request('', 'PUT')) == 'Request method fallback'
assert view(FooSub(), Request('dummy', 'GET')) == 'Name fallback'
# fallbacks get cached too
fallback_cache = view.key_lookup.fallback.__closure__[0].cell_contents
assert fallback_cache.get(((Bar, '', 'GET'),)) is model_fallback
# these come from the fallback cache now
assert view(Bar(), Request('', 'GET')) == 'Model fallback'
assert view(Foo(), Request('dummy', 'GET')) == 'Name fallback'
assert view(Foo(), Request('', 'PUT')) == 'Request method fallback'
assert view(FooSub(), Request('dummy', 'GET')) == 'Name fallback'
|
|
import os
import warnings
from django.conf import settings, global_settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import SimpleTestCase, TransactionTestCase, TestCase, signals
from django.test.utils import override_settings
from django.utils import unittest, six
@override_settings(TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@override_settings(TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertEqual(settings.TEST, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
def test_override(self):
self.assertEqual(settings.TEST, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
try:
super(ClassDecoratedTestCase, self).test_max_recursion_error()
except RuntimeError:
self.fail()
class SettingsTests(TestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
self.assertRaises(AttributeError, getattr, settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override = override_settings(TEST='override')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_class_decorator(self):
        # SimpleTestCase can be decorated by override_settings, but not unittest.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with six.assertRaisesRegex(self, Exception,
"Only subclasses of Django SimpleTestCase*"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertEqual(self.testvalue, None)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
def test_override_settings_delete(self):
"""
        Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
with self.settings(USE_I18N=False):
del settings.USE_I18N
self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
self.assertEqual(settings.USE_I18N, previous_i18n)
def test_override_settings_nested(self):
"""
Test that override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
def test_allowed_include_roots_string(self):
"""
ALLOWED_INCLUDE_ROOTS is not allowed to be incorrectly set to a string
rather than a tuple.
"""
self.assertRaises(ValueError, setattr, settings,
'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TrailingSlashURLTests(TestCase):
"""
Tests for the MEDIA_URL and STATIC_URL settings.
They must end with a slash to ensure there's a deterministic way to build
paths in templates.
"""
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
self._original_static_url = self.settings_module.STATIC_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
self.settings_module.STATIC_URL = self._original_static_url
def test_blank(self):
"""
The empty string is accepted, even though it doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = ''
self.assertEqual('', self.settings_module.STATIC_URL)
def test_end_slash(self):
"""
It works if the value ends in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/'
self.assertEqual('http://static.foo.com/',
self.settings_module.STATIC_URL)
def test_no_end_slash(self):
"""
An ImproperlyConfigured exception is raised if the value doesn't end
in a slash.
"""
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = 'http://media.foo.com'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = 'http://static.foo.com'
def test_double_slash(self):
"""
If the value ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
self.assertEqual('http://media.foo.com/stupid//',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/stupid//'
self.assertEqual('http://static.foo.com/stupid//',
self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
settings_module = settings
def setUp(self):
self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
def tearDown(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
def test_none(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = None
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_without_xheader(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_wrong(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_right(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
self.assertEqual(req.is_secure(), True)
|
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Views for `lino.modlib.bootstrap3`.
"""
from past.utils import old_div
from django import http
from django.conf import settings
from django.views.generic import View
from django.core import exceptions
from django.utils.translation import gettext as _
from django.utils.translation import get_language
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect
# from django.contrib import auth
from lino.core import auth
# from lino.api import dd
from lino.core import constants
# from lino.core import auth
from lino.core.requests import BaseRequest
from lino.core.tablerequest import TableRequest
from lino.core.views import action_request
from lino.core.utils import navinfo
from etgen.html import E, tostring
from etgen import html as xghtml
PLAIN_PAGE_LENGTH = 15
MENUS = dict()
def http_response(ar, tplname, context):
"Deserves a docstring"
u = ar.get_user()
lang = get_language()
k = (u.user_type, lang)
menu = MENUS.get(k, None)
if menu is None:
menu = settings.SITE.get_site_menu(u.user_type)
bs3 = settings.SITE.plugins.bootstrap3
if False: # 20150803 home button now in base.html
assert bs3.renderer is not None
url = bs3.build_plain_url()
menu.add_url_button(url, label=_("Home"))
e = bs3.renderer.show_menu(ar, menu)
menu = tostring(e)
MENUS[k] = menu
context.update(menu=menu)
context = ar.get_printable_context(**context)
context['ar'] = ar
context['memo'] = ar.parse_memo # MEMO_PARSER.parse
env = settings.SITE.plugins.jinja.renderer.jinja_env
template = env.get_template(tplname)
response = http.HttpResponse(
template.render(**context),
content_type='text/html;charset="utf-8"')
return response
def buttons2pager(buttons, title=None):
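    """Build a Bootstrap ``pagination`` list from ``(symbol, label, url)`` triples.

    An optional `title` is prepended as a plain list item; buttons whose url
    is None are rendered as disabled.
    """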
items = []
if title:
items.append(E.li(E.span(title)))
for symbol, label, url in buttons:
if url is None:
items.append(E.li(E.span(symbol), **{'class':"disabled"}))
else:
items.append(E.li(E.a(symbol, href=url)))
# Bootstrap version 2.x
# return E.div(E.ul(*items), class_='pagination')
return E.ul(*items, **{'class':'pagination pagination-sm'})
def table2html(ar, as_main=True):
"""Represent the given table request as an HTML table.
`ar` is the request to be rendered, an instance of
:class:`lino.core.tablerequest.TableRequest`.
    The returned HTML is enclosed in a ``<div>`` tag and generated using
:mod:`etgen.html`.
If `as_main` is True, include additional elements such as a paging
toolbar. (This argument is currently being ignored.)
"""
# as_main = True
t = xghtml.Table()
t.attrib.update(**{'class':"table table-striped table-hover"})
if ar.limit is None:
ar.limit = PLAIN_PAGE_LENGTH
pglen = ar.limit
if ar.offset is None:
page = 1
else:
"""
(assuming pglen is 5)
offset page
0 1
5 2
"""
page = int(old_div(ar.offset, pglen)) + 1
ar.dump2html(t, ar.sliced_data_iterator, header_links=as_main)
if not as_main:
url = ar.get_request_url() # open in own window
return E.div(
E.div(
E.div(
E.a(
E.span(**{'class':"glyphicon glyphicon-folder-open"}),
href=url, style="margin-left: 4px;",
**{'class':"btn btn-default pull-right"}),
E.h5(ar.get_title(), style="display: inline-block;"),
**{'class': "panel-title"}),
**{'class':"panel-heading"}),
t.as_element(),
style="display: inline-block;",
**{'class':"panel panel-default"})
buttons = []
    kw = {}
if pglen != PLAIN_PAGE_LENGTH:
kw[constants.URL_PARAM_LIMIT] = pglen
if page > 1:
kw[constants.URL_PARAM_START] = pglen * (page - 2)
prev_url = ar.get_request_url(**kw)
kw[constants.URL_PARAM_START] = 0
first_url = ar.get_request_url(**kw)
else:
prev_url = None
first_url = None
buttons.append(('<<', _("First page"), first_url))
buttons.append(('<', _("Previous page"), prev_url))
next_start = pglen * page
if next_start < ar.get_total_count():
kw[constants.URL_PARAM_START] = next_start
next_url = ar.get_request_url(**kw)
last_page = int(old_div((ar.get_total_count() - 1), pglen))
kw[constants.URL_PARAM_START] = pglen * last_page
last_url = ar.get_request_url(**kw)
else:
next_url = None
last_url = None
buttons.append(('>', _("Next page"), next_url))
buttons.append(('>>', _("Last page"), last_url))
return E.div(buttons2pager(buttons), t.as_element())
def layout2html(ar, elem):
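    """Render the detail layout of the bound action for row `elem` as an HTML form.

    Uses the window layout's handle for the default front end to turn the main
    panel into plain HTML elements.
    """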
wl = ar.bound_action.get_window_layout()
#~ print 20120901, wl.main
lh = wl.get_layout_handle(settings.SITE.kernel.default_ui)
items = list(lh.main.as_plain_html(ar, elem))
# if navigator:
# items.insert(0, navigator)
#~ print tostring(E.div())
#~ if len(items) == 0: return ""
return E.form(*items)
#~ print 20120901, lh.main.__html__(ar)
class List(View):
"""Render a list of records.
"""
def get(self, request, app_label=None, actor=None):
ar = action_request(app_label, actor, request, request.GET, True)
ar.renderer = settings.SITE.plugins.bootstrap3.renderer
context = dict(
title=ar.get_title(),
heading=ar.get_title(),
)
if isinstance(ar, TableRequest):
context.update(main=table2html(ar))
else:
context.update(main=layout2html(ar, None))
context.update(ar=ar)
return http_response(ar, ar.actor.list_html_template, context)
class Element(View):
"""Render a single record.
"""
def get(self, request, app_label=None, actor=None, pk=None):
# print(request, app_label, actor, pk)
ar = action_request(app_label, actor, request, request.GET, False)
ar.renderer = settings.SITE.plugins.bootstrap3.renderer
navigator = None
if pk and pk != '-99999' and pk != '-99998':
elem = ar.get_row_by_pk(pk)
if elem is None:
raise http.Http404("%s has no row with primary key %r" %
(ar.actor, pk))
#~ raise Exception("20120327 %s.get_row_by_pk(%r)" % (rpt,pk))
if ar.actor.show_detail_navigator:
ni = navinfo(ar.data_iterator, elem)
if ni:
# m = elem.__class__
buttons = []
#~ buttons.append( ('*',_("Home"), '/' ))
buttons.append(
('<<', _("First page"), ar.pk2url(ni['first'])))
buttons.append(
('<', _("Previous page"), ar.pk2url(ni['prev'])))
buttons.append(
('>', _("Next page"), ar.pk2url(ni['next'])))
buttons.append(
('>>', _("Last page"), ar.pk2url(ni['last'])))
navigator = buttons2pager(buttons)
else:
navigator = E.p("No navinfo")
else:
elem = None
# main = E.div(
# E.div(E.div(E.h5(ar.get_title(),
# style="display: inline-block;"),
# class_="panel-title"),
# class_="panel-heading"),
# E.div(layout2html(ar, elem),class_="panel-body"), # Content
# class_="panel panel-default",
# # style="display: inline-block;"
# )
main = layout2html(ar, elem)
# The `method="html"` argument isn't available in Python 2.6,
# only 2.7. It is useful to avoid side effects in case of
# empty elements: the default method (xml) writes an empty
# E.div() as "<div/>" while in HTML5 it must be "<div></div>"
# (and the ending / is ignored).
#~ return tostring(main, method="html")
#~ return tostring(main)
# return main
context = dict(
title=ar.get_action_title(),
obj=elem,
form=main,
navigator=navigator,
)
#~ template = web.jinja_env.get_template('detail.html')
context.update(ar=ar)
return http_response(ar, ar.actor.detail_html_template, context)
class Authenticate(View):
def get(self, request, *args, **kw):
action_name = request.GET.get(constants.URL_PARAM_ACTION_NAME)
if action_name == 'logout':
username = request.session.pop('username', None)
auth.logout(request)
# request.user = settings.SITE.user_model.get_anonymous_user()
# request.session.pop('password', None)
#~ username = request.session['username']
#~ del request.session['password']
target = '/'
return http.HttpResponseRedirect(target)
# ar = BaseRequest(request)
# ar.success("User %r logged out." % username)
# return ar.renderer.render_action_response(ar)
raise http.Http404()
def post(self, request, *args, **kw):
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(
request, username=username, password=password)
auth.login(request, user)
target = '/'
return http.HttpResponseRedirect(target)
# ar = BaseRequest(request)
# mw = auth.get_auth_middleware()
# msg = mw.authenticate(username, password, request)
# if msg:
# request.session.pop('username', None)
# ar.error(msg)
# else:
# request.session['username'] = username
# # request.session['password'] = password
# # ar.user = request....
# ar.success(("Now logged in as %r" % username))
# # print "20150428 Now logged in as %r (%s)" % (username, user)
# return ar.renderer.render_action_response(ar)
class Index(View):
"""
Render the main page.
"""
@method_decorator(ensure_csrf_cookie)
def get(self, request, *args, **kw):
# raise Exception("20171122 {} {}".format(
# get_language(), settings.MIDDLEWARE_CLASSES))
ui = settings.SITE.plugins.bootstrap3
# print("20170607", request.user)
# assert ui.renderer is not None
ar = BaseRequest(
# user=user,
request=request,
renderer=ui.renderer)
return index_response(ar)
def index_response(ar):
ui = settings.SITE.plugins.bootstrap3
main = settings.SITE.get_main_html(ar.request, extjs=ui)
main = ui.renderer.html_text(main)
context = dict(
title=settings.SITE.title,
main=main,
)
# if settings.SITE.user_model is None:
# user = auth.AnonymousUser.instance()
# else:
# user = request.subst_user or request.user
# context.update(ar=ar)
return http_response(ar, 'bootstrap3/index.html', context)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#Copyright 2013 Cloudbase Solutions SRL
#Copyright 2013 Pedro Navarro Perez
#All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Pedro Navarro Perez
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
import eventlet
import platform
import re
import sys
import time
from quantum.agent import rpc as agent_rpc
from quantum.common import config as logging_config
from quantum.common import topics
from quantum import context
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum.openstack.common.rpc import dispatcher
from quantum.plugins.hyperv.agent import utils
from quantum.plugins.hyperv.common import constants
LOG = logging.getLogger(__name__)
agent_opts = [
cfg.ListOpt(
'physical_network_vswitch_mappings',
default=[],
help=_('List of <physical_network>:<vswitch> '
'where the physical networks can be expressed with '
               'wildcards, e.g.: "*:external"')),
cfg.StrOpt(
'local_network_vswitch',
default='private',
help=_('Private vswitch name used for local networks')),
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
]
CONF = cfg.CONF
CONF.register_opts(agent_opts, "AGENT")
class HyperVQuantumAgent(object):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self):
self._utils = utils.HyperVUtils()
self._polling_interval = CONF.AGENT.polling_interval
self._load_physical_network_mappings()
self._network_vswitch_map = {}
self._setup_rpc()
def _setup_rpc(self):
self.agent_id = 'hyperv_%s' % platform.node()
self.topic = topics.AGENT
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.dispatcher = self._create_rpc_dispatcher()
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.PORT, topics.DELETE],
[constants.TUNNEL, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.dispatcher,
self.topic,
consumers)
def _load_physical_network_mappings(self):
self._physical_network_mappings = {}
for mapping in CONF.AGENT.physical_network_vswitch_mappings:
parts = mapping.split(':')
if len(parts) != 2:
LOG.debug(_('Invalid physical network mapping: %s'), mapping)
else:
pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
vswitch = parts[1].strip()
self._physical_network_mappings[re.compile(pattern)] = vswitch
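    # Illustrative example (values are made up): a mapping entry such as
    # "physnet*:external" is split on ':'; the left side is re.escape()d and
    # its '*' wildcards become '.*', so the stored entry is roughly
    # {re.compile('physnet.*'): 'external'}.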
def _get_vswitch_for_physical_network(self, phys_network_name):
for compre in self._physical_network_mappings:
if phys_network_name is None:
phys_network_name = ''
if compre.match(phys_network_name):
return self._physical_network_mappings[compre]
# Not found in the mappings, the vswitch has the same name
return phys_network_name
def _get_network_vswitch_map_by_port_id(self, port_id):
for network_id, map in self._network_vswitch_map.iteritems():
if port_id in map['ports']:
return (network_id, map)
def network_delete(self, context, network_id=None):
LOG.debug(_("network_delete received. "
"Deleting network %s"), network_id)
# The network may not be defined on this agent
if network_id in self._network_vswitch_map:
self._reclaim_local_network(network_id)
else:
LOG.debug(_("Network %s not defined on agent."), network_id)
def port_delete(self, context, port_id=None):
LOG.debug(_("port_delete received"))
self._port_unbound(port_id)
def port_update(self, context, port=None, network_type=None,
segmentation_id=None, physical_network=None):
LOG.debug(_("port_update received"))
self._treat_vif_port(
port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
def _create_rpc_dispatcher(self):
return dispatcher.RpcDispatcher([self])
def _get_vswitch_name(self, network_type, physical_network):
if network_type != constants.TYPE_LOCAL:
vswitch_name = self._get_vswitch_for_physical_network(
physical_network)
else:
vswitch_name = CONF.AGENT.local_network_vswitch
return vswitch_name
def _provision_network(self, port_id,
net_uuid, network_type,
physical_network,
segmentation_id):
LOG.info(_("Provisioning network %s"), net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == constants.TYPE_VLAN:
self._utils.add_vlan_id_to_vswitch(segmentation_id, vswitch_name)
elif network_type == constants.TYPE_FLAT:
self._utils.set_vswitch_mode_access(vswitch_name)
elif network_type == constants.TYPE_LOCAL:
#TODO (alexpilotti): Check that the switch type is private
#or create it if not existing
pass
else:
raise utils.HyperVException(_("Cannot provision unknown network "
"type %s for network %s"),
network_type, net_uuid)
map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = map
def _reclaim_local_network(self, net_uuid):
LOG.info(_("Reclaiming local network %s"), net_uuid)
map = self._network_vswitch_map[net_uuid]
if map['network_type'] == constants.TYPE_VLAN:
LOG.info(_("Reclaiming VLAN ID %s "), map['vlan_id'])
self._utils.remove_vlan_id_from_vswitch(
map['vlan_id'], map['vswitch_name'])
else:
raise utils.HyperVException(_("Cannot reclaim unsupported "
"network type %s for network %s"),
map['network_type'], net_uuid)
del self._network_vswitch_map[net_uuid]
def _port_bound(self, port_id,
net_uuid,
network_type,
physical_network,
segmentation_id):
LOG.debug(_("Binding port %s"), port_id)
if net_uuid not in self._network_vswitch_map:
self._provision_network(
port_id, net_uuid, network_type,
physical_network, segmentation_id)
map = self._network_vswitch_map[net_uuid]
map['ports'].append(port_id)
self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id)
if network_type == constants.TYPE_VLAN:
LOG.info(_('Binding VLAN ID %s to switch port %s'),
segmentation_id, port_id)
self._utils.set_vswitch_port_vlan_id(
segmentation_id,
port_id)
elif network_type == constants.TYPE_FLAT:
#Nothing to do
pass
elif network_type == constants.TYPE_LOCAL:
#Nothing to do
pass
else:
LOG.error(_('Unsupported network type %s'), network_type)
    def _port_unbound(self, port_id):
        # The lookup returns None when the port is not bound on this agent;
        # guard against that before unpacking the (net_uuid, map) tuple.
        vswitch_map = self._get_network_vswitch_map_by_port_id(port_id)
        if vswitch_map is None:
            LOG.info(_('Port %s is not available on this agent'), port_id)
            return
        (net_uuid, map) = vswitch_map
LOG.debug(_("Unbinding port %s"), port_id)
self._utils.disconnect_switch_port(map['vswitch_name'], port_id, True)
if not map['ports']:
self._reclaim_local_network(net_uuid)
def _update_ports(self, registered_ports):
ports = self._utils.get_vnic_ids()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def _treat_vif_port(self, port_id, network_id, network_type,
physical_network, segmentation_id,
admin_state_up):
if self._utils.vnic_port_exists(port_id):
if admin_state_up:
self._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id)
else:
self._port_unbound(port_id)
else:
LOG.debug(_("No port %s defined on agent."), port_id)
def _treat_devices_added(self, devices):
resync = False
for device in devices:
LOG.info(_("Adding port %s") % device)
try:
device_details = self.plugin_rpc.get_device_details(
self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_(
"Unable to get port details for device %s: %s"),
device, e)
resync = True
continue
if 'port_id' in device_details:
LOG.info(_(
"Port %(device)s updated. Details: %(device_details)s") %
locals())
self._treat_vif_port(
device_details['port_id'],
device_details['network_id'],
device_details['network_type'],
device_details['physical_network'],
device_details['segmentation_id'],
device_details['admin_state_up'])
return resync
def _treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_("Removing port %s"), device)
try:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("Removing port failed for device %s: %s"),
device, e)
resync = True
continue
self._port_unbound(device)
return resync
def _process_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self._treat_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self._treat_devices_removed(port_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def daemon_loop(self):
sync = True
ports = set()
while True:
try:
start = time.time()
if sync:
LOG.info(_("Agent out of sync with plugin!"))
ports.clear()
sync = False
port_info = self._update_ports(ports)
# notify plugin about port deltas
if port_info:
LOG.debug(_("Agent loop has new devices!"))
# If treat devices fails - must resync with plugin
sync = self._process_network_ports(port_info)
ports = port_info['current']
except Exception as e:
LOG.exception(_("Error in agent event loop: %s"), e)
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self._polling_interval):
time.sleep(self._polling_interval - elapsed)
else:
LOG.debug(_("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)"),
{'polling_interval': self._polling_interval,
'elapsed': elapsed})
def main():
eventlet.monkey_patch()
cfg.CONF(project='quantum')
logging_config.setup_logging(cfg.CONF)
plugin = HyperVQuantumAgent()
# Start everything.
LOG.info(_("Agent initialized successfully, now running... "))
plugin.daemon_loop()
sys.exit(0)
|
|
#!/usr/bin/python3
#
# A simple python program to demonstrate writing numbers to the console
# in BIG ASCII style.
#
import sys
import os
import collections
import re
#import getopt
import argparse
def setup_ascii_dictionary(dictionary_file):
    # Open our ascii character representation database and translate it into a
    # dictionary mapping each symbol/letter to its position in the pixel map:
    # a list of 7 rows, where each row holds that row of the ascii artwork for
    # every symbol.
try:
asciiCharacterDB = open(dictionary_file,"r")
# The first line in our database is always a string of the letters that are being
# represented. So we get our "alphabet" that will be used to create our
# dictionary later {letter(key), drawingAs7RowList(value)}
alphabet = asciiCharacterDB.readline()
        alphabet = re.findall(r"\S", alphabet)
# The original DB had an extra line in between the characters and their
# representation for readability. So we move the pointer ahead one line
asciiCharacterDB.readline()
# File each row of each character into a list
pixel_map = asciiCharacterDB.readlines()
    except Exception as err:
        print("Error reading database file (check format)!", err)
        sys.exit(1)
clean_pixel_map = []
for i in pixel_map:
        clean_pixel_map.append(re.findall(r'(?<=" ")[^"]+', i))
    # Setup the dictionary using a dictionary comprehension
alphabet_dictionary = {character: number for number,
character in enumerate(alphabet)}
return alphabet_dictionary, clean_pixel_map
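# Illustrative example (values are made up): if the first line of the font
# file is "AB", the dictionary comprehension in setup_ascii_dictionary()
# produces {'A': 0, 'B': 1}; that index is the position of each character's
# artwork within every row of clean_pixel_map.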
def write_ascii(phrase_to_translate, alphabet_dictionary, clean_pixel_map,
output_file):
# Main program, write ascii to screen :-)
try:
for row in range(7): # iterate through every row of the screen
line = ""
# iterate through user input grabbing
# each character and adding it's ascii representation
# of the current row to the line we are on
for column in range(len(phrase_to_translate)):
keyValue = 0
if phrase_to_translate[column] == ' ':
line += ' '
else:
character = phrase_to_translate[column] # grab user input
# lookup the position of
# character in our dictionary
# this should also match the position
# of the character in the character database
keyValue = alphabet_dictionary.get(character)
symbols = clean_pixel_map[row] # grab current row of every character from
# our database
line += symbols[keyValue] # add the row of ascii for the current
# character/column of our users input
# to the line we are on
# print current line to the screen for the row we are on
print(line)
if output_file:
output_to_file(output_file, line)
except IndexError:
print("Index Error!")
except ValueError as err:
        print(err, "- unacceptable value in phrase:", phrase_to_translate)
except TypeError as err:
print("Error: attempt to translate symbol not defined in current font file.")
def output_to_file(output_file, current_line):
with open(output_file,'a') as outPut:
outPut.write(current_line)
outPut.write('\n')
def main():
global dictionary_file
global output_file
global phrase_to_translate
# The new way to parse command line using argparse
if __name__ == "__main__":
# Create argument parser from commandline
parser = argparse.ArgumentParser(description='[*] writeASCII: \
Text to ASCII conversion tool',
formatter_class=\
argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('Words',
metavar='Words',
nargs='+',
help='Phrase to be converted to ASCII')
parser.add_argument('-f', '--font',
action='store',
default='asciiCharacters.ezfnt',
help='Db/Font used for translation',
dest='character_db')
parser.add_argument('-o', '--output',
action='store',
help='Output results to output_file',
dest='output_file')
parser.add_argument('-l', '--lengthwise',
action='store_true',
help='Force phrase to run horizontal. Often good \
for writing output to a file. Usually not so \
good for console output')
parser.add_argument('-v', '--vertical',
action='store_true',
help='Force phrase to run fully vertical. Often good \
                            for console output and/or grabbing individual \
characters one after another')
args = parser.parse_args()
# Setup our variables based on the arguments
if os.path.exists(args.character_db):
dictionary_file = args.character_db
print('Using:', dictionary_file, ' as font for translation')
else:
parser.print_usage()
print('File:', args.character_db, ' does not exist!')
return(0)
#if args.output_file and os.path.exists(args.output_file):
# print('Are you sure you want to overwrite ', args.output_file,' ?')
output_file = args.output_file
# Setup the pixelmap and dictionary to lookup correct position in pixelmap
alphabet_dictionary, clean_pixel_map = setup_ascii_dictionary(dictionary_file)
# Easy way to call the main part that outputs
def heart_beat():
write_ascii(word, alphabet_dictionary,
clean_pixel_map, output_file)
        # We either output vertical, horizontal, or each word on
        # its own vertical line
if args.vertical:
phrase_to_translate = ''.join(args.Words)
for word in phrase_to_translate:
heart_beat()
elif args.lengthwise:
word = ' '.join(args.Words)
heart_beat()
else:
phrase_to_translate = args.Words
for word in phrase_to_translate:
heart_beat()
main()
# The old way of setting up the dictionary, now replaced with a concise
# dictionary comprehension
#
# count = 0
# for character in alphabet:
# alphabet_dictionary[character] = count
# count += 1
# --The old way to parse command line using getopt-- (Deprecated)
# usage() comes from the old way... although I still like the visual look of my usage
# better, so until I figure out how to re-format argparse help I'm keeping this
# def usage():
# print("[*] writeASCII: Text to ASCII conversion tool")
# print("Usage: writeASCII.py -d dictionary -o output_file -p phrase")
# print()
# print("-h --help - This usage message")
# print("-d --dictionary - Use dictionary as DB for translation")
# print("-o --output - Output results to output_file")
# print("-p --phrase - Phrase to translate (not optional)")
# print(" phrase must be...")
# print()
# print("-d and -o are optional.")
# print("-d = asciiCharacters.db by default")
# print("-o = stdout by default")
# print()
# print("Examples:")
# print("writeASCII.py -d myAsciiCharacters.db -p \"Translate me\"")
# print("writeASCII.py -d myAsciiCharacters.db -o myBigAscii.txt -p \"Transl$
# print("writeASCII.py -p \"Translate me\"")
# sys.exit(0)
# --The old way to parse command line using getopt-- (Deprecated)
#
# if not len(sys.argv[1:]):
# usage()
#
# try:
# opts, args = getopt.getopt(sys.argv[1:], "hd:o:p:",
# ["dictionary", "output", "phrase"])
#
# except getopt.GetoptError as err:
# print(str(err))
# usage()
#
# for o,a in opts:
# if o in ("-h", "--help"):
# usage()
# elif o in ("-d", "--dictionary"):
# dictionaryFile = a
# elif o in ("-o", "--output"):
# outputFile = a
# elif o in ("-p", "--phrase"):
# phraseToTranslate = a
# else:
# assert False, "Unhandled Option"
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.prolog
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Prolog and Prolog-like languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['PrologLexer', 'LogtalkLexer']
class PrologLexer(RegexLexer):
"""
Lexer for Prolog files.
"""
name = 'Prolog'
aliases = ['prolog']
filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
mimetypes = ['text/x-prolog']
flags = re.UNICODE | re.MULTILINE
tokens = {
'root': [
(r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
# character literal
(r'0\'.', String.Char),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
# literal with prepended base
(r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
# (r'=(?=\s|[a-zA-Z\[])', Operator),
(r'is\b', Operator),
(r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
Operator),
(r'(mod|div|not)\b', Operator),
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(\\()',
bygroups(Name.Function, Text, Punctuation)),
(u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
String.Atom), # atom, graphics
(r'[A-Z_]\w*', Name.Variable),
(u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
],
'nested-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline),
],
}
def analyse_text(text):
return ':-' in text
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
.. versionadded:: 0.10
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt', '*.logtalk']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s', Punctuation, 'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_]\w*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Message forwarding handler
(r'forward(?=[(])', Keyword),
# Execution-context methods
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make)(?=[(])', Keyword),
(r'\blogtalk_make\b', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control constructs
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fa(il|lse)|true)\b', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading meta-predicates
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
# Term unification
(r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
# Evaluable functors
(r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|'
r'ground|acyclic_term)(?=[(])', Keyword),
# Term comparison
(r'compare(?=[(])', Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
# Implementation defined hooks functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'(ignore|once)(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Sorting
(r'(key)?sort(?=[(])', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
# Predicate aliases
(r'\bas\b', Operator),
            # Arithmetic evaluation
(r'\bis\b', Keyword),
            # Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(e|pi|div|mod|rem)\b', Operator),
            # Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Existential quantifier
(r'\^', Operator),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
            # Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z]\w*", Text),
(r"'", String, 'quoted_atom'),
],
'quoted_atom': [
(r"''", String),
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))[.]', Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
(r'(built_in|dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|'
r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z]\w*(?=[(])', Text, 'root'),
(r'[a-z]\w*[.]', Text, 'root'),
],
'entityrelations': [
(r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_]\w*)', Name.Variable),
# Atoms
(r"[a-z]\w*", Text),
(r"'", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
            # Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return 1.0
elif ':- protocol(' in text:
return 1.0
elif ':- category(' in text:
return 1.0
        elif re.search(r'^:-\s[a-z]', text, re.M):
return 0.9
else:
return 0.0
|
|
"""Converters for OSS Vizier's protos from/to PyVizier's classes."""
import datetime
from typing import List, Optional, Sequence, Tuple, Union
from absl import logging
from vizier.pyvizier.oss import metadata_util
from vizier.pyvizier.shared import parameter_config
from vizier.pyvizier.shared import trial
from vizier.service import study_pb2
ScaleType = parameter_config.ScaleType
ParameterType = parameter_config.ParameterType
MonotypeParameterSequence = parameter_config.MonotypeParameterSequence
class ParameterConfigConverter:
"""Converter for ParameterConfig."""
@classmethod
def _set_bounds(cls, proto: study_pb2.StudySpec.ParameterSpec, lower: float,
upper: float, parameter_type: ParameterType):
"""Sets the proto's min_value and max_value fields."""
if parameter_type == ParameterType.INTEGER:
proto.integer_value_spec.min_value = lower
proto.integer_value_spec.max_value = upper
elif parameter_type == ParameterType.DOUBLE:
proto.double_value_spec.min_value = lower
proto.double_value_spec.max_value = upper
@classmethod
def _set_feasible_points(cls, proto: study_pb2.StudySpec.ParameterSpec,
feasible_points: Sequence[float]):
"""Sets the proto's feasible_points field."""
feasible_points = sorted(feasible_points)
proto.discrete_value_spec.ClearField('values')
proto.discrete_value_spec.values.extend(feasible_points)
@classmethod
def _set_categories(cls, proto: study_pb2.StudySpec.ParameterSpec,
categories: Sequence[str]):
"""Sets the protos' categories field."""
proto.categorical_value_spec.ClearField('values')
proto.categorical_value_spec.values.extend(categories)
@classmethod
def _set_default_value(cls, proto: study_pb2.StudySpec.ParameterSpec,
default_value: Union[float, int, str]):
"""Sets the protos' default_value field."""
which_pv_spec = proto.WhichOneof('parameter_value_spec')
getattr(proto, which_pv_spec).default_value.value = default_value
@classmethod
def _matching_parent_values(
cls, proto: study_pb2.StudySpec.ParameterSpec.ConditionalParameterSpec
) -> MonotypeParameterSequence:
"""Returns the matching parent values, if set."""
oneof_name = proto.WhichOneof('parent_value_condition')
if not oneof_name:
return []
if oneof_name in ('parent_discrete_values', 'parent_int_values',
'parent_categorical_values'):
return list(getattr(getattr(proto, oneof_name), 'values'))
raise ValueError('Unknown matching_parent_vals: {}'.format(oneof_name))
@classmethod
def from_proto(
cls,
proto: study_pb2.StudySpec.ParameterSpec,
*,
strict_validation: bool = False) -> parameter_config.ParameterConfig:
"""Creates a ParameterConfig.
    Args:
      proto: ParameterSpec proto to convert.
      strict_validation: If True, raise ValueError to enforce that
        to_proto(from_proto(proto)) == proto.
    Returns:
      ParameterConfig object
    Raises:
      ValueError: See the "strict_validation" arg documentation.
    """
feasible_values = []
oneof_name = proto.WhichOneof('parameter_value_spec')
if oneof_name == 'integer_value_spec':
bounds = (int(proto.integer_value_spec.min_value),
int(proto.integer_value_spec.max_value))
elif oneof_name == 'double_value_spec':
bounds = (proto.double_value_spec.min_value,
proto.double_value_spec.max_value)
elif oneof_name == 'discrete_value_spec':
bounds = None
feasible_values = proto.discrete_value_spec.values
elif oneof_name == 'categorical_value_spec':
bounds = None
feasible_values = proto.categorical_value_spec.values
default_value = None
if getattr(proto, oneof_name).default_value.value:
default_value = getattr(proto, oneof_name).default_value.value
if proto.conditional_parameter_specs:
children = []
for conditional_ps in proto.conditional_parameter_specs:
parent_values = cls._matching_parent_values(conditional_ps)
children.append(
(parent_values, cls.from_proto(conditional_ps.parameter_spec)))
else:
children = None
scale_type = None
if proto.scale_type:
scale_type = parameter_config.ScaleType(proto.scale_type)
try:
config = parameter_config.ParameterConfig.factory(
name=proto.parameter_id,
feasible_values=feasible_values,
bounds=bounds,
children=children,
scale_type=scale_type,
default_value=default_value)
except ValueError as e:
raise ValueError(
'The provided proto was misconfigured. {}'.format(proto)) from e
if strict_validation and cls.to_proto(config) != proto:
raise ValueError(
'The provided proto was misconfigured. Expected: {} Given: {}'.format(
cls.to_proto(config), proto))
return config
@classmethod
def _set_child_parameter_configs(
cls, parent_proto: study_pb2.StudySpec.ParameterSpec,
pc: parameter_config.ParameterConfig):
"""Sets the parent_proto's conditional_parameter_specs field.
Args:
parent_proto: Modified in place.
pc: Parent ParameterConfig to copy children from.
Raises:
ValueError: If the child configs are invalid
"""
children: List[Tuple[MonotypeParameterSequence,
parameter_config.ParameterConfig]] = []
for child in pc.child_parameter_configs:
children.append((child.matching_parent_values, child))
if not children:
return
parent_proto.ClearField('conditional_parameter_specs')
for child_pair in children:
if len(child_pair) != 2:
raise ValueError("""Each element in children must be a tuple of
(Sequence of valid parent values, ParameterConfig)""")
logging.debug('_set_child_parameter_configs: parent_proto=%s, children=%s',
parent_proto, children)
for unsorted_parent_values, child in children:
parent_values = sorted(unsorted_parent_values)
child_proto = cls.to_proto(child.clone_without_children)
conditional_parameter_spec = study_pb2.StudySpec.ParameterSpec.ConditionalParameterSpec(
parameter_spec=child_proto)
if parent_proto.HasField('discrete_value_spec'):
conditional_parameter_spec.parent_discrete_values.values[:] = parent_values
elif parent_proto.HasField('categorical_value_spec'):
conditional_parameter_spec.parent_categorical_values.values[:] = parent_values
elif parent_proto.HasField('integer_value_spec'):
conditional_parameter_spec.parent_int_values.values[:] = parent_values
else:
raise ValueError('DOUBLE type cannot have child parameters')
if child.child_parameter_configs:
cls._set_child_parameter_configs(child_proto, child)
parent_proto.conditional_parameter_specs.extend(
[conditional_parameter_spec])
@classmethod
def to_proto(
cls, pc: parameter_config.ParameterConfig
) -> study_pb2.StudySpec.ParameterSpec:
"""Returns a ParameterConfig Proto."""
proto = study_pb2.StudySpec.ParameterSpec(parameter_id=pc.name)
if pc.type == ParameterType.DISCRETE:
cls._set_feasible_points(proto, [float(v) for v in pc.feasible_values])
elif pc.type == ParameterType.CATEGORICAL:
cls._set_categories(proto, pc.feasible_values)
elif pc.type in (ParameterType.INTEGER, ParameterType.DOUBLE):
cls._set_bounds(proto, pc.bounds[0], pc.bounds[1], pc.type)
else:
raise ValueError('Invalid ParameterConfig: {}'.format(pc))
if pc.scale_type is not None and pc.scale_type != ScaleType.UNIFORM_DISCRETE:
proto.scale_type = pc.scale_type
if pc.default_value is not None:
cls._set_default_value(proto, pc.default_value)
cls._set_child_parameter_configs(proto, pc)
return proto
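# Illustrative sketch (not part of the original module): round-trips a simple
# double parameter through ParameterConfigConverter. The parameter name and
# bounds are placeholder values chosen only for demonstration.
def _example_parameter_config_roundtrip() -> parameter_config.ParameterConfig:
  pc = parameter_config.ParameterConfig.factory(
      name='learning_rate', bounds=(1e-4, 1.0))
  proto = ParameterConfigConverter.to_proto(pc)
  # strict_validation=True enforces that to_proto(from_proto(proto)) == proto.
  return ParameterConfigConverter.from_proto(proto, strict_validation=True)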
class ParameterValueConverter:
"""Converter for trial.ParameterValue."""
@classmethod
def from_proto(
cls, proto: study_pb2.Trial.Parameter) -> Optional[trial.ParameterValue]:
"""Returns whichever value that is populated, or None."""
value_proto = proto.value
oneof_name = value_proto.WhichOneof('kind')
potential_value = getattr(value_proto, oneof_name)
if isinstance(potential_value, float) or isinstance(
potential_value, str) or isinstance(potential_value, bool):
return trial.ParameterValue(potential_value)
else:
return None
@classmethod
def to_proto(cls, parameter_value: trial.ParameterValue,
name: str) -> study_pb2.Trial.Parameter:
"""Returns Parameter Proto."""
proto = study_pb2.Trial.Parameter(parameter_id=name)
    if isinstance(parameter_value.value, bool):
      # bool must be checked before int because bool is a subclass of int.
      proto.value.bool_value = parameter_value.value
    elif isinstance(parameter_value.value, int):
      proto.value.number_value = parameter_value.value
elif isinstance(parameter_value.value, float):
proto.value.number_value = parameter_value.value
elif isinstance(parameter_value.value, str):
proto.value.string_value = parameter_value.value
return proto
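# Illustrative sketch (not part of the original module): shows which oneof
# field each Python value type lands in via ParameterValueConverter.to_proto.
# The parameter names below are placeholders.
def _example_parameter_values_to_proto() -> List[study_pb2.Trial.Parameter]:
  return [
      # float (and int) values are stored in value.number_value.
      ParameterValueConverter.to_proto(trial.ParameterValue(0.1), 'lr'),
      # str values are stored in value.string_value.
      ParameterValueConverter.to_proto(trial.ParameterValue('adam'), 'opt'),
  ]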
class MeasurementConverter:
"""Converter for trial.MeasurementConverter."""
@classmethod
def from_proto(cls, proto: study_pb2.Measurement) -> trial.Measurement:
"""Creates a valid instance from proto.
Args:
proto: Measurement proto.
Returns:
A valid instance of Measurement object. Metrics with invalid values
are automatically filtered out.
"""
metrics = dict()
for metric in proto.metrics:
if metric.metric_id in metrics and metrics[
metric.metric_id].value != metric.value:
        logging.log_first_n(
            logging.ERROR, 'Duplicate metric of name "%s". '
            'The newly found value %s will be used and '
            'the previously found value %s will be discarded. '
            'This always happens if the proto has an empty-named metric.', 5,
            metric.metric_id, metric.value, metrics[metric.metric_id].value)
try:
metrics[metric.metric_id] = trial.Metric(value=metric.value)
except ValueError:
pass
return trial.Measurement(
metrics=metrics,
elapsed_secs=proto.elapsed_duration.seconds,
steps=proto.step_count)
@classmethod
def to_proto(cls, measurement: trial.Measurement) -> study_pb2.Measurement:
"""Converts to Measurement proto."""
proto = study_pb2.Measurement()
for name, metric in measurement.metrics.items():
proto.metrics.add(metric_id=name, value=metric.value)
proto.step_count = measurement.steps
int_seconds = int(measurement.elapsed_secs)
proto.elapsed_duration.seconds = int_seconds
proto.elapsed_duration.nanos = int(1e9 *
(measurement.elapsed_secs - int_seconds))
return proto
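# Illustrative sketch (not part of the original module): shows how
# MeasurementConverter.to_proto splits a fractional elapsed_secs into the
# proto's seconds/nanos fields. The metric name and values are placeholders.
def _example_measurement_to_proto() -> study_pb2.Measurement:
  measurement = trial.Measurement(
      metrics={'accuracy': trial.Metric(value=0.9)},
      elapsed_secs=1.5,
      steps=100)
  # elapsed_duration becomes seconds=1, nanos=500000000; step_count becomes 100.
  return MeasurementConverter.to_proto(measurement)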
def _to_pyvizier_trial_status(
proto_state: study_pb2.Trial.State) -> trial.TrialStatus:
"""from_proto conversion for Trial statuses."""
  if proto_state == study_pb2.Trial.State.REQUESTED:
    return trial.TrialStatus.REQUESTED
  elif proto_state == study_pb2.Trial.State.ACTIVE:
    return trial.TrialStatus.PENDING
  elif proto_state == study_pb2.Trial.State.STOPPING:
    return trial.TrialStatus.STOPPING
  elif proto_state == study_pb2.Trial.State.SUCCEEDED:
    return trial.TrialStatus.COMPLETED
  elif proto_state == study_pb2.Trial.State.INFEASIBLE:
    return trial.TrialStatus.COMPLETED
  else:
    return trial.TrialStatus.UNKNOWN
def _from_pyvizier_trial_status(status: trial.TrialStatus,
infeasible: bool) -> study_pb2.Trial.State:
"""to_proto conversion for Trial states."""
if status == trial.TrialStatus.REQUESTED:
return study_pb2.Trial.State.REQUESTED
elif status == trial.TrialStatus.PENDING:
return study_pb2.Trial.State.ACTIVE
elif status == trial.TrialStatus.STOPPING:
return study_pb2.Trial.State.STOPPING
elif status == trial.TrialStatus.COMPLETED:
if infeasible:
return study_pb2.Trial.State.INFEASIBLE
else:
return study_pb2.Trial.State.SUCCEEDED
else:
return study_pb2.Trial.State.STATE_UNSPECIFIED
class TrialConverter:
"""Converter for trial.TrialConverter."""
@classmethod
def from_proto(cls, proto: study_pb2.Trial) -> trial.Trial:
"""Converts from Trial proto to object.
Args:
proto: Trial proto.
Returns:
A Trial object.
"""
parameters = {}
for parameter in proto.parameters:
value = ParameterValueConverter.from_proto(parameter)
if value is not None:
if parameter.parameter_id in parameters:
raise ValueError('Invalid trial proto contains duplicate parameter {}'
': {}'.format(parameter.parameter_id, proto))
parameters[parameter.parameter_id] = value
else:
logging.warning('A parameter without a value will be dropped: %s',
parameter)
final_measurement = None
if proto.HasField('final_measurement'):
final_measurement = MeasurementConverter.from_proto(
proto.final_measurement)
completion_time = None
infeasible = False
infeasibility_reason = None
if proto.state == study_pb2.Trial.State.SUCCEEDED:
if proto.HasField('end_time'):
        completion_ts = proto.end_time.seconds + proto.end_time.nanos * 1e-9
        completion_time = datetime.datetime.fromtimestamp(completion_ts)
elif proto.state == study_pb2.Trial.State.INFEASIBLE:
infeasible = True
infeasibility_reason = proto.infeasible_reason
metadata = trial.Metadata()
for kv in proto.metadata:
metadata.abs_ns(kv.ns)[kv.key] = (
kv.proto if kv.HasField('proto') else kv.value)
measurements = []
for measure in proto.measurements:
measurements.append(MeasurementConverter.from_proto(measure))
creation_time = None
if proto.HasField('start_time'):
      creation_ts = proto.start_time.seconds + proto.start_time.nanos * 1e-9
      creation_time = datetime.datetime.fromtimestamp(creation_ts)
return trial.Trial(
id=int(proto.id),
description=proto.name,
assigned_worker=proto.client_id or None,
status=_to_pyvizier_trial_status(proto.state),
stopping_reason=None,
parameters=parameters,
creation_time=creation_time,
completion_time=completion_time,
infeasible=infeasible,
infeasibility_reason=infeasibility_reason,
final_measurement=final_measurement,
measurements=measurements,
metadata=metadata) # pytype: disable=wrong-arg-types
@classmethod
def from_protos(cls, protos: Sequence[study_pb2.Trial]) -> List[trial.Trial]:
"""Convenience wrapper for from_proto."""
return [TrialConverter.from_proto(proto) for proto in protos]
@classmethod
def to_protos(cls, pytrials: Sequence[trial.Trial]) -> List[study_pb2.Trial]:
return [TrialConverter.to_proto(pytrial) for pytrial in pytrials]
@classmethod
def to_proto(cls, pytrial: trial.Trial) -> study_pb2.Trial:
"""Converts a pyvizier Trial to a Trial proto."""
proto = study_pb2.Trial()
if pytrial.description is not None:
proto.name = pytrial.description
proto.id = str(pytrial.id)
proto.state = _from_pyvizier_trial_status(pytrial.status,
pytrial.infeasible)
proto.client_id = pytrial.assigned_worker or ''
for name, value in pytrial.parameters.items():
proto.parameters.append(ParameterValueConverter.to_proto(value, name))
# pytrial always adds an empty metric. Ideally, we should remove it if the
# metric does not exist in the study config.
if pytrial.final_measurement is not None:
proto.final_measurement.CopyFrom(
MeasurementConverter.to_proto(pytrial.final_measurement))
for measurement in pytrial.measurements:
proto.measurements.append(MeasurementConverter.to_proto(measurement))
if pytrial.creation_time is not None:
creation_secs = datetime.datetime.timestamp(pytrial.creation_time)
proto.start_time.seconds = int(creation_secs)
proto.start_time.nanos = int(1e9 * (creation_secs - int(creation_secs)))
if pytrial.completion_time is not None:
completion_secs = datetime.datetime.timestamp(pytrial.completion_time)
proto.end_time.seconds = int(completion_secs)
proto.end_time.nanos = int(1e9 * (completion_secs - int(completion_secs)))
if pytrial.infeasibility_reason is not None:
proto.infeasible_reason = pytrial.infeasibility_reason
if pytrial.metadata is not None:
for ns in pytrial.metadata.namespaces():
repr_ns = repr(ns)
abs_ns = pytrial.metadata.abs_ns(ns)
for key, value in abs_ns.items():
metadata_util.assign(proto, key=key, ns=repr_ns, value=value)
return proto
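# Illustrative sketch (not part of the original module): builds a minimal Trial
# proto and round-trips it through TrialConverter. The id, parameter name and
# value are placeholders chosen only for demonstration.
def _example_trial_roundtrip() -> study_pb2.Trial:
  proto = study_pb2.Trial(id='1', state=study_pb2.Trial.State.ACTIVE)
  proto.parameters.add(parameter_id='learning_rate').value.number_value = 0.1
  pytrial = TrialConverter.from_proto(proto)
  # ACTIVE maps to TrialStatus.PENDING and back to ACTIVE on to_proto().
  return TrialConverter.to_proto(pytrial)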
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
d) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns a OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
    this run call, along with a count of how many run calls have occurred
    on this wrapper session. The callback then returns an OnRunStartResponse
    object, whose action field directs what the wrapper session
    will actually do for the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
    including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives the control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implement the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import re
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
# Helper function.
def _check_type(obj, expected_type):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_type: (type) The expected type of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
"""Request to an on-session-init callback.
This callback is invoked during the __init__ call to a debug-wrapper session.
"""
def __init__(self, sess):
"""Constructor.
Args:
sess: A tensorflow Session object.
"""
_check_type(sess, session.BaseSession)
self.session = sess
class OnSessionInitAction(object):
"""Enum-like values for possible action to take on session init."""
# Proceed, without special actions, in the wrapper session initialization.
# What action the wrapper session performs next is determined by the caller
# of the wrapper session. E.g., it can call run().
PROCEED = "proceed"
# Instead of letting the caller of the wrapper session determine what actions
# the wrapper session will perform next, enter a loop to receive instructions
# from a remote client.
# For example, TensorBoard visual debugger can use this action so that it can
# launch session.run() calls remotely.
REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
"""Response from an on-session-init callback."""
def __init__(self, action):
"""Constructor.
Args:
action: (`OnSessionInitAction`) Debugger action to take on session init.
"""
_check_type(action, str)
self.action = action
class OnRunStartRequest(object):
"""Request to an on-run-start callback.
This callback is invoked during a run() call of the debug-wrapper
session, immediately after the run() call counter is incremented.
"""
def __init__(self, fetches, feed_dict, run_options, run_metadata,
run_call_count):
"""Constructor of `OnRunStartRequest`.
Args:
fetches: Fetch targets of the run() call.
feed_dict: The feed dictionary to the run() call.
run_options: RunOptions input to the run() call.
run_metadata: RunMetadata input to the run() call.
The above four arguments are identical to the input arguments to the
run() method of a non-wrapped TensorFlow session.
run_call_count: 1-based count of how many run calls (including this one)
        have been invoked.
"""
self.fetches = fetches
self.feed_dict = feed_dict
self.run_options = run_options
self.run_metadata = run_metadata
self.run_call_count = run_call_count
class OnRunStartAction(object):
"""Enum-like values for possible action to take on start of a run() call."""
# Run once with debug tensor-watching.
DEBUG_RUN = "debug_run"
# Run once with profiler.
PROFILE_RUN = "profile_run"
# Run without debug tensor-watching.
NON_DEBUG_RUN = "non_debug_run"
# Instead of running the fetches as a whole, as would normally happen, invoke
# the (to-be-implemented) debug stepper.
# TODO(cais): Remove "to-be-implemented".
INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
"""Request from an on-run-start callback.
The caller of the callback can use this response object to specify what
action the debug-wrapper session actually takes on the run() call.
"""
def __init__(self,
action,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of `OnRunStartResponse`.
Args:
action: (`OnRunStartAction`) the action actually taken by the wrapped
session for the run() call.
debug_urls: (`list` of `str`) debug_urls used in watching the tensors
during the run() call.
debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
self.debug_ops = debug_ops
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
class OnRunEndRequest(object):
"""Request to an on-run-end callback.
The callback is invoked immediately before the wrapped run() call ends.
"""
def __init__(self,
performed_action,
run_metadata=None,
client_graph_def=None,
tf_error=None):
"""Constructor for `OnRunEndRequest`.
Args:
performed_action: (`OnRunStartAction`) Actually-performed action by the
debug-wrapper session.
run_metadata: run_metadata output from the run() call (if any).
client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
the python front end of TensorFlow. Can be obtained with
session.graph.as_graph_def().
tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
during the run (if any).
"""
_check_type(performed_action, str)
self.performed_action = performed_action
if run_metadata is not None:
_check_type(run_metadata, config_pb2.RunMetadata)
self.run_metadata = run_metadata
self.client_graph_def = client_graph_def
self.tf_error = tf_error
class OnRunEndResponse(object):
"""Response from an on-run-end callback."""
def __init__(self):
# Currently only a placeholder.
pass
class BaseDebugWrapperSession(session.SessionInterface):
"""Base class of debug-wrapper session classes.
Concrete classes that inherit from this class need to implement the abstract
methods such as on_session_init, on_run_start and on_run_end.
"""
  # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper
  # is available.
def __init__(self, sess, thread_name_filter=None):
"""Constructor of `BaseDebugWrapperSession`.
Args:
sess: An (unwrapped) TensorFlow session instance.
thread_name_filter: Regular-expression filter (whitelist) for name(s) of
thread(s) on which the wrapper session will be active. This regular
expression is used in a start-anchored fashion on the thread name, i.e.,
by applying the `match` method of the compiled pattern. The default
`None` means that the wrapper session will be active on all threads.
E.g., r"MainThread$", r"QueueRunnerThread.*".
Raises:
ValueError: On invalid `OnSessionInitAction` value.
NotImplementedError: If a non-DirectSession sess object is received.
"""
_check_type(sess, session.BaseSession)
# The session being wrapped.
self._sess = sess
self._thread_name_filter_pattern = (re.compile(thread_name_filter)
if thread_name_filter else None)
# Keeps track of number of run calls that have been performed on this
# debug-wrapper session.
self._run_call_count = 0
# Invoke on-session-init callback.
response = self.on_session_init(OnSessionInitRequest(self._sess))
_check_type(response, OnSessionInitResponse)
if response.action == OnSessionInitAction.PROCEED:
pass
elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
# TODO(cais): Implement REMOTE_INSTR_LOOP
raise NotImplementedError(
"OnSessionInitAction REMOTE_INSTR_LOOP has not been "
"implemented.")
else:
raise ValueError(
"Invalid OnSessionInitAction value: %s" % response.action)
@property
def graph(self):
return self._sess.graph
@property
def graph_def(self):
return self._sess.graph_def
@property
def sess_str(self):
return self._sess.sess_str
@property
def session(self):
return self._sess
def as_default(self):
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value.
"""
self._run_call_count += 1
if self._is_disabled_thread():
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_debug(
decorated_run_options,
run_start_resp.debug_urls,
debug_ops=run_start_resp.debug_ops,
node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=(
run_start_resp.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif run_start_resp.action == OnRunStartAction.PROFILE_RUN:
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_profile(decorated_run_options)
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def())
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
with stepper.NodeStepper(
self._sess, fetches, feed_dict) as node_stepper:
retvals = self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
      else:
        # Invoke run() method of the wrapped session.
        retvals = self._sess.run(
            fetches,
            feed_dict=feed_dict,
            options=options,
            run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
def _is_disabled_thread(self):
thread_name = threading.current_thread().name or ""
return (self._thread_name_filter_pattern and
not self._thread_name_filter_pattern.match(thread_name))
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def _decorate_run_options_for_debug(
self,
run_options,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Modify a RunOptions object for debug tensor watching.
Specifies request for outputting partition graphs. Adds
debug_tensor_watch_opts with proper debug URLs.
Args:
run_options: (RunOptions) the modified RunOptions object.
debug_urls: (list of str) debug URLs to be entered in run_options.
debug_tensor_watch_opts.
debug_ops: (str or list of str) debug op(s) to be used by the debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options,
self._sess.graph,
debug_urls=debug_urls,
debug_ops=debug_ops,
node_name_regex_whitelist=node_name_regex_whitelist,
op_type_regex_whitelist=op_type_regex_whitelist,
tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures)
def _decorate_run_options_for_profile(self, run_options):
"""Modify a RunOptions object for profiling TensorFlow graph execution.
Args:
run_options: (RunOptions) the modified RunOptions object.
"""
run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
@abc.abstractmethod
def on_session_init(self, request):
"""Callback invoked during construction of the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the constructor ends.
Args:
request: (`OnSessionInitRequest`) callback request carrying information
such as the session being wrapped.
Returns:
An instance of `OnSessionInitResponse`.
"""
@abc.abstractmethod
def on_run_start(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
after an increment of run call counter.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information to
1) direct the wrapper session to perform a specified action (e.g., run
with or without debug tensor watching, invoking the stepper.)
2) debug URLs used to watch the tensors.
"""
@abc.abstractmethod
def on_run_end(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
      An instance of `OnRunEndResponse`.
"""
def __enter__(self):
return self._sess.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._sess.__exit__(exec_type, exec_value, exec_tb)
def close(self):
self._sess.close()
# TODO(cais): Add _node_name_regex_whitelist and
# _node_op_type_regex_whitelist.
@abc.abstractmethod
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Callback invoked when the client intends to step through graph nodes.
Args:
node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used
in this stepping session.
restore_variable_values_on_exit: (bool) Whether any variables whose values
have been altered during this node-stepper invocation should be restored
to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
class WatchOptions(object):
"""Type for return values of watch_fn."""
def __init__(self,
debug_ops=None,
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of WatchOptions: Debug watch options.
Used as return values of `watch_fn`s.
Args:
debug_ops: (`str` or `list of str`) Debug ops to be used.
node_name_regex_whitelist: Regular-expression whitelist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_whitelist: Regular-expression whitelist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both whitelists.
tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
data type, e.g., `"^int.*"`.
This whitelist operates in logical `AND` relations to the two whitelists
above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
"""
if debug_ops:
self.debug_ops = debug_ops
else:
self.debug_ops = ["DebugIdentity"]
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
def __repr__(self):
return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, "
"op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, "
"tolerate_debug_op_creation_failures=%r)" % (
self.debug_ops, self.node_name_regex_whitelist,
self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist,
self.tolerate_debug_op_creation_failures))
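# Illustrative sketch (not part of the original module): a watch_fn of the form
# expected by NonInteractiveDebugWrapperSession below. The whitelist regexes
# are placeholder patterns chosen only for demonstration.
def _example_watch_fn(fetches, feeds):
  del fetches, feeds  # This simple example ignores the run-specific inputs.
  return WatchOptions(
      debug_ops=["DebugIdentity"],
      node_name_regex_whitelist=r"(weight|bias).*",
      tensor_dtype_regex_whitelist=r"^float.*")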
class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):
"""Base class for non-interactive (i.e., non-CLI) debug wrapper sessions."""
def __init__(self, sess, watch_fn=None, thread_name_filter=None):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a
debugged `Session.run()` call to `WatchOptions.`
* Args:
* `fetches`: the fetches to the `Session.run()` call.
* `feeds`: the feeds to the `Session.run()` call.
* Returns:
(`tf_debug.WatchOptions`) An object containing debug options including
the debug ops to use, the node names, op types and/or tensor data
types to watch, etc. See the documentation of `tf_debug.WatchOptions`
for more details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
Raises:
TypeError: If a non-None `watch_fn` is specified and it is not callable.
"""
BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
self._watch_fn = None
if watch_fn is not None:
if not callable(watch_fn):
raise TypeError("watch_fn is not callable")
self._watch_fn = watch_fn
def on_session_init(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
return OnSessionInitResponse(OnSessionInitAction.PROCEED)
@abc.abstractmethod
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Abstract method to be implemented by concrete subclasses.
This method prepares the run-specific debug URL(s).
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) Debug URLs to be used in
this `Session.run()` call.
"""
def on_run_start(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
debug_urls, watch_opts = self._prepare_run_watch_config(
request.fetches, request.feed_dict)
return OnRunStartResponse(
OnRunStartAction.DEBUG_RUN,
debug_urls,
debug_ops=watch_opts.debug_ops,
node_name_regex_whitelist=watch_opts.node_name_regex_whitelist,
op_type_regex_whitelist=watch_opts.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_opts.tolerate_debug_op_creation_failures))
def _prepare_run_watch_config(self, fetches, feed_dict):
"""Get the debug_urls, and node/op whitelists for the current run() call.
Args:
fetches: Same as the `fetches` argument to `Session.run()`.
      feed_dict: Same as the `feed_dict` argument to `Session.run()`.
Returns:
debug_urls: (str or list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
watch_options: (WatchOptions) The return value of a watch_fn, containing
options including debug_ops, and whitelists.
"""
debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)
if self._watch_fn is None:
watch_options = WatchOptions()
else:
watch_options = self._watch_fn(fetches, feed_dict)
if isinstance(watch_options, tuple):
# For legacy return type (tuples).
watch_options = WatchOptions(*watch_options)
return debug_urls, watch_options
def on_run_end(self, request):
"""See doc of BaseDebugWrapperSession.on_run_end."""
return OnRunEndResponse()
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""See doc of BaseDebugWrapperSession.invoke_node_stepper."""
raise NotImplementedError(
"NonInteractiveDebugWrapperSession does not support node-stepper mode.")
|
|
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='postgres')
class TestPostgres(AgentCheckTest):
CHECK_NAME = 'postgres'
def test_checks(self):
host = 'localhost'
port = 15432
dbname = 'datadog_test'
instances = [
{
'host': host,
'port': port,
'username': 'datadog',
'password': 'datadog',
'dbname': dbname,
'relations': ['persons'],
'custom_metrics': [{
'descriptors': [('datname', 'customdb')],
'metrics': {
'numbackends': ['custom.numbackends', 'Gauge'],
},
'query': "SELECT datname, %s FROM pg_stat_database WHERE datname = 'datadog_test' LIMIT(1)",
'relation': False,
}]
},
{
'host': host,
'port': port,
'username': 'datadog',
'password': 'datadog',
'dbname': 'dogs',
'relations': ['breed', 'kennel']
}
]
self.run_check_twice(dict(instances=instances))
# Useful to get server version
# FIXME: Not great, should have a function like that available
key = (host, port, dbname)
db = self.check.dbs[key]
# Testing DB_METRICS scope
COMMON_METRICS = [
'postgresql.connections',
'postgresql.commits',
'postgresql.rollbacks',
'postgresql.disk_read',
'postgresql.buffer_hit',
'postgresql.rows_returned',
'postgresql.rows_fetched',
'postgresql.rows_inserted',
'postgresql.rows_updated',
'postgresql.rows_deleted',
'postgresql.database_size',
]
        for mname in COMMON_METRICS:
            # Use a separate loop variable so we don't clobber the `db`
            # connection object fetched above.
            for db_name in ('datadog_test', 'dogs'):
                self.assertMetric(mname, count=1, tags=['db:%s' % db_name])
NEWER_92_METRICS = [
'postgresql.deadlocks',
'postgresql.temp_bytes',
'postgresql.temp_files',
]
if self.check._is_9_2_or_above(key, db):
            for mname in NEWER_92_METRICS:
                for db_name in ('datadog_test', 'dogs'):
                    self.assertMetric(mname, count=1, tags=['db:%s' % db_name])
# Testing BGW_METRICS scope
COMMON_BGW_METRICS = [
'postgresql.bgwriter.checkpoints_timed',
'postgresql.bgwriter.checkpoints_requested',
'postgresql.bgwriter.buffers_checkpoint',
'postgresql.bgwriter.buffers_clean',
'postgresql.bgwriter.maxwritten_clean',
'postgresql.bgwriter.buffers_backend',
'postgresql.bgwriter.buffers_alloc',
]
for mname in COMMON_BGW_METRICS:
self.assertMetric(mname, count=1)
NEWER_91_BGW_METRICS = [
'postgresql.bgwriter.buffers_backend_fsync',
]
if self.check._is_9_1_or_above(key, db):
for mname in NEWER_91_BGW_METRICS:
self.assertMetric(mname, count=1)
NEWER_92_BGW_METRICS = [
'postgresql.bgwriter.write_time',
'postgresql.bgwriter.sync_time',
]
if self.check._is_9_2_or_above(key, db):
for mname in NEWER_92_BGW_METRICS:
self.assertMetric(mname, count=1)
# FIXME: Test postgresql.locks
# Relation specific metrics
RELATION_METRICS = [
'postgresql.seq_scans',
'postgresql.seq_rows_read',
'postgresql.index_scans',
'postgresql.index_rows_fetched',
'postgresql.rows_inserted',
'postgresql.rows_updated',
'postgresql.rows_deleted',
'postgresql.rows_hot_updated',
'postgresql.live_rows',
'postgresql.dead_rows',
]
SIZE_METRICS = [
'postgresql.table_size',
'postgresql.index_size',
'postgresql.total_size',
]
STATIO_METRICS = [
'postgresql.heap_blocks_read',
'postgresql.heap_blocks_hit',
'postgresql.index_blocks_read',
'postgresql.index_blocks_hit',
'postgresql.toast_blocks_read',
'postgresql.toast_blocks_hit',
'postgresql.toast_index_blocks_read',
'postgresql.toast_index_blocks_hit',
]
for inst in instances:
for rel in inst.get('relations', []):
expected_tags = ['db:%s' % inst['dbname'], 'table:%s' % rel]
for mname in RELATION_METRICS:
count = 1
# We only build a test index and stimulate it on breed
# in the dogs DB, so the other index metrics shouldn't be
# here.
if 'index' in mname and rel != 'breed':
count = 0
self.assertMetric(mname, count=count, tags=expected_tags)
for mname in SIZE_METRICS:
self.assertMetric(mname, count=1, tags=expected_tags)
for mname in STATIO_METRICS:
at_least = None
count = 1
if '.index' in mname and rel != 'breed':
count = 0
# FIXME: toast are not reliable, need to do some more setup
# to get some values here I guess
if 'toast' in mname:
                        at_least = 0  # flaky metric: allow 0 samples without impacting coverage
count = None
self.assertMetric(mname, count=count, at_least=at_least, tags=expected_tags)
# Index metrics
IDX_METRICS = [
'postgresql.index_scans',
'postgresql.index_rows_read',
'postgresql.index_rows_fetched',
]
# we have a single index defined!
expected_tags = ['db:dogs', 'table:breed', 'index:breed_names']
for mname in IDX_METRICS:
self.assertMetric(mname, count=1, tags=expected_tags)
# instance connection metrics
CONNECTION_METRICS = [
'postgresql.max_connections',
'postgresql.percent_usage_connections',
]
for mname in CONNECTION_METRICS:
self.assertMetric(mname, count=1)
# db level connections
for inst in instances:
expected_tags = ['db:%s' % inst['dbname']]
self.assertMetric('postgresql.connections', count=1, tags=expected_tags)
# By schema metrics
self.assertMetric('postgresql.table.count', value=2, count=1, tags=['schema:public'])
self.assertMetric('postgresql.db.count', value=2, count=1)
# Our custom metric
self.assertMetric('custom.numbackends', value=1, tags=['customdb:datadog_test'])
# Test service checks
self.assertServiceCheck('postgres.can_connect',
count=1, status=AgentCheck.OK,
tags=['host:localhost', 'port:15432', 'db:datadog_test']
)
self.assertServiceCheck('postgres.can_connect',
count=1, status=AgentCheck.OK,
tags=['host:localhost', 'port:15432', 'db:dogs']
)
# Assert service metadata
self.assertServiceMetadata(['version'], count=2)
self.coverage_report()
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename, join, isdir, isfile, exists
from shutil import copyfile, rmtree
from os import remove, listdir, makedirs
from datetime import date, timedelta
from urllib.parse import quote
from itertools import zip_longest
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import ParseError
from xml.sax.saxutils import escape
from gzip import GzipFile
from functools import partial
from h5py import File
from skbio.util import safe_md5
from qiita_files.demux import to_per_sample_ascii
from qiita_core.qiita_settings import qiita_config
from qiita_ware.exceptions import EBISubmissionError
from qiita_db.util import create_nested_path
from qiita_db.logger import LogEntry
from qiita_db.ontology import Ontology
from qiita_db.util import convert_to_id, get_mountpoint, open_file
from qiita_db.artifact import Artifact
from qiita_db.metadata_template.constants import (
TARGET_GENE_DATA_TYPES, PREP_TEMPLATE_COLUMNS_TARGET_GENE)
from qiita_db.processing_job import _system_call as system_call
def clean_whitespace(text):
"""Standardizes whitespaces so there is only one space separating tokens
Parameters
----------
    text : str
        The text to fix
Returns
-------
str
fixed text
"""
return ' '.join(str(text).split())
class EBISubmission(object):
"""Define an EBI submission, generate submission files and submit
Submit an artifact to EBI
The steps for EBI submission are:
1. Validate that we have all required info to submit
2. Generate per sample demultiplexed files
3. Generate XML files for submission
4. Submit sequences files
5. Submit XML files. The answer has the EBI submission numbers.
Parameters
----------
artifact_id : int
The artifact id to submit
action : str
The action to perform. Valid options see
EBISubmission.valid_ebi_actions
Raises
------
EBISubmissionError
- If the action is not in EBISubmission.valid_ebi_actions
- If the artifact cannot be submitted to EBI
- If the artifact has been already submitted to EBI and the action
is different from 'MODIFY'
- If the status of the study attached to the artifact is `submitting`
- If the prep template investigation type is not in the
ena_ontology.terms or not in the ena_ontology.user_defined_terms
- If the submission is missing required EBI fields either in the sample
or prep template
- If the sample preparation metadata doesn't have a platform field or
it isn't a EBISubmission.valid_platforms
"""
FWD_READ_SUFFIX = '.R1.fastq.gz'
REV_READ_SUFFIX = '.R2.fastq.gz'
valid_ebi_actions = ('ADD', 'VALIDATE', 'MODIFY')
    valid_ebi_submission_states = ('submitting',)
# valid_platforms dict of 'platform': ['valid_instrument_models']
valid_platforms = {'LS454': ['454 GS', '454 GS 20', '454 GS FLX',
'454 GS FLX+', '454 GS FLX TITANIUM',
'454 GS JUNIOR', 'UNSPECIFIED'],
'ION_TORRENT': ['ION TORRENT PGM', 'ION TORRENT PROTON',
'ION TORRENT S5', 'ION TORRENT S5 XL'],
'ILLUMINA': ['HISEQ X FIVE',
'HISEQ X TEN',
'ILLUMINA GENOME ANALYZER',
'ILLUMINA GENOME ANALYZER II',
'ILLUMINA GENOME ANALYZER IIX',
'ILLUMINA HISCANSQ',
'ILLUMINA HISEQ 1000',
'ILLUMINA HISEQ 1500',
'ILLUMINA HISEQ 2000',
'ILLUMINA HISEQ 2500',
'ILLUMINA HISEQ 3000',
'ILLUMINA HISEQ 4000',
'ILLUMINA MISEQ',
'ILLUMINA MINISEQ',
'ILLUMINA NOVASEQ 6000',
'NEXTSEQ 500',
'NEXTSEQ 550',
'UNSPECIFIED'],
'OXFORD_NANOPORE': ['GRIDION'],
'PACBIO_SMRT': ['PACBIO RS',
'PACBIO RS II',
'SEQUEL',
'SEQUEL II']}
xmlns_xsi = "http://www.w3.org/2001/XMLSchema-instance"
xsi_noNSL = "ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_3/SRA.%s.xsd"
def __init__(self, artifact_id, action):
error_msgs = []
if action not in self.valid_ebi_actions:
error_msg = ("%s is not a valid EBI submission action, valid "
"actions are: %s" %
(action, ', '.join(self.valid_ebi_actions)))
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
ena_ontology = Ontology(convert_to_id('ENA', 'ontology'))
self.action = action
self.artifact = Artifact(artifact_id)
if not self.artifact.can_be_submitted_to_ebi:
error_msg = ("Artifact %d cannot be submitted to EBI"
% self.artifact.id)
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
self.study = self.artifact.study
self.sample_template = self.study.sample_template
# If we reach this point, there should be only one prep template
# attached to the artifact. By design, each artifact has at least one
# prep template. Artifacts with more than one prep template cannot be
# submitted to EBI, so the attribute 'can_be_submitted_to_ebi' should
# be set to false, which is checked in the previous if statement
self.prep_template = self.artifact.prep_templates[0]
if self.artifact.is_submitted_to_ebi and action != 'MODIFY':
error_msg = ("Cannot resubmit! Artifact %d has already "
"been submitted to EBI." % artifact_id)
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
self.artifact_id = artifact_id
self.study_title = self.study.title
self.study_abstract = self.study.info['study_abstract']
it = self.prep_template.investigation_type
if it in ena_ontology.terms:
self.investigation_type = it
self.new_investigation_type = None
elif it in ena_ontology.user_defined_terms:
self.investigation_type = 'Other'
self.new_investigation_type = it
else:
# This should never happen
error_msgs.append("Unrecognized investigation type: '%s'. This "
"term is neither one of the official terms nor "
"one of the user-defined terms in the ENA "
"ontology." % it)
_, base_fp = get_mountpoint("preprocessed_data")[0]
self.ebi_dir = '%d_ebi_submission' % artifact_id
self.full_ebi_dir = join(base_fp, self.ebi_dir)
self.ascp_reply = join(self.full_ebi_dir, 'ascp_reply.txt')
self.curl_reply = join(self.full_ebi_dir, 'curl_reply.xml')
self.xml_dir = join(self.full_ebi_dir, 'xml_dir')
self.study_xml_fp = None
self.sample_xml_fp = None
self.experiment_xml_fp = None
self.run_xml_fp = None
self.submission_xml_fp = None
self.per_sample_FASTQ_reverse = False
self.publications = self.study.publications
# getting the restrictions
st_restrictions = [self.sample_template.columns_restrictions['EBI']]
pt_restrictions = [self.prep_template.columns_restrictions['EBI']]
if self.artifact.data_type in TARGET_GENE_DATA_TYPES:
            # adding restrictions on primer and barcode as these are
            # conditionally required for target gene
pt_restrictions.append(
PREP_TEMPLATE_COLUMNS_TARGET_GENE['demultiplex'])
st_missing = self.sample_template.check_restrictions(st_restrictions)
pt_missing = self.prep_template.check_restrictions(pt_restrictions)
# testing if there are any missing columns
if st_missing:
error_msgs.append("Missing column in the sample template: %s" %
', '.join(list(st_missing)))
if pt_missing:
error_msgs.append("Missing column in the prep template: %s" %
', '.join(list(pt_missing)))
# generating all samples from sample template
self.samples = {}
self.samples_prep = {}
self.sample_demux_fps = {}
get_output_fp = partial(join, self.full_ebi_dir)
nvp = []
nvim = []
for k, sample_prep in self.prep_template.items():
# validating required fields
if ('platform' not in sample_prep or
sample_prep['platform'] is None):
nvp.append(k)
else:
platform = sample_prep['platform'].upper()
if platform not in self.valid_platforms:
nvp.append(k)
else:
if ('instrument_model' not in sample_prep or
sample_prep['instrument_model'] is None):
nvim.append(k)
else:
im = sample_prep['instrument_model'].upper()
if im not in self.valid_platforms[platform]:
nvim.append(k)
# IMPORTANT: note that we are generating the samples we are going
# to be using during submission and they come from the sample info
# file, however, we are only retrieving the samples that exist in
# the prep AKA not all samples
self.samples[k] = self.sample_template.get(sample_prep.id)
self.samples_prep[k] = sample_prep
self.sample_demux_fps[k] = get_output_fp(k)
if nvp:
error_msgs.append("These samples do not have a valid platform "
"(instrumet model wasn't checked): %s" % (
', '.join(nvp)))
if nvim:
error_msgs.append("These samples do not have a valid instrument "
"model: %s" % (', '.join(nvim)))
if error_msgs:
error_msgs = ("Errors found during EBI submission for study #%d, "
"artifact #%d and prep template #%d:\n%s"
% (self.study.id, artifact_id,
self.prep_template.id, '\n'.join(error_msgs)))
LogEntry.create('Runtime', error_msgs)
raise EBISubmissionError(error_msgs)
self._sample_aliases = {}
self._experiment_aliases = {}
self._run_aliases = {}
self._ebi_sample_accessions = \
self.sample_template.ebi_sample_accessions
self._ebi_experiment_accessions = \
self.prep_template.ebi_experiment_accessions
def _get_study_alias(self):
"""Format alias using ``self.study_id``"""
study_alias_format = '%s_sid_%s'
return study_alias_format % (
qiita_config.ebi_organization_prefix,
escape(clean_whitespace(str(self.study.id))))
def _get_sample_alias(self, sample_name):
"""Format alias using ``self.study_id``, `sample_name`"""
alias = "%s:%s" % (self._get_study_alias(),
escape(clean_whitespace(str(sample_name))))
self._sample_aliases[alias] = sample_name
return alias
def _get_experiment_alias(self, sample_name):
"""Format alias using ``self.prep_template.id``, and `sample_name`
Currently, this is identical to _get_sample_alias above, since we are
only going to allow submission of one prep for each sample
"""
exp_alias_format = '%s_ptid_%s:%s'
alias = exp_alias_format % (
qiita_config.ebi_organization_prefix,
escape(clean_whitespace(str(self.prep_template.id))),
escape(clean_whitespace(str(sample_name))))
self._experiment_aliases[alias] = sample_name
return alias
def _get_submission_alias(self):
"""Format alias using ``self.artifact_id``"""
safe_artifact_id = escape(
clean_whitespace(str(self.artifact_id)))
submission_alias_format = '%s_submission_%s'
return submission_alias_format % (qiita_config.ebi_organization_prefix,
safe_artifact_id)
def _get_run_alias(self, sample_name):
"""Format alias using `sample_name`
"""
alias = '%s_ppdid_%s:%s' % (
qiita_config.ebi_organization_prefix,
escape(clean_whitespace(str(self.artifact_id))),
sample_name)
self._run_aliases[alias] = sample_name
return alias
def _get_library_name(self, sample_name):
"""Format alias using `sample_name`
"""
return escape(clean_whitespace(sample_name))
def _add_dict_as_tags_and_values(self, parent_node, attribute_element_name,
data_dict):
"""Format key/value data using a common EBI XML motif"""
for attr, val in sorted(data_dict.items()):
if val is None:
val = "Unknown"
attribute_element = ET.SubElement(parent_node,
attribute_element_name)
tag = ET.SubElement(attribute_element, 'TAG')
tag.text = clean_whitespace(attr)
value = ET.SubElement(attribute_element, 'VALUE')
value.text = clean_whitespace(val)
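    # For reference (illustrative, not part of the original code): with
    # attribute_element_name='SAMPLE_ATTRIBUTE', a data_dict entry of
    # {'elevation': '10'} is rendered as
    #   <SAMPLE_ATTRIBUTE>
    #     <TAG>elevation</TAG>
    #     <VALUE>10</VALUE>
    #   </SAMPLE_ATTRIBUTE>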
def _get_publication_element(self, study_links, pmid, db_name):
study_link = ET.SubElement(study_links, 'STUDY_LINK')
xref_link = ET.SubElement(study_link, 'XREF_LINK')
db = ET.SubElement(xref_link, 'DB')
db.text = db_name
_id = ET.SubElement(xref_link, 'ID')
_id.text = str(pmid)
def generate_study_xml(self):
"""Generates the string for study XML file
Returns
-------
ET.Element
Object with study XML values
"""
study_set = ET.Element('STUDY_SET', {
'xmlns:xsi': self.xmlns_xsi,
'xsi:noNamespaceSchemaLocation': self.xsi_noNSL % "study"})
study = ET.SubElement(study_set, 'STUDY', {
'alias': self._get_study_alias(),
'center_name': qiita_config.ebi_center_name}
)
descriptor = ET.SubElement(study, 'DESCRIPTOR')
study_title = ET.SubElement(descriptor, 'STUDY_TITLE')
study_title.text = escape(clean_whitespace(self.study_title))
# study type is deprecated and not displayed anywhere on EBI-ENA;
# however it's required for submission so just injecting with Other
ET.SubElement(
descriptor, 'STUDY_TYPE', {'existing_study_type': 'Other'})
study_abstract = ET.SubElement(descriptor, 'STUDY_ABSTRACT')
study_abstract.text = clean_whitespace(escape(self.study_abstract))
# Add pubmed IDs
if self.publications:
study_links = ET.SubElement(study, 'STUDY_LINKS')
for pub, is_doi in self.publications:
if is_doi:
self._get_publication_element(study_links, pub, 'DOI')
else:
self._get_publication_element(study_links, pub, 'PUBMED')
return study_set
def generate_sample_xml(self, samples=None, ignore_columns=None):
"""Generates the sample XML file
Parameters
----------
samples : list of str, optional
The list of samples to be included in the sample xml. If not
provided or an empty list is provided, all the samples are used
ignore_columns : list of str, optional
The list of columns to ignore during submission; helpful for when
the submissions are too large
Returns
-------
ET.Element
Object with sample XML values
"""
sample_set = ET.Element('SAMPLE_SET', {
'xmlns:xsi': self.xmlns_xsi,
"xsi:noNamespaceSchemaLocation": self.xsi_noNSL % "sample"})
if not samples:
samples = self.samples.keys()
for sample_name in sorted(samples):
sample_info = dict(self.samples[sample_name])
sample_accession = self._ebi_sample_accessions[sample_name]
if self.action in ('ADD', 'VALIDATE'):
if sample_accession is not None:
continue
else:
sample = ET.SubElement(sample_set, 'SAMPLE', {
'alias': self._get_sample_alias(sample_name),
'center_name': qiita_config.ebi_center_name}
)
else:
sample = ET.SubElement(sample_set, 'SAMPLE', {
'accession': sample_accession,
'center_name': qiita_config.ebi_center_name}
)
sample_title = ET.SubElement(sample, 'TITLE')
sample_title.text = escape(clean_whitespace(sample_name))
sample_sample_name = ET.SubElement(sample, 'SAMPLE_NAME')
taxon_id = ET.SubElement(sample_sample_name, 'TAXON_ID')
text = sample_info.pop('taxon_id')
taxon_id.text = escape(clean_whitespace(text))
scientific_name = ET.SubElement(
sample_sample_name, 'SCIENTIFIC_NAME')
text = sample_info.pop('scientific_name')
scientific_name.text = escape(clean_whitespace(text))
description = ET.SubElement(sample, 'DESCRIPTION')
text = sample_info.pop('description')
description.text = escape(clean_whitespace(text))
if sample_info:
if ignore_columns is not None:
for key in ignore_columns:
del sample_info[key]
sample_attributes = ET.SubElement(sample, 'SAMPLE_ATTRIBUTES')
self._add_dict_as_tags_and_values(sample_attributes,
'SAMPLE_ATTRIBUTE',
sample_info)
return sample_set
def _generate_spot_descriptor(self, design, platform):
"""This XML element (and its subelements) must be written for every
sample, but its generation depends on only study-level information.
Therefore, we can break it out into its own method.
"""
# This section applies only to the LS454 platform
if platform != 'LS454':
return
# There is some hard-coded information in here, but this is what we
# have always done in the past...
spot_descriptor = ET.SubElement(design, 'SPOT_DESCRIPTOR')
ET.SubElement(spot_descriptor, 'SPOT_DECODE_SPEC')
read_spec = ET.SubElement(spot_descriptor, 'READ_SPEC')
read_index = ET.SubElement(read_spec, 'READ_INDEX')
read_index.text = '0'
read_class = ET.SubElement(read_spec, 'READ_CLASS')
read_class.text = 'Application Read'
read_type = ET.SubElement(read_spec, 'READ_TYPE')
read_type.text = 'Forward'
base_coord = ET.SubElement(read_spec, 'BASE_COORD')
base_coord.text = '1'
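    # For reference only (derived from the code above, with the hard-coded
    # values mentioned in the comment): on LS454 the resulting fragment looks
    # roughly like
    #
    #   <SPOT_DESCRIPTOR>
    #     <SPOT_DECODE_SPEC />
    #     <READ_SPEC>
    #       <READ_INDEX>0</READ_INDEX>
    #       <READ_CLASS>Application Read</READ_CLASS>
    #       <READ_TYPE>Forward</READ_TYPE>
    #       <BASE_COORD>1</BASE_COORD>
    #     </READ_SPEC>
    #   </SPOT_DESCRIPTOR>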
def generate_experiment_xml(self, samples=None):
"""Generates the experiment XML file
Parameters
----------
samples : list of str, optional
The list of samples to be included in the experiment xml
Returns
-------
ET.Element
Object with experiment XML values
"""
study_accession = self.study.ebi_study_accession
if study_accession:
study_ref_dict = {'accession': study_accession}
else:
study_ref_dict = {'refname': self._get_study_alias()}
experiment_set = ET.Element('EXPERIMENT_SET', {
'xmlns:xsi': self.xmlns_xsi,
"xsi:noNamespaceSchemaLocation": self.xsi_noNSL % "experiment"})
samples = samples if samples is not None else self.samples.keys()
if self.investigation_type == 'Other':
library_strategy = self.new_investigation_type
else:
library_strategy = self.investigation_type
for sample_name in sorted(samples):
experiment_alias = self._get_experiment_alias(sample_name)
sample_prep = dict(self.samples_prep[sample_name])
if self._ebi_sample_accessions[sample_name]:
sample_descriptor_dict = {
'accession': self._ebi_sample_accessions[sample_name]}
else:
sample_descriptor_dict = {
'refname': self._get_sample_alias(sample_name)}
platform = sample_prep.pop('platform')
experiment = ET.SubElement(experiment_set, 'EXPERIMENT', {
'alias': experiment_alias,
'center_name': qiita_config.ebi_center_name}
)
title = ET.SubElement(experiment, 'TITLE')
title.text = experiment_alias
ET.SubElement(experiment, 'STUDY_REF', study_ref_dict)
design = ET.SubElement(experiment, 'DESIGN')
design_description = ET.SubElement(design,
'DESIGN_DESCRIPTION')
edd = sample_prep.pop('experiment_design_description')
design_description.text = escape(clean_whitespace(edd))
ET.SubElement(design, 'SAMPLE_DESCRIPTOR', sample_descriptor_dict)
            # this is the library construction section. The only required
            # field is library_construction_protocol; the others are optional
library_descriptor = ET.SubElement(design, 'LIBRARY_DESCRIPTOR')
library_name = ET.SubElement(library_descriptor, 'LIBRARY_NAME')
library_name.text = self._get_library_name(sample_name)
lg = ET.SubElement(library_descriptor, 'LIBRARY_STRATEGY')
lg.text = escape(clean_whitespace(library_strategy.upper()))
# hardcoding some values,
# see https://github.com/biocore/qiita/issues/1485
library_source = ET.SubElement(library_descriptor,
"LIBRARY_SOURCE")
library_source.text = "METAGENOMIC"
library_selection = ET.SubElement(library_descriptor,
"LIBRARY_SELECTION")
library_selection.text = "PCR"
library_layout = ET.SubElement(library_descriptor,
"LIBRARY_LAYOUT")
if self.per_sample_FASTQ_reverse:
ET.SubElement(library_layout, "PAIRED")
else:
ET.SubElement(library_layout, "SINGLE")
lcp = ET.SubElement(library_descriptor,
"LIBRARY_CONSTRUCTION_PROTOCOL")
lcp.text = escape(clean_whitespace(
sample_prep.pop('library_construction_protocol')))
self._generate_spot_descriptor(design, platform)
platform_element = ET.SubElement(experiment, 'PLATFORM')
platform_info = ET.SubElement(platform_element,
platform.upper())
instrument_model = ET.SubElement(platform_info, 'INSTRUMENT_MODEL')
instrument_model.text = sample_prep.pop('instrument_model')
if sample_prep:
experiment_attributes = ET.SubElement(
experiment, 'EXPERIMENT_ATTRIBUTES')
self._add_dict_as_tags_and_values(experiment_attributes,
'EXPERIMENT_ATTRIBUTE',
sample_prep)
return experiment_set
def _add_file_subelement(self, add_file, file_type, sample_name,
is_forward):
"""generate_run_xml helper to avoid duplication of code
"""
if is_forward:
suffix = self.FWD_READ_SUFFIX
else:
suffix = self.REV_READ_SUFFIX
file_path = self.sample_demux_fps[sample_name] + suffix
with open(file_path, 'rb') as fp:
md5 = safe_md5(fp).hexdigest()
file_details = {'filetype': file_type,
'quality_scoring_system': 'phred',
'checksum_method': 'MD5',
'checksum': md5,
'filename': join(self.ebi_dir, basename(file_path))}
add_file(file_details)
def generate_run_xml(self):
"""Generates the run XML file
Returns
-------
ET.Element
Object with run XML values
"""
run_set = ET.Element('RUN_SET', {
'xmlns:xsi': self.xmlns_xsi,
"xsi:noNamespaceSchemaLocation": self.xsi_noNSL % "run"})
for sample_name, sample_prep in sorted(self.samples_prep.items()):
sample_prep = dict(sample_prep)
if self._ebi_experiment_accessions[sample_name]:
experiment_ref_dict = {
'accession': self._ebi_experiment_accessions[sample_name]}
else:
experiment_alias = self._get_experiment_alias(sample_name)
experiment_ref_dict = {'refname': experiment_alias}
# We only submit fastq
file_type = 'fastq'
run = ET.SubElement(run_set, 'RUN', {
'alias': self._get_run_alias(sample_name),
'center_name': qiita_config.ebi_center_name}
)
ET.SubElement(run, 'EXPERIMENT_REF', experiment_ref_dict)
data_block = ET.SubElement(run, 'DATA_BLOCK')
files = ET.SubElement(data_block, 'FILES')
add_file = partial(ET.SubElement, files, 'FILE')
add_file_subelement = partial(self._add_file_subelement, add_file,
file_type, sample_name)
add_file_subelement(is_forward=True)
if self.per_sample_FASTQ_reverse:
add_file_subelement(is_forward=False)
return run_set
def generate_submission_xml(self, submission_date=None):
"""Generates the submission XML file
Parameters
----------
submission_date : date, optional
            Date when the submission was created; if None, date.today() will
be used.
Returns
-------
ET.Element
Object with submission XML values
Notes
-----
        EBI requires a date when the submission will be automatically made
public. This date is generated from the submission date + 365 days.
"""
submission_set = ET.Element('SUBMISSION_SET', {
'xmlns:xsi': self.xmlns_xsi,
"xsi:noNamespaceSchemaLocation": self.xsi_noNSL % "submission"})
submission = ET.SubElement(submission_set, 'SUBMISSION', {
'alias': self._get_submission_alias(),
'center_name': qiita_config.ebi_center_name}
)
actions = ET.SubElement(submission, 'ACTIONS')
if self.study_xml_fp:
study_action = ET.SubElement(actions, 'ACTION')
ET.SubElement(study_action, self.action, {
'schema': 'study',
'source': basename(self.study_xml_fp)}
)
if self.sample_xml_fp:
sample_action = ET.SubElement(actions, 'ACTION')
ET.SubElement(sample_action, self.action, {
'schema': 'sample',
'source': basename(self.sample_xml_fp)}
)
if self.experiment_xml_fp:
experiment_action = ET.SubElement(actions, 'ACTION')
ET.SubElement(experiment_action, self.action, {
'schema': 'experiment',
'source': basename(self.experiment_xml_fp)}
)
if self.run_xml_fp:
run_action = ET.SubElement(actions, 'ACTION')
ET.SubElement(run_action, self.action, {
'schema': 'run', 'source': basename(self.run_xml_fp)}
)
if submission_date is None:
submission_date = date.today()
if self.action == 'ADD':
hold_action = ET.SubElement(actions, 'ACTION')
ET.SubElement(hold_action, 'HOLD', {
'HoldUntilDate': str(submission_date + timedelta(365))}
)
return submission_set
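    # Worked example of the hold date described in the Notes above (standard
    # library only; the date is made up):
    #
    #   >>> from datetime import date, timedelta
    #   >>> date(2021, 1, 1) + timedelta(365)
    #   datetime.date(2022, 1, 1)
    #
    # i.e. an ADD submission created on 2021-01-01 is held until 2022-01-01,
    # when EBI makes it public automatically.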
def write_xml_file(self, element, fp):
"""Writes an XML file after calling one of the XML generation
functions
Parameters
----------
element : ET.Element
The Element to be written
fp : str
The filepath to which the XML will be written
"""
if not exists(self.xml_dir):
makedirs(self.xml_dir)
ET.ElementTree(element).write(
fp, encoding='UTF-8', xml_declaration=True)
def generate_xml_files(self):
"""Generate all the XML files"""
get_output_fp = partial(join, self.xml_dir)
        # There are really only two main cases for EBI submission: ADD and
        # MODIFY; the only exception to the common flow is MODIFY
if self.action != 'MODIFY':
# The study.xml file needs to be generated if and only if the study
# does NOT have an ebi_study_accession
if not self.study.ebi_study_accession:
self.study_xml_fp = get_output_fp('study.xml')
self.write_xml_file(self.generate_study_xml(),
self.study_xml_fp)
# The sample.xml file needs to be generated if and only if there
# are samples in the current submission that do NOT have an
# ebi_sample_accession
new_samples = {
sample for sample, accession in
self.sample_template.ebi_sample_accessions.items()
if accession is None}
new_samples = new_samples.intersection(self.samples)
if new_samples:
self.sample_xml_fp = get_output_fp('sample.xml')
self.write_xml_file(self.generate_sample_xml(new_samples),
self.sample_xml_fp)
# The experiment.xml needs to be generated if and only if there are
            # samples in the current submission that do NOT have an
# ebi_experiment_accession
new_samples = {
sample for sample, accession in
self.prep_template.ebi_experiment_accessions.items()
if accession is None}
new_samples = new_samples.intersection(self.samples)
if new_samples:
self.experiment_xml_fp = get_output_fp('experiment.xml')
self.write_xml_file(self.generate_experiment_xml(new_samples),
self.experiment_xml_fp)
# Generate the run.xml as it should always be generated
self.run_xml_fp = get_output_fp('run.xml')
self.write_xml_file(self.generate_run_xml(), self.run_xml_fp)
self.submission_xml_fp = get_output_fp('submission.xml')
else:
            # When MODIFY we can only modify the sample (sample.xml) and prep
            # (experiment.xml) templates. The easiest approach is to generate
            # both and submit them. Note that we are assuming that Qiita does
            # not allow changing the required preprocessing information
all_samples = self.sample_template.ebi_sample_accessions
samples = {k: all_samples[k] for k in self.samples}
# finding unique name for sample xml
i = 0
while True:
self.sample_xml_fp = get_output_fp('sample_%d.xml' % i)
if not exists(self.sample_xml_fp):
break
i = i + 1
self.write_xml_file(self.generate_sample_xml(samples),
self.sample_xml_fp)
# finding unique name for experiment xml
i = 0
while True:
self.experiment_xml_fp = get_output_fp('experiment_%d.xml' % i)
if not exists(self.experiment_xml_fp):
break
i = i + 1
self.write_xml_file(self.generate_experiment_xml(samples),
self.experiment_xml_fp)
            # finding unique name for submission xml
i = 0
while True:
self.submission_xml_fp = get_output_fp('submission_%d.xml' % i)
if not exists(self.submission_xml_fp):
break
i = i + 1
            # to keep all curl replies, we also find a new (unique) name
i = 0
while True:
self.curl_reply = join(self.full_ebi_dir,
'curl_reply_%d.xml' % i)
if not exists(self.curl_reply):
break
i = i + 1
# The submission.xml is always generated
self.write_xml_file(self.generate_submission_xml(),
self.submission_xml_fp)
def generate_curl_command(
self,
ebi_seq_xfer_user=qiita_config.ebi_seq_xfer_user,
ebi_seq_xfer_pass=qiita_config.ebi_seq_xfer_pass,
ebi_dropbox_url=qiita_config.ebi_dropbox_url):
"""Generates the curl command for submission
Parameters
----------
ebi_seq_xfer_user : str
The user to use when submitting to EBI
ebi_seq_xfer_pass : str
The user password issued by EBI for REST submissions
ebi_dropbox_url : str
The dropbox url
Returns
-------
curl_command
The curl string to be executed
Notes
-----
- All 5 XML files (study, sample, experiment, run, and submission) must
be generated before executing this function
"""
# make sure that the XML files have been generated
url = '?auth=ENA%20{0}%20{1}'.format(quote(ebi_seq_xfer_user),
quote(ebi_seq_xfer_pass))
curl_cmd = ['curl -sS -k']
if self.submission_xml_fp is not None:
curl_cmd.append(' -F "SUBMISSION=@%s"' % self.submission_xml_fp)
if self.study_xml_fp is not None:
curl_cmd.append(' -F "STUDY=@%s"' % self.study_xml_fp)
if self.sample_xml_fp is not None:
curl_cmd.append(' -F "SAMPLE=@%s"' % self.sample_xml_fp)
if self.run_xml_fp is not None:
curl_cmd.append(' -F "RUN=@%s"' % self.run_xml_fp)
if self.experiment_xml_fp is not None:
curl_cmd.append(' -F "EXPERIMENT=@%s"' % self.experiment_xml_fp)
curl_cmd.append(' "%s"' % join(ebi_dropbox_url, url))
return ''.join(curl_cmd)
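    # Illustrative sketch of the command string assembled above; paths,
    # credentials and the dropbox URL are made up:
    #
    #   curl -sS -k -F "SUBMISSION=@/x/submission.xml" -F "STUDY=@/x/study.xml"
    #     -F "SAMPLE=@/x/sample.xml" -F "RUN=@/x/run.xml"
    #     -F "EXPERIMENT=@/x/experiment.xml"
    #     "https://dropbox.example/?auth=ENA%20user%20password"
    #
    # (the real string is a single line; only the XML files that were actually
    # generated are included).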
def generate_send_sequences_cmd(self):
"""Generate the sequences to EBI via ascp command
Returns
-------
ascp_command
The ascp command to be executed
Notes
-----
- All 5 XML files (study, sample, experiment, run, and submission) must
be generated before executing this function
"""
fastqs = []
for _, sfp in self.sample_demux_fps.items():
fastqs.append(sfp + self.FWD_READ_SUFFIX)
if self.per_sample_FASTQ_reverse:
sfp = sfp + self.REV_READ_SUFFIX
fastqs.append(sfp)
        # split the fastqs into (at most) 10 interleaved groups
fastqs_div = [fastqs[i::10] for i in range(10) if fastqs[i::10]]
ascp_commands = []
for f in fastqs_div:
ascp_commands.append('ascp --ignore-host-key -d -QT -k2 '
'{0} {1}@{2}:./{3}/'.format(
' '.join(f),
qiita_config.ebi_seq_xfer_user,
qiita_config.ebi_seq_xfer_url,
self.ebi_dir))
return ascp_commands
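    # Worked example of the grouping above: with 12 fastq paths
    # [f0, f1, ..., f11], fastqs[i::10] produces [f0, f10], [f1, f11],
    # [f2], ..., [f9] -- i.e. at most 10 interleaved groups, and therefore at
    # most 10 ascp commands, regardless of how many files there are.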
def parse_EBI_reply(self, curl_result, test=False):
"""Parse and verify reply from EBI after sending XML files
Parameters
----------
curl_result : str
The reply sent by EBI after sending XML files
test : bool
            If true, we will assume this is a test and ignore some parsing
            errors
Returns
-------
str
The study accession number. None in case of failure
dict of {str: str}
The sample accession numbers, keyed by sample id. None in case of
failure
dict of {str: str}
The biosample accession numbers, keyed by sample id. None in case
of failure
dict of {str: str}
The experiment accession numbers, keyed by sample id. None in case
of failure
dict of {str: str}
The run accession numbers, keyed by sample id. None in case of
failure
Raises
------
EBISubmissionError
If curl_result is not a valid XML file
            If the EBI submission has not been successful
If multiple study tags are found in the curl result
"""
try:
root = ET.fromstring(curl_result)
except ParseError:
error_msg = ("The curl result from the EBI submission doesn't "
"look like an XML file:\n%s" % curl_result)
le = LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(
"The curl result from the EBI submission doesn't look like "
"an XML file. Contact and admin for more information. "
"Log id: %d" % le.id)
success = root.get('success') == 'true'
if not success:
# here we want to parse out the errors so the failures are clearer
errors = {elem.text for elem in root.iter("ERROR")}
raise EBISubmissionError("The EBI submission failed:\n%s"
% '\n'.join(errors))
if test:
study_accession = 'MyStudyAccession'
sample_accessions = {}
biosample_accessions = {}
experiment_accessions = {}
run_accessions = {}
return (study_accession, sample_accessions, biosample_accessions,
experiment_accessions, run_accessions)
study_elem = root.findall("STUDY")
if study_elem:
if len(study_elem) > 1:
raise EBISubmissionError(
"Multiple study tags found in EBI reply: %d"
% len(study_elem))
study_elem = study_elem[0]
study_accession = study_elem.get('accession')
else:
study_accession = None
sample_accessions = {}
biosample_accessions = {}
for elem in root.iter("SAMPLE"):
alias = elem.get('alias')
sample_id = self._sample_aliases[alias]
sample_accessions[sample_id] = elem.get('accession')
ext_id = elem.find('EXT_ID')
biosample_accessions[sample_id] = ext_id.get('accession')
def data_retriever(key, trans_dict):
res = {}
for elem in root.iter(key):
alias = elem.get('alias')
res[trans_dict[alias]] = elem.get('accession')
return res
experiment_accessions = data_retriever("EXPERIMENT",
self._experiment_aliases)
run_accessions = data_retriever("RUN", self._run_aliases)
return (study_accession, sample_accessions, biosample_accessions,
experiment_accessions, run_accessions)
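    # Minimal sketch of a successful receipt that the parser above expects
    # (tag layout inferred from the lookups in this method; all accession
    # values and the root tag name are made up for illustration):
    #
    #   <RECEIPT success="true">
    #     <STUDY accession="ERP000001"/>
    #     <SAMPLE alias="..." accession="ERS000001">
    #       <EXT_ID accession="SAMEA000001"/>
    #     </SAMPLE>
    #     <EXPERIMENT alias="..." accession="ERX000001"/>
    #     <RUN alias="..." accession="ERR000001"/>
    #   </RECEIPT>
    #
    # The aliases must match the ones recorded in self._sample_aliases,
    # self._experiment_aliases and self._run_aliases for the translations
    # above to succeed.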
def _generate_demultiplexed_fastq_per_sample_FASTQ(self):
"""Modularity helper"""
# helper function to write files in this method
def _rename_file(fp, new_fp):
if fp.endswith('.gz'):
copyfile(fp, new_fp)
else:
cmd = "gzip -c %s > %s" % (fp, new_fp)
stdout, stderr, rv = system_call(cmd)
if rv != 0:
error_msg = (
"Error:\nStd output:%s\nStd error:%s"
% (stdout, stderr))
raise EBISubmissionError(error_msg)
fwd_reads = []
rev_reads = []
for x in self.artifact.filepaths:
if x['fp_type'] == 'raw_forward_seqs':
fwd_reads.append((basename(x['fp']), x['fp']))
elif x['fp_type'] == 'raw_reverse_seqs':
rev_reads.append((basename(x['fp']), x['fp']))
fwd_reads.sort(key=lambda x: x[1])
rev_reads.sort(key=lambda x: x[1])
if rev_reads:
self.per_sample_FASTQ_reverse = True
        # merging forward and reverse into a single list; note that at this
        # stage the files have passed multiple rounds of review (the validator
        # when the artifact was created, the summary generator, etc.), so we
        # can be sure that if a reverse read exists for one forward read, it
        # exists for all of them
fps = []
for f, r in zip_longest(fwd_reads, rev_reads):
sample_name = f[0]
fwd_read = f[1]
rev_read = r[1] if r is not None else None
fps.append((sample_name, (fwd_read, rev_read)))
if 'run_prefix' in self.prep_template.categories:
rps = [(k, v) for k, v in
self.prep_template.get_category('run_prefix').items()]
else:
rps = [(v, v.split('.', 1)[1]) for v in self.prep_template.keys()]
rps.sort(key=lambda x: x[1])
demux_samples = set()
for sn, rp in rps:
for i, (bn, fp) in enumerate(fps):
if bn.startswith(rp):
demux_samples.add(sn)
new_fp = self.sample_demux_fps[sn] + self.FWD_READ_SUFFIX
_rename_file(fp[0], new_fp)
if fp[1] is not None:
new_fp = self.sample_demux_fps[
sn] + self.REV_READ_SUFFIX
_rename_file(fp[1], new_fp)
del fps[i]
break
if fps:
error_msg = (
'Discrepancy between filepaths and sample names. Extra'
' filepaths: %s' % ', '.join([fp[0] for fp in fps]))
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
return demux_samples, \
set(self.samples.keys()).difference(set(demux_samples))
def _generate_demultiplexed_fastq_demux(self, mtime):
"""Modularity helper"""
# An artifact will hold only one file of type
# `preprocessed_demux`. Thus, we only use the first one
# (the only one present)
ar = self.artifact
demux = [x['fp'] for x in ar.filepaths
if x['fp_type'] == 'preprocessed_demux'][0]
demux_samples = set()
with open_file(demux) as demux_fh:
if not isinstance(demux_fh, File):
error_msg = (
"'%s' doesn't look like a demux file" % demux)
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
for s, i in to_per_sample_ascii(demux_fh,
self.prep_template.keys()):
s = s.decode('ascii')
sample_fp = self.sample_demux_fps[s] + self.FWD_READ_SUFFIX
wrote_sequences = False
with GzipFile(sample_fp, mode='w', mtime=mtime) as fh:
for record in i:
fh.write(record)
wrote_sequences = True
if wrote_sequences:
demux_samples.add(s)
else:
del(self.samples[s])
del(self.samples_prep[s])
del(self.sample_demux_fps[s])
remove(sample_fp)
return demux_samples
def generate_demultiplexed_fastq(self, rewrite_fastq=False, mtime=None):
"""Generates demultiplexed fastq
Parameters
----------
rewrite_fastq : bool, optional
If true, it forces the rewrite of the fastq files
mtime : float, optional
The time to use when creating the gz files. If None, the current
time will be used by gzip.GzipFile. This is useful for testing.
Returns
-------
demux_samples
List of successful demultiplexed samples
Notes
-----
        - As a performance feature, this method will check if self.full_ebi_dir
already exists and, if it does, the script will assume that in a
previous execution this step was performed correctly and will simply
read the file names from self.full_ebi_dir
        - When the object is created (init), samples, samples_prep and
        sample_demux_fps hold values for all available samples in the database.
        Here some of those values will be deleted (the del's within the loops)
        for those cases where the fastq.gz files weren't written or don't
        exist. This indicates that they had no sequences, and such files are
        not accepted by EBI
Raises
------
EBISubmissionError
- The demux file couldn't be read
- All samples are removed
"""
dir_not_exists = not isdir(self.full_ebi_dir)
missing_samples = []
if dir_not_exists or rewrite_fastq:
# if it exists, remove folder and start from scratch
if isdir(self.full_ebi_dir):
rmtree(self.full_ebi_dir)
create_nested_path(self.full_ebi_dir)
if self.artifact.artifact_type == 'per_sample_FASTQ':
demux_samples, missing_samples = \
self._generate_demultiplexed_fastq_per_sample_FASTQ()
else:
demux_samples = self._generate_demultiplexed_fastq_demux(mtime)
else:
            # if we reach this else branch, it means that we have already
            # generated the raw files and for some reason the submission
            # failed, so we don't need to generate the files again; we just
            # check which files exist in the file path to create our final
            # list of samples
demux_samples = set()
extension = self.FWD_READ_SUFFIX
extension_len = len(extension)
all_missing_files = set()
for f in listdir(self.full_ebi_dir):
fpath = join(self.full_ebi_dir, f)
if isfile(fpath) and f.endswith(extension):
demux_samples.add(f[:-extension_len])
else:
all_missing_files.add(f[:-extension_len])
            # at this stage we have created/reviewed all the files and have
            # all the sample names; however, we are not sure whether we are
            # dealing with just forward reads or also with reverse reads. The
            # easiest way to check is to review all_missing_files
missing_files = all_missing_files - demux_samples
if missing_files != all_missing_files:
self.per_sample_FASTQ_reverse = True
missing_samples = set(
self.samples.keys()).difference(demux_samples)
if missing_samples:
for ms in missing_samples:
del(self.samples[ms])
del(self.samples_prep[ms])
del(self.sample_demux_fps[ms])
if not demux_samples:
error_msg = ("All samples were removed from the submission "
"because the demux file is empty or the sample names "
"do not match.")
LogEntry.create('Runtime', error_msg)
raise EBISubmissionError(error_msg)
return demux_samples
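# End-to-end sketch of how the methods above are typically chained; the class
# name and constructor signature are assumptions (they are not shown in this
# excerpt), so everything is left as comments:
#
#   submission = EBISubmission(artifact_id, action)       # hypothetical
#   submission.generate_demultiplexed_fastq()
#   submission.generate_xml_files()
#   ascp_cmds = submission.generate_send_sequences_cmd()  # send the fastqs
#   curl_cmd = submission.generate_curl_command()         # send the XMLs
#   # ...after running curl_cmd and capturing its output:
#   accessions = submission.parse_EBI_reply(curl_result)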
|
|
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, os, cmd, threading, code, re
from optparse import OptionParser
from androguard.core import *
from androguard.core.androgen import *
from androguard.core.androconf import *
from androguard.core.bytecode import *
from androguard.core.bytecodes.jvm import *
from androguard.core.bytecodes.dvm import *
from androguard.core.bytecodes.apk import *
from androguard.core.analysis.analysis import *
from androguard.core.analysis.ganalysis import *
from androguard.core.analysis.risk import *
from androguard.decompiler.decompiler import *
from androguard.core import androconf
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
from cPickle import dumps, loads
option_0 = { 'name' : ('-i', '--input'), 'help' : 'file : use this filename', 'nargs' : 1 }
option_1 = { 'name' : ('-d', '--display'), 'help' : 'display the file in human readable format', 'action' : 'count' }
option_2 = { 'name' : ('-m', '--method'), 'help' : 'display method(s) respect with a regexp', 'nargs' : 1 }
option_3 = { 'name' : ('-f', '--field'), 'help' : 'display field(s) respect with a regexp', 'nargs' : 1 }
option_4 = { 'name' : ('-s', '--shell'), 'help' : 'open an interactive shell to play more easily with objects', 'action' : 'count' }
option_5 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }
option_6 = { 'name' : ('-p', '--pretty'), 'help' : 'pretty print !', 'action' : 'count' }
option_8 = { 'name' : ('-x', '--xpermissions'), 'help' : 'show paths of permissions', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6, option_8]
def init_print_colors() :
from IPython.utils import coloransi, io
default_colors(coloransi.TermColors)
CONF["PRINT_FCT"] = io.stdout.write
def interact() :
cfg = Config()
ipshell = InteractiveShellEmbed(config=cfg, banner1="Androlyze version %s" % androconf.ANDROGUARD_VERSION)
init_print_colors()
ipshell()
def save_session(l, filename) :
"""
    save your session!
    :param l: a list of objects
    :type l: a list of objects
:param filename: output filename to save the session
:type filename: string
:Example:
save_session([a, vm, vmx], "msession.json")
"""
fd = open(filename, "w")
fd.write( dumps(l, -1) )
fd.close()
def load_session(filename) :
"""
    load your session!
:param filename: the filename where the session has been saved
:type filename: string
:rtype: the elements of your session :)
:Example:
a, vm, vmx = load_session("mysession.json")
"""
return loads( open(filename, "r").read() )
def AnalyzeAPK(filename, raw=False, decompiler=None) :
"""
    Analyze an Android application and set everything up for quicker analysis!
:param filename: the filename of the android application or a buffer which represents the application
:type filename: string
    :param raw: True if you would like to use a buffer (optional)
:type raw: boolean
:param decompiler: ded, dex2jad, dad (optional)
:type decompiler: string
:rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
"""
androconf.debug("APK ...")
a = APK(filename, raw)
d, dx = AnalyzeDex( a.get_dex(), raw=True, decompiler=decompiler )
return a, d, dx
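# Illustrative usage sketch (the file path is made up; kept as a comment so
# the module's import-time behaviour is unchanged):
#
#   a, d, dx = AnalyzeAPK("/tmp/example.apk", decompiler="dad")
#
# `a` is the APK object, `d` the DalvikVMFormat and `dx` the VMAnalysis, as
# documented above; AnalyzeDex() works the same way on a bare .dex file.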
def AnalyzeDex(filename, raw=False, decompiler=None) :
"""
    Analyze an Android dex file and set everything up for quicker analysis!
:param filename: the filename of the android dex file or a buffer which represents the dex file
:type filename: string
    :param raw: True if you would like to use a buffer (optional)
:type raw: boolean
:rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
"""
androconf.debug("DalvikVMFormat ...")
d = None
if raw == False :
d = DalvikVMFormat( open(filename, "rb").read() )
else :
d = DalvikVMFormat( filename )
androconf.debug("Export VM to python namespace")
d.create_python_export()
androconf.debug("VMAnalysis ...")
dx = uVMAnalysis( d )
androconf.debug("GVMAnais ...")
gx = GVMAnalysis( dx, None )
d.set_vmanalysis( dx )
d.set_gvmanalysis( gx )
RunDecompiler( d, dx, decompiler )
androconf.debug("XREF ...")
d.create_xref()
androconf.debug("DREF ...")
d.create_dref()
return d, dx
def RunDecompiler(d, dx, decompiler) :
"""
Run the decompiler on a specific analysis
:param d: the DalvikVMFormat object
:type d: :class:`DalvikVMFormat` object
:param dx: the analysis of the format
:type dx: :class:`VMAnalysis` object
:param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
:type decompiler: string
"""
if decompiler != None :
androconf.debug("Decompiler ...")
decompiler = decompiler.lower()
if decompiler == "dex2jad" :
d.set_decompiler( DecompilerDex2Jad( d, androconf.CONF["PATH_DEX2JAR"], androconf.CONF["BIN_DEX2JAR"], androconf.CONF["PATH_JAD"], androconf.CONF["BIN_JAD"] ) )
elif decompiler == "ded" :
d.set_decompiler( DecompilerDed( d, androconf.CONF["PATH_DED"], androconf.CONF["BIN_DED"] ) )
elif decompiler == "dad" :
d.set_decompiler( DecompilerDAD( d, dx ) )
else :
print "Unknown decompiler, use DAD decompiler by default"
d.set_decompiler( DecompilerDAD( d, dx ) )
def AnalyzeElf(filename, raw=False) :
    # avoid having to install smiasm for everybody
from androguard.core.binaries.elf import ELF
e = None
if raw == False:
e = ELF( open(filename, "rb").read() )
else:
e = ELF( filename )
ExportElfToPython( e )
return e
def ExportElfToPython(e) :
for function in e.get_functions():
name = "FUNCTION_" + function.name
setattr( e, name, function )
def AnalyzeJAR(filename, raw=False) :
androconf.debug("JAR ...")
a = JAR(filename, raw)
d = AnalyzeClasses( a.get_classes() )
return a, d
def AnalyzeClasses( classes ) :
d = {}
for i in classes :
d[i[0]] = JVMFormat( i[1] )
return d
def main(options, arguments) :
if options.shell != None :
interact()
elif options.input != None :
_a = AndroguardS( options.input )
if options.pretty != None :
init_print_colors()
if options.display != None :
if options.pretty != None :
_a.ianalyze()
_a.pretty_show()
else :
_a.show()
elif options.method != None :
for method in _a.get("method", options.method) :
if options.pretty != None :
_a.ianalyze()
method.pretty_show()
else :
method.show()
elif options.field != None :
for field in _a.get("field", options.field) :
field.show()
elif options.xpermissions != None :
_a.ianalyze()
perms_access = _a.get_analysis().get_permissions( [] )
for perm in perms_access :
print "PERM : ", perm
for path in perms_access[ perm ] :
show_Path( _a.get_vm(), path )
elif options.version != None :
print "Androlyze version %s" % androconf.ANDROGUARD_VERSION
if __name__ == "__main__" :
parser = OptionParser()
for option in options :
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
sys.argv[:] = arguments
main(options, arguments)
|
|
from __future__ import print_function
import logging
import pprint
import math
import numpy
import os
import operator
import theano
from six.moves import input
from picklable_itertools.extras import equizip
from theano import tensor
from blocks.bricks import Tanh, Initializable
from blocks.bricks.base import application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import SimpleRecurrent, Bidirectional
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.parallel import Fork
from blocks.bricks.sequence_generators import (
SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback)
from blocks.config import config
from blocks.graph import ComputationGraph
from fuel.transformers import Mapping, Batch, Padding, Filter
from fuel.datasets import OneBillionWord, TextFile
from fuel.schemes import ConstantScheme
from blocks.serialization import load_parameter_values
from blocks.algorithms import (GradientDescent, Scale,
StepClipping, CompositeRule)
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.filter import VariableFilter
from blocks.utils import named_copy, dict_union
from blocks.search import BeamSearch
config.recursion_limit = 100000
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
# Dictionaries
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
char2code = {v: k for k, v in code2char.items()}
def reverse_words(sample):
sentence = sample[0]
result = []
word_start = -1
for i, code in enumerate(sentence):
if code >= char2code[' ']:
if word_start >= 0:
result.extend(sentence[i - 1:word_start - 1:-1])
word_start = -1
result.append(code)
else:
if word_start == -1:
word_start = i
return (result,)
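# Worked example (illustrative): separators are the codes that compare
# >= char2code[' '] (space, <S> and </S> sit at the end of all_chars), so for
# an encoded "<S>ab cd</S>" the loop flushes each word in reverse when it hits
# a separator and the result decodes to "<S>ba dc</S>" -- characters are
# reversed within words while word order and separators are preserved.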
def _lower(s):
return s.lower()
def _transpose(data):
return tuple(array.T for array in data)
def _filter_long(data):
return len(data[0]) <= 100
def _is_nan(log):
return math.isnan(log.current_row['total_gradient_norm'])
class WordReverser(Initializable):
"""The top brick.
It is often convenient to gather all bricks of the model under the
roof of a single top brick.
"""
def __init__(self, dimension, alphabet_size, **kwargs):
super(WordReverser, self).__init__(**kwargs)
encoder = Bidirectional(
SimpleRecurrent(dim=dimension, activation=Tanh()))
fork = Fork([name for name in encoder.prototype.apply.sequences
if name != 'mask'])
fork.input_dim = dimension
fork.output_dims = [dimension for name in fork.input_names]
lookup = LookupTable(alphabet_size, dimension)
transition = SimpleRecurrent(
activation=Tanh(),
dim=dimension, name="transition")
attention = SequenceContentAttention(
state_names=transition.apply.states,
attended_dim=2 * dimension, match_dim=dimension, name="attention")
readout = Readout(
readout_dim=alphabet_size,
source_names=[transition.apply.states[0],
attention.take_glimpses.outputs[0]],
emitter=SoftmaxEmitter(name="emitter"),
feedback_brick=LookupFeedback(alphabet_size, dimension),
name="readout")
generator = SequenceGenerator(
readout=readout, transition=transition, attention=attention,
name="generator")
self.lookup = lookup
self.fork = fork
self.encoder = encoder
self.generator = generator
self.children = [lookup, fork, encoder, generator]
@application
def cost(self, chars, chars_mask, targets, targets_mask):
return self.generator.cost_matrix(
targets, targets_mask,
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True),
mask=chars_mask)),
attended_mask=chars_mask)
@application
def generate(self, chars):
return self.generator.generate(
n_steps=3 * chars.shape[0], batch_size=chars.shape[1],
attended=self.encoder.apply(
**dict_union(
self.fork.apply(self.lookup.apply(chars), as_dict=True))),
attended_mask=tensor.ones(chars.shape))
def main(mode, save_path, num_batches, data_path=None):
reverser = WordReverser(100, len(char2code), name="reverser")
if mode == "train":
# Data processing pipeline
dataset_options = dict(dictionary=char2code, level="character",
preprocess=_lower)
if data_path:
dataset = TextFile(data_path, **dataset_options)
else:
dataset = OneBillionWord("training", [99], **dataset_options)
data_stream = dataset.get_example_stream()
data_stream = Filter(data_stream, _filter_long)
data_stream = Mapping(data_stream, reverse_words,
add_sources=("targets",))
data_stream = Batch(data_stream, iteration_scheme=ConstantScheme(10))
data_stream = Padding(data_stream)
data_stream = Mapping(data_stream, _transpose)
# Initialization settings
reverser.weights_init = IsotropicGaussian(0.1)
reverser.biases_init = Constant(0.0)
reverser.push_initialization_config()
reverser.encoder.weights_init = Orthogonal()
reverser.generator.transition.weights_init = Orthogonal()
# Build the cost computation graph
chars = tensor.lmatrix("features")
chars_mask = tensor.matrix("features_mask")
targets = tensor.lmatrix("targets")
targets_mask = tensor.matrix("targets_mask")
batch_cost = reverser.cost(
chars, chars_mask, targets, targets_mask).sum()
batch_size = named_copy(chars.shape[1], "batch_size")
cost = aggregation.mean(batch_cost, batch_size)
cost.name = "sequence_log_likelihood"
logger.info("Cost graph is built")
# Give an idea of what's going on
model = Model(cost)
params = model.get_params()
logger.info("Parameters:\n" +
pprint.pformat(
[(key, value.get_value().shape) for key, value
in params.items()],
width=120))
# Initialize parameters
for brick in model.get_top_bricks():
brick.initialize()
# Define the training algorithm.
cg = ComputationGraph(cost)
algorithm = GradientDescent(
cost=cost, params=cg.parameters,
step_rule=CompositeRule([StepClipping(10.0), Scale(0.01)]))
# Fetch variables useful for debugging
generator = reverser.generator
(energies,) = VariableFilter(
applications=[generator.readout.readout],
name_regex="output")(cg.variables)
(activations,) = VariableFilter(
applications=[generator.transition.apply],
name=generator.transition.apply.states[0])(cg.variables)
max_length = named_copy(chars.shape[0], "max_length")
cost_per_character = named_copy(
aggregation.mean(batch_cost, batch_size * max_length),
"character_log_likelihood")
min_energy = named_copy(energies.min(), "min_energy")
max_energy = named_copy(energies.max(), "max_energy")
mean_activation = named_copy(abs(activations).mean(),
"mean_activation")
observables = [
cost, min_energy, max_energy, mean_activation,
batch_size, max_length, cost_per_character,
algorithm.total_step_norm, algorithm.total_gradient_norm]
for name, param in params.items():
observables.append(named_copy(
param.norm(2), name + "_norm"))
observables.append(named_copy(
algorithm.gradients[param].norm(2), name + "_grad_norm"))
# Construct the main loop and start training!
average_monitoring = TrainingDataMonitoring(
observables, prefix="average", every_n_batches=10)
main_loop = MainLoop(
model=model,
data_stream=data_stream,
algorithm=algorithm,
extensions=[
Timing(),
TrainingDataMonitoring(observables, after_batch=True),
average_monitoring,
FinishAfter(after_n_batches=num_batches)
# This shows a way to handle NaN emerging during
# training: simply finish it.
.add_condition("after_batch", _is_nan),
# Saving the model and the log separately is convenient,
# because loading the whole pickle takes quite some time.
Checkpoint(save_path, every_n_batches=500,
save_separately=["model", "log"]),
Printing(every_n_batches=1)])
main_loop.run()
elif mode == "sample" or mode == "beam_search":
chars = tensor.lmatrix("input")
generated = reverser.generate(chars)
model = Model(generated)
logger.info("Loading the model..")
model.set_param_values(load_parameter_values(save_path))
def generate(input_):
"""Generate output sequences for an input sequence.
            Encapsulates most of the difference between sampling and beam
search.
Returns
-------
outputs : list of lists
Trimmed output sequences.
costs : list
The negative log-likelihood of generating the respective
sequences.
"""
if mode == "beam_search":
samples, = VariableFilter(
bricks=[reverser.generator], name="outputs")(
ComputationGraph(generated[1]))
# NOTE: this will recompile beam search functions
                # every time the user presses Enter. Do not create
# a new `BeamSearch` object every time if
# speed is important for you.
beam_search = BeamSearch(input_.shape[1], samples)
outputs, costs = beam_search.search(
{chars: input_}, char2code['</S>'],
3 * input_.shape[0])
else:
_1, outputs, _2, _3, costs = (
model.get_theano_function()(input_))
outputs = list(outputs.T)
costs = list(costs.T)
for i in range(len(outputs)):
outputs[i] = list(outputs[i])
try:
true_length = outputs[i].index(char2code['</S>']) + 1
except ValueError:
true_length = len(outputs[i])
outputs[i] = outputs[i][:true_length]
costs[i] = costs[i][:true_length].sum()
return outputs, costs
while True:
line = input("Enter a sentence\n")
message = ("Enter the number of samples\n" if mode == "sample"
else "Enter the beam size\n")
batch_size = int(input(message))
encoded_input = [char2code.get(char, char2code["<UNK>"])
for char in line.lower().strip()]
encoded_input = ([char2code['<S>']] + encoded_input +
[char2code['</S>']])
print("Encoder input:", encoded_input)
target = reverse_words((encoded_input,))[0]
print("Target: ", target)
samples, costs = generate(
numpy.repeat(numpy.array(encoded_input)[:, None],
batch_size, axis=1))
messages = []
for sample, cost in equizip(samples, costs):
message = "({})".format(cost)
message += "".join(code2char[code] for code in sample)
if sample == target:
message += " CORRECT!"
messages.append((cost, message))
messages.sort(key=operator.itemgetter(0), reverse=True)
for _, message in messages:
print(message)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def _generate_test_combinations(cases):
def reduce_fn(x, y):
name, fn = y
return x + combinations.combine(map_fn=combinations.NamedObject(name, fn))
return functools.reduce(reduce_fn, cases, [])
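# For clarity (restating the reduce above): given cases such as
# [("Invert", bitwise_ops.invert)], the helper returns the concatenation of
# one combinations.combine(map_fn=combinations.NamedObject(name, fn)) result
# per (name, fn) pair, so each generated test receives the op as its `map_fn`
# argument and the test name carries the op's name.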
def _unary_bitwise_test_combinations():
cases = [("Invert", bitwise_ops.invert)]
return _generate_test_combinations(cases)
def _unary_logical_test_combinations():
cases = [("LogicalNot", math_ops.logical_not)]
return _generate_test_combinations(cases)
def _unary_complex_test_combinations():
cases = [
("Angle", math_ops.angle),
("ComplexAbs", math_ops.abs),
("Conj", math_ops.conj),
("Imag", math_ops.imag),
("Real", math_ops.real),
]
return _generate_test_combinations(cases)
def _unary_real_test_combinations():
# acosh requires values x >= 1
def safe_acosh(x):
return math_ops.acosh(1 + math_ops.square(x))
cases = [
("Abs", math_ops.abs),
("Acos", math_ops.acos),
("Acosh", safe_acosh),
("Asin", math_ops.asin),
("Asinh", math_ops.asinh),
("Atan", math_ops.atan),
("Atanh", math_ops.atanh),
("BesselI0e", math_ops.bessel_i0e),
("BesselI1e", math_ops.bessel_i1e),
("Ceil", math_ops.ceil),
("Cos", math_ops.cos),
("Cosh", math_ops.cosh),
("Digamma", math_ops.digamma),
("Elu", nn.elu),
("Erf", math_ops.erf),
("Erfc", math_ops.erfc),
("Exp", math_ops.exp),
("Expm1", math_ops.expm1),
("Floor", math_ops.floor),
("Inv", math_ops.inv),
("IsFinite", math_ops.is_finite),
("IsInf", math_ops.is_inf),
("Lgamma", math_ops.lgamma),
("Log", math_ops.log),
("Log1p", math_ops.log1p),
("Neg", math_ops.negative),
("Reciprocal", math_ops.reciprocal),
("Relu", nn.relu),
("Relu6", nn.relu6),
("Rint", math_ops.rint),
("Round", math_ops.round),
("Rsqrt", math_ops.rsqrt),
("Selu", nn.selu),
("Sigmoid", math_ops.sigmoid),
("Sign", math_ops.sign),
("Sin", math_ops.sin),
("Sinh", math_ops.sinh),
("Softplus", nn.softplus),
("Softsign", nn.softsign),
("Sqrt", math_ops.sqrt),
("Square", math_ops.square),
("Tan", math_ops.tan),
("Tanh", math_ops.tanh),
]
return _generate_test_combinations(cases)
def _binary_bitwise_test_combinations():
cases = [("BitwiseAnd", bitwise_ops.bitwise_and),
("BitwiseOr", bitwise_ops.bitwise_or),
("BitwiseXor", bitwise_ops.bitwise_xor),
("LeftShift", bitwise_ops.left_shift),
("RightShift", bitwise_ops.right_shift)]
return _generate_test_combinations(cases)
def _binary_logical_test_combinations():
cases = [("LogicalAnd", math_ops.logical_and),
("LogicalOr", math_ops.logical_or)]
return _generate_test_combinations(cases)
def _binary_real_test_combinations():
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
cases = [
("Add", math_ops.add),
("AddV2", math_ops.add_v2),
("Atan2", math_ops.atan2),
("Complex", math_ops.complex),
("DivNoNan", math_ops.div_no_nan),
("Equal", math_ops.equal),
("FloorDiv", math_ops.floor_div),
("FloorMod", math_ops.floor_mod),
("Greater", math_ops.greater),
("GreaterEqual", math_ops.greater_equal),
("Igamma", math_ops.igamma),
("Igammac", math_ops.igammac),
("IgammaGradA", math_ops.igamma_grad_a),
("Less", math_ops.less),
("LessEqual", math_ops.less_equal),
("Maximum", math_ops.maximum),
("Minimum", math_ops.minimum),
("Mod", math_ops.mod),
("Mul", math_ops.multiply),
("NotEqual", math_ops.not_equal),
("Polygamma", safe_polygamma),
("Pow", math_ops.pow),
("RealDiv", math_ops.divide),
("SquareDifference", math_ops.squared_difference),
("Sub", math_ops.subtract),
("TruncateMod", math_ops.truncate_mod),
("Zeta", safe_zeta),
]
return _generate_test_combinations(cases)
# TODO(rachelim): Consolidate tests with pfor when APIs are somewhat shared.
class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
def _enable_map_vectorization(self, dataset, use_choose=True):
options = dataset_ops.Options()
opt_options = options.experimental_optimization
opt_options.map_vectorization.enabled = True
opt_options.map_vectorization.use_choose_fastest = use_choose
return dataset.with_options(options)
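  # Minimal sketch of the same options pattern outside the test harness
  # (`dataset` is any tf.data dataset; kept as a comment for reference):
  #
  #   options = dataset_ops.Options()
  #   options.experimental_optimization.map_vectorization.enabled = True
  #   dataset = dataset.with_options(options)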
def _get_test_datasets(self,
base_dataset,
map_fn,
num_parallel_calls=None,
expect_optimized=True):
"""Given base dataset and map fn, creates test datasets.
Returns a tuple of (unoptimized dataset, optimized dataset). The
unoptimized dataset has the assertion that Batch follows Map. The optimized
dataset has the assertion that Map follows Batch, and has the
"map_vectorization" optimization applied.
Args:
base_dataset: Input dataset to map->batch
map_fn: Map function to use
num_parallel_calls: (Optional.) num_parallel_calls argument for map
expect_optimized: (Optional.) Whether we expect the optimization to take
place, in which case we will assert that Batch is followed by Map,
otherwise Map followed by Batch. Defaults to True.
Returns:
Tuple of (unoptimized dataset, optimized dataset).
"""
map_node_name = "Map"
if num_parallel_calls is not None:
map_node_name = "ParallelMap"
if compat.forward_compatible(2020, 3, 6):
map_node_name = "ParallelMapV2"
def _make_dataset(node_names):
dataset = base_dataset.apply(testing.assert_next(node_names))
dataset = dataset.map(map_fn, num_parallel_calls)
dataset = dataset.batch(100)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = False
dataset = dataset.with_options(options)
return dataset
unoptimized = _make_dataset([map_node_name, "BatchV2"])
# Note that because of the `ChooseDataset` fork, we can't use `assert_next`
# to verify the optimization result.
optimized = _make_dataset(["ChooseFastestBranch"] if expect_optimized else
[map_node_name, "BatchV2"])
optimized = self._enable_map_vectorization(optimized)
return unoptimized, optimized
def _testOptimization(self, map_fn, dataset_factory, num_parallel_calls):
dataset = dataset_factory()
unoptimized, optimized = self._get_test_datasets(dataset, map_fn,
num_parallel_calls)
self.assertDatasetsEqual(unoptimized, optimized)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testBasic(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fn = lambda x: (x, x + 1)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testBroadcast(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
value = np.random.rand(1, 1, 1, 1, 1, 1)
map_fn = lambda x: x + value
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testCast(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fn = lambda x: math_ops.cast(x, dtypes.float64)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testConst(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fn = lambda x: 2
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testCycle(self, num_parallel_calls):
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(1)
def map_fn(x):
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
return control_flow_ops.while_loop(c, b, [x])
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testReshape(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fn = lambda x: array_ops.reshape(x, (-1, 30))
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testTranspose(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fn = array_ops.transpose
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testUnstack(self, num_parallel_calls):
data = np.random.rand(10, 3)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors(data).repeat(5)
map_fns = [array_ops.unstack, lambda x: array_ops.unstack(x, axis=-1)]
for map_fn in map_fns:
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_unary_bitwise_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testUnaryBitwiseOperations(self, map_fn, num_parallel_calls):
x = np.random.randint(0, 10, (7, 3, 5))
dataset_factory = lambda: dataset_ops.Dataset.from_tensor_slices(x)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_unary_logical_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testUnaryLogicalOperations(self, map_fn, num_parallel_calls):
x = np.random.rand(3, 5)
dataset_factory = lambda: dataset_ops.Dataset.from_tensor_slices(x > 0)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_unary_complex_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testUnaryComplexOperations(self, map_fn, num_parallel_calls):
x = math_ops.complex(np.random.rand(3, 5), np.random.rand(3, 5))
dataset_factory = lambda: dataset_ops.Dataset.from_tensor_slices(x)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_unary_real_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testUnaryRealOperations(self, map_fn, num_parallel_calls):
x = np.random.rand(3, 5)
dataset_factory = lambda: dataset_ops.Dataset.from_tensor_slices(x)
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_binary_bitwise_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testBinaryBitwiseOperations(self, map_fn, num_parallel_calls):
x = np.random.randint(0, 10, (7, 3, 5))
y = np.random.randint(0, 10, (3, 5))
dataset_factory = lambda: dataset_ops.Dataset.from_tensors((x, y))
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_binary_logical_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testBinaryLogicalOperations(self, map_fn, num_parallel_calls):
x = np.random.rand(7, 3, 5)
y = np.random.rand(3, 5)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors((x > 0, y > 0))
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_binary_real_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testBinaryRealOperations(self, map_fn, num_parallel_calls):
x = np.random.rand(7, 3, 5)
y = np.random.rand(3, 5)
dataset_factory = lambda: dataset_ops.Dataset.from_tensors((x, y))
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testDecodeCsv(self, num_parallel_calls):
def dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
self._testOptimization(decode_csv_fn, dataset_factory, num_parallel_calls)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testParseSingleExample(self, num_parallel_calls):
def dataset_factory():
def _int64_feature(*values):
return feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
# pylint:disable=g-complex-comprehension
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
})).SerializeToString() for i in range(10)
]))
def parse_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
}
return parsing_ops.parse_single_example(x, features)
def dense_only_parse_fn(x):
return [
y for y in parse_fn(x)
if not isinstance(y, sparse_tensor.SparseTensor)
]
map_fns = [parse_fn, dense_only_parse_fn]
for map_fn in map_fns:
self._testOptimization(map_fn, dataset_factory, num_parallel_calls)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationBadMapFn(self):
# Test map functions that give an error
def map_fn(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch(
5, drop_remainder=True)
_, optimized = self._get_test_datasets(base_dataset, map_fn)
nxt = dataset_ops.make_one_shot_iterator(optimized).get_next()
self.evaluate(nxt)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithCapturedInputs(self):
# Tests that vectorization works with captured inputs.
y = constant_op.constant(1, shape=(2,))
z = constant_op.constant(2, shape=(2,))
def map_fn(x):
return x, y, z
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=True)
self.assertDatasetsEqual(optimized, unoptimized)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithMapAndBatchFusion(self):
# Tests that vectorization works on fused map and batch.
def map_fn(x):
return x**2
base_dataset = dataset_ops.Dataset.range(1000)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
base_dataset = base_dataset.with_options(options)
def _make_dataset(node_names):
dataset = base_dataset.apply(testing.assert_next(node_names))
dataset = dataset.apply(batching.map_and_batch(map_fn, 100))
return dataset
unoptimized = _make_dataset(["MapAndBatch"])
optimized = _make_dataset(["ChooseFastestBranch"])
optimized = self._enable_map_vectorization(optimized)
self.assertDatasetsEqual(optimized, unoptimized)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
fuse_first=[True, False], fuse_second=[True, False])))
def testOptimizationWithChainedMapAndBatch(self, fuse_first, fuse_second):
# Tests that vectorization works on chained map and batch functions.
def map_fn(x):
return x * 2
def make_apply_fn(is_fused):
if is_fused:
def apply_fn(dataset):
return dataset.apply(
batching.map_and_batch(map_fn, 2, 12, drop_remainder=True))
return apply_fn
else:
def apply_fn(dataset):
return dataset.map(map_fn, 12).batch(2, drop_remainder=True)
return apply_fn
base_dataset = dataset_ops.Dataset.range(1000)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
base_dataset = base_dataset.with_options(options)
apply_fn_1 = make_apply_fn(fuse_first)
apply_fn_2 = make_apply_fn(fuse_second)
def make_dataset():
dataset = base_dataset
dataset = apply_fn_1(dataset)
dataset = apply_fn_2(dataset)
return dataset
unoptimized = make_dataset()
optimized = make_dataset()
optimized = self._enable_map_vectorization(optimized)
self.assertDatasetsEqual(optimized, unoptimized)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
local_determinism=[True, False, None],
global_determinism=[True, False])))
def testOptimizationDeterminism(self, local_determinism, global_determinism):
# Tests that vectorization maintains the determinism setting.
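    # The per-map "deterministic" argument takes precedence; the global
    # experimental_deterministic option only applies when it is None.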
expect_determinism = local_determinism or (local_determinism is None and
global_determinism)
elements = list(range(1000))
def dataset_fn(delay_ms):
def sleep(x):
time.sleep(delay_ms / 1000)
return x
def map_function(x):
if math_ops.equal(x, 0):
return check_ops.ensure_shape(
script_ops.py_func(sleep, [x], x.dtype, stateful=False), ())
else:
return x
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = dataset.map(
map_function, num_parallel_calls=10, deterministic=local_determinism)
dataset = dataset.batch(1)
opts = dataset_ops.Options()
opts.experimental_deterministic = global_determinism
# Prevent the map/batch from being rewritten as MapAndBatch.
opts.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(opts)
dataset = self._enable_map_vectorization(dataset)
return dataset
self.checkDeterminism(
dataset_fn,
expect_determinism,
expected_elements=[[element] for element in elements])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationIgnoreStateful(self):
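    # The assert op makes map_fn stateful, so vectorization should leave it
    # alone; evaluating the dataset still trips the assert once x != 0.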
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, np.int64(0))]):
return array_ops.identity(x)
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(map_fn)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testOptimizationIgnoreRagged(self):
# Make sure we ignore inputs that might not be uniformly sized
def map_fn(x):
return array_ops.gather(x, np.int64(0))
# output_shape = (?,)
base_dataset = dataset_ops.Dataset.range(20).batch(3, drop_remainder=False)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsEqual(unoptimized, optimized)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationIgnoreRaggedMap(self):
# Don't optimize when the output of the map fn shapes are unknown.
def map_fn(x):
return array_ops.tile(x, x)
dataset = dataset_ops.Dataset.range(10).batch(1)
dataset = dataset.map(map_fn)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithUnknownBatchShape(self):
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Datasets with sparse tensors have unknown output shapes.
base_dataset = dataset_ops.Dataset.from_tensors(tensor)
unoptimized = base_dataset.apply(batching.map_and_batch(lambda x: x, 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized = unoptimized.with_options(options)
optimized = self._enable_map_vectorization(unoptimized)
self.assertDatasetsEqual(unoptimized, optimized)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithSparseTensor(self):
base_dataset = dataset_ops.Dataset.from_tensors(0)
def map_fn(x):
del x
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Datasets with sparse tensors have unknown output shapes.
unoptimized = base_dataset.apply(batching.map_and_batch(map_fn, 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized = unoptimized.with_options(options)
optimized = self._enable_map_vectorization(unoptimized)
self.assertDatasetsEqual(unoptimized, optimized)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithPrefetch(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(lambda x: x)
dataset = dataset.prefetch(1)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset)
self.assertDatasetProduces(dataset, [list(range(10))])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationWithoutChooseFastest(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(lambda x: x**2)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
self.assertDatasetProduces(dataset, [[x**2 for x in range(10)]])
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
import datetime
import fuse
import mock
import os
import six
import stat
import tempfile
import threading
import time
from girder.cli import mount
from girder.exceptions import ValidationException
from girder.models.file import File
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from tests import base
class ServerFuseTestCase(base.TestCase):
def _mountServer(self, path, shouldSucceed=True, maxWait=10, options=None):
"""
For testing, run the mount in the foreground in a thread. If the mount
should succeed, wait a short time for a mount to be ready. In local
testing, this can take 8 to 10 milliseconds.
:param path: the mount path. This waits for <path>/user to exist
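        :param shouldSucceed: if True, wait for the mount to become ready;
            otherwise wait for the mount thread to exit.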
:param maxWait: the maximum wait time in seconds.
:param options: fuseOptions to use in the mount. This should include
'foreground' for testing.
"""
kwargs = {
'fuseOptions': options or 'foreground',
'quiet': True,
}
mountThread = threading.Thread(target=mount.mountServer, args=(path, ), kwargs=kwargs)
mountThread.daemon = True
mountThread.start()
if shouldSucceed:
userPath = os.path.join(path, 'user')
endTime = time.time() + maxWait
while time.time() < endTime and not os.path.exists(userPath):
time.sleep(0.001)
self._mountThreads.append(mountThread)
else:
mountThread.join()
return mountThread
def setUp(self):
super(ServerFuseTestCase, self).setUp()
self._mountThreads = []
self.admin = User().findOne({'login': 'admin'})
self.user = User().findOne({'login': 'user'})
self.user2 = User().findOne({'login': 'second'})
self.mountPath = tempfile.mkdtemp()
self._mountServer(path=self.mountPath)
self.extraMountPath = tempfile.mkdtemp()
self.knownPaths = {
'user/admin/Private/Item 1/File 1A': 'File 1A',
'user/admin/Private/Item 1/File 1B': 'File 1B',
'user/admin/Private/Item 2/File 2': 'File 2',
'user/admin/Private/Item Without File/': None,
'user/user/Public/Item 3/File 3': 'File 3',
'user/user/Private/Item 4/File 4': 'File 4',
'user/user/Private/Folder/Item 5/File 5': 'File 5',
'collection/Test Collection/Private/Collection Item/Collection File': 'File 1A',
u'collection/Test Collection/Private/Collection Item/'
u'\u0444\u0430\u0439\u043b \u043a\u043e\u043b\u043b\u0435\u043a'
u'\u0446\u0438\u0438': 'File 1A',
}
self.adminFileName = 'user/admin/Private/Item 1/File 1A'
self.publicFileName = 'user/user/Public/Item 3/File 3'
self.privateFileName = 'user/user/Private/Item 4/File 4'
def tearDown(self):
super(ServerFuseTestCase, self).tearDown()
mount.unmountServer(self.mountPath, quiet=True)
mount.unmountServer(self.extraMountPath, quiet=True)
os.rmdir(self.mountPath)
os.rmdir(self.extraMountPath)
# Join threads that are done
for thread in self._mountThreads:
thread.join()
self._mountThreads = []
def testMainMount(self):
"""
Test the default mount point has access to all of the expected files.
"""
mountpath = self.mountPath
# Check that the mount lists users and collections
self.assertEqual(sorted(os.listdir(mountpath)), sorted(['user', 'collection']))
# Check that all known paths exist and that arbitrary other paths don't
for testpath, contents in six.iteritems(self.knownPaths):
localpath = os.path.join(mountpath, testpath)
# The path must exist
self.assertTrue(os.path.exists(localpath))
# The path plus an arbitrary string must not exist
self.assertFalse(os.path.exists(localpath + '.other'))
# If the path is a file, check that it equals the expected value
# and reports a non-zero size
if contents:
size = os.path.getsize(localpath)
with open(localpath) as file1:
self.assertEqual(file1.read().strip(), contents)
self.assertGreater(size, 0)
# The mtime should be recent
                file_stat = os.stat(localpath)
                self.assertGreater(file_stat.st_mtime, time.time() - 1e5)
# All parents should be folders and have zero size.
subpath = testpath
while '/' in subpath:
                subpath = subpath.rsplit('/', 1)[0]
localpath = os.path.join(mountpath, subpath)
self.assertTrue(os.path.isdir(localpath))
self.assertEqual(os.path.getsize(localpath), 0)
# An arbitrary alternate file should not exist
self.assertFalse(os.path.exists(localpath + '.other'))
def testBlockedMount(self):
"""
Test that when a mount point is non-empty the mount fails.
"""
blockFile = os.path.join(self.extraMountPath, 'block')
open(blockFile, 'wb').close()
with mock.patch('girder.plugin.logprint.error') as logprint:
self._mountServer(path=self.extraMountPath, shouldSucceed=False)
logprint.assert_called_once()
os.unlink(blockFile)
def testRWMountWarns(self):
"""
Test that when asking for an RW mount, a warning is issued.
"""
with mock.patch('girder.plugin.logprint.warning') as logprint:
self._mountServer(path=self.extraMountPath, options='foreground,rw=true')
logprint.assert_called_once()
logprint.assert_called_with('Ignoring the rw=True option')
def testFilePath(self):
"""
Test that all files report a FUSE path, and that this results in the
same file as the non-fuse path.
"""
files = list(File().find())
for file in files:
adapter = File().getAssetstoreAdapter(file)
filesystempath = adapter.fullPath(file)
filepath = File().getLocalFilePath(file)
fusepath = File().getGirderMountFilePath(file)
self.assertTrue(os.path.exists(filesystempath))
self.assertTrue(os.path.exists(filepath))
self.assertTrue(os.path.exists(fusepath))
self.assertEqual(filesystempath, filepath)
self.assertNotEqual(filesystempath, fusepath)
self.assertEqual(fusepath[:len(self.mountPath)], self.mountPath)
with open(filepath) as file1:
with open(fusepath) as file2:
self.assertEqual(file1.read(), file2.read())
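            # Convert the FUSE path back to a path relative to the mount point
            # so it can be checked against the knownPaths table.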
subpath = fusepath[len(self.mountPath):].lstrip('/')
if self.knownPaths.get(subpath):
with open(fusepath) as file1:
self.assertEqual(file1.read().strip(), self.knownPaths[subpath])
def testFilePathNoLocalPath(self):
"""
Test that if an assetstore adapter doesn't respond to getLocalFilePath,
we always get the fuse path.
"""
from girder.utility.filesystem_assetstore_adapter import FilesystemAssetstoreAdapter
def getLocalFilePath(self, file):
return super(FilesystemAssetstoreAdapter, self).getLocalFilePath(file)
file = File().findOne()
origGetLocalFilePath = FilesystemAssetstoreAdapter.getLocalFilePath
FilesystemAssetstoreAdapter.getLocalFilePath = getLocalFilePath
filepath = File().getLocalFilePath(file)
fusepath = File().getGirderMountFilePath(file)
FilesystemAssetstoreAdapter.getLocalFilePath = origGetLocalFilePath
self.assertTrue(os.path.exists(filepath))
self.assertTrue(os.path.exists(fusepath))
self.assertEqual(filepath, fusepath)
def testRemountAndSetting(self):
"""
Test remounting to a different location.
"""
# Check that the setting for the mount location matches the current
# mount and a file is reachable where we expect.
setting = Setting().get(SettingKey.GIRDER_MOUNT_INFORMATION)
self.assertEqual(setting['path'], self.mountPath)
self.assertTrue(os.path.exists(os.path.join(self.mountPath, self.publicFileName)))
self.assertFalse(os.path.exists(os.path.join(self.extraMountPath, self.publicFileName)))
mount.unmountServer(self.mountPath)
# After unmounting, the setting should be cleared (though perhaps not
# instantly) and files shouldn't be reachable.
endTime = time.time() + 10 # maximum time to wait
while time.time() < endTime:
setting = Setting().get(SettingKey.GIRDER_MOUNT_INFORMATION)
if setting is None:
break
time.sleep(0.05)
setting = Setting().get(SettingKey.GIRDER_MOUNT_INFORMATION)
self.assertIsNone(setting)
self.assertFalse(os.path.exists(os.path.join(self.mountPath, self.publicFileName)))
self.assertFalse(os.path.exists(os.path.join(self.extraMountPath, self.publicFileName)))
# Remounting to a different path should update the setting and make
# files visible again.
self._mountServer(path=self.extraMountPath)
setting = Setting().get(SettingKey.GIRDER_MOUNT_INFORMATION)
self.assertEqual(setting['path'], self.extraMountPath)
self.assertFalse(os.path.exists(os.path.join(self.mountPath, self.publicFileName)))
self.assertTrue(os.path.exists(os.path.join(self.extraMountPath, self.publicFileName)))
def testUnmountWithOpenFiles(self):
"""
Unmounting with open files will return a non-zero value.
"""
path = os.path.join(self.mountPath, self.publicFileName)
fh = open(path)
fh.read(1)
self.assertNotEqual(mount.unmountServer(self.mountPath, quiet=True), 0)
# We should still be able to read from the file.
fh.read(1)
fh.close()
        # Now we can unmount successfully
self.assertEqual(mount.unmountServer(self.mountPath, quiet=True), 0)
def testLazyUnmountWithOpenFiles(self):
"""
        Lazy unmounting with open files will return a zero value, and the
        mount goes away once the file is closed.
"""
path = os.path.join(self.mountPath, self.publicFileName)
fh = open(path)
fh.read(1)
self.assertEqual(mount.unmountServer(self.mountPath, lazy=True, quiet=True), 0)
# We should still be able to read from the file.
fh.read(1)
fh.close()
# If we wait, the mount will close
endTime = time.time() + 10 # maximum time to wait
while time.time() < endTime:
if not os.path.exists(path):
break
time.sleep(0.05)
self.assertFalse(os.path.exists(path))
def testSettingValidation(self):
# Mounting and unmounting test valid use, so this just tests invalid
# values.
with six.assertRaisesRegex(self, ValidationException, 'must be a dict'):
Setting().set(SettingKey.GIRDER_MOUNT_INFORMATION, 'not a dict')
with six.assertRaisesRegex(self, ValidationException, 'with the "path" key'):
Setting().set(SettingKey.GIRDER_MOUNT_INFORMATION, {'no path': 'key'})
    # Although other tests exercise the individual functions in the FUSE,
# coverage is not reported since it is run in a separate process. Each of
# the operation class functions is tested here.
def testFunctionCall(self):
op = mount.ServerFuse()
self.assertEqual(op.__call__('access', self.publicFileName, os.F_OK), 0)
self.assertEqual(op.__call__('access', self.privateFileName, os.F_OK), 0)
self.assertEqual(op.__call__('access', 'nosuchpath', os.F_OK), 0)
with self.assertRaises(fuse.FuseOSError):
self.assertTrue(op.__call__('read', self.publicFileName, 10, 0, None))
def testFunctionGetPath(self):
op = mount.ServerFuse()
resource = op._getPath(self.publicFileName)
self.assertEqual(resource['model'], 'file')
resource = op._getPath(os.path.dirname(self.publicFileName))
self.assertEqual(resource['model'], 'item')
resource = op._getPath(os.path.dirname(os.path.dirname(self.publicFileName)))
self.assertEqual(resource['model'], 'folder')
resource = op._getPath(self.privateFileName)
self.assertEqual(resource['model'], 'file')
with self.assertRaises(fuse.FuseOSError):
op._getPath('nosuchpath')
def testFunctionStat(self):
op = mount.ServerFuse()
resource = op._getPath(self.publicFileName)
attr = op._stat(resource['document'], resource['model'])
self.assertEqual(attr['st_ino'], -1)
self.assertEqual(attr['st_nlink'], 1)
self.assertGreater(attr['st_mtime'], time.time() - 1e5)
self.assertEqual(attr['st_ctime'], attr['st_mtime'])
self.assertEqual(attr['st_mode'], 0o400 | stat.S_IFREG)
self.assertGreater(attr['st_size'], len(self.knownPaths[self.publicFileName]))
resource['document']['updated'] = datetime.datetime.utcfromtimestamp(time.time() + 1)
File().save(resource['document'])
oldmtime = attr['st_mtime']
resource = op._getPath(self.publicFileName)
attr = op._stat(resource['document'], resource['model'])
self.assertGreater(attr['st_mtime'], oldmtime)
resource = op._getPath(os.path.dirname(self.publicFileName))
attr = op._stat(resource['document'], resource['model'])
self.assertEqual(attr['st_mode'], 0o500 | stat.S_IFDIR)
self.assertEqual(attr['st_size'], 0)
resource = op._getPath(os.path.dirname(os.path.dirname(self.publicFileName)))
attr = op._stat(resource['document'], resource['model'])
self.assertEqual(attr['st_mode'], 0o500 | stat.S_IFDIR)
self.assertEqual(attr['st_size'], 0)
def testFunctionName(self):
op = mount.ServerFuse()
resource = op._getPath(self.publicFileName)
name = op._name(resource['document'], resource['model'])
self.assertEqual(name, os.path.basename(self.publicFileName))
resource = op._getPath(os.path.dirname(self.publicFileName))
name = op._name(resource['document'], resource['model'])
self.assertEqual(name, os.path.basename(os.path.dirname(self.publicFileName)))
def testFunctionList(self):
op = mount.ServerFuse()
resource = op._getPath(os.path.dirname(self.publicFileName))
filelist = op._list(resource['document'], resource['model'])
self.assertIn(os.path.basename(self.publicFileName), filelist)
resource2 = op._getPath(os.path.dirname(os.path.dirname(self.publicFileName)))
filelist = op._list(resource2['document'], resource2['model'])
self.assertIn(os.path.basename(os.path.dirname(self.publicFileName)), filelist)
resource3 = op._getPath(os.path.dirname(self.adminFileName))
filelist = op._list(resource3['document'], resource3['model'])
self.assertIn(os.path.basename(self.adminFileName), filelist)
resource4 = op._getPath(os.path.dirname(os.path.dirname(self.adminFileName)))
filelist = op._list(resource4['document'], resource4['model'])
self.assertIn(os.path.basename(os.path.dirname(self.adminFileName)), filelist)
resource5 = op._getPath(os.path.dirname(os.path.dirname(
os.path.dirname(self.adminFileName))))
filelist = op._list(resource5['document'], resource5['model'])
self.assertIn(os.path.basename(os.path.dirname(
os.path.dirname(self.adminFileName))), filelist)
def testFunctionAccess(self):
op = mount.ServerFuse()
self.assertEqual(op.access(self.publicFileName, os.F_OK), 0)
self.assertEqual(op.access(self.publicFileName, os.R_OK | os.W_OK | os.X_OK), 0)
self.assertEqual(op.access(self.adminFileName, os.F_OK), 0)
self.assertEqual(op.access(self.adminFileName, os.R_OK), 0)
self.assertEqual(op.access('/user', os.F_OK), 0)
def testFunctionGetattr(self):
op = mount.ServerFuse()
attr = op.getattr('/user')
self.assertEqual(attr['st_mode'], 0o500 | stat.S_IFDIR)
self.assertEqual(attr['st_size'], 0)
attr = op.getattr(self.publicFileName)
self.assertEqual(attr['st_ino'], -1)
self.assertEqual(attr['st_nlink'], 1)
self.assertGreater(attr['st_mtime'], time.time() - 1e5)
self.assertEqual(attr['st_ctime'], attr['st_mtime'])
self.assertEqual(attr['st_mode'], 0o400 | stat.S_IFREG)
self.assertGreater(attr['st_size'], len(self.knownPaths[self.publicFileName]))
with self.assertRaises(fuse.FuseOSError):
op.getattr('/user/nosuchuser')
def testFunctionRead(self):
op = mount.ServerFuse()
fh = op.open(self.publicFileName, os.O_RDONLY)
data = op.read(self.publicFileName, 200, 0, fh)
if (isinstance(data, six.binary_type)
and not isinstance(self.knownPaths[self.publicFileName], six.binary_type)):
self.assertEqual(data.decode('utf8').strip(), self.knownPaths[self.publicFileName])
else:
self.assertEqual(data.strip(), self.knownPaths[self.publicFileName])
data2 = op.read(self.publicFileName, 4, 2, fh)
self.assertEqual(data[2:6], data2)
op.release(self.publicFileName, fh)
with self.assertRaises(fuse.FuseOSError):
op.read(self.publicFileName, 4, 2, fh)
def testFunctionReaddir(self):
op = mount.ServerFuse()
path = os.path.dirname(self.publicFileName)
data = op.readdir(path, 0)
self.assertIn(os.path.basename(self.publicFileName), data)
data = op.readdir('/user', 0)
self.assertIn('admin', data)
data = op.readdir('', 0)
self.assertIn('user', data)
self.assertIn('collection', data)
self.assertIn('.', data)
self.assertIn('..', data)
data = op.readdir('/collection', 0)
self.assertEqual(len(data), 3)
def testFunctionOpen(self):
op = mount.ServerFuse()
fh = op.open(self.publicFileName, os.O_RDONLY)
self.assertTrue(isinstance(fh, int))
self.assertIn(fh, op.openFiles)
op.release(self.publicFileName, fh)
path = os.path.dirname(self.publicFileName)
fh = op.open(path, os.O_RDONLY)
self.assertTrue(isinstance(fh, int))
self.assertNotIn(fh, op.openFiles)
for flag in (os.O_APPEND, os.O_ASYNC, os.O_CREAT, os.O_DIRECTORY,
os.O_EXCL, os.O_RDWR, os.O_TRUNC, os.O_WRONLY):
with self.assertRaises(fuse.FuseOSError):
op.open(self.publicFileName, flag)
def testFunctionCreate(self):
op = mount.ServerFuse()
with self.assertRaises(fuse.FuseOSError):
op.create(self.publicFileName, 0)
def testFunctionFlush(self):
op = mount.ServerFuse()
self.assertEqual(op.flush('/user'), 0)
def testFunctionRelease(self):
op = mount.ServerFuse()
fh = op.open(self.publicFileName, os.O_RDONLY)
self.assertIn(fh, op.openFiles)
self.assertEqual(op.release(self.publicFileName, fh), 0)
self.assertNotIn(fh, op.openFiles)
path = os.path.dirname(self.publicFileName)
fh = op.open(path, os.O_RDONLY)
self.assertNotIn(fh, op.openFiles)
self.assertEqual(op.release(path, fh), 0)
def testFunctionDestroy(self):
op = mount.ServerFuse()
self.assertIsNone(op.destroy('/'))
|
|
# Copyright (C) 2016-2019 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_crypto.card_crypto import CardCrypto
from virgil_sdk import CardManager, VirgilCardVerifier
from virgil_sdk.jwt.providers import CallbackJwtProvider
from virgil_sdk.tests import BaseTest
from virgil_sdk.utils import Utils
from virgil_sdk.verification import WhiteList, VerifierCredentials
class CardVerifierTest(BaseTest):
def test_compatibilty_card_verification_white_lists(self):
# STC-10
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-10.as_string"])
private_key_1 = self._crypto.import_private_key(
bytearray(Utils.b64decode(self._compatibility_data["STC-10.private_key1_base64"]))
).private_key
public_key_1 = self._crypto.extract_public_key(private_key_1)
public_key_1_base64 = Utils.b64encode(self._crypto.export_public_key(public_key_1))
key_pair_2 = self._crypto.generate_key_pair()
public_key_2_base64 = Utils.b64encode(self._crypto.export_public_key(key_pair_2.public_key))
key_pair_3 = self._crypto.generate_key_pair()
public_key_3_base64 = Utils.b64encode(self._crypto.export_public_key(key_pair_3.public_key))
self.assertTrue(card_verifier.verify_card(card_from_string))
card_verifier.verify_self_signature = True
self.assertTrue(card_verifier.verify_card(card_from_string))
card_verifier.verify_virgil_signature = True
self.assertTrue(card_verifier.verify_card(card_from_string))
creds_1 = VerifierCredentials(signer="extra", public_key_base64=public_key_1_base64)
white_list_1 = WhiteList(creds_1)
card_verifier.white_lists = [white_list_1]
self.assertTrue(card_verifier.verify_card(card_from_string))
creds_2_1 = VerifierCredentials(signer="extra", public_key_base64=public_key_1_base64)
creds_2_2 = VerifierCredentials(signer="test1", public_key_base64=public_key_2_base64)
white_list_2 = WhiteList([creds_2_1, creds_2_2])
card_verifier.white_lists = [white_list_2]
self.assertTrue(card_verifier.verify_card(card_from_string))
creds_3_1 = VerifierCredentials(signer="extra", public_key_base64=public_key_1_base64)
creds_3_2 = VerifierCredentials(signer="test1", public_key_base64=public_key_2_base64)
creds_3_3 = VerifierCredentials(signer="test1", public_key_base64=public_key_3_base64)
white_list_3_1 = WhiteList([creds_3_1, creds_3_2])
white_list_3_2 = WhiteList(creds_3_3)
card_verifier.white_lists = [white_list_3_1, white_list_3_2]
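        # Verification should fail here: each white list needs at least one
        # matching signature, and white_list_3_2 only lists a "test1" credential
        # built from the freshly generated key_pair_3, which the imported
        # compatibility card cannot have been signed with.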
self.assertFalse(card_verifier.verify_card(card_from_string))
def test_compatibilty_card_verification_self_sign_failed(self):
# STC-11
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-11.as_string"])
self.assertTrue(card_verifier.verify_card(card_from_string))
card_verifier.verify_self_signature = True
self.assertFalse(card_verifier.verify_card(card_from_string))
def test_compatibilty_card_verification_virgil_sign_failed(self):
# STC-12
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-12.as_string"])
self.assertTrue(card_verifier.verify_card(card_from_string))
card_verifier.verify_virgil_signature = True
self.assertFalse(card_verifier.verify_card(card_from_string))
def test_compatibilty_card_verification_virgil_sign_failed_2(self):
# STC-14
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=True,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-14.as_string"])
self.assertFalse(card_verifier.verify_card(card_from_string))
def test_compatibilty_card_verification_self_sign_failed_2(self):
# STC-15
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=True,
verify_virgil_signature=False,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-15.as_string"])
self.assertFalse(card_verifier.verify_card(card_from_string))
def test_compatibilty_card_verification_invalid_custom_sign(self):
# STC-16
validator = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_verifier = VirgilCardVerifier(
CardCrypto(),
verify_self_signature=False,
verify_virgil_signature=False,
white_lists=[]
)
card_manager = CardManager(
card_crypto=CardCrypto(),
access_token_provider=CallbackJwtProvider(self._get_token_from_server),
card_verifier=validator,
sign_callback=self.sign_callback
)
card_from_string = card_manager.import_card(self._compatibility_data["STC-16.as_string"])
public_key_1_base64 = self._compatibility_data["STC-16.public_key1_base64"]
key_pair_2 = self._crypto.generate_key_pair()
public_key_2_base64 = Utils.b64encode(self._crypto.export_public_key(key_pair_2.public_key))
creds_1 = VerifierCredentials(signer="extra", public_key_base64=public_key_2_base64)
card_verifier.white_lists = [WhiteList(creds_1)]
self.assertFalse(card_verifier.verify_card(card_from_string))
creds_2 = VerifierCredentials(signer="extra", public_key_base64=public_key_1_base64)
card_verifier.white_lists = WhiteList(creds_2)
self.assertTrue(card_verifier.verify_card(card_from_string))
|
|
import argparse
import os
import logging
import configparser
import zendesk
import filesystem
import translate
DEFAULT_LOG_LEVEL = 'WARNING'
CONFIG_FILE = 'zendesk-help-cms.config'
class ImportTask(object):
def execute(self, args):
print('Running import task...')
categories = zendesk.fetcher(args['company_uri'], args['user'], args['password']).fetch()
filesystem.saver(args['root_folder']).save(categories)
print('Done')
class TranslateTask(object):
def execute(self, args):
print('Running translate task...')
categories = filesystem.loader(args['root_folder']).load()
categories = translate.translator(args['webtranslateit_api_key']).create(categories)
filesystem.saver(args['root_folder']).save(categories)
print('Done')
class ExportTask(object):
def execute(self, args):
        print('Running export task...')
categories = filesystem.loader(args['root_folder']).load()
filesystem_client = filesystem.client(args['root_folder'])
zendesk.pusher(args['company_uri'], args['user'], args['password'],
filesystem_client, args['image_cdn'], args['disable_article_comments']).push(categories)
print('Done')
class RemoveTask(object):
def execute(self, args):
print('Running remove task...')
path = os.path.join(args['root_folder'], args['path'])
if not os.path.exists(path):
logging.error('Provided path %s does not exist', path)
return
item = filesystem.loader(args['root_folder']).load_from_path(path)
zendesk.remover(args['company_uri'], args['user'], args['password']).remove(item)
translate.remover(args['webtranslateit_api_key']).remove(item)
filesystem.remover(args['root_folder']).remove(item)
print('Done')
class MoveTask(object):
def execute(self, args):
print('Running move task...')
src = os.path.join(args['root_folder'], args['source'])
dest = os.path.join(args['root_folder'], args['destination'])
if not os.path.exists(src):
logging.error('Provided source %s does not exist', src)
return
if os.path.exists(dest):
            logging.error('Provided destination %s already exists', dest)
return
item = filesystem.loader(args['root_folder']).load_from_path(src)
zendesk.mover(args['company_uri'], args['user'], args['password'], args['image_cdn']).move(item, dest)
translate.mover(args['webtranslateit_api_key']).move(item, dest)
filesystem.mover(args['root_folder']).move(item, dest)
print('Done')
class DoctorTask(object):
def execute(self, args):
print('Running doctor task...')
categories = filesystem.loader(args['root_folder']).load()
filesystem_client = filesystem.client(args['root_folder'])
filesystem_doctor = filesystem.doctor(args['root_folder'])
translate_doctor = translate.doctor(args['webtranslateit_api_key'])
zendesk_doctor = zendesk.doctor(
args['company_uri'], args['user'], args['password'], filesystem_client, args['force'])
zendesk_doctor.fix(categories)
filesystem_doctor.fix(categories)
translate_doctor.fix(categories)
filesystem.saver(args['root_folder']).save(categories)
print('Done')
class ConfigTask(object):
"""
Creates config file in the current directory by asking a user to provide the data.
"""
def _read_existing_config(self):
if not os.path.exists(CONFIG_FILE):
return {}
        print('There is a config already present, press ENTER to accept the existing value')
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
return dict(config[config.default_section])
def _read_config_from_input(self, default_config):
if default_config:
default_company_uri = default_config.get('company_uri', '')
company_uri = input('Zendesk\'s company uri (for example test_company.zendesk.com) ({}):'.format(default_company_uri)) or default_company_uri
default_user = default_config.get('user', '')
user = input('Zendesk\'s user name ({}):'.format(default_user)) or default_user
default_password = default_config.get('password', '')
password = input('Zendesk\'s password ({}):'.format(default_password)) or default_password
default_api_key = default_config.get('webtranslateit_api_key', '')
webtranslateit_api_key = input(
'WebTranslateIt private API key ({}):'.format(default_api_key)) or default_api_key
default_image_cdn = default_config.get('image_cdn', '')
image_cdn = input('CDN path for storing images ({}):'.format(default_image_cdn)) or default_image_cdn
default_disable_article_comments = default_config.get('disable_article_comments', '')
disable_article_comments = input('Disable article comments ({}):'.format(default_disable_article_comments))
disable_article_comments = disable_article_comments or default_disable_article_comments
else:
company_uri = input('Zendesk\'s company uri:')
user = input('Zendesk\'s user name:')
password = input('Zendesk\'s password:')
webtranslateit_api_key = input('WebTranslateIt private API key:')
image_cdn = input('CDN path for storing images:')
disable_article_comments = input('Disable article comments:')
return {
'company_uri': company_uri,
'user': user,
'password': password,
'webtranslateit_api_key': webtranslateit_api_key,
'image_cdn': image_cdn,
'disable_article_comments': disable_article_comments
}
def execute(self, args):
existing_config = self._read_existing_config()
user_config = self._read_config_from_input(existing_config)
config = configparser.ConfigParser()
config[config.default_section] = user_config
with open(CONFIG_FILE, 'w') as config_file:
config.write(config_file)
tasks = {
'import': ImportTask(),
'translate': TranslateTask(),
'export': ExportTask(),
'remove': RemoveTask(),
'move': MoveTask(),
'doctor': DoctorTask(),
'config': ConfigTask()
}
def parse_args():
parser = argparse.ArgumentParser()
# Subparsers
subparsers = parser.add_subparsers(help='Task to be performed.', dest='task')
task_parsers = {task_parser: subparsers.add_parser(task_parser) for task_parser in tasks}
# Global settings
parser.add_argument('-l', '--loglevel',
help='Specify log level (DEBUG, INFO, WARNING, ERROR, CRITICAL), default: %s'
                        % DEFAULT_LOG_LEVEL,
                        default=DEFAULT_LOG_LEVEL)
parser.add_argument('-r', '--root_folder',
help='Article\'s root folder, default: .',
default=os.getcwd())
parser.add_argument('-f', '--force', help='Don\'t ask questions. YES all the way',
action='store_true', default=False)
parser.add_argument('-v', '--version', help='Show version', action='store_true')
# Task subparser settings
task_parsers['remove'].add_argument('path',
help='Set path for removing an item. The path is relative to the root folder')
task_parsers['move'].add_argument('source', help='Set source section/article')
task_parsers['move'].add_argument('destination', help='Set destination category/section')
return parser.parse_args()
def init_log(loglevel):
num_level = getattr(logging, loglevel.upper(), 'WARNING')
logging.basicConfig(level=num_level)
def parse_config(args):
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
options = dict(config[config.default_section])
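    # Command-line arguments take precedence over values from the config file.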
options.update(vars(args))
options['image_cdn'] = options.get('image_cdn', '')
options['disable_article_comments'] = bool(options.get('disable_article_comments', False))
return options
def main():
args = parse_args()
if args.version:
import pkg_resources
version = pkg_resources.require('zendesk-helpcenter-cms')[0].version
print(version)
return
init_log(args.loglevel)
options = parse_config(args)
task_name = options.get('task')
if task_name:
task = tasks[task_name]
task.execute(options)
else:
print('No task provided, run with -h to see available options')
if __name__ == '__main__':
main()
|
|
from polyglot.nodeserver_api import Node
import nest
import requests
from nest import utils as nest_utils
import sys
from login import USERNAME, PASSWORD
# Globally disable SSL warnings from requests package.
requests.packages.urllib3.disable_warnings()
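# ISY CLIMD mode values mapped to Nest mode strings (13 is the ISY "away" mode).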
NEST_STATES = {0: "off", 1: "heat", 2: "cool", 3: "range", 13: "away"}
def myfloat(value, prec=2):
""" round and return float """
return round(float(value), prec)
class NestControl(Node):
def __init__(self, *args, **kwargs):
super(NestControl, self).__init__(*args, **kwargs)
def _discover(self, **kwargs):
try:
manifest = self.parent.config.get('manifest', {})
self.parent.poly.logger.info("Discovering Nest Products...")
self.parent.poly.logger.info("User: %s", USERNAME)
self.napi = nest.Nest(USERNAME,PASSWORD, cache_ttl=30, local_time=True)
for structure in self.napi.structures:
try:
self.parent.poly.logger.info('Structure : %s' % structure.name)
"""
self.parent.poly.logger.info(' Away : %s' % structure.away)
self.parent.poly.logger.info(' Postal Code : %s' % structure.postal_code)
self.parent.poly.logger.info(' Country : %s' % structure.country_code)
self.parent.poly.logger.info(' dr_reminder_enabled : %s' % structure.dr_reminder_enabled)
self.parent.poly.logger.info(' enhanced_auto_away_enabled : %s' % structure.enhanced_auto_away_enabled)
self.parent.poly.logger.info(' eta_preconditioning_active : %s' % structure.eta_preconditioning_active)
self.parent.poly.logger.info(' house_type : %s' % structure.house_type)
self.parent.poly.logger.info(' hvac_safety_shutoff_enabled : %s' % structure.hvac_safety_shutoff_enabled)
self.parent.poly.logger.info(' num_thermostats : %s' % structure.num_thermostats)
self.parent.poly.logger.info(' measurement_scale : %s' % structure.measurement_scale)
self.parent.poly.logger.info(' renovation_date : %s' % structure.renovation_date)
self.parent.poly.logger.info(' structure_area : %s' % structure.structure_area)
"""
except TypeError as e:
self.parent.poly.logger.info('Nestcontrol _discover Caught exception: %s', e)
for device in self.napi.devices:
try:
self.parent.poly.logger.info('Device: %s' % device.serial[-14:])
"""
self.parent.poly.logger.info(' Where: %s' % device.where)
self.parent.poly.logger.info(' Mode : %s' % device.mode)
self.parent.poly.logger.info(' Fan : %s' % device.fan)
self.parent.poly.logger.info(' Temp : %0.1fF' % nest_utils.c_to_f(device.temperature))
self.parent.poly.logger.info(' Humidity : %0.1f%%' % device.humidity)
self.parent.poly.logger.info(' Away Heat: %0.1fF' % nest_utils.c_to_f(device.away_temperature[0]))
self.parent.poly.logger.info(' Away Cool: %0.1fF' % nest_utils.c_to_f(device.away_temperature[1]))
self.parent.poly.logger.info(' hvac_ac_state : %s' % device.hvac_ac_state)
self.parent.poly.logger.info(' hvac_cool_x2_state : %s' % device.hvac_cool_x2_state)
self.parent.poly.logger.info(' hvac_heater_state : %s' % device.hvac_heater_state)
self.parent.poly.logger.info(' hvac_aux_heater_state : %s' % device.hvac_aux_heater_state)
self.parent.poly.logger.info(' hvac_heat_x2_state : %s' % device.hvac_heat_x2_state)
self.parent.poly.logger.info(' hvac_heat_x3_state : %s' % device.hvac_heat_x3_state)
self.parent.poly.logger.info(' hvac_alt_heat_state : %s' % device.hvac_alt_heat_state)
self.parent.poly.logger.info(' hvac_alt_heat_x2_state: %s' % device.hvac_alt_heat_x2_state)
self.parent.poly.logger.info(' hvac_emer_heat_state : %s' % device.hvac_emer_heat_state)
self.parent.poly.logger.info(' online : %s' % device.online)
self.parent.poly.logger.info(' last_ip : %s' % device.last_ip)
self.parent.poly.logger.info(' local_ip : %s' % device.local_ip)
self.parent.poly.logger.info(' last_connection : %s' % device.last_connection)
self.parent.poly.logger.info(' error_code : %s' % device.error_code)
self.parent.poly.logger.info(' battery_level : %s' % device.battery_level)
"""
except TypeError as e:
self.parent.poly.logger.info('Nestcontrol _discover Caught exception: %s', e)
# ISY only allows 14 character limit on nodes, have to strip the serial number down to the last 14 chars.
address = device.serial[-14:].lower()
lnode = self.parent.get_node(address)
if not lnode:
self.parent.poly.logger.info("New Thermostat Found.")
self.parent.thermostats.append(NestThermostat(self.parent, self.parent.get_node('nestcontrol'),
address, device.temperature, structure.name, device.where, manifest))
self.parent.update_config()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, TypeError) as e:
self.logger.error('Nestcontrol _discover Caught exception: %s', e)
return True
def query(self, **kwargs):
self.parent.report_drivers()
return True
_drivers = {}
_commands = {'DISCOVER': _discover}
node_def_id = 'nestcontrol'
class NestThermostat(Node):
def __init__(self, parent, primary, address, temperature, structurename, location, manifest=None):
self.parent = parent
self.logger = self.parent.poly.logger
self.structurename = structurename
self.location = location
try:
self.logger.info('Initializing New Thermostat')
self.napi = nest.Nest(USERNAME,PASSWORD, local_time=True)
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat __init__ Caught exception: %s', e)
self.away = False
self.online = False
self.insidetemp = nest_utils.c_to_f(temperature)
try:
self.name = 'Nest ' + self.structurename + " " + self.location
except TypeError as e:
self.logger.error('Caught TypeError on structurename or location, which means they don\'t exist. Using Generic name.')
self.name = 'Nest Thermostat'
self.address = address
self.logger.info("Adding new Nest Device: %s Current Temp: %i F", self.name, self.insidetemp)
super(NestThermostat, self).__init__(parent, address, self.name, primary, manifest)
self.update_info()
def update_info(self):
self.away = False
try:
self._checkconnect()
self.logger.info("First structure update: %s", self.napi.structures[0].away)
for structure in self.napi.structures:
if self.structurename == structure.name:
if structure.away:
self.away = True
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
self.mode = device.mode
if device.fan:
self.set_driver('CLIFS', '1')
else:
self.set_driver('CLIFS', '0')
self.online = device.online
self.humidity = device.humidity
if device.hvac_ac_state:
self.state = '2'
elif device.hvac_heater_state:
self.state = '1'
else:
self.state = '0'
self.insidetemp = int(round(nest_utils.c_to_f(device.temperature)))
try:
self.outsidetemp = int(round(nest_utils.c_to_f(self.napi.structures[0].weather.current.temperature)))
except (TypeError) as e:
self.logger.error('NestThermostat update_info Caught an exception: %s', e)
self.outsidetemp = 0
if self.mode == 'range':
self.targetlow = int(round(nest_utils.c_to_f(device.target[0])))
self.targethigh = int(round(nest_utils.c_to_f(device.target[1])))
self.logger.info("Target Temp is a range between %i F and %i F",
self.targetlow, self.targethigh)
else:
self.targetlow = int(round(nest_utils.c_to_f(device.target)))
self.logger.info('Target Temp is %i F', self.targetlow)
self.targethigh = self.targetlow
# TODO, clean this up into a dictionary or something clever.
self.logger.info("Away %s: Mode: %s InsideTemp: %i F OutsideTemp: %i F TargetLow: %i F TargetHigh: %i F",
self.away, self.mode, self.insidetemp, self.outsidetemp, self.targetlow, self.targethigh)
if self.away:
self.set_driver('CLIMD', '13')
elif self.mode == 'range':
self.set_driver('CLIMD', '3')
elif self.mode == 'heat':
self.set_driver('CLIMD', '1')
elif self.mode == 'cool':
self.set_driver('CLIMD', '2')
elif self.mode == 'fan':
self.set_driver('CLIMD', '6')
else:
self.set_driver('CLIMD', '0')
self.set_driver('ST', int(self.insidetemp))
self.set_driver('CLISPC', self.targethigh)
self.set_driver('CLISPH', self.targetlow)
self.set_driver('CLIHUM', self.humidity)
self.set_driver('CLIHCS', self.state)
self.set_driver('GV2', self.outsidetemp)
self.set_driver('GV4', self.online)
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
self.logger.error('NestThermostat update_info Caught exception: %s', e)
return
def _setoff(self, **kwargs):
try:
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
device.mode = 'off'
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _setoff Caught exception: %s', e)
return True
def _setauto(self, **kwargs):
try:
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
device.mode = 'range'
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _setauto Caught exception: %s', e)
return True
def _checkconnect(self):
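        # Re-create the Nest session if the cached connection reports the
        # device as offline; returns False when the service is unreachable.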
try:
connected = self.napi.devices[0].online
self.logger.info('Connected: %s', connected)
if not connected:
self.napi = nest.Nest(USERNAME,PASSWORD, local_time=True)
return True
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, TypeError, requests.exceptions.ReadTimeout) as e:
self.logger.error('CheckConnect: %s', e)
return False
def _setmode(self, **kwargs):
try:
val = kwargs.get('value')
if self._checkconnect():
newstate = NEST_STATES[int(val)]
self.logger.info('Got mode change request from ISY. Setting Nest to: %s', newstate)
if newstate == 'away':
for structure in self.napi.structures:
if self.structurename == structure.name:
structure.away = True
else:
for structure in self.napi.structures:
if self.structurename == structure.name:
structure.away = False
self.away = False
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
device.mode = newstate
self.set_driver('CLIMD', int(val))
#self.update_info()
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _setauto Caught exception: %s', e)
return True
def _setfan(self, **kwargs):
try:
val = int(kwargs.get('value'))
if self._checkconnect():
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
if val == 1:
device.fan = True
self.logger.info('Got Set Fan command. Setting fan to \'On\'')
else:
device.fan = False
self.logger.info('Got Set Fan command. Setting fan to \'Auto\'')
self.set_driver('CLIFS', val)
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _settemp Caught exception: %s', e)
return True
def _sethigh(self, **kwargs):
inc = False
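        # When the ISY sends no value (e.g. a BRT command), int(None) raises
        # TypeError and "inc" is set, so the current target is bumped up by
        # one degree instead of being set absolutely.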
try:
try:
val = int(kwargs.get('value'))
except TypeError:
inc = True
self._checkconnect()
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
if device.mode == 'range':
if not inc:
device.temperature = (device.target[0], nest_utils.f_to_c(val))
self.logger.info("Mode is ranged, Setting upper bound to %i F", val)
else:
val = int(nest_utils.c_to_f(device.target[1]) + 1)
self.logger.info("Mode is ranged, incrementing upper bound to %i F", val)
device.temperature = (device.target[0], nest_utils.f_to_c(val))
else:
if not inc:
device.temperature = int(nest_utils.f_to_c(val))
else:
val = int(nest_utils.c_to_f(device.target) + 1)
device.temperature = nest_utils.f_to_c(val)
self.logger.info("Setting temperature to %i F.", val)
self.set_driver('CLISPC', val)
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _settemp Caught exception: %s', e)
return True
def _setlow(self, **kwargs):
inc = False
val = None
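        # As in _sethigh, a missing value (e.g. a DIM command) decrements the
        # current target by one degree instead of setting it absolutely.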
try:
try:
val = int(kwargs.get('value'))
except TypeError:
inc = True
self._checkconnect()
for device in self.napi.devices:
if self.address == device.serial[-14:].lower():
if device.mode == 'range':
if not inc:
device.temperature = (nest_utils.f_to_c(val), device.target[1])
self.logger.info("Mode is ranged, Setting lower bound to %i F", val)
else:
val = int(round(nest_utils.c_to_f(device.target[0]) - 1))
self.logger.info("Mode is ranged, decrementing lower bound to %i F", val)
device.temperature = (nest_utils.f_to_c(val), device.target[1])
else:
if not inc:
device.temperature = nest_utils.f_to_c(val)
else:
val = int(round(nest_utils.c_to_f(device.target) - 1))
device.temperature = nest_utils.f_to_c(val)
self.logger.info("Setting temperature to %i F.", val)
self.set_driver('CLISPH', val)
except requests.exceptions.HTTPError as e:
self.logger.error('NestThermostat _settemp Caught exception: %s', e)
return True
def _beep(self, **kwargs):
return True
def query(self, **kwargs):
self.update_info()
self.report_driver()
return True
_drivers = {
'CLIMD': [0, 67, int], 'CLISPC': [0, 14, int],
'CLISPH': [0, 14, int], 'CLIFS':[0, 99, int],
'CLIHUM':[0, 51, int], 'CLIHCS':[0, 66, int],
'GV1': [0, 14, int], 'GV2': [0, 14, int],
'GV3': [0, 14, int], 'GV4': [0, 2, int],
'ST': [0, 14, int]}
_commands = {'DON': _setauto,
'DOF': _setoff,
'CLIMD': _setmode,
'CLIFS': _setfan,
'BRT': _sethigh,
'DIM': _setlow,
'BEEP': _beep,
'CLISPH': _setlow,
'CLISPC': _sethigh,
'QUERY': query}
node_def_id = 'nestthermostat'
|
|
"""
Ray queries using the pyembree package with the
API wrapped to match our native raytracer.
"""
import numpy as np
from copy import deepcopy
from pyembree import __version__ as _ver
from pyembree import rtcore_scene
from pyembree.mesh_construction import TriangleMesh
from pkg_resources import parse_version
from .ray_util import contains_points
from .. import util
from .. import caching
from .. import intersections
from ..constants import log_time
# the factor of geometry.scale to offset a ray from a triangle
# to reliably not hit its origin triangle
_ray_offset_factor = 1e-4
# we want to clip our offset to a sane distance
_ray_offset_floor = 1e-8
# see if we're using a newer version of the pyembree wrapper
_embree_new = parse_version(_ver) >= parse_version('0.1.4')
# both old and new versions require exact but different type
_embree_dtype = [np.float64, np.float32][int(_embree_new)]
class RayMeshIntersector(object):
def __init__(self,
geometry,
scale_to_box=True):
"""
        Do ray-mesh queries.
Parameters
-------------
geometry : Trimesh object
Mesh to do ray tests on
scale_to_box : bool
If true, will scale mesh to approximate
unit cube to avoid problems with extreme
large or small meshes.
"""
self.mesh = geometry
self._scale_to_box = scale_to_box
self._cache = caching.Cache(id_function=self.mesh.crc)
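        # the cache is keyed on the mesh CRC, so the embree scene built in
        # _scene is regenerated whenever the mesh data changes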
@property
def _scale(self):
"""
Scaling factor for precision.
"""
if self._scale_to_box:
# scale vertices to approximately a cube to help with
# numerical issues at very large/small scales
scale = 100.0 / self.mesh.scale
else:
scale = 1.0
return scale
@caching.cache_decorator
def _scene(self):
"""
A cached version of the pyembree scene.
"""
return _EmbreeWrap(vertices=self.mesh.vertices,
faces=self.mesh.faces,
scale=self._scale)
def intersects_location(self,
ray_origins,
ray_directions,
multiple_hits=True):
"""
Return the location of where a ray hits a surface.
Parameters
----------
ray_origins : (n, 3) float
Origins of rays
ray_directions : (n, 3) float
Direction (vector) of rays
Returns
---------
locations : (m) sequence of (p, 3) float
Intersection points
index_ray : (m,) int
Indexes of ray
index_tri : (m,) int
Indexes of mesh.faces
"""
(index_tri,
index_ray,
locations) = self.intersects_id(
ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=multiple_hits,
return_locations=True)
return locations, index_ray, index_tri
@log_time
def intersects_id(self,
ray_origins,
ray_directions,
multiple_hits=True,
max_hits=20,
return_locations=False):
"""
Find the triangles hit by a list of rays, including
optionally multiple hits along a single ray.
Parameters
----------
ray_origins : (n, 3) float
Origins of rays
ray_directions : (n, 3) float
Direction (vector) of rays
multiple_hits : bool
If True will return every hit along the ray
If False will only return first hit
max_hits : int
Maximum number of hits per ray
return_locations : bool
Should we return hit locations or not
Returns
---------
index_tri : (m,) int
Indexes of mesh.faces
index_ray : (m,) int
Indexes of ray
locations : (m) sequence of (p, 3) float
Intersection points, only returned if return_locations
"""
# make sure input is _dtype for embree
ray_origins = np.asanyarray(
deepcopy(ray_origins),
dtype=np.float64)
ray_directions = np.asanyarray(ray_directions,
dtype=np.float64)
ray_directions = util.unitize(ray_directions)
        # since we are constructing all hits, save them to lists then
        # stack into (depth, len(rays)) at the end
result_triangle = []
result_ray_idx = []
result_locations = []
# the mask for which rays are still active
current = np.ones(len(ray_origins), dtype=np.bool)
if multiple_hits or return_locations:
# how much to offset ray to transport to the other side of face
distance = np.clip(_ray_offset_factor * self._scale,
_ray_offset_floor,
np.inf)
ray_offsets = ray_directions * distance
# grab the planes from triangles
plane_origins = self.mesh.triangles[:, 0, :]
plane_normals = self.mesh.face_normals
# use a for loop rather than a while to ensure this exits
# if a ray is offset from a triangle and then is reported
# hitting itself this could get stuck on that one triangle
for query_depth in range(max_hits):
# run the pyembree query
# if you set output=1 it will calculate distance along
            # ray, which is bizarrely slower than our calculation
query = self._scene.run(
ray_origins[current],
ray_directions[current])
# basically we need to reduce the rays to the ones that hit
# something
hit = query != -1
# which triangle indexes were hit
hit_triangle = query[hit]
# eliminate rays that didn't hit anything from future queries
current_index = np.nonzero(current)[0]
current_index_no_hit = current_index[np.logical_not(hit)]
current_index_hit = current_index[hit]
current[current_index_no_hit] = False
# append the triangle and ray index to the results
result_triangle.append(hit_triangle)
result_ray_idx.append(current_index_hit)
# if we don't need all of the hits, return the first one
if ((not multiple_hits and
not return_locations) or
not hit.any()):
break
# find the location of where the ray hit the triangle plane
new_origins, valid = intersections.planes_lines(
plane_origins=plane_origins[hit_triangle],
plane_normals=plane_normals[hit_triangle],
line_origins=ray_origins[current],
line_directions=ray_directions[current])
if not valid.all():
# since a plane intersection was invalid we have to go back and
# fix some stuff, we pop the ray index and triangle index,
# apply the valid mask then append it right back to keep our
# indexes intact
result_ray_idx.append(result_ray_idx.pop()[valid])
result_triangle.append(result_triangle.pop()[valid])
# update the current rays to reflect that we couldn't find a
# new origin
current[current_index_hit[np.logical_not(valid)]] = False
# since we had to find the intersection point anyway we save it
# even if we're not going to return it
result_locations.extend(new_origins)
if multiple_hits:
# move the ray origin to the other side of the triangle
ray_origins[current] = new_origins + ray_offsets[current]
else:
break
        # stack the lists into flat 1D numpy arrays
index_tri = np.hstack(result_triangle)
index_ray = np.hstack(result_ray_idx)
if return_locations:
locations = (
np.zeros((0, 3), float) if len(result_locations) == 0
else np.array(result_locations))
return index_tri, index_ray, locations
return index_tri, index_ray
@log_time
def intersects_first(self,
ray_origins,
ray_directions):
"""
Find the index of the first triangle a ray hits.
Parameters
----------
ray_origins : (n, 3) float
Origins of rays
ray_directions : (n, 3) float
Direction (vector) of rays
Returns
----------
triangle_index : (n,) int
Index of triangle ray hit, or -1 if not hit
"""
ray_origins = np.asanyarray(deepcopy(ray_origins))
ray_directions = np.asanyarray(ray_directions)
triangle_index = self._scene.run(ray_origins,
ray_directions)
return triangle_index
def intersects_any(self,
ray_origins,
ray_directions):
"""
Check if a list of rays hits the surface.
Parameters
-----------
ray_origins : (n, 3) float
Origins of rays
ray_directions : (n, 3) float
Direction (vector) of rays
Returns
----------
hit : (n,) bool
Did each ray hit the surface
"""
first = self.intersects_first(ray_origins=ray_origins,
ray_directions=ray_directions)
hit = first != -1
return hit
def contains_points(self, points):
"""
Check if a mesh contains a list of points, using ray tests.
If the point is on the surface of the mesh, behavior is undefined.
Parameters
---------
points: (n, 3) points in space
Returns
---------
contains: (n,) bool
Whether point is inside mesh or not
"""
return contains_points(self, points)
class _EmbreeWrap(object):
"""
A light wrapper for PyEmbree scene objects which
allows queries to be scaled to help with precision
issues, as well as selecting the correct dtypes.
"""
def __init__(self, vertices, faces, scale):
scaled = np.array(vertices,
dtype=np.float64)
self.origin = scaled.min(axis=0)
self.scale = float(scale)
scaled = (scaled - self.origin) * self.scale
self.scene = rtcore_scene.EmbreeScene()
# assign the geometry to the scene
TriangleMesh(
scene=self.scene,
vertices=scaled.astype(_embree_dtype),
indices=faces.view(np.ndarray).astype(np.int32))
def run(self, origins, normals, **kwargs):
scaled = (np.array(origins,
dtype=np.float64) - self.origin) * self.scale
return self.scene.run(scaled.astype(_embree_dtype),
normals.astype(_embree_dtype),
**kwargs)
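# --- Illustrative usage sketch (not part of this module) --------------------
# A minimal example of the query API defined above. It assumes trimesh and
# the pyembree wrapper this module requires are installed; the icosphere is
# just a convenient built-in test mesh.
if __name__ == '__main__':
    import numpy as np
    import trimesh

    mesh = trimesh.creation.icosphere()
    intersector = RayMeshIntersector(mesh)

    # one ray starting outside the sphere, pointing at its center
    origins = np.array([[0.0, 0.0, -5.0]])
    directions = np.array([[0.0, 0.0, 1.0]])

    # index of the first triangle hit per ray (-1 means no hit)
    print(intersector.intersects_first(origins, directions))

    # every hit location along the ray, plus which ray / triangle was hit
    locations, index_ray, index_tri = intersector.intersects_location(
        origins, directions, multiple_hits=True)
    print(locations)

    # point-in-mesh test built on the same ray queries
    print(intersector.contains_points(np.array([[0.0, 0.0, 0.0]])))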
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceDefinitionSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group': 'str',
'names': 'V1beta1CustomResourceDefinitionNames',
'scope': 'str',
'validation': 'V1beta1CustomResourceValidation',
'version': 'str'
}
attribute_map = {
'group': 'group',
'names': 'names',
'scope': 'scope',
'validation': 'validation',
'version': 'version'
}
def __init__(self, group=None, names=None, scope=None, validation=None, version=None):
"""
V1beta1CustomResourceDefinitionSpec - a model defined in Swagger
"""
self._group = None
self._names = None
self._scope = None
self._validation = None
self._version = None
self.discriminator = None
self.group = group
self.names = names
self.scope = scope
if validation is not None:
self.validation = validation
self.version = version
@property
def group(self):
"""
Gets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:return: The group of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:param group: The group of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if group is None:
raise ValueError("Invalid value for `group`, must not be `None`")
self._group = group
@property
def names(self):
"""
Gets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:return: The names of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceDefinitionNames
"""
return self._names
@names.setter
def names(self, names):
"""
Sets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:param names: The names of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceDefinitionNames
"""
if names is None:
raise ValueError("Invalid value for `names`, must not be `None`")
self._names = names
@property
def scope(self):
"""
Gets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:return: The scope of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""
Sets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:param scope: The scope of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if scope is None:
raise ValueError("Invalid value for `scope`, must not be `None`")
self._scope = scope
@property
def validation(self):
"""
Gets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources This field is alpha-level and should only be sent to servers that enable the CustomResourceValidation feature.
:return: The validation of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceValidation
"""
return self._validation
@validation.setter
def validation(self, validation):
"""
Sets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources This field is alpha-level and should only be sent to servers that enable the CustomResourceValidation feature.
:param validation: The validation of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceValidation
"""
self._validation = validation
@property
def version(self):
"""
Gets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in
:return: The version of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in
:param version: The version of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`")
self._version = version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceDefinitionSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
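# --- Illustrative usage sketch (not part of the generated client) -----------
# A minimal example of constructing the model above and serialising it with
# to_dict(). In real use `names` would be a V1beta1CustomResourceDefinitionNames
# instance from the sibling generated module; a plain dict is used here only
# because the setter does not type-check, which keeps the example self-contained.
if __name__ == '__main__':
    example_spec = V1beta1CustomResourceDefinitionSpec(
        group='example.com',
        names={'plural': 'widgets', 'kind': 'Widget'},  # stand-in for the names model
        scope='Namespaced',
        version='v1beta1')
    print(example_spec.to_dict())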
|
|
import re
import zipfile
import os
from collections import defaultdict
import subprocess
import StringIO
import xlrd
import magic
import messytables
import lib
from ckan.lib import helpers as ckan_helpers
import logging
log = logging.getLogger(__name__)
def sniff_file_format(filepath):
    '''For a given filepath, work out what file format it is.
    Returns a dict with 'format' as a string, which is the format's canonical
    shortname (as defined by ckan's resource_formats.json), and optionally a
    'container' key that says what it is wrapped in (e.g. a zip).
    e.g. {'format': 'CSV',
          'container': 'zip',
          }
    or None if it can't tell what it is.
    Logging goes to this module's logger, which may be a Celery logger or a
    standard Python logging one.
    '''
format_ = None
log.info('Sniffing file format of: %s', filepath)
filepath_utf8 = filepath.encode('utf8') if isinstance(filepath, unicode) \
else filepath
mime_type = magic.from_file(filepath_utf8, mime=True)
log.info('Magic detects file as: %s', mime_type)
if mime_type:
if mime_type == 'application/xml':
with open(filepath) as f:
buf = f.read(5000)
format_ = get_xml_variant_including_xml_declaration(buf)
elif mime_type == 'application/zip':
format_ = get_zipped_format(filepath)
elif mime_type in ('application/msword', 'application/vnd.ms-office'):
            # Magic has been known to give the msword mime-type for Word and
            # other MS Office files too, so use the BSD 'file' tool to be sure
            # which it is.
format_ = run_bsd_file(filepath)
if not format_ and is_excel(filepath):
format_ = {'format': 'XLS'}
elif mime_type == 'application/octet-stream':
# Excel files sometimes come up as this
if is_excel(filepath):
format_ = {'format': 'XLS'}
else:
# e.g. Shapefile
format_ = run_bsd_file(filepath)
if not format_:
with open(filepath) as f:
buf = f.read(500)
format_ = is_html(buf)
elif mime_type == 'text/html':
# Magic can mistake IATI for HTML
with open(filepath) as f:
buf = f.read(100)
if is_iati(buf):
format_ = {'format': 'IATI'}
if format_:
return format_
format_tuple = ckan_helpers.resource_formats().get(mime_type)
if format_tuple:
format_ = {'format': format_tuple[1]}
if not format_:
if mime_type.startswith('text/'):
# is it JSON?
with open(filepath, 'rU') as f:
buf = f.read(10000)
if is_json(buf):
format_ = {'format': 'JSON'}
# is it CSV?
elif is_csv(buf):
format_ = {'format': 'CSV'}
elif is_psv(buf):
format_ = {'format': 'PSV'}
if not format_:
log.warning('Mimetype not recognised by CKAN as a data format: %s',
mime_type)
if format_:
log.info('Mimetype translates to filetype: %s',
format_['format'])
if format_['format'] == 'TXT':
# is it JSON?
with open(filepath, 'rU') as f:
buf = f.read(10000)
if is_json(buf):
format_ = {'format': 'JSON'}
# is it CSV?
elif is_csv(buf):
format_ = {'format': 'CSV'}
elif is_psv(buf):
format_ = {'format': 'PSV'}
# XML files without the "<?xml ... ?>" tag end up here
elif is_xml_but_without_declaration(buf):
format_ = get_xml_variant_without_xml_declaration(buf)
elif is_ttl(buf):
format_ = {'format': 'TTL'}
elif format_['format'] == 'HTML':
# maybe it has RDFa in it
with open(filepath) as f:
buf = f.read(100000)
if has_rdfa(buf):
format_ = {'format': 'RDFa'}
else:
# Excel files sometimes not picked up by magic, so try alternative
if is_excel(filepath):
format_ = {'format': 'XLS'}
# BSD file picks up some files that Magic misses
# e.g. some MS Word files
if not format_:
format_ = run_bsd_file(filepath)
if not format_:
log.warning('Could not detect format of file: %s', filepath)
return format_
def is_json(buf):
'''Returns whether this text buffer (potentially truncated) is in
JSON format.'''
string = '"[^"]*"'
string_re = re.compile(string)
number_re = re.compile('-?\d+(\.\d+)?([eE][+-]?\d+)?')
extra_values_re = re.compile('true|false|null')
object_start_re = re.compile('{%s:\s?' % string)
object_middle_re = re.compile('%s:\s?' % string)
object_end_re = re.compile('}')
comma_re = re.compile(',\s?')
array_start_re = re.compile('\[')
array_end_re = re.compile('\]')
any_value_regexs = [string_re, number_re, object_start_re, array_start_re, extra_values_re]
# simplified state machine - just looks at stack of object/array and
# ignores contents of them, beyond just being simple JSON bits
pos = 0
state_stack = [] # stack of 'object', 'array'
number_of_matches = 0
while pos < len(buf):
part_of_buf = buf[pos:]
if pos == 0:
potential_matches = (object_start_re, array_start_re, string_re, number_re, extra_values_re)
elif not state_stack:
# cannot have content beyond the first byte that is not nested
return False
elif state_stack[-1] == 'object':
# any value
potential_matches = [comma_re, object_middle_re, object_end_re] + any_value_regexs
elif state_stack[-1] == 'array':
# any value or end it
potential_matches = any_value_regexs + [comma_re, array_end_re]
for matcher in potential_matches:
if matcher.match(part_of_buf):
if matcher in any_value_regexs and state_stack and state_stack[-1] == 'comma':
state_stack.pop()
if matcher == object_start_re:
state_stack.append('object')
elif matcher == array_start_re:
state_stack.append('array')
elif matcher in (object_end_re, array_end_re):
try:
state_stack.pop()
except IndexError:
# nothing to pop
log.info('Not JSON - %i matches', number_of_matches)
return False
break
else:
# no match
log.info('Not JSON - %i matches', number_of_matches)
return False
match_length = matcher.match(part_of_buf).end()
# print "MATCHED %r %r %s" % (matcher.match(part_of_buf).string[:match_length], matcher.pattern, state_stack)
pos += match_length
number_of_matches += 1
if number_of_matches > 5:
log.info('JSON detected: %i matches', number_of_matches)
return True
log.info('JSON detected: %i matches', number_of_matches)
return True
def is_csv(buf):
'''If the buffer is a CSV file then return True.'''
buf_rows = StringIO.StringIO(buf)
table_set = messytables.CSVTableSet(buf_rows)
return _is_spreadsheet(table_set, 'CSV')
def is_psv(buf):
'''If the buffer is a PSV file then return True.'''
buf_rows = StringIO.StringIO(buf)
table_set = messytables.CSVTableSet(buf_rows, delimiter='|')
return _is_spreadsheet(table_set, 'PSV')
def _is_spreadsheet(table_set, format):
def get_cells_per_row(num_cells, num_rows):
if not num_rows:
return 0
return float(num_cells) / float(num_rows)
num_cells = num_rows = 0
try:
table = table_set.tables[0]
# Iterate through the table.sample (sample because otherwise
# it will barf if there is an unclosed string at the end)
for row in table.sample:
if row:
# Must have enough cells
num_cells += len(row)
num_rows += 1
if num_cells > 20 or num_rows > 10:
cells_per_row = get_cells_per_row(num_cells, num_rows)
# over the long term, 2 columns is the minimum
if cells_per_row > 1.9:
log.info('Is %s because %.1f cells per row (%i cells, %i rows)',
format,
get_cells_per_row(num_cells, num_rows),
num_cells, num_rows)
return True
finally:
pass
# if file is short then be more lenient
if num_cells > 3 or num_rows > 1:
cells_per_row = get_cells_per_row(num_cells, num_rows)
if cells_per_row > 1.5:
log.info('Is %s because %.1f cells per row (%i cells, %i rows)',
format,
get_cells_per_row(num_cells, num_rows),
num_cells, num_rows)
return True
log.info('Not %s - not enough valid cells per row '
'(%i cells, %i rows, %.1f cells per row)',
format, num_cells, num_rows, get_cells_per_row(num_cells, num_rows))
return False
def is_html(buf):
'''If this buffer is HTML, return that format type, else None.'''
xml_re = '.{0,3}\s*(<\?xml[^>]*>\s*)?(<!doctype[^>]*>\s*)?<html[^>]*>'
match = re.match(xml_re, buf, re.IGNORECASE)
if match:
log.info('HTML tag detected')
return {'format': 'HTML'}
log.debug('Not HTML')
def is_iati(buf):
'''If this buffer is IATI format, return that format type, else None.'''
xml_re = '.{0,3}\s*(<\?xml[^>]*>\s*)?(<!doctype[^>]*>\s*)?<iati-(activities|organisations)[^>]*>'
match = re.match(xml_re, buf, re.IGNORECASE)
if match:
log.info('IATI tag detected')
return {'format': 'IATI'}
log.debug('Not IATI')
def is_xml_but_without_declaration(buf):
'''Decides if this is a buffer of XML, but missing the usual <?xml ...?>
tag.'''
xml_re = '.{0,3}\s*(<\?xml[^>]*>\s*)?(<!doctype[^>]*>\s*)?<([^>\s]*)([^>]*)>'
match = re.match(xml_re, buf, re.IGNORECASE)
if match:
top_level_tag_name, top_level_tag_attributes = match.groups()[-2:]
if 'xmlns:' not in top_level_tag_attributes and \
(len(top_level_tag_name) > 20 or
len(top_level_tag_attributes) > 200):
log.debug('Not XML (without declaration) - unlikely length first tag: <%s %s>',
top_level_tag_name, top_level_tag_attributes)
return False
log.info('XML detected - first tag name: <%s>', top_level_tag_name)
return True
log.debug('Not XML (without declaration) - tag not detected')
return False
def get_xml_variant_including_xml_declaration(buf):
    '''If this buffer is in a format based on XML and has the <?xml ...?>
    declaration, return the format type.'''
    return get_xml_variant_without_xml_declaration(buf)
def get_xml_variant_without_xml_declaration(buf):
'''If this buffer is in a format based on XML, without any XML declaration
or other boilerplate, return the format type.'''
# Parse the XML to find the first tag name.
    # Using expat directly, rather than going through xml.sax, since I
    # couldn't see how to give xml.sax a string; wrapping it in StringIO
    # failed for some files, curiously.
import xml.parsers.expat
class GotFirstTag(Exception):
pass
def start_element(name, attrs):
raise GotFirstTag(name)
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
try:
p.Parse(buf)
except GotFirstTag, e:
top_level_tag_name = str(e).lower()
    except xml.parsers.expat.ExpatError, e:
        log.info('Expat parse error: %s %s', e, buf)
return {'format': 'XML'}
log.info('Top level tag detected as: %s', top_level_tag_name)
top_level_tag_name = top_level_tag_name.replace('rdf:rdf', 'rdf')
top_level_tag_name = top_level_tag_name.replace('wms_capabilities', 'wms') # WMS 1.3
top_level_tag_name = top_level_tag_name.replace('wmt_ms_capabilities', 'wms') # WMS 1.1.1
top_level_tag_name = re.sub('wfs:.*', 'wfs', top_level_tag_name) # WFS 2.0
top_level_tag_name = top_level_tag_name.replace('wfs_capabilities', 'wfs') # WFS 1.0/1.1
top_level_tag_name = top_level_tag_name.replace('feed', 'atom feed')
if top_level_tag_name.lower() == 'capabilities' and \
'xmlns="http://www.opengis.net/wmts/' in buf:
top_level_tag_name = 'wmts'
if top_level_tag_name.lower() in ('coveragedescriptions', 'capabilities') and \
'xmlns="http://www.opengis.net/wcs/' in buf:
top_level_tag_name = 'wcs'
format_tuple = ckan_helpers.resource_formats().get(top_level_tag_name)
if format_tuple:
format_ = {'format': format_tuple[1]}
log.info('XML variant detected: %s', format_tuple[2])
return format_
log.warning('Did not recognise XML format: %s', top_level_tag_name)
return {'format': 'XML'}
def has_rdfa(buf):
'''If the buffer HTML contains RDFa then this returns True'''
# quick check for the key words
if 'about=' not in buf or 'property=' not in buf:
log.debug('Not RDFA')
return False
# more rigorous check for them as tag attributes
about_re = '<[^>]+\sabout="[^"]+"[^>]*>'
property_re = '<[^>]+\sproperty="[^"]+"[^>]*>'
# remove CR to catch tags spanning more than one line
# buf = re.sub('\r\n', ' ', buf)
if not re.search(about_re, buf):
log.debug('Not RDFA')
return False
if not re.search(property_re, buf):
log.debug('Not RDFA')
return False
log.info('RDFA tags found in HTML')
return True
def get_zipped_format(filepath):
'''For a given zip file, return the format of file inside.
For multiple files, choose by the most open, and then by the most
popular extension.'''
# just check filename extension of each file inside
try:
# note: Cannot use "with" with a zipfile before python 2.7
# so we have to close it manually.
zip = zipfile.ZipFile(filepath, 'r')
try:
filepaths = zip.namelist()
finally:
zip.close()
except zipfile.BadZipfile, e:
log.info('Zip file open raised error %s: %s',
e, e.args)
return
except Exception, e:
log.warning('Zip file open raised exception %s: %s',
e, e.args)
return
# Shapefile check - a Shapefile is a zip containing specific files:
# .shp, .dbf and .shx amongst others
extensions = set([f.split('.')[-1].lower() for f in filepaths])
if len(extensions & set(('shp', 'dbf', 'shx'))) == 3:
log.info('Shapefile detected')
return {'format': 'SHP'}
    # GTFS check - a GTFS is a zip containing specific filenames
filenames = set((os.path.basename(f) for f in filepaths))
if not (set(('agency.txt', 'stops.txt', 'routes.txt', 'trips.txt',
'stop_times.txt', 'calendar.txt')) - set(filenames)):
log.info('GTFS detected')
return {'format': 'GTFS'}
top_score = 0
top_scoring_extension_counts = defaultdict(int) # extension: number_of_files
    for filepath_in_zip in filepaths:
        extension = os.path.splitext(filepath_in_zip)[-1][1:].lower()
        format_tuple = ckan_helpers.resource_formats().get(extension)
        if format_tuple:
            score = lib.resource_format_scores().get(format_tuple[1])
            if score is not None and score > top_score:
                top_score = score
                top_scoring_extension_counts = defaultdict(int)
            if score == top_score:
                top_scoring_extension_counts[extension] += 1
        else:
            log.info('Zipped file of unknown extension: "%s" (%s)',
                     extension, filepath_in_zip)
if not top_scoring_extension_counts:
log.info('Zip has no known extensions: %s', filepath)
return {'format': 'ZIP'}
top_scoring_extension_counts = sorted(top_scoring_extension_counts.items(),
key=lambda x: x[1])
top_extension = top_scoring_extension_counts[-1][0]
log.info('Zip file\'s most popular extension is "%s" (All extensions: %r)',
top_extension, top_scoring_extension_counts)
format_tuple = ckan_helpers.resource_formats()[top_extension]
format_ = {'format': format_tuple[1],
'container': 'ZIP'}
log.info('Zipped file format detected: %s', format_tuple[2])
return format_
def is_excel(filepath):
try:
xlrd.open_workbook(filepath)
except Exception, e:
log.info('Not Excel - failed to load: %s %s', e, e.args)
return False
else:
log.info('Excel file opened successfully')
return True
# same as the python 2.7 subprocess.check_output
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise Exception('Non-zero exit status %s: %s' % (retcode, output))
return output
def run_bsd_file(filepath):
'''Run the BSD command-line tool "file" to determine file type. Returns
a format dict or None if it fails.'''
result = check_output(['file', filepath])
match = re.search('Name of Creating Application: ([^,]*),', result)
if match:
app_name = match.groups()[0]
format_map = {'Microsoft Office PowerPoint': 'ppt',
'Microsoft PowerPoint': 'ppt',
'Microsoft Excel': 'xls',
'Microsoft Office Word': 'doc',
'Microsoft Word 10.0': 'doc',
'Microsoft Macintosh Word': 'doc',
}
if app_name in format_map:
extension = format_map[app_name]
format_tuple = ckan_helpers.resource_formats()[extension]
log.info('"file" detected file format: %s',
format_tuple[2])
return {'format': format_tuple[1]}
match = re.search(': ESRI Shapefile', result)
if match:
format_ = {'format': 'SHP'}
log.info('"file" detected file format: %s',
format_['format'])
return format_
log.info('"file" could not determine file format of "%s": %s',
filepath, result)
def is_ttl(buf):
'''If the buffer is a Turtle RDF file then return True.'''
# Turtle spec: "Turtle documents may have the strings '@prefix' or '@base' (case dependent) near the beginning of the document."
at_re = '^@(prefix|base) '
match = re.search(at_re, buf, re.MULTILINE)
if match:
log.info('Turtle RDF detected - @prefix or @base')
return True
# Alternatively look for several triples
num_required_triples = 5
ignore, num_replacements = turtle_regex().subn('', buf, num_required_triples)
if num_replacements >= num_required_triples:
log.info('Turtle RDF detected - %s triples' % num_replacements)
return True
log.debug('Not Turtle RDF - triples not detected (%i)' % num_replacements)
turtle_regex_ = None
def turtle_regex():
'''Return a compiled regex that matches a turtle triple.
Each RDF term may be in these forms:
<url>
"a literal"
"translation"@ru
"literal typed"^^<http://www.w3.org/2001/XMLSchema#string>
"literal typed with prefix"^^xsd:string
'single quotes'
"""triple \n quotes"""
-4.2E-9
false
_:blank_node
No need to worry about prefixed terms, since there would have been a
@prefix already detected for them to be used.
prefix:term :blank_prefix
does not support nested blank nodes, collection, sameas ('a' token)
'''
global turtle_regex_
if not turtle_regex_:
rdf_term = '(<[^ >]+>|_:\S+|".+?"(@\w+)?(\^\^\S+)?|\'.+?\'(@\w+)?(\^\^\S+)?|""".+?"""(@\w+)' \
'?(\^\^\S+)?|\'\'\'.+?\'\'\'(@\w+)?(\^\^\S+)?|[+-]?([0-9]+|[0-9]*\.[0-9]+)(E[+-]?[0-9]+)?|false|true)'
# simple case is: triple_re = '^T T T \.$'.replace('T', rdf_term)
# but extend to deal with multiple predicate-objects:
# triple = '^T T T\s*(;\s*T T\s*)*\.\s*$'.replace('T', rdf_term).replace(' ', '\s+')
triple = '(^T|;)\s*T T\s*(;|\.\s*$)'.replace('T', rdf_term).replace(' ', '\s+')
turtle_regex_ = re.compile(triple, re.MULTILINE)
return turtle_regex_
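# --- Illustrative usage sketch (not part of the original module) ------------
# A small demonstration of the two levels above: the buffer-based helpers
# (is_json / is_csv) on in-memory strings, and sniff_file_format on a path
# passed on the command line. Running it needs the optional dependencies
# imported at the top of this module (magic, xlrd, messytables, ckan helpers).
if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.INFO)
    print is_json('{"a": 1, "b": [true, null]}')    # True
    print is_csv('a,b,c\n1,2,3\n4,5,6\n')           # True
    if len(sys.argv) > 1:
        print sniff_file_format(sys.argv[1].decode('utf8'))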
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import mock
import pytest
from google.auth import _helpers
from google.auth import crypt
from google.auth import jwt
from google.oauth2 import service_account
DATA_DIR = os.path.join(os.path.dirname(__file__), '..', 'data')
with open(os.path.join(DATA_DIR, 'privatekey.pem'), 'rb') as fh:
PRIVATE_KEY_BYTES = fh.read()
with open(os.path.join(DATA_DIR, 'public_cert.pem'), 'rb') as fh:
PUBLIC_CERT_BYTES = fh.read()
with open(os.path.join(DATA_DIR, 'other_cert.pem'), 'rb') as fh:
OTHER_CERT_BYTES = fh.read()
SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, 'service_account.json')
with open(SERVICE_ACCOUNT_JSON_FILE, 'r') as fh:
SERVICE_ACCOUNT_INFO = json.load(fh)
@pytest.fixture(scope='module')
def signer():
return crypt.Signer.from_string(PRIVATE_KEY_BYTES, '1')
class TestCredentials(object):
SERVICE_ACCOUNT_EMAIL = 'service-account@example.com'
TOKEN_URI = 'https://example.com/oauth2/token'
credentials = None
@pytest.fixture(autouse=True)
def credentials_fixture(self, signer):
self.credentials = service_account.Credentials(
signer, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI)
def test_from_service_account_info(self):
credentials = service_account.Credentials.from_service_account_info(
SERVICE_ACCOUNT_INFO)
assert (credentials._signer.key_id ==
SERVICE_ACCOUNT_INFO['private_key_id'])
assert (credentials.service_account_email ==
SERVICE_ACCOUNT_INFO['client_email'])
assert credentials._token_uri == SERVICE_ACCOUNT_INFO['token_uri']
def test_from_service_account_info_args(self):
info = SERVICE_ACCOUNT_INFO.copy()
scopes = ['email', 'profile']
subject = 'subject'
additional_claims = {'meta': 'data'}
credentials = service_account.Credentials.from_service_account_info(
info, scopes=scopes, subject=subject,
additional_claims=additional_claims)
assert credentials.service_account_email == info['client_email']
assert credentials._signer.key_id == info['private_key_id']
assert credentials._token_uri == info['token_uri']
assert credentials._scopes == scopes
assert credentials._subject == subject
assert credentials._additional_claims == additional_claims
def test_from_service_account_file(self):
info = SERVICE_ACCOUNT_INFO.copy()
credentials = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_JSON_FILE)
assert credentials.service_account_email == info['client_email']
assert credentials._signer.key_id == info['private_key_id']
assert credentials._token_uri == info['token_uri']
def test_from_service_account_file_args(self):
info = SERVICE_ACCOUNT_INFO.copy()
scopes = ['email', 'profile']
subject = 'subject'
additional_claims = {'meta': 'data'}
credentials = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_JSON_FILE, subject=subject,
scopes=scopes, additional_claims=additional_claims)
assert credentials.service_account_email == info['client_email']
assert credentials._signer.key_id == info['private_key_id']
assert credentials._token_uri == info['token_uri']
assert credentials._scopes == scopes
assert credentials._subject == subject
assert credentials._additional_claims == additional_claims
def test_to_jwt_credentials(self):
jwt_from_svc = self.credentials.to_jwt_credentials()
jwt_from_info = jwt.Credentials.from_service_account_info(
SERVICE_ACCOUNT_INFO)
assert isinstance(jwt_from_svc, jwt.Credentials)
assert jwt_from_svc._signer.key_id == jwt_from_info._signer.key_id
assert jwt_from_svc._issuer == jwt_from_info._issuer
assert jwt_from_svc._subject == jwt_from_info._subject
assert jwt_from_svc._audience == jwt_from_info._audience
def test_default_state(self):
assert not self.credentials.valid
# Expiration hasn't been set yet
assert not self.credentials.expired
# Scopes haven't been specified yet
assert self.credentials.requires_scopes
def test_sign_bytes(self):
to_sign = b'123'
signature = self.credentials.sign_bytes(to_sign)
assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
def test_signer_email(self):
assert self.credentials.signer_email == self.SERVICE_ACCOUNT_EMAIL
def test_create_scoped(self):
scopes = ['email', 'profile']
credentials = self.credentials.with_scopes(scopes)
assert credentials._scopes == scopes
def test__make_authorization_grant_assertion(self):
token = self.credentials._make_authorization_grant_assertion()
payload = jwt.decode(token, PUBLIC_CERT_BYTES)
assert payload['iss'] == self.SERVICE_ACCOUNT_EMAIL
assert payload['aud'] == self.TOKEN_URI
def test__make_authorization_grant_assertion_scoped(self):
scopes = ['email', 'profile']
credentials = self.credentials.with_scopes(scopes)
token = credentials._make_authorization_grant_assertion()
payload = jwt.decode(token, PUBLIC_CERT_BYTES)
assert payload['scope'] == 'email profile'
def test__make_authorization_grant_assertion_subject(self):
subject = 'user@example.com'
credentials = self.credentials.with_subject(subject)
token = credentials._make_authorization_grant_assertion()
payload = jwt.decode(token, PUBLIC_CERT_BYTES)
assert payload['sub'] == subject
@mock.patch('google.oauth2._client.jwt_grant', autospec=True)
def test_refresh_success(self, jwt_grant_mock):
token = 'token'
jwt_grant_mock.return_value = (
token, _helpers.utcnow() + datetime.timedelta(seconds=500), None)
request_mock = mock.Mock()
# Refresh credentials
self.credentials.refresh(request_mock)
# Check jwt grant call.
assert jwt_grant_mock.called
request, token_uri, assertion = jwt_grant_mock.call_args[0]
assert request == request_mock
assert token_uri == self.credentials._token_uri
assert jwt.decode(assertion, PUBLIC_CERT_BYTES)
# No further assertion done on the token, as there are separate tests
# for checking the authorization grant assertion.
# Check that the credentials have the token.
assert self.credentials.token == token
# Check that the credentials are valid (have a token and are not
# expired)
assert self.credentials.valid
@mock.patch('google.oauth2._client.jwt_grant', autospec=True)
def test_before_request_refreshes(self, jwt_grant_mock):
token = 'token'
jwt_grant_mock.return_value = (
token, _helpers.utcnow() + datetime.timedelta(seconds=500), None)
request_mock = mock.Mock()
# Credentials should start as invalid
assert not self.credentials.valid
# before_request should cause a refresh
self.credentials.before_request(
request_mock, 'GET', 'http://example.com?a=1#3', {})
# The refresh endpoint should've been called.
assert jwt_grant_mock.called
# Credentials should now be valid.
assert self.credentials.valid
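# --- Illustrative usage sketch (not part of the test suite) -----------------
# The flow exercised by the tests above, written as straight-line code. The
# key file path and scope are placeholders; google.auth.transport.requests is
# assumed to be available (it is, when the `requests` package is installed).
#
# from google.oauth2 import service_account
# from google.auth.transport.requests import Request
#
# credentials = service_account.Credentials.from_service_account_file(
#     '/path/to/service_account.json',
#     scopes=['https://www.googleapis.com/auth/cloud-platform'])
# credentials.refresh(Request())
# print(credentials.token, credentials.expiry)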
|
|
"""
There are currently two types of functions (items 3 and 4 below are not
implemented yet):
1) defined function like exp or sin that has a name and body
(in the sense that function can be evaluated).
e = exp
2) undefined function with a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) this isn't implemented yet: anonymous function or lambda function that has
no name but has body with dummy variables. Examples of anonymous function
creation:
f = Lambda(x, exp(x)*x)
f = Lambda(exp(x)*x) # free symbols in the expression define the number of arguments
f = exp * Lambda(x,x)
4) isn't implemented yet: composition of functions, like (sin+cos)(x), this
works in sympy core, but needs to be ported back to SymPy.
Example:
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print sympy.srepr(f(x).func)
Function('f')
>>> f(x).args
(x,)
"""
from basic import Basic, BasicMeta, Atom, S, C
from expr import Expr
from sympify import sympify
from add import Add
from cache import cacheit
from itertools import repeat
from numbers import Rational, Integer
from symbol import Symbol
from multidimensional import vectorize
from sympy.utilities.decorator import deprecated
from sympy.utilities import all
from sympy import mpmath
class PoleError(Exception):
pass
class FunctionClass(BasicMeta):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
__metaclass__ = BasicMeta
_new = type.__new__
def __new__(cls, arg1, arg2, arg3=None, **options):
assert not options,`options`
if isinstance(arg1, type):
# the following code gets executed when one types
# FunctionClass(Function, "f")
# i.e. cls = FunctionClass, arg1 = Function, arg2 = "f"
# and we simply do an equivalent of:
# class f(Function):
# ...
# return f
ftype, name, signature = arg1, arg2, arg3
#XXX this probably needs some fixing:
assert ftype.__name__.endswith('Function'),`ftype`
attrdict = ftype.__dict__.copy()
attrdict['undefined_Function'] = True
if signature is not None:
attrdict['signature'] = signature
bases = (ftype,)
return BasicMeta.__new__(cls, name, bases, attrdict)
else:
name, bases, attrdict = arg1, arg2, arg3
return BasicMeta.__new__(cls, name, bases, attrdict)
def __repr__(cls):
return cls.__name__
class Application(Basic):
"""
Base class for applied functions.
Instances of Application represent the result of applying an application of
any type to any object.
"""
__metaclass__ = FunctionClass
__slots__ = []
is_Function = True
nargs = None
@vectorize(1)
@cacheit
def __new__(cls, *args, **options):
args = map(sympify, args)
# these lines should be refactored
for opt in ["nargs", "dummy", "comparable", "noncommutative", "commutative"]:
if opt in options:
del options[opt]
# up to here.
if options.get('evaluate') is False:
return super(Application, cls).__new__(cls, *args, **options)
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
# Just undefined functions have nargs == None
if not cls.nargs and hasattr(cls, 'undefined_Function'):
r = super(Application, cls).__new__(cls, *args, **options)
r.nargs = len(args)
return r
return super(Application, cls).__new__(cls, *args, **options)
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Example of eval() for the function "sign"
---------------------------------------------
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg is S.Zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, C.Mul):
coeff, terms = arg.as_coeff_terms()
if coeff is not S.One:
return cls(coeff) * cls(C.Mul(*terms))
"""
return
def count_ops(self, symbolic=True):
# f() args
return 1 + Add(*[ t.count_ops(symbolic) for t in self.args ])
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if self == old:
return new
elif old.is_Function and new.is_Function:
if old == self.func:
if self.nargs is new.nargs or not new.nargs:
return new(*self.args)
# Written down as an elif to avoid a super-long line
elif isinstance(new.nargs,tuple) and self.nargs in new.nargs:
return new(*self.args)
return Basic._seq_subs(self, old, new)
class Function(Application, Expr):
"""
Base class for applied numeric functions.
Constructor of undefined classes.
"""
@vectorize(1)
@cacheit
def __new__(cls, *args, **options):
# NOTE: this __new__ is twofold:
#
# 1 -- it can create another *class*, which can then be instantiated by
# itself e.g. Function('f') creates a new class f(Function)
#
# 2 -- on the other hand, we instantiate -- that is we create an
# *instance* of a class created earlier in 1.
#
        # So please keep both (1) and (2) in mind.
# (1) create new function class
# UC: Function('f')
if cls is Function:
#when user writes Function("f"), do an equivalent of:
#taking the whole class Function(...):
#and rename the Function to "f" and return f, thus:
#In [13]: isinstance(f, Function)
#Out[13]: False
#In [14]: isinstance(f, FunctionClass)
#Out[14]: True
if len(args) == 1 and isinstance(args[0], str):
#always create Function
return FunctionClass(Function, *args)
else:
print args
print type(args[0])
raise TypeError("You need to specify exactly one string")
# (2) create new instance of a class created in (1)
# UC: Function('f')(x)
# UC: sin(x)
return Application.__new__(cls, *args, **options)
@property
def is_commutative(self):
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
@classmethod
@deprecated
def canonize(cls, *args):
return cls.eval(*args)
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
fname = self.func.__name__
try:
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
return
# Convert all args to mpf or mpc
try:
args = [arg._to_mpmath(prec) for arg in self.args]
except ValueError:
return
# Set mpmath precision and apply. Make sure precision is restored
# afterwards
orig = mpmath.mp.prec
try:
mpmath.mp.prec = prec
v = func(*args)
finally:
mpmath.mp.prec = orig
return Expr._from_mpmath(v, prec)
def _eval_is_comparable(self):
if self.is_Function:
r = True
for s in self.args:
c = s.is_comparable
if c is None: return
if not c: r = False
return r
return
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
r = S.Zero
for a in self.args:
i += 1
da = a.diff(s)
if da is S.Zero:
continue
if isinstance(self.func, FunctionClass):
df = self.fdiff(i)
l.append(df * da)
return Add(*l)
def _eval_is_commutative(self):
r = True
for a in self._args:
c = a.is_commutative
if c is None: return None
if not c: r = False
return r
def as_base_exp(self):
return self, S.One
def _eval_nseries(self, x, x0, n):
assert len(self.args) == 1
arg = self.args[0]
arg0 = arg.limit(x, 0)
from sympy import oo
if arg0 in [-oo, oo]:
raise PoleError("Cannot expand around %s" % (arg))
if arg0 is not S.Zero:
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
term = e.subs(x, S.Zero)
series = term
fact = S.One
for i in range(n-1):
i += 1
fact *= Rational(i)
e = e.diff(x)
term = e.subs(x, S.Zero)*(x**i)/fact
term = term.expand()
series += term
return series + C.Order(x**n, x)
return e1.nseries(x, x0, n)
l = []
g = None
for i in xrange(n+2):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, x0, n)
l.append(g)
return Add(*l) + C.Order(x**n, x)
def _eval_is_polynomial(self, syms):
for arg in self.args:
if arg.has(*syms):
return False
return True
def _eval_expand_basic(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_mul(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
            if hasattr(term, '_eval_expand_multinomial'):
newterm = term._eval_expand_multinomial(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_log(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
if deep:
func = self.func(*[ a.expand(deep, **hints) for a in self.args ])
else:
func = self.func(*self.args)
return C.re(func) + S.ImaginaryUnit * C.im(func)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_rewrite(self, pattern, rule, **hints):
if hints.get('deep', False):
args = [ a._eval_rewrite(pattern, rule, **hints) for a in self ]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args, **self._assumptions)
def fdiff(self, argindex=1):
if self.nargs is not None:
if isinstance(self.nargs, tuple):
nargs = self.nargs[-1]
else:
nargs = self.nargs
if not (1<=argindex<=nargs):
raise TypeError("argument index %r is out of range [1,%s]" % (argindex,nargs))
return Derivative(self,self.args[argindex-1],evaluate=False)
def _eval_as_leading_term(self, x):
"""General method for the leading term"""
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
@classmethod
def taylor_term(cls, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n-times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
x = sympify(x)
return cls(x).diff(x, n).subs(x, 0) * x**n / C.Factorial(n)
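# --- Illustrative sketch (not part of sympy) ---------------------------------
# How a subclass could use ``previous_terms`` to avoid the repeated
# differentiation done by the generic Function.taylor_term above: for a series
# whose n-th term is x**n/n!, each term is the previous one times x/n, so only
# the very first term has to be computed from scratch. The class name below is
# hypothetical.
#
# class exp_like(Function):
#     nargs = 1
#
#     @classmethod
#     def taylor_term(cls, n, x, *previous_terms):
#         if n == 0 or not previous_terms:
#             return x**n / C.Factorial(n)
#         return previous_terms[-1] * x / n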
class WildFunction(Function, Atom):
"""
WildFunction() matches any expression but another WildFunction()
XXX is this as intended, does it work ?
"""
nargs = 1
def __new__(cls, name=None, **assumptions):
if name is None:
name = 'Wf%s' % (C.Symbol.dummycount + 1) # XXX refactor dummy counting
Symbol.dummycount += 1
obj = Function.__new__(cls, name, **assumptions)
obj.name = name
return obj
def matches(self, expr, repl_dict={}, evaluate=False):
if self in repl_dict:
if repl_dict[self] == expr:
return repl_dict
else:
return None
if self.nargs is not None:
if not hasattr(expr,'nargs') or self.nargs != expr.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
@property
def is_number(self):
return False
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
expr must define ._eval_derivative(symbol) method that returns
the differentiation result or None.
Examples:
Derivative(Derivative(expr, x), y) -> Derivative(expr, x, y)
Derivative(expr, x, 3) -> Derivative(expr, x, x, x)
"""
is_Derivative = True
@staticmethod
def _symbolgen(*symbols):
"""
Generator of all symbols in the argument of the Derivative.
Example:
>> ._symbolgen(x, 3, y)
(x, x, x, y)
>> ._symbolgen(x, 10**6)
(x, x, x, x, x, x, x, ...)
The second example shows why we don't return a list, but a generator,
so that the code that calls _symbolgen can return earlier for special
cases, like x.diff(x, 10**6).
"""
last_s = sympify(symbols[len(symbols)-1])
for i in xrange(len(symbols)):
s = sympify(symbols[i])
next_s = None
if s != last_s:
next_s = sympify(symbols[i+1])
if isinstance(s, Integer):
continue
elif isinstance(s, C.Symbol):
# handle cases like (x, 3)
if isinstance(next_s, Integer):
# yield (x, x, x)
for copy_s in repeat(s,int(next_s)):
yield copy_s
else:
yield s
else:
yield s
def __new__(cls, expr, *symbols, **assumptions):
expr = sympify(expr)
if not symbols:
return expr
symbols = Derivative._symbolgen(*symbols)
if expr.is_commutative:
assumptions["commutative"] = True
if "evaluate" in assumptions:
evaluate = assumptions["evaluate"]
del assumptions["evaluate"]
else:
evaluate = False
if not evaluate and not isinstance(expr, Derivative):
symbols = list(symbols)
if len(symbols) == 0:
# We make a special case for 0th derivative, because there
# is no good way to unambiguously print this.
return expr
obj = Expr.__new__(cls, expr, *symbols, **assumptions)
return obj
unevaluated_symbols = []
for s in symbols:
s = sympify(s)
if not isinstance(s, C.Symbol):
raise ValueError('Invalid literal: %s is not a valid variable' % s)
if not expr.has(s):
return S.Zero
obj = expr._eval_derivative(s)
if obj is None:
unevaluated_symbols.append(s)
elif obj is S.Zero:
return S.Zero
else:
expr = obj
if not unevaluated_symbols:
return expr
return Expr.__new__(cls, expr, *unevaluated_symbols, **assumptions)
def _eval_derivative(self, s):
if s not in self.symbols:
obj = self.expr.diff(s)
if isinstance(obj, Derivative):
return Derivative(obj.expr, *(self.symbols+obj.symbols))
return Derivative(obj, *self.symbols)
return Derivative(self.expr, *(self.symbols+(s,)), **{'evaluate': False})
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
return Derivative(expr, *self.symbols, **hints)
@property
def expr(self):
return self._args[0]
@property
def symbols(self):
return self._args[1:]
def _eval_subs(self, old, new):
if self==old:
return new
return Derivative(*map(lambda x: x._eval_subs(old, new), self.args), **{'evaluate': True})
def matches(self, expr, repl_dict={}, evaluate=False):
# this method needs a cleanup.
if self in repl_dict:
if repl_dict[self] == expr:
return repl_dict
else:
return None
if isinstance(expr, Derivative):
if len(expr.symbols) == len(self.symbols):
#print "MAYBE:",self, expr, repl_dict, evaluate
return Expr.matches(self, expr, repl_dict, evaluate)
#print "NONE:",self, expr, repl_dict, evaluate
return None
#print self, expr, repl_dict, evaluate
stop
if self.nargs is not None:
if self.nargs != expr.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
def _eval_lseries(self, x, x0):
stop
arg = self.args[0]
dx = self.args[1]
for term in arg.lseries(x, x0):
yield term.diff(dx)
def _eval_nseries(self, x, x0, n):
arg = self.args[0]
arg = arg.nseries(x, x0, n)
o = arg.getO()
dx = self.args[1]
if o:
return arg.removeO().diff(dx) + arg.getO()/dx
else:
return arg.removeO().diff(dx)
class Lambda(Function):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda(x, y, z, t, x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
    Multivariate functions can be curried for partial application:
>>> sum2numbers = Lambda(x, y, x+y)
>>> sum2numbers(1,2)
3
>>> plus1 = sum2numbers(1)
>>> plus1(3)
4
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
# a minimum of 2 arguments (parameter, expression) are needed
nargs = 2
def __new__(cls,*args):
assert len(args) >= 2,"Must have at least one parameter and an expression"
if len(args) == 2 and isinstance(args[0], (list, tuple)):
args = tuple(args[0])+(args[1],)
obj = Function.__new__(cls,*args)
obj.nargs = len(args)-1
return obj
@classmethod
def eval(cls,*args):
obj = Expr.__new__(cls, *args)
#use dummy variables internally, just to be sure
nargs = len(args)-1
expression = args[nargs]
funargs = [C.Symbol(arg.name, dummy=True) for arg in args[:nargs]]
#probably could use something like foldl here
for arg,funarg in zip(args[:nargs],funargs):
expression = expression.subs(arg,funarg)
funargs.append(expression)
obj._args = tuple(funargs)
return obj
def apply(self, *args):
"""Applies the Lambda function "self" to the arguments given.
This supports partial application.
Example:
>>> from sympy import Lambda
>>> from sympy.abc import x, y
>>> f = Lambda(x, x**2)
>>> f.apply(4)
16
>>> sum2numbers = Lambda(x,y,x+y)
>>> sum2numbers(1,2)
3
>>> plus1 = sum2numbers(1)
>>> plus1(3)
4
"""
nparams = self.nargs
assert nparams >= len(args),"Cannot call function with more parameters than function variables: %s (%d variables) called with %d arguments" % (str(self),nparams,len(args))
#replace arguments
expression = self.args[self.nargs]
for arg,funarg in zip(args,self.args[:nparams]):
expression = expression.subs(funarg,arg)
#curry the rest
if nparams != len(args):
unused_args = list(self.args[len(args):nparams])
unused_args.append(expression)
return Lambda(*tuple(unused_args))
return expression
def __call__(self, *args):
return self.apply(*args)
def __eq__(self, other):
if isinstance(other, Lambda):
if not len(self.args) == len(other.args):
return False
selfexpr = self.args[self.nargs]
otherexpr = other.args[other.nargs]
for selfarg,otherarg in zip(self.args[:self.nargs],other.args[:other.nargs]):
otherexpr = otherexpr.subs(otherarg,selfarg)
if selfexpr == otherexpr:
return True
# if self.args[1] == other.args[1].subs(other.args[0], self.args[0]):
# return True
return False
@vectorize(0)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0)), then the result will
be the function (the zeroth derivative), even if evaluate=False.
This function is vectorized, so you can pass a list for the arguments and
each argument will be mapped to each element of the list. For a single
symbol, you can just pass the symbol normally. For multiple symbols,
pass each group in a tuple. For example, do diff(f(x, y), [x, y]) to get
the derivatives of f(x, y) with respect to x and with respect to y, and
diff(f(x, y), [(x, x), (y, y)]) to get the derivatives of f(x, y) with
respect to x twice and with respect to y twice. You can also mix tuples
and single symbols.
Examples:
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
D(f(x), x, x, x)
>>> diff(f(x), x, 3)
D(f(x), x, x, x)
>>> diff(sin(x)*cos(y), x, 2, y, 2)
cos(y)*sin(x)
>>> diff(f(x, y), [x, y])
[D(f(x, y), x), D(f(x, y), y)]
>>> diff(f(x, y), [(x, x), (y, y)])
[D(f(x, y), x, x), D(f(x, y), y, y)]
>>> diff(f(x, y), [(x, 2), y])
[D(f(x, y), x, x), D(f(x, y), y)]
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
See Also
http://documents.wolfram.com/v5/Built-inFunctions/AlgebraicComputation/Calculus/D.html
"""
# @vectorize(1) won't handle symbols in the way that we want, so we have to
# write the for loop manually.
kwargs.setdefault('evaluate', True)
if hasattr(symbols[0], '__iter__'):
retlist = []
for i in symbols[0]:
if hasattr(i, '__iter__'):
retlist.append(Derivative(f, *i, **kwargs))
else:
retlist.append(Derivative(f, i, **kwargs))
return retlist
return Derivative(f,*symbols, **kwargs)
@vectorize(0)
def expand(e, deep=True, power_base=True, power_exp=True, mul=True, \
log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using methods given as hints.
    Hints are applied in arbitrary order, so your code shouldn't
    depend on the order in which hints are passed to this method.
Hints evaluated unless explicitly set to False are:
basic, log, multinomial, mul, power_base, and power_exp
The following hints are supported but not applied unless set to True:
complex, func, and trig.
basic is a generic keyword for methods that want to be expanded
automatically. For example, Integral uses expand_basic to expand the
integrand. If you want your class expand methods to run automatically and
they don't fit one of the already automatic methods, wrap it around
_eval_expand_basic.
If deep is set to True, things like arguments of functions are
recursively expanded. Use deep=False to only expand on the top
level.
Also see expand_log, expand_mul, expand_complex, expand_trig,
and expand_func, which are wrappers around those expansion methods.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y, z
mul - Distributes multiplication over addition.
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
complex - Split an expression into real and imaginary parts.
>>> (x+y).expand(complex=True)
I*im(x) + I*im(y) + re(x) + re(y)
>>> cos(x).expand(complex=True)
cos(re(x))*cosh(im(x)) - I*sin(re(x))*sinh(im(x))
power_exp - Expand addition in exponents into multiplied bases.
>>> exp(x+y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x+y)).expand(power_exp=True)
2**x*2**y
power_base - Split powers of multiplied bases.
>>> ((x*y)**z).expand(power_base=True)
x**z*y**z
log - Pull out power of an argument as a coefficient and split log products
into sums of logs. Note that these only work if the arguments of the log
function have the proper assumptions: the arguments must be positive and the
exponents must be real.
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(y*x**2)
>>> x, y = symbols('xy', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
trig - Do trigonometric expansions.
>>> cos(x+y).expand(trig=True)
cos(x)*cos(y) - sin(x)*sin(y)
func - Expand other functions.
>>> from sympy import gamma
>>> gamma(x+1).expand(func=True)
x*gamma(x)
multinomial - Expand (x + y + ...)**n where n is a positive integer.
>>> ((x+y+z)**2).expand(multinomial=True)
2*x*y + 2*x*z + 2*y*z + x**2 + y**2 + z**2
You can shut off methods that you don't want.
>>> (exp(x+y)*(x+y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x+y)*(x+y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x+y)*(x+y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
Use deep=False to only expand on the top level.
>>> exp(x+exp(x+y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x+exp(x+y)).expand(deep=False)
exp(x)*exp(exp(x + y))
Note: because hints are applied in arbitrary order, some hints may
prevent expansion by other hints if they are applied first. In
particular, mul may distribute multiplications and prevent log and
power_base from expanding them. Also, if mul is applied before multinomial,
the expression might not be fully distributed. The solution is to expand
with mul=False first, then run expand_mul if you need further expansion.
Examples:
>>> from sympy import expand_log, expand, expand_mul
>>> x, y, z = symbols('xyz', positive=True)
>> expand(log(x*(y+z))) # could be either one below
log(x*y + x*z)
log(x) + log(y + z)
>>> expand_log(log(x*y+x*z))
log(x*y + x*z)
>> expand(log(x*(y+z)), mul=False)
log(x) + log(y + z)
>> expand((x*(y+z))**x) # could be either one below
(x*y + x*z)**x
x**x*(y + z)**x
>>> expand((x*(y+z))**x, mul=False)
x**x*(y + z)**x
>> expand(x*(y+z)**2) # could be either one below
2*x*y*z + x*y**2 + x*z**2
x*(y + z)**2
>>> expand(x*(y+z)**2, mul=False)
x*(2*y*z + y**2 + z**2)
>>> expand_mul(_)
2*x*y*z + x*y**2 + x*z**2
"""
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, **hints)
# These are simple wrappers around single hints. Feel free to add ones for
# power_exp, power_base, multinomial, or basic if you need them.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Example:
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('xy', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,\
power_base=False, basic=False, multinomial=False, log=False)
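# Hedged sketch, not part of the original module: a wrapper for the power_base
# hint could follow the same pattern as expand_mul above, e.g.
# expand_power_base((x*y)**z) would give x**z*y**z.
def expand_power_base(expr, deep=True):
    """
    Wrapper around expand that only uses the power_base hint. See the expand
    docstring for more information.
    """
    return sympify(expr).expand(deep=deep, power_base=True, mul=False,\
        power_exp=False, log=False, multinomial=False, basic=False)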
def expand_log(expr, deep=True):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Example:
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('xy', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(2*log(y) + log(x))*exp(x + y)
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,\
power_exp=False, power_base=False, multinomial=False, basic=False)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(1 + x)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_trig, sin, cos
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(cos(x)*sin(y) + cos(y)*sin(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_complex, I, im, re
>>> from sympy.abc import z
>>> expand_complex(z**(2*I))
I*im(z**(2*I)) + re(z**(2*I))
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
from numbers import Rational, Integer
from sympify import sympify
from add import Add
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import sys
sys.path.append("..")
from op_test import OpTest
from test_conv2d_op import conv2d_forward_naive
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn.initializer import KaimingNormal
paddle.enable_static()
SEED = 2021
def create_test_channel_last_class(parent):
class TestChannelLastCase(parent):
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
TestChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestChannelLastCase
def create_test_padding_SAME_class(parent):
class TestPaddingSAMECase(parent):
def init_paddings(self):
self.pad = [0, 0]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
TestPaddingSAMECase.__name__ = cls_name
globals()[cls_name] = TestPaddingSAMECase
def create_test_padding_VALID_class(parent):
class TestPaddingVALIDCase(parent):
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
TestPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestPaddingVALIDCase
def create_test_fp16_class(parent):
class TestFp16Case(parent):
def init_data_type(self):
self.dtype = np.float16
cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
TestFp16Case.__name__ = cls_name
globals()[cls_name] = TestFp16Case
class TestDepthwiseConvNPU(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "depthwise_conv2d"
self.init_data_format()
self.init_data_type()
self.init_test_case()
self.init_test_case_2()
conv2d_param = {
'stride': self.stride,
'pad': self.pad,
'dilation': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)
output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups,
conv2d_param, "EXPLICIT",
self.data_format)
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'data_format': self.data_format,
}
self.outputs = {'Output': output}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_test_case(self):
self.pad = [1, 1]
self.dilations = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
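# Depthwise setup: groups == C_in, so each filter group convolves a single
# input channel (here f_c == 1); filter_size follows [C_out, C_in // groups, kH, kW].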
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-2)
def test_check_grad(self):
if self.dtype == np.float16:
return
if self.dilations[0] == 1 and self.dilations[1] == 1:
self.check_grad_with_place(
self.place, {'Input', 'Filter'},
'Output',
max_relative_error=0.03,
numeric_place=paddle.CPUPlace())
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['Input'],
'Output',
no_grad_set=set(['Filter']),
max_relative_error=0.03,
numeric_place=paddle.CPUPlace())
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
if self.dilations[0] == 1 and self.dilations[1] == 1:
self.check_grad_with_place(
self.place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
max_relative_error=0.03,
numeric_place=paddle.CPUPlace())
def init_data_format(self):
self.data_format = "NCHW"
def init_data_type(self):
self.dtype = np.float32
def init_test_case_2(self):
pass
class TestDepthwiseConvNPU2(TestDepthwiseConvNPU):
def init_test_case(self):
self.pad = [1, 1]
self.dilations = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
class TestDepthwiseConvNPU3(TestDepthwiseConvNPU):
def init_test_case(self):
self.pad = [1, 1]
self.dilations = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
class TestDepthwiseConvNPU4(TestDepthwiseConvNPU):
def init_test_case(self):
self.pad = [1, 1]
self.dilations = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
class TestDepthwiseConvNPU_Padding(OpTest):
def setUp(self):
self.op_type = "depthwise_conv2d"
self.dtype = np.float32
self.set_npu()
self.init_data_format()
self.init_data_type()
self.init_paddings()
self.init_test_case()
self.init_test_case_2()
conv2d_param = {
'stride': self.stride,
'pad': self.pad,
'dilation': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype)
output, _, _, _, _ = conv2d_forward_naive(
input, filter, self.groups, conv2d_param, self.padding_algorithm,
self.data_format)
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_test_case(self):
self.pad = [1, 1, 0, 1]
self.dilations = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-2)
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, {'Input', 'Filter'},
'Output',
max_relative_error=0.03,
numeric_place=paddle.CPUPlace())
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']),
numeric_place=paddle.CPUPlace())
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
self.check_grad_with_place(
self.place, ['Filter'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']),
numeric_place=paddle.CPUPlace())
def init_data_format(self):
self.data_format = "NCHW"
def init_data_type(self):
self.dtype = np.float32
def init_paddings(self):
self.pad = [1, 1, 0, 1]
self.padding_algorithm = "EXPLICIT"
def init_test_case_2(self):
pass
class TestDepthwiseConvNPU2_Padding(TestDepthwiseConvNPU_Padding):
def init_test_case(self):
self.pad = [1, 1, 0, 1]
self.dilations = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def init_paddings(self):
self.pad = [0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvNPU3_Padding(TestDepthwiseConvNPU_Padding):
def init_test_case(self):
self.pad = [1, 1, 0, 1]
self.dilations = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 12, 5, 5] # NCHW
self.groups = 12
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def init_paddings(self):
self.pad = [2, 1, 2, 3]
self.padding_algorithm = "EXPLICIT"
# test channel last
create_test_channel_last_class(TestDepthwiseConvNPU)
create_test_channel_last_class(TestDepthwiseConvNPU2)
create_test_channel_last_class(TestDepthwiseConvNPU_Padding)
create_test_channel_last_class(TestDepthwiseConvNPU2_Padding)
# test padding SAME
create_test_padding_SAME_class(TestDepthwiseConvNPU_Padding)
create_test_padding_SAME_class(TestDepthwiseConvNPU2_Padding)
create_test_padding_SAME_class(TestDepthwiseConvNPU3_Padding)
# test padding VALID
create_test_padding_VALID_class(TestDepthwiseConvNPU_Padding)
create_test_padding_VALID_class(TestDepthwiseConvNPU2_Padding)
create_test_padding_VALID_class(TestDepthwiseConvNPU3_Padding)
create_test_fp16_class(TestDepthwiseConvNPU)
create_test_fp16_class(TestDepthwiseConvNPU2)
create_test_fp16_class(TestDepthwiseConvNPU_Padding)
create_test_fp16_class(TestDepthwiseConvNPU2_Padding)
create_test_fp16_class(TestDepthwiseConvNPU3_Padding)
if __name__ == '__main__':
unittest.main()
|
|
"""Tests for binary operators on subtypes of built-in types."""
import unittest
from test import support
from operator import eq, ne, lt, gt, le, ge
from abc import ABCMeta
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
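# Note: with this formulation the sign of the result follows the first
# argument, e.g. gcd(-10, 2) == -2 while gcd(10, -2) == 2; RatTestCase.test_gcd
# below asserts exactly this behaviour.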
def isint(x):
"""Test whether an object is an instance of int."""
return isinstance(x, int)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test wheter an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of ints."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0, den=1):
"""Constructor: Rat([num[, den]]).
The arguments must be ints, and default to (0, 1)."""
if not isint(num):
raise TypeError("Rat numerator must be int (%r)" % num)
if not isint(den):
raise TypeError("Rat denominator must be int (%r)" % den)
# But the zero is always on
if den == 0:
raise ZeroDivisionError("zero denominator")
g = gcd(den, num)
self.__num = int(num//g)
self.__den = int(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to a string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError("%s too large to convert to int" %
repr(self))
raise ValueError("can't convert %s to int" % repr(self))
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assertTrue(gcd(i, j) > 0)
self.assertTrue(gcd(-i, j) < 0)
self.assertTrue(gcd(i, -j) > 0)
self.assertTrue(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_true_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
# XXX Ran out of steam; TO DO: divmod, div, future division
class OperationLogger:
"""Base class for classes with operation logging."""
def __init__(self, logger):
self.logger = logger
def log_operation(self, *args):
self.logger(*args)
def op_sequence(op, *classes):
"""Return the sequence of operations that results from applying
the operation `op` to instances of the given classes."""
log = []
instances = []
for c in classes:
instances.append(c(log.append))
try:
op(*instances)
except TypeError:
pass
return log
class A(OperationLogger):
def __eq__(self, other):
self.log_operation('A.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('A.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('A.__ge__')
return NotImplemented
class B(OperationLogger, metaclass=ABCMeta):
def __eq__(self, other):
self.log_operation('B.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('B.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('B.__ge__')
return NotImplemented
class C(B):
def __eq__(self, other):
self.log_operation('C.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('C.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('C.__ge__')
return NotImplemented
class V(OperationLogger):
"""Virtual subclass of B"""
def __eq__(self, other):
self.log_operation('V.__eq__')
return NotImplemented
def __le__(self, other):
self.log_operation('V.__le__')
return NotImplemented
def __ge__(self, other):
self.log_operation('V.__ge__')
return NotImplemented
B.register(V)
class OperationOrderTests(unittest.TestCase):
def test_comparison_orders(self):
self.assertEqual(op_sequence(eq, A, A), ['A.__eq__', 'A.__eq__'])
self.assertEqual(op_sequence(eq, A, B), ['A.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(eq, B, A), ['B.__eq__', 'A.__eq__'])
# C is a subclass of B, so C.__eq__ is called first
self.assertEqual(op_sequence(eq, B, C), ['C.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(eq, C, B), ['C.__eq__', 'B.__eq__'])
self.assertEqual(op_sequence(le, A, A), ['A.__le__', 'A.__ge__'])
self.assertEqual(op_sequence(le, A, B), ['A.__le__', 'B.__ge__'])
self.assertEqual(op_sequence(le, B, A), ['B.__le__', 'A.__ge__'])
self.assertEqual(op_sequence(le, B, C), ['C.__ge__', 'B.__le__'])
self.assertEqual(op_sequence(le, C, B), ['C.__le__', 'B.__ge__'])
self.assertTrue(issubclass(V, B))
self.assertEqual(op_sequence(eq, B, V), ['B.__eq__', 'V.__eq__'])
self.assertEqual(op_sequence(le, B, V), ['B.__le__', 'V.__ge__'])
if __name__ == "__main__":
unittest.main()
|
|
import copy
import datetime
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils.functional import cached_property
class Combinable:
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
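# Hedged usage sketch (model and field names hypothetical): arithmetic on
# F() objects builds CombinedExpression trees that are rendered to SQL rather
# than evaluated in Python, e.g.
#   Order.objects.update(total=F('subtotal') + F('tax'))
#   Flag.objects.update(mask=F('mask').bitand(0xFF))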
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class BaseExpression:
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, str) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing placeholders for the ordered sql
parameters, which will be replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expression.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
for inner_expr in expr.flatten():
yield inner_expr
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
pass
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
class Func(Expression):
"""
An SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection):
sql, params = self.as_sql(compiler, connection)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
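# Hedged sketch, not part of the original module: a typical Func subclass only
# needs to name the SQL function it renders; the class below is illustrative.
class Reverse(Func):
    function = 'REVERSE'
    arity = 1
# e.g. Author.objects.annotate(backwards=Reverse(F('name'))), with the model
# and field names being hypothetical.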
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if hasattr(self._output_field, 'get_placeholder'):
return self._output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super().__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to a column alias of the query. For example, Ref('sum_cost') in
the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
if output_field is None and len(self.queryset.query.select) == 1:
output_field = self.queryset.query.select[0].field
super().__init__(output_field)
def copy(self):
clone = super().copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
return child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.tables
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = self.extra.copy()
template_params.update(extra_context)
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
def __init__(self, *args, negated=False, **kwargs):
self.negated = negated
super().__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, self.output_field, negated=(not self.negated), **self.extra)
@property
def output_field(self):
return fields.BooleanField()
def resolve_expression(self, query=None, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super().resolve_expression(query, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super().as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import msgpack
import redis
import pretend
import pytest
from pyramid import viewderivers
import warehouse.sessions
from warehouse.sessions import (
InvalidSession, Session, SessionFactory, includeme, session_view,
)
from warehouse.utils import crypto
class TestInvalidSession:
@pytest.mark.parametrize(
"method",
[
# IDict methods
"__contains__",
"__delitem__",
"__getitem__",
"__iter__",
"__len__",
"__setitem__",
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
# ISession methods
"invalidate",
"flash",
"changed",
"get_csrf_token",
"peek_flash",
"new_csrf_token",
"pop_flash",
# Our custom methods.
"should_save",
],
)
def test_methods_raise(self, method):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, method)()
@pytest.mark.parametrize("name", ["created", "new", "sid"])
    def test_property_raises(self, name):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, name)
class TestSession:
@pytest.mark.parametrize(
("data", "expected"),
[
(None, {}),
({}, {}),
({"foo": "bar"}, {"foo": "bar"}),
]
)
def test_create_new(self, monkeypatch, data, expected):
monkeypatch.setattr(time, "time", lambda: 100)
monkeypatch.setattr(crypto, "random_token", lambda: "123456")
session = Session(data)
assert session == expected
assert session.sid == "123456"
assert session.new
assert session.created == 100
assert not session.invalidated
@pytest.mark.parametrize(
("data", "expected", "new"),
[
(None, {}, True),
({}, {}, True),
({"foo": "bar"}, {"foo": "bar"}, True),
(None, {}, False),
({}, {}, False),
({"foo": "bar"}, {"foo": "bar"}, False),
]
)
def test_create_with_session_id(self, monkeypatch, data, expected, new):
monkeypatch.setattr(time, "time", lambda: 100)
session = Session(data, "wat", new)
assert session == expected
assert session.sid == "wat"
assert session.new is new
assert session.created == 100
assert not session.invalidated
def test_changed_marks_as_changed(self):
session = Session()
assert not session._changed
session.changed()
assert session._changed
def test_invalidate(self, monkeypatch):
session_ids = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(session_ids))
session = Session({"foo": "bar"}, "original id", False)
assert session == {"foo": "bar"}
assert session.sid == "original id"
assert not session.new
assert not session.invalidated
session.invalidate()
assert session == {}
assert session.sid == "123456"
assert session.new
assert session.invalidated == {"original id"}
session.invalidate()
assert session == {}
assert session.sid == "7890"
assert session.new
assert session.invalidated == {"original id", "123456"}
def test_invalidate_empty(self):
session = Session({"foo": "bar"})
session.invalidate()
assert session == {}
assert session.invalidated == set()
def test_should_save(self):
session = Session()
assert not session.should_save()
session.changed()
assert session.should_save()
@pytest.mark.parametrize(
("data", "method", "args"),
[
({"foo": "bar"}, "__delitem__", ["foo"]),
({}, "__setitem__", ["foo", "bar"]),
({}, "clear", []),
({"foo": "bar"}, "pop", ["foo"]),
({"foo": "bar"}, "popitem", []),
({}, "setdefault", ["foo", "bar"]),
({}, "update", [{"foo": "bar"}]),
],
)
def test_methods_call_changed(self, data, method, args):
session = Session(data)
session.changed = pretend.call_recorder(lambda: None)
getattr(session, method)(*args)
assert session.changed.calls == [pretend.call()]
@pytest.mark.parametrize(
("queue", "expected"),
[
(None, "_flash_messages"),
("foobar", "_flash_messages.foobar"),
],
)
def test_generate_flash_key(self, queue, expected):
session = Session()
assert session._get_flash_queue_key(queue) == expected
def test_flash_messages(self):
session = Session()
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == []
session.flash("Another Flash Message", queue="foo")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message", "A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=True)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=False)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
assert session.pop_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.pop_flash(queue="foo") == ["Another Flash Message"]
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
def test_csrf_token(self, monkeypatch):
tokens = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(tokens))
session = Session()
assert session._csrf_token_key not in session
assert session.new_csrf_token() == "123456"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token() == "7890"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "7890"
def test_get_csrf_token_empty(self):
session = Session()
session.new_csrf_token = pretend.call_recorder(lambda: "123456")
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token.calls == [pretend.call()]
class TestSessionFactory:
def test_initialize(self, monkeypatch):
timestamp_signer_obj = pretend.stub()
timestamp_signer_create = pretend.call_recorder(
lambda secret, salt: timestamp_signer_obj
)
monkeypatch.setattr(crypto, "TimestampSigner", timestamp_signer_create)
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url: strict_redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
session_factory = SessionFactory("mysecret", "my url")
assert session_factory.signer is timestamp_signer_obj
assert session_factory.redis is strict_redis_obj
assert timestamp_signer_create.calls == [
pretend.call("mysecret", salt="session"),
]
assert strict_redis_cls.from_url.calls == [pretend.call("my url")]
def test_redis_key(self):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
assert session_factory._redis_key("my_session_id") == \
"warehouse/session/data/my_session_id"
def test_no_current_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_invalid_session_id(self, pyramid_request):
pyramid_request.cookies["session_id"] = "invalid!"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_no_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: None),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_invalid_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"invalid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_valid_data(self, monkeypatch, pyramid_request):
msgpack_unpackb = pretend.call_recorder(
lambda bdata, encoding, use_list: {"foo": "bar"}
)
monkeypatch.setattr(msgpack, "unpackb", msgpack_unpackb)
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"valid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert msgpack_unpackb.calls == [
pretend.call(b"valid data", encoding="utf8", use_list=True),
]
assert isinstance(session, Session)
assert session == {"foo": "bar"}
assert session.sid == "123456"
assert not session.new
def test_no_save_invalid_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session = InvalidSession()
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
def test_noop_unused_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session.invalidated = set()
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
assert pyramid_request.session.should_save.calls == [pretend.call()]
def test_invalidated_deletes_no_save(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None)
)
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub(
delete_cookie=pretend.call_recorder(lambda cookie: None),
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert response.delete_cookie.calls == [pretend.call("session_id")]
def test_invalidated_deletes_save_non_secure(self, monkeypatch,
pyramid_request):
msgpack_packb = pretend.call_recorder(
lambda data, encoding, use_bin_type: b"msgpack data"
)
monkeypatch.setattr(msgpack, "packb", msgpack_packb)
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None),
setex=pretend.call_recorder(lambda key, age, data: None),
)
session_factory.signer.sign = pretend.call_recorder(
lambda data: "cookie data"
)
pyramid_request.scheme = "http"
pyramid_request.session.sid = "123456"
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: True
)
response = pretend.stub(
set_cookie=pretend.call_recorder(
lambda cookie, data, max_age, httponly, secure: None
)
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert msgpack_packb.calls == [
pretend.call(
pyramid_request.session,
encoding="utf8",
use_bin_type=True,
),
]
assert session_factory.redis.setex.calls == [
pretend.call(
"warehouse/session/data/123456",
12 * 60 * 60,
b"msgpack data",
),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert session_factory.signer.sign.calls == [pretend.call(b"123456")]
assert response.set_cookie.calls == [
pretend.call(
"session_id",
"cookie data",
max_age=12 * 60 * 60,
httponly=True,
secure=False,
),
]
class TestSessionView:
def test_has_options(self):
assert set(session_view.options) == {"uses_session"}
@pytest.mark.parametrize("uses_session", [False, None])
def test_invalid_session(self, uses_session):
context = pretend.stub()
request = pretend.stub(session=pretend.stub())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, InvalidSession)
return response
info = pretend.stub(options={})
if uses_session is not None:
info.options["uses_session"] = uses_session
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
def test_valid_session(self, monkeypatch):
add_vary_cb = pretend.call_recorder(lambda fn: fn)
add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
monkeypatch.setattr(warehouse.sessions, "add_vary", add_vary)
context = pretend.stub()
request = pretend.stub(session=Session())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, Session)
return response
info = pretend.stub(options={"uses_session": True})
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert add_vary.calls == [pretend.call("Cookie")]
assert add_vary_cb.calls == [pretend.call(view)]
def test_includeme(monkeypatch):
session_factory_obj = pretend.stub()
session_factory_cls = pretend.call_recorder(
lambda secret, url: session_factory_obj
)
monkeypatch.setattr(
warehouse.sessions,
"SessionFactory",
session_factory_cls,
)
config = pretend.stub(
set_session_factory=pretend.call_recorder(lambda factory: None),
registry=pretend.stub(
settings={
"sessions.secret": "my secret",
"sessions.url": "my url",
},
),
add_view_deriver=pretend.call_recorder(lambda *a, **kw: None),
)
includeme(config)
assert config.set_session_factory.calls == [
pretend.call(session_factory_obj),
]
assert session_factory_cls.calls == [pretend.call("my secret", "my url")]
assert config.add_view_deriver.calls == [
pretend.call(
session_view,
over="csrf_view",
under=viewderivers.INGRESS,
),
]
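# Illustrative sketch (not part of the test suite): the round-trip the factory
# tests above assume. Session data is msgpack-serialized into Redis under
# "warehouse/session/data/<sid>", and the signed sid is what ends up in the
# "session_id" cookie.
#
#   blob = msgpack.packb({"foo": "bar"}, encoding="utf8", use_bin_type=True)
#   assert msgpack.unpackb(blob, encoding="utf8", use_list=True) == {"foo": "bar"}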
|
|
#!/usr/bin/env python
import click
import sys
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import events as evapi
from solar.dblayer.model import ModelMeta
PROFILE = False
#PROFILE = True
if PROFILE:
import StringIO
import cProfile
import pstats
pr = cProfile.Profile()
pr.enable()
# TODO
# Resource for repository OR puppet apt-module in run.pp
# add-apt-repository cloud-archive:juno
# To discuss: install stuff in Docker container
# NOTE
# No copy of manifests, pull from upstream (implemented in the librarian resource)
# Official puppet manifests, not fuel-library
@click.group()
def main():
pass
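# Illustrative usage (assumes a working solar environment; the script name
# "openstack.py" is just a placeholder for wherever this file lives):
#
#   python openstack.py create_all          # node0 controller + node1 compute
#   python openstack.py prepare 3           # only create the node resources
#   python openstack.py add_compute node2   # wire an additional compute node
#   python openstack.py clear               # drop everything from the DB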
def prepare_nodes(nodes_count):
resources = vr.create('nodes', 'templates/nodes_with_transports.yaml', {"count": nodes_count})
nodes = [x for x in resources if x.name.startswith('node')]
resources = vr.create('nodes_network', 'templates/nodes_network.yaml', {"count": nodes_count})
nodes_sdn = [x for x in resources if x.name.startswith('node')]
r = {}
for node, node_sdn in zip(nodes, nodes_sdn):
r[node.name] = node
r[node_sdn.name] = node_sdn
# LIBRARIAN
librarian = vr.create('librarian_{}'.format(node.name), 'resources/librarian', {})[0]
r[librarian.name] = librarian
node.connect(librarian, {})
# NETWORKING
# TODO(bogdando) node's IPs should be populated as br-mgmt IPs, but now are hardcoded in templates
signals.connect(node, node_sdn)
node_sdn.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update'))
return r
def setup_base(node, librarian):
# MARIADB
mariadb_service = vr.create('mariadb_service1', 'resources/mariadb_service', {
'image': 'mariadb',
'port': 3306
})[0]
node.connect(mariadb_service)
# RABBIT
rabbitmq_service = vr.create('rabbitmq_service1', 'resources/rabbitmq_service/', {
'management_port': 15672,
'port': 5672,
})[0]
openstack_vhost = vr.create('openstack_vhost', 'resources/rabbitmq_vhost/', {
'vhost_name': 'openstack'
})[0]
openstack_rabbitmq_user = vr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {
'user_name': 'openstack',
'password': 'openstack_password'
})[0]
node.connect(rabbitmq_service)
rabbitmq_service.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, rabbitmq_service.name, actions=('run', 'update'))
rabbitmq_service.connect(openstack_vhost)
rabbitmq_service.connect(openstack_rabbitmq_user)
openstack_vhost.connect(openstack_rabbitmq_user, {
'vhost_name',
})
return {'mariadb_service': mariadb_service,
'rabbitmq_service1': rabbitmq_service,
'openstack_vhost': openstack_vhost,
'openstack_rabbitmq_user': openstack_rabbitmq_user}
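# Note on the connect() calls used throughout (illustrative, based on how they
# are used in this file): a dict maps the emitter's values onto differently
# named receiver inputs, while a set connects inputs that share the same name
# on both sides, e.g.:
#
#   mariadb_service.connect(keystone_db, {'port': 'login_port'})   # rename
#   keystone_db.connect(keystone_db_user, {'db_name', 'db_host'})  # same names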
def setup_keystone(node, librarian, mariadb_service, openstack_rabbitmq_user):
keystone_puppet = vr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
keystone_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, keystone_puppet.name, actions=('run', 'update'))
evapi.add_dep(openstack_rabbitmq_user.name, keystone_puppet.name, actions=('run', 'update'))
keystone_db = vr.create('keystone_db', 'resources/mariadb_db/', {
'db_name': 'keystone_db',
'login_user': 'root'
})[0]
keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_user/', {
'user_name': 'keystone',
'user_password': 'keystone',
})[0]
keystone_service_endpoint = vr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'keystone',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2.0',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2.0',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2.0',
'description': 'OpenStack Identity Service',
'type': 'identity'
})[0]
admin_tenant = vr.create('admin_tenant', 'resources/keystone_tenant', {
'tenant_name': 'admin'
})[0]
admin_user = vr.create('admin_user', 'resources/keystone_user', {
'user_name': 'admin',
'user_password': 'admin'
})[0]
admin_role = vr.create('admin_role', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
services_tenant = vr.create('services_tenant', 'resources/keystone_tenant', {
'tenant_name': 'services'
})[0]
admin_role_services = vr.create('admin_role_services', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
node.connect(keystone_db)
node.connect(keystone_db_user)
node.connect(keystone_puppet)
mariadb_service.connect(keystone_db, {
'port': 'login_port',
'root_user': 'login_user',
'root_password': 'login_password',
'ip' : 'db_host',
})
keystone_db.connect(keystone_db_user, {
'db_name',
'login_port',
'login_user',
'login_password',
'db_host'
})
node.connect(keystone_service_endpoint)
keystone_puppet.connect(keystone_service_endpoint, {
'admin_token': 'admin_token',
'admin_port': ['admin_port', 'keystone_admin_port'],
'ip': ['keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'port': ['internal_port', 'public_port'],
})
keystone_puppet.connect(admin_tenant)
keystone_puppet.connect(admin_tenant, {
'admin_port': 'keystone_port',
'ip': 'keystone_host'
})
admin_tenant.connect(admin_user)
admin_user.connect(admin_role)
admin_tenant.connect(admin_role, { 'tenant_name' })
admin_user.connect(admin_role_services)
services_tenant.connect(admin_role_services, { 'tenant_name' })
keystone_puppet.connect(services_tenant)
keystone_puppet.connect(services_tenant, {
'admin_port': 'keystone_port',
'ip': 'keystone_host'
})
keystone_db.connect(keystone_puppet, {
'db_name',
})
keystone_db_user.connect(keystone_puppet, {
'user_name': 'db_user',
'user_password': 'db_password',
})
mariadb_service.connect(keystone_puppet, {
'ip': 'db_host',
'port': 'db_port',
})
return {'keystone_puppet': keystone_puppet,
'keystone_db': keystone_db,
'keystone_db_user': keystone_db_user,
'keystone_service_endpoint': keystone_service_endpoint,
'admin_tenant': admin_tenant,
'admin_user': admin_user,
'admin_role': admin_role,
'services_tenant': services_tenant,
'admin_role_services': admin_role_services,
}
def setup_openrc(node, keystone_puppet, admin_user):
# OPENRC
openrc = vr.create('openrc_file', 'resources/openrc_file', {})[0]
node.connect(openrc)
keystone_puppet.connect(openrc, {'ip': 'keystone_host', 'admin_port':'keystone_port'})
admin_user.connect(openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'})
return {'openrc_file' : openrc}
def setup_neutron(node, librarian, rabbitmq_service, openstack_rabbitmq_user, openstack_vhost):
# NEUTRON
# Deploy chain neutron -> (plugins) -> neutron_server -> ( agents )
neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {
'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin'
})[0]
node.connect(neutron_puppet)
neutron_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, neutron_puppet.name, actions=('run', 'update'))
rabbitmq_service.connect(neutron_puppet, {
'ip': 'rabbit_host',
'port': 'rabbit_port'
})
openstack_rabbitmq_user.connect(neutron_puppet, {
'user_name': 'rabbit_user',
'password': 'rabbit_password'})
openstack_vhost.connect(neutron_puppet, {
'vhost_name': 'rabbit_virtual_host'})
return {'neutron_puppet': neutron_puppet}
def setup_neutron_api(node, mariadb_service, admin_user, keystone_puppet, services_tenant, neutron_puppet):
# NEUTRON PLUGIN AND NEUTRON API (SERVER)
neutron_plugins_ml2 = vr.create('neutron_plugins_ml2', 'resources/neutron_plugins_ml2_puppet', {})[0]
node.connect(neutron_plugins_ml2)
neutron_server_puppet = vr.create('neutron_server_puppet', 'resources/neutron_server_puppet', {
'sync_db': True,
})[0]
evapi.add_dep(neutron_puppet.name, neutron_server_puppet.name, actions=('run',))
evapi.add_dep(neutron_plugins_ml2.name, neutron_server_puppet.name, actions=('run',))
evapi.add_dep(neutron_puppet.name, neutron_plugins_ml2.name, actions=('run',))
neutron_db = vr.create('neutron_db', 'resources/mariadb_db/', {
'db_name': 'neutron_db', 'login_user': 'root'})[0]
neutron_db_user = vr.create('neutron_db_user', 'resources/mariadb_user/', {
'user_name': 'neutron', 'user_password': 'neutron', 'login_user': 'root'})[0]
neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {
'user_name': 'neutron',
'user_password': 'neutron'
})[0]
neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
evapi.add_dep(neutron_keystone_role.name, neutron_server_puppet.name, actions=('run',))
neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'neutron',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
'publicurl': 'http://{{public_ip}}:{{public_port}}',
'description': 'OpenStack Network Service',
'type': 'network'
})[0]
node.connect(neutron_db)
node.connect(neutron_db_user)
mariadb_service.connect(neutron_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(neutron_db_user, {'port': 'login_port', 'root_password': 'login_password'})
neutron_db.connect(neutron_db_user, {'db_name', 'db_host'})
neutron_db_user.connect(neutron_server_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password',
'db_host' : 'db_host'})
mariadb_service.connect(neutron_server_puppet, {
'port': 'db_port',
'ip' : 'db_host'})
node.connect(neutron_server_puppet)
admin_user.connect(neutron_server_puppet, {
'user_name': 'auth_user',
'user_password': 'auth_password',
'tenant_name': 'auth_tenant'
})
keystone_puppet.connect(neutron_server_puppet, {
'ip': 'auth_host',
'port': 'auth_port'
})
services_tenant.connect(neutron_keystone_user)
neutron_keystone_user.connect(neutron_keystone_role)
keystone_puppet.connect(neutron_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host'],
'admin_port': 'keystone_admin_port',
'admin_token': 'admin_token',
})
neutron_puppet.connect(neutron_keystone_service_endpoint, {
'ip': ['admin_ip', 'internal_ip', 'public_ip'],
'bind_port': ['admin_port', 'internal_port', 'public_port'],
})
return {'neutron_server_puppet': neutron_server_puppet,
'neutron_plugins_ml2': neutron_plugins_ml2,
'neutron_db': neutron_db,
'neutron_db_user': neutron_db_user,
'neutron_keystone_user': neutron_keystone_user,
'neutron_keystone_role': neutron_keystone_role,
'neutron_keystone_service_endpoint': neutron_keystone_service_endpoint}
def setup_neutron_agent(node, neutron_server_puppet):
# NEUTRON ML2 PLUGIN & ML2-OVS AGENT WITH GRE
neutron_agents_ml2 = vr.create('neutron_agents_ml2', 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.13' # should be the IP addr of the br-mesh int.
})[0]
node.connect(neutron_agents_ml2)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml2.name, actions=('run',))
# NEUTRON DHCP, L3, metadata agents
neutron_agents_dhcp = vr.create('neutron_agents_dhcp', 'resources/neutron_agents_dhcp_puppet', {})[0]
node.connect(neutron_agents_dhcp)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_dhcp.name, actions=('run',))
neutron_agents_l3 = vr.create('neutron_agents_l3', 'resources/neutron_agents_l3_puppet', {
# TODO(bogdando) these should come from the node network resource
'metadata_port': 8775,
'external_network_bridge': 'br-floating',
})[0]
node.connect(neutron_agents_l3)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_l3.name, actions=('run',))
neutron_agents_metadata = vr.create('neutron_agents_metadata', 'resources/neutron_agents_metadata_puppet', {
        'shared_secret': 'secret',
})[0]
node.connect(neutron_agents_metadata)
neutron_server_puppet.connect(neutron_agents_metadata, {
'auth_host', 'auth_port', 'auth_password',
'auth_tenant', 'auth_user',
})
return {'neutron_agents_ml2': neutron_agents_ml2,
'neutron_agents_dhcp': neutron_agents_dhcp,
'neutron_agents_metadata': neutron_agents_metadata}
def setup_neutron_compute(node, librarian, neutron_puppet, neutron_server_puppet):
# NEUTRON FOR COMPUTE (node1)
# Deploy chain neutron -> (plugins) -> ( agents )
name = node.name
neutron_puppet2 = vr.create('neutron_puppet_{}'.format(name), 'resources/neutron_puppet', {})[0]
neutron_puppet2.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, neutron_puppet2.name, actions=('run', 'update'))
    dep = evapi.Dep(librarian.name, 'update', state='SUCCESS',
child=neutron_puppet2.name, child_action='run')
evapi.add_event(dep)
node.connect(neutron_puppet2)
neutron_puppet.connect(neutron_puppet2, {
'rabbit_host', 'rabbit_port',
'rabbit_user', 'rabbit_password',
'rabbit_virtual_host',
'package_ensure', 'core_plugin',
})
# NEUTRON OVS PLUGIN & AGENT WITH GRE FOR COMPUTE (node1)
neutron_plugins_ml22 = vr.create('neutron_plugins_ml_{}'.format(name), 'resources/neutron_plugins_ml2_puppet', {})[0]
node.connect(neutron_plugins_ml22)
evapi.add_dep(neutron_puppet2.name, neutron_plugins_ml22.name, actions=('run',))
evapi.add_dep(neutron_server_puppet.name, neutron_plugins_ml22.name, actions=('run',))
neutron_agents_ml22 = vr.create('neutron_agents_ml_{}'.format(name), 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.14' # Should be the IP addr of the br-mesh int.
})[0]
node.connect(neutron_agents_ml22)
evapi.add_dep(neutron_puppet2.name, neutron_agents_ml22.name, actions=('run',))
evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml22.name, actions=('run',))
return {'neutron_puppet2': neutron_puppet2,
'neutron_plugins_ml22': neutron_plugins_ml22,
'neutron_agents_ml22': neutron_agents_ml22}
def setup_cinder(node, librarian, rabbitmq_service, mariadb_service, keystone_puppet, admin_user, openstack_vhost, openstack_rabbitmq_user, services_tenant):
# CINDER
cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
'db_name': 'cinder_db', 'login_user': 'root'})[0]
cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
'user_name': 'cinder', 'user_password': 'cinder'})[0]
cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
cinder_keystone_service_endpoint = vr.create(
'cinder_keystone_service_endpoint',
'resources/keystone_service_endpoint', {
'endpoint_name': 'cinder',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
'description': 'OpenStack Block Storage Service', 'type': 'volumev2'})[0]
node.connect(cinder_puppet)
cinder_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, cinder_puppet.name, actions=('run', 'update'))
node.connect(cinder_db)
node.connect(cinder_db_user)
rabbitmq_service.connect(cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
admin_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #?
openstack_vhost.connect(cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
openstack_rabbitmq_user.connect(cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
mariadb_service.connect(cinder_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'})
cinder_db.connect(cinder_db_user, {'db_name', 'db_host'})
cinder_db_user.connect(cinder_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password'})
mariadb_service.connect(cinder_puppet, {
'port': 'db_port',
'ip': 'db_host'})
keystone_puppet.connect(cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port?
services_tenant.connect(cinder_keystone_user)
cinder_keystone_user.connect(cinder_keystone_role)
cinder_keystone_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'})
mariadb_service.connect(cinder_puppet, {'ip':'ip'})
cinder_puppet.connect(cinder_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'port': ['admin_port', 'internal_port', 'public_port'],})
keystone_puppet.connect(cinder_keystone_service_endpoint, {
'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})
# CINDER GLANCE
# Deploy chain: cinder_puppet -> cinder_glance -> ( cinder_api, cinder_scheduler, cinder_volume )
cinder_glance_puppet = vr.create('cinder_glance_puppet', 'resources/cinder_glance_puppet', {})[0]
node.connect(cinder_glance_puppet)
evapi.add_dep(cinder_puppet.name, cinder_glance_puppet.name, actions=('run',))
return {'cinder_puppet': cinder_puppet,
'cinder_db': cinder_db,
'cinder_db_user': cinder_db_user,
'cinder_keystone_user': cinder_keystone_user,
'cinder_keystone_role': cinder_keystone_role,
'cinder_keystone_service_endpoint': cinder_keystone_service_endpoint,
'cinder_glance_puppet': cinder_glance_puppet}
def setup_cinder_api(node, cinder_puppet):
# CINDER API
cinder_api_puppet = vr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0]
node.connect(cinder_api_puppet)
cinder_puppet.connect(cinder_api_puppet, {
'keystone_password', 'keystone_tenant', 'keystone_user'})
cinder_puppet.connect(cinder_api_puppet, {
'keystone_host': 'keystone_auth_host',
'keystone_port': 'keystone_auth_port'})
evapi.add_react(cinder_puppet.name, cinder_api_puppet.name, actions=('update',))
return {'cinder_api_puppet': cinder_api_puppet}
def setup_cinder_scheduler(node, cinder_puppet):
# CINDER SCHEDULER
cinder_scheduler_puppet = vr.create('cinder_scheduler_puppet', 'resources/cinder_scheduler_puppet', {})[0]
node.connect(cinder_scheduler_puppet)
cinder_puppet.connect(cinder_scheduler_puppet)
evapi.add_react(cinder_puppet.name, cinder_scheduler_puppet.name, actions=('update',))
return {'cinder_scheduler_puppet': cinder_scheduler_puppet}
def setup_cinder_volume(node, cinder_puppet):
# CINDER VOLUME
cinder_volume = vr.create('cinder_volume_{}'.format(node.name), 'resources/volume_group',
{'path': '/root/cinder.img', 'volume_name': 'cinder-volume'})[0]
node.connect(cinder_volume)
cinder_volume_puppet = vr.create('cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0]
node.connect(cinder_volume_puppet)
cinder_puppet.connect(cinder_volume_puppet)
evapi.add_react(cinder_puppet.name, cinder_volume_puppet.name, actions=('update',))
cinder_volume.connect(cinder_volume_puppet, {'volume_name': 'volume_group'})
return {'cinder_volume_puppet': cinder_volume_puppet}
def setup_nova(node, librarian, mariadb_service, rabbitmq_service, admin_user, openstack_vhost, services_tenant, keystone_puppet, openstack_rabbitmq_user):
# NOVA
nova_puppet = vr.create('nova_puppet', 'resources/nova_puppet', {})[0]
nova_db = vr.create('nova_db', 'resources/mariadb_db/', {
'db_name': 'nova_db',
'login_user': 'root'})[0]
nova_db_user = vr.create('nova_db_user', 'resources/mariadb_user/', {
'user_name': 'nova',
'user_password': 'nova',
'login_user': 'root'})[0]
nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {
'user_name': 'nova',
'user_password': 'nova'})[0]
nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'nova',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
'description': 'OpenStack Compute Service',
'type': 'compute'})[0]
node.connect(nova_puppet)
nova_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, nova_puppet.name, actions=('run', 'update'))
node.connect(nova_db)
node.connect(nova_db_user)
mariadb_service.connect(nova_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(nova_db_user, {
'port': 'login_port',
'root_password': 'login_password'})
admin_user.connect(nova_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #?
openstack_vhost.connect(nova_puppet, {'vhost_name': 'rabbit_virtual_host'})
nova_db.connect(nova_db_user, {'db_name', 'db_host'})
services_tenant.connect(nova_keystone_user)
nova_keystone_user.connect(nova_keystone_role)
keystone_puppet.connect(nova_puppet, {
'ip': 'keystone_host',
'admin_port': 'keystone_port'})
nova_keystone_user.connect(nova_puppet, {
'user_name': 'keystone_user',
'tenant_name': 'keystone_tenant',
'user_password': 'keystone_password'})
rabbitmq_service.connect(nova_puppet, {
'ip': 'rabbit_host', 'port': 'rabbit_port'})
openstack_rabbitmq_user.connect(nova_puppet, {
'user_name': 'rabbit_userid',
'password': 'rabbit_password'})
keystone_puppet.connect(nova_keystone_service_endpoint, {
'ip': 'keystone_host',
'admin_port': 'keystone_admin_port',
'admin_token': 'admin_token'})
mariadb_service.connect(nova_puppet, {
'ip':'db_host',
'port': 'db_port'})
nova_db_user.connect(nova_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password'})
nova_puppet.connect(nova_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'public_ip', 'internal_ip', 'admin_ip'],
'port': ['admin_port', 'internal_port', 'public_port'],
})
return {'nova_puppet': nova_puppet,
'nova_db': nova_db,
'nova_db_user': nova_db_user,
'nova_keystone_user': nova_keystone_user,
'nova_keystone_role': nova_keystone_role,
'nova_keystone_service_endpoint': nova_keystone_service_endpoint}
def setup_nova_api(node, nova_puppet, neutron_agents_metadata):
# NOVA API
nova_api_puppet = vr.create('nova_api_puppet', 'resources/nova_api_puppet', {})[0]
node.connect(nova_api_puppet)
nova_puppet.connect(nova_api_puppet, {
'keystone_tenant': 'admin_tenant_name',
'keystone_user': 'admin_user',
'keystone_password': 'admin_password',
'keystone_host': 'auth_host',
'keystone_port': 'auth_port'})
evapi.add_react(nova_puppet.name, nova_api_puppet.name, actions=('update',))
nova_api_puppet.connect(neutron_agents_metadata, {'ip': 'metadata_ip'})
return {'nova_api_puppet': nova_api_puppet}
def setup_nova_conductor(node, nova_puppet, nova_api_puppet):
# NOVA CONDUCTOR
nova_conductor_puppet = vr.create('nova_conductor_puppet', 'resources/nova_conductor_puppet', {})[0]
node.connect(nova_conductor_puppet)
nova_puppet.connect(nova_conductor_puppet)
evapi.add_dep(nova_api_puppet.name, nova_conductor_puppet.name, actions=('run',))
evapi.add_react(nova_puppet.name, nova_conductor_puppet.name, actions=('update',))
return {'nova_conductor': nova_conductor_puppet}
def setup_nova_scheduler(node, nova_puppet, nova_api_puppet):
# NOVA SCHEDULER
# NOTE(bogdando) Generic service is used. Package and service names for Ubuntu case
# come from https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/params.pp
nova_scheduler_puppet = vr.create('nova_scheduler_puppet', 'resources/nova_generic_service_puppet', {
'title' : 'scheduler', 'package_name': 'nova-scheduler', 'service_name': 'nova-scheduler',
})[0]
node.connect(nova_scheduler_puppet)
evapi.add_dep(nova_puppet.name, nova_scheduler_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_scheduler_puppet.name, actions=('run',))
evapi.add_react(nova_puppet.name, nova_scheduler_puppet.name, actions=('update',))
return {'nova_scheduler_puppet': nova_scheduler_puppet}
def setup_nova_compute(node, librarian, nova_puppet, nova_api_puppet, neutron_server_puppet, neutron_keystone_service_endpoint, glance_api_puppet):
# NOVA COMPUTE
# Deploy chain (nova, node_networking(TODO)) -> (nova_compute_libvirt, nova_neutron) -> nova_compute
name = node.name
nova_compute_puppet = vr.create('nova_compute_puppet_{}'.format(name), 'resources/nova_compute_puppet', {})[0]
# TODO (bogdando) figure out how to use it for multiple glance api servers
nova_puppet2 = vr.create('nova_puppet_{}'.format(name), 'resources/nova_puppet', {
'glance_api_servers': '{{glance_api_servers_host}}:{{glance_api_servers_port}}'
})[0]
nova_puppet.connect(nova_puppet2, {
'ensure_package', 'rabbit_host',
'rabbit_password', 'rabbit_port', 'rabbit_userid',
'rabbit_virtual_host', 'db_user', 'db_password',
'db_name', 'db_host', 'keystone_password',
'keystone_port', 'keystone_host', 'keystone_tenant',
'keystone_user',
})
# TODO(bogdando): Make a connection for nova_puppet2.glance_api_servers = "glance_api_puppet.ip:glance_api_puppet.bind_port"
node.connect(nova_puppet2)
nova_puppet2.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, nova_puppet2.name, actions=('run', 'update'))
    dep = evapi.Dep(librarian.name, 'update', state='SUCCESS',
child=nova_puppet2.name, child_action='run')
evapi.add_event(dep)
node.connect(nova_compute_puppet)
evapi.add_dep(nova_puppet2.name, nova_compute_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_compute_puppet.name, actions=('run',))
evapi.add_react(nova_puppet2.name, nova_compute_puppet.name, actions=('run', 'update'))
# NOVA COMPUTE LIBVIRT, NOVA_NEUTRON
# NOTE(bogdando): changes nova config, so should notify nova compute service
nova_compute_libvirt_puppet = vr.create('nova_compute_libvirt_puppet_{}'.format(name), 'resources/nova_compute_libvirt_puppet', {})[0]
node.connect(nova_compute_libvirt_puppet)
evapi.add_dep(nova_puppet2.name, nova_compute_libvirt_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_compute_libvirt_puppet.name, actions=('run',))
# compute configuration for neutron, use http auth/endpoint protocols, keystone v2 auth hardcoded for the resource
nova_neutron_puppet = vr.create('nova_neutron_puppet_{}'.format(name), 'resources/nova_neutron_puppet', {})[0]
node.connect(nova_neutron_puppet)
evapi.add_dep(nova_puppet2.name, nova_neutron_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_neutron_puppet.name, actions=('run',))
neutron_server_puppet.connect(nova_neutron_puppet, {
'auth_password': 'neutron_admin_password',
'auth_user': 'neutron_admin_username',
'auth_type': 'neutron_auth_strategy',
'auth_host': 'auth_host', 'auth_port': 'auth_port',
'auth_protocol': 'auth_protocol',
})
neutron_keystone_service_endpoint.connect(nova_neutron_puppet, {
'internal_ip':'neutron_endpoint_host',
'internal_port':'neutron_endpoint_port',
})
# Update glance_api_service for nova compute
glance_api_puppet.connect(nova_puppet2, {
'ip': 'glance_api_servers_host',
'bind_port': 'glance_api_servers_port'
})
# signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
# signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
# signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
return {'nova_compute_puppet': nova_compute_puppet,
'nova_puppet2': nova_puppet2,
'nova_compute_libvirt_puppet': nova_compute_libvirt_puppet,
'nova_neutron_puppet': nova_neutron_puppet,
'neutron_server_puppet': neutron_server_puppet}
def setup_glance_api(node, librarian, mariadb_service, admin_user, keystone_puppet, services_tenant, cinder_glance_puppet):
# GLANCE (base and API)
glance_api_puppet = vr.create('glance_api_puppet', 'resources/glance_puppet', {})[0]
glance_db_user = vr.create('glance_db_user', 'resources/mariadb_user/', {
'user_name': 'glance', 'user_password': 'glance', 'login_user': 'root'})[0]
glance_db = vr.create('glance_db', 'resources/mariadb_db/', {
'db_name': 'glance', 'login_user': 'root'})[0]
glance_keystone_user = vr.create('glance_keystone_user', 'resources/keystone_user', {
'user_name': 'glance', 'user_password': 'glance123'})[0]
glance_keystone_role = vr.create('glance_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
glance_keystone_service_endpoint = vr.create(
'glance_keystone_service_endpoint',
'resources/keystone_service_endpoint', {
'endpoint_name': 'glance',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
'publicurl': 'http://{{public_ip}}:{{public_port}}',
'description': 'OpenStack Image Service', 'type': 'image'})[0]
node.connect(glance_api_puppet)
glance_api_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, glance_api_puppet.name, actions=('run', 'update'))
node.connect(glance_db)
node.connect(glance_db_user)
admin_user.connect(glance_api_puppet, {
'user_name': 'keystone_user', 'user_password': 'keystone_password',
'tenant_name': 'keystone_tenant'}) #?
mariadb_service.connect(glance_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(glance_db_user, {'port': 'login_port', 'root_password': 'login_password'})
glance_db.connect(glance_db_user, {'db_name', 'db_host'})
glance_db_user.connect(glance_api_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password',
'db_host' : 'db_host'})
mariadb_service.connect(glance_api_puppet,{
'port': 'db_port',
'ip': 'db_host'})
keystone_puppet.connect(glance_api_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port?
services_tenant.connect(glance_keystone_user)
glance_keystone_user.connect(glance_keystone_role)
glance_keystone_user.connect(glance_api_puppet, {
'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant',
'user_password': 'keystone_password'})
mariadb_service.connect(glance_api_puppet, {'ip':'ip'})
glance_api_puppet.connect(glance_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'bind_port': ['admin_port', 'internal_port', 'public_port'],})
keystone_puppet.connect(glance_keystone_service_endpoint, {
'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})
# Update glance_api_service for cinder
glance_api_puppet.connect(cinder_glance_puppet, {
'ip': 'glance_api_servers_host',
'bind_port': 'glance_api_servers_port'
})
return {'glance_api_puppet': glance_api_puppet,
'glance_db_user': glance_db_user,
'glance_db': glance_db,
'glance_keystone_user': glance_keystone_user,
'glance_keystone_role': glance_keystone_role,
'glance_keystone_service_endpoint': glance_keystone_service_endpoint}
def setup_glance_registry(node, glance_api_puppet):
# GLANCE REGISTRY
glance_registry_puppet = vr.create('glance_registry_puppet', 'resources/glance_registry_puppet', {})[0]
node.connect(glance_registry_puppet)
glance_api_puppet.connect(glance_registry_puppet)
evapi.add_react(glance_api_puppet.name, glance_registry_puppet.name, actions=('update',))
# API and registry should not listen same ports
# should not use the same log destination and a pipeline,
# so disconnect them and restore the defaults
signals.disconnect_receiver_by_input(glance_registry_puppet, 'bind_port')
signals.disconnect_receiver_by_input(glance_registry_puppet, 'log_file')
signals.disconnect_receiver_by_input(glance_registry_puppet, 'pipeline')
glance_registry_puppet.update({
'bind_port': 9191,
'log_file': '/var/log/glance/registry.log',
'pipeline': 'keystone',
})
return {'glance_registry_puppet': glance_registry_puppet}
def validate():
has_errors = False
    # Validate every resource currently stored in the DB.
    for r in resource.load_all():
if not isinstance(r, resource.Resource):
continue
print 'Validating {}'.format(r.name)
errors = validation.validate_resource(r)
if errors:
has_errors = True
print 'ERROR: %s: %s' % (r.name, errors)
if has_errors:
sys.exit(1)
def create_controller(node):
r = {r.name: r for r in resource.load_all()}
librarian_node = 'librarian_{}'.format(node)
r.update(setup_base(r[node], r[librarian_node]))
r.update(setup_keystone(r[node], r[librarian_node],
r['mariadb_service'], r['openstack_rabbitmq_user']))
    r.update(setup_openrc(r[node], r['keystone_puppet'], r['admin_user']))
    r.update(setup_neutron(r[node], r[librarian_node], r['rabbitmq_service1'],
                           r['openstack_rabbitmq_user'], r['openstack_vhost']))
    r.update(setup_neutron_api(r[node], r['mariadb_service'], r['admin_user'],
                               r['keystone_puppet'], r['services_tenant'], r['neutron_puppet']))
    r.update(setup_neutron_agent(r[node], r['neutron_server_puppet']))
    r.update(setup_cinder(r[node], r[librarian_node], r['rabbitmq_service1'],
                          r['mariadb_service'], r['keystone_puppet'], r['admin_user'],
                          r['openstack_vhost'], r['openstack_rabbitmq_user'], r['services_tenant']))
    r.update(setup_cinder_api(r[node], r['cinder_puppet']))
    r.update(setup_cinder_scheduler(r[node], r['cinder_puppet']))
    r.update(setup_cinder_volume(r[node], r['cinder_puppet']))
    r.update(setup_nova(r[node], r[librarian_node], r['mariadb_service'], r['rabbitmq_service1'],
                        r['admin_user'], r['openstack_vhost'], r['services_tenant'],
                        r['keystone_puppet'], r['openstack_rabbitmq_user']))
    r.update(setup_nova_api(r[node], r['nova_puppet'], r['neutron_agents_metadata']))
    r.update(setup_nova_conductor(r[node], r['nova_puppet'], r['nova_api_puppet']))
    r.update(setup_nova_scheduler(r[node], r['nova_puppet'], r['nova_api_puppet']))
    r.update(setup_glance_api(r[node], r[librarian_node], r['mariadb_service'], r['admin_user'],
                              r['keystone_puppet'], r['services_tenant'],
                              r['cinder_glance_puppet']))
    r.update(setup_glance_registry(r[node], r['glance_api_puppet']))
return r
def create_compute(node):
r = {r.name: r for r in resource.load_all()}
librarian_node = 'librarian_{}'.format(node)
res = {}
res.update(setup_neutron_compute(r[node], r[librarian_node], r['neutron_puppet'], r['neutron_server_puppet']))
res.update(setup_nova_compute(r[node], r[librarian_node], r['nova_puppet'], r['nova_api_puppet'],
r['neutron_server_puppet'], r['neutron_keystone_service_endpoint'], r['glance_api_puppet']))
    return res
@click.command()
def create_all():
ModelMeta.remove_all()
r = prepare_nodes(2)
r.update(create_controller('node0'))
r.update(create_compute('node1'))
print '\n'.join(r.keys())
@click.command()
@click.argument('nodes_count', type=int)
def prepare(nodes_count):
r = prepare_nodes(nodes_count)
print '\n'.join(r.keys())
@click.command()
@click.argument('node')
def add_compute(node):
r = create_compute(node)
print '\n'.join(r.keys())
@click.command()
@click.argument('node')
def add_controller(node):
r = create_controller(node)
print '\n'.join(r.keys())
@click.command()
def clear():
ModelMeta.remove_all()
if __name__ == '__main__':
main.add_command(create_all)
main.add_command(prepare)
main.add_command(add_controller)
main.add_command(add_compute)
main.add_command(clear)
    # standalone_mode=False keeps click from calling sys.exit(), so the
    # profiling block below still runs when PROFILE is enabled.
    main(standalone_mode=False)
if PROFILE:
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print s.getvalue()
sys.exit(0)
|
|
"""
contains all available validation functions
"""
import common_utils as utils
import re
import logging
import output_messages
import basedefs
import types
import traceback
import os
import os.path
import tempfile
import cracklib
from setup_controller import Controller
def validateNFSMountPoint(param, options=[]):
""" Validates the correct mount point for NFS local storage """
if validateMountPoint(param) and validateDirSize(param, basedefs.CONST_MINIMUM_SPACE_ISODOMAIN):
return True
return False
def validateMountPoint(path):
logging.info("validating %s as a valid mount point" % (path))
if not utils.verifyStringFormat(path, "^\/[\w\_\-\s]+(\/[\w\_\-\s]+)*\/?$"):
print output_messages.INFO_VAL_PATH_NAME_INVALID
return False
if _isPathInExportFs(path):
print output_messages.INFO_VAL_PATH_NAME_IN_EXPORTS
return False
if os.path.exists(path) and len(os.listdir(path)):
print output_messages.INFO_VAR_PATH_NOT_EMPTY % path
return False
if not _isPathWriteable(_getBasePath(path)):
print output_messages.INFO_VAL_PATH_NOT_WRITEABLE
return False
return True
def validateDirSize(path, size):
availableSpace = utils.getAvailableSpace(_getBasePath(path))
if availableSpace < size:
print output_messages.INFO_VAL_PATH_SPACE % (path,
utils.transformUnits(availableSpace),
utils.transformUnits(size))
return False
return True
def validateInteger(param, options=[]):
try:
int(param)
return True
    except (ValueError, TypeError):
logging.warn("validateInteger('%s') - failed" %(param))
print output_messages.INFO_VAL_NOT_INTEGER
return False
def validatePort(param, options = []):
#TODO: add actual port check with socket open
logging.debug("Validating %s as a valid TCP Port" % (param))
minVal = 0
controller = Controller()
isProxyEnabled = utils.compareStrIgnoreCase(controller.CONF["OVERRIDE_HTTPD_CONFIG"], "yes")
if not isProxyEnabled:
minVal = 1024
if not validateInteger(param, options):
return False
port = int(param)
if not (port > minVal and port < 65535) :
logging.warn(output_messages.INFO_VAL_PORT_NOT_RANGE %(minVal))
print output_messages.INFO_VAL_PORT_NOT_RANGE %(minVal)
return False
if isProxyEnabled and param in[basedefs.JBOSS_HTTP_PORT, basedefs.JBOSS_HTTPS_PORT, basedefs.JBOSS_AJP_PORT]:
logging.warn(output_messages.INFO_VAL_PORT_OCCUPIED_BY_JBOSS %(param))
print output_messages.INFO_VAL_PORT_OCCUPIED_BY_JBOSS %(param)
return False
(portOpen, process, pid) = utils.isTcpPortOpen(param)
if portOpen:
logging.warn(output_messages.INFO_VAL_PORT_OCCUPIED % (param, process, pid))
print output_messages.INFO_VAL_PORT_OCCUPIED % (param, process, pid)
return False
if isProxyEnabled and not checkAndSetHttpdPortPolicy(param):
logging.warn(output_messages.INFO_VAL_FAILED_ADD_PORT_TO_HTTP_POLICY, port)
print output_messages.INFO_VAL_FAILED_ADD_PORT_TO_HTTP_POLICY % port
return False
return True
def checkAndSetHttpdPortPolicy(port):
def parsePorts(portsStr):
ports = []
for part in portsStr.split(","):
part = part.strip().split("-")
if len(part) > 1:
                # semanage port ranges are inclusive, so include the upper bound
                for p in range(int(part[0]), int(part[1]) + 1):
                    ports.append(p)
else:
ports.append(int(part[0]))
return ports
newPort = int(port)
cmd = [
basedefs.EXEC_SEMANAGE, "port", "-l",
]
out, rc = utils.execCmd(cmdList=cmd) #, "-t", "http_port_t"])
if rc:
return False
httpPortsList = []
pattern = re.compile("^http_port_t\s*tcp\s*([0-9, \-]*)$")
for line in out.splitlines():
httpPortPolicy = re.match(pattern, line)
if httpPortPolicy:
httpPortsList = parsePorts(httpPortPolicy.groups()[0])
logging.debug("http_port_t = %s"%(httpPortsList))
if newPort in httpPortsList:
return True
else:
cmd = [
basedefs.EXEC_SEMANAGE,
"port",
"-a",
"-t", "http_port_t",
"-p", "tcp",
"%d"%(newPort),
]
out, rc = utils.execCmd(cmdList=cmd, failOnError=False, usePipeFiles=True)
if rc:
logging.error(out)
return False
return True
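# Illustrative example (assumed output format) of the `semanage port -l` line
# parsed above and what parsePorts() returns for it:
#
#   http_port_t                    tcp      80, 81, 443, 488, 8008, 8009, 8443, 9000
#   -> [80, 81, 443, 488, 8008, 8009, 8443, 9000]
#
# A range entry such as "8080-8082" expands to every port in that range.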
def validateRemotePort(param, options = []):
    # Validate that the port is an integer in the valid TCP port range
logging.debug("Validating %s as a valid TCP Port" % (param))
if validateInteger(param, options):
port = int(param)
if (port > 0 and port < 65535):
return True
else:
logging.warn("validatePort('%s') - failed" %(param))
print output_messages.INFO_VAL_PORT_NOT_RANGE
return False
def validateStringNotEmpty(param, options=[]):
if type(param) != types.StringType or len(param) == 0:
logging.warn("validateStringNotEmpty('%s') - failed" %(param))
print output_messages.INFO_VAL_STRING_EMPTY
return False
else:
return True
def validatePassword(param, options=[]):
logging.debug("Validating password")
if not validateStringNotEmpty(param, options):
return False
try:
cracklib.FascistCheck(param)
except:
logging.warn("Password failed check")
logging.warn(traceback.format_exc())
print output_messages.WARN_WEAK_PASS
return True
def validateOptions(param, options=[]):
logging.info("Validating %s as part of %s"%(param, options))
if not validateStringNotEmpty(param, options):
return False
if "yes" in options and param.lower() == "y":
return True
if "no" in options and param.lower() == "n":
return True
if param.lower() in [option.lower() for option in options]:
return True
print output_messages.INFO_VAL_NOT_IN_OPTIONS % (", ".join(options))
return False
def validateOverrideHttpdConfAndChangePortsAccordingly(param, options=[]):
"""
This validation function is specific for the OVERRIDE_HTTPD_CONF param and it does more than validating the answer.
It actually changes the default HTTP/S ports in case the user choose not to override the httpd configuration.
"""
logging.info("validateOverrideHttpdConfAndChangePortsAccordingly %s as part of %s"%(param, options))
retval = validateOptions(param, options)
if retval and param.lower() == "no":
logging.debug("Changing HTTP_PORT & HTTPS_PORT to the default jboss values (8080 & 8443)")
controller = Controller()
utils.setHttpPortsToNonProxyDefault(controller)
elif retval:
#stopping httpd service (in case it's up) when the configuration can be overridden
logging.debug("stopping httpd service")
utils.Service(basedefs.HTTPD_SERVICE_NAME).stop()
return retval
def validateDomain(param, options=[]):
"""
Validate domain name
"""
logging.info("validating %s as a valid domain string" % (param))
(errMsg, rc) = _validateString(param, 1, 1024, "^[\w\-\_]+\.[\w\.\-\_]+\w+$")
# Right now we print a generic error, might want to change it in the future
if rc != 0:
print output_messages.INFO_VAL_NOT_DOMAIN
return False
else:
return True
def validateUser(param, options=[]):
"""
Validate Auth Username
Setting a logical max value of 256
"""
logging.info("validating %s as a valid user name" % (param))
(errMsg, rc) = _validateString(param, 1, 256, "^\w[\w\.\-\_\%\@]{2,}$")
# Right now we print a generic error, might want to change it in the future
if rc != 0:
print output_messages.INFO_VAL_NOT_USER
return False
else:
return True
def validateRemoteHost(param, options=[]):
""" Validate that the we are working with remote DB host
"""
# If we received localhost, use default flow.
# If not local, REMOTE_DB group is run.
# It means returning True if remote, and False if local
if "DB_REMOTE_INSTALL" in param.keys() and param["DB_REMOTE_INSTALL"] == "remote":
return True
else:
return False
def validateRemoteDB(param={}, options=[]):
""" Ensure, that params provided for the remote DB are
working, and if not, issue the correct error.
"""
logging.info("Validating %s as a RemoteDb" % param["DB_HOST"])
if utils.localHost(param["DB_HOST"]):
logging.info("%s is a local host, no connection checks needed" % param["DB_HOST"])
return True
if "DB_ADMIN" not in param.keys():
param["DB_ADMIN"] = basedefs.DB_ADMIN
param["DB_PORT"] = basedefs.DB_PORT
param["DB_PASS"] = param["DB_LOCAL_PASS"]
else:
param["DB_PASS"] = param["DB_REMOTE_PASS"]
# Create a new pgpass, store previous in backupFile
backupFile = _createTempPgPass(param["DB_ADMIN"], param["DB_HOST"],
param["DB_PORT"], param["DB_PASS"])
# Now, let's check credentials:
try:
# Connection check
_checkDbConnection(param["DB_ADMIN"], param["DB_HOST"], param["DB_PORT"])
# DB Create check
_checkCreateDbPrivilege(param["DB_ADMIN"], param["DB_HOST"], param["DB_PORT"])
        # UUID extension check.
_checkUUIDExtension(param["DB_ADMIN"], param["DB_HOST"], param["DB_PORT"])
# Delete DB check
_checkDropDbPrivilege(param["DB_ADMIN"], param["DB_HOST"], param["DB_PORT"])
# Everything is fine, return True
return True
except Exception,e:
# Something failed, print the error on screen and return False
print e
return False
finally:
# if the test DB was created, drop it
sqlQuery = "DROP DATABASE IF EXISTS ovirt_engine_test;"
utils.execRemoteSqlCommand(param["DB_ADMIN"],
param["DB_HOST"],
param["DB_PORT"],
basedefs.DB_POSTGRES, sqlQuery, False)
# restore the original pgpass file in all cases
if os.path.exists(backupFile):
os.rename(backupFile, basedefs.DB_PASS_FILE)
def validateFQDN(param, options=[]):
logging.info("Validating %s as a FQDN"%(param))
if not validateDomain(param,options):
return False
try:
#get set of IPs
ipAddresses = utils.getConfiguredIps()
if len(ipAddresses) < 1:
logging.error("Could not find any configured IP address on the host")
raise Exception(output_messages.ERR_EXP_CANT_FIND_IP)
#resolve fqdn
pattern = 'Address: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
resolvedAddresses = _getPatternFromNslookup(param, pattern)
if len(resolvedAddresses) < 1:
logging.error("Failed to resolve %s"%(param))
print output_messages.ERR_DIDNT_RESOLVED_IP%(param)
return False
        #string is generated here since we use it in all later error messages
prettyString = " ".join(["%s"%string for string in resolvedAddresses])
#compare found IP with list of local IPs and match.
if not resolvedAddresses.issubset(ipAddresses):
logging.error("the following address(es): %s are not configured on this host"%(prettyString))
#different grammar for plural and single
if len(resolvedAddresses) > 1:
print output_messages.ERR_IPS_NOT_CONFIGED%(prettyString, param)
else:
print output_messages.ERR_IPS_NOT_CONFIGED_ON_INT%(prettyString, param)
return False
#reverse resolved IP and compare with given fqdn
counter = 0
pattern = '[\w\.-]+\s+name\s\=\s([\w\.\-]+)\.'
for address in resolvedAddresses:
addressSet = _getPatternFromNslookup(address, pattern)
reResolvedAddress = None
if len(addressSet) > 0:
reResolvedAddress = addressSet.pop()
if reResolvedAddress == param:
counter += 1
else:
logging.warn("%s did not reverse-resolve into %s"%(address,param))
if counter < 1:
logging.error("The following addresses: %s did not reverse resolve into %s"%(prettyString, param))
#different grammar for plural and single
if len(resolvedAddresses) > 1:
print output_messages.ERR_IPS_HAS_NO_PTR%(prettyString, param)
else:
print output_messages.ERR_IP_HAS_NO_PTR%(prettyString, param)
return False
#conditions passed
return True
except:
logging.error(traceback.format_exc())
raise
def validateIsoDomainName(param, options=[]):
"""
Validate ISO domain name against
the required schema (allowed chars)
and max allowed length
"""
logging.info("validating iso domain name")
(errMsg, rc) = _validateString(param, 1, basedefs.CONST_STORAGE_DOMAIN_NAME_SIZE_LIMIT, "^[a-zA-Z0-9_-]+$")
if rc == 3:
# We want to print a specific error
print output_messages.INFO_VAL_ISO_DOMAIN_ILLEGAL_CHARS
return False
elif rc != 0:
print errMsg
return False
else:
return True
def validateOrgName(param, options=[]):
"""
Organization name length must be limited
otherwise CA creation fails
"""
logging.info("validating organization name")
# Filter out special chars: "," "%" "$" "@" "#", "&" "*" "!"
(errMsg, rc) = _validateString(param, 1, basedefs.CONST_ORG_NAME_SIZE_LIMIT, "^[^,\+\%\$\@\#&\*\!]+$")
if rc == 3:
# We want to print a specific error
print output_messages.INFO_VAL_ORG_NAME_ILLEGAL_CHARS
return False
elif rc != 0:
print errMsg
return False
else:
return True
def validatePing(param, options=[]):
"""
Check that provided host answers to ping
"""
if validateStringNotEmpty(param):
cmd = [
"/bin/ping",
"-c", "1",
"%s" % param,
]
out, rc = utils.execCmd(cmdList=cmd)
if rc == 0:
return True
print "\n" + output_messages.ERR_PING + ".\n"
return False
def _checkDbConnection(dbAdminUser, dbHost, dbPort):
""" _checkDbConnection checks connection to the DB"""
# Connection check
logging.info("Trying to connect to the remote database with provided credentials.")
out, rc = utils.execRemoteSqlCommand(dbAdminUser, dbHost, dbPort,
basedefs.DB_POSTGRES, "select 1")
    # If the error is in "SELECT 1", it means that we have a problem with the basic DB connection.
if rc:
logging.error(output_messages.ERR_DB_CONNECTION % dbHost)
raise Exception("\n" + output_messages.ERR_DB_CONNECTION % dbHost + "\n")
else:
logging.info("Successfully connected to the DB host %s." % dbHost)
def _checkCreateDbPrivilege(dbAdminUser, dbHost, dbPort):
""" _checkCreateDbPrivilege checks CREATE DB privilege on DB server"""
logging.info("Creating database 'ovirt_engine_test' on remote server.")
out, rc = utils.execRemoteSqlCommand(dbAdminUser, dbHost, dbPort,
basedefs.DB_POSTGRES, "CREATE DATABASE ovirt_engine_test")
# Error in "CREATE DATABASE", meaning we don't have enough privileges to create database.
if rc:
logging.error(output_messages.ERR_DB_CREATE_PRIV, dbHost)
raise Exception("\n" + output_messages.ERR_DB_CREATE_PRIV % dbHost + "\n")
else:
logging.info("Successfully created temp database on server %s." % dbHost)
def _checkDropDbPrivilege(dbAdminUser, dbHost, dbPort):
""" _checkCreateDbPrivilege checks CREATE DB privilege on DB server"""
logging.info("Deleting the test database from the remote server")
out, rc = utils.execRemoteSqlCommand(dbAdminUser, dbHost, dbPort,
basedefs.DB_POSTGRES, "DROP DATABASE ovirt_engine_test")
# Error in "DROP DATABASE", meaning we don't have enough privileges to drop database.
if rc:
logging.error(output_messages.ERR_DB_DROP_PRIV % dbHost)
raise Exception("\n" + output_messages.ERR_DB_DROP_PRIV % dbHost + ".\n")
else:
logging.info("Successfully deleted database on server %s." % dbHost)
def _checkUUIDExtension(dbAdminUser, dbHost, dbPort):
""" Check that UUID extension is already loaded and raise Exception if not"""
logging.info("Checking that uuid extension is loaded by default on the remote server")
out, rc = utils.execRemoteSqlCommand(dbAdminUser, dbHost, dbPort,
"ovirt_engine_test",
"SELECT uuid_generate_v1();")
# Extension was found
if not rc and out and "1 row" in out:
logging.info("Successfully passed UUID check")
else:
logging.error(output_messages.ERR_DB_UUID)
raise Exception(output_messages.ERR_DB_UUID)
def _createTempPgPass(dbAdminUser, dbHost, dbPort, dbPass):
"""docstring for _createTempPgPass"""
#backup existing .pgpass
backupFile = "%s.%s" % (basedefs.DB_PASS_FILE, utils.getCurrentDateTime())
try:
if (os.path.exists(basedefs.DB_PASS_FILE)):
logging.debug("found existing pgpass file, backing current to %s for validation" % (backupFile))
os.rename(basedefs.DB_PASS_FILE, backupFile)
with open(basedefs.DB_PASS_FILE, "w") as pgPassFile:
pgPassFile.write("%s:%s:*:%s:%s" %
(dbHost, dbPort, dbAdminUser, dbPass))
#make sure the file has still 0600 mod
os.chmod(basedefs.DB_PASS_FILE, 0600)
return backupFile
except:
# Restore original file
if os.path.exists(backupFile):
os.rename(backupFile, basedefs.DB_PASS_FILE)
raise Exception(output_messages.ERR_BACKUP_PGPASS % backupFile)
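# For reference (values are assumed, illustrative only): the temporary .pgpass
# entry written above follows the libpq format host:port:database:user:password,
# e.g. "db.example.com:5432:*:postgres:secret", where "*" matches any database.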
def _validateString(string, minLen, maxLen, regex=".*"):
"""
Generic func to verify a string
match its min/max length
and doesn't contain illegal chars
    The func returns a return code according to the error,
    plus a default error message.
    The calling func can decide whether to use the default error msg
    or a more specific one according to the RC.
Return codes:
1 - string length is less than min
    2 - string length is more than max
3 - string contain illegal chars
0 - success
"""
# String length is less than minimum allowed
if len(string) < minLen:
msg = output_messages.INFO_STRING_LEN_LESS_THAN_MIN % (minLen)
return(msg, 1)
# String length is more than max allowed
elif len(string) > maxLen:
msg = output_messages.INFO_STRING_EXCEEDS_MAX_LENGTH % (maxLen)
return(msg, 2)
# String contains illegal chars
elif not utils.verifyStringFormat(string, regex):
return(output_messages.INFO_STRING_CONTAINS_ILLEGAL_CHARS, 3)
else:
# Success
return (None, 0)
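# Quick illustration of the return codes above (hypothetical inputs):
#   _validateString("ab", 3, 10)             -> (msg, 1)   too short
#   _validateString("a" * 20, 1, 10)         -> (msg, 2)   too long
#   _validateString("bad!", 1, 10, "^\w+$")  -> (msg, 3)   illegal chars
#   _validateString("good", 1, 10, "^\w+$")  -> (None, 0)  success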
def _getPatternFromNslookup(address, pattern):
rePattern = re.compile(pattern)
addresses = set()
output = utils.nslookup(address)
    lines = output.splitlines()
    #do not go over the first 2 lines in nslookup output
    for line in lines[2:]:
found = rePattern.search(line)
if found:
foundAddress = found.group(1)
logging.debug("%s resolved into %s"%(address, foundAddress))
addresses.add(foundAddress)
return addresses
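# Illustrative only (typical nslookup output, values assumed): given
#   Server:     192.168.0.1
#   Address:    192.168.0.1#53
#
#   Name:   myhost.example.com
#   Address: 192.168.0.10
# the forward-lookup pattern used in validateFQDN ("Address: (...)") collects
# {'192.168.0.10'}; the first two lines, which describe the DNS server itself,
# are skipped.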
def _isPathInExportFs(path):
if not os.path.exists(basedefs.FILE_ETC_EXPORTS):
return False
    with open(basedefs.FILE_ETC_EXPORTS) as exportsFile:
        fileContent = exportsFile.readlines()
for line in fileContent:
if utils.verifyStringFormat(line, "^%s\s+.+" % (path)):
return True
return False
def _getBasePath(path):
if os.path.exists(path):
return path
# Iterate up in the tree structure until we get an
# existing path
return _getBasePath(os.path.dirname(path.rstrip("/")))
def _isPathWriteable(path):
try:
logging.debug("attempting to write temp file to %s" % (path))
tempfile.TemporaryFile(dir=path)
return True
except:
logging.warning(traceback.format_exc())
logging.warning("%s is not writeable" % path)
return False
def validateIpaAndHttpdStatus(conf):
""""
This function serve as a pre-condition to the ports group. This function will always return True,
Therefore the ports group will always be handled, but this function may changes the flow dynamically
according to http & ipa rpm status.
So, there are two purposes for this function:
1. check whether the relevant httpd configuration files were changed,
As it's an indication for the setup that the httpd application is being actively used,
Therefore we may need to ask (dynamic change) the user whether to override this configuration.
2. Check if IPA is installed and drop port 80/443 support.
"""
controller = Controller()
# Check if IPA installed
if utils.installed(basedefs.IPA_RPM) or utils.installed(basedefs.FREEIPA_RPM):
# Change default ports
logging.debug("IPA rpms detected, disabling http proxy")
print output_messages.WARN_IPA_INSTALLED
utils.setHttpPortsToNonProxyDefault(controller)
# Don't use http proxy
paramToChange = controller.getParamByName("OVERRIDE_HTTPD_CONFIG")
paramToChange.setKey("DEFAULT_VALUE", "no")
else:
        if wereHttpdConfFilesChanged():
            # If conf files were changed, the user should be asked whether they really want to use ports 80/443
paramToChange = controller.getParamByName("OVERRIDE_HTTPD_CONFIG")
paramToChange.setKey("USE_DEFAULT", False)
# This validator must return true, so ports will always be handled
return True
def wereHttpdConfFilesChanged():
logging.debug("checking whether HTTPD config files were changed")
conf_files = [basedefs.FILE_HTTPD_SSL_CONFIG, basedefs.FILE_HTTPD_CONF]
cmd = [
basedefs.EXEC_RPM,
"-V",
"--nomtime",
"httpd",
"mod_ssl",
]
(output, rc) = utils.execCmd(cmdList=cmd)
for line in output.split(os.linesep):
if len(line) > 0:
changed_file = line.split()[-1]
if changed_file in conf_files:
logging.debug("HTTPD config file %s was changed" %(changed_file))
return True
return False
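# Illustrative only (typical `rpm -V` output, values assumed): a verify line like
#   S.5....T.  c /etc/httpd/conf/httpd.conf
# marks a modified config file; the code above takes the last whitespace-separated
# token as the file path and compares it against the watched httpd config files.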
|
|
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
import itertools
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_is_fitted
from .utils.validation import check_X_y, check_array
from .utils.multiclass import (_check_partial_fit_first_call,
check_classification_targets,
_ovr_decision_function)
from .utils.metaestimators import _safe_split, if_delegate_has_method
from .externals.joblib import Parallel
from .externals.joblib import delayed
from .externals.six.moves import zip as izip
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
"""Fit a single binary estimator."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." %
str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y)
return estimator
def _partial_fit_binary(estimator, X, y):
"""Partially fit a single binary estimator."""
estimator.partial_fit(X, y, np.array((0, 1)))
return estimator
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
if is_regressor(estimator):
return estimator.predict(X)
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
class _ConstantPredictor(BaseEstimator):
def fit(self, X, y):
self.y_ = y
return self
def predict(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def decision_function(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def predict_proba(self, X):
check_is_fitted(self, 'y_')
return np.repeat([np.hstack([1 - self.y_, self.y_])],
X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and only one
    classifier, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
    is used to predict multiple labels, for instance by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Read more in the :ref:`User Guide <ovr_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
self.classes_ = self.label_binarizer_.classes_
columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train, setting
        # n_jobs > 1 can result in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
self.estimator, X, column, classes=[
"not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i]])
for i, column in enumerate(columns))
return self
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators
        Should be used when there is not enough memory to train on all the
        data at once. Chunks of data can be passed over several iterations.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self
"""
if _check_partial_fit_first_call(self, classes):
if not hasattr(self.estimator, "partial_fit"):
raise ValueError(("Base estimator {0}, doesn't have "
"partial_fit method").format(self.estimator))
self.estimators_ = [clone(self.estimator) for _ in range
(self.n_classes_)]
# A sparse LabelBinarizer, with sparse_output=True, has been
# shown to outperform or match a dense label binarizer in all
# cases and has also resulted in less or equal memory consumption
# in the fit_ovr function overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
self.label_binarizer_.fit(self.classes_)
        if len(np.setdiff1d(y, self.classes_)):
raise ValueError(("Mini-batch contains {0} while classes " +
"must be subset of {1}").format(np.unique(y),
self.classes_))
Y = self.label_binarizer_.transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_binary)(self.estimators_[i], X,
next(columns))
for i in range(self.n_classes_))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes].
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
if (hasattr(self.estimators_[0], "decision_function") and
is_classifier(self.estimators_[0])):
thresh = 0
else:
thresh = .5
n_samples = _num_samples(X)
if self.label_binarizer_.y_type_ == "multiclass":
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.classes_[np.array(argmaxima.T)]
else:
indices = array.array('i')
indptr = array.array('i', [0])
for e in self.estimators_:
indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix((data, indices, indptr),
shape=(n_samples, len(self.estimators_)))
return self.label_binarizer_.inverse_transform(indicator)
@if_delegate_has_method(['_first_estimator', 'estimator'])
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : (sparse) array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self, 'estimators_')
# Y[i, j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
@if_delegate_has_method(['_first_estimator', 'estimator'])
def decision_function(self, X):
"""Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
if len(self.estimators_) == 1:
return self.estimators_[0].decision_function(X)
return np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
@property
def multilabel_(self):
"""Whether this is a multilabel classifier"""
return self.label_binarizer_.y_type_.startswith('multilabel')
@property
def n_classes_(self):
return len(self.classes_)
@property
def coef_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "coef_"):
raise AttributeError(
"Base estimator doesn't have a coef_ attribute.")
coefs = [e.coef_ for e in self.estimators_]
if sp.issparse(coefs[0]):
return sp.vstack(coefs)
return np.vstack(coefs)
@property
def intercept_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "intercept_"):
raise AttributeError(
"Base estimator doesn't have an intercept_ attribute.")
return np.array([e.intercept_.ravel() for e in self.estimators_])
@property
def _pairwise(self):
"""Indicate if wrapped estimator is using a precomputed Gram matrix"""
return getattr(self.estimator, "_pairwise", False)
@property
def _first_estimator(self):
return self.estimators_[0]
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
indcond = np.arange(X.shape[0])[cond]
return _fit_binary(estimator,
_safe_split(estimator, X, None, indices=indcond)[0],
y_binary, classes=[i, j]), indcond
def _partial_fit_ovo_binary(estimator, X, y, i, j):
"""Partially fit a single binary estimator(one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.zeros_like(y)
y_binary[y == j] = 1
return _partial_fit_binary(estimator, X[cond], y_binary)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-one multiclass strategy
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Read more in the :ref:`User Guide <ovo_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
check_classification_targets(y)
self.classes_ = np.unique(y)
if len(self.classes_) == 1:
raise ValueError("OneVsOneClassifier can not be fit when only one"
" class is present.")
n_classes = self.classes_.shape[0]
estimators_indices = list(zip(*(Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)
(self.estimator, X, y, self.classes_[i], self.classes_[j])
for i in range(n_classes) for j in range(i + 1, n_classes)))))
self.estimators_ = estimators_indices[0]
try:
self.pairwise_indices_ = (
estimators_indices[1] if self._pairwise else None)
except AttributeError:
self.pairwise_indices_ = None
return self
@if_delegate_has_method(delegate='estimator')
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators
        Should be used when there is not enough memory to train on all the
        data at once. Chunks of data can be passed over several iterations,
        where the first call should have an array of all target variables.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self
"""
if _check_partial_fit_first_call(self, classes):
self.estimators_ = [clone(self.estimator) for i in
range(self.n_classes_ *
(self.n_classes_ - 1) // 2)]
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
check_classification_targets(y)
combinations = itertools.combinations(range(self.n_classes_), 2)
self.estimators_ = Parallel(
n_jobs=self.n_jobs)(
delayed(_partial_fit_ovo_binary)(
estimator, X, y, self.classes_[i], self.classes_[j])
for estimator, (i, j) in izip(self.estimators_,
(combinations)))
self.pairwise_indices_ = None
return self
def predict(self, X):
"""Estimate the best class label for each sample in X.
This is implemented as ``argmax(decision_function(X), axis=1)`` which
will return the label of the class with most votes by estimators
predicting the outcome of a decision for each possible class pair.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
Y = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_[(Y > 0).astype(np.int)]
return self.classes_[Y.argmax(axis=1)]
def decision_function(self, X):
"""Decision function for the OneVsOneClassifier.
The decision values for the samples are computed by adding the
normalized sum of pair-wise classification confidence levels to the
votes in order to disambiguate between the decision values when the
votes for all the classes are equal leading to a tie.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
Y : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
indices = self.pairwise_indices_
if indices is None:
Xs = [X] * len(self.estimators_)
else:
Xs = [X[:, idx] for idx in indices]
predictions = np.vstack([est.predict(Xi)
for est, Xi in zip(self.estimators_, Xs)]).T
confidences = np.vstack([_predict_binary(est, Xi)
for est, Xi in zip(self.estimators_, Xs)]).T
Y = _ovr_decision_function(predictions,
confidences, len(self.classes_))
if self.n_classes_ == 2:
return Y[:, 1]
return Y
@property
def n_classes_(self):
return len(self.classes_)
@property
def _pairwise(self):
"""Indicate if wrapped estimator is using a precomputed Gram matrix"""
return getattr(self.estimator, "_pairwise", False)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""(Error-Correcting) Output-Code multiclass strategy
Output-code based strategies consist in representing each class with a
binary code (an array of 0s and 1s). At fitting time, one binary
classifier per bit in the code book is fitted. At prediction time, the
classifiers are used to project new points in the class space and the class
closest to the points is chosen. The main advantage of these strategies is
that the number of classifiers used can be controlled by the user, either
for compressing the model (0 < code_size < 1) or for making the model more
robust to errors (code_size > 1). See the documentation for more details.
Read more in the :ref:`User Guide <ecoc>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float
Percentage of the number of classes to be used to create the code book.
A number between 0 and 1 will require fewer classifiers than
one-vs-the-rest. A number greater than 1 will require more classifiers
than one-vs-the-rest.
random_state : int, RandomState instance or None, optional, default: None
The generator used to initialize the codebook. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
References
----------
.. [1] "Solving multiclass learning problems via error-correcting output
codes",
Dietterich T., Bakiri G.,
Journal of Artificial Intelligence Research 2,
1995.
.. [2] "The error coding method and PICTs",
James G., Hastie T.,
Journal of Computational and Graphical statistics 7,
1998.
.. [3] "The Elements of Statistical Learning",
Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
2008.
"""
def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
self.estimator = estimator
self.code_size = code_size
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : numpy array of shape [n_samples]
Multi-class targets.
Returns
-------
self
"""
X, y = check_X_y(X, y)
if self.code_size <= 0:
raise ValueError("code_size should be greater than 0, got {1}"
"".format(self.code_size))
_check_estimator(self.estimator)
random_state = check_random_state(self.random_state)
check_classification_targets(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
code_size_ = int(n_classes * self.code_size)
# FIXME: there are more elaborate methods than generating the codebook
# randomly.
self.code_book_ = random_state.random_sample((n_classes, code_size_))
self.code_book_[self.code_book_ > 0.5] = 1
if hasattr(self.estimator, "decision_function"):
self.code_book_[self.code_book_ != 1] = -1
else:
self.code_book_[self.code_book_ != 1] = 0
classes_index = dict((c, i) for i, c in enumerate(self.classes_))
Y = np.array([self.code_book_[classes_index[y[i]]]
for i in range(X.shape[0])], dtype=np.int)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_binary)(self.estimator, X, Y[:, i])
for i in range(Y.shape[1]))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
X = check_array(X)
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
|
|
# Copyright 2011 Kurt Le Breton, All Rights Reserved
import mwire
import unittest
class MWireFunctions(unittest.TestCase):
def setUp(self):
self.m = mwire.M('192.168.0.6', 6330)
def test_exists_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
test1: 11
test1[1]: 10
test1[1,"x"]: 1
test1[1,"y"]: 11
test1[1,"y","hello world"]: 1
test1[1,"z"]: 10
test1["x"]: 0
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual(11, self.m.exists('test1', []))
for i in range(2):
self.assertEqual(10, self.m.exists('test1', [1]))
for i in range(2):
self.assertEqual(1, self.m.exists('test1', [1, 'x']))
for i in range(2):
self.assertEqual(11, self.m.exists('test1', [1, 'y']))
for i in range(2):
self.assertEqual(1, self.m.exists('test1', [1, 'y', 'hello world']))
for i in range(2):
self.assertEqual(10, self.m.exists('test1', [1, 'z']))
for i in range(2):
self.assertEqual(0, self.m.exists('test1', ['x']))
self.assertEqual(False, bool(0))
self.assertEqual(True, bool(1))
self.assertEqual(True, bool(10))
self.assertEqual(True, bool(11))
def test_decrement_01(self):
"""
Client: DECR test1["a","b"]
Server: 2
"""
self.m.kill('test1', [])
self.m.set('test1', ['a', 'b'], '20')
for i in range(20):
self.assertEqual(20 - (i + 1), self.m.decrement('test1', ['a', 'b']))
def test_get_01(self):
"""
Client: GET test1["a","b"]
Server: $5
Server: Hello
Client: GET test1
Server: $22
Server: This is the top level!
"""
self.m.set('test1', ['a', 'b'], 'Hello')
for i in range(2):
self.assertEqual('Hello', self.m.get('test1', ['a', 'b']))
self.m.set('test1', [], 'This is the top level!')
for i in range(2):
self.assertEqual('This is the top level!', self.m.get('test1', []))
def test_get_02(self):
"""
Client: GET test1["xxx"]
Server: $-1
"""
self.m.kill('xxx', [])
for i in range(2):
self.assertEqual(None, self.m.get('xxx', []))
def test_get_03(self):
"""
Client: GET test1["yyy"]
Server: $0
Server:
"""
self.m.set('test1', ['yyy'], '')
for i in range(2):
self.assertEqual('', self.m.get('test1', ['yyy']))
def test_getsubtree_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","aa"]=12.34
test1[1,"y","ab"]=23.45
test1[1,"y","ab",2,3]=999
test1[1,"y","ad"]=""
test1[1,"y","hello world"]="ok"
test1[1,"z"]=""
test1[1,"z","hello world"]="not ok"
Client: GETSUBTREE myArray[1,"y"]
Server: *12
Server: $-1
Server: $5
Server: world
Server: $4
Server: "aa"
Server: $5
Server: 12.34
Server: $4
Server: "ab"
Server: $5
Server: 23.45
Server: $8
Server: "ab",2,3
Server: $3
Server: 999
Server: $4
Server: "ad"
Server: $-1
Server: $13
Server: "hello world"
Server: $2
Server: ok
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'aa'], '12.34')
self.m.set('test1', [1, 'y', 'ab'], '23.45')
self.m.set('test1', [1, 'y', 'ab', 2, 3], '999')
self.m.set('test1', [1, 'y', 'ad'], '')
self.m.set('test1', [1, 'z'], '')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
data = self.m.getsubtree('test1', [1, 'y'])
self.assertEqual(data[0][0], None)
self.assertEqual(data[0][1], 'world')
self.assertEqual(data[1][0], '"aa"')
self.assertEqual(data[1][1], '12.34')
self.assertEqual(data[2][0], '"ab"')
self.assertEqual(data[2][1], '23.45')
self.assertEqual(data[3][0], '"ab",2,3')
self.assertEqual(data[3][1], '999')
self.assertEqual(data[4][0], '"ad"')
self.assertEqual(data[4][1], '')
def test_halt_01(self):
"""
Client: HALT
"""
self.assertEqual(True, self.m.ping())
self.assertEqual(True, self.m.halt())
self.assertEqual(False, self.m.ping())
self.setUp()
self.assertEqual(True, self.m.ping())
def test_increment_01(self):
"""
Client: INC test1["a","b"]
Server: 2
"""
self.m.kill('test1', [])
for i in range(20):
self.assertEqual(i + 1, self.m.increment('test1', ['a', 'b']))
def test_kill_01(self):
"""
Client: KILL test1["a","b"]
Server: +OK
"""
self.m.set('test1', ['a', 'b'], '')
for i in range(2):
self.assertEqual(True, self.m.kill('test1', ['a', 'b']))
self.m.set('test1', ['a', 'b'], '')
for i in range(2):
self.assertEqual(True, self.m.kill('test1', []))
def test_next_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: NEXT test1[""]
Server: $1
Server: 1
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual('1', self.m.next('test1', ['']))
def test_next_02(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: NEXT myArray[1]
Server: $-1
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual(None, self.m.next('test1', [1]))
def test_next_03(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: NEXT myArray[1,""]
Server: $1
Server: x
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual('x', self.m.next('test1', [1,'']))
def test_next_04(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: NEXT myArray[1,"x"]
Server: $1
Server: y
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual('y', self.m.next('test1', [1,'x']))
def test_next_05(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: NEXT myArray[1,"z"]
Server: $-1
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual(None, self.m.next('test1', [1,'z']))
def test_previous_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z","hello world"]="not ok"
Client: GETALLSUBS myArray
Server: *2
Server: $1
Server: 1
Server: $-1
Client: GETALLSUBS myArray[1]
Server: *6
Server: $1
Server: x
Server: $5
Server: hello
Server: $1
Server: y
Server: $5
Server: world
Server: $1
Server: z
Server: $0
Server:
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z'], '')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
data = self.m.getallsubs('test1', [])
self.assertEqual(data[0][0], '1')
self.assertEqual(data[0][1], None)
for i in range(2):
data = self.m.getallsubs('test1', [1])
self.assertEqual(data[0][0], 'x')
self.assertEqual(data[0][1], 'hello')
self.assertEqual(data[1][0], 'y')
self.assertEqual(data[1][1], 'world')
self.assertEqual(data[2][0], 'z')
self.assertEqual(data[2][1], '')
def test_ping_01(self):
m = self.m
for i in range(2):
self.assertEqual(True, m.ping())
def test_query_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z"]=""
test1[1,"z","hello world"]="not ok"
Client: QUERY test1
Server: $14
Server: test1[1,"x"]
Client: QUERY test1[1,"x"]
Server: $14
Server: test1[1,"y"]
Client: QUERY test1[1,"y"]
Server: $28
Server: test1[1,"y","hello world"]
Client: QUERY test1[1,"z","hello world"]
Server: $-1
Client: QUERY test1[1,"a"]
Server: $14
Server: test1[1,"x"]
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z'], '')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
self.assertEqual('test1[1,"x"]', self.m.query('test1', []))
for i in range(2):
self.assertEqual('test1[1,"y"]', self.m.query('test1', [1, 'x']))
for i in range(2):
self.assertEqual('test1[1,"y","hello world"]', self.m.query('test1', [1, 'y']))
for i in range(2):
self.assertEqual(None, self.m.query('test1', [1, 'z', 'hello world']))
for i in range(2):
self.assertEqual('test1[1,"x"]', self.m.query('test1', [1, 'a']))
def test_queryget_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z"]=""
test1[1,"z","hello world"]="not ok"
Client: QUERYGET test1
Server: *2
Server: $14
Server: test1[1,"x"]
Server: $5
Server: hello
Client: QUERYGET test1[1,"x"]
Server: *2
Server: $14
Server: test1[1,"y"]
Server: $5
Server: world
Client: QUERYGET test1[1,"y"]
Server: *2
Server: $28
Server: test1[1,"y","hello world"]
Server: $2
Server: ok
Client: QUERYGET test1[1,"z","hello world"]
Server: $-1
Client: QUERYGET test1[1,"a"]
Server: *2
Server: $14
Server: test1[1,"x"]
Server: $5
Server: hello
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z'], '')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
data = self.m.queryget('test1', [])
self.assertEqual('test1[1,"x"]', data[0])
self.assertEqual('hello', data[1])
for i in range(2):
data = self.m.queryget('test1', [1, 'x'])
self.assertEqual('test1[1,"y"]', data[0])
self.assertEqual('world', data[1])
for i in range(2):
data = self.m.queryget('test1', [1, 'y'])
self.assertEqual('test1[1,"y","hello world"]', data[0])
self.assertEqual('ok', data[1])
for i in range(2):
data = self.m.queryget('test1', [1, 'z', 'hello world'])
self.assertEqual(None, data[0])
self.assertEqual(None, data[1])
for i in range(2):
data = self.m.queryget('test1', [1, 'a'])
self.assertEqual('test1[1,"x"]', data[0])
self.assertEqual('hello', data[1])
def test_set_01_MORE_TO_BE_DONE(self):
m = self.m
for i in range(2):
self.assertEqual(True, m.set('KLB', ['test_set', i, 'b'], 'Kurt Le Breton'))
@unittest.skip("error in protocol needing to be addressed by mgateway.com")
def test_setsubtree_01(self):
"""
test1="aaa"
test1[1,"x"]="hello"
test1[1,"y"]="world"
test1[1,"y","hello world"]="ok"
test1[1,"z"]=""
test1[1,"z","hello world"]="not ok"
Client: SETSUBTREE myArray
Client: *4
Client: $4
Client: "aa"
Client: $5
Client: 12.34
Client: $4
Client: "ab"
Client: $5
Client: 23.45
Server: +OK
"""
self.m.kill('test1', [])
self.m.set('test1', [], 'aaa')
self.m.set('test1', [1, 'x'], 'hello')
self.m.set('test1', [1, 'y'], 'world')
self.m.set('test1', [1, 'y', 'hello world'], 'ok')
self.m.set('test1', [1, 'z'], '')
self.m.set('test1', [1, 'z', 'hello world'], 'not ok')
for i in range(2):
data = [['aa', 'A12.34'], ['ab', 'A23.45']]
self.assertEqual(True, self.m.setsubtree('test1', [1], data))
data = self.m.queryget('test1', [1, 'z', 'hello world'])
self.assertEqual(data[0], 'aa')
self.assertEqual(data[1], '12.34')
data = self.m.queryget('test1', ['aa'])
self.assertEqual(data[0], 'ab')
self.assertEqual(data[1], '23.45')
@unittest.skip("todo")
def test_lock_01(self):
pass
@unittest.skip("todo")
def test_unlock_01(self):
pass
@unittest.skip("todo")
def test_transaction_start_01(self):
pass
@unittest.skip("todo")
def test_transaction_commit_01(self):
pass
@unittest.skip("todo")
def test_transaction_rollback_01(self):
pass
@unittest.skip("todo")
def test_version_01(self):
pass
@unittest.skip("todo")
def test_mversion_01(self):
pass
@unittest.skip("todo")
def test_monitor_01(self):
pass
@unittest.skip("todo")
def test_mdate_01(self):
pass
@unittest.skip("todo")
def test_function_01(self):
pass
@unittest.skip("todo")
def test_processid_01(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DatabaseLock'
db.create_table(u'logical_databaselock', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('database', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'lock', unique=True, to=orm['logical.Database'])),
('task', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'lock', to=orm['notification.TaskHistory'])),
))
db.send_create_signal(u'logical', ['DatabaseLock'])
def backwards(self, orm):
# Deleting model 'DatabaseLock'
db.delete_table(u'logical_databaselock')
models = {
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logical.credential': {
'Meta': {'ordering': "(u'database', u'user')", 'unique_together': "((u'user', u'database'),)", 'object_name': 'Credential'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'credentials'", 'to': u"orm['logical.Database']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logical.database': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'},
'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_task': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database'", 'null': 'True', 'to': u"orm['notification.TaskHistory']"}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_auto_resize': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}),
'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'logical.databaselock': {
'Meta': {'object_name': 'DatabaseLock'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'lock'", 'unique': 'True', 'to': u"orm['logical.Database']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'lock'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'logical.project': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'PENDING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'flipperfox_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'flipperfox_migration_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
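    # The dictionary above is South's frozen ORM snapshot: each entry maps an
    # "app.model" label to its field definitions so this migration can be run
    # against the schema exactly as it looked at this point in history.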
complete_apps = ['logical']
|
|
from __future__ import print_function
import argparse
import six
import sys
import contextlib
from humancrypto import PrivateKey, Certificate, CSR
from humancrypto import pki, yearutil
from humancrypto.error import VerifyMismatchError, PasswordMatchesWrongYear
def do(parser):
def deco(f):
parser.set_defaults(func=f)
return f
return deco
def out(*x):
print(*x)
def _acceptBasicAttributes(parser):
for key in sorted(pki.OID_MAPPING):
ckey = key.replace('_', '-')
parser.add_argument('--{0}'.format(ckey), action='append')
def _basicAttributes2Dict(args):
ret = {}
for key in pki.OID_MAPPING:
values = getattr(args, key)
if values is None:
continue
clean_values = []
for val in values:
if not isinstance(val, six.text_type):
val = val.decode('utf-8')
clean_values.append(val)
ret[key] = clean_values
return ret
def _acceptSomeExtendedAttributes(parser):
parser.add_argument('--subject-alternative-name', action='append')
def _extAttributes2Dict(args):
ret = {}
for key in pki.EXT_MAPPING:
values = getattr(args, key, None)
if values is None:
continue
clean_values = []
for val in values:
if not isinstance(val, six.text_type):
val = val.decode('utf-8')
clean_values.append(val)
ret[key] = clean_values
return ret
ap = argparse.ArgumentParser()
sp = ap.add_subparsers(title='subcommands', dest='command')
# ========================================================
# Year-based stuff
# ========================================================
def add_year(parser, name, key, deprecated=False):
helptext = 'Crypto for year {name}'.format(**locals())
if deprecated:
helptext = 'DEPRECATED. ' + helptext
year_parser = parser.add_parser(name, help=helptext)
year_sp = year_parser.add_subparsers(title='actions', dest='action')
# ---------------------------
# Passwords
# ---------------------------
pw = year_sp.add_parser(
'pw',
help='Password storage/verification')
pw_subs = pw.add_subparsers(
title='action',
dest='subsubcommand')
p = pw_subs.add_parser(
'store',
help='Hash a password for later verification.',
description='Read a password'
' from stdin and write a hash of the password to stdout.')
@do(p)
def store_password(args):
module = yearutil.get_module(key)
pw = sys.stdin.read().encode()
out(module.store_password(pw))
p = pw_subs.add_parser(
'verify',
formatter_class=argparse.RawDescriptionHelpFormatter,
help='Verify that a password matches a stored hash.',
description='Read a password from stdin'
' and compare with the given stored password.'
' Will exit with one of the following codes:\n'
' 0 = OK - password matches\n'
' 1 = FAIL - password does NOT match\n'
" 2 = OK-ish - password matches, but for the wrong year\n")
p.add_argument(
'stored',
help='Stored password.')
@do(p)
def verify_password(args):
module = yearutil.get_module(key)
pw = sys.stdin.read().encode()
if isinstance(args.stored, six.binary_type):
args.stored = args.stored.decode()
try:
module.verify_password(args.stored, pw)
out('ok')
except PasswordMatchesWrongYear:
out('ok-ish: password matches but for different year')
sys.exit(2)
except VerifyMismatchError:
out('incorrect')
sys.exit(1)
# ---------------------------
# Tokens
# ---------------------------
token = year_sp.add_parser(
'token',
description='Writes a secure random token to stdout.'
' By default, output is binary.',
help='Generate a secure random token')
token.add_argument(
'-H',
'--hex',
action='store_true',
help='Output in hexadecimal format')
token.add_argument(
'-U',
'--urlsafe',
action='store_true',
help='Output in a URL safe format')
token.add_argument(
'-L',
'--length',
type=int,
default=None,
help="Byte size of token to generate."
" Defaults to a secure amount.")
@do(token)
def make_token(args):
module = yearutil.get_module(key)
ret = None
if args.hex:
ret = module.random_hex_token(args.length)
elif args.urlsafe:
ret = module.random_urlsafe_token(args.length)
else:
ret = module.random_token(args.length)
out(ret)
add_year(sp, 'y2016', '2016')
add_year(sp, 'y44bc', '44bc', deprecated=True)
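# The two calls above register parallel year-specific subcommand trees; a
# hedged sketch of the resulting dispatch (argument values illustrative):
#
#   main(['y2016', 'pw', 'store'])      # hash a password read from stdin
#   main(['y2016', 'token', '--hex'])   # print a secure random hex token
#   main(['y44bc', 'pw', 'store'])      # same actions for the deprecated year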
# ========================================================
# RSA
# ========================================================
rsa_parser = sp.add_parser(
'rsa',
help='RSA pub/priv key commands')
rsa = rsa_parser.add_subparsers(
title='subcommands',
dest='subcommand')
# --------------------------------------------------------
# create-private
# --------------------------------------------------------
p = rsa.add_parser(
'create-private',
help='Create a private key')
p.add_argument(
'filename',
help='Private key filename')
@do(p)
def create_private(args):
PrivateKey.create().save(args.filename)
out('wrote', args.filename)
# --------------------------------------------------------
# extract-public
# --------------------------------------------------------
p = rsa.add_parser(
'extract-public',
help='Extract a public key from a private key')
p.add_argument(
'privatekey',
help='Private key filename')
p.add_argument(
'publickey',
help='Public key filename')
@do(p)
def extract_public(args):
pub = PrivateKey.load(filename=args.privatekey).public_key
pub.save(args.publickey)
out('wrote', args.publickey)
# --------------------------------------------------------
# self-signed-cert
# --------------------------------------------------------
p = rsa.add_parser(
'self-signed-cert',
help='Create a self-signed certificate')
p.add_argument(
'privatekey',
help='Private key filename')
p.add_argument(
'certfile',
help='Certificate filename')
_acceptBasicAttributes(p)
@do(p)
def self_signed_cert(args):
attribs = _basicAttributes2Dict(args)
priv = PrivateKey.load(filename=args.privatekey)
cert = priv.self_signed_cert(attribs)
cert.save(args.certfile)
out('wrote', args.certfile)
# --------------------------------------------------------
# create-csr
# --------------------------------------------------------
p = rsa.add_parser(
'create-csr',
help='Create a Certificate Signing Request (CSR)')
p.add_argument(
'privatekey',
help='Private key filename')
p.add_argument(
'csr',
help='CSR filename')
p.add_argument(
'--server',
action='store_true',
help='If given, use sane server-certificate defaults.')
p.add_argument(
'--client',
action='store_true',
help='If given, use sane client-certificate defaults.')
_acceptBasicAttributes(p)
_acceptSomeExtendedAttributes(p)
@do(p)
def create_csr(args):
attribs = _basicAttributes2Dict(args)
extensions = _extAttributes2Dict(args)
priv = PrivateKey.load(filename=args.privatekey)
csr = priv.signing_request(
attribs,
extensions=extensions,
server=args.server,
client=args.client)
csr.save(args.csr)
out('wrote', args.csr)
# --------------------------------------------------------
# sign-csr
# --------------------------------------------------------
p = rsa.add_parser(
'sign-csr',
help='Sign a Certificate Signing Request to make a certificate')
p.add_argument(
'signingkey',
help='Filename of private key to sign with.')
p.add_argument(
'signingcert',
help='Filename of certificate to sign with.')
p.add_argument(
'csr',
help='CSR to sign')
p.add_argument(
'cert',
help='Filename to write resulting cert to.')
@do(p)
def sign_csr(args):
signing_key = PrivateKey.load(filename=args.signingkey)
signing_cert = Certificate.load(filename=args.signingcert)
csr = CSR.load(filename=args.csr)
cert = signing_key.sign_csr(csr, signing_cert)
cert.save(args.cert)
out('wrote', args.cert)
@contextlib.contextmanager
def redirect(stdin=None, stdout=None, stderr=None):
former = sys.stdin, sys.stdout, sys.stderr
sys.stdin = stdin or sys.stdin
sys.stdout = stdout or sys.stdout
sys.stderr = stderr or sys.stderr
yield
sys.stdin, sys.stdout, sys.stderr = former
def main(args=None, stdin=None, stdout=None, stderr=None):
parsed = ap.parse_args(args)
with redirect(stdin=stdin, stdout=stdout, stderr=stderr):
parsed.func(parsed)
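# Hedged usage sketch for driving the parser programmatically (e.g. from
# tests); filenames are placeholders:
#
#   main(['rsa', 'create-private', 'my.key'])
#   main(['rsa', 'extract-public', 'my.key', 'my.pub'])
#   main(['rsa', 'self-signed-cert', 'my.key', 'my.crt'])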
|
|
from __future__ import absolute_import
import logging
import tempfile
import os.path
from pip.compat import samefile
from pip.exceptions import BadCommand
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._vendor.packaging.version import parse as parse_version
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def get_git_version(self):
VERSION_PFX = 'git version '
version = self.run_command(['version'], show_stdout=False)
if version.startswith(VERSION_PFX):
version = version[len(VERSION_PFX):].split()[0]
else:
version = ''
        # Get the first 3 positions of the git version because
        # on Windows it is x.y.z.windows.t, and that parses as a
        # LegacyVersion, which is always smaller than a Version.
version = '.'.join(version.split('.')[:3])
return parse_version(version)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_short_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warning(
"Could not find a tag or branch '%s', assuming commit.", rev,
)
return rev_options
def check_version(self, dest, rev_options):
"""
Compare the current sha to the ref. ref may be a branch or tag name,
but current rev will always point to a sha. This means that a branch
or tag will never compare as True. So this ultimately only matches
against exact shas.
"""
return self.get_revision(dest).startswith(rev_options[0])
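    # Illustrative sketch of the comparison above: rev_options holds either a
    # resolved sha (from check_rev_options) or the user-supplied rev, so
    #   get_revision(dest) == 'abc123...'  vs  rev_options == ['abc123']
    # compares True, while a branch name such as ['origin/master'] never does.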
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
self.run_command(['checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
if self.get_git_version() >= parse_version('1.9.0'):
# fetch tags in addition to everything else
self.run_command(['fetch', '-q', '--tags'], cwd=dest)
else:
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(
rev_options[0], dest, rev_options,
)
self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.check_version(dest, rev_options):
self.run_command(
['checkout', '-q'] + rev_options,
cwd=dest,
)
#: repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
"""Return URL of the first remote encountered."""
remotes = self.run_command(
['config', '--get-regexp', r'remote\..*\.url'],
show_stdout=False, cwd=location)
remotes = remotes.splitlines()
found_remote = remotes[0]
for remote in remotes:
if remote.startswith('remote.origin.url '):
found_remote = remote
break
url = found_remote.split(' ')[1]
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_full_refs(self, location):
"""Yields tuples of (commit, ref) for branches and tags"""
output = self.run_command(['show-ref'],
show_stdout=False, cwd=location)
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
yield commit.strip(), ref.strip()
def is_ref_remote(self, ref):
return ref.startswith('refs/remotes/')
def is_ref_branch(self, ref):
return ref.startswith('refs/heads/')
def is_ref_tag(self, ref):
return ref.startswith('refs/tags/')
def is_ref_commit(self, ref):
"""A ref is a commit sha if it is not anything else"""
return not any((
self.is_ref_remote(ref),
self.is_ref_branch(ref),
self.is_ref_tag(ref),
))
# Should deprecate `get_refs` since it's ambiguous
def get_refs(self, location):
return self.get_short_refs(location)
def get_short_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
rv = {}
for commit, ref in self.get_full_refs(location):
ref_name = None
if self.is_ref_remote(ref):
ref_name = ref[len('refs/remotes/'):]
elif self.is_ref_branch(ref):
ref_name = ref[len('refs/heads/'):]
elif self.is_ref_tag(ref):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit
return rv
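    # Example (illustrative values) of the mapping built above from show-ref
    # output:
    #   {'origin/master': '1f2e3d...', 'master': '1f2e3d...', 'v1.0': '9a8b7c...'}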
def _get_subdirectory(self, location):
"""Return the relative path of setup.py to the git repo root."""
# find the repo root
git_dir = self.run_command(['rev-parse', '--git-dir'],
show_stdout=False, cwd=location).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
root_dir = os.path.join(git_dir, '..')
# find setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
# relative path of setup.py to repo root
if samefile(root_dir, location):
return None
return os.path.relpath(location, root_dir)
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
subdirectory = self._get_subdirectory(location)
if subdirectory:
req += '&subdirectory=' + subdirectory
return req
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing, so we add it here and strip it again afterwards, returning
        the URL as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
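    # Hedged illustration of the round trip above (values illustrative):
    #   'git+git@github.com:user/repo.git@v1.0' is temporarily given an
    #   'ssh://' scheme for parsing and comes back as
    #   ('git@github.com:user/repo.git', 'v1.0').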
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def controls_location(cls, location):
if super(Git, cls).controls_location(location):
return True
try:
r = cls().run_command(['rev-parse'],
cwd=location,
show_stdout=False,
on_returncode='ignore')
return not r
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return False
vcs.register(Git)
|
|
# -*- coding: utf-8 -*-
import logging
from asyncinit import asyncinit
from .blockchainobject import BlockchainObject
from ..exceptions import AccountDoesNotExistsException
from ..account import Account as SyncAccount, AccountUpdate as SyncAccountUpdate
log = logging.getLogger()
class Account(BlockchainObject, SyncAccount):
""" This class allows to easily access Account data
:param str account_name: Name of the account
        :param instance blockchain_instance: instance to use when accessing an RPC
        :param bool full: Obtain all account data including orders, positions, etc.
        :param bool lazy: Use lazy loading
:returns: Account data
:rtype: dictionary
:raises .exceptions.AccountDoesNotExistsException: if account
does not exist
Instances of this class are dictionaries that come with additional
        methods (see below) that allow dealing with an account and its
corresponding functions.
.. code-block:: python
from aio.account import Account
account = await Account("init0")
print(account)
.. note:: This class comes with its own caching function to reduce the
load on the API server. Instances of this class can be
refreshed with ``await Account.refresh()``.
"""
async def __init__(self, *args, **kwargs):
self.define_classes()
assert self.type_id
assert self.amount_class
assert self.operations
self.full = kwargs.pop("full", False)
await BlockchainObject.__init__(self, *args, **kwargs)
async def refresh(self):
""" Refresh/Obtain an account's data from the API server
"""
import re
if re.match(r"^1\.2\.[0-9]*$", self.identifier):
result = await self.blockchain.rpc.get_objects([self.identifier])
account = result[0]
else:
result = await self.blockchain.rpc.lookup_account_names([self.identifier])
account = result[0]
if not account:
raise AccountDoesNotExistsException(self.identifier)
self.store(account, account["name"])
self.store(account, account["id"])
if self.full: # pragma: no cover
accounts = await self.blockchain.rpc.get_full_accounts(
[account["id"]], False
)
if accounts and isinstance(accounts, list):
account = accounts[0][1]
else:
raise AccountDoesNotExistsException(self.identifier)
await super(Account, self).__init__(
account["account"], blockchain_instance=self.blockchain
)
for k, v in account.items():
if k != "account":
self[k] = v
else:
await super(Account, self).__init__(
account, blockchain_instance=self.blockchain
)
async def ensure_full(self): # pragma: no cover
if not self.is_fully_loaded:
self.full = True
await self.refresh()
@property
async def balances(self):
""" List balances of an account. This call returns instances of
:class:`amount.Amount`.
"""
balances = await self.blockchain.rpc.get_account_balances(self["id"], [])
return [
await self.amount_class(b, blockchain_instance=self.blockchain)
for b in balances
if int(b["amount"]) > 0
]
async def balance(self, symbol):
""" Obtain the balance of a specific Asset. This call returns instances of
:class:`amount.Amount`.
"""
if isinstance(symbol, dict) and "symbol" in symbol:
symbol = symbol["symbol"]
balances = await self.balances
for b in balances:
if b["symbol"] == symbol:
return b
return await self.amount_class(0, symbol, blockchain_instance=self.blockchain)
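    # Hedged usage sketch (asset symbol illustrative):
    #   btc = await account.balance("BTC")   # Amount instance, zero if not held
    #   held = await account.balances        # only balances with amount > 0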
async def history(self, first=0, last=0, limit=-1, only_ops=[], exclude_ops=[]):
""" Returns a generator for individual account transactions. The
latest operation will be first. This call can be used in a
``for`` loop.
:param int first: sequence number of the first
transaction to return (*optional*)
:param int last: sequence number of the last
transaction to return (*optional*)
:param int limit: limit number of transactions to
return (*optional*)
:param array only_ops: Limit generator by these
operations (*optional*)
:param array exclude_ops: Exclude these operations from
generator (*optional*).
            .. note::
                only_ops and exclude_ops take an array of strings.
                The full list of operation IDs can be found in
                operationids.py.
Example: ['transfer', 'fill_order']
"""
_limit = 100
cnt = 0
if first < 0:
first = 0
while True:
# RPC call
txs = await self.blockchain.rpc.get_account_history(
self["id"],
"1.11.{}".format(last),
_limit,
"1.11.{}".format(first - 1),
api="history",
)
for i in txs:
if (
exclude_ops
and self.operations.getOperationNameForId(i["op"][0]) in exclude_ops
):
continue
if (
not only_ops
or self.operations.getOperationNameForId(i["op"][0]) in only_ops
):
cnt += 1
yield i
if limit >= 0 and cnt >= limit: # pragma: no cover
return
if not txs:
log.info("No more history returned from API node")
break
if len(txs) < _limit:
log.info("Less than {} have been returned.".format(_limit))
break
first = int(txs[-1]["id"].split(".")[2])
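    # Hedged usage sketch (names and operation types illustrative): the
    # generator above is meant to be consumed with "async for", e.g.
    #
    #   async for op in account.history(limit=10, only_ops=["transfer"]):
    #       print(op["id"], op["op"][0])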
async def upgrade(self): # pragma: no cover
""" Upgrade account to life time member
"""
assert callable(self.blockchain.upgrade_account)
return await self.blockchain.upgrade_account(account=self)
async def whitelist(self, account): # pragma: no cover
""" Add an other account to the whitelist of this account
"""
assert callable(self.blockchain.account_whitelist)
return await self.blockchain.account_whitelist(
account, lists=["white"], account=self
)
async def blacklist(self, account): # pragma: no cover
""" Add an other account to the blacklist of this account
"""
assert callable(self.blockchain.account_whitelist)
return await self.blockchain.account_whitelist(
account, lists=["black"], account=self
)
async def nolist(self, account): # pragma: no cover
""" Remove an other account from any list of this account
"""
assert callable(self.blockchain.account_whitelist)
return await self.blockchain.account_whitelist(account, lists=[], account=self)
@asyncinit
class AccountUpdate(SyncAccountUpdate):
""" This purpose of this class is to keep track of account updates
as they are pushed through by :class:`notify.Notify`.
Instances of this class are dictionaries and take the following
form:
        .. code-block:: js
{'id': '2.6.29',
'lifetime_fees_paid': '44261516129',
'most_recent_op': '2.9.0',
'owner': '1.2.29',
'pending_fees': 0,
'pending_vested_fees': 16310,
'total_core_in_orders': '6788845277634',
'total_ops': 0}
"""
async def __init__(self, data, *args, **kwargs):
self.define_classes()
assert self.account_class
if isinstance(data, dict):
super(AccountUpdate, self).__init__(data)
else:
account = await self.account_class(
data, blockchain_instance=self.blockchain
)
result = await self.blockchain.rpc.get_objects(
["2.6.%s" % (account["id"].split(".")[2])]
)
update = result[0]
super(AccountUpdate, self).__init__(update)
@property
async def account(self):
""" In oder to obtain the actual
:class:`account.Account` from this class, you can
use the ``account`` attribute.
"""
account = await self.account_class(
self["owner"], blockchain_instance=self.blockchain
)
# account.refresh()
return account
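# Hedged usage sketch for AccountUpdate (payload shape mirrors the docstring
# above; the notification source is an assumption). Construction is awaitable
# because of the @asyncinit decorator:
#
#   update = await AccountUpdate(notification_payload)
#   owner = await update.account   # resolves to an Account instance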
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.host_mock import MockHost
from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
command_constructor = None
def setUp(self):
self.tool = MockTool()
self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
self.command.bind_to_tool(self.tool)
self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
self.lion_expectations_path = self.lion_port.path_to_generic_test_expectations_file()
self.tool.filesystem.write_text_file(self.tool.filesystem.join(self.lion_port.layout_tests_dir(), "VirtualTestSuites"),
'[]')
# FIXME: crbug.com/279494. We should override builders._exact_matches
# here to point to a set of test ports and restore the value in
# tearDown(), and that way the individual tests wouldn't have to worry
# about it.
def _expand(self, path):
if self.tool.filesystem.isabs(path):
return path
return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
def _read(self, path):
return self.tool.filesystem.read_text_file(self._expand(path))
def _write(self, path, contents):
self.tool.filesystem.write_text_file(self._expand(path), contents)
def _zero_out_test_expectations(self):
for port_name in self.tool.port_factory.all_port_names():
port = self.tool.port_factory.get(port_name)
for path in port.expectations_files():
self._write(path, '')
self.tool.filesystem.written_files = {}
def _setup_mock_builder_data(self):
data = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"first-test.html": {
"expected": "PASS",
"actual": "IMAGE+TEXT"
},
"second-test.html": {
"expected": "FAIL",
"actual": "IMAGE+TEXT"
}
}
}
});""")
# FIXME: crbug.com/279494 - we shouldn't be mixing mock and real builder names.
for builder in ['MOCK builder', 'MOCK builder (Debug)', 'WebKit Mac10.7']:
self.command._builder_data[builder] = data
class TestCopyExistingBaselinesInternal(_BaseTestCase):
command_constructor = CopyExistingBaselinesInternal
def setUp(self):
super(TestCopyExistingBaselinesInternal, self).setUp()
def test_copying_overwritten_baseline(self):
self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. It's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-mac-snowleopard')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK SnowLeopard", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_copying_overwritten_baseline_to_multiple_locations(self):
self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_no_copy_existing_baseline(self):
self.tool.executive = MockExecutive2()
        # FIXME: It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'original win7 result')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_no_copy_skipped_test(self):
self.tool.executive = MockExecutive2()
port = self.tool.port_factory.get('test-win-win7')
fs = self.tool.filesystem
self._write(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
expectations_path = fs.join(port.path_to_generic_test_expectations_file())
self._write(expectations_path, (
"[ Win ] failures/expected/image.html [ Failure ]\n"
"[ Linux ] failures/expected/image.html [ Skip ]\n"))
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')))
self.assertEqual(self._read(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')),
'original win7 result')
class TestRebaselineTest(_BaseTestCase):
command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
def setUp(self):
super(TestRebaselineTest, self).setUp()
self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt", results_directory=None)
def test_baseline_directory(self):
command = self.command
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-lion")
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard")
def test_rebaseline_updates_expectations_file_noop(self):
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = "png,wav,txt"
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
def test_rebaseline_test(self):
self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", self.WEB_PREFIX)
self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
def test_rebaseline_test_with_results_directory(self):
self._write("userscripts/another-test.html", "test data")
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
self.options.results_directory = '/tmp'
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
def test_rebaseline_reftest(self):
self._write("userscripts/another-test.html", "test data")
self._write("userscripts/another-test-expected.html", "generic result")
OutputCapture().assert_outputs(self, self.command._rebaseline_test_and_update_expectations, args=[self.options],
expected_logs="Cannot rebaseline reftest: userscripts/another-test.html\n")
self.assertDictEqual(self.command._scm_changes, {'add': [], 'remove-lines': [], "delete": []})
def test_rebaseline_test_and_print_scm_changes(self):
self.command._print_scm_changes = True
self.command._scm_changes = {'add': [], 'delete': []}
self.tool._scm.exists = lambda x: False
self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", None)
self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/userscripts/another-test-expected.txt'], 'delete': []})
def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
self.tool.executive = MockExecutive2()
        # FIXME: It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK XP": {"port_name": "test-win-xp"},
"MOCK Win7": {"port_name": "test-win-win7"},
}
options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'MOCK Web result, convert 404 to None=True')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-xp/failures/expected/image-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}], "delete": []}\n')
class TestAbstractParallelRebaselineCommand(_BaseTestCase):
command_constructor = AbstractParallelRebaselineCommand
def test_builders_to_fetch_from(self):
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK XP": {"port_name": "test-win-xp"},
"MOCK Win7": {"port_name": "test-win-win7"},
"MOCK Win7 (dbg)(1)": {"port_name": "test-win-win7"},
"MOCK Win7 (dbg)(2)": {"port_name": "test-win-win7"},
}
builders_to_fetch = self.command._builders_to_fetch_from(["MOCK XP", "MOCK Win7 (dbg)(1)", "MOCK Win7 (dbg)(2)", "MOCK Win7"])
self.assertEqual(builders_to_fetch, ["MOCK XP", "MOCK Win7"])
finally:
builders._exact_matches = old_exact_matches
class TestRebaselineJson(_BaseTestCase):
command_constructor = RebaselineJson
def setUp(self):
super(TestRebaselineJson, self).setUp()
self.tool.executive = MockExecutive2()
self.old_exact_matches = builders._exact_matches
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-snowleopard"},
"MOCK builder (Debug)": {"port_name": "test-mac-snowleopard"},
}
def tearDown(self):
builders._exact_matches = self.old_exact_matches
super(TestRebaselineJson, self).tearDown()
def test_rebaseline_test_passes_on_all_builders(self):
self._setup_mock_builder_data()
def builder_data():
self.command._builder_data['MOCK builder'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"first-test.html": {
"expected": "NEEDSREBASELINE",
"actual": "PASS"
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_all(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_debug(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
def test_no_optimize(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
# Note that we have only one run_in_parallel() call
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']]])
def test_results_directory(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=False, verbose=True, results_directory='/tmp')
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have only one run_in_parallel() call
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']]])
class TestRebaselineJsonUpdatesExpectationsFiles(_BaseTestCase):
command_constructor = RebaselineJson
def setUp(self):
super(TestRebaselineJsonUpdatesExpectationsFiles, self).setUp()
self.tool.executive = MockExecutive2()
def mock_run_command(args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None):
return '{"add": [], "remove-lines": [{"test": "userscripts/first-test.html", "builder": "WebKit Mac10.7"}]}\n'
self.tool.executive.run_command = mock_run_command
def test_rebaseline_updates_expectations_file(self):
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mavericks MountainLion Retina SnowLeopard ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_updates_expectations_file_all_platforms(self):
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Android Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_handles_platform_skips(self):
# This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
# except that if a particular port happens to SKIP a test in an overrides file,
# we count that as passing, and do not think that we still need to rebaseline it.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
self._write("NeverFixTests", "Bug(y) [ Android ] userscripts [ Skip ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_handles_skips_in_file(self):
# This test is like test_rebaseline_handles_platform_skips, except that the
# Skip is in the same (generic) file rather than a platform file. In this case,
# the Skip line should be left unmodified. Note that the first line is now
# qualified as "[Linux Mac Win]"; if it was unqualified, it would conflict with
# the second line.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path,
("Bug(x) [ Linux Mac Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
"Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations,
("Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
"Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
def test_rebaseline_handles_smoke_tests(self):
# This test is just like test_rebaseline_handles_platform_skips, except that we check for
# a test not being in the SmokeTests file, instead of using overrides files.
# If a test is not part of the smoke tests, we count that as passing on ports that only
# run smoke tests, and do not think that we still need to rebaseline it.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
self._write("SmokeTests", "fast/html/article-element.html")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
class TestRebaseline(_BaseTestCase):
# This command shares most of its logic with RebaselineJson, so these tests just test what is different.
command_constructor = Rebaseline # AKA webkit-patch rebaseline
def test_rebaseline(self):
self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
self._write("userscripts/first-test.html", "test data")
self._zero_out_test_expectations()
self._setup_mock_builder_data()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts/first-test.html'], self.tool)
finally:
builders._exact_matches = old_exact_matches
calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
self.assertEqual(calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_directory(self):
self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
self._write("userscripts/first-test.html", "test data")
self._write("userscripts/second-test.html", "test data")
self._setup_mock_builder_data()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts'], self.tool)
finally:
builders._exact_matches = old_exact_matches
calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
self.assertEqual(calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']]])
class MockLineRemovingExecutive(MockExecutive):
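    """Test double for the executive: for rebaseline-test-internal commands it
    fabricates JSON output listing the expectation line to remove for that test
    and builder, and it groups the recorded calls the way run_in_parallel() does,
    so the calling command exercises its line-removal path."""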
def run_in_parallel(self, commands):
assert len(commands)
num_previous_calls = len(self.calls)
command_outputs = []
for cmd_line, cwd in commands:
out = self.run_command(cmd_line, cwd=cwd)
if 'rebaseline-test-internal' in cmd_line:
out = '{"add": [], "remove-lines": [{"test": "%s", "builder": "%s"}], "delete": []}\n' % (cmd_line[8], cmd_line[6])
command_outputs.append([0, out, ''])
new_calls = self.calls[num_previous_calls:]
self.calls = self.calls[:num_previous_calls]
self.calls.append(new_calls)
return command_outputs
class TestRebaselineExpectations(_BaseTestCase):
command_constructor = RebaselineExpectations
def setUp(self):
super(TestRebaselineExpectations, self).setUp()
self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None, results_directory=None)
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def _setup_test_port(self):
test_port = self.tool.port_factory.get('test')
original_get = self.tool.port_factory.get
def get_test_port(port_name=None, options=None, **kwargs):
if not port_name:
return test_port
return original_get(port_name, options, **kwargs)
# Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
# FIXME: crbug.com/279494 - we shouldn't be doing this.
self.tool.port_factory.get = get_test_port
return test_port
def test_rebaseline_expectations(self):
self._zero_out_test_expectations()
self.tool.executive = MockExecutive2()
def builder_data():
self.command._builder_data['MOCK SnowLeopard'] = self.command._builder_data['MOCK Leopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"another-test.html": {
"expected": "PASS",
"actual": "PASS TEXT"
},
"images.svg": {
"expected": "FAIL",
"actual": "IMAGE+TEXT"
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self._write("userscripts/another-test.html", "Dummy test contents")
self._write("userscripts/images.svg", "Dummy test contents")
self.command._tests_to_rebaseline = lambda port: {
'userscripts/another-test.html': set(['txt']),
'userscripts/images.svg': set(['png']),
'userscripts/not-actually-failing.html': set(['txt', 'png', 'wav']),
}
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(self.options, [], self.tool)
finally:
builders._exact_matches = old_exact_matches
# FIXME: change this to use the test- ports.
calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
self.assertEqual(self.tool.executive.calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
],
])
def test_rebaseline_expectations_noop(self):
self._zero_out_test_expectations()
oc = OutputCapture()
try:
oc.capture_output()
self.command.execute(self.options, [], self.tool)
finally:
_, _, logs = oc.restore_output()
self.assertEqual(self.tool.filesystem.written_files, {})
self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
def disabled_test_overrides_are_included_correctly(self):
# This tests that any tests marked as REBASELINE in the overrides are found, but
# that the overrides do not get written into the main file.
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, '')
self.lion_port.expectations_dict = lambda: {
self.lion_expectations_path: '',
'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
'Bug(y) userscripts/test.html [ Crash ]\n')}
self._write('/userscripts/another-test.html', '')
self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
self.assertEqual(self._read(self.lion_expectations_path), '')
def test_rebaseline_without_other_expectations(self):
self._write("userscripts/another-test.html", "Dummy test contents")
self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ Rebaseline ]\n")
self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': ('png', 'wav', 'txt')})
def test_rebaseline_test_passes_everywhere(self):
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ Rebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(self.options, [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ Rebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
class _FakeOptimizer(BaselineOptimizer):
def read_results_by_directory(self, baseline_name):
if baseline_name.endswith('txt'):
return {'LayoutTests/passes/text.html': '123456'}
return {}
class TestOptimizeBaselines(_BaseTestCase):
command_constructor = OptimizeBaselines
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def setUp(self):
super(TestOptimizeBaselines, self).setUp()
# FIXME: This is a hack to get the unittest and the BaselineOptimizer to both use /mock-checkout
# instead of one using /mock-checkout and one using /test-checkout.
default_port = self.tool.port_factory.get()
self.tool.port_factory.get = lambda port_name=None: default_port
def test_modify_scm(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
OutputCapture().assert_outputs(self, self.command.execute, args=[
MockOptions(suffixes='txt', no_modify_scm=False, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool,
], expected_stdout='{"add": [], "remove-lines": [], "delete": []}\n')
finally:
builders._exact_matches = old_exact_matches
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
def test_no_modify_scm(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
OutputCapture().assert_outputs(self, self.command.execute, args=[
MockOptions(suffixes='txt', no_modify_scm=True, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool,
], expected_stdout='{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt"]}\n')
finally:
builders._exact_matches = old_exact_matches
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
def test_optimize_all_suffixes_by_default(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.png', "result A png")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.png', "result A png")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
oc = OutputCapture()
oc.capture_output()
self.command.execute(MockOptions(suffixes='txt,wav,png', no_modify_scm=True, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool)
finally:
out, err, logs = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertEqual(out, '{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt", "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.png"]}\n')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.png')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.png')))
class TestAnalyzeBaselines(_BaseTestCase):
command_constructor = AnalyzeBaselines
def setUp(self):
super(TestAnalyzeBaselines, self).setUp()
self.port = self.tool.port_factory.get('test')
self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
self.lines = []
self.command._optimizer_class = _FakeOptimizer
self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable=W0108
def test_default(self):
self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.txt:',
' (generic): 123456'])
def test_missing_baselines(self):
self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.png: (no baselines found)',
'passes/text-expected.txt:',
' (generic): 123456'])
class TestAutoRebaseline(_BaseTestCase):
command_constructor = AutoRebaseline
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def _setup_test_port(self):
test_port = self.tool.port_factory.get('test')
original_get = self.tool.port_factory.get
def get_test_port(port_name=None, options=None, **kwargs):
if not port_name:
return test_port
return original_get(port_name, options, **kwargs)
# Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
# FIXME: crbug.com/279494 - we shouldn't be doing this.
self.tool.port_factory.get = get_test_port
return test_port
def setUp(self):
super(TestAutoRebaseline, self).setUp()
self.command.latest_revision_processed_on_all_bots = lambda: 9000
self.command.bot_revision_data = lambda: [{"builder": "Mock builder", "revision": "9000"}]
def test_release_builders(self):
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
"MOCK Leopard ASAN": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.assertEqual(self.command._release_builders(), ['MOCK Leopard'])
finally:
builders._exact_matches = old_exact_matches
def test_tests_to_rebaseline(self):
def blame(path):
return """
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) path/to/rebaseline-without-bug-number.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/rebaseline-with-modifiers.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 crbug.com/234 path/to/rebaseline-without-modifiers.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/rebaseline-new-revision.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
min_revision = 9000
self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
set(['path/to/rebaseline-without-bug-number.html', 'path/to/rebaseline-with-modifiers.html', 'path/to/rebaseline-without-modifiers.html']),
5678,
'foobarbaz1@chromium.org',
set(['24182', '234']),
True))
def test_tests_to_rebaseline_over_limit(self):
def blame(path):
result = ""
for i in range(0, self.command.MAX_LINES_TO_REBASELINE + 1):
result += "624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) crbug.com/24182 path/to/rebaseline-%s.html [ NeedsRebaseline ]\n" % i
return result
self.tool.scm().blame = blame
expected_list_of_tests = []
for i in range(0, self.command.MAX_LINES_TO_REBASELINE):
expected_list_of_tests.append("path/to/rebaseline-%s.html" % i)
min_revision = 9000
self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
set(expected_list_of_tests),
5678,
'foobarbaz1@chromium.org',
set(['24182']),
True))
def test_commit_message(self):
author = "foo@chromium.org"
revision = 1234
bugs = set()
self.assertEqual(self.command.commit_message(author, revision, bugs),
"""Auto-rebaseline for r1234
http://src.chromium.org/viewvc/blink?view=revision&revision=1234
TBR=foo@chromium.org
""")
bugs = set(["234", "345"])
self.assertEqual(self.command.commit_message(author, revision, bugs),
"""Auto-rebaseline for r1234
http://src.chromium.org/viewvc/blink?view=revision&revision=1234
BUG=234,345
TBR=foo@chromium.org
""")
def test_no_needs_rebaseline_lines(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
"""
self.tool.scm().blame = blame
self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
def test_execute(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) # Test NeedsRebaseline being in a comment doesn't bork parsing.
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
old_builder_data()
# have prototype-chocolate only fail on "MOCK Leopard".
self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
},
"prototype-chocolate.html": {
"expected": "FAIL",
"actual": "PASS"
},
"prototype-strawberry.html": {
"expected": "PASS",
"actual": "IMAGE PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self._write_test_file(test_port, 'fast/dom/prototype-strawberry.html', "Dummy test contents")
self._write_test_file(test_port, 'fast/dom/prototype-chocolate.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'closed'
self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
self.command.tree_status = lambda: 'open'
self.tool.executive.calls = []
self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'png', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html'],
],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'dcommit', '-f'],
['git', 'cl', 'set_close'],
])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ Linux Win ] fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
def test_execute_git_cl_hangs(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
old_builder_data()
# Have prototype-taco fail with a text mismatch on "MOCK SnowLeopard".
self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.SECONDS_BEFORE_GIVING_UP = 0
self.command.tree_status = lambda: 'open'
self.tool.executive.calls = []
self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
])
finally:
builders._exact_matches = old_exact_matches
def test_execute_test_passes_everywhere(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'open'
self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'dcommit', '-f'],
['git', 'cl', 'set_close'],
])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
class TestRebaselineOMatic(_BaseTestCase):
command_constructor = RebaselineOMatic
def setUp(self):
super(TestRebaselineOMatic, self).setUp()
self._logs = []
def _mock_log_to_server(self, log=''):
self._logs.append(log)
def test_run_logged_command(self):
self.command._verbose = False
self.command._post_log_to_server = self._mock_log_to_server
self.command._run_logged_command(['echo', 'foo'])
self.assertEqual(self.tool.executive.calls, [['echo', 'foo']])
self.assertEqual(self._logs, ['MOCK STDOUT'])
def test_do_one_rebaseline(self):
self.command._verbose = False
self.command._post_log_to_server = self._mock_log_to_server
oc = OutputCapture()
oc.capture_output()
self.command._do_one_rebaseline()
out, _, _ = oc.restore_output()
self.assertEqual(out, '')
self.assertEqual(self.tool.executive.calls, [
['git', 'pull'],
['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline'],
])
self.assertEqual(self._logs, ['MOCK STDOUT'])
def test_do_one_rebaseline_verbose(self):
self.command._verbose = True
self.command._post_log_to_server = self._mock_log_to_server
oc = OutputCapture()
oc.capture_output()
self.command._do_one_rebaseline()
out, _, _ = oc.restore_output()
self.assertEqual(out, 'MOCK STDOUT\n')
self.assertEqual(self.tool.executive.calls, [
['git', 'pull'],
['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline', '--verbose'],
])
self.assertEqual(self._logs, ['MOCK STDOUT'])
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.request_builder."""
import os
from typing import Any, Dict
import unittest
from unittest import mock
import tensorflow as tf
from tfx.components.infra_validator import request_builder
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import path_utils
from google.protobuf import json_format
# TODO(b/140306674): Stop using the internal TF API
from tensorflow.core.protobuf import meta_graph_pb2 # pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import saved_model_pb2 # pylint: disable=g-direct-tensorflow-import
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import regression_pb2
_TEST_DATA_ROOT = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
_CSV_EXAMPLE_GEN_URI = os.path.join(_TEST_DATA_ROOT, 'csv_example_gen')
_ESTIMATOR_MODEL_URI = os.path.join(_TEST_DATA_ROOT, 'trainer', 'current')
_KERAS_MODEL_URI = os.path.join(_TEST_DATA_ROOT, 'trainer', 'keras')
def _make_saved_model(payload: Dict[str, Any]):
result = saved_model_pb2.SavedModel()
json_format.ParseDict(payload, result)
return result
def _make_signature_def(payload: Dict[str, Any]):
result = meta_graph_pb2.SignatureDef()
json_format.ParseDict(payload, result)
return result
def _make_request_spec(payload: Dict[str, Any]):
result = infra_validator_pb2.RequestSpec()
json_format.ParseDict(payload, result)
return result
class TestParseSavedModelSignature(tf.test.TestCase):
def _MockSavedModel(self, saved_model_dict):
saved_model_proto = _make_saved_model(saved_model_dict)
saved_model_path = os.path.join(self.get_temp_dir(), 'saved_model.pb')
with open(saved_model_path, 'wb') as f:
f.write(saved_model_proto.SerializeToString())
return os.path.dirname(saved_model_path)
def testParseSavedModelSignature(self):
model_path = self._MockSavedModel({
'meta_graphs': [
{
'meta_info_def': {
'tags': ['serve']
},
'signature_def': {
'foo': {
'method_name': 'tensorflow/serving/predict',
'inputs': {
'x': {
'name': 'serving_default_input:0',
'dtype': 'DT_FLOAT',
'tensor_shape': {
'dim': [
{'size': -1},
{'size': 784},
]
}
}
},
'outputs': {
'y': {
'name': 'StatefulPartitionedCall:0',
'dtype': 'DT_FLOAT',
'tensor_shape': {
'dim': [
{'size': -1},
{'size': 10},
]
}
}
}
}
}
}
]
})
signatures = request_builder._parse_saved_model_signatures(
model_path, tag_set={'serve'}, signature_names=['foo'])
self.assertEqual(signatures['foo'].inputs['x'].dtype,
tf.dtypes.float32.as_datatype_enum)
self.assertEqual(signatures['foo'].inputs['x'].tensor_shape,
tf.TensorShape([None, 784]).as_proto())
self.assertEqual(signatures['foo'].outputs['y'].dtype,
tf.dtypes.float32.as_datatype_enum)
self.assertEqual(signatures['foo'].outputs['y'].tensor_shape,
tf.TensorShape([None, 10]).as_proto())
def testParseSavedModelSignature_FailIfNoMetaGraph(self):
model_path = self._MockSavedModel({
'meta_graphs': []
})
with self.assertRaisesRegex(
RuntimeError,
'MetaGraphDef associated with tags .* could not be found'):
request_builder._parse_saved_model_signatures(
model_path, tag_set={'serve'}, signature_names=['foo'])
def testParseSavedModelSignature_FailIfTagSetNotMatch(self):
model_path = self._MockSavedModel({
'meta_graphs': [
{
'meta_info_def': {
'tags': ['a', 'b']
}
}
]
})
with self.assertRaisesRegex(
RuntimeError,
'MetaGraphDef associated with tags .* could not be found'):
request_builder._parse_saved_model_signatures(
model_path, tag_set={'a', 'c'}, signature_names=['foo'])
def testParseSavedModelSignature_FailIfSignatureNotFound(self):
model_path = self._MockSavedModel({
'meta_graphs': [
{
'meta_info_def': {
'tags': ['serve']
},
'signature_def': {
'foo': {}
}
}
]
})
with self.assertRaisesRegex(
ValueError, 'SignatureDef of name bar could not be found'):
request_builder._parse_saved_model_signatures(
model_path, tag_set={'serve'}, signature_names=['foo', 'bar'])
def testParseSavedModelSignature_DefaultTagSet(self):
model_path = self._MockSavedModel({
'meta_graphs': [
{
'meta_info_def': {
'tags': ['serve']
},
'signature_def': {
'foo': {}
}
}
]
})
signatures = request_builder._parse_saved_model_signatures(
model_path, tag_set=set(), signature_names=['foo'])
self.assertTrue(signatures)
def testParseSavedModelSignature_DefaultSignatureName(self):
model_path = self._MockSavedModel({
'meta_graphs': [
{
'meta_info_def': {
'tags': ['foo']
},
'signature_def': {
'serving_default': {},
}
}
]
})
signatures = request_builder._parse_saved_model_signatures(
model_path, tag_set={'foo'}, signature_names=[])
self.assertTrue(signatures)
class _MockBuilder(request_builder._BaseRequestBuilder):
def BuildRequests(self):
raise NotImplementedError()
class BaseRequestBuilderTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._examples = standard_artifacts.Examples()
self._examples.uri = _CSV_EXAMPLE_GEN_URI
self._examples.split_names = artifact_utils.encode_split_names(
['train', 'eval'])
def testReadExamplesArtifact(self):
builder = _MockBuilder()
builder.ReadExamplesArtifact(self._examples, num_examples=1)
self.assertEqual(len(builder._records), 1)
self.assertIsInstance(builder._records[0], bytes)
def testReadExamplesArtifact_FailIfSplitNamesEmpty(self):
builder = _MockBuilder()
examples = standard_artifacts.Examples()
examples.uri = self._examples.uri
with self.assertRaises(ValueError):
builder.ReadExamplesArtifact(examples, num_examples=1)
def testReadExamplesArtifact_FailIfSplitNameInvalid(self):
builder = _MockBuilder()
with self.assertRaises(ValueError):
builder.ReadExamplesArtifact(self._examples, num_examples=1,
split_name='non-existing-split')
def testReadExamplesArtifact_FailReadTwice(self):
builder = _MockBuilder()
builder.ReadExamplesArtifact(self._examples, num_examples=1)
with self.assertRaises(RuntimeError):
builder.ReadExamplesArtifact(self._examples, num_examples=1)
class TFServingRpcRequestBuilderTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._examples = standard_artifacts.Examples()
self._examples.uri = _CSV_EXAMPLE_GEN_URI
self._examples.split_names = artifact_utils.encode_split_names(
['train', 'eval'])
def _GetEstimatorModelSignature(self, signature_names=()):
model_path = path_utils.serving_model_path(_ESTIMATOR_MODEL_URI)
return request_builder._parse_saved_model_signatures(
model_path, tag_set={'serve'}, signature_names=signature_names)
def _GetKerasModelSignature(self):
model_path = path_utils.serving_model_path(_KERAS_MODEL_URI)
return request_builder._parse_saved_model_signatures(
model_path, tag_set={'serve'}, signature_names=['serving_default'])
@unittest.skipIf(
tf.__version__ < '2',
'The test uses testdata only compatible with TF2.')
def testBuildRequests_EstimatorModel_ServingDefault(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures=self._GetEstimatorModelSignature())
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], classification_pb2.ClassificationRequest)
self.assertEqual(result[0].model_spec.name, 'foo')
self.assertEqual(result[0].model_spec.signature_name, 'serving_default')
@unittest.skipIf(
tf.__version__ < '2',
'The test uses testdata only compatible with TF2.')
def testBuildRequests_EstimatorModel_Classification(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures=self._GetEstimatorModelSignature(
signature_names=['classification']))
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], classification_pb2.ClassificationRequest)
self.assertEqual(result[0].model_spec.name, 'foo')
self.assertEqual(result[0].model_spec.signature_name, 'classification')
@unittest.skipIf(
tf.__version__ < '2',
'The test uses testdata only compatible with TF2.')
def testBuildRequests_EstimatorModel_Regression(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures=self._GetEstimatorModelSignature(
signature_names=['regression']))
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], regression_pb2.RegressionRequest)
self.assertEqual(result[0].model_spec.name, 'foo')
self.assertEqual(result[0].model_spec.signature_name, 'regression')
@unittest.skipIf(
tf.__version__ < '2',
'The test uses testdata only compatible with TF2.')
def testBuildRequests_EstimatorModel_Predict(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures=self._GetEstimatorModelSignature(
signature_names=['predict']))
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], predict_pb2.PredictRequest)
self.assertEqual(result[0].model_spec.name, 'foo')
self.assertEqual(result[0].model_spec.signature_name, 'predict')
self.assertEqual(len(result[0].inputs), 1)
input_key = list(result[0].inputs.keys())[0]
self.assertEqual(result[0].inputs[input_key].dtype,
tf.dtypes.string.as_datatype_enum)
@unittest.skipIf(
tf.__version__ < '2',
'The test uses testdata only compatible with TF2.')
def testBuildRequests_KerasModel(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures=self._GetKerasModelSignature())
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], predict_pb2.PredictRequest)
self.assertEqual(result[0].model_spec.name, 'foo')
self.assertEqual(result[0].model_spec.signature_name, 'serving_default')
def testBuildRequests_PredictMethod(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures={
# Has only one argument with dtype=DT_STRING and shape=(None,).
# This is the only valid form that InfraValidator accepts today.
'serving_default': _make_signature_def({
'method_name': 'tensorflow/serving/predict',
'inputs': {
'x': {
'name': 'serving_default_examples:0',
'dtype': 'DT_STRING',
'tensor_shape': {
'dim': [
{'size': -1},
]
}
}
},
'outputs': {
'y': {
'name': 'StatefulPartitionedCall:0',
'dtype': 'DT_FLOAT',
'tensor_shape': {
'dim': [
{'size': -1},
{'size': 10},
]
}
}
},
})
})
builder.ReadExamplesArtifact(self._examples, num_examples=1)
result = builder.BuildRequests()
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], predict_pb2.PredictRequest)
self.assertEqual(result[0].inputs['x'].dtype,
tf.dtypes.string.as_datatype_enum)
def testBuildRequests_PredictMethod_FailOnInvalidSignature(self):
builder = request_builder._TFServingRpcRequestBuilder(
model_name='foo',
signatures={
# Signature argument is not for serialized tf.Example (i.e. dtype !=
# DT_STRING or shape != (None,)).
'serving_default': _make_signature_def({
'method_name': 'tensorflow/serving/predict',
'inputs': {
'x': {
'name': 'serving_default_input:0',
'dtype': 'DT_FLOAT',
'tensor_shape': {
'dim': [
{'size': -1},
{'size': 784},
]
}
}
},
'outputs': {
'y': {
'name': 'StatefulPartitionedCall:0',
'dtype': 'DT_FLOAT',
'tensor_shape': {
'dim': [
{'size': -1},
{'size': 10},
]
}
}
},
})
})
builder.ReadExamplesArtifact(self._examples, num_examples=1)
with self.assertRaisesRegex(
ValueError, 'Unable to find valid input key from SignatureDef'):
builder.BuildRequests()
class TestBuildRequests(tf.test.TestCase):
def setUp(self):
super().setUp()
self._model_name = 'foo'
self._examples = standard_artifacts.Examples()
self._examples.uri = _CSV_EXAMPLE_GEN_URI
self._examples.split_names = artifact_utils.encode_split_names(
['train', 'eval'])
self._model = standard_artifacts.Model()
self._model.uri = _ESTIMATOR_MODEL_URI
def _PrepareTFServingRequestBuilder(self):
patcher = mock.patch.object(
request_builder, '_TFServingRpcRequestBuilder',
wraps=request_builder._TFServingRpcRequestBuilder)
builder_cls = patcher.start()
self.addCleanup(patcher.stop)
return builder_cls
def testBuildRequests_TFServing(self):
builder_cls = self._PrepareTFServingRequestBuilder()
builder = builder_cls.return_value
request_builder.build_requests(
model_name='foo',
model=self._model,
examples=self._examples,
request_spec=_make_request_spec({
'tensorflow_serving': {
'signature_names': ['serving_default']
},
'split_name': 'eval',
'num_examples': 1
})
)
builder_cls.assert_called_with(
model_name='foo',
signatures={'serving_default': mock.ANY})
builder.ReadExamplesArtifact.assert_called_with(
self._examples,
split_name='eval',
num_examples=1)
builder.BuildRequests.assert_called()
def testBuildRequests_NumberOfRequests(self):
result = request_builder.build_requests(
model_name='foo',
model=self._model,
examples=self._examples,
request_spec=_make_request_spec({
'tensorflow_serving': {
'signature_names': ['classification', 'regression']
},
'split_name': 'eval',
'num_examples': 3
})
)
# Total 6 requests (3 requests for each signature)
self.assertEqual(len(result), 6)
self.assertEqual(
len([r for r in result
if r.model_spec.signature_name == 'classification']), 3)
self.assertEqual(
len([r for r in result
if r.model_spec.signature_name == 'regression']), 3)
def testBuildRequests_DefaultArgument(self):
builder_cls = self._PrepareTFServingRequestBuilder()
builder = builder_cls.return_value
request_builder.build_requests(
model_name='foo',
model=self._model,
examples=self._examples,
request_spec=_make_request_spec({
'tensorflow_serving': {
# 'signature_names': ['serving_default']
},
# 'split_name': 'eval',
# 'num_examples': 1
})
)
builder.ReadExamplesArtifact.assert_called_with(
self._examples,
split_name=None, # Without split_name (will choose any split).
num_examples=1) # Default num_examples = 1.
if __name__ == '__main__':
tf.test.main()
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the form of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and a serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and a deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import six
if six.PY3:
long = int
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def SupportsOpenEnums(field_descriptor):
return field_descriptor.containing_type.syntax == "proto3"
def GetTypeChecker(field):
"""Returns a type checker for a message field of the specified types.
Args:
field: FieldDescriptor object for this field.
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field.type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
if SupportsOpenEnums(field):
# When open enums are supported, any int32 can be assigned.
return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
else:
return EnumValueChecker(field.enum_type)
return _VALUE_CHECKERS[field.cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
"""Type check the provided value and return it.
The returned value might have been normalized to another type.
"""
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
return proposed_value
class TypeCheckerWithDefault(TypeChecker):
def __init__(self, default_value, *acceptable_types):
TypeChecker.__init__(self, *acceptable_types)
self._default_value = default_value
def DefaultValue(self):
return self._default_value
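# A minimal usage sketch (not part of the original module), assuming only the
# checker classes defined above: TypeCheckerWithDefault both validates the type
# of a proposed value and supplies the field's default.
def _example_type_checker_usage():  # pragma: no cover - illustrative only
  float_checker = TypeCheckerWithDefault(0.0, float, int, long)
  assert float_checker.CheckValue(3) == 3     # ints are acceptable for floats
  assert float_checker.DefaultValue() == 0.0
  try:
    float_checker.CheckValue('3')             # non-numeric values are rejected
  except TypeError:
    pass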
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, six.integer_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), six.integer_types))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
proposed_value = self._TYPE(proposed_value)
return proposed_value
def DefaultValue(self):
return 0
class EnumValueChecker(object):
"""Checker used for enum fields. Performs type-check and range check."""
def __init__(self, enum_type):
self._enum_type = enum_type
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, six.integer_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), six.integer_types))
raise TypeError(message)
if proposed_value not in self._enum_type.values_by_number:
raise ValueError('Unknown enum value: %d' % proposed_value)
return proposed_value
def DefaultValue(self):
return self._enum_type.values[0].number
class UnicodeValueChecker(object):
"""Checker used for string fields.
Always returns a unicode value, even if the input is of type str.
"""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (bytes, six.text_type)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (bytes, six.text_type)))
raise TypeError(message)
# If the value is of type 'bytes' make sure that it is valid UTF-8 data.
if isinstance(proposed_value, bytes):
try:
proposed_value = proposed_value.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
'encoding. Non-UTF-8 strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
return proposed_value
def DefaultValue(self):
return u""
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
_TYPE = int
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
_TYPE = int
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
_TYPE = long
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
_TYPE = long
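# Illustrative sketch (not part of the original module): the integer checkers
# above enforce both the Python type and the wire-level value range.
def _example_int_range_checks():  # pragma: no cover - illustrative only
  checker = Int32ValueChecker()
  assert checker.CheckValue(2147483647) == 2147483647   # max int32 is allowed
  try:
    checker.CheckValue(2147483648)                       # one past the range
  except ValueError:
    pass
  try:
    checker.CheckValue(1.5)                              # floats are rejected
  except TypeError:
    pass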
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
0.0, float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeCheckerWithDefault(
0.0, float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
False, bool, int),
_FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
}
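# Illustrative sketch (not part of the original module): a checker from the
# map above validates a proposed field value and may normalize it, e.g.
# forcing a 32-bit value to int; out-of-range values raise ValueError.
def _ExampleUseValueChecker():
  checker = _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
  value = checker.CheckValue(42)      # type- and range-checked, returns 42
  default = checker.DefaultValue()    # 0 for integer fields
  try:
    checker.CheckValue(1 << 40)       # outside the signed 32-bit range
  except ValueError:
    pass
  return value, default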
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
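# Illustrative sketch (not part of the original module): as described above,
# each entry is a function F(field_num, value); the returned size includes
# the tag bytes as well as the encoded value.
def _ExampleInt32ByteSize(field_number, value):
  size_fn = TYPE_TO_BYTE_SIZE_FN[_FieldDescriptor.TYPE_INT32]
  return size_fn(field_number, value)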
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
|
|
import calendar
import urlparse
import re
import time_util
import struct
import base64
# Also defined in saml2.saml but can't import from there
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
XSI_NIL = '{%s}nil' % XSI_NAMESPACE
# ---------------------------------------------------------
class NotValid(Exception):
pass
class OutsideCardinality(Exception):
pass
class MustValueError(ValueError):
pass
class ShouldValueError(ValueError):
pass
# --------------------- validators -------------------------------------
#
NCNAME = re.compile("(?P<NCName>[a-zA-Z_](\w|[_.-])*)")
def valid_ncname(name):
match = NCNAME.match(name)
if not match:
raise NotValid("NCName")
return True
def valid_id(oid):
valid_ncname(oid)
def valid_any_uri(item):
"""very simplistic, ..."""
try:
part = urlparse.urlparse(item)
except Exception:
raise NotValid("AnyURI")
if part[0] == "urn" and part[1] == "": # A urn
return True
# elif part[1] == "localhost" or part[1] == "127.0.0.1":
# raise NotValid("AnyURI")
return True
def valid_date_time(item):
try:
time_util.str_to_time(item)
except Exception:
raise NotValid("dateTime")
return True
def valid_url(url):
try:
_ = urlparse.urlparse(url)
except Exception:
raise NotValid("URL")
# if part[1] == "localhost" or part[1] == "127.0.0.1":
# raise NotValid("URL")
return True
def validate_on_or_after(not_on_or_after, slack):
if not_on_or_after:
now = time_util.utc_now()
nooa = calendar.timegm(time_util.str_to_time(not_on_or_after))
if now > nooa + slack:
raise Exception("Can't use it, it's too old %d > %d" %
(nooa, now))
return nooa
else:
return False
def validate_before(not_before, slack):
if not_before:
now = time_util.utc_now()
nbefore = calendar.timegm(time_util.str_to_time(not_before))
if nbefore > now + slack:
raise Exception("Can't use it yet %d <= %d" % (nbefore, now))
return True
def valid_address(address):
if not (valid_ipv4(address) or valid_ipv6(address)):
raise NotValid("address")
return True
def valid_ipv4(address):
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
raise NotValid("ipv4")
except ValueError:
return False
return True
#
IPV6_PATTERN = re.compile(r"""
^
\s* # Leading whitespace
(?!.*::.*::) # Only a single wildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most four hexadecimal digits
        (?:(?<=::)|(?<!::):)  # Colon unless preceded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
        (?:(?<=::)|(?<!::):)  # Colon unless preceded by wildcard
[0-9a-f]{0,4} # Last group
        (?: (?<=::)           # Colon iff preceded by exactly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
def valid_ipv6(address):
"""Validates IPv6 addresses. """
return IPV6_PATTERN.match(address) is not None
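# Illustrative sketch (not part of the original module): the address
# validators return True on success and raise NotValid otherwise, so
# callers typically wrap them in try/except rather than testing a flag.
def _example_valid_address():
    assert valid_ipv4("192.168.0.1")
    assert valid_ipv6("::1")
    try:
        valid_address("not-an-address")
    except NotValid:
        pass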
def valid_boolean(val):
vall = val.lower()
if vall in ["true", "false", "0", "1"]:
return True
else:
raise NotValid("boolean")
def valid_duration(val):
try:
time_util.parse_duration(val)
except Exception:
raise NotValid("duration")
return True
def valid_string(val):
""" Expects unicode
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] |
[#x10000-#x10FFFF]
"""
for char in val:
try:
char = ord(char)
except TypeError:
raise NotValid("string")
if char == 0x09 or char == 0x0A or char == 0x0D:
continue
elif 0x20 <= char <= 0xD7FF:
continue
elif 0xE000 <= char <= 0xFFFD:
continue
elif 0x10000 <= char <= 0x10FFFF:
continue
else:
raise NotValid("string")
return True
def valid_unsigned_short(val):
try:
struct.pack("H", int(val))
except struct.error:
raise NotValid("unsigned short")
except ValueError:
raise NotValid("unsigned short")
return True
def valid_non_negative_integer(val):
try:
integer = int(val)
except ValueError:
raise NotValid("non negative integer")
if integer < 0:
raise NotValid("non negative integer")
return True
def valid_integer(val):
try:
int(val)
except ValueError:
raise NotValid("integer")
return True
def valid_base64(val):
try:
base64.b64decode(val)
except Exception:
raise NotValid("base64")
return True
def valid_qname(val):
""" A qname is either
NCName or
NCName ':' NCName
"""
try:
(prefix, localpart) = val.split(":")
return valid_ncname(prefix) and valid_ncname(localpart)
except ValueError:
return valid_ncname(val)
def valid_anytype(val):
""" Goes through all known type validators
:param val: The value to validate
:return: True is value is valid otherwise an exception is raised
"""
for validator in VALIDATOR.values():
try:
if validator(val):
return True
except NotValid:
pass
if isinstance(val, type):
return True
raise NotValid("AnyType")
# -----------------------------------------------------------------------------
VALIDATOR = {
"ID": valid_id,
"NCName": valid_ncname,
"dateTime": valid_date_time,
"anyURI": valid_any_uri,
"nonNegativeInteger": valid_non_negative_integer,
"boolean": valid_boolean,
"unsignedShort": valid_unsigned_short,
"duration": valid_duration,
"base64Binary": valid_base64,
"integer": valid_integer,
"QName": valid_qname,
"anyType": valid_anytype,
"string": valid_string,
}
# -----------------------------------------------------------------------------
def validate_value_type(value, spec):
"""
c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny',
'Indeterminate']}
{'member': 'anyURI', 'base': 'list'}
{'base': 'anyURI'}
{'base': 'NCName'}
{'base': 'string'}
"""
if "maxlen" in spec:
return len(value) <= spec["maxlen"]
if spec["base"] == "string":
if "enumeration" in spec:
if value not in spec["enumeration"]:
raise NotValid("value not in enumeration")
else:
return valid_string(value)
elif spec["base"] == "list": # comma separated list of values
for val in [v.strip() for v in value.split(",")]:
valid(spec["member"], val)
else:
return valid(spec["base"], value)
return True
def valid(typ, value):
try:
return VALIDATOR[typ](value)
except KeyError:
try:
(_namespace, typ) = typ.split(":")
except ValueError:
if typ == "":
typ = "string"
return VALIDATOR[typ](value)
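# Illustrative sketch (not part of the original module): checking a value
# against a c_value_type spec like the enumeration example shown in the
# validate_value_type docstring above.
def _example_validate_value_type():
    spec = {"base": "string",
            "enumeration": ["Permit", "Deny", "Indeterminate"]}
    validate_value_type("Permit", spec)     # allowed value, passes quietly
    try:
        validate_value_type("Allow", spec)  # not in the enumeration
    except NotValid:
        pass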
def _valid_instance(instance, val):
try:
val.verify()
except NotValid, exc:
raise NotValid("Class '%s' instance: %s" % (
instance.__class__.__name__, exc.args[0]))
except OutsideCardinality, exc:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
instance.__class__.__name__, exc.args[0]))
ERROR_TEXT = "Wrong type of value '%s' on attribute '%s' expected it to be %s"
def valid_instance(instance):
instclass = instance.__class__
class_name = instclass.__name__
# if instance.text:
# _has_val = True
# else:
# _has_val = False
if instclass.c_value_type and instance.text:
try:
validate_value_type(instance.text.strip(),
instclass.c_value_type)
except NotValid, exc:
raise NotValid("Class '%s' instance: %s" % (class_name,
exc.args[0]))
for (name, typ, required) in instclass.c_attributes.values():
value = getattr(instance, name, '')
if required and not value:
txt = "Required value on property '%s' missing" % name
raise MustValueError("Class '%s' instance: %s" % (class_name, txt))
if value:
try:
if isinstance(typ, type):
if typ.c_value_type:
spec = typ.c_value_type
else:
spec = {"base": "string"} # do I need a default
validate_value_type(value, spec)
else:
valid(typ, value)
except (NotValid, ValueError), exc:
txt = ERROR_TEXT % (value, name, exc.args[0])
raise NotValid("Class '%s' instance: %s" % (class_name, txt))
for (name, _spec) in instclass.c_children.values():
value = getattr(instance, name, '')
try:
_card = instclass.c_cardinality[name]
try:
_cmin = _card["min"]
except KeyError:
_cmin = None
try:
_cmax = _card["max"]
except KeyError:
_cmax = None
except KeyError:
_cmin = _cmax = _card = None
if value:
#_has_val = True
if isinstance(value, list):
_list = True
vlen = len(value)
else:
_list = False
vlen = 1
if _card:
if _cmin is not None and _cmin > vlen:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "less then min (%s<%s)" % (vlen,
_cmin)))
if _cmax is not None and vlen > _cmax:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "more then max (%s>%s)" % (vlen,
_cmax)))
if _list:
for val in value:
# That it is the right class is handled elsewhere
_valid_instance(instance, val)
else:
_valid_instance(instance, value)
else:
if _cmin:
raise NotValid(
"Class '%s' instance cardinality error: %s" % (
class_name, "too few values on %s" % name))
# if not _has_val:
# if class_name != "RequestedAttribute":
# # Not allow unless xsi:nil="true"
# assert instance.extension_attributes
# assert instance.extension_attributes[XSI_NIL] == "true"
return True
def valid_domain_name(dns_name):
    m = re.match(
        r"^[a-z0-9]+([-.][a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?$",
        dns_name, re.IGNORECASE)
if not m:
raise ValueError("Not a proper domain name")
|
|
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import socket
import ssl
import string
import struct
from eventlet import patcher
import OpenSSL
from oslo_log import log as logging
import six
from six.moves import http_client
from six.moves import urllib
from cinder.i18n import _, _LI
# Handle case where we are running in a monkey patched environment
if patcher.is_monkey_patched('socket'):
from eventlet.green.OpenSSL import SSL
else:
raise ImportError
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
LOG = logging.getLogger(__name__)
def to_bytes(s):
if isinstance(s, six.string_types):
return six.b(s)
else:
return s
def get_default_ca_certs():
"""Gets the default CA certificates if found, otherwise None.
    Try to find a system path containing CA certificates. The path is cached
    and returned. If no path is found, None is returned.
"""
if not hasattr(get_default_ca_certs, '_path'):
for path in (
'/etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt',
'/etc/ssl/certs',
'/etc/ssl/certificates'):
if os.path.exists(path):
get_default_ca_certs._path = path
break
else:
get_default_ca_certs._path = None
return get_default_ca_certs._path
class OpenSSLConnectionDelegator(object):
"""An OpenSSL.SSL.Connection delegator.
Supplies an additional 'makefile' method which http_client requires
and is not present in OpenSSL.SSL.Connection.
Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
self.connection = SSL.GreenConnection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)
def makefile(self, *args, **kwargs):
return socket._fileobject(self.connection, *args, **kwargs)
class HTTPSConnection(http_client.HTTPSConnection):
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, ca_certs=None, no_verification=False):
if not pywbemAvailable:
LOG.info(_LI(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.'))
if six.PY3:
excp_lst = (TypeError, ssl.SSLError)
else:
excp_lst = ()
try:
http_client.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = None if key_file is None else key_file
self.cert_file = None if cert_file is None else cert_file
self.insecure = no_verification
self.ca_certs = (
None if ca_certs is None else six.text_type(ca_certs))
self.set_context()
# ssl exceptions are reported in various form in Python 3
# so to be compatible, we report the same kind as under
# Python2
except excp_lst as e:
raise pywbem.cim_http.Error(six.text_type(e))
@staticmethod
def host_matches_cert(host, x509):
"""Verify that the certificate matches host.
Verify that the x509 certificate we have received
from 'host' correctly identifies the server we are
        connecting to, i.e. that the certificate's Common Name
or a Subject Alternative Name matches 'host'.
"""
def check_match(name):
# Directly match the name.
if name == host:
return True
# Support single wildcard matching.
if name.startswith('*.') and host.find('.') > 0:
if name[2:] == host.split('.', 1)[1]:
return True
common_name = x509.get_subject().commonName
# First see if we can match the CN.
if check_match(common_name):
return True
# Also try Subject Alternative Names for a match.
san_list = None
for i in range(x509.get_extension_count()):
ext = x509.get_extension(i)
if ext.get_short_name() == b'subjectAltName':
san_list = six.text_type(ext)
for san in ''.join(san_list.split()).split(','):
if san.startswith('DNS:'):
if check_match(san.split(':', 1)[1]):
return True
# Server certificate does not match host.
msg = (_("Host %(host)s does not match x509 certificate contents: "
"CommonName %(commonName)s.")
% {'host': host,
'commonName': common_name})
if san_list is not None:
msg = (_("%(message)s, subjectAltName: %(sanList)s.")
% {'message': msg,
'sanList': san_list})
raise pywbem.cim_http.AuthError(msg)
def verify_callback(self, connection, x509, errnum,
depth, preverify_ok):
if x509.has_expired():
            msg = (_("SSL Certificate expired on %s.")
                   % x509.get_notAfter())
raise pywbem.cim_http.AuthError(msg)
if depth == 0 and preverify_ok:
# We verify that the host matches against the last
# certificate in the chain.
return self.host_matches_cert(self.host, x509)
else:
# Pass through OpenSSL's default result.
return preverify_ok
def set_context(self):
"""Set up the OpenSSL context."""
self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
if self.insecure is not True:
self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
self.verify_callback)
else:
self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
lambda *args: True)
if self.cert_file:
try:
self.context.use_certificate_file(self.cert_file)
except Exception as e:
msg = (_("Unable to load cert from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.key_file is None:
                # We support having the key and cert in the same file.
try:
self.context.use_privatekey_file(self.cert_file)
except Exception as e:
msg = (_("No key file specified and unable to load key "
"from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.key_file:
try:
self.context.use_privatekey_file(self.key_file)
except Exception as e:
msg = (_("Unable to load key from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.ca_certs:
try:
self.context.load_verify_locations(to_bytes(self.ca_certs))
except Exception as e:
msg = (_("Unable to load CA from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
else:
self.context.set_default_verify_paths()
def connect(self):
result = socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM)
if result:
socket_family = result[0][0]
if socket_family == socket.AF_INET6:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
            # If the address lookup fails for some reason, we still
            # connect to an IPv4 socket. This retains the older behavior.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
# '0' microseconds
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
struct.pack('LL', 0, 0))
self.sock = OpenSSLConnectionDelegator(self.context, sock)
self.sock.connect((self.host, self.port))
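# Illustrative sketch (not part of this module): the CN / wildcard matching
# rule used by host_matches_cert above, exercised with a minimal stand-in
# for the pyOpenSSL X509 object (only the attributes the check touches).
class _FakeX509Subject(object):
    commonName = '*.example.com'
class _FakeX509(object):
    def get_subject(self):
        return _FakeX509Subject()
    def get_extension_count(self):
        return 0
def _example_host_matches_cert():
    # '*.example.com' covers any single label under example.com; a
    # mismatching host would raise pywbem.cim_http.AuthError instead.
    return HTTPSConnection.host_matches_cert('host.example.com', _FakeX509())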
def wbem_request(url, data, creds, headers=None, debug=0, x509=None,
verify_callback=None, ca_certs=None,
no_verification=False):
"""Send request over HTTP.
Send XML data over HTTP to the specified url. Return the
    response in XML. Uses Python's built-in http_client. x509 may be a
dictionary containing the location of the SSL certificate and key
files.
"""
if headers is None:
headers = []
host, port, use_ssl = pywbem.cim_http.parse_url(url)
key_file = None
cert_file = None
if use_ssl and x509 is not None:
cert_file = x509.get('cert_file')
key_file = x509.get('key_file')
numTries = 0
localAuthHeader = None
tryLimit = 5
if isinstance(data, six.text_type):
data = data.encode('utf-8')
data = '<?xml version="1.0" encoding="utf-8" ?>\n' + data
if not no_verification and ca_certs is None:
ca_certs = get_default_ca_certs()
elif no_verification:
ca_certs = None
if use_ssl:
h = HTTPSConnection(
host,
port=port,
key_file=key_file,
cert_file=cert_file,
ca_certs=ca_certs,
no_verification=no_verification)
locallogin = None
while numTries < tryLimit:
numTries = numTries + 1
h.putrequest('POST', '/cimom')
h.putheader('Content-type', 'application/xml; charset="utf-8"')
h.putheader('Content-length', len(data))
if localAuthHeader is not None:
h.putheader(*localAuthHeader)
elif creds is not None:
h.putheader('Authorization', 'Basic %s' %
base64.encodestring('%s:%s' % (creds[0], creds[1]))
.replace('\n', ''))
elif locallogin is not None:
h.putheader('PegasusAuthorization', 'Local "%s"' % locallogin)
for hdr in headers:
if isinstance(hdr, six.text_type):
hdr = hdr.encode('utf-8')
s = map(lambda x: string.strip(x), string.split(hdr, ":", 1))
h.putheader(urllib.parse.quote(s[0]), urllib.parse.quote(s[1]))
try:
h.endheaders()
try:
h.send(data)
except socket.error as arg:
if arg[0] != 104 and arg[0] != 32:
raise
response = h.getresponse()
body = response.read()
if response.status != 200:
raise pywbem.cim_http.Error('HTTP error')
except http_client.BadStatusLine as arg:
msg = (_("Bad Status line returned: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
except socket.error as arg:
msg = (_("Socket error:: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
except socket.sslerror as arg:
msg = (_("SSL error: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
break
return body
|
|
from __future__ import absolute_import
import os
import re
import six
import time
import logging
import posixpath
from sentry.models import Project, EventError
from sentry.plugins import Plugin2
from sentry.lang.native.symbolizer import Symbolizer
from sentry.lang.native.utils import find_all_stacktraces, \
find_apple_crash_report_referenced_images, get_sdk_from_event, \
find_stacktrace_referenced_images, get_sdk_from_apple_system_info, \
APPLE_SDK_MAPPING
from sentry.utils.native import parse_addr
logger = logging.getLogger(__name__)
model_re = re.compile(r'^(\S+?)\d')
APP_BUNDLE_PATHS = (
'/var/containers/Bundle/Application/',
'/private/var/containers/Bundle/Application/',
)
SIM_PATH = '/Developer/CoreSimulator/Devices/'
SIM_APP_PATH = '/Containers/Bundle/Application/'
NON_APP_FRAMEWORKS = (
'/Frameworks/libswiftCore.dylib',
)
SIGNAL_NAMES = {
1: 'SIGHUP',
2: 'SIGINT',
3: 'SIGQUIT',
4: 'SIGILL',
5: 'SIGTRAP',
6: 'SIGABRT',
7: 'SIGEMT',
8: 'SIGFPE',
9: 'SIGKILL',
10: 'SIGBUS',
11: 'SIGSEGV',
12: 'SIGSYS',
13: 'SIGPIPE',
14: 'SIGALRM',
15: 'SIGTERM',
16: 'SIGURG',
17: 'SIGSTOP',
18: 'SIGTSTP',
19: 'SIGCONT',
20: 'SIGCHLD',
21: 'SIGTTIN',
22: 'SIGTTOU',
24: 'SIGXCPU',
25: 'SIGXFSZ',
26: 'SIGVTALRM',
27: 'SIGPROF',
28: 'SIGWINCH',
29: 'SIGINFO',
31: 'SIGUSR2',
}
def append_error(data, err):
data.setdefault('errors', []).append(err)
def process_posix_signal(data):
signal = data.get('signal', -1)
signal_name = data.get('name')
if signal_name is None:
signal_name = SIGNAL_NAMES.get(signal)
return {
'signal': signal,
'name': signal_name,
'code': data.get('code'),
'code_name': data.get('code_name'),
}
def exception_from_apple_error_or_diagnosis(error, diagnosis=None):
rv = {}
error = error or {}
mechanism = {}
if 'mach' in error:
mechanism['mach_exception'] = error['mach']
if 'signal' in error:
mechanism['posix_signal'] = process_posix_signal(error['signal'])
if mechanism:
mechanism.setdefault('type', 'cocoa')
rv['mechanism'] = mechanism
# Start by getting the error from nsexception
if error:
nsexception = error.get('nsexception')
if nsexception:
rv['type'] = nsexception['name']
if 'value' in nsexception:
rv['value'] = nsexception['value']
# If we don't have an error yet, try to build one from reason and
# diagnosis
if 'value' not in rv:
if 'reason' in error:
rv['value'] = error['reason']
elif 'diagnosis' in error:
rv['value'] = error['diagnosis']
elif 'mach_exception' in mechanism:
rv['value'] = mechanism['mach_exception'] \
.get('exception_name') or 'Mach Exception'
elif 'posix_signal' in mechanism:
rv['value'] = mechanism['posix_signal'] \
.get('name') or 'Posix Signal'
else:
rv['value'] = 'Unknown'
# Figure out a reasonable type
if 'type' not in rv:
if 'mach_exception' in mechanism:
rv['type'] = 'MachException'
elif 'posix_signal' in mechanism:
rv['type'] = 'Signal'
else:
rv['type'] = 'Unknown'
if rv:
return rv
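# Illustrative sketch (not part of the original module): a crash report
# error block containing an NSException becomes an exception record with
# a cocoa mechanism attached.
def _example_exception_from_error():
    error = {
        'nsexception': {'name': 'NSRangeException',
                        'value': 'index 5 beyond bounds'},
        'signal': {'signal': 6, 'name': 'SIGABRT'},
    }
    exc = exception_from_apple_error_or_diagnosis(error)
    # exc['type'] == 'NSRangeException'
    # exc['mechanism']['posix_signal']['name'] == 'SIGABRT'
    return exc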
def is_in_app(frame, app_uuid=None):
if app_uuid is not None:
frame_uuid = frame.get('uuid')
if frame_uuid == app_uuid:
return True
fn = frame.get('package') or ''
if not (fn.startswith(APP_BUNDLE_PATHS) or
(SIM_PATH in fn and SIM_APP_PATH in fn)):
return False
if fn.endswith(NON_APP_FRAMEWORKS):
return False
return True
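# Illustrative sketch (not part of the original module): a frame whose
# binary lives in the app bundle is flagged as in-app, while bundled
# system frameworks such as libswiftCore are not.
def _example_is_in_app():
    app_frame = {'package': APP_BUNDLE_PATHS[0] + 'ABCD/MyApp.app/MyApp'}
    swift_frame = {'package': APP_BUNDLE_PATHS[0] +
                   'ABCD/MyApp.app/Frameworks/libswiftCore.dylib'}
    return is_in_app(app_frame), is_in_app(swift_frame)  # (True, False)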
def convert_stacktrace(frames, system=None, notable_addresses=None):
app_uuid = None
if system:
app_uuid = system.get('app_uuid')
if app_uuid is not None:
app_uuid = app_uuid.lower()
converted_frames = []
for frame in reversed(frames):
fn = frame.get('filename')
# We only record the offset if we found a symbol but we did not
# find a line number. In that case it's the offset in bytes from
# the beginning of the symbol.
function = frame.get('symbol_name') or '<unknown>'
lineno = frame.get('line')
offset = None
if not lineno:
offset = frame['instruction_addr'] - frame['symbol_addr']
cframe = {
'abs_path': fn,
'filename': fn and posixpath.basename(fn) or None,
# This can come back as `None` from the symbolizer, in which
            # case we need to fill something else in or we will later fail
            # to fulfill the interface requirements, which say that a
            # function needs to be provided.
'function': function,
'package': frame.get('object_name'),
'symbol_addr': '0x%x' % frame['symbol_addr'],
'instruction_addr': '0x%x' % frame['instruction_addr'],
'instruction_offset': offset,
'lineno': lineno,
}
cframe['in_app'] = is_in_app(cframe, app_uuid)
converted_frames.append(cframe)
if converted_frames and notable_addresses:
converted_frames[-1]['vars'] = notable_addresses
if converted_frames:
return {'frames': converted_frames}
def inject_apple_backtrace(data, frames, diagnosis=None, error=None,
system=None, notable_addresses=None,
thread_id=None):
stacktrace = convert_stacktrace(frames, system, notable_addresses)
if error or diagnosis:
error = error or {}
exc = exception_from_apple_error_or_diagnosis(error, diagnosis)
if exc is not None:
exc['stacktrace'] = stacktrace
exc['thread_id'] = thread_id
data['sentry.interfaces.Exception'] = {'values': [exc]}
# Since we inject the exception late we need to make sure that
# we set the event type to error as it would be set to
# 'default' otherwise.
data['type'] = 'error'
return True
data['sentry.interfaces.Stacktrace'] = stacktrace
return False
def inject_apple_device_data(data, system):
contexts = data.setdefault('contexts', {})
device = contexts.setdefault('device', {})
os = contexts.setdefault('os', {})
try:
os['name'] = APPLE_SDK_MAPPING[system['system_name']]
except LookupError:
os['name'] = system.get('system_name') or 'Generic Apple'
if 'system_version' in system:
os['version'] = system['system_version']
if 'os_version' in system:
os['build'] = system['os_version']
if 'kernel_version' in system:
os['kernel_version'] = system['kernel_version']
if 'jailbroken' in system:
os['rooted'] = system['jailbroken']
if 'cpu_arch' in system:
device['arch'] = system['cpu_arch']
if 'model' in system:
device['model_id'] = system['model']
if 'machine' in system:
device['model'] = system['machine']
match = model_re.match(system['machine'])
if match is not None:
device['family'] = match.group(1)
def dump_crash_report(report):
import json
with open('/tmp/sentry-apple-crash-report-%s.json' % time.time(), 'w') as f:
json.dump(report, f, indent=2)
def preprocess_apple_crash_event(data):
"""This processes the "legacy" AppleCrashReport."""
crash_report = data['sentry.interfaces.AppleCrashReport']
if os.environ.get('SENTRY_DUMP_APPLE_CRASH_REPORT') == '1':
dump_crash_report(crash_report)
project = Project.objects.get_from_cache(
id=data['project'],
)
system = None
errors = []
threads = []
crash = crash_report['crash']
crashed_thread = None
threads = {}
raw_threads = {}
for raw_thread in crash['threads']:
if raw_thread['crashed'] and raw_thread.get('backtrace'):
crashed_thread = raw_thread
raw_threads[raw_thread['index']] = raw_thread
threads[raw_thread['index']] = {
'id': raw_thread['index'],
'name': raw_thread.get('name'),
'current': raw_thread.get('current_thread', False),
'crashed': raw_thread.get('crashed', False),
}
sdk_info = get_sdk_from_apple_system_info(system)
referenced_images = find_apple_crash_report_referenced_images(
crash_report['binary_images'], raw_threads.values())
sym = Symbolizer(project, crash_report['binary_images'],
referenced_images=referenced_images)
with sym:
if crashed_thread is None:
append_error(data, {
'type': EventError.NATIVE_NO_CRASHED_THREAD,
})
else:
system = crash_report.get('system')
try:
bt, errors = sym.symbolize_backtrace(
crashed_thread['backtrace']['contents'], sdk_info)
for error in errors:
append_error(data, error)
if inject_apple_backtrace(data, bt, crash.get('diagnosis'),
crash.get('error'), system,
crashed_thread.get('notable_addresses'),
crashed_thread['index']):
# We recorded an exception, so in this case we can
# skip having the stacktrace.
threads[crashed_thread['index']]['stacktrace'] = None
except Exception:
logger.exception('Failed to symbolicate')
errors.append({
'type': EventError.NATIVE_INTERNAL_FAILURE,
'error': 'The symbolicator encountered an internal failure',
})
for thread in six.itervalues(threads):
# If we were told to skip the stacktrace, skip it indeed
if thread.get('stacktrace', Ellipsis) is None:
continue
raw_thread = raw_threads.get(thread['id'])
if raw_thread is None or not raw_thread.get('backtrace'):
continue
bt, errors = sym.symbolize_backtrace(
raw_thread['backtrace']['contents'], sdk_info)
for error in errors:
append_error(data, error)
thread['stacktrace'] = convert_stacktrace(
bt, system, raw_thread.get('notable_addresses'))
if threads:
data['threads'] = {
'values': sorted(threads.values(), key=lambda x: x['id']),
}
if system:
inject_apple_device_data(data, system)
return data
def resolve_frame_symbols(data):
debug_meta = data['debug_meta']
debug_images = debug_meta['images']
sdk_info = get_sdk_from_event(data)
stacktraces = find_all_stacktraces(data)
if not stacktraces:
return
project = Project.objects.get_from_cache(
id=data['project'],
)
errors = []
referenced_images = find_stacktrace_referenced_images(
debug_images, stacktraces)
sym = Symbolizer(project, debug_images,
referenced_images=referenced_images)
frame = None
idx = -1
def report_error(e):
errors.append({
'type': EventError.NATIVE_INTERNAL_FAILURE,
'frame': frame,
'error': 'frame #%d: %s: %s' % (
idx,
e.__class__.__name__,
six.text_type(e),
)
})
processed_frames = []
with sym:
for stacktrace in stacktraces:
for idx, frame in enumerate(stacktrace['frames']):
if 'image_addr' not in frame or \
'instruction_addr' not in frame or \
'symbol_addr' not in frame:
continue
try:
sfrm = sym.symbolize_frame({
'object_name': frame.get('package'),
'object_addr': frame['image_addr'],
'instruction_addr': frame['instruction_addr'],
'symbol_addr': frame['symbol_addr'],
}, sdk_info, report_error=report_error)
if not sfrm:
continue
# XXX: log here if symbol could not be found?
frame['function'] = sfrm.get('symbol_name') or \
frame.get('function') or '<unknown>'
frame['abs_path'] = sfrm.get('filename') or None
if frame['abs_path']:
frame['filename'] = posixpath.basename(frame['abs_path'])
if sfrm.get('line') is not None:
frame['lineno'] = sfrm['line']
else:
frame['instruction_offset'] = \
parse_addr(sfrm['instruction_addr']) - \
parse_addr(sfrm['symbol_addr'])
if sfrm.get('column') is not None:
frame['colno'] = sfrm['column']
frame['package'] = sfrm['object_name'] or frame.get('package')
frame['symbol_addr'] = '0x%x' % parse_addr(sfrm['symbol_addr'])
frame['instruction_addr'] = '0x%x' % parse_addr(
sfrm['instruction_addr'])
frame['in_app'] = is_in_app(frame)
processed_frames.append(frame)
except Exception:
logger.exception('Failed to symbolicate')
errors.append({
'type': EventError.NATIVE_INTERNAL_FAILURE,
'error': 'The symbolicator encountered an internal failure',
})
if errors:
data.setdefault('errors', []).extend(errors)
return data
class NativePlugin(Plugin2):
can_disable = False
def get_event_preprocessors(self, data, **kwargs):
rv = []
if data.get('sentry.interfaces.AppleCrashReport'):
rv.append(preprocess_apple_crash_event)
if data.get('debug_meta'):
rv.append(resolve_frame_symbols)
return rv
|
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes crontab files and try to catch common errors.
Parse crontab files and check each type of line for potential syntax errors.
Each line is classified as: comment / blank, variable assignment, standard
action line, @ extension action line, or an unknown line.
Nothing is done for comment / blank lines.
Variable assignments are checked to make sure there's not a $ on the
right hand side. If there is, a warning is emitted. It's valid syntax
but is usually done in error.
Standard action lines are inspected to make sure the first 5 fields are valid
and within range. The user name is checked. The command is checked for bare
%'s. The last two generate warnings as they can potentially be valid. There
are some weird configurations of the first 5 fields that are valid but are
marked as errors.
A brief description of each class and function:
Parsing cron time fields:
FSM: Finite state machine class - used to parse crontab fields.
Action*: Action functions for FSM transitions.
    InitCronFSM: Instantiate an FSM and create the grammar for crontab files.
Checking cron time fields:
CronTimeField: Used to check limits for a cron time field using an
instance of the CronTimeFieldLimit class.
CT*: Subclasses of CronTimeField representing each kind of cron time
field.
CronTimeFieldLimit: Limits for each time field position.
InitCronTimeFieldLimits: Creates CronTimeFieldLimit instances for each
cron time field position - hour, minute, etc.
Parse each line:
CronLine*: Classes that act on the parsed cron lines.
CronLineTimeAction: Superclass for time/action cron lines.
CronLineAt: Subclass that acts on @period timespec cron lines.
CronLineTime: Subclass that acts on 5 field timespec cron lines.
CronLineFactory: Creates a CronLine* instance to act on the parsed
cron line.
Logging class to pretty-print output:
LogCounter: A logging class that provides a summary of warnings and
errors.
Putting it all together:
    CheckCrontab: Checks a crontab file.
"""
from __future__ import print_function
# For Python 2.5
from __future__ import with_statement
__author__ = 'lyda@google.com (Kevin Lyda)'
import copy
import os
import pwd
import re
import string
# The following extensions imply further postprocessing or that the slack
# role was for a cron that allowed dots in cron scripts.
FILE_RE_WHITELIST = [re.compile(x) for x in
(r'\.in$', r'\.cron$', r'\.disabled$', r'^(\S+\.)?cron\.d$')]
class FSM(object):
"""Finite State Machine.
A simple FSM that is used to parse the time fields in a crontab file.
"""
def __init__(self, data_out_init):
"""Creates FSM with the initial values for data_out.
Args:
data_out_init: Must be a dictionary object.
"""
self.data_out_init = data_out_init
self.states = {}
self.end = {}
def AddTransition(self, chars, state, action, next_state):
"""Adds a transition.
Adds a transition based on a set of characters and the current state.
If a given input char is found in chars and the FSM is currently in
state, then action is performed and the FSM is set to the next_state.
Args:
chars: String of chars this transition applies to.
state: State this transition applies to.
action: Action to perform. This is called with two arguments -
the data_out variable and the input char.
next_state: Set the FSM to this state once action is complete.
"""
if state not in self.states:
self.states[state] = {}
self.states[state].update([(char, (action, next_state))
for char in chars])
def AddEndState(self, state, action):
"""Handle the end state of the FSM.
This specifies the action to perform when the FSM exhausts its
data_in and is in state.
Args:
state: The state this applies to.
action: The action to perform. This is called with just the
data_out variable.
"""
self.end[state] = action
def Run(self, data_in):
"""Run the FSM with the given data_in input.
Touch each char of data_in with his noodley appendage.
Args:
data_in: The input data to parse.
Returns:
data_out: Whatever the actions have generated; usually a parse tree.
Raises:
LookupError: If no transition can be found, this is raised.
"""
data_out = copy.deepcopy(self.data_out_init)
cur_state = 'start'
parsed = ''
for char in data_in:
(action, next_state) = self.states.get(cur_state, {}
).get(char, (None, None))
if not action:
data_out['parser_error'] = ('"%s[[%s]]%s"'
% (parsed, char,
data_in[len(parsed)+1:len(data_in)]))
return data_out
action(data_out, char)
cur_state = next_state
parsed += char
if cur_state not in self.end:
data_out['parser_error'] = '"%s" is incomplete' % parsed
return data_out
self.end[cur_state](data_out)
return data_out
def action_time(data_out, char):
"""Add a char to time."""
data_out['time'] += char
def action_star(data_out, char):
"""Add a char to time."""
data_out['time'] = char
def action_dash(data_out, unused_char):
"""Move time to range, reset time."""
data_out['range'] = data_out['time']
data_out['time'] = ''
def action_step(data_out, char):
"""Add a char to step."""
data_out['step'] += char
def action_noop(unused_data_out, unused_char):
"""Do nothing."""
pass
def action_time_comma(data_out, unused_char=''):
"""Move time to cron_times, reset time."""
data_out['cron_times'].append(CTTime(int(data_out['time'])))
data_out['time'] = ''
def action_star_comma(data_out, unused_char=''):
"""Set cron_times, reset time."""
data_out['cron_times'].append(CTStar())
data_out['time'] = ''
def action_star_step_comma(data_out, unused_char=''):
"""Set cron_times, reset time & step."""
data_out['cron_times'].append(CTStarStep(int(data_out['step'])))
data_out['time'] = ''
data_out['step'] = ''
def action_text_comma(data_out, unused_char=''):
"""Set cron_times from time, reset time."""
data_out['cron_times'].append(CTText(data_out['time']))
data_out['time'] = ''
def action_range_comma(data_out, unused_char=''):
"""Set cron_times from range & time, reset range & time."""
data_out['cron_times'].append(CTRange(int(data_out['range']),
int(data_out['time'])))
data_out['range'] = ''
data_out['time'] = ''
def action_text_range_comma(data_out, unused_char=''):
"""Set cron_times from range & time, reset range & time."""
data_out['cron_times'].append(CTTextRange(data_out['range'],
data_out['time']))
data_out['range'] = ''
data_out['time'] = ''
def action_range_step_comma(data_out, unused_char=''):
"""Set cron_times from range, time & step, reset range, time & step."""
data_out['cron_times'].append(CTRangeStep(int(data_out['range']),
int(data_out['time']),
int(data_out['step'])))
data_out['range'] = ''
data_out['time'] = ''
data_out['step'] = ''
def action_text_range_step_comma(data_out, unused_char=''):
"""Set cron_times from range, time & step, reset range, time & step."""
data_out['cron_times'].append(CTTextRangeStep(data_out['range'],
data_out['time'],
int(data_out['step'])))
data_out['range'] = ''
data_out['time'] = ''
data_out['step'] = ''
def InitCronFSM():
"""Initialise the FSM with the rules for a cron time field.
Returns:
An initialised finite state machine.
"""
fsm = FSM(dict({'time': '',
'range': '',
'step': '',
'cron_times': []}))
# Case: *
fsm.AddTransition('*', 'start', action_star, 'star')
fsm.AddTransition('*', 'next', action_star, 'star')
fsm.AddEndState('star', action_star_comma)
fsm.AddTransition(',', 'star', action_star_comma, 'next')
# Case: */<number>
fsm.AddTransition('/', 'star', action_noop, 'start_star_step')
fsm.AddTransition(string.digits, 'start_star_step', action_step,
'star_step')
fsm.AddTransition(string.digits, 'star_step', action_step, 'star_step')
fsm.AddEndState('star_step', action_star_step_comma)
fsm.AddTransition(',', 'star_step', action_star_step_comma, 'next')
# Case: <number>
fsm.AddTransition(string.digits, 'start', action_time, 'time')
fsm.AddTransition(string.digits, 'next', action_time, 'time')
fsm.AddTransition(string.digits, 'time', action_time, 'time')
fsm.AddEndState('time', action_time_comma)
fsm.AddTransition(',', 'time', action_time_comma, 'next')
# Case: <number>-<number>
fsm.AddTransition('-', 'time', action_dash, 'start_range')
fsm.AddTransition(string.digits, 'start_range', action_time, 'range')
fsm.AddTransition(string.digits, 'range', action_time, 'range')
fsm.AddEndState('range', action_range_comma)
fsm.AddTransition(',', 'range', action_range_comma, 'next')
# Case: <number>-<number>/<number>
fsm.AddTransition('/', 'range', action_noop, 'start_range_step')
fsm.AddTransition(string.digits, 'start_range_step',
action_step, 'range_step')
fsm.AddTransition(string.digits, 'range_step', action_step, 'range_step')
fsm.AddEndState('range_step', action_range_step_comma)
fsm.AddTransition(',', 'range_step', action_range_step_comma, 'next')
# Case: <text>
fsm.AddTransition(string.ascii_letters, 'start', action_time, 'text')
fsm.AddTransition(string.ascii_letters, 'next', action_time, 'text')
fsm.AddTransition(string.ascii_letters, 'text', action_time, 'text')
fsm.AddEndState('text', action_text_comma)
fsm.AddTransition(',', 'text', action_text_comma, 'next')
# Case: <text>-<text>
fsm.AddTransition('-', 'text', action_dash, 'start_text_range')
fsm.AddTransition(string.ascii_letters, 'start_text_range', action_time,
'text_range')
fsm.AddTransition(string.ascii_letters, 'text_range', action_time,
'text_range')
fsm.AddEndState('text_range', action_text_range_comma)
fsm.AddTransition(',', 'text_range', action_text_range_comma, 'next')
# Case: <text>-<text>/<text>
fsm.AddTransition('/', 'text_range', action_noop, 'start_text_range_step')
fsm.AddTransition(string.digits, 'start_text_range_step', action_step,
'text_range_step')
fsm.AddTransition(string.digits, 'text_range_step', action_step,
'text_range_step')
fsm.AddEndState('text_range_step', action_text_range_step_comma)
fsm.AddTransition(',', 'text_range_step', action_text_range_step_comma,
'next')
return fsm
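# Illustrative sketch (not part of the original module): running the FSM
# built above over a single crontab time field.
def _ExampleParseTimeField():
  fsm = InitCronFSM()
  parsed = fsm.Run('1-5/2,30')
  # parsed['cron_times'] holds a CTRangeStep(1, 5, 2) and a CTTime(30);
  # a 'parser_error' entry would be present instead if parsing had failed.
  return parsed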
class CronTimeField(object):
"""CronTimeField superclass for various time specifiers in cron fields."""
def __init__(self):
self._text = None
self._kind = None
self._start = None
self._end = None
self._step = None
def __str__(self):
return self._text
@property
def Kind(self):
"""Kind field."""
return self._kind
@property
def Start(self):
"""Start value of this field."""
return self._start
@property
def End(self):
"""End value of this field."""
return self._end
@property
def Step(self):
"""Step for this field."""
return self._step
def CheckLowStep(self, diagnostics, cron_time_field):
"""Checks if a step is too low for a field."""
if self._step < 1:
diagnostics.append('%d is too low for field "%s" (%s)'
% (self._step, cron_time_field.name, self))
def CheckHighStep(self, diagnostics, cron_time_field):
"""Checks if a step is too high for a field."""
if self._step > self._end:
diagnostics.append('the step (%d) is greater than the last number'
' (%d) in field "%s" (%s)'
% (self._step, self._end,
cron_time_field.name, self))
def CheckLowNum(self, diagnostics, time_field, cron_time_field):
"""Checks if a number is too low for a field."""
if time_field < cron_time_field.min_time:
diagnostics.append('%d is too low for field "%s" (%s)'
% (time_field, cron_time_field.name, self))
def CheckHighNum(self, diagnostics, time_field, cron_time_field):
"""Checks if a number is too high for a field."""
if time_field > cron_time_field.max_time:
diagnostics.append('%d is too high for field "%s" (%s)'
% (time_field, cron_time_field.name, self))
def CheckRange(self, diagnostics, cron_time_field):
"""Checks if a range isn't too high for a field."""
if self._start > self._end:
diagnostics.append('%d is greater than %d in field "%s" (%s)'
% (self._start, self._end, cron_time_field.name,
self))
def CheckValidText(self, diagnostics, time_field, cron_time_field):
"""Checks if a field has valid text."""
if time_field.lower() not in cron_time_field.valid_text:
diagnostics.append('%s is not valid for field "%s" (%s)'
% (time_field, cron_time_field.name, self))
class CTTime(CronTimeField):
"""CronTimeField subclass for <number>."""
def __init__(self, start_time):
"""Initialize CTRange with start_time."""
CronTimeField.__init__(self)
self._kind = 'time'
self._start = start_time
self._text = '%d' % start_time
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a time field."""
diagnostics = []
self.CheckLowNum(diagnostics, self._start, cron_time_field)
self.CheckHighNum(diagnostics, self._start, cron_time_field)
return diagnostics
class CTRange(CronTimeField):
"""CronTimeField subclass for <number>-<number>."""
def __init__(self, start_time, end_time):
"""Initialize CTRange with start_time and end_time."""
CronTimeField.__init__(self)
self._kind = 'range'
self._start = start_time
self._end = end_time
self._text = '%d-%d' % (start_time, end_time)
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a range field."""
diagnostics = []
self.CheckRange(diagnostics, cron_time_field)
self.CheckLowNum(diagnostics, self._start, cron_time_field)
self.CheckHighNum(diagnostics, self._end, cron_time_field)
return diagnostics
class CTRangeStep(CronTimeField):
"""CronTimeField subclass for <number>-<number>/<number>."""
def __init__(self, start_time, end_time, step_count):
"""Initialize CTRangeStep with start_time, end_time and step_count."""
CronTimeField.__init__(self)
self._kind = 'range_step'
self._start = start_time
self._end = end_time
self._step = step_count
self._text = '%d-%d/%d' % (start_time, end_time, step_count)
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a range/step field."""
diagnostics = []
self.CheckRange(diagnostics, cron_time_field)
self.CheckLowNum(diagnostics, self._start, cron_time_field)
self.CheckHighNum(diagnostics, self._end, cron_time_field)
self.CheckLowStep(diagnostics, cron_time_field)
self.CheckHighStep(diagnostics, cron_time_field)
self.CheckHighNum(diagnostics, self._step, cron_time_field)
return diagnostics
class CTStar(CronTimeField):
"""CronTimeField subclass for *."""
def __init__(self):
"""Initialize CTStar."""
CronTimeField.__init__(self)
self._kind = 'star'
self._text = '*'
def GetDiagnostics(self, unused_cron_time_field):
"""Checks for issues with a star field."""
return []
def ChkCTStarOnly(cron_time_field):
"""Checks if a crontab field is only a *.
Args:
cron_time_field: Parsed cron time field to check.
Returns:
True if there's only a * in this field.
"""
if not cron_time_field:
return True
if len(cron_time_field) == 1 and cron_time_field[0].Kind == 'star':
return True
return False
class CTStarStep(CronTimeField):
"""CronTimeField subclass for */<number>."""
def __init__(self, step_count):
"""Initialize CTStarStep with step_count."""
CronTimeField.__init__(self)
self._kind = 'star_step'
self._step = step_count
self._text = '*/%d' % step_count
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a star/step field."""
diagnostics = []
self.CheckLowStep(diagnostics, cron_time_field)
self.CheckHighNum(diagnostics, self._step, cron_time_field)
return diagnostics
class CTText(CronTimeField):
"""CronTimeField subclass for <text>."""
def __init__(self, start_time):
"""Initialize CTText with start_time."""
CronTimeField.__init__(self)
self._kind = 'text'
self._start = start_time
self._text = '%s' % start_time
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a text field."""
diagnostics = []
self.CheckValidText(diagnostics, self._start, cron_time_field)
return diagnostics
class CTTextRange(CronTimeField):
"""CronTimeField subclass for <text>-<text>."""
def __init__(self, start_time, end_time):
"""Initialize CTTextRange with start_time and end_time."""
CronTimeField.__init__(self)
self._kind = 'text_range'
self._start = start_time
self._end = end_time
self._text = '%s-%s' % (start_time, end_time)
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a text range field."""
diagnostics = []
self.CheckValidText(diagnostics, self._start, cron_time_field)
self.CheckValidText(diagnostics, self._end, cron_time_field)
return diagnostics
class CTTextRangeStep(CronTimeField):
"""CronTimeField subclass for <text>-<text>."""
def __init__(self, start_time, end_time, step_count):
"""Initialize CTTextRangeStep with start_time, end_time and step_count."""
CronTimeField.__init__(self)
self._kind = 'text_range_step'
self._start = start_time
self._end = end_time
self._step = step_count
self._text = '%s-%s/%s' % (start_time, end_time, step_count)
def GetDiagnostics(self, cron_time_field):
"""Checks for issues with a text range / step field."""
diagnostics = []
self.CheckValidText(diagnostics, self._start, cron_time_field)
self.CheckValidText(diagnostics, self._end, cron_time_field)
self.CheckLowStep(diagnostics, cron_time_field)
self.CheckHighNum(diagnostics, self._step, cron_time_field)
return diagnostics
class CronTimeFieldLimit(object):
"""Class to represent the limits of a crontab time field."""
def __init__(self, min_time, max_time, valid_text):
"""Initialise the limits."""
self.min_time = min_time
self.max_time = max_time
self.valid_text = valid_text
self._name = None
def _GetName(self):
"""Return the name."""
return self._name
def _SetName(self, name):
"""Set the name."""
self._name = name
name = property(_GetName, _SetName,
doc="""Gets or Sets the name of this field.""")
def InitCronTimeFieldLimits():
"""Instantiate the CronTimeField objects for the five cron time fields.
Returns:
A tuple of 5 instantiated CronTimeField objects for minute, hour,
day of month, month and day of week.
"""
cron_time_field_limits = {
'minute': CronTimeFieldLimit(0, 59, []),
'hour': CronTimeFieldLimit(0, 23, []),
'day of month': CronTimeFieldLimit(1, 31, []),
'month': CronTimeFieldLimit(1, 12,
['jan', 'feb', 'mar', 'apr',
'may', 'jun', 'jul', 'aug',
'sep', 'oct', 'nov', 'dec']),
'day of week': CronTimeFieldLimit(0, 7,
['sun', 'mon', 'tue', 'wed',
'thu', 'fri', 'sat'])
}
for field_name in cron_time_field_limits:
cron_time_field_limits[field_name].name = field_name
return cron_time_field_limits
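# Illustrative sketch (not part of the original module): combining the FSM
# parse with the per-field limits, the way CronLineTime does for each of
# the five time fields.
def _ExampleCheckMinuteField():
  limits = InitCronTimeFieldLimits()
  parsed = InitCronFSM().Run('90')  # out of range for the minute field
  diagnostics = []
  for cron_time in parsed['cron_times']:
    diagnostics.extend(cron_time.GetDiagnostics(limits['minute']))
  return diagnostics  # ['90 is too high for field "minute" (90)']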
class CronLineEmpty(object):
"""For empty lines."""
def ValidateAndLog(self, log):
"""Nothing really to validate for empty lines."""
pass
class CronLineChkCrontabCmd(object):
"""For chkcrontab command lines."""
def __init__(self, command, msg_kind):
self.command = command
self.msg_kind = msg_kind
def ValidateAndLog(self, log):
"""Validates a chkcrontab command line and logs any errors and warnings.
Args:
log: A LogCounter instance to record issues.
"""
if not log.ValidMsgKind(self.msg_kind):
log.LineError(log.MSG_CHKCRONTAB_ERROR,
'"%s" is an unknown error message.' % self.msg_kind)
if self.command == 'disable-msg':
log.Ignore(self.msg_kind)
elif self.command == 'enable-msg':
log.Unignore(self.msg_kind)
else:
log.LineError(log.MSG_CHKCRONTAB_ERROR,
'Invalid chkcrontab command - must be'
' enable-msg or disable-msg.')
class CronLineComment(object):
"""For Comment lines."""
def ValidateAndLog(self, log):
"""Nothing really to validate for Comment lines."""
pass
class CronLineAssignment(object):
"""For var assignment lines."""
def __init__(self, variable):
self.variable = variable
def ValidateAndLog(self, log):
"""Validates an assignment line and logs any errors and warnings.
Args:
log: A LogCounter instance to record issues.
"""
# An assignment like /^FOO=\s*$/ will trigger a "bad minute" error.
if not self.variable.strip(string.whitespace):
log.LineError(log.MSG_QUOTE_VALUES,
'Variable assignments in crontabs must contain'
' non-whitespace characters (try quotes).')
# Warn when FOO=$BAR as users expect shell-like behaviour.
if '$' in self.variable:
log.LineWarn(log.MSG_SHELL_VAR,
'Variable assignments in crontabs are not like shell.'
' $VAR is not expanded.')
if re.match('".+" ?#', self.variable) or re.match('[^"].*#', self.variable):
log.LineError(log.MSG_COMMENT,
'Variable assignments in crontabs are not like shell.'
' # comment is not allowed.')
class CronLineTimeAction(object):
"""Checks cron lines that specify a time and an action.
Must be used as a subclass - subclass must implement _CheckTimeField.
"""
def __init__(self, time_field, user, command, options):
self.time_field = time_field
self.user = user
self.command = command
self.whitelisted_users = []
if hasattr(options, 'whitelisted_users'):
self.whitelisted_users = options.whitelisted_users
self.check_passwd = True
if hasattr(options, 'check_passwd'):
self.check_passwd = options.check_passwd
def _CheckTimeField(self, log):
"""Virtual method to be implemented by subclasses to check time field."""
pass
def ValidateAndLog(self, log):
"""Validates an @ time spec line and logs any errors and warnings.
Args:
log: A LogCounter instance to record issues.
"""
self._CheckTimeField(log)
# User checks.
if self.user in self.whitelisted_users:
pass
elif len(self.user) > 31:
log.LineError(log.MSG_INVALID_USER,
'Username too long "%s"' % self.user)
elif self.user.startswith('-'):
log.LineError(log.MSG_INVALID_USER, 'Invalid username "%s"' % self.user)
elif re.search(r'[\s!"#$%&\'()*+,/:;<=>?@[\\\]^`{|}~]', self.user):
log.LineError(log.MSG_INVALID_USER, 'Invalid username "%s"' % self.user)
elif self.check_passwd:
try:
pwd.getpwnam(self.user)
except KeyError:
log.LineWarn(log.MSG_USER_NOT_FOUND,
'User "%s" not found.' % self.user)
else:
log.LineWarn(log.MSG_USER_NOT_FOUND,
'User "%s" not found.' % self.user)
# Command checks.
if self.command.startswith('%') or re.search(r'[^\\]%', self.command):
log.LineWarn(log.MSG_BARE_PERCENT, 'A bare % is a line break in'
' crontab and is commonly not intended.')
class CronLineAt(CronLineTimeAction):
"""For cron lines specified with @ time specs."""
def _CheckTimeField(self, log):
"""Checks the @ time field.
Args:
log: A LogCounter instance to record issues.
"""
valid_at_periods = ('reboot', 'yearly', 'annually', 'monthly',
'weekly', 'daily', 'midnight', 'hourly')
if self.time_field not in valid_at_periods:
log.LineError(log.MSG_INVALID_AT,
'Invalid @ directive "%s"' % self.time_field)
class CronLineTime(CronLineTimeAction):
"""For cron lines specified with 5 field time specs."""
def _CheckTimeField(self, log):
"""Validates a 5 field time spec line and logs any errors and warnings.
Args:
log: A LogCounter instance to record issues.
"""
cron_time_field_names = ('minute', 'hour', 'day of month',
'month', 'day of week')
cron_time_field_limits = InitCronTimeFieldLimits()
fsm = InitCronFSM()
# Check the first five fields individually.
parsed_cron_time_fields = {}
for field in cron_time_field_names:
parsed_cron_time_fields[field] = fsm.Run(self.time_field[field])
if 'parser_error' in parsed_cron_time_fields[field]:
log.LineError(log.MSG_FIELD_PARSE_ERROR,
'Failed to fully parse "%s" field here: %s'
% (field,
parsed_cron_time_fields[field]['parser_error']))
# Check the time field according to the cron_time_fields[field] rules.
for cron_time in parsed_cron_time_fields[field]['cron_times']:
for line_error in (cron_time.
GetDiagnostics(cron_time_field_limits[field])):
log.LineError(log.MSG_FIELD_VALUE_ERROR, line_error)
# Check the first five fields collectively.
if ChkCTStarOnly(parsed_cron_time_fields['minute']['cron_times']):
if not ChkCTStarOnly(parsed_cron_time_fields['hour']['cron_times']):
log.LineWarn(log.MSG_HOURS_NOT_MINUTES,
'Cron will run this every minute for the hours set.')
class CronLineUnknown(object):
"""For unrecognised cron lines."""
def ValidateAndLog(self, log):
"""Emits an error for unrecognised cron lines.
Args:
log: A LogCounter instance to record issues.
"""
log.LineError(log.MSG_LINE_ERROR, 'Failed to parse line.')
class CronLineFactory(object):
"""Classify a line in a cron field by what type of line it is."""
def __init__(self):
pass
  def ParseLine(self, line, options):
"""Classify a line.
Args:
line: The line to classify.
      options: an options object (e.g. an argparse.Namespace) passed to the CronLine* constructors.
Returns:
A CronLine* class (must have a ValidateAndLog method).
"""
    chkcrontab_cmd = re.compile(r'##*\s*chkcrontab:\s*(.*)=(.*)')
    assignment_line_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*\s*=(.*)')
    at_line_re = re.compile(r'@(\S+)\s+(\S+)\s+(.*)')
    cron_time_field_re = r'[\*0-9a-zA-Z,/-]+'
    time_field_job_line_re = re.compile(
        r'^\s*(%s)\s+(%s)\s+(%s)\s+(%s)\s+(%s)\s+(\S+)\s+(.*)' %
        (cron_time_field_re, cron_time_field_re, cron_time_field_re,
         cron_time_field_re, cron_time_field_re))
if not line:
return CronLineEmpty()
if line.startswith('#'):
match = chkcrontab_cmd.match(line)
if match:
return CronLineChkCrontabCmd(match.groups()[0], match.groups()[1])
else:
return CronLineComment()
match = assignment_line_re.match(line)
if match:
return CronLineAssignment(match.groups()[0])
match = at_line_re.match(line)
if match:
return CronLineAt(match.groups()[0], match.groups()[1],
match.groups()[2], options)
# Is this line a cron job specifier?
match = time_field_job_line_re.match(line)
if match:
field = {
'minute': match.groups()[0],
'hour': match.groups()[1],
'day of month': match.groups()[2],
'month': match.groups()[3],
'day of week': match.groups()[4],
}
return CronLineTime(field, match.groups()[5], match.groups()[6], options)
return CronLineUnknown()
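# A minimal sketch (not part of chkcrontab) of the classify-then-validate flow:
# each raw crontab line is turned into a CronLine* object by the factory and
# then checked against a LogCounter.  _Options is a hypothetical stand-in for
# the argparse namespace chkcrontab normally passes in.
def _cron_line_factory_example():
  class _Options(object):
    whitelisted_users = ['root']
    check_passwd = False  # skip pwd.getpwnam() so the sketch has no system dependency
  log = LogCounter(quiet=True)
  factory = CronLineFactory()
  sample = (
      'MAILTO=""',                                  # variable assignment
      '# chkcrontab: disable-msg=USER_NOT_FOUND',   # chkcrontab directive
      '*/5 * * * * root /usr/bin/true',             # 5-field time spec
      '@daily root /usr/sbin/logrotate',            # @ time spec
  )
  for line_no, line in enumerate(sample, start=1):
    cron_line = factory.ParseLine(line, _Options())
    cron_line.ValidateAndLog(log)
    log.Emit(line_no, line)
  return log.warn_count, log.error_count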
class LogMsgKindNotFound(Exception):
"""Exception for broken log messages."""
pass
# TODO(lyda): Revisit this. A possible alternative is:
# MessageCollector - has a collection of messages; methods for printing
# and summarising them.
# Message - super-class for message objects.
# MessageExampleError - a class for EXAMPLE_ERROR - the __init__ method
# would take the args to fill the string. The MsgKind method would
# generate a string off the class name. And there would be a __str__
# method obviously.
class LogCounter(object):
"""A log class that collects stats on warnings and errors.
This log class collects stats on the number of warnings and errors.
It also has some methods for queueing up warnings and errors and then
  emitting them with the relevant line_no and line.
"""
_msg_kinds = set(('BARE_PERCENT',
'CHKCRONTAB_ERROR',
'FIELD_PARSE_ERROR',
'FIELD_VALUE_ERROR',
'INVALID_AT',
'INVALID_USER',
'LINE_ERROR',
'QUOTE_VALUES',
'SHELL_VAR',
'USER_NOT_FOUND',
'HOURS_NOT_MINUTES',
'COMMENT'))
def __init__(self, quiet=False):
"""Inits LogCounter."""
self._error_count = 0
self._warn_count = 0
self._ignored = set()
self._line_errors = []
self._line_warns = []
self._quiet = quiet
def Ignore(self, msg_kind):
"""Start ignoring a category of message.
Args:
msg_kind: The category of message.
"""
self._ignored.add(msg_kind)
def Unignore(self, msg_kind):
"""Stop ignoring a category of message.
Args:
msg_kind: The category of message.
"""
self._ignored.discard(msg_kind)
def ValidMsgKind(self, msg_kind):
"""Check that msg_kind is a valid error.
Args:
msg_kind: The category of message.
Returns:
True if it's valid.
False if not valid.
"""
return msg_kind in self._msg_kinds
def __getattr__(self, msg_kind):
"""Return value for msg_kind.
Args:
msg_kind: The category of message.
Returns:
String for msg_kind if valid.
Raises:
LogMsgKindNotFound: Raised if not a valid log message.
"""
if msg_kind.startswith('MSG_'):
if msg_kind[4:] in self._msg_kinds:
return msg_kind[4:]
raise LogMsgKindNotFound()
def Warn(self, message):
"""Print warning.
Immediately print warning message. Increment warning counter.
Args:
message: The message to print as a warning.
"""
if not self._quiet:
print('W:', message)
self._warn_count += 1
def LineWarn(self, msg_kind, line_warn):
"""Queue warning.
Queue a warning message to print later. Increment warning counter.
Args:
msg_kind: The category of message.
line_warn: The message to queue as a warning.
"""
if msg_kind not in self._ignored:
self._line_warns.append('%s: %s' % (msg_kind, line_warn))
self._warn_count += 1
def Error(self, message):
"""Print error.
Immediately print error message. Increment error counter.
Args:
      message: The message to print as an error.
"""
if not self._quiet:
print('E:', message)
self._error_count += 1
def LineError(self, msg_kind, line_error):
"""Queue error.
    Queue an error message to print later. Increment error counter.
Args:
msg_kind: The category of message.
      line_error: The message to queue as an error.
"""
if msg_kind not in self._ignored:
self._line_errors.append('%s: %s' % (msg_kind, line_error))
self._error_count += 1
def Emit(self, line_no, line):
"""Print queued warnings and errors.
Print the queued warnings and errors if they exist. Reset queues.
Prefix all this with the relevant context - line_no and line.
Args:
line_no: Line number these queued warnings and errors apply to.
line: Line these queued warnings and errors apply to.
"""
if self._line_errors or self._line_warns:
spacer = ' ' * len('%d' % line_no)
line_error_fmt = 'e: %s %%s' % spacer
line_warn_fmt = 'w: %s %%s' % spacer
if not self._quiet:
if self._line_errors:
print('E: %d: %s' % (line_no, line))
else:
print('W: %d: %s' % (line_no, line))
for line_error in self._line_errors:
print(line_error_fmt % line_error)
for line_warn in self._line_warns:
print(line_warn_fmt % line_warn)
self._line_errors = []
self._line_warns = []
def Summary(self):
"""Print summary of all warnings and errors.
Print the warning and error counts if they exist.
Returns:
2: If there were any errors.
1: If there were any warnings but no errors.
0: If there were no errors or warnings.
"""
more_info = 'See http://goo.gl/7XS9q for more info.'
if self._error_count > 0:
if not self._quiet:
print('E: There were %d errors and %d warnings.'
% (self._error_count, self._warn_count))
print(more_info)
return 2
elif self._warn_count > 0:
if not self._quiet:
print('W: There were %d warnings.' % self._warn_count)
print(more_info)
return 1
else:
return 0
@property
def warn_count(self):
"""Accessor method for the warning count."""
return self._warn_count
@property
def error_count(self):
"""Accessor method for the error count."""
return self._error_count
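# A minimal sketch (not part of chkcrontab) of the queue-then-emit flow used by
# check_crontab below: per-line problems are queued with LineWarn/LineError,
# printed together by Emit() alongside the offending line, and Summary() turns
# the counts into an exit code.
def _log_counter_example():
  log = LogCounter()
  log.LineWarn(log.MSG_SHELL_VAR, '$HOME is not expanded')
  log.LineError(log.MSG_COMMENT, '# comment is not allowed here')
  log.Emit(3, 'PATH=$HOME/bin  # my tools')  # prints both queued messages with line context
  return log.Summary()  # 2, because at least one error was queued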
def check_crontab(arguments, log):
"""Check a crontab file.
Checks arguments.crontab for a variety of errors or potential errors.
This only works with the crontab format found in /etc/crontab and
/etc/cron.d.
Args:
arguments: ArgumentParser object containing the crontab file and options.
log: A LogCounter object.
Returns:
0 if there were no errors.
>0 if there were errors.
  Note: a run with only warnings returns 1; errors return 2 (see LogCounter.Summary).
"""
# Check if the file even exists.
if not os.path.exists(arguments.crontab):
log.Warn('File "%s" does not exist.' % arguments.crontab)
return log.Summary()
  # Add any specified users to the whitelist.
#if arguments.whitelisted_users:
# USER_WHITELIST.update(arguments.whitelisted_users)
# Check the file name.
if re.search('[^A-Za-z0-9_-]', os.path.basename(arguments.crontab)):
in_whitelist = False
for pattern in FILE_RE_WHITELIST:
if pattern.search(os.path.basename(arguments.crontab)):
in_whitelist = True
break
if not in_whitelist:
log.Warn('Cron will not process this file - its name must match'
' [A-Za-z0-9_-]+ .')
line_no = 0
cron_line_factory = CronLineFactory()
with open(arguments.crontab, 'r') as crontab_f:
for line in crontab_f:
missing_newline = line[-1] != "\n"
line = line.strip()
line_no += 1
      cron_line = cron_line_factory.ParseLine(line, arguments)
cron_line.ValidateAndLog(log)
log.Emit(line_no, line)
# are we missing a trailing newline?
if missing_newline:
log.Error('Cron will not process this file - missing trailing newline')
# Summarize the log messages if there were any.
return log.Summary()
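# A hedged sketch of driving check_crontab() directly.  chkcrontab normally
# builds `arguments` with argparse; any object exposing the same attribute
# names (an argparse.Namespace here) is enough.  The path is a placeholder.
def _check_crontab_example(path='/etc/crontab'):
  import argparse
  arguments = argparse.Namespace(
      crontab=path,
      whitelisted_users=['root'],
      check_passwd=False,
  )
  log = LogCounter(quiet=False)
  # Return value follows LogCounter.Summary(): 0 clean, 1 warnings only, 2 errors.
  return check_crontab(arguments, log)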
|
|
from __future__ import division
import sys
import albow # used for translation update
from pygame import Rect, Surface, image
from pygame.locals import K_RETURN, K_KP_ENTER, K_ESCAPE, K_TAB, KEYDOWN, SRCALPHA
from pygame.mouse import set_cursor
from pygame.cursors import arrow as arrow_cursor
from pygame.transform import rotozoom
from vectors import add, subtract
from utils import frame_rect
import theme
from theme import ThemeProperty, FontProperty
import resource
from numpy import fromstring
debug_rect = False
debug_tab = True
root_widget = None
current_cursor = None
def overridable_property(name, doc=None):
"""Creates a property which calls methods get_xxx and set_xxx of
the underlying object to get and set the property value, so that
the property's behaviour may be easily overridden by subclasses."""
getter_name = intern('get_' + name)
setter_name = intern('set_' + name)
return property(
lambda self: getattr(self, getter_name)(),
lambda self, value: getattr(self, setter_name)(value),
None,
doc)
def rect_property(name):
def get(self):
return getattr(self._rect, name)
def set(self, value):
r = self._rect
old_size = r.size
setattr(r, name, value)
new_size = r.size
if old_size != new_size:
self._resized(old_size)
return property(get, set)
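# A minimal sketch (hypothetical class, not part of albow) of what
# overridable_property buys us: attribute access is routed through get_xxx /
# set_xxx, so a subclass can change behaviour by overriding those methods
# without redeclaring the property itself.
def _overridable_property_example():
    class Speaker(object):
        _volume = 5
        def get_volume(self):
            return self._volume
        def set_volume(self, value):
            # Clamp instead of storing blindly; a subclass could override this.
            self._volume = max(0, min(10, value))
        volume = overridable_property('volume', "Playback volume, 0-10.")
    s = Speaker()
    s.volume = 42    # routed through set_volume, clamped to 10
    return s.volume  # routed through get_volume -> 10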
# noinspection PyPropertyAccess
class Widget(object):
# rect Rect bounds in parent's coordinates
# parent Widget containing widget
# subwidgets [Widget] contained widgets
# focus_switch Widget subwidget to receive key events
# fg_color color or None to inherit from parent
# bg_color color to fill background, or None
# visible boolean
# border_width int width of border to draw around widget, or None
# border_color color or None to use widget foreground color
# tab_stop boolean stop on this widget when tabbing
# anchor string of 'ltrb'
font = FontProperty('font')
fg_color = ThemeProperty('fg_color')
bg_color = ThemeProperty('bg_color')
bg_image = ThemeProperty('bg_image')
scale_bg = ThemeProperty('scale_bg')
border_width = ThemeProperty('border_width')
border_color = ThemeProperty('border_color')
sel_color = ThemeProperty('sel_color')
margin = ThemeProperty('margin')
menu_bar = overridable_property('menu_bar')
is_gl_container = overridable_property('is_gl_container')
tab_stop = False
enter_response = None
cancel_response = None
anchor = 'ltwh'
debug_resize = False
_menubar = None
_visible = True
_is_gl_container = False
redraw_every_event = True
tooltip = None
tooltipText = None
doNotTranslate = False
def __init__(self, rect=None, **kwds):
if rect and not isinstance(rect, Rect):
raise TypeError("Widget rect not a pygame.Rect")
self._rect = Rect(rect or (0, 0, 100, 100))
#-# Translation live update preparation
self.__lang = albow.translate.getLang()
self.__update_translation = False
self.shrink_wrapped = False
#-#
self.parent = None
self.subwidgets = []
self.focus_switch = None
self.is_modal = False
self.set(**kwds)
self.root = self.get_root()
self.setup_spacings()
#-# Translation live update preparation
@property
def get_update_translation(self):
return self.__update_translation
def set_update_translation(self, v):
if v:
self.font = self.predict_font({})
for widget in self.subwidgets:
widget.set_update_translation(v)
if self.shrink_wrapped:
self.shrink_wrap()
if hasattr(self, 'calc_size'):
self.calc_size()
self.invalidate()
self.__update_translation = v
#-#
def setup_spacings(self):
def new_size(size):
size = float(size * 1000)
size = size / float(100)
size = int(size * resource.font_proportion / 1000)
return size
self.margin = new_size(self.margin)
if hasattr(self, 'spacing'):
self.spacing = new_size(self.spacing)
def set(self, **kwds):
for name, value in kwds.iteritems():
if not hasattr(self, name):
raise TypeError("Unexpected keyword argument '%s'" % name)
setattr(self, name, value)
def get_rect(self):
return self._rect
def set_rect(self, x):
old_size = self._rect.size
self._rect = Rect(x)
self._resized(old_size)
# def get_anchor(self):
# if self.hstretch:
# chars ='lr'
# elif self.hmove:
# chars = 'r'
# else:
# chars = 'l'
# if self.vstretch:
# chars += 'tb'
# elif self.vmove:
# chars += 'b'
# else:
# chars += 't'
# return chars
#
# def set_anchor(self, chars):
# self.hmove = 'r' in chars and not 'l' in chars
# self.vmove = 'b' in chars and not 't' in chars
# self.hstretch = 'r' in chars and 'l' in chars
# self.vstretch = 'b' in chars and 't' in chars
#
# anchor = property(get_anchor, set_anchor)
resizing_axes = {'h': 'lr', 'v': 'tb'}
resizing_values = {'': [0], 'm': [1], 's': [0, 1]}
def set_resizing(self, axis, value):
chars = self.resizing_axes[axis]
anchor = self.anchor
for c in chars:
anchor = anchor.replace(c, '')
for i in self.resizing_values[value]:
anchor += chars[i]
self.anchor = anchor + value
def _resized(self, (old_width, old_height)):
new_width, new_height = self._rect.size
dw = new_width - old_width
dh = new_height - old_height
if dw or dh:
self.resized(dw, dh)
def resized(self, dw, dh):
if self.debug_resize:
print "Widget.resized:", self, "by", (dw, dh), "to", self.size
for widget in self.subwidgets:
widget.parent_resized(dw, dh)
def parent_resized(self, dw, dh):
debug_resize = self.debug_resize or getattr(self.parent, 'debug_resize', False)
if debug_resize:
print "Widget.parent_resized:", self, "by", (dw, dh)
left, top, width, height = self._rect
move = False
resize = False
anchor = self.anchor
if dw:
factors = [1, 1, 1] # left, width, right
if 'r' in anchor:
factors[2] = 0
if 'w' in anchor:
factors[1] = 0
if 'l' in anchor:
factors[0] = 0
if any(factors):
resize = factors[1]
move = factors[0] or factors[2]
#print "lwr", factors
left += factors[0] * dw / sum(factors)
width += factors[1] * dw / sum(factors)
#left = (left + width) + factors[2] * dw / sum(factors) - width
if dh:
factors = [1, 1, 1] # bottom, height, top
if 't' in anchor:
factors[2] = 0
if 'h' in anchor:
factors[1] = 0
if 'b' in anchor:
factors[0] = 0
if any(factors):
resize = factors[1]
move = factors[0] or factors[2]
#print "bht", factors
top += factors[2] * dh / sum(factors)
height += factors[1] * dh / sum(factors)
#top = (top + height) + factors[0] * dh / sum(factors) - height
if resize:
if debug_resize:
print "Widget.parent_resized: changing rect to", (left, top, width, height)
self.rect = (left, top, width, height)
elif move:
if debug_resize:
print "Widget.parent_resized: moving to", (left, top)
self._rect.topleft = (left, top)
rect = property(get_rect, set_rect)
left = rect_property('left')
right = rect_property('right')
top = rect_property('top')
bottom = rect_property('bottom')
width = rect_property('width')
height = rect_property('height')
size = rect_property('size')
topleft = rect_property('topleft')
topright = rect_property('topright')
bottomleft = rect_property('bottomleft')
bottomright = rect_property('bottomright')
midleft = rect_property('midleft')
midright = rect_property('midright')
midtop = rect_property('midtop')
midbottom = rect_property('midbottom')
center = rect_property('center')
centerx = rect_property('centerx')
centery = rect_property('centery')
def get_visible(self):
return self._visible
def set_visible(self, x):
self._visible = x
visible = overridable_property('visible')
def add(self, arg, index=None):
if arg:
if isinstance(arg, Widget):
if index is not None:
arg.set_parent(self, index)
else:
arg.set_parent(self)
else:
for item in arg:
self.add(item)
def add_centered(self, widget):
w, h = self.size
widget.center = w // 2, h // 2
self.add(widget)
def remove(self, widget):
if widget in self.subwidgets:
widget.set_parent(None)
def set_parent(self, parent, index=None):
if parent is not self.parent:
if self.parent:
self.parent._remove(self)
self.parent = parent
if parent:
parent._add(self, index)
def all_parents(self):
widget = self
parents = []
while widget.parent:
parents.append(widget.parent)
widget = widget.parent
return parents
def _add(self, widget, index=None):
if index is not None:
self.subwidgets.insert(index, widget)
else:
self.subwidgets.append(widget)
if hasattr(widget, "idleevent"):
#print "Adding idle handler for ", widget
self.root.add_idle_handler(widget)
def _remove(self, widget):
if hasattr(widget, "idleevent"):
#print "Removing idle handler for ", widget
self.root.remove_idle_handler(widget)
self.subwidgets.remove(widget)
if self.focus_switch is widget:
self.focus_switch = None
def draw_all(self, surface):
if self.visible:
surf_rect = surface.get_rect()
bg_image = self.bg_image
if bg_image:
assert isinstance(bg_image, Surface)
if self.scale_bg:
bg_width, bg_height = bg_image.get_size()
width, height = self.size
if width > bg_width or height > bg_height:
hscale = width / bg_width
vscale = height / bg_height
bg_image = rotozoom(bg_image, 0.0, max(hscale, vscale))
r = bg_image.get_rect()
r.center = surf_rect.center
surface.blit(bg_image, r)
else:
bg = self.bg_color
if bg:
surface.fill(bg)
self.draw(surface)
bw = self.border_width
if bw:
bc = self.border_color or self.fg_color
frame_rect(surface, bc, surf_rect, bw)
for widget in self.subwidgets:
sub_rect = widget.rect
if debug_rect:
print "Widget: Drawing subwidget %s of %s with rect %s" % (
widget, self, sub_rect)
sub_rect = surf_rect.clip(sub_rect)
if sub_rect.width > 0 and sub_rect.height > 0:
try:
sub = surface.subsurface(sub_rect)
except ValueError, e:
if str(e) == "subsurface rectangle outside surface area":
self.diagnose_subsurface_problem(surface, widget)
else:
raise
else:
widget.draw_all(sub)
self.draw_over(surface)
def diagnose_subsurface_problem(self, surface, widget):
mess = "Widget %s %s outside parent surface %s %s" % (
widget, widget.rect, self, surface.get_rect())
sys.stderr.write("%s\n" % mess)
surface.fill((255, 0, 0), widget.rect)
def draw(self, surface):
pass
def draw_over(self, surface):
pass
def find_widget(self, pos):
for widget in self.subwidgets[::-1]:
if widget.visible:
r = widget.rect
if r.collidepoint(pos):
return widget.find_widget(subtract(pos, r.topleft))
return self
def handle_mouse(self, name, event):
self.augment_mouse_event(event)
self.call_handler(name, event)
self.setup_cursor(event)
def mouse_down(self, event):
self.call_parent_handler("mouse_down", event)
def mouse_up(self, event):
self.call_parent_handler("mouse_up", event)
def augment_mouse_event(self, event):
event.dict['local'] = self.global_to_local(event.pos)
def setup_cursor(self, event):
global current_cursor
cursor = self.get_cursor(event) or arrow_cursor
if cursor is not current_cursor:
set_cursor(*cursor)
current_cursor = cursor
def dispatch_key(self, name, event):
if self.visible:
if event.cmd and event.type == KEYDOWN:
menubar = self._menubar
if menubar and menubar.handle_command_key(event):
return
widget = self.focus_switch
if widget:
widget.dispatch_key(name, event)
else:
self.call_handler(name, event)
else:
self.call_parent_handler(name, event)
def get_focus(self):
widget = self
while 1:
focus = widget.focus_switch
if not focus:
break
widget = focus
return widget
def notify_attention_loss(self):
widget = self
while 1:
if widget.is_modal:
break
parent = widget.parent
if not parent:
break
focus = parent.focus_switch
if focus and focus is not widget:
self.root.notMove = False
focus.dispatch_attention_loss()
widget = parent
def dispatch_attention_loss(self):
widget = self
while widget:
widget.attention_lost()
widget = widget.focus_switch
def attention_lost(self):
pass
def handle_command(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
parent = self.next_handler()
if parent:
return parent.handle_command(name, *args)
def next_handler(self):
if not self.is_modal:
return self.parent
def call_handler(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
return 'pass'
def call_parent_handler(self, name, *args):
parent = self.next_handler()
if parent:
parent.call_handler(name, *args)
def global_to_local(self, p):
return subtract(p, self.local_to_global_offset())
def local_to_global(self, p):
return add(p, self.local_to_global_offset())
def local_to_global_offset(self):
d = self.topleft
parent = self.parent
if parent:
d = add(d, parent.local_to_global_offset())
return d
def key_down(self, event):
k = event.key
#print "Widget.key_down:", k ###
if k == K_RETURN or k == K_KP_ENTER:
if self.enter_response is not None:
self.dismiss(self.enter_response)
return
elif k == K_ESCAPE:
self.root.fix_sticky_ctrl()
if self.cancel_response is not None:
self.dismiss(self.cancel_response)
return
elif k == K_TAB:
self.tab_to_next()
return
self.call_parent_handler('key_down', event)
def key_up(self, event):
self.call_parent_handler('key_up', event)
def is_inside(self, container):
widget = self
while widget:
if widget is container:
return True
widget = widget.parent
return False
@property
def is_hover(self):
return self.root.hover_widget is self
def present(self, centered=True):
#print "Widget: presenting with rect", self.rect
if self.root is None:
self.root = self.get_root()
if "ControlPanel" not in str(self):
self.root.notMove = True
if centered:
self.center = self.root.center
self.root.add(self)
try:
self.root.run_modal(self)
self.dispatch_attention_loss()
finally:
self.root.remove(self)
#print "Widget.present: returning", self.modal_result
if "ControlPanel" not in str(self):
self.root.notMove = False
return self.modal_result
def dismiss(self, value=True):
self.root.notMove = False
self.modal_result = value
def get_root(self):
return root_widget
def get_top_widget(self):
top = self
while top.parent and not top.is_modal:
top = top.parent
return top
def focus(self):
parent = self.next_handler()
if parent:
parent.focus_on(self)
def focus_on(self, subwidget):
old_focus = self.focus_switch
if old_focus is not subwidget:
if old_focus:
old_focus.dispatch_attention_loss()
self.focus_switch = subwidget
self.focus()
def has_focus(self):
return self.is_modal or (self.parent and self.parent.focused_on(self))
def focused_on(self, widget):
return self.focus_switch is widget and self.has_focus()
def focus_chain(self):
result = []
widget = self
while widget:
result.append(widget)
widget = widget.focus_switch
return result
def shrink_wrap(self):
contents = self.subwidgets
if contents:
rects = [widget.rect for widget in contents]
#rmax = Rect.unionall(rects) # broken in PyGame 1.7.1
rmax = rects.pop()
for r in rects:
rmax = rmax.union(r)
self._rect.size = add(rmax.topleft, rmax.bottomright)
#-# Translation live update preparation
self.shrink_wrapped = True
#-#
def invalidate(self):
if self.root:
self.root.bonus_draw_time = False
@staticmethod
def get_cursor(event):
return arrow_cursor
def predict(self, kwds, name):
try:
return kwds[name]
except KeyError:
return theme.root.get(self.__class__, name)
def predict_attr(self, kwds, name):
try:
return kwds[name]
except KeyError:
return getattr(self, name)
def init_attr(self, kwds, name):
try:
return kwds.pop(name)
except KeyError:
return getattr(self, name)
def predict_font(self, kwds, name='font'):
return kwds.get(name) or theme.root.get_font(self.__class__, name)
def get_margin_rect(self):
r = Rect((0, 0), self.size)
d = -2 * self.margin
r.inflate_ip(d, d)
return r
def set_size_for_text(self, width, nlines=1):
if width is not None:
font = self.font
d = 2 * self.margin
if isinstance(width, basestring):
width, height = font.size(width)
width += d + 2
else:
height = font.size("X")[1]
self.size = (width, height * nlines + d)
def tab_to_first(self):
chain = self.get_tab_order()
if chain:
chain[0].focus()
def tab_to_next(self):
top = self.get_top_widget()
chain = top.get_tab_order()
try:
i = chain.index(self)
except ValueError:
return
target = chain[(i + 1) % len(chain)]
target.focus()
def get_tab_order(self):
result = []
self.collect_tab_order(result)
return result
def collect_tab_order(self, result):
if self.visible:
if self.tab_stop:
result.append(self)
for child in self.subwidgets:
child.collect_tab_order(result)
# def tab_to_first(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_first:", self ###
# print "...start =", start ###
# if not self.visible:
# if debug_tab: print "...invisible" ###
# self.tab_to_next_in_parent(start)
# elif self.tab_stop:
# if debug_tab: print "...stopping here" ###
# self.focus()
# else:
# if debug_tab: print "...tabbing to next" ###
# self.tab_to_next(start or self)
# if debug_tab: print "Exit Widget.tab_to_first:", self ###
#
# def tab_to_next(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_next:", self ###
# print "...start =", start ###
# sub = self.subwidgets
# if sub:
# if debug_tab: print "...tabbing to first subwidget" ###
# sub[0].tab_to_first(start or self)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next:", self ###
#
# def tab_to_next_in_parent(self, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_in_parent:", self ###
# print "...start =", start ###
# parent = self.parent
# if parent and not self.is_modal:
# if debug_tab: print "...telling parent to tab to next" ###
# parent.tab_to_next_after(self, start)
# else:
# if self is not start:
# if debug_tab: print "...wrapping back to first" ###
# self.tab_to_first(start)
# if debug_tab: print "Exit Widget.tab_to_next_in_parent:", self ###
#
# def tab_to_next_after(self, last, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_after:", self, last ###
# print "...start =", start ###
# sub = self.subwidgets
# i = sub.index(last) + 1
# if debug_tab: print "...next index =", i, "of", len(sub) ###
# if i < len(sub):
# if debug_tab: print "...tabbing there" ###
# sub[i].tab_to_first(start)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next_after:", self, last ###
def inherited(self, attribute):
value = getattr(self, attribute)
if value is not None:
return value
else:
parent = self.next_handler()
if parent:
return parent.inherited(attribute)
def __contains__(self, event):
r = Rect(self._rect)
r.left = 0
r.top = 0
p = self.global_to_local(event.pos)
return r.collidepoint(p)
def get_mouse(self):
return self.root.get_mouse_for(self)
def get_menu_bar(self):
return self._menubar
def set_menu_bar(self, menubar):
if menubar is not self._menubar:
if self._menubar:
self.remove(self._menubar)
self._menubar = menubar
if menubar:
if menubar.width == 0:
menubar.width = self.width
menubar.anchor = 'lr'
self.add(menubar)
def get_is_gl_container(self):
return self._is_gl_container
def set_is_gl_container(self, x):
self._is_gl_container = x
def gl_draw_all(self, root, offset):
if not self.visible:
return
from OpenGL import GL, GLU
rect = self.rect.move(offset)
if self.is_gl_container:
self.gl_draw_self(root, offset)
suboffset = rect.topleft
for subwidget in self.subwidgets:
subwidget.gl_draw_all(root, suboffset)
else:
try:
surface = Surface(self.size, SRCALPHA)
except Exception:
#size error?
return
self.draw_all(surface)
data = image.tostring(surface, 'RGBA', 1)
w, h = root.size
GL.glViewport(0, 0, w, h)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GLU.gluOrtho2D(0, w, 0, h)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GL.glRasterPos2i(max(rect.left, 0), max(h - rect.bottom, 0))
GL.glPushAttrib(GL.GL_COLOR_BUFFER_BIT)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glDrawPixels(self.width, self.height,
GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, fromstring(data, dtype='uint8'))
GL.glPopAttrib()
GL.glFlush()
def gl_draw_self(self, root, offset):
pass
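# A minimal, hypothetical sketch (not part of albow) of the intended
# subclassing pattern: override draw() for custom rendering and use the
# anchor string so the widget follows its parent when it is resized.
class _StatusBar(Widget):
    tab_stop = False   # purely informational, so skip it during Tab traversal
    anchor = 'lrbh'    # stretch with the parent's width, keep a fixed height pinned to the bottom
    def draw(self, surface):
        # draw() receives a surface already clipped to this widget's rect;
        # outline it with the theme foreground colour as placeholder content.
        frame_rect(surface, self.fg_color, surface.get_rect(), 1)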
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ArmTemplatesOperations:
"""ArmTemplatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
lab_name: str,
artifact_source_name: str,
expand: Optional[str] = None,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.ArmTemplateList"]:
"""List azure resource manager templates in a given artifact source.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param artifact_source_name: The name of the artifact source.
:type artifact_source_name: str
:param expand: Specify the $expand query. Example: 'properties($select=displayName)'.
:type expand: str
:param filter: The filter to apply to the operation. Example: '$filter=contains(name,'myName').
:type filter: str
:param top: The maximum number of resources to return from the operation. Example: '$top=10'.
:type top: int
:param orderby: The ordering expression for the results, using OData notation. Example:
'$orderby=name desc'.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ArmTemplateList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.devtestlabs.models.ArmTemplateList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ArmTemplateList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'artifactSourceName': self._serialize.url("artifact_source_name", artifact_source_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ArmTemplateList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/armtemplates'} # type: ignore
async def get(
self,
resource_group_name: str,
lab_name: str,
artifact_source_name: str,
name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.ArmTemplate":
"""Get azure resource manager template.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param artifact_source_name: The name of the artifact source.
:type artifact_source_name: str
:param name: The name of the azure resource manager template.
:type name: str
:param expand: Specify the $expand query. Example: 'properties($select=displayName)'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ArmTemplate, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.ArmTemplate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ArmTemplate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'artifactSourceName': self._serialize.url("artifact_source_name", artifact_source_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ArmTemplate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/armtemplates/{name}'} # type: ignore
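# A hedged usage sketch (not generated code): the async service client is
# assumed to be azure.mgmt.devtestlabs.aio.DevTestLabsClient and to expose this
# operation group as `client.arm_templates`; the credential, subscription id,
# resource group, lab and artifact source names below are placeholders.
async def _list_arm_templates_example():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.devtestlabs.aio import DevTestLabsClient
    async with DefaultAzureCredential() as credential:
        async with DevTestLabsClient(credential, "<subscription-id>") as client:
            async for template in client.arm_templates.list(
                resource_group_name="my-rg",
                lab_name="my-lab",
                artifact_source_name="public repo",
                top=10,
            ):
                print(template.name)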
|
|
#! /usr/bin/env python
"""Read/write data from an ESRI ASCII file into a RasterModelGrid.
ESRI ASCII functions
++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.io.esri_ascii.read_asc_header
~landlab.io.esri_ascii.read_esri_ascii
~landlab.io.esri_ascii.write_esri_ascii
"""
import os
import re
import six
import numpy as np
_VALID_HEADER_KEYS = [
'ncols', 'nrows', 'xllcorner', 'xllcenter', 'yllcorner',
'yllcenter', 'cellsize', 'nodata_value',
]
_HEADER_KEY_REGEX_PATTERN = re.compile(r'\s*(?P<key>[a-zA-Z]\w+)')
_HEADER_REGEX_PATTERN = re.compile(
r'\s*(?P<key>[a-zA-Z]\w+)\s+(?P<value>[\w.+-]+)')
_HEADER_VALUE_TESTS = {
'nrows': (int, lambda x: x > 0),
'ncols': (int, lambda x: x > 0),
'cellsize': (float, lambda x: x > 0),
'xllcorner': (float, lambda x: True),
'xllcenter': (float, lambda x: True),
'yllcorner': (float, lambda x: True),
'yllcenter': (float, lambda x: True),
'nodata_value': (float, lambda x: True),
}
class Error(Exception):
"""Base class for errors in this module."""
pass
class BadHeaderLineError(Error):
"""Raise this error for a bad header is line."""
def __init__(self, line):
self._line = line
def __str__(self):
return self._line
class MissingRequiredKeyError(Error):
"""Raise this error when a header is missing a required key."""
def __init__(self, key):
self._key = key
def __str__(self):
return self._key
class KeyTypeError(Error):
"""Raise this error when a header's key value is of the wrong type."""
def __init__(self, key, expected_type):
self._key = key
self._type = str(expected_type)
def __str__(self):
return 'Unable to convert %s to %s' % (self._key, self._type)
class KeyValueError(Error):
"""Raise this error when a header's key value has a bad value."""
def __init__(self, key, message):
self._key = key
self._msg = message
def __str__(self):
return '%s: %s' % (self._key, self._msg)
class DataSizeError(Error):
"""Raise this error if the size of data does not match the header."""
def __init__(self, size, expected_size):
self._actual = size
self._expected = expected_size
def __str__(self):
return '%s != %s' % (self._actual, self._expected)
class MismatchGridDataSizeError(Error):
"""Raise this error if the data size does not match the grid size."""
def __init__(self, size, expected_size):
self._actual = size
self._expected = expected_size
def __str__(self):
return '(data size) %s != %s (grid size)' \
% (self._actual, self._expected)
def _parse_header_key_value(line):
"""Parse a header line into a key-value pair.
Parameters
----------
line : str
Header line.
Returns
-------
(str, str)
Header key-value pair
Raises
------
BadHeaderLineError
        There is something wrong with the header line.
"""
match = _HEADER_KEY_REGEX_PATTERN.match(line)
if match is None:
return None
# raise BadHeaderLineError(line)
match = _HEADER_REGEX_PATTERN.match(line)
if match is None:
raise BadHeaderLineError(line)
(key, value) = (match.group('key').lower(), match.group('value'))
if key in _VALID_HEADER_KEYS:
return (key, value)
else:
raise BadHeaderLineError(line)
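# A minimal sketch of the helper above: recognised "key value" lines give a
# lowercased (key, value) pair, unknown keys raise BadHeaderLineError, and
# non-header text (the start of the data block) returns None.
def _parse_header_example():
    assert _parse_header_key_value('NCOLS 200') == ('ncols', '200')
    assert _parse_header_key_value('cellsize 1.5') == ('cellsize', '1.5')
    assert _parse_header_key_value('0. 1. 2.') is None
    try:
        _parse_header_key_value('bogus_key 42')
    except BadHeaderLineError:
        pass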
def _header_lines(asc_file):
"""Iterate over header lines for a ESRI ASCII file.
Parameters
----------
asc_file : file_like
File-like object for an ESRI ASCII file.
Yields
------
str
Header line.
"""
pos = asc_file.tell()
line = asc_file.readline()
while len(line) > 0:
if len(line.strip()) > 0:
item = _parse_header_key_value(line)
if item:
yield item
else:
asc_file.seek(pos, 0)
break
pos = asc_file.tell()
line = asc_file.readline()
def _header_is_valid(header):
"""Check if the ESRI ASCII header is valid.
Parameters
----------
header : dict
Header as key-values pairs.
Raises
------
MissingRequiredKeyError
The header is missing a required key.
KeyTypeError
        The header has the key but its value is of the wrong type.
"""
header_keys = set(header)
required_keys = set(['ncols', 'nrows', 'cellsize'])
if not required_keys.issubset(header_keys):
raise MissingRequiredKeyError(', '.join(required_keys - header_keys))
for keys in [('xllcenter', 'xllcorner'), ('yllcenter', 'yllcorner')]:
if len(set(keys) & header_keys) != 1:
raise MissingRequiredKeyError('|'.join(keys))
for (key, requires) in _HEADER_VALUE_TESTS.items():
to_type, is_valid = requires
if key not in header:
continue
try:
header[key] = to_type(header[key])
except ValueError:
raise KeyTypeError(key, to_type)
if not is_valid(header[key]):
raise KeyValueError(key, 'Bad value')
return True
def read_asc_header(asc_file):
"""Read header information from an ESRI ASCII raster file.
The header contains the following variables,
- *ncols*: Number of cell columns
- *nrows*: Number of cell rows
- *xllcenter* or *xllcorner*: X (column) coordinate of lower-left
coordinate of grid (by center or lower-left corner of the cell)
- *yllcenter*, *yllcorner*: Y (row) coordinate of lower-left
coordinate of grid (by center or lower-left corner of the cell)
- *cellsize*: Grid spacing between rows and columns
- *nodata_value*: No-data value (optional)
Parameters
----------
asc_file : file_like
File-like object from which to read header.
Returns
-------
dict
Header as key-value pairs.
Raises
------
MissingRequiredKeyError
The header is missing a required key.
KeyTypeError
        The header has the key but its value is of the wrong type.
Examples
--------
>>> from six import StringIO
>>> from landlab.io.esri_ascii import read_asc_header
>>> contents = StringIO('''
... nrows 100
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> hdr = read_asc_header(contents)
>>> hdr['nrows'], hdr['ncols']
(100, 200)
>>> hdr['cellsize']
1.5
>>> hdr['xllcenter'], hdr['yllcenter']
(0.5, -0.5)
    ``MissingRequiredKeyError`` is raised if the header does not contain all of the
necessary keys.
>>> contents = StringIO('''
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> read_asc_header(contents) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
MissingRequiredKeyError: nrows
    ``KeyTypeError`` is raised if a value is of the wrong type. For instance,
``nrows`` and ``ncols`` must be ``int``.
>>> contents = StringIO('''
... nrows 100.5
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> read_asc_header(contents) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyTypeError: Unable to convert nrows to <type 'int'>
"""
header = dict()
for (key, value) in _header_lines(asc_file):
header[key] = value
_header_is_valid(header)
return header
def _read_asc_data(asc_file):
"""Read gridded data from an ESRI ASCII data file.
Parameters
----------
asc_file : file-like
File-like object of the data file pointing to the start of the data.
.. note::
First row of the data is at the top of the raster grid, the second
row is the second from the top, and so on.
"""
return np.loadtxt(asc_file)
def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0):
"""Read :py:class:`~landlab.RasterModelGrid` from an ESRI ASCII file.
Read data from *asc_file*, an ESRI_ ASCII file, into a
:py:class:`~landlab.RasterModelGrid`. *asc_file* is either the name of
the data file or is a file-like object.
The grid and data read from the file are returned as a tuple
(*grid*, *data*) where *grid* is an instance of
:py:class:`~landlab.RasterModelGrid` and *data* is a numpy
    array of doubles that has been reshaped to have the number of rows
and columns given in the header.
.. _ESRI: http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm
Parameters
----------
asc_file : str of file-like
Data file to read.
reshape : boolean, optional
Reshape the returned array, otherwise return a flattened array.
name : str, optional
Add data to the grid as a named field.
grid : *grid* , optional
Adds data to an existing *grid* instead of creating a new one.
halo : integer, optional
Adds outer border of depth halo to the *grid*.
Returns
-------
(grid, data) : tuple
A newly-created RasterModel grid and the associated node data.
Raises
------
DataSizeError
Data are not the same size as indicated by the header file.
MismatchGridDataSizeError
If a grid is passed, the size of the grid does not agree with the
size of the data.
Examples
--------
Assume that fop is the name of a file that contains text below
(make sure you have your path correct):
ncols 3
nrows 4
xllcorner 1.
yllcorner 2.
cellsize 10.
NODATA_value -9999
0. 1. 2.
3. 4. 5.
6. 7. 8.
9. 10. 11.
--------
>>> from landlab.io import read_esri_ascii
>>> (grid, data) = read_esri_ascii('fop') # doctest: +SKIP
>>> #grid is an object of type RasterModelGrid with 4 rows and 3 cols
>>> #data contains an array of length 4*3 that is equal to
>>> # [9., 10., 11., 6., 7., 8., 3., 4., 5., 0., 1., 2.]
>>> (grid, data) = read_esri_ascii('fop', halo=1) # doctest: +SKIP
>>> #now the data has a nodata_value ring of -9999 around it. So array is
>>> # [-9999, -9999, -9999, -9999, -9999, -9999,
>>> # -9999, 9., 10., 11., -9999,
>>> # -9999, 6., 7., 8., -9999,
>>> # -9999, 3., 4., 5., -9999,
>>> # -9999, 0., 1., 2. -9999,
>>> # -9999, -9999, -9999, -9999, -9999, -9999]
"""
from ..grid import RasterModelGrid
if isinstance(asc_file, six.string_types):
file_name = asc_file
with open(file_name, 'r') as asc_file:
header = read_asc_header(asc_file)
data = _read_asc_data(asc_file)
else:
header = read_asc_header(asc_file)
data = _read_asc_data(asc_file)
#There is no reason for halo to be negative.
#Assume that if a negative value is given it should be 0.
if halo <= 0:
shape = (header['nrows'], header['ncols'])
if data.size != shape[0] * shape[1]:
raise DataSizeError(shape[0] * shape[1], data.size)
else:
shape = (header['nrows'] + 2 * halo, header['ncols'] + 2 * halo)
#check to see if a nodata_value was given. If not, assign -9999.
if 'nodata_value' in header.keys():
nodata_value = header['nodata_value']
else:
header['nodata_value'] = -9999.
nodata_value = header['nodata_value']
if data.size != (shape[0] - 2 * halo) * (shape[1] - 2 * halo):
raise DataSizeError(shape[0] * shape[1], data.size)
spacing = (header['cellsize'], header['cellsize'])
#origin = (header['xllcorner'], header['yllcorner'])
data = np.flipud(data)
#REMEMBER, shape contains the size with halo in place
#header contains the shape of the original data
#Add halo below
if halo > 0:
helper_row = np.ones(shape[1]) * nodata_value
#for the first halo row(s), add num cols worth of nodata vals to data
for i in range(0, halo):
data = np.insert(data,0,helper_row)
#then for header['nrows'] add halo number nodata vals, header['ncols']
#of data, then halo number of nodata vals
helper_row_ends = np.ones(halo) * nodata_value
for i in range(halo, header['nrows']+halo):
#this adds at the beginning of the row
data = np.insert(data,i * shape[1],helper_row_ends)
#this adds at the end of the row
data = np.insert(data,(i + 1) * shape[1] - halo,helper_row_ends)
#for the last halo row(s), add num cols worth of nodata vals to data
for i in range(header['nrows']+halo,shape[0]):
data = np.insert(data,data.size,helper_row)
if not reshape:
data = data.flatten()
if grid is not None:
if (grid.number_of_node_rows != shape[0]) or \
(grid.number_of_node_columns != shape[1]):
raise MismatchGridDataSizeError(shape[0] * shape[1], \
grid.number_of_node_rows * grid.number_of_node_columns )
if grid is None:
grid = RasterModelGrid(shape, spacing=spacing)
if name:
grid.add_field('node', name, data)
return (grid, data)
def write_esri_ascii(path, fields, names=None, clobber=False):
"""Write landlab fields to ESRI ASCII.
Write the data and grid information for *fields* to *path* in the ESRI
ASCII format.
Parameters
----------
path : str
Path to output file.
fields : field-like
Landlab field object that holds a grid and associated values.
names : iterable of str, optional
Names of the fields to include in the output file. If not provided,
write all fields.
clobber : boolean
If *path* exists, clobber the existing file, otherwise raise an
exception.
Examples
--------
>>> import numpy as np
>>> from landlab.testing.tools import cdtemp
>>> from landlab import RasterModelGrid
>>> from landlab.io.esri_ascii import write_esri_ascii
>>> grid = RasterModelGrid((4, 5), spacing=(2., 2.))
>>> _ = grid.add_field('node', 'air__temperature', np.arange(20.))
>>> with cdtemp() as _:
... files = write_esri_ascii('test.asc', grid)
>>> files
['test.asc']
>>> _ = grid.add_field('node', 'land_surface__elevation', np.arange(20.))
>>> with cdtemp() as _:
... files = write_esri_ascii('test.asc', grid)
>>> files.sort()
>>> files
['test_air__temperature.asc', 'test_land_surface__elevation.asc']
"""
if os.path.exists(path) and not clobber:
raise ValueError('file exists')
if isinstance(names, six.string_types):
names = [names]
names = names or fields.at_node.keys()
if len(names) == 1:
paths = [path]
elif len(names) > 1:
(base, ext) = os.path.splitext(path)
paths = [base + '_' + name + ext for name in names]
else:
raise ValueError('no node fields to write')
bad_names = set(names) - set(fields.at_node.keys())
if len(bad_names) > 0:
raise ValueError('unknown field name(s): %s' % ','.join(bad_names))
header = {
'ncols': fields.number_of_node_columns,
'nrows': fields.number_of_node_rows,
'xllcorner': fields.node_x[0],
'yllcorner': fields.node_y[0],
'cellsize': fields.dx,
}
for path, name in zip(paths, names):
header_lines = ['%s %s' % (key, str(val))
for key, val in list(header.items())]
data = fields.at_node[name].reshape(header['nrows'], header['ncols'])
np.savetxt(path, np.flipud(data), header=os.linesep.join(header_lines),
comments='')
return paths
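# A hedged round-trip sketch: write a node field with write_esri_ascii and read
# it back with read_esri_ascii.  The field name and temporary directory are
# illustrative only.
def _esri_ascii_roundtrip_example(tmpdir='/tmp'):
    from landlab import RasterModelGrid
    grid = RasterModelGrid((4, 5), spacing=(2., 2.))
    grid.add_field('node', 'topographic__elevation', np.arange(20.))
    path = os.path.join(tmpdir, 'elevation.asc')
    files = write_esri_ascii(path, grid, clobber=True)
    # Reading the file back should reproduce the original node ordering.
    new_grid, data = read_esri_ascii(files[0], name='topographic__elevation')
    return np.allclose(data, np.arange(20.))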
|
|
import nose.tools
from angr import SimState, SimHeapPTMalloc
# TODO: Make these tests more architecture-independent (note dependencies of some behavior on chunk metadata size)
def chunk_iterators_are_same(iterator1, iterator2):
for ck in iterator1:
ck2 = next(iterator2)
if ck.base != ck2.base:
return False
if ck.is_free() != ck2.is_free():
return False
try:
next(iterator2)
except StopIteration:
return True
return False
def same_heap_states(state1, state2):
return chunk_iterators_are_same(state1.heap.chunks(), state2.heap.chunks())
def max_sym_var_val(state):
return state.libc.max_variable_size
def run_malloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(max_sym_var_val(s)))
s.heap.malloc(x)
sc.heap.malloc(max_sym_var_val(sc))
nose.tools.assert_true(same_heap_states(s, sc))
def test_malloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_malloc_maximizes_sym_arg, arch
def run_free_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p = s.heap.malloc(50)
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(p))
s.heap.free(x)
sc.heap.free(p)
nose.tools.assert_true(same_heap_states(s, sc))
def test_free_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_free_maximizes_sym_arg, arch
def run_calloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(20))
y = s.solver.BVS("y", 32)
s.solver.add(y.UGE(0))
s.solver.add(y.ULE(6))
s.heap.calloc(x, y)
sc.heap.calloc(20, 6)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_calloc_maximizes_sym_arg, arch
def run_realloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p = s.heap.malloc(50)
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(p))
y = s.solver.BVS("y", 32)
s.solver.add(y.UGE(0))
s.solver.add(y.ULE(max_sym_var_val(s)))
s.heap.realloc(x, y)
sc.heap.realloc(p, max_sym_var_val(sc))
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_realloc_maximizes_sym_arg, arch
def run_malloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.malloc(0x2000)
nose.tools.assert_equals(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_malloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_malloc_no_space_returns_null, arch
def run_calloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.calloc(0x500, 4)
nose.tools.assert_equals(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_calloc_no_space_returns_null, arch
def run_realloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p1 = s.heap.malloc(20)
sc = s.copy()
p2 = s.heap.realloc(p1, 0x2000)
nose.tools.assert_equals(p2, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_realloc_no_space_returns_null, arch
def run_first_fit_and_free_malloced_makes_available(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(50)
s.heap.free(p1)
p2 = s.heap.malloc(30)
nose.tools.assert_equals(p1, p2)
def test_first_fit_and_free_malloced_makes_available():
for arch in ('X86', 'AMD64'):
yield run_first_fit_and_free_malloced_makes_available, arch
def run_free_calloced_makes_available(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.calloc(20, 5)
p1 = s.heap.calloc(30, 4)
s.heap.free(p1)
p2 = s.heap.calloc(15, 8)
nose.tools.assert_equals(p1, p2)
def test_free_calloced_makes_available():
for arch in ('X86', 'AMD64'):
yield run_free_calloced_makes_available, arch
def run_realloc_moves_and_frees(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(60)
s.heap.malloc(200)
p2 = s.heap.realloc(p1, 300)
p3 = s.heap.malloc(30)
nose.tools.assert_equals(p1, p3)
nose.tools.assert_less(p1, p2)
def test_realloc_moves_and_frees():
for arch in ('X86', 'AMD64'):
yield run_realloc_moves_and_frees, arch
def run_realloc_near_same_size(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(61)
s.heap.malloc(80)
sc = s.copy()
p2 = s.heap.realloc(p1, 62)
nose.tools.assert_equals(p1, p2)
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_near_same_size():
for arch in ('X86', 'AMD64'):
yield run_realloc_near_same_size, arch
def run_needs_space_for_metadata(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.malloc(0x1000)
nose.tools.assert_equals(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_needs_space_for_metadata():
for arch in ('X86', 'AMD64'):
yield run_needs_space_for_metadata, arch
def run_unusable_amount_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(0x1000 - 4 * s.heap._chunk_size_t_size)
sc = s.copy()
p = s.heap.malloc(1)
nose.tools.assert_equals(p, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_unusable_amount_returns_null():
for arch in ('X86', 'AMD64'):
yield run_unusable_amount_returns_null, arch
def run_free_null_preserves_state(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
p = s.heap.malloc(40)
s.heap.malloc(50)
s.heap.free(p)
s2 = s.copy()
s2.heap.free(0)
nose.tools.assert_true(same_heap_states(s, s2))
def test_free_null_preserves_state():
for arch in ('X86', 'AMD64'):
yield run_free_null_preserves_state, arch
def run_skips_chunks_too_small(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
p = s.heap.malloc(50)
s.heap.malloc(40)
s.heap.free(p)
p2 = s.heap.calloc(20, 5)
nose.tools.assert_less(p, p2)
def test_skips_chunks_too_small():
for arch in ('X86', 'AMD64'):
yield run_skips_chunks_too_small, arch
def run_calloc_multiplies(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
sc = s.copy()
s.heap.malloc(100)
sc.heap.calloc(4, 25)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_multiplies():
for arch in ('X86', 'AMD64'):
yield run_calloc_multiplies, arch
def run_calloc_clears(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.memory.store(0xd0000000 + 2 * s.heap._chunk_size_t_size, s.solver.BVV(-1, 100 * 8))
sc = s.copy()
p1 = s.heap.calloc(6, 5)
p2 = sc.heap.malloc(30)
v1 = s.memory.load(p1, 30)
v2 = sc.memory.load(p2, 30)
nose.tools.assert_true(s.solver.is_true(v1 == 0))
nose.tools.assert_true(sc.solver.is_true(v2 == -1))
def test_calloc_clears():
for arch in ('X86', 'AMD64'):
yield run_calloc_clears, arch
if __name__ == "__main__":
g = globals().copy()
for func_name, func in g.items():
if func_name.startswith("test_") and hasattr(func, '__call__'):
for r, a in func():
r(a)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import base64
import frappe
import os
import unittest
from frappe import _
from frappe.core.doctype.file.file import move_file, get_files_in_folder
from frappe.utils import get_files_path
# test_records = frappe.get_test_records('File')
test_content1 = 'Hello'
test_content2 = 'Hello World'
def make_test_doc():
d = frappe.new_doc('ToDo')
d.description = 'Test'
d.save()
return d.doctype, d.name
class TestSimpleFile(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = test_content1
_file = frappe.get_doc({
"doctype": "File",
"file_name": "test1.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content})
_file.save()
self.saved_file_url = _file.file_url
def test_save(self):
_file = frappe.get_doc("File", {"file_url": self.saved_file_url})
content = _file.get_content()
self.assertEqual(content, self.test_content)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestBase64File(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = base64.b64encode(test_content1.encode('utf-8'))
_file = frappe.get_doc({
"doctype": "File",
"file_name": "test_base64.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_docname": self.attached_to_docname,
"content": self.test_content,
"decode": True})
_file.save()
self.saved_file_url = _file.file_url
def test_saved_content(self):
_file = frappe.get_doc("File", {"file_url": self.saved_file_url})
content = _file.get_content()
self.assertEqual(content, test_content1)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameFileName(unittest.TestCase):
def test_saved_content(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content2
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": "testing.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content1})
_file1.save()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": "testing.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content2})
_file2.save()
self.saved_file_url1 = _file1.file_url
self.saved_file_url2 = _file2.file_url
_file = frappe.get_doc("File", {"file_url": self.saved_file_url1})
content1 = _file.get_content()
self.assertEqual(content1, self.test_content1)
_file = frappe.get_doc("File", {"file_url": self.saved_file_url2})
content2 = _file.get_content()
self.assertEqual(content2, self.test_content2)
def test_saved_content_private(self):
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": "testing-private.txt",
"content": test_content1,
"is_private": 1
}).insert()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": "testing-private.txt",
"content": test_content2,
"is_private": 1
}).insert()
_file = frappe.get_doc("File", {"file_url": _file1.file_url})
self.assertEqual(_file.get_content(), test_content1)
_file = frappe.get_doc("File", {"file_url": _file2.file_url})
self.assertEqual(_file.get_content(), test_content2)
class TestSameContent(unittest.TestCase):
def setUp(self):
self.attached_to_doctype1, self.attached_to_docname1 = make_test_doc()
self.attached_to_doctype2, self.attached_to_docname2 = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content1
self.orig_filename = 'hello.txt'
self.dup_filename = 'hello2.txt'
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": self.orig_filename,
"attached_to_doctype": self.attached_to_doctype1,
"attached_to_name": self.attached_to_docname1,
"content": self.test_content1})
_file1.save()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": self.dup_filename,
"attached_to_doctype": self.attached_to_doctype2,
"attached_to_name": self.attached_to_docname2,
"content": self.test_content2})
_file2.save()
def test_saved_content(self):
self.assertFalse(os.path.exists(get_files_path(self.dup_filename)))
def test_attachment_limit(self):
doctype, docname = make_test_doc()
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
limit_property = make_property_setter('ToDo', None, 'max_attachments', 1, 'int', for_doctype=True)
file1 = frappe.get_doc({
"doctype": "File",
"file_name": 'test-attachment',
"attached_to_doctype": doctype,
"attached_to_name": docname,
"content": 'test'
})
file1.insert()
file2 = frappe.get_doc({
"doctype": "File",
"file_name": 'test-attachment',
"attached_to_doctype": doctype,
"attached_to_name": docname,
"content": 'test2'
})
self.assertRaises(frappe.exceptions.AttachmentLimitReached, file2.insert)
limit_property.delete()
frappe.clear_cache(doctype='ToDo')
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestFile(unittest.TestCase):
def setUp(self):
frappe.set_user('Administrator')
self.delete_test_data()
self.upload_file()
def tearDown(self):
try:
frappe.get_doc("File", {"file_name": "file_copy.txt"}).delete()
except frappe.DoesNotExistError:
pass
def delete_test_data(self):
for f in frappe.db.sql('''select name, file_name from tabFile where
is_home_folder = 0 and is_attachments_folder = 0 order by creation desc'''):
frappe.delete_doc("File", f[0])
def upload_file(self):
_file = frappe.get_doc({
"doctype": "File",
"file_name": "file_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": self.get_folder("Test Folder 1", "Home").name,
"content": "Testing file copy example."})
_file.save()
self.saved_folder = _file.folder
self.saved_name = _file.name
self.saved_filename = get_files_path(_file.file_name)
def get_folder(self, folder_name, parent_folder="Home"):
return frappe.get_doc({
"doctype": "File",
"file_name": _(folder_name),
"is_folder": 1,
"folder": _(parent_folder)
}).insert()
def tests_after_upload(self):
self.assertEqual(self.saved_folder, _("Home/Test Folder 1"))
file_folder = frappe.db.get_value("File", self.saved_name, "folder")
self.assertEqual(file_folder, _("Home/Test Folder 1"))
def test_file_copy(self):
folder = self.get_folder("Test Folder 2", "Home")
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
move_file([{"name": file.name}], folder.name, file.folder)
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
self.assertEqual(_("Home/Test Folder 2"), file.folder)
def test_folder_depth(self):
result1 = self.get_folder("d1", "Home")
self.assertEqual(result1.name, "Home/d1")
result2 = self.get_folder("d2", "Home/d1")
self.assertEqual(result2.name, "Home/d1/d2")
result3 = self.get_folder("d3", "Home/d1/d2")
self.assertEqual(result3.name, "Home/d1/d2/d3")
result4 = self.get_folder("d4", "Home/d1/d2/d3")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": result4.name,
"content": "Testing folder copy example"})
_file.save()
def test_folder_copy(self):
folder = self.get_folder("Test Folder 2", "Home")
folder = self.get_folder("Test Folder 3", "Home/Test Folder 2")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": folder.name,
"content": "Testing folder copy example"})
_file.save()
move_file([{"name": folder.name}], 'Home/Test Folder 1', folder.folder)
file = frappe.get_doc("File", {"file_name":"folder_copy.txt"})
file_copy_txt = frappe.get_value("File", {"file_name":"file_copy.txt"})
if file_copy_txt:
frappe.get_doc("File", file_copy_txt).delete()
self.assertEqual(_("Home/Test Folder 1/Test Folder 3"), file.folder)
def test_default_folder(self):
d = frappe.get_doc({
"doctype": "File",
"file_name": _("Test_Folder"),
"is_folder": 1
})
d.save()
self.assertEqual(d.folder, "Home")
def test_on_delete(self):
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
file.delete()
self.assertEqual(frappe.db.get_value("File", _("Home/Test Folder 1"), "file_size"), 0)
folder = self.get_folder("Test Folder 3", "Home/Test Folder 1")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": folder.name,
"content": "Testing folder copy example"})
_file.save()
folder = frappe.get_doc("File", "Home/Test Folder 1/Test Folder 3")
self.assertRaises(frappe.ValidationError, folder.delete)
def test_same_file_url_update(self):
attached_to_doctype1, attached_to_docname1 = make_test_doc()
attached_to_doctype2, attached_to_docname2 = make_test_doc()
file1 = frappe.get_doc({
"doctype": "File",
"file_name": 'file1.txt',
"attached_to_doctype": attached_to_doctype1,
"attached_to_name": attached_to_docname1,
"is_private": 1,
"content": test_content1}).insert()
file2 = frappe.get_doc({
"doctype": "File",
"file_name": 'file2.txt',
"attached_to_doctype": attached_to_doctype2,
"attached_to_name": attached_to_docname2,
"is_private": 1,
"content": test_content1}).insert()
self.assertEqual(file1.is_private, file2.is_private)
self.assertEqual(file1.is_private, 1)
self.assertEqual(file1.file_url, file2.file_url)
self.assertTrue(os.path.exists(file1.get_full_path()))
file1.is_private = 0
file1.save()
file2 = frappe.get_doc('File', file2.name)
self.assertEqual(file1.is_private, file2.is_private)
self.assertEqual(file1.is_private, 0)
self.assertEqual(file1.file_url, file2.file_url)
self.assertTrue(os.path.exists(file2.get_full_path()))
def test_parent_directory_validation_in_file_url(self):
file1 = frappe.get_doc({
"doctype": "File",
"file_name": 'parent_dir.txt',
"attached_to_doctype": "",
"attached_to_name": "",
"is_private": 1,
"content": test_content1}).insert()
file1.file_url = '/private/files/../test.txt'
self.assertRaises(frappe.exceptions.ValidationError, file1.save)
# No validation to see if file exists
file1.reload()
file1.file_url = '/private/files/parent_dir2.txt'
file1.save()
class TestAttachment(unittest.TestCase):
test_doctype = 'Test For Attachment'
def setUp(self):
if frappe.db.exists('DocType', self.test_doctype):
return
frappe.get_doc(
doctype='DocType',
name=self.test_doctype,
module='Custom',
custom=1,
fields=[
{'label': 'Title', 'fieldname': 'title', 'fieldtype': 'Data'},
{'label': 'Attachment', 'fieldname': 'attachment', 'fieldtype': 'Attach'},
]
).insert()
def tearDown(self):
frappe.delete_doc('DocType', self.test_doctype)
def test_file_attachment_on_update(self):
doc = frappe.get_doc(
doctype=self.test_doctype,
title='test for attachment on update'
).insert()
file = frappe.get_doc({
'doctype': 'File',
'file_name': 'test_attach.txt',
'content': 'Test Content'
})
file.save()
doc.attachment = file.file_url
doc.save()
exists = frappe.db.exists('File', {
'file_name': 'test_attach.txt',
'file_url': file.file_url,
'attached_to_doctype': self.test_doctype,
'attached_to_name': doc.name,
'attached_to_field': 'attachment'
})
self.assertTrue(exists)
class TestAttachmentsAccess(unittest.TestCase):
def test_attachments_access(self):
frappe.set_user('test4@example.com')
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
frappe.get_doc({
"doctype": "File",
"file_name": 'test_user.txt',
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": 'Testing User'
}).insert()
frappe.get_doc({
"doctype": "File",
"file_name": "test_user_home.txt",
"content": 'User Home',
}).insert()
frappe.set_user('test@example.com')
frappe.get_doc({
"doctype": "File",
"file_name": 'test_system_manager.txt',
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": 'Testing System Manager'
}).insert()
frappe.get_doc({
"doctype": "File",
"file_name": "test_sm_home.txt",
"content": 'System Manager Home',
}).insert()
system_manager_files = [file.file_name for file in get_files_in_folder('Home')['files']]
system_manager_attachments_files = [file.file_name for file in get_files_in_folder('Home/Attachments')['files']]
frappe.set_user('test4@example.com')
user_files = [file.file_name for file in get_files_in_folder('Home')['files']]
user_attachments_files = [file.file_name for file in get_files_in_folder('Home/Attachments')['files']]
self.assertIn('test_sm_home.txt', system_manager_files)
self.assertNotIn('test_sm_home.txt', user_files)
self.assertIn('test_user_home.txt', system_manager_files)
self.assertIn('test_user_home.txt', user_files)
self.assertIn('test_system_manager.txt', system_manager_attachments_files)
self.assertNotIn('test_system_manager.txt', user_attachments_files)
self.assertIn('test_user.txt', system_manager_attachments_files)
self.assertIn('test_user.txt', user_attachments_files)
frappe.set_user('Administrator')
frappe.db.rollback()
|
|
#!/usr/bin/python
"""\
xml2cython: process xml files generated by gccxml and generate cython code
Usage:
xml2cython header xmlfile
By default, xml2cython pulls out every function available in the xmlfile. There
are some basic filters you can use to limit the functions pulled out:
- -f/--function-name-filter: only pull out functions whose names match the
given string.
- -l/--location-filter: only pull out functions which are declared in a file
whose name matches the given string.
Example:
xml2cython -f 'foo_' -l 'foo' header xmlfile
Will only pull out functions whose names match foo_ and which are declared
in files whose names match foo. Using regular expressions instead of simple
strings should work."""
import getopt
import sys
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cycodegenlib
from cycodegenlib.tp_puller import TypePuller
from cycodegenlib.misc import classify, query_items
from cycodegenlib.cycodegen import generate_cython
def generate_main(xml, output, lfilter=None, ffilter=None, funcs_list=None):
items, named, locations = query_items(xml)
funcs, tpdefs, enumvals, enums, structs, vars, unions, anoenums = \
classify(items, locations, lfilter=lfilter)
if ffilter is None:
ffilter = lambda x: True
if funcs_list:
kept_funcs = [i for i in funcs.values() if ffilter(i.name) \
and i.name in funcs_list]
else:
kept_funcs = [i for i in funcs.values() if ffilter(i.name)]
puller = TypePuller(items)
for f in kept_funcs:
puller.pull(f)
needed = puller.values()
# Filter "anonymous" enumerations according to location
if lfilter:
anoenumvals = []
for v in anoenums.values():
anoenumvals.extend(v.values)
else:
anoenumvals = enumvals.values()
# Order 'anonymous' enum values alphabetically
def cmpenum(a, b):
return cmp(a.name, b.name)
anoenumvals.sort(cmpenum)
# List of items to generate code for
gen = list(needed) #+ kept_funcs
generate_cython(output, gen, anoenumvals)
class Usage(Exception):
def __init__(self, msg):
self.msg = """\
usage: xml2cython [options] headerfile xmlfile
%s""" % msg
def main(argv=None):
if argv is None:
argv = sys.argv
# parse command line options
try:
try:
opts, args = getopt.getopt(argv[1:], "ho:l:f:i:V",
["help", "output", "location-filter",
"function-name-filter",
"input-file-filter", "--version"])
except getopt.error, msg:
raise Usage(msg)
except Usage, e:
print >>sys.stderr, e.msg
print >>sys.stderr, "for help use --help"
return 2
# process options
output = None
lfilter_str = None
ffilter_str = None
ifilter = None
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
elif o in ("-o", "--output"):
output = a
elif o in ("-l", "--location-filter"):
lfilter_str = a
elif o in ("-f", "--function-name-filter"):
ffilter_str = a
elif o in ("-i", "--input-file-filter"):
ifilter = a
elif o in ("-V", "--version"):
print "xml2cython: use cycodegenlib version", cycodegenlib.version
return 0
if len(args) != 2:
print >>sys.stderr, "Error, exactly one input file must be specified"
print >>sys.stderr, "for help use --help"
return 2
header_input = args[0]
xml_input = args[1]
lfilter = None
if lfilter_str:
lfilter = re.compile(lfilter_str).search
ffilter = None
if ffilter_str:
ffilter = re.compile(ffilter_str).search
# Input file filter
funcs = []
if ifilter:
a = open(ifilter, 'r')
try:
funcs.extend(a.read().splitlines())
finally:
a.close()
# Generate cython code
out = StringIO()
try:
generate_main(xml_input, out, lfilter=lfilter,
ffilter=ffilter, funcs_list=funcs)
if output:
f = open(output, 'w')
try:
f.write("# This file was generated by cycodegenlib, DO NOT EDIT\n")
f.write("# Generated by command %s\n" % " ".join(argv))
f.write("# Codegenlib version %s\n" % cycodegenlib.version)
f.write("cdef extern from '%s':\n" % header_input)
f.write(out.getvalue())
finally:
f.close()
else:
print out.getvalue()
finally:
out.close()
if __name__ == '__main__':
sys.exit(main())
# #root = 'asoundlib'
# #root = 'CoreAudio_AudioHardware'
# root = 'foo'
# header_name = '%s.h' % root
# #header_matcher = re.compile('alsa')
# header_matcher = re.compile(header_name)
# #header_matcher = re.compile('AudioHardware')
# xml_name = '%s.xml' % root
# pyx_name = '_%s.pyx' % root
# if sys.platform[:7] == 'darwin':
# so_name = root
# else:
# so_name = 'lib%s.so' % root
#
# items, named, locations = query_items(xml_name)
# funcs, tpdefs, enumvals, enums, structs, vars, unions = \
# classify(items, locations, lfilter=header_matcher.search)
#
# #arguments = signatures_types(funcs.values())
# #print "Need to pull out arguments", [named[i] for i in arguments]
#
# puller = TypePuller(items)
# for f in funcs.values():
# puller.pull(f)
#
# needed = puller.values()
# #print "Pulled out items:", [named[i] for i in needed]
#
# # Order 'anonymous' enum values alphabetically
# def cmpenum(a, b):
# return cmp(a.name, b.name)
# anoenumvals = enumvals.values()
# anoenumvals.sort(cmpenum)
#
# # List of items to generate code for
# #gen = enumvals.values() + list(needed) + funcs.values()
# gen = list(needed) + funcs.values()
#
# #gen_names = [named[i] for i in gen]
#
# cython_code = [cy_generate(i) for i in gen]
#
# output = open(pyx_name, 'w')
# output.write("cdef extern from '%s':\n" % header_name)
# output.write("\tcdef enum:\n")
# for i in anoenumvals:
# output.write("\t\t%s = %d\n" % (i.name, int(i.value)))
# for i in cython_code:
# if not i:
# continue
# if len(i) > 1:
# output.write("\t%s\n" % i[0])
# for j in i[1:]:
# output.write("\t%s\n" % j)
# else:
# output.write("\t%s\n" % i[0])
# output.close()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import inspect
import logging
import os
import sys
import time
from collections import namedtuple
import six
import yaml
from box import Box
from jinja2 import Environment
from jinja2 import FileSystemLoader
from jinja2 import select_autoescape
from slackclient import SlackClient
from .scheduler import Task
log = logging.getLogger(__name__)
Listener = namedtuple(
'Listener', (
'rule',
'view_func',
'trigger',
'doc',
'options',
),
)
class Tangerine(object):
jinja_environment = Environment
def __init__(self, slack_token=None, settings=None):
settings = settings or {}
settings.setdefault('tangerine', {})
settings['tangerine'].setdefault('sleep', 0.5)
settings['tangerine'].setdefault('template_folder', 'templates')
self.settings = Box(settings, frozen_box=True, default_box=True)
self.listeners = []
self.scheduled_tasks = []
self.client = SlackClient(
slack_token or self.settings.tangerine.auth_token,
)
self.sleep = self.settings.tangerine.sleep
@classmethod
def config_from_yaml(cls, path_to_yaml):
with open(path_to_yaml, 'r') as ymlfile:
settings = yaml.safe_load(ymlfile)
log.info('settings from %s loaded successfully', path_to_yaml)
return cls(settings=settings)
def _verify_rule(self, supplied_rule):
"""Rules must be callable with (user, message) in the signature.
Strings are automatically converted to callables that match.
"""
# If string, make a simple match callable
if isinstance(supplied_rule, six.string_types):
return lambda user, message: supplied_rule in message.lower()
if not six.callable(supplied_rule):
raise ValueError('Bot rules must be callable or strings')
expected = ('user', 'message')
signature = tuple(inspect.getargspec(supplied_rule).args)
try:
# Support class- and instance-methods where first arg is
# something like `self` or `cls`.
assert len(signature) in (2, 3)
assert expected == signature or expected == signature[-2:]
except AssertionError:
msg = 'Rule signature must have only 2 arguments: user, message'
raise ValueError(msg)
return supplied_rule
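# A callable rule accepted above (a sketch only; the function name and keyword
# below are illustrative, not part of this module): it must take exactly
# (user, message) and return a truthy value when the listener should fire.
#
# def mentions_deploy(user, message):
#     return 'deploy' in message.lower()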
def listen_for(self, rule, **options):
"""Decorator for adding a Rule. See guidelines for rules."""
trigger = None
if isinstance(rule, six.string_types):
trigger = rule
rule = self._verify_rule(rule)
def decorator(f):
self.add_listener(rule, f, trigger, f.__doc__, **options)
return f
return decorator
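# Usage sketch for listen_for (illustrative only; the token, bot name, and
# handler below are hypothetical, not part of this module). respond() will
# substitute '{user.username}' with the sender's name before speaking.
#
# bot = Tangerine(slack_token='xoxb-...')
#
# @bot.listen_for('hello')
# def greet(user, message):
#     """Reply whenever a message contains 'hello'."""
#     return 'Hi {user.username}!'
#
# bot.run()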
def cron(self, schedule, **options):
def decorator(f):
self.add_cron(schedule, f, **options)
return f
return decorator
def run(self):
self.running = True
if self.client.rtm_connect():
try:
self.event_loop()
except (KeyboardInterrupt, SystemExit):
log.info('attempting graceful shutdown...')
self.running = False
try:
sys.exit(0)
except SystemExit:
os._exit(0)
def event_loop(self):
while self.running:
time.sleep(self.sleep)
self.process_stream()
self.process_scheduled_tasks()
def read_stream(self):
data = self.client.rtm_read()
if not data:
return data
return [Box(d) for d in data][0]
def process_stream(self):
data = self.read_stream()
if not data or data.type != 'message' or 'user' not in data:
return
self.respond(data.user, data.text, data.channel)
def process_scheduled_tasks(self):
now = datetime.datetime.now()
for idx, task in enumerate(self.scheduled_tasks):
if now > task.next_run:
t = self.scheduled_tasks.pop(idx)
t.run()
self.add_cron(t.schedule, t.fn, **t.options)
def respond(self, user, message, channel):
sendable = {
'user': user,
'message': message,
'channel': channel,
}
if not message:
return
for rule, view_func, _, _, options in self.listeners:
if rule(user, message):
args = inspect.getargspec(view_func).args
kwargs = {k: v for k, v in sendable.items() if k in args}
response = view_func(**kwargs)
if response:
if 'hide_typing' not in options:
# TODO(nficano): this should be configurable
time.sleep(.2)
self.client.server.send_to_websocket({
'type': 'typing',
'channel': channel,
})
time.sleep(.5)
if '{user.username}' in response:
response = response.replace(
'{user.username}',
self.get_user_name(user),
)
self.speak(response, channel)
def add_listener(self, rule, view_func, trigger, docs, **options):
"""Adds a listener to the listeners container; verifies that
`rule` and `view_func` are callable.
"""
if not six.callable(rule):
raise TypeError('rule should be callable')
if not six.callable(view_func):
raise TypeError('view_func should be callable')
self.listeners.append(
Listener(rule, view_func, trigger, docs, options),
)
def add_cron(self, schedule, f, **options):
self.scheduled_tasks.append(Task(schedule, f, **options))
def speak(self, message, channel, **kwargs):
self.client.api_call(
'chat.postMessage', as_user=True,
channel=channel, text=message, **kwargs
)
def get_user_info(self, user_id):
return self.client.api_call('users.info', user=user_id)
def get_user_name(self, user_id):
user = self.get_user_info(user_id)
return user.get('user', {}).get('name')
def get_user_id_from_username(self, username):
for m in self.client.api_call('users.list')['members']:
if username.lower() == m.get('name', '').lower():
return m['id']
def get_channel_id_from_name(self, channel):
channel = channel.lower().replace('#', '')
types = ','.join(['public_channel', 'private_channel'])
response = self.client.api_call(
'conversations.list', types=types, limit=1000,
)
for c in response['channels']:
if channel == c['name'].lower():
return c['id']
response = self.client.api_call('channels.list', limit=1000)
for c in response['channels']:
if channel == c['name'].lower():
return c['id']
def get_channel_name_from_channel_id(self, channel_id):
types = ','.join(['public_channel', 'private_channel'])
response = self.client.api_call(
'conversations.list', types=types, limit=1000,
)
for c in response['channels']:
if channel_id == c['id']:
return c['name']
response = self.client.api_call('channels.list', limit=1000)
for c in response['channels']:
if channel_id == c['id']:
return c['name']
def get_template_path(self):
if os.path.isabs(self.settings.tangerine.template_folder):
return self.settings.tangerine.template_folder
else:
return os.path.join(
os.getcwd(),
self.settings.tangerine.template_folder,
)
def get_jinja_environment(self):
return self.jinja_environment(
loader=FileSystemLoader(self.get_template_path()),
autoescape=select_autoescape(['txt']),
)
def render_template(self, template_name, **context):
env = self.get_jinja_environment()
template = env.get_template(template_name)
return template.render(**context)
|
|
'''
GTF gene models
See: http://mblab.wustl.edu/GTF22.html
GTF is a 1-based format, meaning that start sites start counting at 1. We
read in those start sites and subtract one so that the internal representation
is 0-based.
Notes:
All internal positions returned are 0-based.
If a transcript is missing a CDS or exon region, the (start, end) is used.
If a transcript is missing a start/stop codon, the start/end is used (+/- 3 bases)
This class assumes a GTF format file. GFF3 isn't supported (yet).
'''
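# Usage sketch (illustrative only; the filename and coordinates below are
# hypothetical): load an annotation and query the genes overlapping a region.
#
# gtf = GTF('genes.gtf')
# for gene in gtf.find('chr1', 10000, 20000):
#     print gene.gid, gene.start, gene.end   # 0-based internal coordinates
#     for transcript in gene.transcripts:
#         print transcript.transcript_id, transcript.exons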
import sys
import os
from ngsutils.support.ngs_utils import gzip_aware_open
from ngsutils.support import symbols, quoted_split
from eta import ETA
import datetime
try:
import cPickle as pickle
except:
import pickle
class GTF(object):
_version = 1.3
__binsize = 10000
def __init__(self, filename=None, cache_enabled=True, quiet=False, fileobj=None):
if not filename and not fileobj:
raise ValueError('Must pass either a filename or a fileobj')
if fileobj:
fobj = fileobj
cache_enabled = False
eta = None
else:
fobj = gzip_aware_open(filename)
eta = ETA(os.stat(filename).st_size, fileobj=fobj)
cachefile = os.path.join(os.path.dirname(filename), '.%s.cache' % os.path.basename(filename))
self._genes = {}
self._pos = 0
self._gene_bins = {}
self._gene_names = {}
self._gene_ids = {}
warned = False
if cache_enabled and os.path.exists(cachefile):
self._load_cache(cachefile)
if not self._genes:
if not quiet:
sys.stderr.write('Reading GTF file... (%s) \n' % filename)
for linenum, line in enumerate(fobj):
try:
idx = line.find('#')
if idx > -1:
if idx == 0:
continue
line = line[:idx]
chrom, source, feature, start, end, score, strand, frame, attrs = line.rstrip().split('\t')
source = symbols[source]
start = int(start) - 1 # Note: 1-based
end = int(end)
attributes = {}
for key, val in [x.split(' ', 1) for x in [x.strip() for x in quoted_split(attrs, ';')] if x and ' ' in x]:
if val[0] == '"' and val[-1] == '"':
val = val[1:-1]
attributes[key] = val
gid = None
if 'isoform_id' in attributes:
gid = attributes['isoform_id']
elif 'gene_name' in attributes: # use gene_name if we have it.
gid = attributes['gene_name']
# elif 'tss_id' in attributes: # iGenomes GTF files... are strange. use gene_name first.
# gid = attributes['tss_id']
elif 'gene_id' in attributes:
gid = attributes['gene_id']
if not warned and not quiet:
sys.stderr.write('\nGTF file potentially missing isoform annotation! Each transcript may be treated separately. (%s)\n' % gid)
sys.stderr.write('%s\n\n' % (str(attributes)))
warned = True
else:
if not warned and not quiet:
sys.stderr.write('\nNot a valid GTF file! Maybe GFF?\n')
sys.stderr.write('%s\n\n' % (str(attributes)))
warned = True
first_key = None
attributes = {}
for key, val in [x.split('=', 1) for x in [x.strip() for x in quoted_split(attrs, ';')] if x and '=' in x]:
if not first_key:
first_key = key
if val[0] == '"' and val[-1] == '"':
val = val[1:-1]
attributes[key] = val
if not attributes:
gid = 'id_%s' % linenum
if not warned and not quiet:
sys.stderr.write('\nGTF file missing annotations! Using line numbers as IDs\n')
warned = True
else:
gid = attributes[first_key]
if not warned and not quiet:
sys.stderr.write('\nGTF file missing annotations (gene_id, transcript_id)! Assuming GFF? Taking first attribute as ID (%s=%s)\n' % (first_key, gid))
sys.stderr.write('%s\n\n' % (str(attributes)))
warned = True
if eta:
eta.print_status(extra=gid)
except:
import traceback
sys.stderr.write('Error parsing line:\n%s\n' % line)
traceback.print_exc()
sys.exit(1)
if not gid in self._genes or chrom != self._genes[gid].chrom:
self._genes[gid] = _GTFGene(gid, chrom, source, **attributes)
if 'gene_name' in attributes:
gene_name = attributes['gene_name']
if not gene_name in self._gene_names:
self._gene_names[gene_name] = [gid]
else:
self._gene_names[gene_name].append(gid)
if gid != attributes['gene_id']:
self._gene_ids[attributes['gene_id']] = gid
self._genes[gid].add_feature(attributes['transcript_id'] if 'transcript_id' in attributes else gid, feature, start, end, strand)
if eta:
eta.done()
if filename and fobj != sys.stdin:
fobj.close()
for gid in self._genes:
gene = self._genes[gid]
start_bin = gene.start / GTF.__binsize
end_bin = gene.end / GTF.__binsize
for bin in xrange(start_bin, end_bin+1):
if not (gene.chrom, bin) in self._gene_bins:
self._gene_bins[(gene.chrom, bin)] = [gid]
else:
self._gene_bins[(gene.chrom, bin)].append(gid)
if cache_enabled:
try:
self._write_cache(cachefile)
except Exception, e:
sys.stderr.write("Error saving cache: %s!\n" % str(e))
pass # do nothing if we can't write the cache.
def _load_cache(self, cachefile):
sys.stderr.write('Reading GTF file (cached)...')
started_t = datetime.datetime.now()
try:
with open(cachefile) as cache:
version, genes, gene_bins, gene_names, gene_ids = pickle.load(cache)
if version == GTF._version:
self._genes, self._gene_bins = genes, gene_bins
self._gene_names = gene_names
self._gene_ids = gene_ids
sys.stderr.write('(%s sec)\n' % (datetime.datetime.now() - started_t).seconds)
else:
sys.stderr.write('Error reading cached file... Processing original file.\n')
except:
self._genes = {}
self._gene_bins = {}
sys.stderr.write('Failed reading cache! Processing original file.\n')
def _write_cache(self, cachefile):
sys.stderr.write('(saving GTF cache)...')
with open(cachefile, 'w') as cache:
pickle.dump((GTF._version, self._genes, self._gene_bins, self._gene_names, self._gene_ids), cache)
sys.stderr.write('\n')
def fsize(self):
return len(self._genes)
def tell(self):
return self._pos
def find(self, chrom, start, end=None, strand=None):
if not end:
end = start
if end < start:
raise ValueError('[gtf.find] Error: End must not be smaller than start!')
startbin = start / GTF.__binsize
if end:
endbin = end / GTF.__binsize
else:
endbin = startbin
proc_list = set()
for bin in xrange(startbin, endbin + 1):
if (chrom, bin) in self._gene_bins:
for gid in self._gene_bins[(chrom, bin)]:
if gid in proc_list:
continue
proc_list.add(gid)
g_start = self._genes[gid].start
g_end = self._genes[gid].end
g_strand = self._genes[gid].strand
if strand and g_strand != strand:
continue
if g_start <= start <= g_end or g_start <= end <= g_end:
# gene spans the query boundary
yield self._genes[gid]
elif start <= g_start <= end and start <= g_end <= end:
# gene is completely inside boundary
yield self._genes[gid]
def get_by_id(self, gene_id):
if gene_id in self._gene_ids:
return self._genes[self._gene_ids[gene_id]]
elif gene_id in self._genes:
return self._genes[gene_id]
return None
def get_by_name(self, gene_name):
if gene_name in self._gene_names:
for gid in self._gene_names[gene_name]:
yield self._genes[gid]
@property
def genes(self):
self._pos = 0
proc_list = set()
for chrbin in sorted(self._gene_bins):
for gene_id in self._gene_bins[chrbin]:
if gene_id in proc_list:
continue
yield self._genes[gene_id]
proc_list.add(gene_id)
self._pos += 1
class _GTFGene(object):
"""
Stores info for a single gene_id
A gene consists of one or more transcripts. It *must* have a gene_id and chrom.
It may also contain other attributes (isoform_id, gene_biotype, etc.).
The gid is used to merge multiple transcripts together into one GTFGene record.
This is a separate attribute that will be either (in order):
isoform_id, gene_name, gene_id
"""
def __init__(self, gid, chrom, source, gene_id=None, gene_name=None, **attributes):
self.gid = gid
self.gene_id = gene_id if gene_id else gid
self.gene_name = gene_name if gene_name else gene_id
self.attributes = attributes
self.chrom = chrom
self.source = source
self._transcripts = {}
self._regions = []
self.start = None
self.end = None
self.strand = None
def __repr__(self):
return '%s(%s) %s:%s-%s[%s]' % (self.gene_name, self.gid, self.chrom, self.start, self.end, self.strand)
@property
def transcripts(self):
for t in self._transcripts:
yield self._transcripts[t]
def add_feature(self, transcript_id, feature, start, end, strand):
if not transcript_id in self._transcripts:
self._transcripts[transcript_id] = _GTFTranscript(transcript_id, strand)
t = self._transcripts[transcript_id]
# this start/end will cover the entire transcript.
# this way if there is only a 'gene' annotation,
# we can still get a gene/exon start/end
if self.start is None or start < self.start:
self.start = start
if self.end is None or end > self.end:
self.end = end
if self.strand is None:
self.strand = strand
if t.start is None or start < t.start:
t.start = start
if t.end is None or end > t.end:
t.end = end
if feature == 'exon':
t._exons.append((start, end))
elif feature == 'CDS':
t._cds.append((start, end))
elif feature == 'start_codon':
t._start_codon = (start, end)
elif feature == 'stop_codon':
t._stop_codon = (start, end)
else:
# this is an unsupported feature - possibly add a debug message
pass
@property
def regions(self):
# these are potentially memory-intensive, so they are calculated on the
# fly when needed.
if not self._regions:
all_starts = []
all_ends = []
tids = []
for tid in self._transcripts:
tids.append(tid)
starts = []
ends = []
for start, end in self._transcripts[tid].exons:
starts.append(start)
ends.append(end)
all_starts.append(starts)
all_ends.append(ends)
self._regions = calc_regions(self.start, self.end, tids, all_starts, all_ends)
i = 0
for start, end, const, names in self._regions:
i += 1
yield (i, start, end, const, names)
class _GTFTranscript(object):
def __init__(self, transcript_id, strand):
self.transcript_id = transcript_id
self.strand = strand
self._exons = []
self._cds = []
self._start_codon = None
self._stop_codon = None
self.start = None
self.end = None
def __repr__(self):
return '<transcript id="%s" strand="%s" start="%s" end="%s" exons="%s">' % (self.transcript_id, self.strand, self.start, self.end, ','.join(['%s->%s' % (s, e) for s, e in self.exons]))
@property
def exons(self):
if self._exons:
return self._exons
else:
return [(self.start, self.end)]
@property
def cds(self):
if self._cds:
return self._cds
else:
return []
@property
def has_cds(self):
if self._cds:
return True
return False
@property
def utr_5(self):
utr = []
if self._cds and self._exons:
if self.strand == '+':
cds_start = self._cds[0][0]
for s, e in self._exons:
if e < cds_start:
utr.append((s, e))
else:
utr.append((s, cds_start))
break
else:
cds_start = self._cds[-1][1]
for s, e in self._exons[::-1]:
if s > cds_start:
utr.append((s, e))
else:
utr.append((cds_start, e))
break
utr.sort()
return utr
@property
def utr_3(self):
utr = []
if self._cds and self._exons:
if self.strand == '+':
cds_end = self._cds[-1][1]
for s, e in self._exons[::-1]:
if s > cds_end:
utr.append((s, e))
else:
utr.append((cds_end, e))
break
utr.sort()
else:
cds_end = self._cds[0][0]
for s, e in self._exons:
if e < cds_end:
utr.append((s, e))
else:
utr.append((s, cds_end))
break
return utr
@property
def start_codon(self):
if self._start_codon:
return self._start_codon
elif self.strand == '+':
return (self.start, self.start + 3)
else:
return (self.end - 3, self.end)
@property
def stop_codon(self):
if self._stop_codon:
return self._stop_codon
elif self.strand == '-':
return (self.start, self.start + 3)
else:
return (self.end - 3, self.end)
def calc_regions(txStart, txEnd, kg_names, kg_starts, kg_ends):
'''
This takes a list of start/end positions (one set per isoform)
It splits these into regions by assigning each base in the
txStart-txEnd range a number. The number is a bit-mask representing
each isoform that includes that base in a coding region. Each
isoform has a number 2^N, so the bit-mask is just a number.
Next, it scans the map for regions with the same bit-mask.
When a different bitmask is found, the previous region (if not intron)
is added to the list of regions.
Returns a list of tuples:
(start,end,is_const,names) where names is a comma-separated string
of accns that make up the region
Test:
foo: 100->110, 125->135, 150->160, 175->200
bar: 100->110, 125->135, 175->200
baz: 100->110, 150->160, 175->200
>>> list(calc_regions(100, 200, ['foo', 'bar', 'baz'], [[100, 125, 150, 175], [100, 125, 175], [100, 150, 175]], [[110, 135, 160, 200], [110, 135, 200], [110, 160, 200]]))
[(100, 110, True, 'foo,bar,baz'), (125, 135, False, 'foo,bar'), (150, 160, False, 'foo,baz'), (175, 200, True, 'foo,bar,baz')]
# overhangs...
baz: 100->120, 150->160, 170->200
>>> list(calc_regions(100, 200, ['foo', 'bar', 'baz'], [[100, 125, 150, 175], [100, 125, 175], [100, 150, 170]], [[110, 135, 160, 200], [110, 135, 200], [120, 160, 200]]))
[(100, 110, True, 'foo,bar,baz'), (110, 120, False, 'baz'), (125, 135, False, 'foo,bar'), (150, 160, False, 'foo,baz'), (170, 175, False, 'baz'), (175, 200, True, 'foo,bar,baz')]
foo: 100->110, 120->130, 140->150
bar: 100->110, 140->150
>>> list(calc_regions(100, 150, ['foo', 'bar'], [[100, 120, 140], [100, 140,]], [[110, 130, 150], [110, 150]]))
[(100, 110, True, 'foo,bar'), (120, 130, False, 'foo'), (140, 150, True, 'foo,bar')]
foo: 100->110, 120->130, 140->150
bar: 100->115, 140->150 # 3' overhang
>>> list(calc_regions(100, 150, ['foo', 'bar'], [[100, 120, 140], [100, 140,]], [[110, 130, 150], [115, 150]]))
[(100, 110, True, 'foo,bar'), (110, 115, False, 'bar'), (120, 130, False, 'foo'), (140, 150, True, 'foo,bar')]
foo: 100->110, 120->130, 140->150
bar: 100->110, 135->150 # 5' overhang
>>> list(calc_regions(100, 150, ['foo', 'bar'], [[100, 120, 140], [100, 135,]], [[110, 130, 150], [110, 150]]))
[(100, 110, True, 'foo,bar'), (120, 130, False, 'foo'), (135, 140, False, 'bar'), (140, 150, True, 'foo,bar')]
'''
maskmap = [0, ] * (txEnd - txStart)
mask = 1
mask_start_end = {}
mask_names = {}
for name, starts, ends in zip(kg_names, kg_starts, kg_ends):
mask_start = None
mask_end = None
mask_names[mask] = name
starts = [int(x) for x in starts]
ends = [int(x) for x in ends]
for start, end in zip(starts, ends):
if not mask_start:
mask_start = int(start)
mask_end = int(end)
for i in xrange(start - txStart, end - txStart):
maskmap[i] = maskmap[i] | mask
mask_start_end[mask] = (mask_start, mask_end)
mask = mask * 2
# print mask_start_end
# print maskmap
last_val = 0
regions = []
region_start = 0
def _add_region(start, end, value):
rstart = start + txStart
rend = end + txStart
const = True
names = []
'''
This code only calls a region alt if the transcript actually spans this region.
example - two transcripts:
1) xxxxxx------xxxx------xxx---xxxx--xxxxxxxxx
2) xxx------xxxx--xxx-------xxxx--xxxxxxxxxxxxx
const/alt cccccc------cccc--aaa-aaa---cccc--ccccccccccccc
I'm not sure if this is a good idea or not... What this *should* be is:
1) xxxxxx------xxxx------xxx---xxxx----xxxxxxxxx
2) xxx------xxxx--xxx-------xxxx----xxxxxxxxxxxxx
3) xxx------xxxx--xxx-------xxxxxa
const/alt aaaccc------cccc--aaa-aaa---ccccaa--cccccccccaaaa
Where alt-5 and alt-3 are only dictated by transcripts that contain them...
There should be extra code to handle this situation...
## TODO: Add alt-5/alt-3 code
'''
# this keeps track of which transcripts go into each region
# and if it is const or alt
for mask in mask_start_end:
mstart, mend = mask_start_end[mask]
if rstart >= mstart and rend <= mend:
if value & mask == 0:
const = False
else:
names.append(mask_names[mask])
regions.append((rstart, rend, const, ','.join(names)))
for i in xrange(0, len(maskmap)):
if maskmap[i] == last_val:
continue
if last_val:
_add_region(region_start, i, last_val)
region_start = i
else:
region_start = i
last_val = maskmap[i]
if last_val:
_add_region(region_start, i + 1, last_val) # extend by one...
return regions
|
|
# coding=utf-8
"""Utils functions for supporting the code."""
from __future__ import absolute_import
from __future__ import division
from absl import flags
import numpy as np
from numpy.core.numeric import False_ # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
from tqdm import tqdm
from ieg.dataset_utils import augmentation_transforms
from ieg.dataset_utils import autoaugment
from ieg.dataset_utils import randaugment
FLAGS = flags.FLAGS
GODD_POLICIES = autoaugment.cifar10_policies()
RANDOM_POLICY_OPS = randaugment.RANDOM_POLICY_OPS
BLUR_OPS = randaugment.BLUR_OPS
_IMAGE_SIZE = 224
_CROP_PADDING = 32
# Does multiprocessing speed things up?
POOL = None
def cifar_process(image, augmentation=True):
"""Map function for cifar dataset.
Args:
image: An image tensor.
augmentation: If True, process train images.
Returns:
A processed image tensor.
"""
# label = tf.cast(label, dtype=tf.int32)
image = tf.math.divide(tf.cast(image, dtype=tf.float32), 255.0)
if augmentation:
image = tf.image.resize_image_with_crop_or_pad(image, 32 + 4, 32 + 4)
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.image.random_crop(image, [32, 32, 3])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
image = tf.clip_by_value(image, 0, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
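# Usage sketch (illustrative; `train_ds` is a hypothetical tf.data.Dataset of
# (image, label) pairs): apply the training-time CIFAR preprocessing per image
# before batching.
#
# train_ds = train_ds.map(
#     lambda image, label: (cifar_process(image, augmentation=True), label),
#     num_parallel_calls=tf.data.experimental.AUTOTUNE)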
def apply_autoaugment(data, no_policy=False):
"""Python written Autoaugment for preprocessing image.
Args:
data: can be
A list: (3D image, policy)
A list: (4D image, policy)
A numpy image
no_policy: If True, does not use learned AutoAugment policies.
Returns:
A 3D or 4D tensor of processed images.
"""
if not isinstance(data, (list, tuple)):
epoch_policy = GODD_POLICIES[np.random.choice(len(GODD_POLICIES))]
image = data
else:
image, epoch_policy = data
if len(image.shape) == 3:
images = [image]
else:
images = image
res = []
for img in images:
assert img.max() <= 1 and img.min() >= -1
# ! image is assumed to be normalized to [-1, 1]
if no_policy:
final_img = img
else:
final_img = augmentation_transforms.apply_policy(epoch_policy, img)
final_img = augmentation_transforms.random_flip(
augmentation_transforms.zero_pad_and_crop(final_img, 4))
final_img = augmentation_transforms.cutout_numpy(final_img)
res.append(final_img.astype(np.float32))
res = np.concatenate(res, 0)
return res
def random_blur(images, magnitude=10, nops=1):
"""Apply random blurs for a batch of data."""
# Using shared policies works better.
policies = [(policy, 0.5, mag) for (policy, mag) in zip(
np.random.choice(BLUR_OPS, nops), np.random.randint(1, magnitude, nops))]
policies = [policies] * images.shape[0]
if POOL is not None:
jobs = [(image.squeeze(), policy) for image, policy in zip(
np.split(images.copy(), images.shape[0], axis=0), policies)]
augmented_images = POOL.map(apply_randomaugment, jobs)
else:
augmented_images = []
for image, policy in zip(images.copy(), policies):
final_img = apply_randomaugment((image, policy))
augmented_images.append(final_img)
augmented_images = np.stack(augmented_images, axis=0)
return augmented_images
def apply_randomaugment(data):
"""Apply random augmentations."""
image, epoch_policy = data
if len(image.shape) == 3:
images = [image]
else:
images = image
res = []
for img in images:
# ! image is assumed to be normalized to [-1, 1]
final_img = randaugment.apply_policy(epoch_policy, img)
final_img = randaugment.random_flip(
randaugment.zero_pad_and_crop(final_img, 4))
final_img = randaugment.cutout_numpy(final_img)
res.append(final_img.astype(np.float32))
res = np.concatenate(res, 0)
return res
def pool_policy_augmentation(images):
"""Batch AutoAugment.
Given a 4D numpy tensor of images,
perform AutoAugment using apply_autoaugment().
Args:
images: 4D numpy tensor
Returns:
A 4D numpy tensor of processed images.
"""
# Using the same policy for all batch data seems to work better.
policies = [GODD_POLICIES[np.random.choice(len(GODD_POLICIES))]
] * images.shape[0]
jobs = [(image.squeeze(), policy) for image, policy in zip(
np.split(images.copy(), images.shape[0], axis=0), policies)]
if POOL is None:
jobs = np.split(images.copy(), images.shape[0], axis=0)
augmented_images = map(apply_autoaugment, jobs)
else:
augmented_images = POOL.map(apply_autoaugment, jobs)
augmented_images = np.stack(augmented_images, axis=0)
return augmented_images
def random_augmentation(images, magnitude=10, nops=2):
"""Apply random augmentations for a batch of data."""
# Using shared policies works better.
policies = [(policy, 0.5, mag) for (policy, mag) in zip(
np.random.choice(RANDOM_POLICY_OPS, nops),
np.random.randint(1, magnitude, nops))]
policies = [policies] * images.shape[0]
if POOL is not None:
jobs = [(image.squeeze(), policy) for image, policy in zip(
np.split(images.copy(), images.shape[0], axis=0), policies)]
augmented_images = POOL.map(apply_randomaugment, jobs)
else:
augmented_images = []
for image, policy in zip(images.copy(), policies):
final_img = apply_randomaugment((image, policy))
augmented_images.append(final_img)
augmented_images = np.stack(augmented_images, axis=0)
return augmented_images
def autoaug_batch_process_map_fn(images, labels):
"""tf.data.Dataset map function to enable python AutoAugmnet with tf.py_func.
It is usually called after tf.data.Dataset is batched.
Args:
images: A 4D tensor of a batch of images.
labels: labels of images.
Returns:
A tuple (images, labels), where images is a 5D tensor [Bx2xHxWx3].
"""
if FLAGS.aug_type == 'autoaug':
aa_images = tf.py_func(pool_policy_augmentation, [tf.identity(images)],
[tf.float32])
elif FLAGS.aug_type == 'randaug':
aa_images = tf.py_func(random_augmentation, [tf.identity(images)],
[tf.float32])
elif FLAGS.aug_type == 'default':
aa_images = tf.py_func(cifar_process, [tf.identity(images)], [tf.float32])
else:
raise NotImplementedError('{} aug_type does not exist'.format(
FLAGS.aug_type))
aa_images = tf.reshape(aa_images, [-1] + images.shape.as_list()[1:])
images = tf.concat([tf.expand_dims(images, 1),
tf.expand_dims(aa_images, 1)],
axis=1)
return images, labels
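# Usage sketch (illustrative; `dataset` and `batch_size` are hypothetical, and
# FLAGS.aug_type must be one of 'autoaug'/'randaug'/'default'): the map is
# applied after batching, as noted in the docstring above.
#
# dataset = dataset.batch(batch_size)
# dataset = dataset.map(autoaug_batch_process_map_fn,
#                       num_parallel_calls=tf.data.experimental.AUTOTUNE)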
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where
each coordinate is [0, 1) and the coordinates are arranged as `[ymin,
xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area
of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image must
contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""Checks if at least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size):
"""Makes a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad, lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: tf.compat.v1.image.resize( # pylint: disable=g-long-lambda
image, [image_size, image_size],
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False))
return image
def _decode_and_center_crop(image_bytes, image_size):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + _CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height, offset_width, padded_center_crop_size,
padded_center_crop_size
])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return tf.compat.v1.image.resize(
image, [image_size, image_size],
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes,
use_bfloat16,
image_size=_IMAGE_SIZE,
autoaugment_name=None):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
autoaugment_name: `string` that is the name of the autoaugment policy to
apply to the image. If the value is `None` autoaugment will not be
applied.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.cast(image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
if autoaugment_name:
tf.logging.info('Apply AutoAugment policy {}'.format(autoaugment_name))
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
# Random aug should also work.
image = autoaugment.distort_image_with_autoaugment(image, autoaugment_name)
image = tf.cast(image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=_IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.cast(image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def cutout(image, pad_size, replace=0):
"""Applies cutout (https://arxiv.org/abs/1708.04552) to image."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height, dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width, dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [
image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)
]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims,
constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace, image)
return image
def imagenet_preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=_IMAGE_SIZE,
autoaugment_name=None,
use_cutout=False):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
autoaugment_name: `string` that is the name of the autoaugment policy to
apply to the image. If the value is `None` autoaugment will not be
applied.
use_cutout: 'bool' for whether use cutout.
Returns:
A preprocessed image `Tensor` with value range of [-1, 1].
"""
if is_training:
image = preprocess_for_train(image_bytes, use_bfloat16, image_size,
autoaugment_name)
if use_cutout:
image = cutout(image, pad_size=8)
else:
image = preprocess_for_eval(image_bytes, use_bfloat16, image_size)
# Clip the extra values
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.math.divide(image, 255.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
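# Usage sketch (illustrative; the map function and the 'v0' policy name are
# hypothetical): decode and preprocess a serialized JPEG string for training,
# yielding an image scaled to [-1, 1].
#
# def _train_map_fn(image_bytes, label):
#     image = imagenet_preprocess_image(
#         image_bytes, is_training=True, image_size=_IMAGE_SIZE,
#         autoaugment_name='v0', use_cutout=True)
#     return image, label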
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy(
) # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def image_example(image_str, label, dat_id=None):
"""Creates tf example."""
feature = {
'label': _int64_feature(label),
'image': _bytes_feature(image_str),
}
if dat_id is not None:
feature.update({'id': _int64_feature(dat_id)})
return tf.train.Example(features=tf.train.Features(feature=feature))
def _parse_image_function(example_proto, include_id=False):
"""Parses tf example."""
  # Parse the input tf.train.Example proto using the feature description below.
image_feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenFeature([], tf.string),
}
if include_id:
image_feature_description.update(
{'id': tf.io.FixedLenFeature([], tf.int64)})
results = tf.io.parse_single_example(example_proto, image_feature_description)
results['image'] = tf.io.decode_jpeg(
results['image'], channels=3, name='parse_image_function_decode_jpeg')
results['label'] = tf.cast(results['label'], tf.int32)
return results
def write_tfrecords(record_file,
ds,
ds_size,
nshard=50,
include_ids=True,
filter_fn=None):
"""Rewrites ds as tfrecords that contains data ids."""
ds = ds.shuffle(ds_size)
next_item = tf.data.make_one_shot_iterator(ds).get_next()
dat_id = 0
part_num = 0
per_shard_sample = ds_size // nshard + (0 if ds_size % nshard == 0 else 1)
count = 0
write_last_round = False
with tf.Session() as sess:
img_pl = tf.placeholder(tf.uint8)
img_str_tf = tf.io.encode_jpeg(img_pl)
while True:
try:
image_str_batch = []
label_batch = []
for _ in range(per_shard_sample):
image, label = sess.run(next_item)
image_str = sess.run(img_str_tf, feed_dict={img_pl: image})
if filter_fn is None or filter_fn(label):
image_str_batch.append(image_str)
label_batch.append(label)
count += 1
except tf.errors.OutOfRangeError:
if write_last_round:
tf.logging.info(
'Generate {} tfrecords ({} samples) with data ids.'.format(
nshard, count))
break
write_last_round = True
part_path = record_file + '-{:05d}-of-{:05d}'.format(part_num, nshard)
part_num += 1
with tf.io.TFRecordWriter(part_path) as writer:
for image_str, label in tqdm(
zip(image_str_batch, label_batch),
desc='Write tfrecord #%d' % part_num):
tf_example = image_example(image_str, label,
dat_id if include_ids else None)
dat_id += 1
writer.write(tf_example.SerializeToString())
def read_tf_records(record_file, train, include_ids=False):
"""Reads tfrecords and convert to tf.data with data ids."""
def fetch_dataset_fn(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
dataset = tf.data.Dataset.list_files(record_file, shuffle=train)
dataset = dataset.interleave(
fetch_dataset_fn,
block_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
filenames = tf.io.gfile.glob(record_file)
nums = sum(1 for filename in filenames # pylint: disable=g-complex-comprehension
for _ in tf.python_io.tf_record_iterator(filename))
return dataset.map(
lambda x: _parse_image_function(x, include_id=include_ids),
num_parallel_calls=tf.data.experimental.AUTOTUNE), nums
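# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): one plausible way to
# wire read_tf_records() and the parsed features into a batched tf.data pipeline.
# The record pattern, resize/rescale choices and batch size below are hypothetical
# placeholders, not values taken from the original code.
def _example_input_fn(record_pattern='/tmp/records/train-*', batch_size=32):
  """Reads the sharded tfrecords written above and returns a batched dataset."""
  dataset, num_examples = read_tf_records(record_pattern, train=True)
  def _to_model_inputs(features):
    # _parse_image_function() already decodes the JPEG, so only resize and rescale
    # here; a real pipeline might instead run imagenet_preprocess_image() on the
    # raw image bytes before decoding.
    image = tf.image.resize_images(features['image'], [_IMAGE_SIZE, _IMAGE_SIZE])
    image = tf.cast(image, tf.float32) / 255.0
    return image, features['label']
  dataset = dataset.map(
      _to_model_inputs, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.shuffle(1024).batch(batch_size, drop_remainder=True)
  return dataset, num_examples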
|
|
"""Contains the Tilt mode code"""
# tilt.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
from mpf.system.config import CaseInsensitiveDict
from mpf.system.mode import Mode
from mpf.system.timing import Timing
class Tilt(Mode):
def mode_init(self):
self._balls_to_collect = 0
self._last_warning_tick = 0
self.ball_ending_tilted_queue = None
self.tilt_event_handlers = set()
self.last_tilt_warning_switch_tick = 0
self.tilt_config = self.machine.config_processor.process_config2(
config_spec='tilt',
source=self._get_merged_settings('tilt'),
section_name='tilt')
def mode_start(self, **kwargs):
self._register_switch_handlers()
for event in self.tilt_config['reset_warnings_events']:
self.add_mode_event_handler(event, self.reset_warnings)
def mode_stop(self, **kwargs):
self._remove_switch_handlers()
self.reset_warnings_handlers = set()
def _register_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def _remove_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def tilt_warning(self):
"""Processes a tilt warning. If the number of warnings is the number to
cause a tilt, a tilt will be processed.
"""
self.last_tilt_warning_switch_tick = self.machine.tick_num
if not self.player:
return
self.log.debug("Tilt Warning")
self._last_warning_tick = self.machine.tick_num
self.player[self.tilt_config['tilt_warnings_player_var']] += 1
warnings = self.player[self.tilt_config['tilt_warnings_player_var']]
if warnings >= self.tilt_config['warnings_to_tilt']:
self.tilt()
else:
self.machine.events.post('tilt_warning',
warnings=warnings,
warnings_remaining=(self.tilt_config['warnings_to_tilt'] -
warnings))
self.machine.events.post('tilt_warning_{}'.format(warnings))
def reset_warnings(self, **kwargs):
"""Resets the tilt warnings for the current player."""
try:
self.player[self.tilt_config['tilt_warnings_player_var']] = 0
except AttributeError:
pass
def tilt(self, **kwargs):
"""Causes the ball to tilt."""
if not self.machine.game:
return
self._balls_to_collect = self.machine.playfield.balls
# todo use collection
self.log.debug("Processing Tilt. Balls to collect: %s",
self._balls_to_collect)
self.machine.game.tilted = True
self.machine.events.post('tilt')
self._disable_autofires()
self._disable_flippers()
self.tilt_event_handlers.add(
self.machine.events.add_handler('ball_ending',
self._ball_ending_tilted))
for device in self.machine.ball_devices:
if 'drain' in device.tags:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_drain))
else:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_entered_non_drain_device))
self.machine.game.ball_ending()
def _disable_flippers(self):
for flipper in self.machine.flippers:
flipper.disable()
def _disable_autofires(self):
for autofire in self.machine.autofires:
autofire.disable()
def _tilted_ball_drain(self, new_balls, unclaimed_balls, device):
self._balls_to_collect -= unclaimed_balls
self.log.debug("Tilted ball drain. Balls to collect: %s",
self._balls_to_collect)
if self._balls_to_collect <= 0:
self._tilt_done()
return {'unclaimed_balls': 0}
def _tilted_ball_entered_non_drain_device(self, new_balls, unclaimed_balls,
device):
return {'unclaimed_balls': unclaimed_balls}
def _tilt_switch_handler(self):
self.tilt()
def _tilt_warning_switch_handler(self):
if (self._last_warning_tick + self.tilt_config['multiple_hit_window']
<= self.machine.tick_num):
self.tilt_warning()
def _ball_ending_tilted(self, queue):
self.ball_ending_tilted_queue = queue
queue.wait()
if not self._balls_to_collect:
self._tilt_done()
def _tilt_done(self):
if self.tilt_settle_ms_remaining():
self.delay.reset(ms=self.tilt_settle_ms_remaining(),
callback=self._tilt_done,
name='tilt')
else:
self.machine.game.tilted = False
self.machine.events.post('tilt_clear')
self.ball_ending_tilted_queue.clear()
self.machine.events.remove_handlers_by_keys(self.tilt_event_handlers)
self.tilt_event_handlers = set()
def tilt_settle_ms_remaining(self):
"""Returns the amount of milliseconds remaining until the tilt settle
time has cleared.
"""
ticks = (self.machine.tick_num - self.last_tilt_warning_switch_tick -
self.tilt_config['settle_time'])
if ticks >= 0:
return 0
else:
return abs(ticks * Timing.ms_per_tick)
def slam_tilt(self):
self.machine.events.post('slam_tilt')
self.game_ended()
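# Illustrative only (not from the original source): the tilt_config keys this mode
# reads, shown as the kind of mapping process_config2() hands back. The values are
# hypothetical examples; real machines define them under the 'tilt:' config section.
EXAMPLE_TILT_CONFIG = {
    'tilt_warning_switch_tag': 'tilt_warning',  # switches routed to _tilt_warning_switch_handler()
    'tilt_switch_tag': 'tilt',                  # switches that call tilt() directly
    'slam_tilt_switch_tag': 'slam_tilt',        # switches that call slam_tilt()
    'warnings_to_tilt': 3,                      # tilt_warning() tilts once this count is reached
    'multiple_hit_window': 300,                 # ticks during which repeated warning hits are ignored
    'settle_time': 5,                           # ticks the tilt bob must settle before tilt_clear
    'reset_warnings_events': ['player_turn_start'],  # events wired to reset_warnings()
    'tilt_warnings_player_var': 'tilt_warnings',     # player variable holding the warning count
}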
|
|
"""Tests for the WLED sensor platform."""
from datetime import datetime
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN, SensorDeviceClass
from homeassistant.components.wled.const import DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DATA_BYTES,
ELECTRIC_CURRENT_MILLIAMPERE,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity import EntityCategory
from homeassistant.util import dt as dt_util
from tests.common import MockConfigEntry
async def test_sensors(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test the creation and values of the WLED sensors."""
registry = er.async_get(hass)
# Pre-create registry entries for disabled by default sensors
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_uptime",
suggested_object_id="wled_rgb_light_uptime",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_free_heap",
suggested_object_id="wled_rgb_light_free_memory",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_signal",
suggested_object_id="wled_rgb_light_wifi_signal",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_rssi",
suggested_object_id="wled_rgb_light_wifi_rssi",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_channel",
suggested_object_id="wled_rgb_light_wifi_channel",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_bssid",
suggested_object_id="wled_rgb_light_wifi_bssid",
disabled_by=None,
)
# Setup
mock_config_entry.add_to_hass(hass)
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=dt_util.UTC)
with patch("homeassistant.components.wled.sensor.utcnow", return_value=test_time):
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("sensor.wled_rgb_light_estimated_current")
assert state
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ELECTRIC_CURRENT_MILLIAMPERE
)
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.CURRENT
assert state.state == "470"
entry = registry.async_get("sensor.wled_rgb_light_estimated_current")
assert entry
assert entry.unique_id == "aabbccddeeff_estimated_current"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_uptime")
assert state
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "2019-11-11T09:10:00+00:00"
entry = registry.async_get("sensor.wled_rgb_light_uptime")
assert entry
assert entry.unique_id == "aabbccddeeff_uptime"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_free_memory")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:memory"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_BYTES
assert state.state == "14600"
entry = registry.async_get("sensor.wled_rgb_light_free_memory")
assert entry
assert entry.unique_id == "aabbccddeeff_free_heap"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_wifi_signal")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "76"
entry = registry.async_get("sensor.wled_rgb_light_wifi_signal")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_signal"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_wifi_rssi")
assert state
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.SIGNAL_STRENGTH
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
assert state.state == "-62"
entry = registry.async_get("sensor.wled_rgb_light_wifi_rssi")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_rssi"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_wifi_channel")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "11"
entry = registry.async_get("sensor.wled_rgb_light_wifi_channel")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_channel"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
state = hass.states.get("sensor.wled_rgb_light_wifi_bssid")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "AA:AA:AA:AA:AA:BB"
entry = registry.async_get("sensor.wled_rgb_light_wifi_bssid")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_bssid"
assert entry.entity_category is EntityCategory.DIAGNOSTIC
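# Illustrative summary (not part of the original test module): the readings that the
# assertions in test_sensors above imply the mocked WLED device reports. The
# mock_wled fixture itself is defined in conftest.py and is not shown here.
EXPECTED_MOCK_WLED_READINGS = {
    "estimated_current": 470,  # mA
    "uptime": "2019-11-11T09:10:00+00:00",
    "free_heap": 14600,  # bytes
    "wifi_signal": 76,  # %
    "wifi_rssi": -62,  # dBm
    "wifi_channel": 11,
    "wifi_bssid": "AA:AA:AA:AA:AA:BB",
}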
@pytest.mark.parametrize(
"entity_id",
(
"sensor.wled_rgb_light_uptime",
"sensor.wled_rgb_light_free_memory",
"sensor.wled_rgb_light_wi_fi_signal",
"sensor.wled_rgb_light_wi_fi_rssi",
"sensor.wled_rgb_light_wi_fi_channel",
"sensor.wled_rgb_light_wi_fi_bssid",
),
)
async def test_disabled_by_default_sensors(
hass: HomeAssistant, init_integration: MockConfigEntry, entity_id: str
) -> None:
"""Test the disabled by default WLED sensors."""
registry = er.async_get(hass)
state = hass.states.get(entity_id)
assert state is None
entry = registry.async_get(entity_id)
assert entry
assert entry.disabled
assert entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
@pytest.mark.parametrize(
"key",
[
"bssid",
"channel",
"rssi",
"signal",
],
)
async def test_no_wifi_support(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_wled: MagicMock,
key: str,
) -> None:
"""Test missing Wi-Fi information from WLED device."""
registry = er.async_get(hass)
# Pre-create registry entries for disabled by default sensors
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
f"aabbccddeeff_wifi_{key}",
suggested_object_id=f"wled_rgb_light_wifi_{key}",
disabled_by=None,
)
# Remove Wi-Fi info
device = mock_wled.update.return_value
device.info.wifi = None
# Setup
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(f"sensor.wled_rgb_light_wifi_{key}")
assert state
assert state.state == STATE_UNKNOWN
|
|
'''
Test actioning.py execute_runbook()
'''
import mock
import unittest
from actioning import execute_runbook
import actioning
class ExecuteRunbooksTest(unittest.TestCase):
''' Run unit tests against the execute_runbook method '''
def setUp(self):
''' Setup mocked data '''
self.config = mock.Mock()
self.dbc = mock.Mock()
self.logger = mock.Mock(**{
'info.return_value' : True,
'debug.return_value' : True,
'critical.return_value' : True,
'warn.return_value' : True,
'error.return_value' : True
})
self.target = {'ip' : '10.0.0.1'}
def tearDown(self):
''' Destroy mocked data '''
self.config = None
self.dbc = None
self.logger = None
self.target = None
class TestCMDonRemote(ExecuteRunbooksTest):
''' Test when the action is a command from Remote '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
def runTest(self, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
action = {
'type' : 'cmd',
'execute_from' : 'remote',
'cmd' : "bash"
}
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertTrue(mock_local.called)
mock_local.assert_called_with("bash", capture=True)
class TestCMDonHostTarget(ExecuteRunbooksTest):
    ''' Test when the action is a command executed from a specified host '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
def runTest(self, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
action = {
'type' : 'cmd',
'execute_from' : 'host',
'host' : '192.168.0.1',
'cmd' : "bash"
}
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertFalse(mock_local.called)
self.assertTrue(mock_run.called)
mock_run.assert_called_with("bash")
class TestCMDWithBadTarget(ExecuteRunbooksTest):
    ''' Test when the action is a command with an invalid execute_from target '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
def runTest(self, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
action = {
'type' : 'cmd',
'execute_from' : 'flarget',
'cmd' : "bash"
}
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertFalse(results)
self.assertTrue(self.logger.warn.called)
class TestPluginonRemote(ExecuteRunbooksTest):
''' Test when the action is a Plugin on Remote '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
def runTest(self, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
action = {
'type' : 'plugin',
'plugin' : 'yes.py',
'args' : 'arrrrrgs',
'execute_from' : 'ontarget',
}
config = {
'plugin_path' : '/some/dir',
'actioning' : {
'upload_path' : '/some/dir'
}
}
self.config = mock.MagicMock(spec_set=config)
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertFalse(mock_local.called)
self.assertTrue(mock_run.called)
self.assertTrue(mock_run.call_count == 3)
class TestPluginonTarget(ExecuteRunbooksTest):
''' Test when the action is a Plugin on Target '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
@mock.patch('actioning.shutil.copyfile')
@mock.patch('actioning.os.chmod')
@mock.patch('actioning.os.remove')
def runTest(self, mock_remove, mock_chmod, mock_copyfile, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
mock_remove = True
mock_chmod = True
mock_copyfile = True
action = {
'type' : 'plugin',
'plugin' : 'yes.py',
'args' : 'arrrrrgs',
'execute_from' : 'remote',
}
config = {
'plugin_path' : '/some/dir',
'actioning' : {
'upload_path' : '/some/dir'
}
}
self.config = mock.MagicMock(spec_set=config)
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertFalse(mock_run.called)
self.assertTrue(mock_local.called)
self.assertTrue(mock_local.call_count == 1)
class TestPluginonHostTarget(ExecuteRunbooksTest):
    ''' Test when the action is a Plugin executed from a specified host '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
@mock.patch('actioning.shutil.copyfile')
@mock.patch('actioning.os.chmod')
@mock.patch('actioning.os.remove')
def runTest(self, mock_remove, mock_chmod, mock_copyfile, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
mock_remove = True
mock_chmod = True
mock_copyfile = True
action = {
'type' : 'plugin',
'plugin' : 'yes.py',
'args' : 'arrrrrgs',
'execute_from' : 'host',
'host': '192.168.0.1'
}
config = {
'plugin_path' : '/some/dir',
'actioning' : {
'upload_path' : '/some/dir'
}
}
self.config = mock.MagicMock(spec_set=config)
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertTrue(mock_run.called)
self.assertFalse(mock_local.called)
self.assertTrue(mock_run.call_count == 3)
class TestPluginBadTarget(ExecuteRunbooksTest):
''' Test when the action is a Plugin on an invalid target '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
@mock.patch('actioning.shutil.copyfile')
@mock.patch('actioning.os.chmod')
@mock.patch('actioning.os.remove')
def runTest(self, mock_remove, mock_chmod, mock_copyfile, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
mock_remove = True
mock_chmod = True
mock_copyfile = True
action = {
'type' : 'plugin',
'plugin' : 'yes.py',
'args' : 'arrrrrgs',
'execute_from' : 'plarget',
}
config = {
'plugin_path' : '/some/dir',
'actioning' : {
'upload_path' : '/some/dir'
}
}
self.config = mock.MagicMock(spec_set=config)
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertFalse(results)
self.assertTrue(self.logger.warn.called)
class TestWithOverride(ExecuteRunbooksTest):
    ''' Test when the action is a command with a credentials override '''
@mock.patch('actioning.core.fab.set_env')
@mock.patch('actioning.fabric.api.env')
@mock.patch('actioning.fabric.api.hide')
@mock.patch('actioning.fabric.api.local')
@mock.patch('actioning.fabric.api.put')
@mock.patch('actioning.fabric.api.run')
def runTest(self, mock_run, mock_put, mock_local, mock_hide, mock_env, mock_set_env):
''' Execute test '''
# Set mock_env to empty dict
mock_env = mock.MagicMock(spec={})
mock_set_env.return_value = mock_env
mock_local.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_run.return_value = mock.MagicMock(**{ 'succeeded': True})
mock_put = True
mock_hide = True
action = {
'type' : 'cmd',
'execute_from' : 'remote',
'cmd' : "bash",
'credentials' : 'fake'
}
results = execute_runbook(action, self.target, self.config, self.logger)
self.assertTrue(results)
self.assertFalse(self.logger.warn.called)
self.assertTrue(mock_local.called)
mock_local.assert_called_with("bash", capture=True)
mock_set_env.assert_called_with(self.config, mock_env, override="fake")
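# Illustrative only: a rough sketch of the cmd-action dispatch these tests imply for
# execute_runbook(); it is inferred from the assertions above, not copied from
# actioning.py. Plugin actions additionally upload and chmod the plugin, which is why
# the plugin tests expect three fabric run() calls; that path is not sketched here.
def _execute_runbook_sketch(action, target, config, logger):
    ''' Hypothetical helper summarising the tested contract for cmd actions '''
    import fabric.api  # imported lazily; only needed if this sketch is actually run
    if action['execute_from'] == 'remote':
        # 'remote' commands run on the monitoring host itself
        return fabric.api.local(action['cmd'], capture=True).succeeded
    if action['execute_from'] in ('host', 'ontarget'):
        # 'host'/'ontarget' commands run over SSH on the chosen machine
        return fabric.api.run(action['cmd']).succeeded
    logger.warn("Unknown execute_from value: %s", action['execute_from'])
    return False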
|