import sys
import csv
import json
import shutil
from collections import OrderedDict
pg_system = [
(1024 ** 5, 'PB'),
(1024 ** 4, 'TB'),
(1024 ** 3, 'GB'),
(1024 ** 2, 'MB'),
(1024 ** 1, 'kB'),
(1024 ** 0, 'B'),
]
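# Note: convert() below matches suffixes with str.endswith() in list order, so pg_time
# keeps 'ms' ahead of 's' to stop values like '200ms' from being parsed as seconds.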
pg_time = [
(1000 * 1 * 60, 'min'),
(1000 ** 0, 'ms'),
(1000 ** 1, 's'),
]
# def create_tuning_config(t_minval=None, t_maxval=None, t_minval_type=None, t_maxval_type=None,
# t_resource_type=None, t_weight_samples=False,
# t_step=None, t_enumvals=None,
# t_powers_of_2=False, t_additional_values=[], t_dependent=False,
# t_notes=''):
# cfg = {}
# cfg['t_minval'] = t_minval
# cfg['t_minval_type'] = t_minval_type
# cfg['t_maxval'] = t_maxval
# cfg['t_maxval_type'] = t_maxval_type
# cfg['t_resource_type'] = t_resource_type
# cfg['t_step'] = t_step
# cfg['t_enumvals'] = t_enumvals
# cfg['t_powers_of_2'] = t_powers_of_2
# cfg['t_additional_values'] = t_additional_values
# cfg['t_dependent'] = t_dependent
# cfg['t_weight_samples'] = t_weight_samples
#
# return cfg
STRING = 1
INTEGER = 2
REAL = 3
BOOL = 4
ENUM = 5
TIMESTAMP = 6
TYPE_NAMES = {
'string': STRING,
'integer': INTEGER,
'real': REAL,
'bool': BOOL,
'enum': ENUM,
'timestamp': TIMESTAMP
}
UNIT_BYTES = 1
UNIT_MS = 2
UNIT_OTHER = 3
def convert(size, system=pg_system):
for factor, suffix in system:
if size.endswith(suffix):
if len(size) == len(suffix):
amount = 1
else:
amount = int(size[:-len(suffix)])
return amount * factor
return None
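# Illustrative sanity checks for convert(); the expected values follow directly from
# the factor tables above.
assert convert('8kB') == 8 * 1024
assert convert('512MB') == 512 * 1024 ** 2
assert convert('min', system=pg_time) == 60000
assert convert('200ms', system=pg_time) == 200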
params = OrderedDict()
with open("settings.csv", "r") as f:
reader = csv.reader(f, delimiter=',')
header = None
for i, row in enumerate(reader):
if i == 0:
header = list(row)
else:
param = {}
param['name'] = row[header.index('name')]
param['vartype'] = TYPE_NAMES[row[header.index('vartype')]]
param['category'] = row[header.index('category')]
param['enumvals'] = row[header.index('enumvals')]
param['context'] = row[header.index('context')]
param['unit'] = None
param['tunable'] = None
param['scope'] = 'global'
param['summary'] = row[header.index('short_desc')]
param['description'] = row[header.index('extra_desc')]
default = row[header.index('boot_val')]
minval = row[header.index('min_val')]
maxval = row[header.index('max_val')]
if param['vartype'] == INTEGER:
default = int(default)
minval = int(minval)
maxval = int(maxval)
elif param['vartype'] == REAL:
default = float(default)
minval = float(minval)
maxval = float(maxval)
else:
assert minval == ''
assert maxval == ''
minval = None
maxval = None
param['minval'] = minval
param['maxval'] = maxval
param['default'] = default
if param['enumvals'] != '':
enumvals = param['enumvals'][1:-1].split(',')
for i, enumval in enumerate(enumvals):
if enumval.startswith('\"') and enumval.endswith('\"'):
enumvals[i] = enumval[1:-1]
param['enumvals'] = ','.join(enumvals)
else:
param['enumvals'] = None
pg_unit = row[header.index('unit')]
if pg_unit != '':
factor = convert(pg_unit)
if factor is None:
factor = convert(pg_unit, system=pg_time)
assert factor is not None
param['unit'] = UNIT_MS
else:
param['unit'] = UNIT_BYTES
if param['default'] > 0:
param['default'] = param['default'] * factor
if param['minval'] > 0:
param['minval'] = param['minval'] * factor
if param['maxval'] > 0:
param['maxval'] = param['maxval'] * factor
else:
param['unit'] = UNIT_OTHER
# Internal params are read-only
if param['context'] == 'internal':
param['tunable'] = 'no'
# All string param types are not tunable in 9.6
if param['vartype'] == STRING:
param['tunable'] = 'no'
# We do not tune autovacuum (yet)
if param['name'].startswith('autovacuum'):
param['tunable'] = 'no'
# No need to tune debug params
if param['name'].startswith('debug'):
param['tunable'] = 'no'
# Don't want to disable query tuning options
if param['name'].startswith('enable'):
param['tunable'] = 'no'
# These options control a special-case query optimizer
if param['name'].startswith('geqo'):
param['tunable'] = 'no'
# Do not tune logging settings
if param['name'].startswith('log'):
param['tunable'] = 'no'
# Do not tune SSL settings
if param['name'].startswith('ssl'):
param['tunable'] = 'no'
# Do not tune syslog settings
if param['name'].startswith('syslog'):
param['tunable'] = 'no'
            # Do not tune TCP settings
if param['name'].startswith('tcp'):
param['tunable'] = 'no'
if param['name'].startswith('trace'):
param['tunable'] = 'no'
if param['name'].startswith('track'):
param['tunable'] = 'no'
            # We do not tune vacuum settings (yet)
if param['name'].startswith('vacuum'):
param['tunable'] = 'no'
# Do not tune replication settings
if param['category'].startswith('Replication'):
param['tunable'] = 'no'
params[param['name']] = param
# We only want to tune some settings
params['allow_system_table_mods']['tunable'] = 'no'
params['archive_mode']['tunable'] = 'no'
params['archive_timeout']['tunable'] = 'no'
params['array_nulls']['tunable'] = 'no'
params['authentication_timeout']['tunable'] = 'no'
params['backend_flush_after']['tunable'] = 'yes'
params['backslash_quote']['tunable'] = 'no'
params['bgwriter_delay']['tunable'] = 'yes'
params['bgwriter_flush_after']['tunable'] = 'yes'
params['bgwriter_lru_maxpages']['tunable'] = 'yes'
params['bgwriter_lru_multiplier']['tunable'] = 'yes'
params['bonjour']['tunable'] = 'no'
params['bonjour_name']['tunable'] = 'no'
params['bytea_output']['tunable'] = 'no'
params['check_function_bodies']['tunable'] = 'no'
params['checkpoint_completion_target']['tunable'] = 'yes'
params['checkpoint_flush_after']['tunable'] = 'yes'
params['checkpoint_timeout']['tunable'] = 'yes'
params['checkpoint_warning']['tunable'] = 'no'
params['client_min_messages']['tunable'] = 'no'
params['commit_delay']['tunable'] = 'yes'
params['commit_siblings']['tunable'] = 'yes'
params['constraint_exclusion']['tunable'] = 'no'
params['cpu_index_tuple_cost']['tunable'] = 'maybe'
params['cpu_operator_cost']['tunable'] = 'maybe'
params['cpu_tuple_cost']['tunable'] = 'maybe'
params['cursor_tuple_fraction']['tunable'] = 'maybe'
params['db_user_namespace']['tunable'] = 'no'
params['deadlock_timeout']['tunable'] = 'yes'
params['default_statistics_target']['tunable'] = 'yes'
params['default_transaction_deferrable']['tunable'] = 'no'
params['default_transaction_isolation']['tunable'] = 'no'
params['default_transaction_read_only']['tunable'] = 'no'
params['default_with_oids']['tunable'] = 'no'
params['dynamic_shared_memory_type']['tunable'] = 'no'
params['effective_cache_size']['tunable'] = 'yes'
params['effective_io_concurrency']['tunable'] = 'yes'
params['escape_string_warning']['tunable'] = 'no'
params['exit_on_error']['tunable'] = 'no'
params['extra_float_digits']['tunable'] = 'no'
params['force_parallel_mode']['tunable'] = 'no'
params['from_collapse_limit']['tunable'] = 'yes'
params['fsync']['tunable'] = 'no' # dangerous
params['full_page_writes']['tunable'] = 'no' # dangerous
params['gin_fuzzy_search_limit']['tunable'] = 'no'
params['gin_pending_list_limit']['tunable'] = 'no'
params['huge_pages']['tunable'] = 'no'
params['idle_in_transaction_session_timeout']['tunable'] = 'no'
params['ignore_checksum_failure']['tunable'] = 'no'
params['ignore_system_indexes']['tunable'] = 'no'
params['IntervalStyle']['tunable'] = 'no'
params['join_collapse_limit']['tunable'] = 'yes'
params['krb_caseins_users']['tunable'] = 'no'
params['lo_compat_privileges']['tunable'] = 'no'
params['lock_timeout']['tunable'] = 'no' # Tuning is not recommended in Postgres 9.6 manual
params['maintenance_work_mem']['tunable'] = 'yes'
params['max_connections']['tunable'] = 'no' # This is set based on # of client connections
params['max_files_per_process']['tunable'] = 'no' # Should only be increased if OS complains
params['max_locks_per_transaction']['tunable'] = 'no'
params['max_parallel_workers_per_gather']['tunable'] = 'yes' # Must be < max_worker_processes
params['max_pred_locks_per_transaction']['tunable'] = 'no'
params['max_prepared_transactions']['tunable'] = 'no'
params['max_replication_slots']['tunable'] = 'no'
params['max_stack_depth']['tunable'] = 'no'
params['max_wal_senders']['tunable'] = 'no'
params['max_wal_size']['tunable'] = 'yes'
params['max_worker_processes']['tunable'] = 'yes'
params['min_parallel_relation_size']['tunable'] = 'yes'
params['min_wal_size']['tunable'] = 'yes'
params['old_snapshot_threshold']['tunable'] = 'no'
params['operator_precedence_warning']['tunable'] = 'no'
params['parallel_setup_cost']['tunable'] = 'maybe'
params['parallel_tuple_cost']['tunable'] = 'maybe'
params['password_encryption']['tunable'] = 'no'
params['port']['tunable'] = 'no'
params['post_auth_delay']['tunable'] = 'no'
params['pre_auth_delay']['tunable'] = 'no'
params['quote_all_identifiers']['tunable'] = 'no'
params['random_page_cost']['tunable'] = 'yes'
params['replacement_sort_tuples']['tunable'] = 'no'
params['restart_after_crash']['tunable'] = 'no'
params['row_security']['tunable'] = 'no'
params['seq_page_cost']['tunable'] = 'yes'
params['session_replication_role']['tunable'] = 'no'
params['shared_buffers']['tunable'] = 'yes'
params['sql_inheritance']['tunable'] = 'no'
params['standard_conforming_strings']['tunable'] = 'no'
params['statement_timeout']['tunable'] = 'no'
params['superuser_reserved_connections']['tunable'] = 'no'
params['synchronize_seqscans']['tunable'] = 'no'
params['synchronous_commit']['tunable'] = 'no' # dangerous
params['temp_buffers']['tunable'] = 'yes'
params['temp_file_limit']['tunable'] = 'no'
params['transaction_deferrable']['tunable'] = 'no'
params['transaction_isolation']['tunable'] = 'no'
params['transaction_read_only']['tunable'] = 'no'
params['transform_null_equals']['tunable'] = 'no'
params['unix_socket_permissions']['tunable'] = 'no'
params['update_process_title']['tunable'] = 'no'
params['wal_buffers']['tunable'] = 'yes'
params['wal_compression']['tunable'] = 'no'
params['wal_keep_segments']['tunable'] = 'no'
params['wal_level']['tunable'] = 'no'
params['wal_log_hints']['tunable'] = 'no'
params['wal_sync_method']['tunable'] = 'yes'
params['wal_writer_delay']['tunable'] = 'yes'
params['wal_writer_flush_after']['tunable'] = 'yes'
params['work_mem']['tunable'] = 'yes'
params['xmlbinary']['tunable'] = 'no'
params['xmloption']['tunable'] = 'no'
params['zero_damaged_pages']['tunable'] = 'no'
with open('tunable_params.txt', 'w') as f:
for opt in ['yes', 'maybe', 'no', '']:
f.write(opt.upper() + '\n')
f.write('---------------------------------------------------\n')
for p, pdict in params.iteritems():
if pdict['tunable'] == opt:
f.write('{}\t{}\t{}\n'.format(p, pdict['vartype'], pdict['unit']))
f.write('\n')
# MAX_MEM = 36 # 64GB or 2^36
#
# # backend_flush_after - range between 0 & 2MB
# # max = 2^21, eff_min = 2^13 (8kB), step either 0.5 or 1
# # other_values = [0]
# # powers_of_2 = true
# params['backend_flush_after']['tuning_config'] = create_tuning_config(
# t_minval=13, t_maxval=21, t_step=0.5, t_additional_values=[0],
# t_powers_of_2=True, t_weight_samples=True)
#
# # bgwriter_delay
# # true minval = 10, maxval = 500, step = 10
# params['bgwriter_delay']['tuning_config'] = create_tuning_config(
# t_minval=10, t_maxval=500, t_step=10)
#
# # bgwriter_flush_after
# # same as backend_flush_after
# params['bgwriter_flush_after']['tuning_config'] = create_tuning_config(
# t_minval=13, t_maxval=21, t_step=0.5, t_additional_values=[0],
# t_powers_of_2=True, t_weight_samples=True)
#
# # bgwriter_lru_maxpages
# # minval = 0, maxval = 1000, step = 50
# params['bgwriter_lru_maxpages']['tuning_config'] = create_tuning_config(
# t_minval=0, t_maxval=1000, t_step=50)
#
# # bgwriter_lru_multiplier
# # minval = 0.0, maxval = 10.0, step = 0.5
# params['bgwriter_lru_multiplier']['tuning_config'] = create_tuning_config(
# t_minval=0.0, t_maxval=10.0, t_step=0.5)
#
# # checkpoint_completion_target
# # minval = 0.0, maxval = 1.0, step = 0.1
# params['checkpoint_completion_target']['tuning_config'] = create_tuning_config(
# t_minval=0.0, t_maxval=1.0, t_step=0.1)
#
# # checkpoint_flush_after
# # same as backend_flush_after
# params['checkpoint_flush_after']['tuning_config'] = create_tuning_config(
# t_minval=13, t_maxval=21, t_step=0.5, t_additional_values=[0], t_powers_of_2=True)
#
# # checkpoint_timeout
# # minval = 5min, maxval = 3 hours, step = 5min
# # other_values = 1min (maybe)
# params['checkpoint_timeout']['tuning_config'] = create_tuning_config(
# t_minval=300000, t_maxval=10800000, t_step=300000, t_additional_values=[60000])
#
# # commit_delay
# # minval = 0, maxval = 10000, step = 500
# params['commit_delay']['tuning_config'] = create_tuning_config(
# t_minval=0, t_maxval=10000, t_step=500)
#
# # commit_siblings
# # minval = 0, maxval = 20, step = 1
# params['commit_siblings']['tuning_config'] = create_tuning_config(
# t_minval=0, t_maxval=20, t_step=1)
#
# # deadlock_timeout
# # minval = 500, maxval = 20000, step = 500
# params['deadlock_timeout']['tuning_config'] = create_tuning_config(
# t_minval=500, t_maxval=20000, t_step=500)
#
# # default_statistics_target
# # minval = 50, maxval = 2000, step = 50
# params['default_statistics_target']['tuning_config'] = create_tuning_config(
# t_minval=50, t_maxval=2000, t_step=50)
#
# # effective_cache_size
# # eff_min = 256MB = 2^19, eff_max = over max memory (by 25%)
# # other_values = []
# # powers_of_2 = true
# params['effective_cache_size']['tuning_config'] = create_tuning_config(
# t_minval=19, t_maxval=1.25, t_maxval_type='percentage', t_resource_type='memory',
# t_step=0.5, t_powers_of_2=True, t_weight_samples=True,
# t_notes='t_maxval = 25% amt greater than max memory')
#
# # effective_io_concurrency
# # minval = 0, maxval = 10, step = 1
# params['effective_io_concurrency']['tuning_config'] = create_tuning_config(
# t_minval=0, t_maxval=10, t_step=1)
#
# # from_collapse_limit
# # minval = 4, maxval = 40, step = 4
# # other_values = 1
# params['from_collapse_limit']['tuning_config'] = create_tuning_config(
# t_minval=4, t_maxval=40, t_step=4, t_additional_values=[1])
#
# # join_collapse_limit
# # minval = 4, maxval = 40, step = 4
# # other_values = 1
# params['join_collapse_limit']['tuning_config'] = create_tuning_config(
# t_minval=4, t_maxval=40, t_step=4, t_additional_values=[1])
#
# # random_page_cost
# # minval = current value of seq_page_cost, maxval = seq_page_cost + 5, step = 0.5
# params['random_page_cost']['tuning_config'] = create_tuning_config(
# t_minval=None, t_maxval=None, t_step=0.5, t_dependent=True,
# t_notes='t_minval = current value of seq_page_cost, t_maxval = seq_page_cost + 5')
#
# # seq_page_cost
# # minval = 0.0, maxval = 2.0, step = 0.1
# params['seq_page_cost']['tuning_config'] = create_tuning_config(
# t_minval=0.0, t_maxval=2.0, t_step=0.1)
#
# # maintenance_work_mem
# # eff_min 8MB, eff_max = 1/2 - 3/4
# params['maintenance_work_mem']['tuning_config'] = create_tuning_config(
# t_minval=23, t_maxval=0.4, t_maxval_type='percentage', t_resource_type='memory',
# t_step=0.5, t_powers_of_2=True, #t_weight_samples=True,
# t_notes='t_maxval = 40% of total memory')
#
# # max_parallel_workers_per_gather
# # minval = 0, maxval = current value of max_worker_processes
# params['max_parallel_workers_per_gather']['tuning_config'] = create_tuning_config(
# t_minval=0, t_maxval=None, t_step=1, t_dependent=True,
# t_notes='t_maxval = max_worker_processes')
#
# # max_wal_size
# # eff_min = 2^25, eff_max = 10GB? some percentage of total disk space?
# params['max_wal_size']['tuning_config'] = create_tuning_config(
# t_minval=25, t_maxval=33.5, t_step=0.5, t_powers_of_2=True,
# t_weight_samples=True, t_notes='t_maxval = some % of total disk space')
#
# # max_worker_processes
# # min = 4, max = 16, step = 2
# params['max_worker_processes']['tuning_config'] = create_tuning_config(
# t_minval=4, t_maxval=16, t_step=2)
#
# # min_parallel_relation_size
# # min = 1MB = 2^20, max = 2^30
# params['min_parallel_relation_size']['tuning_config'] = create_tuning_config(
# t_minval=20, t_maxval=30, t_step=0.5, t_powers_of_2=True)
#
# # min_wal_size
# # default = 80MB, some min, then max is up to current max_wal_size
# params['min_wal_size']['tuning_config'] = create_tuning_config(
# t_minval=25, t_maxval=None, t_step=0.5, t_powers_of_2=True,
# t_dependent=True, t_notes='t_maxval = max_wal_size')
#
# # shared buffers
# # min = 8388608 = 2^23, max = 70% of total memory
# params['shared_buffers']['tuning_config'] = create_tuning_config(
# t_minval=23, t_maxval=0.7, t_maxval_type='percentage', t_resource_type='memory',
# t_step=0.5, t_powers_of_2=True, t_weight_samples=True,
# t_notes='t_maxval = 70% of total memory')
#
# # temp buffers
# # min ~ 2^20, max = some percent of total memory
# params['temp_buffers']['tuning_config'] = create_tuning_config(
# t_minval=20, t_maxval=0.25, t_maxval_type='percentage', t_resource_type='memory',
# t_step=0.5, t_powers_of_2=True, t_weight_samples=True,
# t_notes='t_maxval = some % of total memory')
#
# # wal_buffers
# # min = 32kB = 2^15, max = 2GB
# # other_values = [-1]
# params['wal_buffers']['tuning_config'] = create_tuning_config(
# t_minval=15, t_maxval=30.5, t_step=0.5, t_powers_of_2=True,
# t_additional_values=[-1], t_weight_samples=True)
#
# # wal_sync_method
# # enum: [open_datasync, fdatasync, fsync, open_sync]
# params['wal_sync_method']['tuning_config'] = create_tuning_config(
# t_enumvals=['open_datasync', 'fdatasync', 'fsync', 'open_sync'])
#
# # wal_writer_delay
# # min = 50ms, max = 1000ms, step = 50ms
# # other_values = 10ms
# params['wal_writer_delay']['tuning_config'] = create_tuning_config(
# t_minval=50, t_maxval=1000, t_step=50, t_additional_values=[10])
#
# # wal_writer_flush_after
# # same as backend_flush_after
# params['wal_writer_flush_after']['tuning_config'] = create_tuning_config(
# t_minval=13, t_maxval=21, t_step=0.5, t_additional_values=[0], t_powers_of_2=True)
#
# # work_mem
# # min = 64kB = 2^16, max = some percent of total memory
# params['work_mem']['tuning_config'] = create_tuning_config(
# t_minval=16, t_maxval=0.3, t_maxval_type='percentage', t_resource_type='memory',
# t_step=0.5, t_powers_of_2=True, t_weight_samples=True, t_dependent=True,
# t_notes='t_maxval = 30% of total memory')
# max_name_len = 0
# contexts = set()
# for pname, pinfo in params.iteritems():
# if pinfo['tunable'] == 'yes':
# assert pinfo['tuning_config'] is not None
# if pinfo['unit'] == 'bytes':
# assert pinfo['tuning_config']['t_powers_of_2'] == True
# if len(pname) > max_name_len:
# max_name_len = len(pname)
# contexts.add(pinfo['context'])
# print "Max name length: {}".format(max_name_len)
# print "Contexts: {}".format(contexts)
with open("settings.json", "w") as f:
json.dump(params, f, indent=4)
# maxlen = 0
# for pname, pinfo in params.iteritems():
# length = len(str(pinfo['default']))
# if length > maxlen:
# maxlen = length
# print pname, length
# print "maxlen: {}".format(maxlen)
json_settings = []
sorted_knob_names = []
for pname, pinfo in sorted(params.iteritems()):
entry = {}
entry['model'] = 'website.KnobCatalog'
fields = dict(pinfo)
if fields['tunable'] == 'yes':
fields['tunable'] = True
else:
fields['tunable'] = False
for k,v in fields.iteritems():
if v is not None and not isinstance(v, str) and not isinstance(v, bool):
fields[k] = str(v)
fields['dbms'] = 1
entry['fields'] = fields
json_settings.append(entry)
sorted_knob_names.append(pname)
with open("postgres-96_knobs.json", "w") as f:
json.dump(json_settings, f, indent=4)
shutil.copy("postgres-96_knobs.json", "../../../preload/postgres-96_knobs.json")
#sorted_knobs = [{
# 'model': 'website.PipelineResult',
# 'fields': {
# "dbms": 1,
# "task_type": 1,
# "component": 4,
# "hardware": 17,
# "version_id": 0,
# "value": json.dumps(sorted_knob_names),
# }
#}]
#
#fname = 'postgres-96_sorted_knob_labels.json'
#with open(fname, "w") as f:
# json.dump(sorted_knobs, f, indent=4)
#
#shutil.copy(fname, "../../../preload/")
|
|
"""
.. Copyright (c) 2014, 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Options - price (:mod:`pynance.opt.price`)
==================================================
.. currentmodule:: pynance.opt.price
"""
from __future__ import absolute_import
import pandas as pd
from ._common import _getprice
from ._common import _relevant_rows
from . import _constants
class Price(object):
"""
Wrapper class for :class:`pandas.DataFrame` for retrieving
options prices.
Objects of this class are not intended for direct instantiation
but are created as attributes of objects of type :class:`~pynance.opt.core.Options`.
.. versionadded:: 0.3.0
Parameters
----------
df : :class:`pandas.DataFrame`
Options data.
Attributes
----------
data : :class:`pandas.DataFrame`
Options data.
Methods
-------
.. automethod:: exps
.. automethod:: get
.. automethod:: metrics
.. automethod:: strikes
"""
def __init__(self, df):
self.data = df
def get(self, opttype, strike, expiry):
"""
Price as midpoint between bid and ask.
Parameters
----------
opttype : str
'call' or 'put'.
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : float
Examples
--------
>>> geopts = pn.opt.get('ge')
>>> geopts.price.get('call', 26., '2015-09-18')
0.94
"""
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
return _getprice(_optrow)
def metrics(self, opttype, strike, expiry):
"""
Basic metrics for a specific option.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : :class:`pandas.DataFrame`
"""
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
_index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int', 'Underlying_Price', 'Quote_Time']
_out = pd.DataFrame(index=_index, columns=['Value'])
_out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
for _name in _index[2:]:
_out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
_eq_price = _out.loc['Underlying_Price', 'Value']
if opttype == 'put':
_out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
else:
_out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
return _out
def strikes(self, opttype, expiry):
"""
Retrieve option prices for all strikes of a given type with a given expiration.
Parameters
----------
opttype : str ('call' or 'put')
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
        -------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : datetime.datetime
Time of quote.
See Also
--------
:meth:`exps`
"""
_relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
"No key for {} {}".format(expiry, opttype))
_index = _relevant.index.get_level_values('Strike')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
_quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
return _df, _underlying, _quotetime
def exps(self, opttype, strike):
"""
Prices for given strike on all available dates.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Returns
        -------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : :class:`datetime.datetime`
Time of quote.
See Also
--------
:meth:`strikes`
"""
_relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
"No key for {} {}".format(strike, opttype))
_index = _relevant.index.get_level_values('Expiry')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_eq = _relevant.loc[:, 'Underlying_Price'].values[0]
_qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
return _df, _eq, _qt
def _set_tv_other_ix(df, opttype, pricecol, tvcol, eqprice, strike):
if opttype == 'put':
if strike <= eqprice:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = eqprice - strike
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
else:
if eqprice <= strike:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = strike - eqprice
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
def _set_tv_strike_ix(df, opttype, pricecol, tvcol, eqprice):
df.loc[:, tvcol] = df.loc[:, pricecol]
if opttype == 'put':
_mask = (df.index > eqprice)
df.loc[_mask, tvcol] += eqprice - df.index[_mask]
else:
_mask = (df.index < eqprice)
df.loc[_mask, tvcol] += df.index[_mask] - eqprice
return
def _get_put_time_val(putprice, strike, eqprice):
if strike <= eqprice:
return putprice
return round(putprice + eqprice - strike, _constants.NDIGITS_SIG)
def _get_call_time_val(callprice, strike, eqprice):
if eqprice <= strike:
return callprice
return round(callprice + strike - eqprice, _constants.NDIGITS_SIG)
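# Worked example (illustrative): a call quoted at 0.94 with strike 26.0 while the
# underlying trades at 26.50 carries 0.50 of intrinsic value, so
#     _get_call_time_val(0.94, 26.0, 26.5) == round(0.94 + 26.0 - 26.5, NDIGITS_SIG)
# which is 0.44 after rounding. An out-of-the-money option is pure time value, and the
# quoted price is returned unchanged.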
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_som_om.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_som_om:
def setupUi(self, som_om):
som_om.setObjectName("som_om")
som_om.resize(438, 777)
self.layoutWidget = QtWidgets.QWidget(som_om)
self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 421, 34))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_5 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.verticalLayout_2.addWidget(self.label_5)
self.databaseName = QtWidgets.QLabel(self.layoutWidget)
self.databaseName.setObjectName("databaseName")
self.verticalLayout_2.addWidget(self.databaseName)
self.layoutWidget1 = QtWidgets.QWidget(som_om)
self.layoutWidget1.setGeometry(QtCore.QRect(332, 47, 101, 461))
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_6 = QtWidgets.QLabel(self.layoutWidget1)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.verticalLayout_3.addWidget(self.label_6)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.norm = QtWidgets.QComboBox(self.layoutWidget1)
self.norm.setObjectName("norm")
self.verticalLayout.addWidget(self.norm)
self.norm_1 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_1.setObjectName("norm_1")
self.verticalLayout.addWidget(self.norm_1)
self.norm_2 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_2.setObjectName("norm_2")
self.verticalLayout.addWidget(self.norm_2)
self.norm_3 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_3.setObjectName("norm_3")
self.verticalLayout.addWidget(self.norm_3)
self.norm_4 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_4.setObjectName("norm_4")
self.verticalLayout.addWidget(self.norm_4)
self.norm_5 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_5.setObjectName("norm_5")
self.verticalLayout.addWidget(self.norm_5)
self.norm_6 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_6.setObjectName("norm_6")
self.verticalLayout.addWidget(self.norm_6)
self.norm_7 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_7.setObjectName("norm_7")
self.verticalLayout.addWidget(self.norm_7)
self.norm_8 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_8.setObjectName("norm_8")
self.verticalLayout.addWidget(self.norm_8)
self.norm_9 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_9.setObjectName("norm_9")
self.verticalLayout.addWidget(self.norm_9)
self.norm_10 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_10.setObjectName("norm_10")
self.verticalLayout.addWidget(self.norm_10)
self.norm_11 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_11.setObjectName("norm_11")
self.verticalLayout.addWidget(self.norm_11)
self.norm_12 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_12.setObjectName("norm_12")
self.verticalLayout.addWidget(self.norm_12)
self.norm_13 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_13.setObjectName("norm_13")
self.verticalLayout.addWidget(self.norm_13)
self.norm_14 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_14.setObjectName("norm_14")
self.verticalLayout.addWidget(self.norm_14)
self.norm_15 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_15.setObjectName("norm_15")
self.verticalLayout.addWidget(self.norm_15)
self.norm_16 = QtWidgets.QComboBox(self.layoutWidget1)
self.norm_16.setObjectName("norm_16")
self.verticalLayout.addWidget(self.norm_16)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.layoutWidget2 = QtWidgets.QWidget(som_om)
self.layoutWidget2.setGeometry(QtCore.QRect(10, 620, 421, 148))
self.layoutWidget2.setObjectName("layoutWidget2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.layoutWidget2)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.filtLab = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.filtLab.setFont(font)
self.filtLab.setObjectName("filtLab")
self.gridLayout.addWidget(self.filtLab, 0, 0, 1, 1)
self.label = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
self.filterChan = QtWidgets.QComboBox(self.layoutWidget2)
self.filterChan.setEditable(False)
self.filterChan.setObjectName("filterChan")
self.gridLayout.addWidget(self.filterChan, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 2, 1, 1, 1)
self.outClass = QtWidgets.QLineEdit(self.layoutWidget2)
self.outClass.setObjectName("outClass")
self.gridLayout.addWidget(self.outClass, 3, 0, 1, 1)
self.outError = QtWidgets.QLineEdit(self.layoutWidget2)
self.outError.setObjectName("outError")
self.gridLayout.addWidget(self.outError, 3, 1, 1, 1)
self.filterVal = QtWidgets.QLineEdit(self.layoutWidget2)
self.filterVal.setObjectName("filterVal")
self.gridLayout.addWidget(self.filterVal, 1, 1, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.classButton = QtWidgets.QPushButton(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.classButton.setFont(font)
self.classButton.setObjectName("classButton")
self.gridLayout_2.addWidget(self.classButton, 0, 0, 1, 1)
self.progLabel = QtWidgets.QLabel(self.layoutWidget2)
self.progLabel.setObjectName("progLabel")
self.gridLayout_2.addWidget(self.progLabel, 0, 1, 1, 1)
self.stopButton = QtWidgets.QPushButton(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.stopButton.setFont(font)
self.stopButton.setObjectName("stopButton")
self.gridLayout_2.addWidget(self.stopButton, 1, 0, 1, 1)
self.progressBar = QtWidgets.QProgressBar(self.layoutWidget2)
self.progressBar.setProperty("value", 0)
self.progressBar.setTextVisible(False)
self.progressBar.setObjectName("progressBar")
self.gridLayout_2.addWidget(self.progressBar, 1, 1, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout_2)
self.line = QtWidgets.QFrame(som_om)
self.line.setGeometry(QtCore.QRect(12, 511, 421, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.layoutWidget3 = QtWidgets.QWidget(som_om)
self.layoutWidget3.setGeometry(QtCore.QRect(14, 57, 311, 450))
self.layoutWidget3.setObjectName("layoutWidget3")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.layoutWidget3)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_9 = QtWidgets.QLabel(self.layoutWidget3)
self.label_9.setText("")
self.label_9.setObjectName("label_9")
self.verticalLayout_4.addWidget(self.label_9)
self.label_2 = QtWidgets.QLabel(self.layoutWidget3)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout_4.addWidget(self.label_2)
self.chan_1 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_1.setObjectName("chan_1")
self.verticalLayout_4.addWidget(self.chan_1)
self.chan_2 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_2.setObjectName("chan_2")
self.verticalLayout_4.addWidget(self.chan_2)
self.chan_3 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_3.setObjectName("chan_3")
self.verticalLayout_4.addWidget(self.chan_3)
self.chan_4 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_4.setObjectName("chan_4")
self.verticalLayout_4.addWidget(self.chan_4)
self.chan_5 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_5.setObjectName("chan_5")
self.verticalLayout_4.addWidget(self.chan_5)
self.chan_6 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_6.setObjectName("chan_6")
self.verticalLayout_4.addWidget(self.chan_6)
self.chan_7 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_7.setObjectName("chan_7")
self.verticalLayout_4.addWidget(self.chan_7)
self.chan_8 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_8.setObjectName("chan_8")
self.verticalLayout_4.addWidget(self.chan_8)
self.chan_9 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_9.setObjectName("chan_9")
self.verticalLayout_4.addWidget(self.chan_9)
self.chan_10 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_10.setObjectName("chan_10")
self.verticalLayout_4.addWidget(self.chan_10)
self.chan_11 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_11.setObjectName("chan_11")
self.verticalLayout_4.addWidget(self.chan_11)
self.chan_12 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_12.setObjectName("chan_12")
self.verticalLayout_4.addWidget(self.chan_12)
self.chan_13 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_13.setObjectName("chan_13")
self.verticalLayout_4.addWidget(self.chan_13)
self.chan_14 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_14.setObjectName("chan_14")
self.verticalLayout_4.addWidget(self.chan_14)
self.chan_15 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_15.setObjectName("chan_15")
self.verticalLayout_4.addWidget(self.chan_15)
self.chan_16 = QtWidgets.QComboBox(self.layoutWidget3)
self.chan_16.setObjectName("chan_16")
self.verticalLayout_4.addWidget(self.chan_16)
self.layoutWidget4 = QtWidgets.QWidget(som_om)
self.layoutWidget4.setGeometry(QtCore.QRect(88, 530, 238, 76))
self.layoutWidget4.setObjectName("layoutWidget4")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget4)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.similarity = QtWidgets.QLabel(self.layoutWidget4)
self.similarity.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.similarity.setObjectName("similarity")
self.verticalLayout_7.addWidget(self.similarity)
self.label_7 = QtWidgets.QLabel(self.layoutWidget4)
self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_7.setObjectName("label_7")
self.verticalLayout_7.addWidget(self.label_7)
self.label_8 = QtWidgets.QLabel(self.layoutWidget4)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName("label_8")
self.verticalLayout_7.addWidget(self.label_8)
self.horizontalLayout.addLayout(self.verticalLayout_7)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.similarity_func = QtWidgets.QComboBox(self.layoutWidget4)
self.similarity_func.setCurrentText("")
self.similarity_func.setObjectName("similarity_func")
self.verticalLayout_6.addWidget(self.similarity_func)
self.nClasses = QtWidgets.QComboBox(self.layoutWidget4)
self.nClasses.setObjectName("nClasses")
self.verticalLayout_6.addWidget(self.nClasses)
self.anomPercent = QtWidgets.QLineEdit(self.layoutWidget4)
self.anomPercent.setObjectName("anomPercent")
self.verticalLayout_6.addWidget(self.anomPercent)
self.horizontalLayout.addLayout(self.verticalLayout_6)
self.layoutWidget.raise_()
self.layoutWidget.raise_()
self.layoutWidget.raise_()
self.layoutWidget.raise_()
self.layoutWidget.raise_()
self.line.raise_()
self.retranslateUi(som_om)
self.similarity_func.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(som_om)
def retranslateUi(self, som_om):
_translate = QtCore.QCoreApplication.translate
som_om.setWindowTitle(_translate("som_om", "Self Organizing Classification"))
self.label_5.setText(_translate("som_om", "Database:"))
self.databaseName.setText(_translate("som_om", "database name..."))
self.label_6.setText(_translate("som_om", "normalize"))
self.filtLab.setText(_translate("som_om", "Filter on:"))
self.label.setText(_translate("som_om", "Filter value:"))
self.label_3.setText(_translate("som_om", "Save classification to:"))
self.label_4.setText(_translate("som_om", "Save fit to:"))
self.classButton.setText(_translate("som_om", "Classify"))
self.progLabel.setText(_translate("som_om", "..."))
self.stopButton.setText(_translate("som_om", "Stop"))
self.label_2.setText(_translate("som_om", "Analyse data:"))
self.similarity.setText(_translate("som_om", "Similarity function"))
self.label_7.setText(_translate("som_om", "Base classes"))
self.label_8.setText(_translate("som_om", "Anomalous %"))
self.similarity_func.setToolTip(_translate("som_om", "Method to determine similarity"))
self.nClasses.setToolTip(_translate("som_om", "Number of base classes"))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import tempfile
from dlrn.api import app
from dlrn import db
from dlrn.tests import base
from dlrn import utils
from flask import json
def mocked_session(url):
db_fd, filepath = tempfile.mkstemp()
session = db.getSession("sqlite:///%s" % filepath)
utils.loadYAML(session, './dlrn/tests/samples/commits_2.yaml')
return session
def mocked_getpackages(**kwargs):
return [{'upstream': 'https://github.com/openstack/python-pysaml2',
'name': 'python-pysaml2', 'maintainers': 'test@test.com'},
{'upstream': 'https://github.com/openstack/python-alembic',
'name': 'python-alembic', 'maintainers': 'test@test.com'},
{'upstream': 'https://github.com/openstack/puppet-stdlib',
'name': 'puppet-stdlib', 'maintainers': 'test@test.com'},
{'upstream': 'https://github.com/openstack/puppet-apache',
'name': 'puppet-apache', 'maintainers': 'test@test.com'}]
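# Both helpers above are wired in as mock side effects below: mocked_session() swaps the
# real database for a throwaway SQLite file preloaded from commits_2.yaml, and
# mocked_getpackages() stands in for the rdoinfo driver so the package list is fixed.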
class DLRNAPIGraphQLTestCase(base.TestCase):
def setUp(self):
super(DLRNAPIGraphQLTestCase, self).setUp()
self.db_fd, self.filepath = tempfile.mkstemp()
app.config['DB_PATH'] = "sqlite:///%s" % self.filepath
app.config['REPO_PATH'] = '/tmp'
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
os.close(self.db_fd)
os.unlink(self.filepath)
super(DLRNAPIGraphQLTestCase, self).tearDown()
@mock.patch('dlrn.api.graphql.getSession', side_effect=mocked_session)
class TestBasic(DLRNAPIGraphQLTestCase):
def test_failed_query(self, db_mock):
response = self.app.get('/api/graphql?query={ foo { id } }')
self.assertEqual(response.status_code, 400)
@mock.patch('dlrn.api.graphql.getSession', side_effect=mocked_session)
class TestCommitsQuery(DLRNAPIGraphQLTestCase):
def test_basic_query(self, db_mock):
response = self.app.get('/api/graphql?query={ commits { id } }')
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 5)
def test_filtered_query(self, db_mock):
query = """
query {
commits(projectName: "python-alembic")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 2)
def test_filtered_query_commitHash(self, db_mock):
query = """
query {
commits(commitHash: "1c67b1ab8c6fe273d4e175a14f0df5d3cbbd0edc")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 1)
def test_filtered_query_distroHash(self, db_mock):
query = """
query {
commits(distroHash: "008678d7b0e20fbae185f2bb1bd0d9d167586211")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 2)
def test_filtered_query_extendedHash_full(self, db_mock):
query = """
query {
commits(extendedHash: "1234567890_1234567890")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 1)
def test_filtered_query_extendedHash_wildcard(self, db_mock):
query = """
query {
commits(extendedHash: "1234567890_%")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 1)
def test_filtered_query_extendedHash_wildcard_noresult(self, db_mock):
query = """
query {
commits(extendedHash: "abcdef%")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 0)
def test_filtered_query_component(self, db_mock):
query = """
query {
commits(component: "tripleo")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 3)
def test_badfiltered_query(self, db_mock):
query = """
query {
commits(projectFoo: "python-alembic")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 400)
def test_non_existing_filtered_query(self, db_mock):
query = """
query {
commits(projectName: "python-bar")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 0)
def test_get_multiple_fields(self, db_mock):
query = """
query {
commits(projectName: "puppet-stdlib")
{
status
component
commitHash
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 2)
self.assertEqual(data['data']['commits'][0]['status'],
'SUCCESS')
self.assertEqual(data['data']['commits'][0]['component'],
'tripleo')
self.assertEqual(data['data']['commits'][0]['commitHash'],
'93eee77657978547f5fad1cb8cd30b570da83e68')
# We are only getting the fields we asked for, and not more
assert 'distroHash' not in data['data']['commits'][0]
def test_get_limit(self, db_mock):
query = """
query {
commits(projectName: "python-alembic", limit: 1)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 1)
def test_get_offset(self, db_mock):
query = """
query {
commits(offset: 2)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['commits']), 3)
@mock.patch('dlrn.api.graphql.getSession', side_effect=mocked_session)
class TestcivoteQuery(DLRNAPIGraphQLTestCase):
def test_basic_query(self, db_mock):
response = self.app.get('/api/graphql?query={ civote { id } }')
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 5)
def test_get_offset(self, db_mock):
query = """
query {
civote(offset: 2)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 3)
def test_get_limit(self, db_mock):
query = """
query {
civote(limit: 2)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 2)
def test_filtered_query(self, db_mock):
query = """
query {
civote(commitId: 5627)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 2)
def test_filtered_query_component(self, db_mock):
query = """
query {
civote(ciName: "another-ci")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 1)
def test_filtered_query_civote_true(self, db_mock):
query = """
query {
civote(ciVote: true)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 4)
def test_filtered_query_civote_false(self, db_mock):
query = """
query {
civote(ciVote: false)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 1)
def test_filtered_query_inprogress_true(self, db_mock):
query = """
query {
civote(ciInProgress: true)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 1)
def test_filtered_query_inprogress_false(self, db_mock):
query = """
query {
civote(ciInProgress: false)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 4)
def test_badfiltered_query(self, db_mock):
query = """
query {
civote(commitId: "TextnotInt")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 400)
def test_get_multiple_fields(self, db_mock):
query = """
query {
civote(commitId: 5627)
{
commitId
ciName
ciVote
ciInProgress
timestamp
user
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civote']), 2)
self.assertEqual(data['data']['civote'][0]['commitId'], 5627)
self.assertEqual(data['data']['civote'][0]['ciName'],
'current-passed-ci')
self.assertEqual(data['data']['civote'][0]['ciVote'], False)
self.assertEqual(data['data']['civote'][0]['ciInProgress'], True)
self.assertEqual(data['data']['civote'][0]['timestamp'], 1441635090)
self.assertEqual(data['data']['civote'][0]['user'], 'foo')
assert 'component' not in data['data']['civote'][0]
@mock.patch('dlrn.api.graphql.getSession', side_effect=mocked_session)
class TestCIVoteAggregationQuery(DLRNAPIGraphQLTestCase):
def test_basic_query(self, db_mock):
response = self.app.get('/api/graphql?query={ civoteAgg { id } }')
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 3)
def test_get_offset(self, db_mock):
query = """
query {
civoteAgg(offset: 2)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 1)
def test_get_limit(self, db_mock):
query = """
query {
civoteAgg(limit: 2)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 2)
def test_filtered_query(self, db_mock):
query = """
query {
civoteAgg(refHash: "12345678")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 2)
def test_filtered_civote_true(self, db_mock):
query = """
query {
civoteAgg(ciVote: true)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 2)
def test_filtered_civote_false(self, db_mock):
query = """
query {
civoteAgg(ciVote: false)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 1)
def test_filtered_ciinprogress_false(self, db_mock):
query = """
query {
civoteAgg(ciInProgress: false)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 3)
def test_filtered_ciinprogress_true(self, db_mock):
query = """
query {
civoteAgg(ciInProgress: true)
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 0)
def test_badfiltered_query(self, db_mock):
query = """
query {
civoteAgg(commit_id: "TextnotInt")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 400)
def test_get_multiple_fields(self, db_mock):
query = """
query {
civoteAgg(refHash: "12345678")
{
id
refHash
ciName
ciUrl
ciVote
ciInProgress
timestamp
notes
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['civoteAgg']), 2)
self.assertEqual(data['data']['civoteAgg'][0]['refHash'], '12345678')
self.assertEqual(data['data']['civoteAgg'][0]['ciName'], 'phase2-ci')
self.assertEqual(data['data']['civoteAgg'][0]['ciUrl'],
'http://dummyci.example.com/phase2-ci')
self.assertEqual(data['data']['civoteAgg'][0]['ciVote'], False)
self.assertEqual(data['data']['civoteAgg'][0]['ciInProgress'], False)
self.assertEqual(data['data']['civoteAgg'][0]['timestamp'],
1441635195)
self.assertEqual(data['data']['civoteAgg'][0]['notes'], '')
assert 'user' not in data['data']['civoteAgg'][0]
@mock.patch('dlrn.drivers.rdoinfo.RdoInfoDriver.getpackages',
side_effect=mocked_getpackages)
@mock.patch('dlrn.api.graphql.getSession', side_effect=mocked_session)
class TestPackageStatusQuery(DLRNAPIGraphQLTestCase):
def test_basic_query(self, db_mock, gp_mock):
response = self.app.get('/api/graphql?query={ packageStatus { id } }')
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['packageStatus']), 4)
def test_filtered_query(self, db_mock, gp_mock):
query = """
query {
packageStatus(projectName: "python-alembic")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['packageStatus']), 1)
def test_filtered_query_status(self, db_mock, gp_mock):
query = """
query {
packageStatus(status: "NO_BUILD")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['packageStatus']), 1)
def test_filtered_query_missing(self, db_mock, gp_mock):
query = """
query {
packageStatus(status: "FAILED")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertEqual(len(data['data']['packageStatus']), 0)
def test_badfiltered_query(self, db_mock, gp_mock):
query = """
query {
packageStatus(statuserror: "RETRY")
{
id
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
self.assertEqual(response.status_code, 400)
def test_get_multiple_fields(self, db_mock, gp_mock):
query = """
query {
packageStatus(status: "SUCCESS")
{
id
projectName
status
lastSuccess
}
}
"""
response = self.app.get('/api/graphql?query=%s' % query)
data = json.loads(response.data)
self.assertEqual(len(data['data']['packageStatus']), 3)
self.assertEqual(data['data']['packageStatus'][0]['projectName'],
'python-pysaml2')
self.assertEqual(data['data']['packageStatus'][0]['status'],
'SUCCESS')
self.assertEqual(data['data']['packageStatus'][0]['lastSuccess'], None)
assert 'firstFailureCommit' not in data['data']['packageStatus'][0]
|
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import os.path
import sys
import jinja2
import six
from six.moves import configparser
from six.moves.urllib import parse as urllib_parse
def escape(buff):
"""Because otherwise Firefox is a sad panda."""
return buff.replace(',', '%2c').replace('-', '%2D')
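# Example: escape('owner:self,-age:1d') -> 'owner:self%2c%2Dage:1d'; commas and dashes
# are percent-encoded by hand on top of what urlencode() already does.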
def generate_dashboard_url(dashboard):
"""Generate a dashboard URL from a given definition."""
try:
title = dashboard.get('dashboard', 'title')
except configparser.NoOptionError:
raise ValueError("option 'title' in section 'dashboard' not set")
try:
foreach = dashboard.get('dashboard', 'foreach')
except configparser.NoOptionError:
raise ValueError("option 'foreach' in section 'dashboard' not set")
try:
baseurl = dashboard.get('dashboard', 'baseurl')
except configparser.NoOptionError:
baseurl = 'https://review.openstack.org/#/dashboard/?'
url = baseurl
url += escape(urllib_parse.urlencode({'title': title,
'foreach': foreach}))
for section in dashboard.sections():
if not section.startswith('section'):
continue
try:
query = dashboard.get(section, 'query')
except configparser.NoOptionError:
raise ValueError("option 'query' in '%s' not set" % section)
        title = section[9:-1]  # strip the leading 'section "' prefix and the trailing '"'
encoded = escape(urllib_parse.urlencode({title: query}))
url += "&%s" % encoded
return url
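# Illustrative sketch of the expected input (not a file from this repository): a
# definition such as
#
#   [dashboard]
#   title = My reviews
#   foreach = owner:self
#
#   [section "Open"]
#   query = status:open
#
# yields a URL of the form
#   https://review.openstack.org/#/dashboard/?title=My+reviews&foreach=owner%3Aself&Open=status%3Aopen
# with ',' and '-' additionally escaped by escape() above.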
def get_options():
"""Parse command line arguments and options."""
parser = argparse.ArgumentParser(
description='Create a Gerrit dashboard URL from specified dashboard '
'definition files')
parser.add_argument('dashboard_paths', nargs='+',
metavar='dashboard_path',
help='Path to a dashboard definition file or a '
'directory containing a set of dashboard '
'definition files with the file suffix .dash.')
parser.add_argument('--check-only', default=False, action="store_true",
                        help='Only check the syntax of the specified '
                             'dashboard files')
parser.add_argument('--template', default='single.txt',
help='Name of template')
# Find path to template_dir
# We need to support running with and without installation
if os.path.exists('templates'):
template_dir = 'templates'
elif os.path.exists('/usr/local/share/gerrit-dash-creator/templates'):
template_dir = '/usr/local/share/gerrit-dash-creator/templates'
else:
template_dir = os.path.join(sys.prefix, 'share',
'gerrit-dash-creator', 'templates')
parser.add_argument('--template-directory',
default=template_dir,
help='Directory to scan for template files')
parser.add_argument('--template-file', default=None,
help='Location of a specific template file')
return parser.parse_args()
def read_dashboard_file(dashboard_file):
"""Read and parse a dashboard definition from a specified file."""
if (not os.path.isfile(dashboard_file) or
not os.access(dashboard_file, os.R_OK)):
raise ValueError("dashboard file '%s' is missing or "
"is not readable" % dashboard_file)
dashboard = configparser.ConfigParser()
    with open(dashboard_file) as dash_fd:
        dashboard.readfp(dash_fd)
return dashboard
def load_template(template_file=None, template_directory=None,
template_name=None):
"""Load the specified template."""
if template_file:
template_name = os.path.basename(template_file)
template_directory = os.path.dirname(os.path.abspath(template_file))
try:
loader = jinja2.FileSystemLoader(template_directory)
environment = jinja2.Environment(loader=loader)
template = environment.get_template(template_name)
except (jinja2.exceptions.TemplateError, IOError) as e:
print("error: opening template '%s' failed: %s" %
(template_name, e.__class__.__name__))
return
return template
def get_configuration(dashboard):
"""Returns the configuration of a dashboard as string."""
configuration = six.StringIO()
dashboard.write(configuration)
result = configuration.getvalue()
configuration.close()
return result
def generate_dashboard_urls(dashboards, template):
"""Prints the dashboard URLs of a set of dashboards."""
result = 0
for dashboard_file in dashboards:
dashboard = dashboards[dashboard_file]
try:
url = generate_dashboard_url(dashboard)
except ValueError as e:
raise ValueError("generating dashboard '%s' failed: %s" %
(dashboard_file, e))
result = 1
continue
variables = {
'url': url,
'title': dashboard.get('dashboard', 'title') or None,
'description': dashboard.get('dashboard', 'description') or None,
'configuration': get_configuration(dashboard)
}
print(template.render(variables))
return result
def load_dashboards(paths):
"""Load specified dashboards from files or directories."""
dashboards = {}
for dashboard_path in paths:
dashboard_files = []
if os.path.isdir(dashboard_path):
for root, dirs, files in os.walk(dashboard_path):
for file in files:
if file.endswith('.dash'):
dashboard_files.append(os.path.join(root, file))
else:
dashboard_files.append(dashboard_path)
for dashboard_file in dashboard_files:
try:
dashboards[dashboard_file] = read_dashboard_file(
dashboard_file
)
except configparser.Error as e:
raise ValueError("dashboard file '%s' cannot be "
"parsed: %s" % (dashboard_file, e))
return dashboards
def main():
"""Entrypoint."""
opts = get_options()
template = None
if not opts.check_only:
template = load_template(
template_file=opts.template_file,
template_directory=opts.template_directory,
template_name=opts.template
)
try:
dashboards = load_dashboards(opts.dashboard_paths)
if not opts.check_only and template:
generate_dashboard_urls(dashboards, template)
elif not opts.check_only and not template:
return 1
except ValueError as e:
print("error: %s" % e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount_secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount_secret
short_description: Module to manage openshift service account secrets
description:
- Manage openshift service account secrets programmatically.
options:
state:
description:
- If present, the service account will be linked with the secret if it is not already. If absent, the service account will be unlinked from the secret if it is already linked. If list, information about the service account secrets will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
service_account:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account and secret.
required: true
default: None
aliases: []
secret:
description:
- The secret that should be linked to the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get secrets of a service account
oc_serviceaccount_secret:
state: list
service_account: builder
namespace: default
register: sasecretout
- name: Link a service account to a specific secret
oc_serviceaccount_secret:
service_account: builder
secret: mynewsecret
namespace: default
register: sasecretout
'''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# noqa: E301,E302
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
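    # For illustration: parse_key yields (array_index, dict_key) tuples, e.g.
    #   Yedit.parse_key('a.b[0].c')  ->  [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
    # which is the shape remove_entry/add_entry/get_entry below iterate over.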
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an entry to a dictionary using key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b, item = 'z'  ->  d becomes {'a': {'b': 'z'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
return None
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
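    # A minimal sketch of get_entry with the default '.' separator
    # (hypothetical data):
    #   Yedit.get_entry({'a': {'b': ['x', 'y']}}, 'a.b[1]')  ->  'y'
    #   Yedit.get_entry({'a': {'b': 'c'}}, 'a.z')            ->  None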
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
if hasattr(yaml, 'RoundTripDumper'):
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
# pylint: disable=no-member
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
else:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
else:
self.yaml_dict = yaml.safe_load(contents)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
if hasattr(yaml, 'round_trip_dump'):
# pylint: disable=no-member
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
else:
tmp_copy = copy.deepcopy(self.yaml_dict)
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
if hasattr(yaml, 'round_trip_dump'):
# pylint: disable=no-member
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
else:
tmp_copy = copy.deepcopy(self.yaml_dict)
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
% (inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# If vtype is not str then go ahead and attempt to yaml load it.
if isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming ' +
'value. value=[%s] vtype=[%s]'
% (type(inc_value), vtype))
return inc_value
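    # For illustration (hypothetical values): Yedit.parse_value('maybe', 'bool')
    # raises YeditException because 'maybe' is not an accepted true/false
    # spelling, while Yedit.parse_value(True, 'str') returns the string 'True'.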
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
return {'failed': True,
                        'msg': ('Error opening file [%s]. Verify that the '
                                'file exists, that it has correct '
                                'permissions, and is valid yaml.'
                                % module.params['src'])}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, rname, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource, rname]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
elif rname:
cmd.append(rname)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['oadm']
else:
cmds = ['oc']
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
returncode, stdout, stderr = self._run(cmds, input_data)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        if "No JSON object could be decoded" in verr.args:
                            err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
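    # A minimal sketch (hypothetical input): Utils.add_custom_versions({'oc': 'v3.3.0.33'})
    # returns {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}.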
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(value)
print(user_def[key])
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(api_values)
print(user_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
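    # For illustration (hypothetical options dict):
    #   OpenShiftCLIConfig('foo', 'default', '/tmp/kubeconfig',
    #                      {'service_account': {'value': 'builder', 'include': True},
    #                       'dry_run': {'value': None, 'include': True}}).stringify()
    # returns ['--service-account=builder']; entries whose value is falsy and
    # not an int are skipped.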
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiate a properly structured volume '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets is None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
''' property for secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
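    # A minimal usage sketch (hypothetical content): given
    #   sa = ServiceAccount(content={'kind': 'ServiceAccount',
    #                                'metadata': {'name': 'builder'},
    #                                'secrets': [{'name': 'builder-token'}]})
    # sa.find_secret('builder-token') returns {'name': 'builder-token'} and
    # sa.add_secret('mynewsecret') appends {'name': 'mynewsecret'} to 'secrets'.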
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
class OCServiceAccountSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
@property
def service_account(self):
''' Property for the service account '''
if not self._service_account:
self.get()
return self._service_account
@service_account.setter
def service_account(self, data):
''' setter for the service account '''
self._service_account = data
def exists(self, in_secret):
''' verifies if secret exists in the service account '''
result = self.service_account.find_secret(in_secret)
if not result:
return False
return True
def get(self):
''' get the service account definition from the master '''
sao = self._get(OCServiceAccountSecret.kind, self.config.name)
if sao['returncode'] == 0:
self.service_account = ServiceAccount(content=sao['results'][0])
sao['results'] = self.service_account.get('secrets')
return sao
def delete(self):
''' delete secrets '''
modified = []
for rem_secret in self.config.secrets:
modified.append(self.service_account.delete_secret(rem_secret))
if any(modified):
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
def put(self):
''' place secrets into sa '''
modified = False
for add_secret in self.config.secrets:
if not self.service_account.find_secret(add_secret):
self.service_account.add_secret(add_secret)
modified = True
if modified:
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
''' run the ansible idempotent code '''
sconfig = ServiceAccountConfig(params['service_account'],
params['namespace'],
params['kubeconfig'],
[params['secret']],
None)
oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sa_sec.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
########
# Delete
########
if state == 'absent':
if oc_sa_sec.exists(params['secret']):
if check_mode:
                    return {'changed': True, 'msg': 'Would have removed the ' + \
                        'secret from the service account.'}
api_rval = oc_sa_sec.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Create
########
if not oc_sa_sec.exists(params['secret']):
if check_mode:
return {'changed': True, 'msg': 'Would have added the ' + \
'secret to the service account.'}
# Create it here
api_rval = oc_sa_sec.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa_sec.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
def main():
'''
ansible oc module to manage service account secrets.
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default=None, required=True, type='str'),
secret=dict(default=None, type='str'),
service_account=dict(required=True, type='str'),
),
supports_check_mode=True,
)
rval = OCServiceAccountSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the defacl command."""
from __future__ import absolute_import
import re
from gslib.cs_api_map import ApiSelector
import gslib.tests.testcase as case
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
PUBLIC_READ_JSON_ACL_TEXT = '"entity":"allUsers","role":"READER"'
@SkipForS3('S3 does not support default object ACLs.')
class TestDefacl(case.GsUtilIntegrationTestCase):
"""Integration tests for the defacl command."""
_defacl_ch_prefix = ['defacl', 'ch']
_defacl_get_prefix = ['defacl', 'get']
_defacl_set_prefix = ['defacl', 'set']
def _MakeScopeRegex(self, role, entity_type, email_address):
template_regex = (r'\{.*"entity":\s*"%s-%s".*"role":\s*"%s".*\}' %
(entity_type, email_address, role))
return re.compile(template_regex, flags=re.DOTALL)
def testChangeDefaultAcl(self):
"""Tests defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'group', self.GROUP_TEST_ADDRESS)
test_regex2 = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':FC', suri(bucket)])
json_text2 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text2, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
json_text3 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text3, test_regex2)
stderr = self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':WRITE',
suri(bucket)],
return_stderr=True, expected_status=1)
self.assertIn('WRITER cannot be set as a default object ACL', stderr)
def testChangeDefaultAclEmpty(self):
"""Tests adding and removing an entry from an empty default object ACL."""
bucket = self.CreateBucket()
# First, clear out the default object ACL on the bucket.
self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
json_text = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
empty_regex = r'\[\]\s*'
self.assertRegexpMatches(json_text, empty_regex)
group_regex = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
json_text2 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text2, group_regex)
if self.test_api == ApiSelector.JSON:
# TODO: Enable when JSON service respects creating a private (no entries)
# default object ACL via PATCH. For now, only supported in XML.
return
# After adding and removing a group, the default object ACL should be empty.
self.RunGsUtil(self._defacl_ch_prefix +
['-d', self.GROUP_TEST_ADDRESS, suri(bucket)])
json_text3 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text3, empty_regex)
def testChangeMultipleBuckets(self):
"""Tests defacl ch on multiple buckets."""
bucket1 = self.CreateBucket()
bucket2 = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
suri(bucket1), suri(bucket2)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
def testChangeMultipleAcls(self):
"""Tests defacl ch with multiple ACL entries."""
bucket = self.CreateBucket()
test_regex_group = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
test_regex_user = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex_group)
self.assertNotRegexpMatches(json_text, test_regex_user)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
'-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex_group)
self.assertRegexpMatches(json_text, test_regex_user)
def testEmptyDefAcl(self):
bucket = self.CreateBucket()
self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
stdout = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertEquals(stdout.rstrip(), '[]')
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
def testDeletePermissionsWithCh(self):
"""Tests removing permissions with defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-d', self.USER_TEST_ADDRESS, suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
def testTooFewArgumentsFails(self):
"""Tests calling defacl with insufficient number of arguments."""
# No arguments for get, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_get_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for set, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_set_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for ch, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_ch_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# Neither arguments nor subcommand.
stderr = self.RunGsUtil(['defacl'], return_stderr=True, expected_status=1)
self.assertIn('command requires at least', stderr)
class TestDefaclOldAlias(TestDefacl):
_defacl_ch_prefix = ['chdefacl']
_defacl_get_prefix = ['getdefacl']
_defacl_set_prefix = ['setdefacl']
|
|
data = (
'Ku ', # 0x00
'Ke ', # 0x01
'Tang ', # 0x02
'Kun ', # 0x03
'Ni ', # 0x04
'Jian ', # 0x05
'Dui ', # 0x06
'Jin ', # 0x07
'Gang ', # 0x08
'Yu ', # 0x09
'E ', # 0x0a
'Peng ', # 0x0b
'Gu ', # 0x0c
'Tu ', # 0x0d
'Leng ', # 0x0e
'[?] ', # 0x0f
'Ya ', # 0x10
'Qian ', # 0x11
'[?] ', # 0x12
'An ', # 0x13
'[?] ', # 0x14
'Duo ', # 0x15
'Nao ', # 0x16
'Tu ', # 0x17
'Cheng ', # 0x18
'Yin ', # 0x19
'Hun ', # 0x1a
'Bi ', # 0x1b
'Lian ', # 0x1c
'Guo ', # 0x1d
'Die ', # 0x1e
'Zhuan ', # 0x1f
'Hou ', # 0x20
'Bao ', # 0x21
'Bao ', # 0x22
'Yu ', # 0x23
'Di ', # 0x24
'Mao ', # 0x25
'Jie ', # 0x26
'Ruan ', # 0x27
'E ', # 0x28
'Geng ', # 0x29
'Kan ', # 0x2a
'Zong ', # 0x2b
'Yu ', # 0x2c
'Huang ', # 0x2d
'E ', # 0x2e
'Yao ', # 0x2f
'Yan ', # 0x30
'Bao ', # 0x31
'Ji ', # 0x32
'Mei ', # 0x33
'Chang ', # 0x34
'Du ', # 0x35
'Tuo ', # 0x36
'Yin ', # 0x37
'Feng ', # 0x38
'Zhong ', # 0x39
'Jie ', # 0x3a
'Zhen ', # 0x3b
'Feng ', # 0x3c
'Gang ', # 0x3d
'Chuan ', # 0x3e
'Jian ', # 0x3f
'Pyeng ', # 0x40
'Toride ', # 0x41
'Xiang ', # 0x42
'Huang ', # 0x43
'Leng ', # 0x44
'Duan ', # 0x45
'[?] ', # 0x46
'Xuan ', # 0x47
'Ji ', # 0x48
'Ji ', # 0x49
'Kuai ', # 0x4a
'Ying ', # 0x4b
'Ta ', # 0x4c
'Cheng ', # 0x4d
'Yong ', # 0x4e
'Kai ', # 0x4f
'Su ', # 0x50
'Su ', # 0x51
'Shi ', # 0x52
'Mi ', # 0x53
'Ta ', # 0x54
'Weng ', # 0x55
'Cheng ', # 0x56
'Tu ', # 0x57
'Tang ', # 0x58
'Que ', # 0x59
'Zhong ', # 0x5a
'Li ', # 0x5b
'Peng ', # 0x5c
'Bang ', # 0x5d
'Sai ', # 0x5e
'Zang ', # 0x5f
'Dui ', # 0x60
'Tian ', # 0x61
'Wu ', # 0x62
'Cheng ', # 0x63
'Xun ', # 0x64
'Ge ', # 0x65
'Zhen ', # 0x66
'Ai ', # 0x67
'Gong ', # 0x68
'Yan ', # 0x69
'Kan ', # 0x6a
'Tian ', # 0x6b
'Yuan ', # 0x6c
'Wen ', # 0x6d
'Xie ', # 0x6e
'Liu ', # 0x6f
'Ama ', # 0x70
'Lang ', # 0x71
'Chang ', # 0x72
'Peng ', # 0x73
'Beng ', # 0x74
'Chen ', # 0x75
'Cu ', # 0x76
'Lu ', # 0x77
'Ou ', # 0x78
'Qian ', # 0x79
'Mei ', # 0x7a
'Mo ', # 0x7b
'Zhuan ', # 0x7c
'Shuang ', # 0x7d
'Shu ', # 0x7e
'Lou ', # 0x7f
'Chi ', # 0x80
'Man ', # 0x81
'Biao ', # 0x82
'Jing ', # 0x83
'Qi ', # 0x84
'Shu ', # 0x85
'Di ', # 0x86
'Zhang ', # 0x87
'Kan ', # 0x88
'Yong ', # 0x89
'Dian ', # 0x8a
'Chen ', # 0x8b
'Zhi ', # 0x8c
'Xi ', # 0x8d
'Guo ', # 0x8e
'Qiang ', # 0x8f
'Jin ', # 0x90
'Di ', # 0x91
'Shang ', # 0x92
'Mu ', # 0x93
'Cui ', # 0x94
'Yan ', # 0x95
'Ta ', # 0x96
'Zeng ', # 0x97
'Qi ', # 0x98
'Qiang ', # 0x99
'Liang ', # 0x9a
'[?] ', # 0x9b
'Zhui ', # 0x9c
'Qiao ', # 0x9d
'Zeng ', # 0x9e
'Xu ', # 0x9f
'Shan ', # 0xa0
'Shan ', # 0xa1
'Ba ', # 0xa2
'Pu ', # 0xa3
'Kuai ', # 0xa4
'Dong ', # 0xa5
'Fan ', # 0xa6
'Que ', # 0xa7
'Mo ', # 0xa8
'Dun ', # 0xa9
'Dun ', # 0xaa
'Dun ', # 0xab
'Di ', # 0xac
'Sheng ', # 0xad
'Duo ', # 0xae
'Duo ', # 0xaf
'Tan ', # 0xb0
'Deng ', # 0xb1
'Wu ', # 0xb2
'Fen ', # 0xb3
'Huang ', # 0xb4
'Tan ', # 0xb5
'Da ', # 0xb6
'Ye ', # 0xb7
'Sho ', # 0xb8
'Mama ', # 0xb9
'Yu ', # 0xba
'Qiang ', # 0xbb
'Ji ', # 0xbc
'Qiao ', # 0xbd
'Ken ', # 0xbe
'Yi ', # 0xbf
'Pi ', # 0xc0
'Bi ', # 0xc1
'Dian ', # 0xc2
'Jiang ', # 0xc3
'Ye ', # 0xc4
'Yong ', # 0xc5
'Bo ', # 0xc6
'Tan ', # 0xc7
'Lan ', # 0xc8
'Ju ', # 0xc9
'Huai ', # 0xca
'Dang ', # 0xcb
'Rang ', # 0xcc
'Qian ', # 0xcd
'Xun ', # 0xce
'Lan ', # 0xcf
'Xi ', # 0xd0
'He ', # 0xd1
'Ai ', # 0xd2
'Ya ', # 0xd3
'Dao ', # 0xd4
'Hao ', # 0xd5
'Ruan ', # 0xd6
'Mama ', # 0xd7
'Lei ', # 0xd8
'Kuang ', # 0xd9
'Lu ', # 0xda
'Yan ', # 0xdb
'Tan ', # 0xdc
'Wei ', # 0xdd
'Huai ', # 0xde
'Long ', # 0xdf
'Long ', # 0xe0
'Rui ', # 0xe1
'Li ', # 0xe2
'Lin ', # 0xe3
'Rang ', # 0xe4
'Ten ', # 0xe5
'Xun ', # 0xe6
'Yan ', # 0xe7
'Lei ', # 0xe8
'Ba ', # 0xe9
'[?] ', # 0xea
'Shi ', # 0xeb
'Ren ', # 0xec
'[?] ', # 0xed
'Zhuang ', # 0xee
'Zhuang ', # 0xef
'Sheng ', # 0xf0
'Yi ', # 0xf1
'Mai ', # 0xf2
'Ke ', # 0xf3
'Zhu ', # 0xf4
'Zhuang ', # 0xf5
'Hu ', # 0xf6
'Hu ', # 0xf7
'Kun ', # 0xf8
'Yi ', # 0xf9
'Hu ', # 0xfa
'Xu ', # 0xfb
'Kun ', # 0xfc
'Shou ', # 0xfd
'Mang ', # 0xfe
'Zun ', # 0xff
)
|
|
# Copyright 2014-2015 The Alive authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, operator
from common import *
from codegen import CVariable, CFieldAccess
def allTyEqual(vars, Ty):
c = [vars[0].type.typevar == Ty]
for i in range(1, len(vars)):
c += [vars[0].type == vars[i].type]
return c
def mkTyEqual(types):
return [types[0] == types[i] for i in range(1, len(types))]
def create_mem_if_needed(ptr, val, state, qvars):
# if we are dealing with an arbitrary pointer, assume it points to something
# that can (arbitrarily) hold 7 elements.
if isinstance(val.type, PtrType):
block_size = val.type.getSize()
elif isinstance(val.type, UnknownType) and val.type.myType == Type.Ptr:
block_size = val.type.types[Type.Ptr].getSize()
else:
return
num_elems = 7
size = block_size * num_elems
state.addInputMem(ptr, qvars, block_size, num_elems)
class Type:
Int, Ptr, Array, Unknown = list(range(4))
def __repr__(self):
return ''
def typeMismatch(self, expected):
raise ParseError('%s type required' % expected, str(self))
def ensureIntType(self, size = None):
self.typeMismatch('int')
def ensurePtrType(self):
self.typeMismatch('pointer')
def ensureFirstClass(self):
self.typeMismatch('first class')
def ensureIntPtrOrVector(self):
self.typeMismatch('int/ptr/vector')
################################
def getMostSpecificType(t1, t2):
def _ErrorOnTypeMismatch(cond):
if cond:
raise ParseError('Type mismatch: %s vs %s' % (t1, t2))
if isinstance(t1, UnknownType):
return t2
if isinstance(t2, UnknownType):
return t1
_ErrorOnTypeMismatch(t1.__class__ != t2.__class__)
if isinstance(t1, IntType):
_ErrorOnTypeMismatch(t1.defined and t2.defined and
t1.getSize() != t2.getSize())
return t1 if t1.defined else t2
if isinstance(t1, PtrType):
t1id = id(t1.type)
return t1 if id(getMostSpecificType(t1.type, t2.type)) == t1id else t2
assert False
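# For example: getMostSpecificType(IntType(8), IntType()) returns the defined
# IntType(8), since only one operand has a known size; passing types of
# different classes (e.g. IntType vs PtrType) raises ParseError.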
################################
class UnknownType(Type):
def __init__(self, d = 0):
self.types = {self.Int: IntType(),
self.Ptr: PtrType(depth = d),
self.Array: ArrayType(depth = d),
}
self.myType = self.Unknown
def ensureIntType(self, size = None):
return IntType(size)
def ensurePtrType(self):
return PtrType()
def ensureFirstClass(self):
# Restrict to ints, pointers, FPs, vectors
del self.types[self.Array]
return self
def ensureIntPtrOrVector(self):
# only ints, ptrs, or vectors of ints/ptrs
del self.types[self.Array]
return self
def setName(self, name):
self.typevar = Int('t_' + name)
for t in self.types.values():
t.setName(name)
def _getSizeUnknown(self, idx):
if idx == len(self.types)-1:
return self.types[idx].getSize()
return If(self.typevar == idx,
self.types[idx].getSize(),
self._getSizeUnknown(idx+1))
def getSize(self):
if self.myType != self.Unknown:
return self.types[self.myType].getSize()
return self._getSizeUnknown(0)
def getIntType(self, c):
if self.myType == self.Unknown and self.Int in self.types:
self.myType = self.Int
c += [self.typevar == self.Int]
if self.myType == self.Int:
return self.types[self.Int]
return None
def getPointeeType(self):
assert self.myType == self.Unknown or self.myType == self.Ptr
self.myType = self.Ptr
return self.types[self.Ptr].getPointeeType()
def getUnderlyingType(self):
assert self.myType == self.Ptr or self.myType == self.Array
return self.types[self.myType].getUnderlyingType()
def fixupTypes(self, types):
self.myType = types.get_interp(self.typevar).as_long()
self.types[self.myType].fixupTypes(types)
def __eq__(self, other):
if self.myType != self.Unknown:
return And(self.typevar == self.myType,
self.types[self.myType] == other)
for i,type in self.types.items():
if isinstance(other, type.__class__):
self.myType = i
return And(self.typevar == i, type == other)
assert isinstance(other, UnknownType)
c = []
for i,type in self.types.items():
if i in other.types:
c += [And(self.typevar == i,
other.typevar == i,
type == other.types[i])]
return mk_or(c)
def _intcmp(self, op, other):
c = []
op1 = self.getIntType(c)
if op1 is None:
return BoolVal(False)
return mk_and(c + [op(op1, other)])
def __lt__(self, other):
return self._intcmp(operator.lt, other)
def __gt__(self, other):
return self._intcmp(operator.gt, other)
def __ge__(self, other):
return self._intcmp(operator.ge, other)
def ensureTypeDepth(self, depth):
c = []
for i in range(len(self.types)):
c += [Or(self.types[i].ensureTypeDepth(depth), self.typevar != i)]
return mk_and(c)
def getTypeConstraints(self):
if self.myType != self.Unknown:
return self.types[self.myType].getTypeConstraints()
return mk_or([t.getTypeConstraints() for t in self.types.values()])
################################
class NamedType(UnknownType):
def __init__(self, name):
UnknownType.__init__(self)
self.type = UnknownType()
self.name = name
def __repr__(self):
return self.name
def ensureIntType(self, size = None):
self.myType = self.Int
if size != None:
self.types[self.Int] = IntType(size)
self.type = self.type.ensureIntType(size)
return self
def ensurePtrType(self):
self.myType = self.Ptr
self.type = self.type.ensurePtrType()
return self
def setName(self, name):
UnknownType.setName(self, self.name)
self.type.setName(name)
def getTypeConstraints(self):
return And(self.type == self,
UnknownType.getTypeConstraints(self),
self.type.getTypeConstraints())
################################
class IntType(Type):
def __init__(self, size = None):
if size == None:
self.defined = False
return
self.size = size
self.defined = True
assert isinstance(self.size, int)
def ensureIntType(self, size = None):
assert self.defined == False or size == None or size == self.size
if size != None:
self.size = size
self.defined = True
return self
def ensureFirstClass(self):
return self
def ensureIntPtrOrVector(self):
return self
def setName(self, name):
self.typevar = Int('t_' + name)
self.bitsvar = Int('size_' + name)
def __repr__(self):
if self.defined:
return 'i' + str(self.size)
return ''
def getSize(self):
if hasattr(self, 'size'):
return self.size
return self.bitsvar
def fixupTypes(self, types):
size = types.get_interp(self.bitsvar).as_long()
assert self.defined == False or self.size == size
self.size = size
def __eq__(self, other):
if isinstance(other, IntType):
return self.bitsvar == other.bitsvar
if isinstance(other, int):
return self.bitsvar == other
if isinstance(other, UnknownType):
return other == self
return BoolVal(False)
def _cmp(self, op, other):
if isinstance(other, IntType):
return op(self.bitsvar, other.bitsvar)
if isinstance(other, int):
return op(self.bitsvar, other)
if isinstance(other, UnknownType):
c = []
op2 = other.getIntType(c)
return mk_and(c + [op(self.bitsvar, op2.bitsvar)])
assert False
def __lt__(self, other):
return self._cmp(operator.lt, other)
def __gt__(self, other):
return self._cmp(operator.gt, other)
def __ge__(self, other):
return self._cmp(operator.ge, other)
def ensureTypeDepth(self, depth):
return BoolVal(depth == 0)
def getTypeConstraints(self):
c = [self.typevar == Type.Int]
if self.defined:
c += [self.bitsvar == self.getSize()]
else:
# Integers are assumed to be up to 64 bits.
# We bias towards 4/8 bits, as counterexamples become easier to understand
c += [Or(self.bitsvar == 8, self.bitsvar == 4,
And(self.bitsvar > 0, self.bitsvar <= 64))]
return And(c)
################################
class PtrType(Type):
def __init__(self, type = None, depth = 0):
if type is None:
# limit type nesting to 1 level
if depth >= 0:
type = IntType()
else:
type = UnknownType(depth+1)
self.type = type
assert isinstance(self.type, Type)
def ensurePtrType(self):
return self
def ensureFirstClass(self):
return self
def ensureIntPtrOrVector(self):
return self
def __repr__(self):
return str(self.type) + '*'
def setName(self, name):
self.typevar = Int('t_' + name)
self.type.setName('*' + name)
def getSize(self):
if hasattr(self, 'size'):
return self.size
return Int('ptrsize')
def getPointeeType(self):
return self.type
def getUnderlyingType(self):
return self.type
def __eq__(self, other):
if isinstance(other, PtrType):
return self.type == other.type
if isinstance(other, UnknownType):
return other == self
return BoolVal(False)
def fixupTypes(self, types):
self.size = get_ptr_size()
self.type.fixupTypes(types)
def ensureTypeDepth(self, depth):
return BoolVal(False) if depth == 0 else self.type.ensureTypeDepth(depth-1)
def getTypeConstraints(self):
return And(self.typevar == Type.Ptr,
self.type.getTypeConstraints())
################################
class ArrayType(Type):
def __init__(self, elems = None, type = None, depth = 0):
if elems is None:
assert type is None
# limit type nesting to 1 level
if depth >= 0:
type = IntType()
else:
type = UnknownType(depth+1)
elems = Input('#' + mk_unique_id(), IntType(4)) # enough for [1,7]
self.elems = TypeFixedValue(elems, 1, 7)
self.type = type
assert isinstance(self.type, Type)
def __repr__(self):
return '[%s x %s]' % (self.elems, self.type)
def setName(self, name):
self.typevar = Int('t_' + name)
self.type.setName('[' + name + ']')
self.elems.setName(name, 'elems')
def __eq__(self, other):
if isinstance(other, ArrayType):
return And(self.type == other.type, self.elems == other.elems)
if isinstance(other, UnknownType):
return other == self
return BoolVal(False)
def getSize(self):
return self.elems.getValue() * self.type.getSize()
def getUnderlyingType(self):
return self.type
def fixupTypes(self, types):
self.elems.fixupTypes(types)
self.type.fixupTypes(types)
def ensureTypeDepth(self, depth):
return BoolVal(False) if depth == 0 else self.type.ensureTypeDepth(depth-1)
def getTypeConstraints(self):
return And(self.typevar == Type.Array,
self.elems.getTypeConstraints(),
self.type.getTypeConstraints())
################################
class Value:
def __deepcopy__(self, m):
# Disable deep copy.
return self
def getName(self):
return self.name
def getUniqueName(self):
return self.name
def isConst(self):
return False
def setName(self, name):
self.name = name
if hasattr(self, 'type'):
self.type = copy.deepcopy(self.type)
self.type.setName(name)
for attr in dir(self):
a = getattr(self, attr)
if isinstance(a, TypeFixedValue):
a.setName(name, attr)
elif isinstance(a, Type) and attr != 'type':
a = copy.deepcopy(a)
a.setName('%s_%s_%s' % (name, attr, mk_unique_id()))
setattr(self, attr, a)
elif isinstance(a, list):
newa = []
for e in a:
if isinstance(e, Type):
e = copy.deepcopy(e)
e.setName('%s_%s_%s' % (name, attr, mk_unique_id()))
newa += [e]
setattr(self, attr, newa)
def getTypeConstraints(self):
c = []
for attr in dir(self):
a = getattr(self, attr)
if isinstance(a, (Type, Value)):
c += [a.getTypeConstraints()]
elif isinstance(a, list):
for e in a:
if isinstance(e, (Type, Value)):
c += [e.getTypeConstraints()]
return mk_and(c)
def fixupTypes(self, types):
for attr in dir(self):
a = getattr(self, attr)
if isinstance(a, (Type, Value)):
a.fixupTypes(types)
elif isinstance(a, list):
for e in a:
if isinstance(e, (Type, Value)):
e.fixupTypes(types)
def countUsers(self, m):
for attr in dir(self):
a = getattr(self, attr)
if isinstance(a, Value):
name = a.getUniqueName()
m[name] = m.get(name, 0) + 1
################################
class TypeFixedValue(Value):
def __init__(self, v, min, max):
assert isinstance(v, Value)
assert isinstance(v.type, IntType)
self.v = v
self.min = min
self.max = max
def setName(self, name, attr):
self.name = self.v.getName()
self.smtvar = Int('val_%s_%s' % (name, attr))
def getValue(self):
return getattr(self, 'val', self.smtvar)
def getType(self):
return self.v.type
def __repr__(self):
return str(self.v)
def __eq__(self, other):
assert isinstance(other, TypeFixedValue)
return self.smtvar == other.smtvar
def toSMT(self, defined, poison, state, qvars):
return self.val
def getTypeConstraints(self):
c = [self.v.getTypeConstraints()]
if self.v.isConst():
c += [self.smtvar == self.v.val]
if not self.v.type.defined:
c += [self.v.type == self.max.bit_length() + int(self.max >= 0)]
else:
if self.v.type.defined:
mymin = min(self.min, (1 << self.v.type.getSize()) - 1)
mymax = min(self.max, (1 << self.v.type.getSize()) - 1)
else:
mymin = self.min
mymax = self.max
c += [self.smtvar >= mymin, self.smtvar <= mymax]
if not self.v.type.defined:
c += [self.v.type >= self.max.bit_length() + int(self.max >= 0)]
return mk_and(c)
def fixupTypes(self, types):
self.v.fixupTypes(types)
self.val = types.get_interp(self.smtvar).as_long()
################################
class Input(Value):
def __init__(self, name, type):
self.type = type
self.setName(name)
assert isinstance(self.type, Type)
def __repr__(self):
return self.getName()
def toSMT(self, defined, poison, state, qvars):
v = BitVec(self.name, self.type.getSize())
create_mem_if_needed(v, self, state, [])
return v
def register_types(self, manager):
if self.name[0] == 'C':
min = IntType()
else:
min = UnknownType()
manager.register_type(self, self.type, min)
def _ensure_constant(self):
name = self.getName()
if name[0] != 'C':
raise AliveError('Input {0} used in an expression'.format(name))
def get_APInt_or_u64(self, manager):
return self.get_APInt(manager)
def get_APInt(self, manager):
self._ensure_constant()
return manager.get_cexp(self).arr('getValue', [])
def get_Value(self, manager):
assert False
# this should have been called through the manager
|
|
"""Test the stacking classifier and regressor."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
import pytest
import numpy as np
import scipy.sparse as sparse
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.datasets import load_iris
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import make_regression
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import scale
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import StackingRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import ignore_warnings
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_iris, y_iris = load_iris(return_X_y=True)
@pytest.mark.parametrize(
"cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
"final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, y_test = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
clf = StackingClassifier(
estimators=estimators,
final_estimator=final_estimator,
cv=cv,
passthrough=passthrough,
)
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
assert clf.score(X_test, y_test) > 0.8
X_trans = clf.transform(X_test)
expected_column_count = 10 if passthrough else 6
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
clf.set_params(lr="drop")
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
if final_estimator is None:
# LogisticRegression has decision_function method
clf.decision_function(X_test)
X_trans = clf.transform(X_test)
expected_column_count_drop = 7 if passthrough else 3
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
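# Column counts above: iris has 3 classes and 4 features, so the two base
# estimators contribute 3 columns each (predict_proba for 'lr',
# decision_function for 'svc') -> 6, plus the 4 raw features when
# passthrough=True -> 10. With 'lr' dropped, only the 3 'svc' columns remain
# (3, or 7 with passthrough).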
def test_stacking_classifier_drop_column_binary_classification():
# check that a column is dropped in binary classification
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, _ = train_test_split(
scale(X), y, stratify=y, random_state=42
)
# both classifiers implement 'predict_proba' and will both drop one column
estimators = [
("lr", LogisticRegression()),
("rf", RandomForestClassifier(random_state=42)),
]
clf = StackingClassifier(estimators=estimators, cv=3)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
# LinearSVC does not implement 'predict_proba' and will not drop one column
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
clf.set_params(estimators=estimators)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
def test_stacking_classifier_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=[("svc", LinearSVC(random_state=0))], final_estimator=rf, cv=5
)
clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5)
clf.fit(X_train, y_train)
clf_drop.fit(X_train, y_train)
assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
def test_stacking_regressor_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [("lr", "drop"), ("svr", LinearSVR(random_state=0))]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
reg = StackingRegressor(
estimators=[("svr", LinearSVR(random_state=0))], final_estimator=rf, cv=5
)
reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5)
reg.fit(X_train, y_train)
reg_drop.fit(X_train, y_train)
assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)])
@pytest.mark.parametrize(
"final_estimator, predict_params",
[
(None, {}),
(RandomForestRegressor(random_state=42), {}),
(DummyRegressor(), {"return_std": True}),
],
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
reg = StackingRegressor(
estimators=estimators,
final_estimator=final_estimator,
cv=cv,
passthrough=passthrough,
)
reg.fit(X_train, y_train)
result = reg.predict(X_test, **predict_params)
expected_result_length = 2 if predict_params else 1
if predict_params:
assert len(result) == expected_result_length
X_trans = reg.transform(X_test)
expected_column_count = 12 if passthrough else 2
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
reg.set_params(lr="drop")
reg.fit(X_train, y_train)
reg.predict(X_test)
X_trans = reg.transform(X_test)
expected_column_count_drop = 11 if passthrough else 1
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
@pytest.mark.parametrize("fmt", ["csc", "csr", "coo"])
def test_stacking_regressor_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_diabetes)).asformat(fmt), y_diabetes, random_state=42
)
estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
clf = StackingRegressor(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
@pytest.mark.parametrize("fmt", ["csc", "csr", "coo"])
def test_stacking_classifier_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_iris)).asformat(fmt), y_iris, random_state=42
)
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
def test_stacking_classifier_drop_binary_prob():
    # check that the classifier drops one of the probability columns for a
    # binary classification problem
# Select only the 2 first classes
X_, y_ = scale(X_iris[:100]), y_iris[:100]
estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())]
clf = StackingClassifier(estimators=estimators)
clf.fit(X_, y_)
X_meta = clf.transform(X_)
assert X_meta.shape[1] == 2
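# With only two classes, each estimator's predict_proba yields two redundant
# columns and one is dropped, so the meta-features are one column per
# estimator (2 in total here).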
class NoWeightRegressor(RegressorMixin, BaseEstimator):
def fit(self, X, y):
self.reg = DummyRegressor()
return self.reg.fit(X, y)
def predict(self, X):
return np.ones(X.shape[0])
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
self.clf = DummyClassifier(strategy="stratified")
return self.clf.fit(X, y)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[
(y_iris, {"estimators": None}, ValueError, "Invalid 'estimators' attribute,"),
(y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("svm", SVC(max_iter=5e4)),
],
"stack_method": "predict_proba",
},
ValueError,
"does not implement the method predict_proba",
),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("cor", NoWeightClassifier()),
]
},
TypeError,
"does not support sample weight",
),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("cor", LinearSVC(max_iter=5e4)),
],
"final_estimator": NoWeightClassifier(),
},
TypeError,
"does not support sample weight",
),
],
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
clf = StackingClassifier(**params, cv=3)
clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[
(
y_diabetes,
{"estimators": None},
ValueError,
"Invalid 'estimators' attribute,",
),
(y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
(
y_diabetes,
{"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]},
TypeError,
"does not support sample weight",
),
(
y_diabetes,
{
"estimators": [("lr", LinearRegression()), ("cor", LinearSVR())],
"final_estimator": NoWeightRegressor(),
},
TypeError,
"does not support sample weight",
),
],
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
reg = StackingRegressor(**params, cv=3)
reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0]))
@pytest.mark.parametrize(
"estimator, X, y",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression(random_state=0)),
("svm", LinearSVC(random_state=0)),
]
),
X_iris[:100],
y_iris[:100],
), # keep only classes 0 and 1
(
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR(random_state=0)),
]
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_randomness(estimator, X, y):
# checking that fixing the random state of the CV will lead to the same
# results
estimator_full = clone(estimator)
estimator_full.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
estimator_drop = clone(estimator)
estimator_drop.set_params(lr="drop")
estimator_drop.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
assert_allclose(
estimator_full.fit(X, y).transform(X)[:, 1:],
estimator_drop.fit(X, y).transform(X),
)
def test_stacking_classifier_stratify_default():
# check that we stratify the classes for the default CV
clf = StackingClassifier(
estimators=[
("lr", LogisticRegression(max_iter=1e4)),
("svm", LinearSVC(max_iter=1e4)),
]
)
# since iris is not shuffled, a simple k-fold would not contain the
# 3 classes during training
clf.fit(X_iris, y_iris)
@pytest.mark.parametrize(
"stacker, X, y",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC(random_state=42)),
],
final_estimator=LogisticRegression(),
cv=KFold(shuffle=True, random_state=42),
),
*load_breast_cancer(return_X_y=True),
),
(
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR(random_state=42)),
],
final_estimator=LinearRegression(),
cv=KFold(shuffle=True, random_state=42),
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_with_sample_weight(stacker, X, y):
    # check that sample weights have an influence on the fitting
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
n_half_samples = len(y) // 2
total_sample_weight = np.array(
[0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
)
X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
X, y, total_sample_weight, random_state=42
)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train)
y_pred_no_weight = stacker.predict(X_test)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
y_pred_unit_weight = stacker.predict(X_test)
assert_allclose(y_pred_no_weight, y_pred_unit_weight)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred_biased = stacker.predict(X_test)
assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invocations of fit
stacker = StackingClassifier(
estimators=[("lr", CheckingClassifier(expected_fit_params=["sample_weight"]))],
final_estimator=CheckingClassifier(expected_fit_params=["sample_weight"]),
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
"stacker, X, y",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC(random_state=42)),
],
final_estimator=LogisticRegression(),
),
*load_breast_cancer(return_X_y=True),
),
(
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR(random_state=42)),
],
final_estimator=LinearRegression(),
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_cv_influence(stacker, X, y):
# check that the stacking affects the fit of the final estimator but not
# the fit of the base estimators
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
stacker_cv_3 = clone(stacker)
stacker_cv_5 = clone(stacker)
stacker_cv_3.set_params(cv=3)
stacker_cv_5.set_params(cv=5)
stacker_cv_3.fit(X, y)
stacker_cv_5.fit(X, y)
# the base estimators should be identical
for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_):
assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
# the final estimator should be different
with pytest.raises(AssertionError, match="Not equal"):
assert_allclose(
stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_
)
@pytest.mark.parametrize(
"make_dataset, Stacking, Estimator",
[
(make_classification, StackingClassifier, LogisticRegression),
(make_regression, StackingRegressor, LinearRegression),
],
)
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
# Stacking supports estimators without `n_features_in_`. Regression test
# for #17353
class MyEstimator(Estimator):
"""Estimator without n_features_in_"""
def fit(self, X, y):
super().fit(X, y)
del self.n_features_in_
X, y = make_dataset(random_state=0, n_samples=100)
stacker = Stacking(estimators=[("lr", MyEstimator())])
msg = f"{Stacking.__name__} object has no attribute n_features_in_"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
# Does not raise
stacker.fit(X, y)
msg = "'MyEstimator' object has no attribute 'n_features_in_'"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
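# CompactSize examples for the two helpers above:
#   ser_compact_size(100)     == b'\x64'
#   ser_compact_size(253)     == b'\xfd\xfd\x00'          (0xfd marker + uint16)
#   ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00'  (0xfe marker + uint32)
# and deser_compact_size(BytesIO(ser_compact_size(n))) recovers n.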
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
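# Example: Bitcoin's genesis nBits 0x1d00ffff decode to
# 0x00ffff << (8 * (0x1d - 3)), i.e. the well-known target
# 0x00000000ffff0000000000000000000000000000000000000000000000000000.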
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
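# Typical round-trip: tx = FromHex(CTransaction(), hex_string) rebuilds an
# object from RPC output, and ToHex(tx) re-serializes it for submission
# (e.g. via sendrawtransaction).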
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness():
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness():
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness():
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
self.nTime = int(time.time())
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nTime = struct.unpack("<I", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<I", self.nTime)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.prevoutStake = COutPoint(0, 0xffffffff)
self.vchBlockSig = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.prevoutStake = COutPoint()
self.prevoutStake.deserialize(f)
self.vchBlockSig = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def is_pos(self):
return self.prevoutStake and (self.prevoutStake.hash != 0 or self.prevoutStake.n != 0xffffffff)
def solve_stake(self, stakeModifier, prevouts):
target = uint256_from_compact(self.nBits)
for prevout in prevouts:
nValue, txBlockTime = prevouts[prevout]
data = b""
data += ser_uint256(stakeModifier)
data += struct.pack("<I", txBlockTime)
data += prevout.serialize()
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
return True
return False
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
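    # Note: when a level has an odd number of hashes, the last one is paired
    # with itself (i2 == i), matching bitcoind's merkle tree construction.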
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
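# The shortid is the lower 48 bits of SipHash-2-4 of the (w)txid, keyed with
# the two 64-bit values derived from the block header and nonce in
# HeaderAndShortIDs.get_siphash_keys() below (BIP 152).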
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
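    # Example: absolute indexes [1, 3, 4] are stored differentially as
    # [1, 1, 0] by from_absolute(), and to_absolute() reverses the mapping.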
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
# self.strSubVer = deser_string(f)
self.strSubVer = "!! KumaCoin breaks Bitcoin Protocol !!"
f.read(26) # subversion field
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += b"\x96\xc3\x63\xd0\xbe\x5b\xe8\x48\x11\x2f\x41\x6e\x74\x65\x6e\x6e\x61\x3a\x30\x2e\x38\x2e\x39\x2e\x32\x2f" # dirty hack
# r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
# r += struct.pack("<b", self.nRelay) # KumaCoin does not support newer protocol
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
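# Usage sketch (illustrative; assumes CBlockLocator exposes a vHave list of known block
# hashes, and the values below are made up): leave hashstop at 0 to request as many
# headers as the peer will return.
#   req = msg_getheaders()
#   req.locator.vHave = [int(known_hash_hex, 16)]
#   req.hashstop = 0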
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
from collections import namedtuple
from datetime import datetime, time, timedelta
from io import BytesIO
from operator import attrgetter
import pytz
from flask import current_app
from PIL import Image
from sqlalchemy import Date, cast
from sqlalchemy.orm import joinedload
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkType
from indico.modules.events import Event
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.entries import TimetableEntry
from indico.modules.events.timetable.util import find_latest_entry_end_dt
from indico.util.caching import memoize_request
from indico.util.date_time import now_utc, server_to_utc
from indico.util.iterables import group_list
from indico.util.string import crc32
ROOM_PHOTO_DIMENSIONS = (290, 170)
TempReservationOccurrence = namedtuple('ReservationOccurrenceTmp', ('start_dt', 'end_dt', 'reservation'))
TempReservationConcurrentOccurrence = namedtuple('ReservationOccurrenceTmp', ('start_dt', 'end_dt', 'reservations'))
@memoize_request
def rb_check_user_access(user):
"""Check if the user has access to the room booking system."""
from indico.modules.rb import rb_settings
if rb_is_admin(user):
return True
if not rb_settings.acls.get('authorized_principals'): # everyone has access
return True
return rb_settings.acls.contains_user('authorized_principals', user)
@memoize_request
def rb_is_admin(user):
"""Check if the user is a room booking admin."""
from indico.modules.rb import rb_settings
if user.is_admin:
return True
return rb_settings.acls.contains_user('admin_principals', user)
def build_rooms_spritesheet():
from indico.modules.rb import rb_cache
from indico.modules.rb.models.rooms import Room
image_width, image_height = ROOM_PHOTO_DIMENSIONS
rooms = Room.query.filter(Room.photo).options(joinedload('photo')).all()
room_count = len(rooms)
sprite_width = (image_width * (room_count + 1)) # +1 for the placeholder
sprite_height = image_height
sprite = Image.new(mode='RGB', size=(sprite_width, sprite_height), color=(0, 0, 0))
# Placeholder image at position 0
no_photo_path = 'web/static/images/rooms/large_photos/NoPhoto.jpg'
no_photo_image = Image.open(os.path.join(current_app.root_path, no_photo_path))
image = no_photo_image.resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS)
sprite.paste(image, (0, 0))
mapping = {}
for count, room in enumerate(rooms, start=1):
location = image_width * count
image = Image.open(BytesIO(room.photo.data)).resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS)
sprite.paste(image, (location, 0))
mapping[room.id] = count
output = BytesIO()
sprite.save(output, 'JPEG')
value = output.getvalue()
token = crc32(value)
rb_cache.set_many({
'rooms-sprite': value,
'rooms-sprite-mapping': mapping,
'rooms-sprite-token': token,
})
return token
def get_resized_room_photo(room):
photo = Image.open(BytesIO(room.photo.data)).resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS)
output = BytesIO()
photo.save(output, 'JPEG')
return output.getvalue()
def remove_room_spritesheet_photo(room):
from indico.modules.rb import rb_cache
mapping = rb_cache.get('rooms-sprite-mapping')
if not mapping or room.id not in mapping:
return
del mapping[room.id]
rb_cache.set('rooms-sprite-mapping', mapping)
def group_by_occurrence_date(occurrences, sort_by=None):
key = lambda obj: obj.start_dt.date()
if sort_by is None:
sort_by = key
return group_list(occurrences, key=key, sort_by=sort_by)
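# Illustrative result (assuming group_list returns a date -> occurrences mapping, which is
# how serialize_occurrences below consumes it):
#   {date(2021, 5, 3): [occ_a, occ_b], date(2021, 5, 4): [occ_c]}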
def serialize_occurrences(data):
from indico.modules.rb.schemas import reservation_occurrences_schema
return {dt.isoformat(): reservation_occurrences_schema.dump(data) for dt, data in data.items()}
def serialize_blockings(data):
from indico.modules.rb.schemas import simple_blockings_schema
return {dt.isoformat(): simple_blockings_schema.dump(data) for dt, data in data.items()}
def serialize_nonbookable_periods(data):
from indico.modules.rb.schemas import nonbookable_periods_schema
return {dt.isoformat(): nonbookable_periods_schema.dump(data) for dt, data in data.items()}
def serialize_unbookable_hours(data):
from indico.modules.rb.schemas import bookable_hours_schema
return [bookable_hours_schema.dump(d) for d in data]
def serialize_concurrent_pre_bookings(data):
from indico.modules.rb.schemas import concurrent_pre_bookings_schema
return {dt.isoformat(): concurrent_pre_bookings_schema.dump(data) for dt, data in data.items()}
def get_linked_object(type_, id_):
if type_ == LinkType.event:
return Event.get(id_, is_deleted=False)
elif type_ == LinkType.contribution:
return (Contribution.query
.filter(Contribution.id == id_,
~Contribution.is_deleted,
Contribution.event.has(is_deleted=False))
.first())
elif type_ == LinkType.session_block:
return (SessionBlock.query
.filter(SessionBlock.id == id_,
SessionBlock.session.has(db.and_(~Session.is_deleted,
Session.event.has(is_deleted=False))))
.first())
def is_booking_start_within_grace_period(start_dt, user, allow_admin=False):
from indico.modules.rb import rb_settings
if allow_admin and rb_is_admin(user):
return True
default_tz = pytz.timezone(config.DEFAULT_TIMEZONE)
start_dt_localized = default_tz.localize(start_dt)
grace_period = rb_settings.get('grace_period')
if grace_period is None:
today = server_to_utc(datetime.now()).astimezone(default_tz).date()
return start_dt_localized.date() >= today
start_dt_utc = start_dt_localized.astimezone(pytz.utc)
grace_period = timedelta(hours=grace_period)
return start_dt_utc >= now_utc() - grace_period
def serialize_booking_details(booking):
from indico.modules.rb.operations.blockings import filter_blocked_rooms, get_rooms_blockings, group_blocked_rooms
from indico.modules.rb.operations.bookings import (get_booking_occurrences, get_existing_room_occurrences,
group_blockings, group_nonbookable_periods)
from indico.modules.rb.operations.misc import get_rooms_nonbookable_periods, get_rooms_unbookable_hours
from indico.modules.rb.schemas import reservation_details_schema, reservation_occurrences_schema_with_permissions
attributes = reservation_details_schema.dump(booking)
date_range, occurrences = get_booking_occurrences(booking)
booking_details = dict(attributes)
occurrences_by_type = dict(bookings={}, cancellations={}, rejections={}, other={}, blockings={},
unbookable_hours={}, nonbookable_periods={}, overridable_blockings={})
booking_details['occurrences'] = occurrences_by_type
booking_details['date_range'] = [dt.isoformat() for dt in date_range]
for dt, [occ] in occurrences.items():
serialized_occ = reservation_occurrences_schema_with_permissions.dump([occ])
if occ.is_cancelled:
occurrences_by_type['cancellations'][dt.isoformat()] = serialized_occ
elif occ.is_rejected:
occurrences_by_type['rejections'][dt.isoformat()] = serialized_occ
occurrences_by_type['bookings'][dt.isoformat()] = serialized_occ if occ.is_valid else []
start_dt = datetime.combine(booking.start_dt, time.min)
end_dt = datetime.combine(booking.end_dt, time.max)
unbookable_hours = get_rooms_unbookable_hours([booking.room]).get(booking.room.id, [])
other_bookings = get_existing_room_occurrences(booking.room, start_dt, end_dt, booking.repeat_frequency,
booking.repeat_interval, skip_booking_id=booking.id)
blocked_rooms = get_rooms_blockings([booking.room], start_dt.date(), end_dt.date())
overridable_blockings = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True)).get(booking.room.id, [])
nonoverridable_blockings = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True)).get(booking.room.id, [])
nonbookable_periods = get_rooms_nonbookable_periods([booking.room], start_dt, end_dt).get(booking.room.id, [])
nonbookable_periods_grouped = group_nonbookable_periods(nonbookable_periods, date_range)
occurrences_by_type['other'] = serialize_occurrences(group_by_occurrence_date(other_bookings))
occurrences_by_type['blockings'] = serialize_blockings(group_blockings(nonoverridable_blockings, date_range))
occurrences_by_type['overridable_blockings'] = serialize_blockings(group_blockings(overridable_blockings,
date_range))
occurrences_by_type['unbookable_hours'] = serialize_unbookable_hours(unbookable_hours)
occurrences_by_type['nonbookable_periods'] = serialize_nonbookable_periods(nonbookable_periods_grouped)
return booking_details
def serialize_availability(availability):
for data in availability.values():
data['blockings'] = serialize_blockings(data.get('blockings', {}))
data['overridable_blockings'] = serialize_blockings(data.get('overridable_blockings', {}))
data['nonbookable_periods'] = serialize_nonbookable_periods(data.get('nonbookable_periods', {}))
data['unbookable_hours'] = serialize_unbookable_hours(data.get('unbookable_hours', {}))
data['concurrent_pre_bookings'] = serialize_concurrent_pre_bookings(data.get('concurrent_pre_bookings', {}))
data.update({k: serialize_occurrences(data[k]) if k in data else {}
for k in ('candidates', 'conflicting_candidates', 'pre_bookings', 'bookings', 'conflicts',
'pre_conflicts', 'rejections', 'cancellations')})
return availability
def generate_spreadsheet_from_occurrences(occurrences):
"""Generate spreadsheet data from a given booking occurrence list.
:param occurrences: The booking occurrences to include in the spreadsheet
"""
headers = ['Room', 'Booking ID', 'Booked for', 'Reason', 'Occurrence start', 'Occurrence end']
rows = [{'Room': occ.reservation.room.full_name,
'Booking ID': occ.reservation.id,
'Booked for': occ.reservation.booked_for_name,
'Reason': occ.reservation.booking_reason,
'Occurrence start': occ.start_dt,
'Occurrence end': occ.end_dt}
for occ in occurrences]
return headers, rows
def _find_first_entry_start_dt(event, day):
    """Find the start time of the first timetable entry on a given day."""
if not (event.start_dt_local.date() <= day <= event.end_dt_local.date()):
raise ValueError('Day out of event bounds.')
entries = event.timetable_entries.filter(TimetableEntry.parent_id.is_(None),
cast(TimetableEntry.start_dt.astimezone(event.tzinfo), Date) == day).all()
return min(entries, key=attrgetter('start_dt')).start_dt.astimezone(event.tzinfo) if entries else None
def _find_latest_entry_end_dt(event, day):
dt = find_latest_entry_end_dt(event, day)
if dt:
return dt.astimezone(event.tzinfo)
def get_booking_params_for_event(event):
"""Get a set of RB interface parameters suitable for this event.
These parameters can then be used to construct a URL that will lead to a
pre-filled search that matches the start/end times for a given day.
:param event: `Event` object
"""
is_single_day = event.start_dt_local.date() == event.end_dt_local.date()
params = {
'link_type': 'event',
'link_id': event.id,
'text': f'#{event.room.id}' if event.room else None,
}
all_times = {day: (_find_first_entry_start_dt(event, day), _find_latest_entry_end_dt(event, day))
for day in event.iter_days(tzinfo=event.tzinfo)}
# if the timetable is empty on a given day, use (start_dt, end_dt) of the event
all_times = [((day, (event.start_dt_local, event.end_dt_local)) if times[0] is None else (day, times))
for day, times in all_times.items()]
same_times = len({times for (_, times) in all_times}) == 1
if is_single_day or same_times:
params['sd'] = event.start_dt_local.date().isoformat()
if event.start_dt_local.time() < event.end_dt_local.time():
# if we have suitable times we provide enough data to immediately run a search.
# XXX: if filtersAreSet also checked for times we could provide dates/recurrence
# as well even when we don't know suitable times.. but that would require extra
# code to handle the case of a custom RB interface where no times are used at all
params.update({
'ed': None if is_single_day else event.end_dt_local.date().isoformat(),
'recurrence': 'single' if is_single_day else 'daily',
'st': event.start_dt_local.strftime('%H:%M'),
'et': event.end_dt_local.strftime('%H:%M'),
'number': 1,
'interval': 'week',
})
return {
'type': 'same_times',
'params': params
}
else:
time_info = sorted(
(day, {
# if we have a proper start/end time, we provide all args to search
'number': 1,
'interval': 'week',
'recurrence': 'single',
'sd': day.isoformat(),
'st': start.strftime('%H:%M'),
'et': end.strftime('%H:%M')
} if start.time() < end.time() else {
# if not (empty days or event end time < event start time), we just
# populate the day and let the user specify the times manually
'sd': day.isoformat(),
}) for day, (start, end) in all_times
)
return {
'type': 'mixed_times',
'params': params,
'time_info': time_info
}
def get_prebooking_collisions(reservation):
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
valid_occurrences = reservation.occurrences.filter(ReservationOccurrence.is_valid).all()
return ReservationOccurrence.find_overlapping_with(reservation.room, valid_occurrences, reservation.id).all()
|
|
# coding: utf-8
"""
Classes to describe object that help distinguishing events.
"""
__all__ = ["Channel", "Category"]
from order.unique import UniqueObject, unique_tree
from order.mixins import CopyMixin, AuxDataMixin, TagMixin, SelectionMixin, LabelMixin
from order.util import to_root_latex
@unique_tree(plural="categories", parents=-1, deep_children=True, deep_parents=True)
class Category(UniqueObject, CopyMixin, AuxDataMixin, TagMixin, SelectionMixin, LabelMixin):
""" __init__(name, id="+", channel=None, categories=None, label=None, label_short=None, selection=None, selection_mode=None, tags=None, aux=None, context=None)
Class that describes an analysis category. This is not to be confused with an analysis
:py:class:`Channel`. While the definition of a channel can be understood as being fixed by e.g.
the final state of an event, a category describes an arbitrary sub phase-space. Therefore, a
category can be (optionally) uniquely assigned to a channel - it *has* a channel.
Also, categories can be nested, i.e., they can have child and parent categories.
**Arguments**
*channel* should be a reference to a :py:class:`Channel` instance or *None*. Child categories
are initialized with *categories*.
*label* and *label_short* are forwarded to the :py:class:`~order.mixins.LabelMixin`, *selection*
and *selection_mode* to the :py:class:`~order.mixins.SelectionMixin`, *tags* to the
:py:class:`~order.mixins.TagMixin`, *aux* to the :py:class:`~order.mixins.AuxDataMixin`, and
*name*, *id* (defaulting to an auto id) and *context* to the
:py:class:`~order.unique.UniqueObject` constructor.
**Copy behavior**
All attributes are copied **except** for references to child and parent categories. If set, the
*channel* reference is kept. Also note the copy behavior of
:py:class:`~order.unique.UniqueObject`'s.
**Example**
.. code-block:: python
import order as od
# toggle the default selection mode to Root-style selection string concatenation
od.Category.default_selection_mode = "root"
cat = od.Category("4j",
label="4 jets",
label_short="4j",
selection="nJets == 4",
)
# note that no id needs to be passed to the Category constructor
# its id is set automatically based on the maximum id of currently existing category
# instances plus one (which is - of course - one in this example)
cat.id
# -> 1
cat.label
# -> "4 jets"
# add a channel
ch = od.Channel("dilepton", 1,
label="Dilepton",
label_short="DL"
)
cat.channel = ch
# the category is also assigned to the channel now
cat in ch.categories
# -> True
# and we can create the full category label
cat.full_label
# -> "Dilepton, 4 jets"
# and the short version of it
cat.full_label_short
# -> "DL, 4j"
# add a sub category
cat2 = cat.add_category("4j_2b",
label=cat.label + ", 2 b-tags",
)
# set the selection string (could also be set in add_category above)
cat2.selection = [cat.selection, "nBTags == 2"]
cat2.selection
# -> "(nJets == 4) && (nBTags == 2)"
**Members**
.. py:attribute:: channel
type: Channel, None
The channel instance of this category, or *None* when not set.
.. py:attribute:: full_label
type: string
read-only
       The label of this category, prefixed with the channel label if a channel is set.
.. py:attribute:: full_label_short
type: string
read-only
       The short label of this category, prefixed with the short channel label if a channel is set.
.. py:attribute:: full_label_root
type: string
read-only
       The label of this category, prefixed with the channel label if a channel is set, converted
       to ROOT-style latex.
.. py:attribute:: full_label_short_root
type: string
read-only
       The short label of this category, prefixed with the short channel label if a channel is
       set, converted to ROOT-style latex.
"""
# attributes for copying
copy_specs = [{"attr": "channel", "ref": True}] + UniqueObject.copy_specs + \
AuxDataMixin.copy_specs + TagMixin.copy_specs + SelectionMixin.copy_specs + \
LabelMixin.copy_specs
def __init__(self, name, id=UniqueObject.AUTO_ID, channel=None, categories=None, label=None,
label_short=None, selection=None, selection_mode=None, tags=None, aux=None,
context=None):
UniqueObject.__init__(self, name, id, context=context)
CopyMixin.__init__(self)
AuxDataMixin.__init__(self, aux=aux)
TagMixin.__init__(self, tags=tags)
SelectionMixin.__init__(self, selection=selection, selection_mode=selection_mode)
LabelMixin.__init__(self, label=label, label_short=label_short)
# register empty attributes
self._channel = None
# set initial values
if channel is not None:
self.channel = channel
# set initial child categories
if categories is not None:
self.extend_categories(categories)
@property
def channel(self):
# channel getter
return self._channel
@channel.setter
def channel(self, channel):
# channel setter
if channel is not None and not isinstance(channel, Channel):
raise TypeError("invalid channel type: {}".format(channel))
# remove this category from the current channels' categories index
if self._channel:
self._channel.categories.remove(self)
# add this category to the channels' categories index
if channel:
channel.categories.add(self)
self._channel = channel
@property
def full_label(self):
if self.channel:
return "{}, {}".format(self.channel.label, self.label)
else:
return self.label
@property
def full_label_short(self):
if self.channel:
return "{}, {}".format(self.channel.label_short, self.label_short)
else:
return self.label_short
@property
def full_label_root(self):
return to_root_latex(self.full_label)
@property
def full_label_short_root(self):
return to_root_latex(self.full_label_short)
@unique_tree(parents=1, deep_children=True, deep_parents=True)
@unique_tree(cls=Category, plural="categories", parents=False, deep_children=True)
class Channel(UniqueObject, CopyMixin, AuxDataMixin, TagMixin, LabelMixin):
"""
    An object that describes an analysis channel, often defined by a particular decay *channel* that
results in distinct final state objects. A channel can have parent-child relations to other
channels with one parent per child, and child relations to categories.
**Arguments**
References to contained categories are initialized with *categories*. *label* and *label_short*
are passed to the :py:class:`~order.mixins.LabelMixin`, *tags* to the
:py:class:`~order.mixins.TagMixin`, *aux* to the :py:class:`~order.mixins.AuxDataMixin`, and
*name*, *id* and *context* to the :py:class:`~order.unique.UniqueObject` constructor.
**Copy behavior**
All attributes are copied **except** for references to child channels and the parent channel as
well as categories. Also note the copy behavior of :py:class:`~order.unique.UniqueObject`'s.
**Example**
.. code-block:: python
import order as od
# create a channel
SL_channel = od.Channel("SL", 1, label="lepton+jets")
# add child channels
e_channel = SL_channel.add_channel("e", 1, label="e+jets")
mu_channel = SL_channel.add_channel("mu", 2)
len(SL_channel.channels)
# -> 2
len(e_channel.parent_channels)
# -> 1
e_channel.parent_channel
# -> SL_channel
# add categories
cat_e_2j = e_channel.add_category("e_2j",
label="2 jets",
selection="nJets == 2",
)
# print the category label
cat_e_2j.full_label
# -> "e+jets, 2 jets"
**Members**
"""
# attributes for copying
copy_specs = UniqueObject.copy_specs + AuxDataMixin.copy_specs + TagMixin.copy_specs + \
LabelMixin.copy_specs
def __init__(self, name, id, categories=None, label=None, label_short=None, tags=None, aux=None,
context=None):
UniqueObject.__init__(self, name, id, context=context)
CopyMixin.__init__(self)
AuxDataMixin.__init__(self, aux=aux)
TagMixin.__init__(self, tags=tags)
LabelMixin.__init__(self, label=label, label_short=label_short)
# set initial categories
if categories is not None:
self.extend_categories(categories)
def add_category(self, *args, **kwargs):
"""
Adds a child category. See :py:meth:`UniqueObjectIndex.add` for more info. Also sets the
*channel* of the added category to *this* instance.
"""
category = self.categories.add(*args, **kwargs)
# update the category's channel
category.channel = None
category._channel = self
return category
def remove_category(self, *args, **kwargs):
"""
        Removes a child category. See :py:meth:`UniqueObjectIndex.remove` for more info. Also resets
        the *channel* of the removed category.
"""
category = self.categories.remove(*args, **kwargs)
# reset the category's channel
if category:
category._channel = None
return category
|
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
# Requires the following custom properties and values:
# otp_type: totp/hotp
# issuer: Gluu Inc
# otp_conf_file: /etc/certs/otp_configuration.json
#
# These are optional custom properties and values:
# label: Gluu OTP
# qr_options: { width: 400, height: 400 }
# registration_uri: https://ce-dev.gluu.org/identity/register
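#
# The otp_conf_file above is expected to contain JSON. A minimal sketch, inferred from the
# keys read in loadOtpConfiguration and the generate*/validate* methods below (note that the
# HOTP block is looked up under the key "htop", exactly as spelled in loadOtpConfiguration);
# the numbers are example values only:
# {
#   "htop": { "keyLength": 20, "digits": 6, "lookAheadWindow": 10 },
#   "totp": { "keyLength": 20, "digits": 6, "timeStep": 30, "hmacShaAlgorithm": "sha1" }
# }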
import jarray
import json
import sys
from com.google.common.io import BaseEncoding
from com.lochbridge.oath.otp import HOTP
from com.lochbridge.oath.otp import HOTPValidator
from com.lochbridge.oath.otp import HmacShaAlgorithm
from com.lochbridge.oath.otp import TOTP
from com.lochbridge.oath.otp.keyprovisioning import OTPAuthURIBuilder
from com.lochbridge.oath.otp.keyprovisioning import OTPKey
from com.lochbridge.oath.otp.keyprovisioning.OTPKey import OTPType
from java.security import SecureRandom
from java.util import Arrays
from java.util.concurrent import TimeUnit
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from org.xdi.model.custom.script.type.auth import PersonAuthenticationType
from org.xdi.oxauth.security import Identity
from org.xdi.oxauth.service import UserService, AuthenticationService, SessionIdService
from org.xdi.oxauth.util import ServerUtil
from org.xdi.service.cdi.util import CdiUtil
from org.xdi.util import StringHelper
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "OTP. Initialization"
if not configurationAttributes.containsKey("otp_type"):
print "OTP. Initialization. Property otp_type is mandatory"
return False
self.otpType = configurationAttributes.get("otp_type").getValue2()
        if self.otpType not in ["hotp", "totp"]:
print "OTP. Initialization. Property value otp_type is invalid"
return False
if not configurationAttributes.containsKey("issuer"):
print "OTP. Initialization. Property issuer is mandatory"
return False
self.otpIssuer = configurationAttributes.get("issuer").getValue2()
self.customLabel = None
if configurationAttributes.containsKey("label"):
self.customLabel = configurationAttributes.get("label").getValue2()
self.customQrOptions = {}
if configurationAttributes.containsKey("qr_options"):
self.customQrOptions = configurationAttributes.get("qr_options").getValue2()
self.registrationUri = None
if configurationAttributes.containsKey("registration_uri"):
self.registrationUri = configurationAttributes.get("registration_uri").getValue2()
validOtpConfiguration = self.loadOtpConfiguration(configurationAttributes)
if not validOtpConfiguration:
return False
print "OTP. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "OTP. Destroy"
print "OTP. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
self.setRequestScopedParameters(identity)
if step == 1:
print "OTP. Authenticate for step 1"
authenticated_user = self.processBasicAuthentication(credentials)
if authenticated_user == None:
return False
otp_auth_method = "authenticate"
# Uncomment this block if you need to allow user second OTP registration
#enrollment_mode = ServerUtil.getFirstValue(requestParameters, "loginForm:registerButton")
#if StringHelper.isNotEmpty(enrollment_mode):
# otp_auth_method = "enroll"
if otp_auth_method == "authenticate":
user_enrollments = self.findEnrollments(authenticated_user.getUserId())
if len(user_enrollments) == 0:
otp_auth_method = "enroll"
print "OTP. Authenticate for step 1. There is no OTP enrollment for user '%s'. Changing otp_auth_method to '%s'" % (authenticated_user.getUserId(), otp_auth_method)
if otp_auth_method == "enroll":
print "OTP. Authenticate for step 1. Setting count steps: '%s'" % 3
identity.setWorkingParameter("otp_count_login_steps", 3)
print "OTP. Authenticate for step 1. otp_auth_method: '%s'" % otp_auth_method
identity.setWorkingParameter("otp_auth_method", otp_auth_method)
return True
elif step == 2:
print "OTP. Authenticate for step 2"
authenticationService = CdiUtil.bean(AuthenticationService)
user = authenticationService.getAuthenticatedUser()
if user == None:
print "OTP. Authenticate for step 2. Failed to determine user name"
return False
session_id_validation = self.validateSessionId(identity)
if not session_id_validation:
return False
# Restore state from session
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
if otp_auth_method == 'enroll':
auth_result = ServerUtil.getFirstValue(requestParameters, "auth_result")
if not StringHelper.isEmpty(auth_result):
                    print "OTP. Authenticate for step 2. User did not complete OTP enrollment"
return False
print "OTP. Authenticate for step 2. Skipping this step during enrollment"
return True
otp_auth_result = self.processOtpAuthentication(requestParameters, user.getUserId(), identity, otp_auth_method)
print "OTP. Authenticate for step 2. OTP authentication result: '%s'" % otp_auth_result
return otp_auth_result
elif step == 3:
print "OTP. Authenticate for step 3"
authenticationService = CdiUtil.bean(AuthenticationService)
user = authenticationService.getAuthenticatedUser()
if user == None:
                print "OTP. Authenticate for step 3. Failed to determine user name"
return False
session_id_validation = self.validateSessionId(identity)
if not session_id_validation:
return False
# Restore state from session
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
if otp_auth_method != 'enroll':
return False
otp_auth_result = self.processOtpAuthentication(requestParameters, user.getUserId(), identity, otp_auth_method)
print "OTP. Authenticate for step 3. OTP authentication result: '%s'" % otp_auth_result
return otp_auth_result
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
self.setRequestScopedParameters(identity)
if step == 1:
print "OTP. Prepare for step 1"
return True
elif step == 2:
print "OTP. Prepare for step 2"
session_id_validation = self.validateSessionId(identity)
if not session_id_validation:
return False
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
print "OTP. Prepare for step 2. otp_auth_method: '%s'" % otp_auth_method
if otp_auth_method == 'enroll':
authenticationService = CdiUtil.bean(AuthenticationService)
user = authenticationService.getAuthenticatedUser()
if user == None:
                    print "OTP. Prepare for step 2. Failed to load user entry"
return False
if self.otpType == "hotp":
otp_secret_key = self.generateSecretHotpKey()
otp_enrollment_request = self.generateHotpSecretKeyUri(otp_secret_key, self.otpIssuer, user.getAttribute("displayName"))
elif self.otpType == "totp":
otp_secret_key = self.generateSecretTotpKey()
otp_enrollment_request = self.generateTotpSecretKeyUri(otp_secret_key, self.otpIssuer, user.getAttribute("displayName"))
else:
print "OTP. Prepare for step 2. Unknown OTP type: '%s'" % self.otpType
return False
print "OTP. Prepare for step 2. Prepared enrollment request for user: '%s'" % user.getUserId()
identity.setWorkingParameter("otp_secret_key", self.toBase64Url(otp_secret_key))
identity.setWorkingParameter("otp_enrollment_request", otp_enrollment_request)
return True
elif step == 3:
print "OTP. Prepare for step 3"
session_id_validation = self.validateSessionId(identity)
if not session_id_validation:
return False
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
print "OTP. Prepare for step 3. otp_auth_method: '%s'" % otp_auth_method
if otp_auth_method == 'enroll':
return True
return False
def getExtraParametersForStep(self, configurationAttributes, step):
return Arrays.asList("otp_auth_method", "otp_count_login_steps", "otp_secret_key", "otp_enrollment_request")
def getCountAuthenticationSteps(self, configurationAttributes):
identity = CdiUtil.bean(Identity)
if identity.isSetWorkingParameter("otp_count_login_steps"):
return StringHelper.toInteger("%s" % identity.getWorkingParameter("otp_count_login_steps"))
else:
return 2
def getPageForStep(self, configurationAttributes, step):
if step == 2:
identity = CdiUtil.bean(Identity)
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
            print "OTP. Get page for step 2. otp_auth_method: '%s'" % otp_auth_method
if otp_auth_method == 'enroll':
return "/auth/otp/enroll.xhtml"
else:
return "/auth/otp/otplogin.xhtml"
elif step == 3:
return "/auth/otp/otplogin.xhtml"
return ""
def logout(self, configurationAttributes, requestParameters):
return True
def setRequestScopedParameters(self, identity):
if self.registrationUri != None:
identity.setWorkingParameter("external_registration_uri", self.registrationUri)
if self.customLabel != None:
identity.setWorkingParameter("qr_label", self.customLabel)
identity.setWorkingParameter("qr_options", self.customQrOptions)
def loadOtpConfiguration(self, configurationAttributes):
print "OTP. Load OTP configuration"
if not configurationAttributes.containsKey("otp_conf_file"):
return False
otp_conf_file = configurationAttributes.get("otp_conf_file").getValue2()
# Load configuration from file
f = open(otp_conf_file, 'r')
try:
otpConfiguration = json.loads(f.read())
except:
print "OTP. Load OTP configuration. Failed to load configuration from file:", otp_conf_file
return False
finally:
f.close()
# Check configuration file settings
try:
self.hotpConfiguration = otpConfiguration["htop"]
self.totpConfiguration = otpConfiguration["totp"]
hmacShaAlgorithm = self.totpConfiguration["hmacShaAlgorithm"]
hmacShaAlgorithmType = None
if StringHelper.equalsIgnoreCase(hmacShaAlgorithm, "sha1"):
hmacShaAlgorithmType = HmacShaAlgorithm.HMAC_SHA_1
elif StringHelper.equalsIgnoreCase(hmacShaAlgorithm, "sha256"):
hmacShaAlgorithmType = HmacShaAlgorithm.HMAC_SHA_256
elif StringHelper.equalsIgnoreCase(hmacShaAlgorithm, "sha512"):
hmacShaAlgorithmType = HmacShaAlgorithm.HMAC_SHA_512
else:
print "OTP. Load OTP configuration. Invalid TOTP HMAC SHA algorithm: '%s'" % hmacShaAlgorithm
self.totpConfiguration["hmacShaAlgorithmType"] = hmacShaAlgorithmType
except:
print "OTP. Load OTP configuration. Invalid configuration file '%s' format. Exception: '%s'" % (otp_conf_file, sys.exc_info()[1])
return False
return True
def processBasicAuthentication(self, credentials):
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.authenticate(user_name, user_password)
if not logged_in:
return None
find_user_by_uid = userService.getUser(user_name)
if find_user_by_uid == None:
print "OTP. Process basic authentication. Failed to find user '%s'" % user_name
return None
return find_user_by_uid
def findEnrollments(self, user_name, skipPrefix = True):
result = []
userService = CdiUtil.bean(UserService)
user = userService.getUser(user_name, "oxExternalUid")
if user == None:
print "OTP. Find enrollments. Failed to find user"
return result
user_custom_ext_attribute = userService.getCustomAttribute(user, "oxExternalUid")
if user_custom_ext_attribute == None:
return result
otp_prefix = "%s:" % self.otpType
otp_prefix_length = len(otp_prefix)
for user_external_uid in user_custom_ext_attribute.getValues():
index = user_external_uid.find(otp_prefix)
if index != -1:
if skipPrefix:
enrollment_uid = user_external_uid[otp_prefix_length:]
else:
enrollment_uid = user_external_uid
result.append(enrollment_uid)
return result
def validateSessionId(self, identity):
session_id = CdiUtil.bean(SessionIdService).getSessionIdFromCookie()
if StringHelper.isEmpty(session_id):
print "OTP. Validate session id. Failed to determine session_id"
return False
otp_auth_method = identity.getWorkingParameter("otp_auth_method")
        if otp_auth_method not in ['enroll', 'authenticate']:
print "OTP. Validate session id. Failed to authenticate user. otp_auth_method: '%s'" % otp_auth_method
return False
return True
def processOtpAuthentication(self, requestParameters, user_name, identity, otp_auth_method):
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
userService = CdiUtil.bean(UserService)
otpCode = ServerUtil.getFirstValue(requestParameters, "loginForm:otpCode")
if StringHelper.isEmpty(otpCode):
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to authenticate. OTP code is empty")
print "OTP. Process OTP authentication. otpCode is empty"
return False
if otp_auth_method == "enroll":
# Get key from session
otp_secret_key_encoded = identity.getWorkingParameter("otp_secret_key")
if otp_secret_key_encoded == None:
print "OTP. Process OTP authentication. OTP secret key is invalid"
return False
otp_secret_key = self.fromBase64Url(otp_secret_key_encoded)
if self.otpType == "hotp":
validation_result = self.validateHotpKey(otp_secret_key, 1, otpCode)
if (validation_result != None) and validation_result["result"]:
print "OTP. Process HOTP authentication during enrollment. otpCode is valid"
# Store HOTP Secret Key and moving factor in user entry
otp_user_external_uid = "hotp:%s;%s" % ( otp_secret_key_encoded, validation_result["movingFactor"] )
# Add otp_user_external_uid to user's external GUID list
find_user_by_external_uid = userService.addUserAttribute(user_name, "oxExternalUid", otp_user_external_uid)
if find_user_by_external_uid != None:
return True
print "OTP. Process HOTP authentication during enrollment. Failed to update user entry"
elif self.otpType == "totp":
validation_result = self.validateTotpKey(otp_secret_key, otpCode)
if (validation_result != None) and validation_result["result"]:
print "OTP. Process TOTP authentication during enrollment. otpCode is valid"
                    # Store TOTP Secret Key in user entry
otp_user_external_uid = "totp:%s" % otp_secret_key_encoded
# Add otp_user_external_uid to user's external GUID list
find_user_by_external_uid = userService.addUserAttribute(user_name, "oxExternalUid", otp_user_external_uid)
if find_user_by_external_uid != None:
return True
print "OTP. Process TOTP authentication during enrollment. Failed to update user entry"
elif otp_auth_method == "authenticate":
user_enrollments = self.findEnrollments(user_name)
if len(user_enrollments) == 0:
print "OTP. Process OTP authentication. There is no OTP enrollment for user '%s'" % user_name
facesMessages.add(FacesMessage.SEVERITY_ERROR, "There is no valid OTP user enrollments")
return False
if self.otpType == "hotp":
for user_enrollment in user_enrollments:
user_enrollment_data = user_enrollment.split(";")
otp_secret_key_encoded = user_enrollment_data[0]
# Get current moving factor from user entry
moving_factor = StringHelper.toInteger(user_enrollment_data[1])
otp_secret_key = self.fromBase64Url(otp_secret_key_encoded)
                    # Validate HOTP
validation_result = self.validateHotpKey(otp_secret_key, moving_factor, otpCode)
if (validation_result != None) and validation_result["result"]:
print "OTP. Process HOTP authentication during authentication. otpCode is valid"
otp_user_external_uid = "hotp:%s;%s" % ( otp_secret_key_encoded, moving_factor )
new_otp_user_external_uid = "hotp:%s;%s" % ( otp_secret_key_encoded, validation_result["movingFactor"] )
# Update moving factor in user entry
find_user_by_external_uid = userService.replaceUserAttribute(user_name, "oxExternalUid", otp_user_external_uid, new_otp_user_external_uid)
if find_user_by_external_uid != None:
return True
print "OTP. Process HOTP authentication during authentication. Failed to update user entry"
elif self.otpType == "totp":
for user_enrollment in user_enrollments:
otp_secret_key = self.fromBase64Url(user_enrollment)
# Validate TOTP
validation_result = self.validateTotpKey(otp_secret_key, otpCode)
if (validation_result != None) and validation_result["result"]:
print "OTP. Process TOTP authentication during authentication. otpCode is valid"
return True
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to authenticate. OTP code is invalid")
print "OTP. Process OTP authentication. OTP code is invalid"
return False
# Shared HOTP/TOTP methods
def generateSecretKey(self, keyLength):
bytes = jarray.zeros(keyLength, "b")
secureRandom = SecureRandom()
secureRandom.nextBytes(bytes)
return bytes
# HOTP methods
def generateSecretHotpKey(self):
keyLength = self.hotpConfiguration["keyLength"]
return self.generateSecretKey(keyLength)
def generateHotpKey(self, secretKey, movingFactor):
digits = self.hotpConfiguration["digits"]
hotp = HOTP.key(secretKey).digits(digits).movingFactor(movingFactor).build()
return hotp.value()
def validateHotpKey(self, secretKey, movingFactor, totpKey):
lookAheadWindow = self.hotpConfiguration["lookAheadWindow"]
digits = self.hotpConfiguration["digits"]
        hotpValidationResult = HOTPValidator.lookAheadWindow(lookAheadWindow).validate(secretKey, movingFactor, digits, totpKey)
        if hotpValidationResult.isValid():
            return { "result": True, "movingFactor": hotpValidationResult.getNewMovingFactor() }
return { "result": False, "movingFactor": None }
def generateHotpSecretKeyUri(self, secretKey, issuer, userDisplayName):
digits = self.hotpConfiguration["digits"]
secretKeyBase32 = self.toBase32(secretKey)
otpKey = OTPKey(secretKeyBase32, OTPType.HOTP)
label = issuer + " %s" % userDisplayName
otpAuthURI = OTPAuthURIBuilder.fromKey(otpKey).label(label).issuer(issuer).digits(digits).build()
return otpAuthURI.toUriString()
# TOTP methods
def generateSecretTotpKey(self):
keyLength = self.totpConfiguration["keyLength"]
return self.generateSecretKey(keyLength)
def generateTotpKey(self, secretKey):
digits = self.totpConfiguration["digits"]
timeStep = self.totpConfiguration["timeStep"]
hmacShaAlgorithmType = self.totpConfiguration["hmacShaAlgorithmType"]
totp = TOTP.key(secretKey).digits(digits).timeStep(TimeUnit.SECONDS.toMillis(timeStep)).hmacSha(hmacShaAlgorithmType).build()
return totp.value()
def validateTotpKey(self, secretKey, totpKey):
localTotpKey = self.generateTotpKey(secretKey)
if StringHelper.equals(localTotpKey, totpKey):
return { "result": True }
return { "result": False }
def generateTotpSecretKeyUri(self, secretKey, issuer, userDisplayName):
digits = self.totpConfiguration["digits"]
timeStep = self.totpConfiguration["timeStep"]
secretKeyBase32 = self.toBase32(secretKey)
otpKey = OTPKey(secretKeyBase32, OTPType.TOTP)
label = issuer + " %s" % userDisplayName
otpAuthURI = OTPAuthURIBuilder.fromKey(otpKey).label(label).issuer(issuer).digits(digits).timeStep(TimeUnit.SECONDS.toMillis(timeStep)).build()
return otpAuthURI.toUriString()
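    # The two generate*SecretKeyUri methods above build a standard "otpauth://" key
    # provisioning URI, roughly of this shape (illustrative example values only):
    #   otpauth://totp/Gluu%20Inc%20John?secret=<base32 key>&issuer=Gluu%20Inc&digits=6&period=30
    # which the enrollment page can render as a QR code.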
# Utility methods
def toBase32(self, bytes):
return BaseEncoding.base32().omitPadding().encode(bytes)
def toBase64Url(self, bytes):
return BaseEncoding.base64Url().encode(bytes)
def fromBase64Url(self, chars):
return BaseEncoding.base64Url().decode(chars)
|
|
import time
import threading
import logging
import os
import json
import select
import re
from queue import Queue
from importlib import import_module
import serial
LOGGER = logging.getLogger(__name__)
class Gateway(object):
"""Base implementation for a RFlink Gateway."""
# pylint: disable=too-many-instance-attributes
def __init__(self, event_callback=None):
"""Setup Gateway."""
self.queue = Queue()
self.lock = threading.Lock()
self.event_callback = event_callback
self.sensors = {}
self.metric = True # if true - use metric, if false - use imperial
self.debug = True # if true - print all received messages
_const = import_module('rflink.const_43')
self.const = _const
def setup_logging(self):
"""Set the logging level to debug."""
if self.debug:
logging.basicConfig(level=logging.DEBUG)
def logic(self, data):
"""Parse the data and respond to it appropriately.
        The response is returned to the caller and has to be sent to the
        gateway as an RFlink command string.
"""
try:
msg = Packet(data)
except ValueError:
return None
if msg.packet_type == self.const.MessageType.received:
return msg.decoded
return None
def alert(self, nid):
"""Tell anyone who wants to know that a sensor was updated.
"""
if self.event_callback is not None:
try:
self.event_callback('sensor_update', nid)
except Exception as exception: # pylint: disable=W0703
LOGGER.exception(exception)
def handle_queue(self, queue=None):
"""Handle queue.
If queue is not empty, get the function and any args and kwargs
from the queue. Run the function and return output.
"""
if queue is None:
queue = self.queue
if not queue.empty():
func, args, kwargs = queue.get()
reply = func(*args, **kwargs)
queue.task_done()
return reply
return None
def fill_queue(self, func, args=None, kwargs=None, queue=None):
"""Put a function in a queue.
Put the function 'func', a tuple of arguments 'args' and a dict
of keyword arguments 'kwargs', as a tuple in the queue.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if queue is None:
queue = self.queue
queue.put((func, args, kwargs))
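# Usage sketch (illustrative; assumes the rflink.const_43 module imported in
# Gateway.__init__ is available, and the example line is made up): fill_queue() enqueues
# work and handle_queue() runs it, which is how the SerialGateway run() loop processes input.
#   gw = Gateway()
#   gw.fill_queue(gw.logic, ('20;00;SomeDevice;ID=0001;CMD=ON;',))
#   gw.handle_queue()  # runs logic(...) and returns its result (or None)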
class SerialGateway(Gateway, threading.Thread):
"""Serial gateway for RFlink."""
# pylint: disable=too-many-arguments
def __init__(self, port, event_callback=None,
baud=57600, timeout=3.0,
reconnect_timeout=10.0):
"""Setup serial gateway."""
threading.Thread.__init__(self)
Gateway.__init__(self, event_callback)
self.serial = None
self.port = port
self.baud = baud
self.timeout = timeout
self.reconnect_timeout = reconnect_timeout
self._stop_event = threading.Event()
def connect(self):
"""Connect to the serial port."""
if self.serial:
LOGGER.info('Already connected to %s', self.port)
return True
try:
LOGGER.info('Trying to connect to %s', self.port)
self.serial = serial.Serial(self.port, self.baud,
timeout=self.timeout)
if self.serial.isOpen():
LOGGER.info('%s is open...', self.serial.name)
LOGGER.info('Connected to %s', self.port)
else:
LOGGER.info('%s is not open...', self.serial.name)
self.serial = None
return False
except serial.SerialException:
LOGGER.error('Unable to connect to %s', self.port)
return False
return True
def disconnect(self):
"""Disconnect from the serial port."""
if self.serial is not None:
LOGGER.info('Disconnecting from %s', self.serial.name)
self.serial.close()
self.serial = None
def stop(self):
"""Stop the background thread."""
self.disconnect()
LOGGER.info('Stopping thread')
self._stop_event.set()
def run(self):
"""Background thread that reads messages from the gateway."""
self.setup_logging()
while not self._stop_event.is_set():
if self.serial is None and not self.connect():
time.sleep(self.reconnect_timeout)
continue
response = self.handle_queue()
if response is not None:
self.send(response.encode())
            try:
                line = self.readlineCR()
            except serial.SerialException:
                LOGGER.exception('Serial exception')
                continue
            except TypeError:
                # pyserial has a bug that causes a TypeError to be thrown when
                # the port disconnects instead of a SerialException
                self.disconnect()
                continue
            except ValueError:
                LOGGER.warning(
                    'Error decoding message from gateway, '
                    'probably received bad byte.')
                continue
            if not line:
                continue
            if self.debug:
                print(line)
            self.fill_queue(self.logic, (line,))
def send(self, packet):
"""Write a Message to the gateway."""
if not packet or not isinstance(packet, str):
LOGGER.warning('Missing string! No message sent!')
return
# Lock to make sure only one thread writes at a time to serial port.
with self.lock:
self.serial.write(packet.encode())
    def readlineCR(self):
        """Read a line from the serial port and strip the CR/LF terminator."""
        while True:
            line = self.serial.readline().decode()
            if line:
                return line.strip('\r\n')
class Packet:
def __init__(self, data=None):
"""Setup message."""
self.packet_type = 0
self.device_name = ''
self.message_id = 0
self.device_id = ''
self.payload = '' # All data except payload are integers
self.decoded = {}
if data is not None:
self.payload = data
self.decode()
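    # Example of the line format decode() below expects (illustrative; the values are made
    # up, following the "type;id;device;KEY=VAL;...;" layout it parses):
    #   "20;2D;UPM;ID=0001;TEMP=00cf;HUM=16;"
    # yields device_name 'UPM', device_id '0001' and, since TEMP is hex tenths of a degree,
    # decoded {'TEMP': '20.7', 'HUM': '16'}.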
def decode(self):
packet = re.split(';', self.payload)
        LOGGER.debug('Packet contains %d items', len(packet))
if len(packet) > 3:
self.packet_type = packet[0]
del packet[0]
self.message_id = packet[0]
del packet[0]
self.device_name = packet[0]
del packet[0]
del packet[-1]
            if self.device_name == 'DEBUG':
                logging.debug(self)
for k in packet:
data = re.split('=', k)
if data[0] == 'ID':
self.device_id = data[1]
del data[:-1]
                if len(data) >= 2:
                    LOGGER.debug('Decoding field %s', data[0])
                    if data[0] in ('TEMP', 'WINCHL', 'WINTMP', 'RAIN',
                                   'RAINRATE', 'WINSP', 'AWINSP', 'WINGS'):
                        # these fields arrive as hexadecimal tenths of a unit
                        data[1] = str(int(data[1], 16) / 10)
                        LOGGER.debug('Converted value: %s', data[1])
                    self.decoded[data[0]] = data[1]
def encode(self):
"""Encode a command string from message."""
try:
return ';'.join([str(f) for f in [
self.device_name,
self.device_id,
self.payload,
]]) + '\n\r'
except ValueError:
LOGGER.exception('Error encoding message to gateway')
return None
def copy(self, **kwargs):
"""Copy a message, optionally replace attributes with kwargs."""
        msg = Packet(self.encode())
for key, val in kwargs.items():
setattr(msg, key, val)
return msg
|
|
import logging
from datetime import datetime
import gevent
import gevent.util
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from gevent.queue import Queue, Empty
from pyramid.httpexceptions import HTTPUnauthorized, HTTPFound
from pyramid.security import remember, forget, NO_PERMISSION_REQUIRED
from pyramid.view import view_config, view_defaults
from pyramid_apispec.helpers import add_pyramid_paths
from channelstream import operations, utils, patched_json as json, __version__
from channelstream.server_state import get_state, STATS
from channelstream.validation import schemas
log = logging.getLogger(__name__)
class SharedUtils(object):
def __init__(self, request):
self.request = request
def get_channel_info(
self,
req_channels=None,
include_history=True,
include_connections=False,
include_users=False,
exclude_channels=None,
return_public_state=False,
):
"""
        Get channel information for req_channels, or for all channels
        if req_channels is None.
        :param include_history: (bool) include message history for the channel
        :param include_connections: (bool) include connection list for users
        :param include_users: (bool) include user list for the channel
        :param exclude_channels: (list) exclude specific channels from the info
            list (handy to exclude a global broadcast channel)
        :param return_public_state: (bool) expose only each user's public state
            instead of their full state
"""
server_state = get_state()
if not exclude_channels:
exclude_channels = []
start_time = datetime.utcnow()
json_data = {"channels": {}, "users": []}
users_to_list = set()
# select everything for empty list
if req_channels is None:
channel_instances = server_state.channels.values()
else:
channel_instances = [
server_state.channels[c]
for c in req_channels
if c in server_state.channels
]
for channel_inst in channel_instances:
if channel_inst.name in exclude_channels:
continue
channel_info = channel_inst.get_info(
include_history=include_history, include_users=include_users
)
json_data["channels"][channel_inst.name] = channel_info
users_to_list.update(channel_info["users"])
for username in users_to_list:
user = server_state.users[username]
json_data["users"].append(
{
"user": username,
"state": user.state
if not return_public_state
else user.public_state,
}
)
log.info("info time: %s" % (datetime.utcnow() - start_time))
return json_data
def get_common_info(self, channels, info_config):
"""
Return channel information based on requirements
:param channels:
:param info_config:
:return:
"""
include_history = info_config.get("include_history", True)
include_users = info_config.get("include_users", True)
exclude_channels = info_config.get("exclude_channels", [])
include_connections = info_config.get("include_connections", False)
return_public_state = info_config.get("return_public_state", False)
channels_info = self.get_channel_info(
channels,
include_history=include_history,
include_connections=include_connections,
include_users=include_users,
exclude_channels=exclude_channels,
return_public_state=return_public_state,
)
return channels_info
@view_config(route_name="connect", request_method="POST", renderer="json")
def connect(request):
"""
Connect view
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "connects users to the server"
description: ""
operationId: "connect"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/ConnectBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
schema:
$ref: '#/definitions/ConnectBody'
"""
shared_utils = SharedUtils(request)
schema = schemas.ConnectBodySchema(context={"request": request})
json_body = schema.load(request.json_body)
channels = sorted(json_body["channels"])
connection, user = operations.connect(
username=json_body["username"],
fresh_user_state=json_body["fresh_user_state"],
state_public_keys=json_body["state_public_keys"],
update_user_state=json_body["user_state"],
conn_id=json_body["conn_id"],
channels=channels,
channel_configs=json_body["channel_configs"],
)
# get info config for channel information
channels_info = shared_utils.get_common_info(channels, json_body["info"])
return {
"conn_id": connection.id,
"state": user.state,
"username": user.username,
"public_state": user.public_state,
"channels": channels,
"channels_info": channels_info,
}
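# Illustrative request body for the connect view above (example values; the exact wire
# field names are defined by ConnectBodySchema, these mirror the keys read after
# schema.load):
#   {"username": "alice", "conn_id": null, "fresh_user_state": {"status": "online"},
#    "user_state": {}, "state_public_keys": ["status"], "channels": ["general"],
#    "channel_configs": {}, "info": {"include_history": true, "include_users": true}}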
@view_config(route_name="subscribe", request_method="POST", renderer="json")
def subscribe(request):
"""
Subscribe view
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Subscribes connection to new channels"
description: ""
operationId: "subscribe"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/SubscribeBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
server_state = get_state()
shared_utils = SharedUtils(request)
schema = schemas.SubscribeBodySchema(context={"request": request})
json_body = schema.load(request.json_body)
connection = server_state.connections.get(json_body["conn_id"])
channels = json_body["channels"]
channel_configs = json_body.get("channel_configs", {})
subscribed_to = operations.subscribe(
connection=connection, channels=channels, channel_configs=channel_configs
)
# get info config for channel information
current_channels = connection.channels
channels_info = shared_utils.get_common_info(current_channels, json_body["info"])
return {
"channels": current_channels,
"channels_info": channels_info,
"subscribed_to": sorted(subscribed_to),
}
@view_config(route_name="unsubscribe", request_method="POST", renderer="json")
def unsubscribe(request):
"""
Unsubscribe view
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Removes connection from channels"
description: ""
operationId: "unsubscribe"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/UnsubscribeBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
server_state = get_state()
shared_utils = SharedUtils(request)
schema = schemas.UnsubscribeBodySchema(context={"request": request})
json_body = schema.load(request.json_body)
connection = server_state.connections.get(json_body["conn_id"])
unsubscribed_from = operations.unsubscribe(
connection=connection, unsubscribe_channels=json_body["channels"]
)
# get info config for channel information
current_channels = connection.channels
channels_info = shared_utils.get_common_info(current_channels, json_body["info"])
return {
"channels": current_channels,
"channels_info": channels_info,
"unsubscribed_from": sorted(unsubscribed_from),
}
@view_config(
route_name="api_listen",
request_method="GET",
renderer="json",
permission=NO_PERMISSION_REQUIRED,
)
def listen(request):
"""
Handles long polling connections
---
get:
tags:
- "Client API"
summary: "Handles long polling connections"
description: ""
operationId: "listen"
produces:
- "application/json"
responses:
200:
description: "Success"
"""
server_state = get_state()
config = request.registry.settings
conn_id = utils.uuid_from_string(request.params.get("conn_id"))
connection = server_state.connections.get(conn_id)
if not connection:
raise HTTPUnauthorized()
# attach a queue to connection
connection.queue = Queue()
connection.deliver_catchup_messages()
request.response.app_iter = yield_response(request, connection, config)
return request.response
def yield_response(request, connection, config):
messages = await_data(connection, config)
connection.mark_activity()
cb = request.params.get("callback")
if cb:
resp = cb + "(" + json.dumps(messages) + ")"
else:
resp = json.dumps(messages)
yield resp.encode("utf8")
def await_data(connection, config):
messages = []
# block for first message - wake up after a while
try:
messages.extend(connection.queue.get(timeout=config["wake_connections_after"]))
except Empty:
pass
# drain any additional messages that are already enqueued, waiting up to 0.25s per batch
while True:
try:
messages.extend(connection.queue.get(timeout=0.25))
except Empty:
break
return messages
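# Hedged sketch (not part of the original API) of the same two-phase drain
# pattern used by await_data() above: block once for the first batch, then
# keep pulling with a short timeout until the queue runs dry. The function is
# defined but never called here; the timeout values are illustrative.
def _drain_queue_example(queue, first_timeout=30, drain_timeout=0.25):
    collected = []
    try:
        # wait for the first batch of messages
        collected.extend(queue.get(timeout=first_timeout))
    except Empty:
        return collected
    while True:
        try:
            # opportunistically grab anything else that is already enqueued
            collected.extend(queue.get(timeout=drain_timeout))
        except Empty:
            break
    return collected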
@view_config(route_name="user_state", request_method="POST", renderer="json")
def user_state(request):
"""
Sets the state of a user object
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "set the status of specific user"
description: ""
operationId: "user_state"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/UserStateBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
server_state = get_state()
schema = schemas.UserStateBodySchema(context={"request": request})
data = schema.load(request.json_body)
user_inst = server_state.users[data["user"]]
# can be empty list!
if data["state_public_keys"] is not None:
user_inst.state_public_keys = data["state_public_keys"]
changed = operations.change_user_state(
user_inst=user_inst, user_state=data["user_state"]
)
return {
"user_state": user_inst.state,
"changed_state": changed,
"public_keys": user_inst.state_public_keys,
}
def shared_messages(request):
server_state = get_state()
schema = schemas.MessageBodySchema(context={"request": request}, many=True)
data = schema.load(request.json_body)
data = [m for m in data if m.get("channel") or m.get("pm_users")]
for msg in data:
gevent.spawn(operations.pass_message, msg, server_state.stats)
return list(data)
# prepare v1 version
# @view_config(route_name="api_v1_messages", request_method="POST", renderer="json")
def messages_post(request):
"""
Send message to channels and/or users
---
post:
security:
- APIKeyHeader: []
tags:
- "V1 API (future stable)"
summary: "Send message to channels and/or users"
description: ""
operationId: "message"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/MessagesBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
return shared_messages(request)
@view_config(route_name="message", request_method="POST", renderer="json")
def message(request):
"""
Send message to channels and/or users
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Send message to channels and/or users"
description: ""
operationId: "message"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/MessagesBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
return shared_messages(request)
@view_config(route_name="message", request_method="PATCH", renderer="json")
def messages_patch(request):
"""
Edit existing message in history and emit changes
---
patch:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Edit existing message in history and emit changes"
description: ""
operationId: "edit_messages"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/MessageEditBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
schema = schemas.MessageEditBodySchema(context={"request": request}, many=True)
data = schema.load(request.json_body)
for msg in data:
gevent.spawn(operations.edit_message, msg)
return data
@view_config(route_name="message", request_method="DELETE", renderer="json")
def messages_delete(request):
"""
Delete message from history and emit changes
---
delete:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Delete message from history and emit changes"
description: ""
operationId: "messages_delete"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
required: true
schema:
$ref: "#/definitions/MessagesDeleteBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
schema = schemas.MessagesDeleteBodySchema(context={"request": request}, many=True)
data = schema.load(request.json_body)
for msg in data:
gevent.spawn(operations.delete_message, msg)
return data
@view_config(
route_name="api_disconnect", renderer="json", permission=NO_PERMISSION_REQUIRED
)
def disconnect(request):
"""
Permanently remove connection from server
---
get:
tags:
- "Client API"
summary: "Permanently remove connection from server"
description: ""
operationId: "disconnect"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: query
schema:
type: string
name: "conn_id"
description: "Connection Id"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
post:
tags:
- "Client API"
summary: "Permanently remove connection from server"
description: ""
operationId: "disconnect"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
schema:
$ref: "#/definitions/DisconnectBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
schema = schemas.DisconnectBodySchema(context={"request": request})
if request.method != "POST":
payload = {"conn_id": request.GET.get("conn_id")}
else:
json_body = request.json_body
payload = {"conn_id": json_body.get("conn_id")}
data = schema.load(payload)
return operations.disconnect(conn_id=data["conn_id"])
@view_config(route_name="channel_config", request_method="POST", renderer="json")
def channel_config(request):
"""
Set channel configuration
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Set channel configuration"
description: ""
operationId: "channel_config"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
schema:
$ref: "#/definitions/ChannelConfigBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
shared_utils = SharedUtils(request)
deserialized = {}
schema = schemas.ChannelConfigSchema(context={"request": request})
json_body = request.json_body
for k in json_body.keys():
deserialized[k] = schema.load(json_body[k])
operations.set_channel_config(channel_configs=deserialized)
channels_info = shared_utils.get_channel_info(
deserialized.keys(), include_history=False, include_users=False
)
return channels_info
@view_config(route_name="info", renderer="json")
def info(request):
"""
Returns channel information
---
post:
security:
- APIKeyHeader: []
tags:
- "API"
summary: "Returns channel information"
description: ""
operationId: "info"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Request JSON body"
schema:
$ref: "#/definitions/ChannelInfoBody"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
server_state = get_state()
shared_utils = SharedUtils(request)
if not request.body:
req_channels = server_state.channels.keys()
info_config = {
"include_history": True,
"include_users": True,
"exclude_channels": [],
"include_connections": True,
}
else:
schema = schemas.ChannelInfoBodySchema(context={"request": request})
data = schema.load(request.json_body)
# get info config for channel information
info_config = data.get("info") or {}
req_channels = info_config.get("channels", None)
info_config["include_connections"] = info_config.get(
"include_connections", True
)
channels_info = shared_utils.get_common_info(req_channels, info_config)
return channels_info
@view_defaults(route_name="action", renderer="json", permission="admin")
class ServerViews(object):
def __init__(self, request):
self.request = request
self.utils = SharedUtils(request)
@view_config(route_name="admin", renderer="templates/admin.jinja2")
def admin(self):
"""
Serve admin page html
:return:
"""
return {}
@view_config(
route_name="admin_action", match_param=("action=debug",), renderer="string"
)
def admin_debug(self):
return "\n".join(gevent.util.format_run_info())
@view_config(
route_name="admin_action",
match_param=("action=sign_in",),
renderer="templates/sign_in.jinja2",
permission=NO_PERMISSION_REQUIRED,
)
def admin_sign_in(self):
if self.request.method == "POST":
admin_user = self.request.registry.settings["admin_user"]
admin_secret = self.request.registry.settings["admin_secret"]
username = self.request.POST.get("username", "").strip()
password = self.request.POST.get("password", "").strip()
if username == admin_user and password == admin_secret:
headers = remember(self.request, admin_user)
url = self.request.route_url("admin")
return HTTPFound(url, headers=headers)
else:
# make potential brute forcing infeasible
gevent.sleep(0.5)
return {}
@view_config(
route_name="admin_action",
match_param=("action=sign_out",),
renderer="string",
permission=NO_PERMISSION_REQUIRED,
)
def admin_sign_out(self):
headers = forget(self.request)
url = self.request.route_url("admin_action", action="sign_in")
return HTTPFound(url, headers=headers)
@view_config(
route_name="admin_json", renderer="json", request_method=("POST", "GET")
)
def admin_json(self):
"""
Admin json
---
get:
tags:
- "Admin API"
summary: "Return server information in json format for admin panel
purposes"
description: ""
operationId: "admin_json"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Response info configuration"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
post:
tags:
- "Admin API"
summary: "Return server information in json format for admin panel
purposes"
description: ""
operationId: "admin_json"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Response info configuration"
responses:
422:
description: "Unprocessable Entity"
200:
description: "Success"
"""
server_state = get_state()
uptime = datetime.utcnow() - STATS["started_on"]
uptime = str(uptime).split(".")[0]
remembered_user_count = len(server_state.users)
active_users = [
user for user in server_state.users.values() if user.connections
]
unique_user_count = len(active_users)
total_connections = sum([len(user.connections) for user in active_users])
channels_info = self.utils.get_common_info(
None,
{
"include_history": True,
"include_users": True,
"exclude_channels": [],
"include_connections": True,
},
)
return {
"remembered_user_count": remembered_user_count,
"unique_user_count": unique_user_count,
"total_connections": total_connections,
"total_channels": len(server_state.channels.keys()),
"total_messages": server_state.stats["total_messages"],
"total_unique_messages": server_state.stats["total_unique_messages"],
"channels": channels_info["channels"],
"users": [user.get_info(include_connections=True) for user in active_users],
"uptime": uptime,
"version": str(__version__),
}
@view_config(route_name="openapi_spec", renderer="json")
def api_spec(self):
"""
OpenApi 2.0 spec
---
get:
tags:
- "OpenApi 2.0 spec"
summary: "Return openapi spec
purposes"
description: ""
operationId: "api_spec"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
responses:
200:
description: "Success"
"""
spec = APISpec(
title="Channelstream API",
version="0.7.0",
openapi_version="2.0.0",
plugins=(MarshmallowPlugin(),),
)
spec.components.schema("ConnectBody", schema=schemas.ConnectBodySchema)
spec.components.schema("SubscribeBody", schema=schemas.SubscribeBodySchema)
spec.components.schema("UnsubscribeBody", schema=schemas.UnsubscribeBodySchema)
spec.components.schema("UserStateBody", schema=schemas.UserStateBodySchema)
spec.components.schema(
"MessagesBody", schema=schemas.MessageBodySchema(many=True)
)
spec.components.schema("MessageBody", schema=schemas.MessageBodySchema())
spec.components.schema(
"MessageEditBody", schema=schemas.MessageEditBodySchema(many=True)
)
spec.components.schema(
"MessagesDeleteBody", schema=schemas.MessagesDeleteBodySchema(many=True)
)
spec.components.schema("DisconnectBody", schema=schemas.DisconnectBodySchema)
spec.components.schema("ChannelConfigBody", schema=schemas.ChannelConfigSchema)
spec.components.schema("ChannelInfoBody", schema=schemas.ChannelInfoBodySchema)
# api
add_pyramid_paths(spec, "connect", request=self.request)
add_pyramid_paths(spec, "subscribe", request=self.request)
add_pyramid_paths(spec, "unsubscribe", request=self.request)
add_pyramid_paths(spec, "user_state", request=self.request)
add_pyramid_paths(spec, "message", request=self.request)
add_pyramid_paths(spec, "channel_config", request=self.request)
add_pyramid_paths(spec, "info", request=self.request)
add_pyramid_paths(spec, "api_listen", request=self.request)
add_pyramid_paths(spec, "api_listen_ws", request=self.request)
add_pyramid_paths(spec, "api_disconnect", request=self.request)
add_pyramid_paths(spec, "admin_json", request=self.request)
spec_dict = spec.to_dict()
spec_dict["securityDefinitions"] = {
"APIKeyHeader": {
"type": "apiKey",
"name": "X-Channelstream-Secret",
"in": "header",
}
}
return spec_dict
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Utilities for running or stopping processes"""
import errno
import logging
import os
import pty
import select
import shlex
import signal
import subprocess
import sys
import termios
import tty
from contextlib import contextmanager
from typing import Dict, List
import psutil
from lockfile.pidlockfile import PIDLockFile
from airflow.configuration import conf
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
# When killing processes, time to wait after issuing a SIGTERM before issuing a
# SIGKILL.
DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM = conf.getint('core', 'KILLED_TASK_CLEANUP_TIME')
def reap_process_group(
pgid: int,
logger,
sig: 'signal.Signals' = signal.SIGTERM,
timeout: int = DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM,
) -> Dict[int, int]:
"""
Tries really hard to terminate all processes in the group (including grandchildren). Will send
sig (SIGTERM by default) to the process group identified by pgid. If any process is alive after
timeout, a SIGKILL will be sent.
:param pgid: process group id to kill
:param logger: log handler
:param sig: signal type
:param timeout: how much time a process has to terminate
"""
returncodes = {}
def on_terminate(p):
logger.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode)
returncodes[p.pid] = p.returncode
def signal_procs(sig):
try:
os.killpg(pgid, sig)
except OSError as err:
# If an "operation not permitted" error is thrown because of run_as_user,
# use sudo -n (--non-interactive) to kill the process
if err.errno == errno.EPERM:
subprocess.check_call(
["sudo", "-n", "kill", "-" + str(int(sig))] + [str(p.pid) for p in children]
)
else:
raise
if pgid == os.getpgid(0):
raise RuntimeError("I refuse to kill myself")
try:
parent = psutil.Process(pgid)
children = parent.children(recursive=True)
children.append(parent)
except psutil.NoSuchProcess:
# The process already exited, but maybe its children haven't.
children = []
for proc in psutil.process_iter():
try:
if os.getpgid(proc.pid) == pgid and proc.pid != 0:
children.append(proc)
except OSError:
pass
logger.info("Sending %s to GPID %s", sig, pgid)
try:
signal_procs(sig)
except OSError as err:
# No such process, which means there is no such process group - our job
# is done
if err.errno == errno.ESRCH:
return returncodes
_, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate)
if alive:
for proc in alive:
logger.warning("process %s did not respond to SIGTERM. Trying SIGKILL", proc)
try:
signal_procs(signal.SIGKILL)
except OSError as err:
if err.errno != errno.ESRCH:
raise
_, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
for proc in alive:
logger.error("Process %s (%s) could not be killed. Giving up.", proc, proc.pid)
return returncodes
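# Hedged usage sketch (not from Airflow itself): launch a child in its own
# process group via start_new_session and reap that group with the helper
# above. The command and timeout are illustrative assumptions; the function is
# defined but never called here.
def _reap_process_group_example():
    child = subprocess.Popen(["sleep", "600"], start_new_session=True)
    # the child's group id differs from ours because of start_new_session=True
    return reap_process_group(os.getpgid(child.pid), log, sig=signal.SIGTERM, timeout=5)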
def execute_in_subprocess(cmd: List[str]):
"""
Execute a process and stream output to logger
:param cmd: command and arguments to run
:type cmd: List[str]
"""
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, close_fds=True
) as proc:
log.info("Output:")
if proc.stdout:
with proc.stdout:
for line in iter(proc.stdout.readline, b''):
log.info("%s", line.decode().rstrip())
exit_code = proc.wait()
if exit_code != 0:
raise subprocess.CalledProcessError(exit_code, cmd)
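# Hedged usage sketch: a failing command raises CalledProcessError after its
# output has been streamed to the logger, as documented above. The command is
# an illustrative assumption; the function is defined but never called here.
def _execute_in_subprocess_example():
    try:
        execute_in_subprocess(["ls", "/definitely-missing-path"])
    except subprocess.CalledProcessError as err:
        log.warning("command failed with exit code %s", err.returncode)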
def execute_interactive(cmd: List[str], **kwargs):
"""
Runs the new command as a subprocess and ensures that the terminal's state is restored to its
original state after the process completes, e.g. if the subprocess hides the cursor, it will be
shown again afterwards.
"""
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())
# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()
try: # pylint: disable=too-many-nested-blocks
# use os.setsid() to make it run in a new process group, or bash job control will not be enabled
with subprocess.Popen(
cmd, stdin=slave_fd, stdout=slave_fd, stderr=slave_fd, universal_newlines=True, **kwargs
) as proc:
while proc.poll() is None:
readable_fbs, _, _ = select.select([sys.stdin, master_fd], [], [])
if sys.stdin in readable_fbs:
input_data = os.read(sys.stdin.fileno(), 10240)
os.write(master_fd, input_data)
if master_fd in readable_fbs:
output_data = os.read(master_fd, 10240)
if output_data:
os.write(sys.stdout.fileno(), output_data)
finally:
# restore tty settings back
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
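# Hedged usage sketch: run an interactive program through the pty wrapper so
# the caller's terminal settings survive it; the editor and file name are
# illustrative assumptions, and the function is defined but never called here.
def _execute_interactive_example():
    execute_interactive(["vi", "/tmp/example.txt"])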
def kill_child_processes_by_pids(pids_to_kill: List[int], timeout: int = 5) -> None:
"""
Kills child processes for the current process.
First, it sends the SIGTERM signal, and after the time specified by the `timeout` parameter, sends
the SIGKILL signal, if the process is still alive.
:param pids_to_kill: List of PID to be killed.
:type pids_to_kill: List[int]
:param timeout: The time to wait before sending the SIGKILL signal.
:type timeout: Optional[int]
"""
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [
x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
]
# First try SIGTERM
for child in child_processes:
log.info("Terminating child PID: %s", child.pid)
child.terminate()
log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout, callback=lambda x: log.info("Terminated PID %s", x.pid)
)
except psutil.TimeoutExpired:
log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [
x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill
]
if child_processes:
log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
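# Hedged usage sketch: collect the current process's children and ask the
# helper above to terminate them, escalating to SIGKILL after 5 seconds. The
# function is defined but never called here.
def _kill_child_processes_example():
    child_pids = [c.pid for c in psutil.Process(os.getpid()).children(recursive=True)]
    kill_child_processes_by_pids(child_pids, timeout=5)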
@contextmanager
def patch_environ(new_env_variables: Dict[str, str]):
"""
Sets environment variables in context. After leaving the context, it restores its original state.
:param new_env_variables: Environment variables to set
"""
current_env_state = {key: os.environ.get(key) for key in new_env_variables.keys()}
os.environ.update(new_env_variables)
try: # pylint: disable=too-many-nested-blocks
yield
finally:
for key, old_value in current_env_state.items():
if old_value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = old_value
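# Hedged usage sketch: temporarily override one variable; the name follows
# Airflow's AIRFLOW__SECTION__KEY convention but is only an illustrative
# assumption here. The previous value (or its absence) is restored on exit.
def _patch_environ_example():
    with patch_environ({"AIRFLOW__CORE__UNIT_TEST_MODE": "True"}):
        assert os.environ["AIRFLOW__CORE__UNIT_TEST_MODE"] == "True"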
def check_if_pidfile_process_is_running(pid_file: str, process_name: str):
"""
Checks if a pidfile already exists and process is still running.
If process is dead then pidfile is removed.
:param pid_file: path to the pidfile
:param process_name: name used in exception if process is up and
running
"""
pid_lock_file = PIDLockFile(path=pid_file)
# If file exists
if pid_lock_file.is_locked():
# Read the pid
pid = pid_lock_file.read_pid()
if pid is None:
return
try:
# Check if process is still running
proc = psutil.Process(pid)
if proc.is_running():
raise AirflowException(f"The {process_name} is already running under PID {pid}.")
except psutil.NoSuchProcess:
# If process is dead remove the pidfile
pid_lock_file.break_lock()
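# Hedged usage sketch: guard a daemon entry point with the pidfile check above;
# the path and process name are illustrative assumptions. AirflowException is
# raised if another live process already holds the pidfile.
def _pidfile_guard_example():
    check_if_pidfile_process_is_running("/tmp/example-daemon.pid", process_name="example daemon")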
|
|
"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built scipy, not an installed one).
The release is assumed to be done on OS X. See release.sh for a script that
employs the Paver tasks defined in this file, and builds everything required
for a release at once.
Building a Windows installer from Wine
======================================
The Python version the installer is for can be specified with the ``-p 2.6``
switch (this works for all installer tasks). To build a simple (no SSE
instructions) installer, do::
paver bdist_wininst_simple
This assumes that blas/lapack are in c:\local\lib inside drive_c. You will
have to make sure your Wine python locations (WINE_PYS) are configured
correctly.
The superpack requires all the Atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building an installer for OS X
==============================
For a simple installer, which is just an mpkg inside a dmg, do::
paver simple_dmg
For a more fancy installer which includes documentation and looks better, do::
paver pdf # needs to be done only once
paver dmg
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release_and_log
This automatically puts the checksums into NOTES.txt and writes the Changelog,
which can be uploaded to GitHub Releases (and maybe sourceforge for historical
reasons, see gh-4939).
TODO
====
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import subprocess
import re
import shutil
import warnings
from hashlib import md5
from hashlib import sha256
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError:
raise RuntimeError("paver version >= 1.0 required")
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task, cmdopts
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION = setup_py.git_version()
else:
GIT_REVISION = "Unknown"
if not setup_py.ISRELEASED:
if GIT_REVISION == "Unknown":
FULLVERSION += '.dev0+Unknown'
else:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(0)
try:
# Ensure sensible file permissions
os.umask(0o022)
except AttributeError:
# No umask on non-posix
pass
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Source of the release notes
RELEASE = 'doc/release/1.4.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'v1.3.0'
LOG_END = 'master'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Default python version
PYVER="3.6"
# Paver options object, holds all default dirs
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
virtualenv=Bunch(packages_to_install=["sphinx==1.8.5", "numpydoc"],
no_site_packages=False),
sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
superpack=Bunch(builddir="build-superpack",
bindir=os.path.join("build-superpack","binaries")),
installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),
doc=Bunch(doc_root="doc",
sdir=os.path.join("doc", "source"),
bdir=os.path.join("doc", "build"),
bdir_latex=os.path.join("doc", "build", "latex"),
destdir_pdf=os.path.join("build_doc", "pdf")),
html=Bunch(builddir=os.path.join("build", "html")),
dmg=Bunch(python_version=PYVER),
bdist_wininst_simple=Bunch(python_version=PYVER),)
# Where we can find BLAS/LAPACK/ATLAS on Windows/Wine
SITECFG = {"sse3" : {'BLAS': 'None', 'LAPACK': 'None',
'ATLAS': r'C:\local\lib\atlas\sse3'},
"sse2" : {'BLAS': 'None', 'LAPACK': 'None',
'ATLAS': r'C:\local\lib\atlas\sse2'},
"nosse" : {'ATLAS': 'None', 'BLAS': r'C:\local\lib\atlas\nosse',
'LAPACK': r'C:\local\lib\atlas\nosse'}}
# Wine config for win32 builds
if sys.platform == "win32":
WINE_PY35 = [r"C:\Python35\python.exe"]
WINDOWS_ENV = os.environ
MAKENSIS = ["makensis"]
elif sys.platform == "darwin":
WINE_PY35 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python35/python.exe"]
WINDOWS_ENV = os.environ
WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
MAKENSIS = ["wine", "makensis"]
else:
WINE_PY35 = [os.environ['HOME'] + "/.wine/drive_c/Python35/python.exe"]
WINDOWS_ENV = os.environ
MAKENSIS = ["wine", "makensis"]
WINE_PYS = {'3.5':WINE_PY35}
# Framework Python locations on OS X
MPKG_PYTHON = {
"3.5": "/Library/Frameworks/Python.framework/Versions/3.5/bin/python3"
}
# Full path to the *static* gfortran runtime
LIBGFORTRAN_A_PATH = "/usr/local/lib/libgfortran.a"
#--------------------------------------
# Utility functions and bootstrap stuff
#--------------------------------------
def parse_numpy_version(pyexec):
if isinstance(pyexec, str):
cmd = [pyexec, "-c", "'import numpy; print(numpy.version.version)'"]
else:
# sequence for pyexec
cmd = pyexec + ["-c", "'import numpy; print(numpy.version.version)'"]
# Execute in shell because launching python from python does not work
# (hangs)
p = subprocess.Popen(" ".join(cmd), stdout=subprocess.PIPE, shell=True)
out = p.communicate()[0]
if p.returncode:
raise RuntimeError("Command %s failed" % " ".join(cmd))
out = out.decode()
m = re.match(r"^([0-9]+)\.([0-9]+)\.([0-9]+)", out)
if m:
return tuple(int(i) for i in m.groups()[:3])
else:
raise ValueError("Could not parse version (%s)" % out)
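# Hedged usage sketch, kept as a comment so nothing executes at import time;
# the interpreter key is one of the WINE_PYS/MPKG_PYTHON entries defined below:
#
#     numver = parse_numpy_version(MPKG_PYTHON["3.5"])   # e.g. (1, 8, 2)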
@task
def bootstrap():
"""create virtualenv in ./install"""
try:
import virtualenv
except ImportError:
raise RuntimeError("virtualenv is needed for bootstrap")
bdir = options.bootstrap_dir
if not os.path.exists(bdir):
os.makedirs(bdir)
bscript = "bootstrap.py"
options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
bscript)
options.bootstrap.no_site_packages = False
call_task('paver.virtual.bootstrap')
sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'scipy.egg-info']
for i in d:
if os.path.exists(i):
shutil.rmtree(i)
bdir = os.path.join('doc', options.sphinx.builddir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
def clean_bootstrap():
bdir = os.path.join(options.bootstrap.bootstrap_dir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
@needs('clean', 'clean_bootstrap')
def nuke():
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
for d in [options.superpack.builddir, options.installers.releasedir]:
if os.path.exists(d):
shutil.rmtree(d)
#--------------------
# Documentation tasks
#--------------------
@task
def html(options):
"""Build scipy documentation and put it into build/docs"""
# Don't use paver html target because of scipy bootstrapping problems
subprocess.check_call(["make", "html"], cwd="doc")
builtdocs = paver.path.path("doc") / options.sphinx.builddir / "html"
options.html.builddir.rmtree()
builtdocs.copytree(options.html.builddir)
@task
def latex():
"""Build scipy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
bdir_latex = options.doc.bdir_latex
destdir_pdf = options.doc.destdir_pdf
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
dry("Build pdf doc", build_pdf)
if os.path.exists(destdir_pdf):
shutil.rmtree(destdir_pdf)
os.makedirs(destdir_pdf)
ref = os.path.join(bdir_latex, "scipy-ref.pdf")
shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
def tarball_name(type='gztar'):
root = 'scipy-%s' % FULLVERSION
if type == 'gztar':
return root + '.tar.gz'
elif type == 'xztar':
return root + '.tar.xz'
elif type == 'tar':
return root + '.tar'
elif type == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist():
# First clean the repo and update submodules (for up-to-date doc html theme
# and Sphinx extensions)
sh('git clean -xdf')
sh('git submodule init')
sh('git submodule update')
# Fix file permissions
sh('chmod -R a+rX *')
# To be sure to bypass paver when building sdist... paver + scipy.distutils
# do not play well together.
# Cython is run over all Cython files in setup.py, so generated C files
# will be included.
sh('python setup.py sdist --formats=gztar,zip')
sh('python setup.py sdist --formats=tar')
if os.path.exists(os.path.join('dist', tarball_name("xztar"))):
os.unlink(os.path.join('dist', tarball_name("xztar")))
sh('xz %s' % os.path.join('dist', tarball_name("tar")), ignore_error=True)
# Copy the sdists into installers dir
if not os.path.exists(options.installers.installersdir):
os.makedirs(options.installers.installersdir)
if not os.path.exists(os.path.join('dist', tarball_name("xztar"))):
warnings.warn("Could not create tar.xz! Do you have xz installed?")
else:
t = 'xztar'
source = os.path.join('dist', tarball_name(t))
target = os.path.join(options.installers.installersdir, tarball_name(t))
shutil.copy(source, target)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(options.installers.installersdir, tarball_name(t))
shutil.copy(source, target)
@task
def release(options):
"""sdists, release notes and changelog. Docs and wheels are built in
separate steps (see doc/source/dev/releasing.rst).
"""
# Source tarballs
sdist()
# README (gpg signed) and Changelog
write_release_and_log()
#---------------------------------------
# Windows installers (Wine-based builds)
#---------------------------------------
def superpack_name(pyver, numver):
"""Return the filename of the superpack installer."""
return 'scipy-%s-win32-superpack-python%s.exe' % (numver, pyver)
def internal_wininst_name(arch):
"""Return the name of the wininst as it will be inside the superpack (i.e.
with the arch encoded)."""
ext = '.exe'
return "scipy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver):
"""Return the name of the installer built by wininst command."""
ext = '.exe'
return "scipy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
def bdist_wininst_arch(pyver, arch):
"""Arch specific wininst build."""
if os.path.exists("build"):
shutil.rmtree("build")
_bdist_wininst(pyver, SITECFG[arch])
def prepare_nsis_script(pyver, numver):
if not os.path.exists(options.superpack.builddir):
os.makedirs(options.superpack.builddir)
tpl = os.path.join('tools/win32/build_scripts/nsis_scripts', 'scipy-superinstaller.nsi.in')
source = open(tpl, 'r')
target = open(os.path.join(options.superpack.builddir, 'scipy-superinstaller.nsi'), 'w')
installer_name = superpack_name(pyver, numver)
cnt = "".join(source.readlines())
cnt = cnt.replace('@SCIPY_INSTALLER_NAME@', installer_name)
for arch in ['nosse', 'sse2', 'sse3']:
cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
internal_wininst_name(arch))
target.write(cnt)
@task
def bdist_wininst_nosse(options):
"""Build the nosse wininst installer."""
bdist_wininst_arch(options.python_version, 'nosse')
@task
def bdist_wininst_sse2(options):
"""Build the sse2 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse2')
@task
def bdist_wininst_sse3(options):
"""Build the sse3 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
"""Build all arch specific wininst installers."""
pyver = options.python_version
def copy_bdist(arch):
# Copy the wininst in dist into the release directory
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(options.superpack.bindir, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
try:
os.rename(source, target)
except OSError:
# May be due to dev version having 'Unknown' in name, if git isn't
# found. This can be the case when compiling under Wine.
ix = source.find('.dev0+') + 6
source = source[:ix] + 'Unknown' + source[ix+7:]
os.rename(source, target)
bdist_wininst_arch(pyver, 'nosse')
copy_bdist("nosse")
bdist_wininst_arch(pyver, 'sse2')
copy_bdist("sse2")
bdist_wininst_arch(pyver, 'sse3')
copy_bdist("sse3")
prepare_nsis_script(pyver, FULLVERSION)
subprocess.check_call(MAKENSIS + ['scipy-superinstaller.nsi'],
cwd=options.superpack.builddir)
# Copy the superpack into installers dir
if not os.path.exists(options.installers.installersdir):
os.makedirs(options.installers.installersdir)
source = os.path.join(options.superpack.builddir,
superpack_name(pyver, FULLVERSION))
target = os.path.join(options.installers.installersdir,
superpack_name(pyver, FULLVERSION))
shutil.copy(source, target)
@task
@cmdopts([('python_version=', 'p', 'Python version to build the installer against')])
def bdist_wininst_simple():
"""Simple wininst-based installer."""
call_task("clean")
env = os.environ.copy()
for k, v in SITECFG['nosse'].items():
env[k] = v
_bdist_wininst(options.bdist_wininst_simple.python_version, env)
def _bdist_wininst(pyver, cfg_env=None):
cmd = WINE_PYS[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
if cfg_env:
for k, v in WINDOWS_ENV.items():
cfg_env[k] = v
else:
cfg_env = WINDOWS_ENV
try:
subprocess.check_call(cmd, env=cfg_env)
except subprocess.CalledProcessError:
# Too many open files to compile in one go, so re-run.
print('RESTART WINDOWS BUILD. See gh-2709.')
subprocess.check_call(cmd, env=cfg_env)
#--------------------
# Mac OS X installers
#--------------------
def dmg_name(fullversion, pyver, osxver=None):
"""Return name for dmg installer.
Notes
-----
Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
(i386, x86_64). All other Python versions at python.org at the moment
have binaries for 10.3 only. The "macosx%s" part of the dmg name should
correspond to the python.org naming scheme.
"""
# assume that for the py2.7/osx10.6 build the deployment target is set
# (should be done in the release script).
if not osxver:
osxver = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
return "scipy-%s-py%s-python.org-macosx%s.dmg" % (fullversion, pyver,
osxver)
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i.decode())
if m:
return m.groups()
def mpkg_name(pyver):
maj, min = macosx_version()[:2]
return "scipy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
def prepare_static_gfortran_runtime(d):
if not os.path.exists(d):
os.makedirs(d)
shutil.copy(LIBGFORTRAN_A_PATH, d)
@task
@cmdopts([('python_version=', 'p', 'Python version to build the installer against')])
def bdist_mpkg():
call_task("clean")
try:
pyver = options.bdist_mpkg.python_version
except AttributeError:
pyver = PYVER
_build_mpkg(pyver)
def _build_mpkg(pyver):
numver = parse_numpy_version(MPKG_PYTHON[pyver])
numverstr = ".".join(["%i" % i for i in numver])
if pyver < "3.3":
if not numver == (1, 8, 2):
raise ValueError("Scipy 0.19.x should be built against numpy "
"1.8.2, (detected %s) for Python >= 3.4" % numverstr)
prepare_static_gfortran_runtime("build")
# account for differences between Python 2.7.1 versions from python.org
if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
else:
ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, MPKG_PYTHON[pyver]))
@task
@cmdopts([("python_version=", "p", "python version")])
def dmg():
try:
pyver = options.dmg.python_version
except Exception:
pyver = PYVER
idirs = options.installers.installersdir
# Check if doc exists. If not, say so and quit.
docpath = options.doc.destdir_pdf
ref = os.path.join(docpath, "reference.pdf")
if not os.path.exists(ref):
warnings.warn("Docs need to be built first! \n%s not found." % docpath)
dmg_n = dmg_name(FULLVERSION, pyver)
dmg = paver.path.path('scipy-macosx-installer') / dmg_n
if dmg.exists():
dmg.remove()
call_task("clean")
_build_mpkg(pyver)
macosx_installer_dir = "tools/scipy-macosx-installer"
dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
if os.path.exists(dmg):
os.remove(dmg)
# Clean the image source
content = os.path.join(macosx_installer_dir, 'content')
if os.path.exists(content):
shutil.rmtree(content)
os.makedirs(content)
# Copy mpkg into image source
mpkg_source = os.path.join("dist", mpkg_name(pyver))
mpkg_target = os.path.join(content, "scipy-%s-py%s.mpkg" % (FULLVERSION, pyver))
shutil.copytree(mpkg_source, mpkg_target)
# Copy docs into image source
pdf_docs = os.path.join(content, "Documentation")
if os.path.exists(pdf_docs):
shutil.rmtree(pdf_docs)
os.makedirs(pdf_docs)
shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
# Build the dmg
cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
"--volname", "scipy", os.path.basename(dmg), "./content"]
st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
source = dmg
target = os.path.join(idirs, os.path.basename(dmg))
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(source, target)
@task
@cmdopts([('python_version=', 'p', 'Python version to build the installer against')])
def simple_dmg():
try:
pyver = options.simple_dmg.python_version
except AttributeError:
pyver = PYVER
src_dir = "dmg-source"
# Clean the source dir
if os.path.exists(src_dir):
shutil.rmtree(src_dir)
os.makedirs(src_dir)
# Build the mpkg
clean()
_build_mpkg(pyver)
# Build the dmg
shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
os.path.join(src_dir, mpkg_name(pyver)))
_create_dmg(pyver, src_dir, "Scipy Universal %s" % FULLVERSION)
def _create_dmg(pyver, src_dir, volname=None):
# Build the dmg
image_name = dmg_name(FULLVERSION, pyver)
image = paver.path.path(image_name)
image.remove()
cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
if volname:
cmd.extend(["-volname", "'%s'" % volname])
sh(" ".join(cmd))
#----------------------------
# Release notes and Changelog
#----------------------------
def compute_md5(idirs):
released = paver.path.path(idirs).listdir()
checksums = []
for f in sorted(released):
m = md5(open(f, 'rb').read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
return checksums
def compute_sha256(idirs):
# better checksum so gpg signed README.txt containing the sums can be used
# to verify the binaries instead of signing all binaries
released = paver.path.path(idirs).listdir()
checksums = []
for f in sorted(released):
m = sha256(open(f, 'rb').read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
return checksums
def write_release_task(options, filename='NOTES.txt'):
idirs = options.installers.installersdir
source = paver.path.path(RELEASE)
target = paver.path.path(filename)
if target.exists():
target.remove()
tmp_target = paver.path.path(filename + '.tmp')
source.copy(tmp_target)
with open(str(tmp_target), 'a') as ftarget:
ftarget.writelines("""
Checksums
=========
MD5
~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
ftarget.writelines("""
SHA256
~~~~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
# Sign release; on some platforms gpg2 may actually
# be named gpg
cmd = ['gpg2', '--clearsign', '--armor']
if hasattr(options, 'gpg_key'):
cmd += ['--default-key', options.gpg_key]
cmd += ['--output', str(target), str(tmp_target)]
subprocess.check_call(cmd)
print("signed %s" % (target,))
tmp_target.remove()
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0].decode()
with open(filename, 'w') as f:
f.write(out)
@task
@cmdopts([('gpg_key=', 'g', 'GPG key to use for signing')])
def write_release(options):
write_release_task(options)
@task
def write_log():
write_log_task()
@task
@cmdopts([('gpg_key=', 'g', 'GPG key to use for signing')])
def write_release_and_log(options):
write_release_task(options, os.path.join(options.installers.releasedir, 'README'))
write_log_task(os.path.join(options.installers.releasedir, 'Changelog'))
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
import logging
import os
import shutil
from twitter.common.collections.orderedset import OrderedSet
from pants import binary_util
from pants.backend.jvm.jvm_debug_config import JvmDebugConfig
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.checkstyle import Checkstyle
from pants.backend.jvm.tasks.jvm_binary_task import JvmBinaryTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.base.target import Target
from pants.goal.goal import Goal
from pants.util.dirutil import safe_mkdir, safe_walk
logger = logging.getLogger(__name__)
# We use custom checks for scala and java targets here for 2 reasons:
# 1.) jvm_binary could have either a scala or java source file attached so we can't do a pure
# target type test
# 2.) the target may be under development in which case it may not have sources yet - it's pretty
# common to write a BUILD and ./pants goal idea the target inside to start development at which
# point there are no source files yet - and the developer intends to add them using the ide.
def is_scala(target):
return target.has_sources('.scala') or target.is_scala
def is_java(target):
return target.has_sources('.java') or target.is_java
# XXX(pl): JVM hairball violator (or is it just JVM specific?)
class IdeGen(JvmBinaryTask, JvmToolTaskMixin):
@classmethod
def register_options(cls, register):
super(IdeGen, cls).register_options(register)
register('--project-name', default='project',
help='Specifies the name to use for the generated project.')
register('--project-dir',
help='Specifies the directory to output the generated project files to.')
register('--project-cwd',
help='Specifies the directory the generated project should use as the cwd for '
'processes it launches. Note that specifying this trumps --{0}-project-dir '
'and not all project related files will be stored there.'
.format(cls.options_scope))
register('--intransitive', action='store_true', default=False,
help='Limits the sources included in the generated project to just '
'those owned by the targets specified on the command line.')
register('--python', action='store_true', default=False,
help='Adds python support to the generated project configuration.')
register('--java', action='store_true', default=True,
help='Includes java sources in the project; otherwise compiles them and adds them '
'to the project classpath.')
register('--java-language-level', type=int, default=7,
help='Sets the java language and jdk used to compile the project\'s java sources.')
register('--java-jdk-name', default=None,
help='Sets the jdk used to compile the project\'s java sources. If unset the default '
'jdk name for the --java-language-level is used')
register('--scala', action='store_true', default=True,
help='Includes scala sources in the project; otherwise compiles them and adds them '
'to the project classpath.')
register('--use-source-root', action='store_true', default=False,
help='Use source_root() settings to collapse sourcepaths in project and determine '
'which paths are used for tests. This is usually what you want if your repo '
' uses a maven style directory layout.')
register('--infer-test-from-siblings', action='store_true',
help='When determining if a path should be added to the IDE, check to see if any of '
'its sibling source_root() entries define test targets. This is usually what '
'you want so that resource directories under test source roots are picked up as '
'test paths.')
class Error(TaskError):
"""IdeGen Error."""
class TargetUtil(object):
def __init__(self, context):
self.context = context
@property
def build_graph(self):
return self.context.build_graph
def get_all_addresses(self, buildfile):
return set(self.context.address_mapper.addresses_in_spec_path(buildfile.spec_path))
def get(self, address):
self.context.build_graph.inject_address(address)
return self.context.build_graph.get_target(address)
def __init__(self, *args, **kwargs):
super(IdeGen, self).__init__(*args, **kwargs)
self.project_name = self.get_options().project_name
self.python = self.get_options().python
self.skip_java = not self.get_options().java
self.skip_scala = not self.get_options().scala
self.use_source_root = self.get_options().use_source_root
self.java_language_level = self.get_options().java_language_level
if self.get_options().java_jdk_name:
self.java_jdk = self.get_options().java_jdk_name
else:
self.java_jdk = '1.%d' % self.java_language_level
# Always tack on the project name to the work dir so each project gets its own linked jars,
# etc. See https://github.com/pantsbuild/pants/issues/564
if self.get_options().project_dir:
self.gen_project_workdir = os.path.abspath(
os.path.join(self.get_options().project_dir, self.project_name))
else:
self.gen_project_workdir = os.path.abspath(
os.path.join(self.workdir, self.__class__.__name__, self.project_name))
self.cwd = (
os.path.abspath(self.get_options().project_cwd) if
self.get_options().project_cwd else self.gen_project_workdir
)
self.intransitive = self.get_options().intransitive
self.checkstyle_suppression_files = self.context.config.getdefault(
'checkstyle_suppression_files', type=list, default=[]
)
# Everywhere else, debug_port is specified in the 'jvm' section. Use that as a default if none
# is specified in the 'ide' section.
jvm_config_debug_port = JvmDebugConfig.debug_port(self.context.config)
self.debug_port = self.context.config.getint('ide', 'debug_port', default=jvm_config_debug_port)
self.checkstyle_bootstrap_key = 'checkstyle'
self.register_jvm_tool_from_config(self.checkstyle_bootstrap_key, self.context.config,
ini_section='checkstyle',
ini_key='bootstrap-tools',
default=['//:twitter-checkstyle'])
self.scalac_bootstrap_key = None
if not self.skip_scala:
self.scalac_bootstrap_key = 'scalac'
self.register_jvm_tool_from_config(self.scalac_bootstrap_key, self.context.config,
ini_section='scala-compile',
ini_key='compile-bootstrap-tools',
default=['//:scala-compiler-2.9.3'])
def prepare(self, round_manager):
if self.python:
round_manager.require('python')
if not self.skip_java:
round_manager.require('java')
if not self.skip_scala:
round_manager.require('scala')
round_manager.require_data('ivy_jar_products')
round_manager.require('jar_dependencies')
def _prepare_project(self):
targets, self._project = self.configure_project(
self.context.targets(),
self.checkstyle_suppression_files,
self.debug_port)
self.configure_compile_context(targets)
def configure_project(self, targets, checkstyle_suppression_files, debug_port):
jvm_targets = [t for t in targets if t.has_label('jvm') or t.has_label('java')]
if self.intransitive:
jvm_targets = set(self.context.target_roots).intersection(jvm_targets)
project = Project(self.project_name,
self.python,
self.skip_java,
self.skip_scala,
self.use_source_root,
get_buildroot(),
checkstyle_suppression_files,
debug_port,
jvm_targets,
not self.intransitive,
self.context.new_workunit,
self.TargetUtil(self.context))
if self.python:
python_source_paths = self.context.config.getlist('ide', 'python_source_paths', default=[])
python_test_paths = self.context.config.getlist('ide', 'python_test_paths', default=[])
python_lib_paths = self.context.config.getlist('ide', 'python_lib_paths', default=[])
project.configure_python(python_source_paths, python_test_paths, python_lib_paths)
extra_source_paths = self.context.config.getlist('ide', 'extra_jvm_source_paths', default=[])
extra_test_paths = self.context.config.getlist('ide', 'extra_jvm_test_paths', default=[])
all_targets = project.configure_jvm(extra_source_paths, extra_test_paths)
return all_targets, project
def configure_compile_context(self, targets):
"""
Trims the context's target set to just those targets needed as jars on the IDE classpath.
All other targets only contribute their external jar dependencies and excludes to the
classpath definition.
"""
def is_cp(target):
return (
target.is_codegen or
# Some IDEs need annotation processors pre-compiled, others are smart enough to detect and
# proceed in 2 compile rounds
target.is_apt or
(self.skip_java and is_java(target)) or
(self.skip_scala and is_scala(target)) or
(self.intransitive and target not in self.context.target_roots)
)
jars = OrderedSet()
excludes = OrderedSet()
compiles = OrderedSet()
def prune(target):
if target.is_jvm:
if target.excludes:
excludes.update(target.excludes)
jars.update(jar for jar in target.jar_dependencies if jar.rev)
if is_cp(target):
target.walk(compiles.add)
for target in targets:
target.walk(prune)
self.context.replace_targets(compiles)
self.jar_dependencies = jars
self.context.log.debug('pruned to cp:\n\t%s' % '\n\t'.join(
str(t) for t in self.context.targets())
)
def map_internal_jars(self, targets):
internal_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libs')
safe_mkdir(internal_jar_dir, clean=True)
internal_source_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libsources')
safe_mkdir(internal_source_jar_dir, clean=True)
internal_jars = self.context.products.get('jars')
internal_source_jars = self.context.products.get('source_jars')
for target in targets:
mappings = internal_jars.get(target)
if mappings:
for base, jars in mappings.items():
if len(jars) != 1:
raise IdeGen.Error('Unexpected mapping, multiple jars for %s: %s' % (target, jars))
jar = jars[0]
cp_jar = os.path.join(internal_jar_dir, jar)
shutil.copy(os.path.join(base, jar), cp_jar)
cp_source_jar = None
mappings = internal_source_jars.get(target)
if mappings:
for base, jars in mappings.items():
if len(jars) != 1:
raise IdeGen.Error(
'Unexpected mapping, multiple source jars for %s: %s' % (target, jars)
)
jar = jars[0]
cp_source_jar = os.path.join(internal_source_jar_dir, jar)
shutil.copy(os.path.join(base, jar), cp_source_jar)
self._project.internal_jars.add(ClasspathEntry(cp_jar, source_jar=cp_source_jar))
def _get_jar_paths(self, jars=None, confs=None):
"""Returns a list of dicts containing the paths of various jar file resources.
Keys include 'default' (normal jar path), 'sources' (path to source jar), and 'javadoc'
(path to doc jar). None of them are guaranteed to be present, but 'sources' and 'javadoc'
will never be present if 'default' isn't.
:param jars: JarDependency objects to resolve paths for
:param confs: List of key types to return (eg ['default', 'sources']). Just returns 'default' if
left unspecified.
"""
# TODO(Garrett Malmquist): Get mapping working for source and javadoc jars.
ivy_products = self.context.products.get_data('ivy_jar_products')
classpath_maps = []
for info_group in ivy_products.values():
for info in info_group:
for module in info.modules_by_ref.values():
for artifact in module.artifacts:
classpath_maps.append({'default': artifact.path})
return classpath_maps
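# Hedged illustration of the structure returned by _get_jar_paths(); the paths
# are made up, and per the TODO above only the 'default' key is populated:
#
#     [{'default': '/home/user/.ivy2/cache/org/foo/jars/foo-1.0.jar'},
#      {'default': '/home/user/.ivy2/cache/org/bar/jars/bar-2.1.jar'}]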
def map_external_jars(self):
external_jar_dir = os.path.join(self.gen_project_workdir, 'external-libs')
safe_mkdir(external_jar_dir, clean=True)
external_source_jar_dir = os.path.join(self.gen_project_workdir, 'external-libsources')
safe_mkdir(external_source_jar_dir, clean=True)
external_javadoc_jar_dir = os.path.join(self.gen_project_workdir, 'external-libjavadoc')
safe_mkdir(external_javadoc_jar_dir, clean=True)
confs = ['default', 'sources', 'javadoc']
for entry in self._get_jar_paths(confs=confs):
jar = entry.get('default')
if jar:
cp_jar = os.path.join(external_jar_dir, os.path.basename(jar))
shutil.copy(jar, cp_jar)
cp_source_jar = None
source_jar = entry.get('sources')
if source_jar:
cp_source_jar = os.path.join(external_source_jar_dir, os.path.basename(source_jar))
shutil.copy(source_jar, cp_source_jar)
cp_javadoc_jar = None
javadoc_jar = entry.get('javadoc')
if javadoc_jar:
cp_javadoc_jar = os.path.join(external_javadoc_jar_dir, os.path.basename(javadoc_jar))
shutil.copy(javadoc_jar, cp_javadoc_jar)
self._project.external_jars.add(ClasspathEntry(cp_jar,
source_jar=cp_source_jar,
javadoc_jar=cp_javadoc_jar))
def execute(self):
"""Stages IDE project artifacts to a project directory and generates IDE configuration files."""
self._prepare_project()
def _checkstyle_enabled():
for goal in Goal.all():
if goal.has_task_of_type(Checkstyle):
return True
return False
if _checkstyle_enabled():
checkstyle_classpath = self.tool_classpath(self.checkstyle_bootstrap_key)
else:
checkstyle_classpath = []
if self.scalac_bootstrap_key:
scalac_classpath = self.tool_classpath(self.scalac_bootstrap_key)
else:
scalac_classpath = []
self._project.set_tool_classpaths(checkstyle_classpath, scalac_classpath)
targets = self.context.targets()
self.map_internal_jars(targets)
self.map_external_jars()
idefile = self.generate_project(self._project)
if idefile:
binary_util.ui_open(idefile)
def generate_project(self, project):
raise NotImplementedError('Subclasses must generate a project for an ide')
class ClasspathEntry(object):
"""Represents a classpath entry that may have sources available."""
def __init__(self, jar, source_jar=None, javadoc_jar=None):
self.jar = jar
self.source_jar = source_jar
self.javadoc_jar = javadoc_jar
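# Illustrative sketch only (the jar paths below are hypothetical): map_external_jars
# above builds entries of this shape, with the source and javadoc jars optional.
def _example_classpath_entry():
  return ClasspathEntry('external-libs/guava.jar',
                        source_jar='external-libsources/guava-sources.jar',
                        javadoc_jar='external-libjavadoc/guava-javadoc.jar')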
class SourceSet(object):
"""Models a set of source files."""
def __init__(self, root_dir, source_base, path, is_test):
"""
:param string root_dir: full path to the root of the project containing this source set
:param string source_base: the relative path from root_dir to the base of this source set
:param string path: relative path from the source_base to the base of the sources in this set
:param bool is_test: true iff the sources contained by this set implement test cases
"""
self.root_dir = root_dir
self.source_base = source_base
self.path = path
self.is_test = is_test
self._excludes = []
@property
def excludes(self):
"""Paths relative to self.path that are excluded from this source set."""
return self._excludes
@property
def _key_tuple(self):
"""Creates a tuple from the attributes used as a key to uniquely identify a SourceSet"""
return (self.root_dir, self.source_base, self.path)
def __str__(self):
return str(self._key_tuple)
def __eq__(self, other):
return self._key_tuple == other._key_tuple
def __cmp__(self, other):
return cmp(self._key_tuple, other._key_tuple)
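# Illustrative sketch only (paths are hypothetical; relies on the module's existing
# `os` import): a SourceSet's location on disk is root_dir/source_base/path, the same
# composition the full_path helper in Project.configure_jvm uses when checking for
# overlapping source sets.
def _example_source_set_full_path():
  source_set = SourceSet('/repo', 'src/java', 'com/example', is_test=False)
  # Evaluates to '/repo/src/java/com/example'
  return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)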
class Project(object):
"""Models a generic IDE project that is comprised of a set of BUILD targets."""
@staticmethod
def extract_resource_extensions(resources):
"""Returns the set of unique extensions (including the .) from the given resource files."""
if resources:
for resource in resources:
_, ext = os.path.splitext(resource)
yield ext
@staticmethod
def _collapse_by_source_root(source_sets):
"""Collapse SourceSets with common source roots into one SourceSet instance.
Use the registered source roots to collapse all source paths under a root.
If any test type of target is allowed under the root, the path is determined to be
a test path. This method will give unpredictable results if source root entries overlap.
:param list source_sets: SourceSets to analyze
:returns: list of SourceSets collapsed to the source root paths.
"""
roots_found = set() # remember the roots we've already encountered
collapsed_source_sets = []
for source in source_sets:
query = os.path.join(source.source_base, source.path)
source_root = SourceRoot.find_by_path(query)
if not source_root:
collapsed_source_sets.append(source)
elif not source_root in roots_found:
roots_found.add(source_root)
collapsed_source_sets.append(SourceSet(source.root_dir, source_root, "", source.is_test))
return collapsed_source_sets
def __init__(self, name, has_python, skip_java, skip_scala, use_source_root, root_dir,
checkstyle_suppression_files, debug_port, targets, transitive, workunit_factory,
target_util):
"""Creates a new, unconfigured, Project based at root_dir and comprised of the sources visible
to the given targets."""
self.target_util = target_util
self.name = name
self.root_dir = root_dir
self.targets = OrderedSet(targets)
self.transitive = transitive
self.workunit_factory = workunit_factory
self.sources = []
self.py_sources = []
self.py_libs = []
self.resource_extensions = set()
self.has_python = has_python
self.skip_java = skip_java
self.skip_scala = skip_scala
self.use_source_root = use_source_root
self.has_scala = False
self.has_tests = False
self.checkstyle_suppression_files = checkstyle_suppression_files # Absolute paths.
self.debug_port = debug_port
self.internal_jars = OrderedSet()
self.external_jars = OrderedSet()
def configure_python(self, source_roots, test_roots, lib_roots):
self.py_sources.extend(SourceSet(get_buildroot(), root, None, False) for root in source_roots)
self.py_sources.extend(SourceSet(get_buildroot(), root, None, True) for root in test_roots)
for root in lib_roots:
for path in os.listdir(os.path.join(get_buildroot(), root)):
if os.path.isdir(os.path.join(get_buildroot(), root, path)) or path.endswith('.egg'):
self.py_libs.append(SourceSet(get_buildroot(), root, path, False))
def configure_jvm(self, extra_source_paths, extra_test_paths):
"""
Configures this project's source sets returning the full set of targets the project is
comprised of. The full set can be larger than the initial set of targets when any of the
initial targets only has partial ownership of its source set's directories.
"""
# TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
# construction of source sets and excludes ... and add a test!
analyzed = OrderedSet()
targeted = set()
def relative_sources(target):
sources = target.payload.sources.relative_to_buildroot()
return [os.path.relpath(source, target.target_base) for source in sources]
def source_target(target):
result = ((self.transitive or target in self.targets) and
target.has_sources() and
(not (self.skip_java and is_java(target)) and
not (self.skip_scala and is_scala(target))))
return result
def configure_source_sets(relative_base, sources, is_test):
absolute_base = os.path.join(self.root_dir, relative_base)
paths = set([os.path.dirname(source) for source in sources])
for path in paths:
absolute_path = os.path.join(absolute_base, path)
# Note, this can add duplicate source paths to self.sources(). We'll de-dup them later,
# because we want to prefer test paths.
targeted.add(absolute_path)
self.sources.append(SourceSet(self.root_dir, relative_base, path, is_test))
def find_source_basedirs(target):
dirs = set()
if source_target(target):
absolute_base = os.path.join(self.root_dir, target.target_base)
dirs.update([os.path.join(absolute_base, os.path.dirname(source))
for source in relative_sources(target)])
return dirs
def configure_target(target):
if target not in analyzed:
analyzed.add(target)
self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))
# Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
if isinstance(target, ScalaLibrary):
for java_source in target.java_sources:
configure_target(java_source)
if target.has_resources:
resources_by_basedir = defaultdict(set)
for resources in target.resources:
resources_by_basedir[target.target_base].update(relative_sources(resources))
for basedir, resources in resources_by_basedir.items():
self.resource_extensions.update(Project.extract_resource_extensions(resources))
configure_source_sets(basedir, resources, is_test=target.is_test)
if target.has_sources():
test = target.is_test
self.has_tests = self.has_tests or test
base = target.target_base
configure_source_sets(base, relative_sources(target), is_test=test)
# Other BUILD files may specify sources in the same directory as this target. Those BUILD
# files might be in parent directories (globs('a/b/*.java')) or even children directories if
# this target globs children as well. Gather all these candidate BUILD files to test for
# sources they own that live in the directories this targets sources live in.
target_dirset = find_source_basedirs(target)
if target.address.is_synthetic:
return [] # Siblings don't make sense for synthetic addresses.
candidates = self.target_util.get_all_addresses(target.address.build_file)
for ancestor in target.address.build_file.ancestors():
candidates.update(self.target_util.get_all_addresses(ancestor))
for sibling in target.address.build_file.siblings():
candidates.update(self.target_util.get_all_addresses(sibling))
for descendant in target.address.build_file.descendants():
candidates.update(self.target_util.get_all_addresses(descendant))
def is_sibling(target):
return source_target(target) and target_dirset.intersection(find_source_basedirs(target))
return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])
for target in self.targets:
target.walk(configure_target, predicate=source_target)
def full_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)
def dedup_sources(source_set_list):
"""Sometimes two targets with the same path are added to the source set. One is a target where
      is_test evaluates to True and the other where it evaluates to False. When this happens,
make sure we prefer the SourceSet with is_test set to True.
"""
deduped_sources = set(filter(lambda source_set: source_set.is_test, source_set_list))
for source_set in source_set_list:
if not source_set.is_test and source_set not in deduped_sources:
deduped_sources.add(source_set)
# re-sort the list, makes the generated project easier to read.
return sorted(list(deduped_sources))
# Check if there are any overlapping source_sets, and output an error message if so.
# Overlapping source_sets cause serious problems with package name inference.
overlap_error = ('SourceSets {current} and {previous} evaluate to the same full path.'
' This can be caused by multiple BUILD targets claiming the same source,'
' e.g., if a BUILD target in a parent directory contains an rglobs() while'
' a BUILD target in a subdirectory of that uses a globs() which claims the'
' same sources. This may cause package names to be inferred incorrectly (e.g.,'
' you might see src.com.foo.bar.Main instead of com.foo.bar.Main).')
source_full_paths = {}
for source_set in sorted(self.sources, key=full_path):
full = full_path(source_set)
if full in source_full_paths:
previous_set = source_full_paths[full]
logger.debug(overlap_error.format(current=source_set, previous=previous_set))
source_full_paths[full] = source_set
    # We need to figure out excludes. In doing so, there are 2 cases to consider:
# 1.) targets depend on A only should lead to an exclude of B
# A/BUILD
# A/B/BUILD
#
# 2.) targets depend on A and C should not lead to an exclude of B (would wipe out C)
# A/BUILD
# A/B
# A/B/C/BUILD
#
# 1 approach: build set of all paths and parent paths containing BUILDs our targets depend on -
# these are unexcludable
unexcludable_paths = set()
for source_set in self.sources:
parent = os.path.join(self.root_dir, source_set.source_base, source_set.path)
while True:
unexcludable_paths.add(parent)
parent, _ = os.path.split(parent)
# no need to add the repo root or above, all source paths and extra paths are children
if parent == self.root_dir:
break
for source_set in self.sources:
paths = set()
source_base = os.path.join(self.root_dir, source_set.source_base)
for root, dirs, _ in safe_walk(os.path.join(source_base, source_set.path)):
if dirs:
paths.update([os.path.join(root, directory) for directory in dirs])
unused_children = paths - targeted
if unused_children:
for child in unused_children:
if child not in unexcludable_paths:
source_set.excludes.append(os.path.relpath(child, source_base))
targets = OrderedSet()
for target in self.targets:
target.walk(lambda target: targets.add(target), source_target)
targets.update(analyzed - targets)
self.sources.extend(SourceSet(get_buildroot(), p, None, False) for p in extra_source_paths)
self.sources.extend(SourceSet(get_buildroot(), p, None, True) for p in extra_test_paths)
if self.use_source_root:
self.sources = Project._collapse_by_source_root(self.sources)
self.sources = dedup_sources(self.sources)
return targets
def set_tool_classpaths(self, checkstyle_classpath, scalac_classpath):
self.checkstyle_classpath = checkstyle_classpath
self.scala_compiler_classpath = scalac_classpath
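# Illustrative sketch only: the "prefer the test flavour" idea behind dedup_sources in
# Project.configure_jvm, shown on plain (path, is_test) tuples instead of SourceSet
# objects.
def _example_dedup_prefer_test(entries):
  """Keeps one entry per path, preferring entries flagged as test paths.

  :param entries: iterable of (path, is_test) tuples
  """
  preferred = {}
  for path, is_test in entries:
    preferred[path] = preferred.get(path, False) or is_test
  return sorted(preferred.items())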
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and functions for working with chemical reactions.
From the `IUPAC Compendium of Chemical Terminology
<http://dx.doi.org/10.1351/goldbook>`_, a chemical reaction is "a process that
results in the interconversion of chemical species".
In ChemPy, a chemical reaction is called a Reaction object and is represented in
memory as an instance of the :class:`Reaction` class.
"""
import cython
import math
import numpy
import constants
from exception import ChemPyError
from species import Species
from kinetics import ArrheniusModel
################################################################################
class ReactionError(Exception):
"""
An exception class for exceptional behavior involving :class:`Reaction`
objects. In addition to a string `message` describing the exceptional
behavior, this class stores the `reaction` that caused the behavior.
"""
def __init__(self, reaction, message=''):
self.reaction = reaction
self.message = message
def __str__(self):
string = "Reaction: "+str(self.reaction) + '\n'
for reactant in self.reaction.reactants:
string += reactant.toAdjacencyList() + '\n'
for product in self.reaction.products:
string += product.toAdjacencyList() + '\n'
if self.message: string += "Message: "+self.message
return string
################################################################################
class Reaction:
"""
A chemical reaction.
=================== =========================== ============================
Attribute Type Description
=================== =========================== ============================
`index` :class:`int` A unique nonnegative integer index
`reactants` :class:`list` The reactant species (as :class:`Species` objects)
`products` :class:`list` The product species (as :class:`Species` objects)
`kinetics` :class:`KineticsModel` The kinetics model to use for the reaction
`reversible` ``bool`` ``True`` if the reaction is reversible, ``False`` if not
`transitionState` :class:`TransitionState` The transition state
    `thirdBody`         ``bool``                    ``True`` if the reaction kinetics imply a third body, ``False`` if not
=================== =========================== ============================
"""
def __init__(self, index=-1, reactants=None, products=None, kinetics=None, reversible=True, transitionState=None, thirdBody=False):
self.index = index
self.reactants = reactants
self.products = products
self.kinetics = kinetics
self.reversible = reversible
self.transitionState = transitionState
self.thirdBody = thirdBody
def __repr__(self):
"""
Return a string representation of the reaction, suitable for console output.
"""
return "<Reaction %i '%s'>" % (self.index, str(self))
def __str__(self):
"""
Return a string representation of the reaction, in the form 'A + B <=> C + D'.
"""
arrow = ' <=> '
if not self.reversible: arrow = ' -> '
return arrow.join([' + '.join([str(s) for s in self.reactants]), ' + '.join([str(s) for s in self.products])])
def hasTemplate(self, reactants, products):
"""
Return ``True`` if the reaction matches the template of `reactants`
and `products`, which are both lists of :class:`Species` objects, or
``False`` if not.
"""
return ((all([spec in self.reactants for spec in reactants]) and
all([spec in self.products for spec in products])) or
(all([spec in self.products for spec in reactants]) and
all([spec in self.reactants for spec in products])))
def getEnthalpyOfReaction(self, T):
"""
Return the enthalpy of reaction in J/mol evaluated at temperature
`T` in K.
"""
cython.declare(dHrxn=cython.double, reactant=Species, product=Species)
dHrxn = 0.0
for reactant in self.reactants:
dHrxn -= reactant.thermo.getEnthalpy(T)
for product in self.products:
dHrxn += product.thermo.getEnthalpy(T)
return dHrxn
def getEntropyOfReaction(self, T):
"""
Return the entropy of reaction in J/mol*K evaluated at temperature `T`
in K.
"""
cython.declare(dSrxn=cython.double, reactant=Species, product=Species)
dSrxn = 0.0
for reactant in self.reactants:
dSrxn -= reactant.thermo.getEntropy(T)
for product in self.products:
dSrxn += product.thermo.getEntropy(T)
return dSrxn
def getFreeEnergyOfReaction(self, T):
"""
Return the Gibbs free energy of reaction in J/mol evaluated at
temperature `T` in K.
"""
cython.declare(dGrxn=cython.double, reactant=Species, product=Species)
dGrxn = 0.0
for reactant in self.reactants:
dGrxn -= reactant.thermo.getFreeEnergy(T)
for product in self.products:
dGrxn += product.thermo.getFreeEnergy(T)
return dGrxn
def getEquilibriumConstant(self, T, type='Kc'):
"""
Return the equilibrium constant for the reaction at the specified
temperature `T` in K. The `type` parameter lets you specify the
quantities used in the equilibrium constant: ``Ka`` for activities,
``Kc`` for concentrations (default), or ``Kp`` for pressures. Note that
this function currently assumes an ideal gas mixture.
"""
cython.declare(dGrxn=cython.double, K=cython.double, C0=cython.double, P0=cython.double)
# Use free energy of reaction to calculate Ka
dGrxn = self.getFreeEnergyOfReaction(T)
K = numpy.exp(-dGrxn / constants.R / T)
# Convert Ka to Kc or Kp if specified
P0 = 1e5
if type == 'Kc':
# Convert from Ka to Kc; C0 is the reference concentration
C0 = P0 / constants.R / T
K *= C0 ** (len(self.products) - len(self.reactants))
elif type == 'Kp':
# Convert from Ka to Kp; P0 is the reference pressure
K *= P0 ** (len(self.products) - len(self.reactants))
elif type != 'Ka' and type != '':
            raise ChemPyError('Invalid type "%s" passed to Reaction.getEquilibriumConstant(); should be "Ka", "Kc", or "Kp".' % type)
return K
def getEnthalpiesOfReaction(self, Tlist):
"""
Return the enthalpies of reaction in J/mol evaluated at temperatures
`Tlist` in K.
"""
return numpy.array([self.getEnthalpyOfReaction(T) for T in Tlist], numpy.float64)
def getEntropiesOfReaction(self, Tlist):
"""
Return the entropies of reaction in J/mol*K evaluated at temperatures
`Tlist` in K.
"""
return numpy.array([self.getEntropyOfReaction(T) for T in Tlist], numpy.float64)
def getFreeEnergiesOfReaction(self, Tlist):
"""
Return the Gibbs free energies of reaction in J/mol evaluated at
temperatures `Tlist` in K.
"""
return numpy.array([self.getFreeEnergyOfReaction(T) for T in Tlist], numpy.float64)
def getEquilibriumConstants(self, Tlist, type='Kc'):
"""
Return the equilibrium constants for the reaction at the specified
temperatures `Tlist` in K. The `type` parameter lets you specify the
quantities used in the equilibrium constant: ``Ka`` for activities,
``Kc`` for concentrations (default), or ``Kp`` for pressures. Note that
this function currently assumes an ideal gas mixture.
"""
return numpy.array([self.getEquilibriumConstant(T, type) for T in Tlist], numpy.float64)
def getStoichiometricCoefficient(self, spec):
"""
Return the stoichiometric coefficient of species `spec` in the reaction.
The stoichiometric coefficient is increased by one for each time `spec`
appears as a product and decreased by one for each time `spec` appears
as a reactant.
"""
cython.declare(stoich=cython.int, reactant=Species, product=Species)
stoich = 0
for reactant in self.reactants:
if reactant is spec: stoich -= 1
for product in self.products:
if product is spec: stoich += 1
return stoich
def getRate(self, T, P, conc, totalConc=-1.0):
"""
Return the net rate of reaction at temperature `T` and pressure `P`. The
parameter `conc` is a map with species as keys and concentrations as
values. A reactant not found in the `conc` map is treated as having zero
concentration.
If passed a `totalConc`, it won't bother recalculating it.
"""
cython.declare(rateConstant=cython.double, equilibriumConstant=cython.double)
cython.declare(forward=cython.double, reverse=cython.double, speciesConc=cython.double)
# Calculate total concentration
if totalConc == -1.0:
totalConc=sum( conc.values() )
# Evaluate rate constant
rateConstant = self.kinetics.getRateCoefficient(T, P)
if self.thirdBody: rateConstant *= totalConc
# Evaluate equilibrium constant
equilibriumConstant = self.getEquilibriumConstant(T)
# Evaluate forward concentration product
forward = 1.0
for reactant in self.reactants:
if reactant in conc:
speciesConc = conc[reactant]
forward = forward * speciesConc
else:
forward = 0.0
break
# Evaluate reverse concentration product
reverse = 1.0
for product in self.products:
if product in conc:
speciesConc = conc[product]
reverse = reverse * speciesConc
else:
reverse = 0.0
break
# Return rate
return rateConstant * (forward - reverse / equilibriumConstant)
def generateReverseRateCoefficient(self, Tlist):
"""
Generate and return a rate coefficient model for the reverse reaction
using a supplied set of temperatures `Tlist`. Currently this only
works if the `kinetics` attribute is an :class:`ArrheniusModel` object.
"""
if not isinstance(self.kinetics, ArrheniusModel):
raise ReactionError("ArrheniusModel kinetics required to use Reaction.generateReverseRateCoefficient(), but %s object encountered." % (self.kinetics.__class__))
cython.declare(klist=numpy.ndarray, i=cython.int, kf=ArrheniusModel, kr=ArrheniusModel)
kf = self.kinetics
# Determine the values of the reverse rate coefficient k_r(T) at each temperature
klist = numpy.zeros_like(Tlist)
for i in range(len(Tlist)):
klist[i] = kf.getRateCoefficient(Tlist[i]) / self.getEquilibriumConstant(Tlist[i])
# Fit and return an Arrhenius model to the k_r(T) data
kr = ArrheniusModel()
kr.fitToData(Tlist, klist, kf.T0)
return kr
def calculateTSTRateCoefficients(self, Tlist, tunneling=''):
return numpy.array([self.calculateTSTRateCoefficient(T, tunneling) for T in Tlist], numpy.float64)
def calculateTSTRateCoefficient(self, T, tunneling=''):
"""
Evaluate the forward rate coefficient for the reaction with
corresponding transition state `TS` at temperature `T` in K using
(canonical) transition state theory. The TST equation is
.. math:: k(T) = \\kappa(T) \\frac{k_\\mathrm{B} T}{h} \\frac{Q^\\ddagger(T)}{Q^\\mathrm{A}(T) Q^\\mathrm{B}(T)} \\exp \\left( -\\frac{E_0}{k_\\mathrm{B} T} \\right)
where :math:`Q^\\ddagger` is the partition function of the transition state,
:math:`Q^\\mathrm{A}` and :math:`Q^\\mathrm{B}` are the partition function
of the reactants, :math:`E_0` is the ground-state energy difference from
the transition state to the reactants, :math:`T` is the absolute
temperature, :math:`k_\\mathrm{B}` is the Boltzmann constant, and :math:`h`
is the Planck constant. :math:`\\kappa(T)` is an optional tunneling
correction.
"""
cython.declare(E0=cython.double)
# Determine barrier height
E0 = self.transitionState.E0 - sum([spec.E0 for spec in self.reactants])
# Determine TST rate constant at each temperature
Qreac = 1.0
for spec in self.reactants: Qreac *= spec.states.getPartitionFunction(T) / (constants.R * T / 1e5)
Qts = self.transitionState.states.getPartitionFunction(T) / (constants.R * T / 1e5)
k = self.transitionState.degeneracy * (constants.kB * T / constants.h * Qts / Qreac * numpy.exp(-E0 / constants.R / T))
# Apply tunneling correction
if tunneling.lower() == 'wigner':
k *= self.calculateWignerTunnelingCorrection(T)
elif tunneling.lower() == 'eckart':
k *= self.calculateEckartTunnelingCorrection(T)
return k
def calculateWignerTunnelingCorrection(self, T):
"""
Calculate and return the value of the Wigner tunneling correction for
        the reaction with corresponding transition state `TS` at temperature
        `T` in K. The Wigner formula is
.. math:: \\kappa(T) = 1 + \\frac{1}{24} \\left( \\frac{h | \\nu_\\mathrm{TS} |}{ k_\\mathrm{B} T} \\right)^2
where :math:`h` is the Planck constant, :math:`\\nu_\\mathrm{TS}` is the
negative frequency, :math:`k_\\mathrm{B}` is the Boltzmann constant, and
:math:`T` is the absolute temperature.
The Wigner correction only requires information about the transition
state, not the reactants or products, but is also generally less
accurate than the Eckart correction.
"""
frequency = abs(self.transitionState.frequency)
return 1.0 + (constants.h * constants.c * 100.0 * frequency / constants.kB / T)**2 / 24.0
def calculateEckartTunnelingCorrection(self, T):
"""
Calculate and return the value of the Eckart tunneling correction for
        the reaction with corresponding transition state `TS` at temperature
        `T` in K. The Eckart formula is
.. math:: \\kappa(T) = e^{\\beta \\Delta V_1} \\int_0^\\infty
\\left[ 1 - \\frac{\\cosh (2 \\pi a - 2 \\pi b) + \\cosh (2 \\pi d)}{\\cosh (2 \\pi a + 2 \\pi b) + \\cosh (2 \\pi d)} \\right] e^{- \\beta E} \\ d(\\beta E)
where
.. math:: 2 \\pi a = \\frac{2 \\sqrt{\\alpha_1 \\xi}}{\\alpha_1^{-1/2} + \\alpha_2^{-1/2}}
.. math:: 2 \\pi b = \\frac{2 \\sqrt{| (\\xi - 1) \\alpha_1 + \\alpha_2|}}{\\alpha_1^{-1/2} + \\alpha_2^{-1/2}}
.. math:: 2 \\pi d = 2 \\sqrt{| \\alpha_1 \\alpha_2 - 4 \\pi^2 / 16|}
.. math:: \\alpha_1 = 2 \\pi \\frac{\\Delta V_1}{h | \\nu_\\mathrm{TS} |}
.. math:: \\alpha_2 = 2 \\pi \\frac{\\Delta V_2}{h | \\nu_\\mathrm{TS} |}
.. math:: \\xi = \\frac{E}{\\Delta V_1}
:math:`\\Delta V_1` and :math:`\\Delta V_2` are the thermal energy
difference between the transition state and the reactants and products,
respectively; :math:`\\nu_\\mathrm{TS}` is the negative frequency,
:math:`h` is the Planck constant, :math:`k_\\mathrm{B}` is the
Boltzmann constant, and :math:`T` is the absolute temperature. If
product data is not available, then it is assumed that
:math:`\\alpha_2 \\approx \\alpha_1`.
The Eckart correction requires information about the reactants as well
as the transition state. For best results, information about the
products should also be given. (The former is called the symmetric
Eckart correction, the latter the asymmetric Eckart correction.) This
extra information allows the Eckart correction to generally give a
        better result than the Wigner correction.
"""
cython.declare(frequency=cython.double, alpha1=cython.double, alpha2=cython.double, dV1=cython.double, dV2=cython.double)
cython.declare(kappa=cython.double, E_kT=numpy.ndarray, f=numpy.ndarray, integral=cython.double)
cython.declare(i=cython.int, tol=cython.double, fcrit=cython.double, E_kTmin=cython.double, E_kTmax=cython.double)
frequency = abs(self.transitionState.frequency)
# Calculate intermediate constants
dV1 = self.transitionState.E0 - sum([spec.E0 for spec in self.reactants]) # [=] J/mol
#if all([spec.states is not None for spec in self.products]):
# Product data available, so use asymmetric Eckart correction
dV2 = self.transitionState.E0 - sum([spec.E0 for spec in self.products]) # [=] J/mol
#else:
        ## Product data not available, so use symmetric Eckart correction
#dV2 = dV1
# Tunneling must be done in the exothermic direction, so swap if this
# isn't the case
if dV2 < dV1: dV1, dV2 = dV2, dV1
alpha1 = 2 * math.pi * dV1 / constants.Na / (constants.h * constants.c * 100.0 * frequency)
alpha2 = 2 * math.pi * dV2 / constants.Na / (constants.h * constants.c * 100.0 * frequency)
# Integrate to get Eckart correction
kappa = 0.0
# First we need to determine the lower and upper bounds at which to
# truncate the integral
tol = 1e-3
E_kT = numpy.arange(0.0, 1000.01, 0.1)
f = numpy.zeros_like(E_kT)
for j in range(len(E_kT)):
f[j] = self.__eckartIntegrand(E_kT[j], constants.R * T, dV1, alpha1, alpha2)
# Find the cutoff values of the integrand
fcrit = tol * f.max()
x = (f > fcrit).nonzero()
E_kTmin = E_kT[x[0][0]]
E_kTmax = E_kT[x[0][-1]]
# Now that we know the bounds we can formally integrate
import scipy.integrate
integral = scipy.integrate.quad(self.__eckartIntegrand, E_kTmin, E_kTmax,
args=(constants.R * T,dV1,alpha1,alpha2,))[0]
kappa = integral * math.exp(dV1 / constants.R / T)
# Return the calculated Eckart correction
return kappa
def __eckartIntegrand(self, E_kT, kT, dV1, alpha1, alpha2):
# Evaluate the integrand of the Eckart tunneling correction integral
# for the given values
# E_kT = energy scaled by kB * T (dimensionless)
# kT = Boltzmann constant * T [=] J/mol
# dV1 = energy difference between TS and reactants [=] J/mol
# alpha1, alpha2 dimensionless
cython.declare(xi=cython.double, twopia=cython.double, twopib=cython.double, twopid=cython.double, kappaE=cython.double)
from math import sqrt, exp, cosh, pi
xi = E_kT * kT / dV1
# 2 * pi * a
twopia = 2*sqrt(alpha1*xi)/(1/sqrt(alpha1)+1/sqrt(alpha2))
# 2 * pi * b
twopib = 2*sqrt(abs((xi-1)*alpha1+alpha2))/(1/sqrt(alpha1)+1/sqrt(alpha2))
# 2 * pi * d
twopid = 2*sqrt(abs(alpha1*alpha2-4*pi*pi/16))
# We use different approximate versions of the integrand to avoid
# domain errors when evaluating cosh(x) for large x
# If all of 2*pi*a, 2*pi*b, and 2*pi*d are sufficiently small,
# compute as normal
if twopia < 200 and twopib < 200 and twopid < 200:
kappaE = 1 - (cosh(twopia-twopib)+cosh(twopid)) / (cosh(twopia+twopib)+cosh(twopid))
# If one of the following is true, then we can eliminate most of the
# exponential terms after writing out the definition of cosh and
# dividing all terms by exp(2*pi*d)
elif twopia-twopib-twopid > 10 or twopib-twopia-twopid > 10 or twopia+twopib-twopid > 10:
kappaE = 1 - exp(-2*twopia) - exp(-2*twopib) - exp(-twopia-twopib+twopid) - exp(-twopia-twopib-twopid)
# Otherwise expand each cosh(x) in terms of its exponentials and divide
# all terms by exp(2*pi*d) before evaluating
else:
kappaE = 1 - (exp(twopia-twopib-twopid) + exp(-twopia+twopib-twopid) + 1 + exp(-2*twopid)) / (exp(twopia+twopib-twopid) + exp(-twopia-twopib-twopid) + 1 + exp(-2*twopid))
# Complete and return integrand
return exp(-E_kT) * kappaE
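# Illustrative sketch only (the numbers are hypothetical): the Ka -> Kc conversion
# performed in Reaction.getEquilibriumConstant(). C0 = P0 / (R*T) is the reference
# concentration and dn is the change in moles (products minus reactants).
def _example_Ka_to_Kc(dGrxn=-20000.0, T=1000.0, dn=-1):
    Ka = math.exp(-dGrxn / constants.R / T)
    C0 = 1.0e5 / constants.R / T        # reference pressure P0 = 1e5 Pa
    return Ka * C0 ** dn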
################################################################################
class ReactionModel:
"""
A chemical reaction model, composed of a list of species and a list of
reactions.
=============== =========================== ================================
Attribute Type Description
=============== =========================== ================================
`species` :class:`list` The species involved in the reaction model
`reactions` :class:`list` The reactions comprising the reaction model
`stoichiometry` :class:`numpy.ndarray` The stoichiometric matrix for the reaction model, stored as a sparse matrix
=============== =========================== ================================
"""
def __init__(self, species=None, reactions=None):
self.species = species or []
self.reactions = reactions or []
self.stoichiometry = None
def generateStoichiometryMatrix(self):
"""
Generate the stoichiometry matrix corresponding to the current
reaction system. The stoichiometry matrix is defined such that the
rows correspond to the `index` attribute of each species object, while
the columns correspond to the `index` attribute of each reaction object.
The generated matrix is not returned, but is instead stored in the
`stoichiometry` attribute for future use.
"""
cython.declare(rxn=Reaction, spec=Species, i=cython.int, j=cython.int, nu=cython.int)
from scipy import sparse
# Use dictionary-of-keys format to efficiently assemble stoichiometry matrix
self.stoichiometry = sparse.dok_matrix((len(self.species), len(self.reactions)), numpy.float64)
for rxn in self.reactions:
j = rxn.index - 1
# Only need to iterate over the species involved in the reaction,
# not all species in the reaction model
for spec in rxn.reactants:
i = spec.index - 1
nu = rxn.getStoichiometricCoefficient(spec)
if nu != 0: self.stoichiometry[i,j] = nu
for spec in rxn.products:
i = spec.index - 1
nu = rxn.getStoichiometricCoefficient(spec)
if nu != 0: self.stoichiometry[i,j] = nu
# Convert to compressed-sparse-row format for efficient use in matrix operations
        self.stoichiometry = self.stoichiometry.tocsr()
def getReactionRates(self, T, P, Ci):
"""
        Return an array of reaction rates for each reaction in the model. The
        reaction's one-based `index` attribute determines its position in the
        returned array.
"""
cython.declare(rxnRates=numpy.ndarray, rxn=Reaction, j=cython.int)
rxnRates = numpy.zeros(len(self.reactions), numpy.float64)
for rxn in self.reactions:
j = rxn.index - 1
rxnRates[j] = rxn.getRate(T, P, Ci)
return rxnRates
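# Illustrative sketch only: the stoichiometry bookkeeping used by
# ReactionModel.generateStoichiometryMatrix() for a single hypothetical reaction
# A -> B, where species and reaction `index` attributes are one-based.
def _example_stoichiometry_matrix():
    from scipy import sparse
    matrix = sparse.dok_matrix((2, 1), dtype=numpy.float64)
    matrix[0, 0] = -1    # species index 1 (A) is consumed by reaction index 1
    matrix[1, 0] = +1    # species index 2 (B) is produced by reaction index 1
    return matrix.tocsr()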
|
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
CONF = config.CONF
class VolumeTypesClientXML(rest_client.RestClient):
"""
Client class to send CRUD Volume Types API requests to a Cinder endpoint
"""
TYPE = "xml"
def __init__(self, auth_provider):
super(VolumeTypesClientXML, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.compute.build_interval
self.build_timeout = CONF.compute.build_timeout
def _parse_volume_type(self, body):
vol_type = dict((attr, body.get(attr)) for attr in body.keys())
for child in body.getchildren():
tag = child.tag
if tag.startswith("{"):
ns, tag = tag.split("}", 1)
if tag == 'extra_specs':
vol_type['extra_specs'] = dict((meta.get('key'),
meta.text)
for meta in list(child))
else:
vol_type[tag] = common.xml_to_json(child)
return vol_type
def _parse_volume_type_extra_specs(self, body):
extra_spec = dict((attr, body.get(attr)) for attr in body.keys())
for child in body.getchildren():
tag = child.tag
if tag.startswith("{"):
ns, tag = tag.split("}", 1)
else:
extra_spec[tag] = common.xml_to_json(child)
return extra_spec
def list_volume_types(self, params=None):
"""List all the volume_types created."""
url = 'types'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = etree.fromstring(body)
volume_types = []
if body is not None:
volume_types += [self._parse_volume_type(vol)
for vol in list(body)]
self.expected_success(200, resp.status)
return resp, volume_types
def get_volume_type(self, type_id):
"""Returns the details of a single volume_type."""
url = "types/%s" % str(type_id)
resp, body = self.get(url)
body = etree.fromstring(body)
self.expected_success(200, resp.status)
return resp, self._parse_volume_type(body)
def create_volume_type(self, name, **kwargs):
"""
Creates a new Volume_type.
name(Required): Name of volume_type.
Following optional keyword arguments are accepted:
extra_specs: A dictionary of values to be used as extra_specs.
"""
vol_type = common.Element("volume_type", xmlns=common.XMLNS_11)
if name:
vol_type.add_attr('name', name)
extra_specs = kwargs.get('extra_specs')
if extra_specs:
_extra_specs = common.Element('extra_specs')
vol_type.append(_extra_specs)
for key, value in extra_specs.items():
spec = common.Element('extra_spec')
spec.add_attr('key', key)
spec.append(common.Text(value))
_extra_specs.append(spec)
resp, body = self.post('types', str(common.Document(vol_type)))
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(200, resp.status)
return resp, body
def delete_volume_type(self, type_id):
"""Deletes the Specified Volume_type."""
resp, body = self.delete("types/%s" % str(type_id))
self.expected_success(202, resp.status)
def list_volume_types_extra_specs(self, vol_type_id, params=None):
"""List all the volume_types extra specs created."""
url = 'types/%s/extra_specs' % str(vol_type_id)
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = etree.fromstring(body)
extra_specs = []
if body is not None:
extra_specs += [self._parse_volume_type_extra_specs(spec)
for spec in list(body)]
self.expected_success(200, resp.status)
return resp, extra_specs
def get_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
"""Returns the details of a single volume_type extra spec."""
url = "types/%s/extra_specs/%s" % (str(vol_type_id),
str(extra_spec_name))
resp, body = self.get(url)
body = etree.fromstring(body)
self.expected_success(200, resp.status)
return resp, self._parse_volume_type_extra_specs(body)
def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
"""
Creates a new Volume_type extra spec.
vol_type_id: Id of volume_type.
        extra_spec: A dictionary of values to be used as extra specs.
"""
url = "types/%s/extra_specs" % str(vol_type_id)
extra_specs = common.Element("extra_specs", xmlns=common.XMLNS_11)
if extra_spec:
if isinstance(extra_spec, list):
extra_specs.append(extra_spec)
else:
for key, value in extra_spec.items():
spec = common.Element('extra_spec')
spec.add_attr('key', key)
spec.append(common.Text(value))
extra_specs.append(spec)
else:
extra_specs = None
resp, body = self.post(url, str(common.Document(extra_specs)))
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(200, resp.status)
return resp, body
def delete_volume_type_extra_specs(self, vol_id, extra_spec_name):
"""Deletes the Specified Volume_type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
(str(vol_id)), str(extra_spec_name)))
self.expected_success(202, resp.status)
return resp, body
def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
extra_spec):
"""
Update a volume_type extra spec.
vol_type_id: Id of volume_type.
extra_spec_name: Name of the extra spec to be updated.
        extra_spec: A dictionary with extra_spec_name as the key and the
        updated value as its value.
"""
url = "types/%s/extra_specs/%s" % (str(vol_type_id),
str(extra_spec_name))
extra_specs = common.Element("extra_specs", xmlns=common.XMLNS_11)
if extra_spec is not None:
for key, value in extra_spec.items():
spec = common.Element('extra_spec')
spec.add_attr('key', key)
spec.append(common.Text(value))
extra_specs.append(spec)
resp, body = self.put(url, str(common.Document(extra_specs)))
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(200, resp.status)
return resp, body
def is_resource_deleted(self, id):
try:
self.get_volume_type(id)
except exceptions.NotFound:
return True
return False
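# Illustrative usage sketch only (the client instance, the type name and the spec
# values are hypothetical, as is the 'id' key in the parsed response): create a
# volume type with an extra spec, then update that spec through the same client.
def _example_volume_type_flow(client):
    resp, vol_type = client.create_volume_type('gold', extra_specs={'tier': 'ssd'})
    resp, updated = client.update_volume_type_extra_specs(
        vol_type['id'], 'tier', {'tier': 'nvme'})
    return updated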
|
|
# module_eigrp.py
#
# Copyright 2009 Daniel Mende <dmende@ernw.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import signal
import threading
import socket
import struct
import time
import cmd
#import md5
import dnet
import dpkt
import pcap
import gobject
import gtk
import gtk.glade
EIGRP_PROTOCOL_NUMBER = 0x58
EIGRP_MULTICAST_ADDRESS = "224.0.0.10"
EIGRP_MULTICAST_MAC = "01:00:5e:00:00:0a"
DEFAULT_HOLD_TIME = 5
SO_BINDTODEVICE = 25
### HELPER_FUNCTIONS ###
def ichecksum_func(data, sum=0):
''' Compute the Internet Checksum of the supplied data. The checksum is
initialized to zero. Place the return value in the checksum field of a
packet. When the packet is received, check the checksum, by passing
in the checksum field of the packet and the data. If the result is zero,
then the checksum has not detected an error.
'''
# make 16 bit words out of every two adjacent 8 bit words in the packet
# and add them up
for i in xrange(0,len(data),2):
if i + 1 >= len(data):
sum += ord(data[i]) & 0xFF
else:
w = ((ord(data[i]) << 8) & 0xFF00) + (ord(data[i+1]) & 0xFF)
sum += w
# take only 16 bits out of the 32 bit sum and add up the carries
while (sum >> 16) > 0:
sum = (sum & 0xFFFF) + (sum >> 16)
# one's complement the result
sum = ~sum
return sum & 0xFFFF
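# Illustrative sketch only (the header bytes are hypothetical): compute the checksum
# over a header whose checksum field is zeroed, exactly as eigrp_packet.render()
# does below, then place the result in that field before sending.
def _example_checksum():
    header_with_zeroed_checksum = "\x02\x05\x00\x00" + "\x00" * 16
    return ichecksum_func(header_with_zeroed_checksum)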
### EIGRP_PACKET_STRUCTURES ###
class eigrp_address:
def __init__(self, addr, len=4):
self.addr = dnet.ip_aton(addr)
self.len = len
def render(self):
return self.addr + struct.pack("!B", self.len)
class eigrp_packet:
EIGRP_VERSION = 2
EIGRP_OPTCODE_UPDATE = 1
EIGRP_OPTCODE_RESERVED = 2
EIGRP_OPTCODE_QUERY = 3
EIGRP_OPTCODE_REPLY = 4
EIGRP_OPTCODE_HELLO = 5
EIGRP_FLAGS_INIT = 0x00000001
EIGRP_FLAGS_COND_RECV = 0x00000008
def __init__(self, optcode = None, flags = None, seq_num = None, ack_num = None, as_num = None, data = None):
self.optcode = optcode
self.checksum = 0
self.flags = flags
self.seq_num = seq_num
self.ack_num = ack_num
self.as_num = as_num
self.data = data
def parse(self, data):
payload = data[20:]
self.optcode, self.checksum, self.flags, self.seq_num, self.ack_num, self.as_num = struct.unpack("!xBHIIII", data[:20])
return payload
def render(self):
data = ""
auth = None
auth_pos = None
if self.data:
for i in self.data:
if i.__class__ == eigrp_authentication:
auth = i
auth_pos = len(data)
else:
data += i.render()
if auth:
#data = data[0:auth_pos] + auth.render(struct.pack("!BBHIIII", self.EIGRP_VERSION, self.optcode, self.checksum, self.flags, self.seq_num, self.ack_num, self.as_num) + data) + data[auth_pos:]
data = data[0:auth_pos] + auth.render(struct.pack("!BBIIII", self.EIGRP_VERSION, self.optcode, self.flags, self.seq_num, self.ack_num, self.as_num)) + data[auth_pos:]
#data = data[0:auth_pos] + auth.render(struct.pack("!BIII", self.optcode, self.as_num, self.flags, self.seq_num) ) + data[auth_pos:]
ret = struct.pack("!BBHIIII", self.EIGRP_VERSION, self.optcode, 0, self.flags, self.seq_num, self.ack_num, self.as_num)
self.checksum = ichecksum_func(ret + data)
return struct.pack("!BBHIIII", self.EIGRP_VERSION, self.optcode, self.checksum, self.flags, self.seq_num, self.ack_num, self.as_num) + data
class eigrp_tlv:
EIGRP_TYPE_PARAM = 0x0001
EIGRP_TYPE_AUTH = 0x0002
EIGRP_TYPE_SEQENCE = 0x0003
EIGRP_TYPE_VERSION = 0x0004
EIGRP_TYPE_NEXT_MULTICAST_SEQ = 0x0005
EIGRP_TYPE_INTERNAL_ROUTE = 0x0102
EIGRP_TYPE_EXTERNAL_ROUTE = 0x0103
def __init__(self, type=None):
self.type = type
self.len = None
self.data = None
def parse(self, data):
self.type, self.len = struct.unpack("!HH", data[:4])
self.data = data[4:self.len]
if self.len >= len(data):
return False
else:
return data[self.len:]
def render(self, data=None):
if data and not self.data:
return struct.pack("!HH", self.type, len(data) + 4) + data
if not data and self.data:
return struct.pack("!HH", self.type, self.len) + self.data
class eigrp_param(eigrp_tlv):
def __init__(self, k1, k2, k3, k4, k5, hold_time):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_PARAM)
self.k1 = k1
self.k2 = k2
self.k3 = k3
self.k4 = k4
self.k5 = k5
self.hold_time = hold_time
def render(self):
return eigrp_tlv.render(self, struct.pack("!BBBBBxH", self.k1, self.k2, self.k3, self.k4, self.k5, self.hold_time))
class eigrp_authentication(eigrp_tlv):
def __init__(self, key, hash="md5", key_id = 1):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_AUTH)
self.key = key
self.hash = hash
self.key_id = key_id
def render(self, data):
#if self.hash == "md5":
#m = md5.new()
#m.update(self.key)
#m.update(data)
##m.update(self.key)
#return eigrp_tlv.render(self, struct.pack("!4BI12B", 0x00, 0x02, 0x00, 0x10, self.key_id, 0x00, 0x00, 0x00, 0x00 ,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) + m.digest())
#else:
return ""
class eigrp_sequence(eigrp_tlv):
def __init__(self, addr):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_SEQENCE)
self.addr = addr
def render(self):
        return eigrp_tlv.render(self, self.addr.render())
class eigrp_next_multicast_seq(eigrp_tlv):
def __init__(self, seq):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_NEXT_MULTICAST_SEQ)
self.seq = seq
def render(self):
return eigrp_tlv.render(self, struct.pack("!I", self.seq))
class eigrp_version(eigrp_tlv):
def __init__(self, ios_ver=0xc04, eigrp_ver=0x102):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_VERSION)
self.ios_ver = ios_ver
self.eigrp_ver = eigrp_ver
def render(self):
return eigrp_tlv.render(self, struct.pack("!HH", self.ios_ver, self.eigrp_ver))
class eigrp_internal_route(eigrp_tlv):
def __init__(self, next_hop = None, delay = None, bandwidth = None, mtu = None, hop_count = None, reliability = None, load = None, prefix = None, dest = None):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_INTERNAL_ROUTE)
if next_hop:
self.next_hop = dnet.ip_aton(next_hop)
else:
self.next_hop = next_hop
self.delay = delay
self.bandwidth = bandwidth
self.mtu = mtu
self.hop_count = hop_count
self.reliability = reliability
self.load = load
self.prefix = prefix
if dest:
self.dest = dnet.ip_aton(dest)
else:
self.dest = dest
def render(self):
mtu_and_hop = (self.mtu << 8) + self.hop_count
dest = ""
for x in xrange(0, self.prefix / 8):
dest += self.dest[x:x+1]
return eigrp_tlv.render(self, self.next_hop + struct.pack("!IIIBBxxB", self.delay, self.bandwidth, mtu_and_hop, self.reliability, self.load, self.prefix) + dest)
def parse(self, data):
self.next_hop = dnet.ip_ntoa(data[:4])
(self.delay, self.bandwidth, mtu_and_hop, self.reliability, self.load, self.prefix) = struct.unpack("!IIIBBxxB", data[4:21])
self.mtu = mtu_and_hop >> 8
self.hop_count = mtu_and_hop & 0x000000ff
self.dest = dnet.ip_ntoa(data[21:] + '\0' * (25 - len(data)))
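# Illustrative sketch only (addresses and metrics are hypothetical): render() above
# truncates the destination to prefix/8 bytes, so a /8 route carries only the first
# byte of the destination address.
def _example_internal_route():
    route = eigrp_internal_route(next_hop="192.168.1.1", delay=100, bandwidth=10000,
                                 mtu=1500, hop_count=1, reliability=255, load=1,
                                 prefix=8, dest="10.0.0.0")
    return route.render()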
class eigrp_external_route(eigrp_tlv):
EIGRP_EXTERNAL_PROTO_OSPF = 6
def __init__(self, next_hop = None, originating_router = None, originating_as = None, arbitrary_tag = None, external_metric = None, external_proto = None, flags = None, delay = None, bandwidth = None, mtu = None, hop_count = None, reliability = None, load = None, prefix = None, dest = None):
eigrp_tlv.__init__(self, eigrp_tlv.EIGRP_TYPE_EXTERNAL_ROUTE)
if next_hop:
self.next_hop = dnet.ip_aton(next_hop)
else:
self.next_hop = next_hop
if originating_router:
self.originating_router = dnet.ip_aton(originating_router)
else:
self.originating_router = originating_router
self.originating_as = originating_as
self.arbitrary_tag = arbitrary_tag
self.external_metric = external_metric
self.external_proto = external_proto
self.flags = flags
self.delay = delay
self.bandwidth = bandwidth
self.mtu = mtu
self.hop_count = hop_count
self.reliability = reliability
self.load = load
self.prefix = prefix
if dest:
self.dest = dnet.ip_aton(dest)
else:
self.dest = dest
def render(self):
mtu_and_hop = (self.mtu << 8) + self.hop_count
dest = ""
for x in xrange(0, self.prefix / 8):
dest += self.dest[x:x+1]
return eigrp_tlv.render(self, self.next_hop + self.originating_router + struct.pack("!IIIxxBBIIIBBxxB", self.originating_as, self.arbitrary_tag, self.external_metric, self.external_proto, self.flags, self.delay, self.bandwidth, mtu_and_hop, self.reliability, self.load, self.prefix) + dest)
def parse(self, data):
self.next_hop = dnet.ip_ntoa(data[:4])
self.originating_router = dnet.ip_ntoa(data[4:8])
(self.originating_as, self.arbitrary_tag, self.external_metric, self.external_proto, self.flags, self.delay, self.bandwidth, mtu_and_hop, self.reliability, self.load, self.prefix) = struct.unpack("!IIIxxBBIIIBBxxB", data[8:41])
self.mtu = mtu_and_hop >> 8
self.hop_count = mtu_and_hop & 0x000000ff
self.dest = dnet.ip_ntoa(data[41:] + '\0' * (45 - len(data)))
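# Illustrative sketch only (the AS number is hypothetical): assembling and rendering
# a hello packet the same way eigrp_hello_thread.hello() does below.
def _example_hello_packet(as_num=100):
    params = eigrp_param(1, 0, 1, 0, 0, 15)
    version = eigrp_version()
    packet = eigrp_packet(eigrp_packet.EIGRP_OPTCODE_HELLO, 0, 0, 0, as_num, [params, version])
    return packet.render()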
### THREAD_CLASSES ###
class eigrp_hello_thread(threading.Thread):
def __init__(self, parent, interface, as_num, auth=None):
threading.Thread.__init__(self)
self.parent = parent
self.interface = interface
self.running = True
self.as_num = as_num
self.auth = auth
def send_multicast(self, data):
ip_hdr = dpkt.ip.IP( ttl=2,
p=dpkt.ip.IP_PROTO_EIGRP,
src=self.parent.address,
dst=dnet.ip_aton(EIGRP_MULTICAST_ADDRESS),
data=data
)
ip_hdr.len += len(ip_hdr.data)
eth_hdr = dpkt.ethernet.Ethernet( dst=dnet.eth_aton(EIGRP_MULTICAST_MAC),
src=self.parent.mac,
type=dpkt.ethernet.ETH_TYPE_IP,
data=str(ip_hdr)
)
self.parent.dnet.send(str(eth_hdr))
def hello(self):
timer = DEFAULT_HOLD_TIME
while self.running:
if timer == DEFAULT_HOLD_TIME:
timer = 0
params = eigrp_param(1, 0, 1, 0, 0, 15)
version = eigrp_version(self.parent.ios_ver, self.parent.eigrp_ver) #(0xc02, 0x300)
args = [params, version]
if self.auth:
args.insert(0, self.auth)
msg = eigrp_packet(eigrp_packet.EIGRP_OPTCODE_HELLO, 0, 0, 0, self.as_num, args)
data = msg.render()
if not self.parent.spoof:
self.send_multicast(data)
else:
ip_hdr = dpkt.ip.IP( ttl=2,
p=dpkt.ip.IP_PROTO_EIGRP,
src=self.parent.spoof,
dst=dnet.ip_aton(EIGRP_MULTICAST_ADDRESS),
data=data
)
ip_hdr.len += len(ip_hdr.data)
eth_hdr = dpkt.ethernet.Ethernet( dst=dnet.eth_aton(EIGRP_MULTICAST_MAC),
src=self.parent.mac,
type=dpkt.ethernet.ETH_TYPE_IP,
data=str(ip_hdr)
)
self.parent.dnet.send(str(eth_hdr))
timer += 1
time.sleep(1)
def run(self):
self.hello()
self.parent.log("EIGRP: Hello thread on %s terminated" % (self.interface))
def quit(self):
self.running = False
class eigrp_peer(threading.Thread):
def __init__(self, parent, mac, peer, as_num, auth=None):
threading.Thread.__init__(self)
self.parent = parent
self.sem = threading.Semaphore()
self.mac = mac
self.peer = peer
self.as_num = as_num
self.sock = None
self.msg = eigrp_packet(eigrp_packet.EIGRP_OPTCODE_UPDATE, eigrp_packet.EIGRP_FLAGS_INIT, 0, 0, self.as_num, None)
self.running = True
self.seq_num = 0
self.auth = auth
def send_unicast(self, mac, ip, data):
ip_hdr = dpkt.ip.IP( ttl=2,
p=dpkt.ip.IP_PROTO_EIGRP,
src=self.parent.address,
dst=ip,
data=data
)
ip_hdr.len += len(ip_hdr.data)
eth_hdr = dpkt.ethernet.Ethernet( dst=mac,
src=self.parent.mac,
type=dpkt.ethernet.ETH_TYPE_IP,
data=str(ip_hdr)
)
self.parent.dnet.send(str(eth_hdr))
def send(self):
while self.running:
if self.parent.hello_thread and self.parent.hello_thread.is_alive() or self.parent.goodbye_thread and self.parent.goodbye_thread.is_alive():
self.sem.acquire()
if self.msg:
if self.auth:
self.msg.data.insert(0, self.auth)
if not self.msg.optcode == eigrp_packet.EIGRP_OPTCODE_HELLO:
self.msg.seq_num = self.seq_num
self.seq_num += 1
data = self.msg.render()
if not self.parent.spoof:
self.send_unicast(self.mac, self.peer, data)
else:
ip_hdr = dpkt.ip.IP( ttl=2,
p=dpkt.ip.IP_PROTO_EIGRP,
src=self.parent.spoof,
dst=self.peer,
data=data
)
ip_hdr.len += len(ip_hdr.data)
eth_hdr = dpkt.ethernet.Ethernet( dst=self.mac,
src=self.parent.mac,
type=dpkt.ethernet.ETH_TYPE_IP,
data=str(ip_hdr)
)
self.parent.dnet.send(str(eth_hdr))
self.msg = None
self.sem.release()
time.sleep(0.1)
else:
time.sleep(1)
def input(self, data):
packet = eigrp_packet()
payload = packet.parse(data)
if not packet.optcode == eigrp_packet.EIGRP_OPTCODE_HELLO:
reply = eigrp_packet(eigrp_packet.EIGRP_OPTCODE_HELLO, 0, 0, packet.seq_num, self.as_num)
self.seq_num = packet.seq_num + 1
self.sem.acquire()
self.msg = reply
self.sem.release()
if packet.optcode == eigrp_packet.EIGRP_OPTCODE_UPDATE and len(payload) > 4:
tlv = eigrp_tlv()
while payload:
payload = tlv.parse(payload)
if tlv.type == eigrp_tlv.EIGRP_TYPE_INTERNAL_ROUTE:
route = eigrp_internal_route()
route.parse(tlv.data)
if route.next_hop == "0.0.0.0":
route.next_hop = dnet.ip_ntoa(self.peer)
route_str = route.dest + "/" + str(route.prefix) + " via " + route.next_hop
for i in xrange(self.parent.treestore.iter_n_children(self.iter)):
(test_str,) = self.parent.treestore.get(self.parent.treestore.iter_nth_child(self.iter, i), self.parent.TREE_AS_ROW)
if test_str == route_str:
return
self.parent.treestore.append(self.iter, ["INTERNAL_ROUTE", route_str])
if tlv.type == eigrp_tlv.EIGRP_TYPE_EXTERNAL_ROUTE:
route = eigrp_external_route()
route.parse(tlv.data)
if route.next_hop == "0.0.0.0":
route.next_hop = dnet.ip_ntoa(self.peer)
route_str = route.dest + "/" + str(route.prefix) + " via " + route.next_hop + " on AS# " + str(route.originating_as) + ", type " + str(route.external_proto)
for i in xrange(self.parent.treestore.iter_n_children(self.iter)):
(test_str,) = self.parent.treestore.get(self.parent.treestore.iter_nth_child(self.iter, i), self.parent.TREE_AS_ROW)
if test_str == route_str:
return
self.parent.treestore.append(self.iter, ["EXTERNAL_ROUTE", route_str])
def update(self, msg):
self.sem.acquire()
self.msg = msg
self.sem.release()
def run(self):
self.iter = self.parent.treestore.append(None, [dnet.ip_ntoa(self.peer), str(self.as_num)])
self.send()
self.parent.log("EIGRP: Peer " + socket.inet_ntoa(self.peer) + " terminated")
if self.parent.treestore:
if self.parent.treestore.iter_is_valid(self.iter):
self.parent.treestore.remove(self.iter)
del self.parent.peers[self.peer]
def quit(self):
self.running = False
class eigrp_goodbye(threading.Thread):
def __init__(self, parent, peer, as_num):
threading.Thread.__init__(self)
self.parent = parent
self.peer = peer
self.as_num = as_num
self.running = True
def run(self):
params = eigrp_param(255, 255, 255, 255, 255, 15)
version = eigrp_version() #(0xc02, 0x300)
args = [params, version]
msg = eigrp_packet(eigrp_packet.EIGRP_OPTCODE_HELLO, 0, 0, 0, self.as_num, args)
while self.running:
self.parent.peers[self.peer].update(msg)
self.parent.goodbye_progressbar.pulse()
time.sleep(1)
self.parent.log("EIGRP: Goodbye thread terminated")
def quit(self):
self.running = False
### MODULE_CLASS ###
class mod_class(object):
TREE_HOST_ROW = 0
TREE_AS_ROW = 1
def __init__(self, parent, platform):
self.parent = parent
self.platform = platform
self.name = "eigrp"
self.group = "ROUTING"
self.gladefile = "/modules/module_eigrp.glade"
self.treestore = gtk.TreeStore(str, str)
self.filter = False
self.hello_thread = None
self.goodbye_thread = None
self.peers = None
#(0xc02, 0x300)
self.ios_ver = 0xc04
self.eigrp_ver = 0x102
def start_mod(self):
self.hello_thread = None
self.goodbye_thread = None
self.spoof = False
self.auth = None
self.as_num = None
self.peers = {}
self.listen_for_auth = False
def shut_mod(self):
if self.hello_thread:
if self.hello_thread.running:
self.hello_thread.quit()
if self.goodbye_thread:
if self.goodbye_thread.running:
self.goodbye_thread.quit()
if self.peers:
for i in self.peers:
self.peers[i].quit()
if self.filter:
self.log("EIGRP: Removing lokal packet filter for EIGRP")
if self.platform == "Linux":
os.system("iptables -D INPUT -i %s -p %i -j DROP" % (self.interface, dpkt.ip.IP_PROTO_EIGRP))
elif self.platform == "Darwin":
os.system("ipfw -q delete 31335")
elif self.platform == "Windows":
os.system("netsh advfirewall firewall del rule name=eigrp")
else:
self.fw.delete(self.ospf_filter)
self.filter = False
self.treestore.clear()
def get_root(self):
self.glade_xml = gtk.glade.XML(self.parent.data_dir + self.gladefile)
dic = { "on_hello_togglebutton_toggled" : self.on_hello_togglebutton_toggled,
"on_spoof_togglebutton_toggled" : self.on_spoof_togglebutton_toggled,
"on_goodbye_button_clicked" : self.on_goodbye_button_clicked,
"on_add_button_clicked" : self.on_add_button_clicked,
"on_del_button_clicked" : self.on_del_button_clicked,
"on_clear_button_clicked" : self.on_clear_button_clicked,
"on_update_button_clicked" : self.on_update_button_clicked,
"on_stop_button_clicked" : self.on_stop_button_clicked
}
self.glade_xml.signal_autoconnect(dic)
self.hello_togglebutton = self.glade_xml.get_widget("hello_togglebutton")
self.spoof_togglebutton = self.glade_xml.get_widget("spoof_togglebutton")
self.interface_entry = self.glade_xml.get_widget("interface_entry")
self.as_spinbutton = self.glade_xml.get_widget("as_spinbutton")
self.spoof_entry = self.glade_xml.get_widget("spoof_entry")
self.update_textview = self.glade_xml.get_widget("update_textview")
self.treeview = self.glade_xml.get_widget("neighbor_treeview")
self.treeview.set_model(self.treestore)
self.treeview.set_headers_visible(True)
column = gtk.TreeViewColumn()
column.set_title("Host")
render_text = gtk.CellRendererText()
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.TREE_HOST_ROW)
self.treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("AS")
render_text = gtk.CellRendererText()
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.TREE_AS_ROW)
self.treeview.append_column(column)
self.treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.goodbye_window = self.glade_xml.get_widget("goodbye_window")
#self.goodbye_window.set_parent(self.parent.window)
self.goodbye_label = self.glade_xml.get_widget("goodbye_label")
self.goodbye_progressbar = self.glade_xml.get_widget("goodbye_progressbar")
self.notebook = self.glade_xml.get_widget("notebook")
self.next_hop_entry = self.glade_xml.get_widget("next_hop_entry")
self.delay_spinbutton = self.glade_xml.get_widget("delay_spinbutton")
self.bandwidth_spinbutton = self.glade_xml.get_widget("bandwidth_spinbutton")
self.mtu_spinbutton = self.glade_xml.get_widget("mtu_spinbutton")
self.hop_count_spinbutton = self.glade_xml.get_widget("hop_count_spinbutton")
self.reliability_spinbutton = self.glade_xml.get_widget("reliability_spinbutton")
self.load_spinbutton = self.glade_xml.get_widget("load_spinbutton")
self.prefix_spinbutton = self.glade_xml.get_widget("prefix_spinbutton")
self.destination_entry = self.glade_xml.get_widget("destination_entry")
self.next_hop_entry1 = self.glade_xml.get_widget("next_hop_entry1")
self.delay_spinbutton1 = self.glade_xml.get_widget("delay_spinbutton1")
self.bandwidth_spinbutton1 = self.glade_xml.get_widget("bandwidth_spinbutton1")
self.mtu_spinbutton1 = self.glade_xml.get_widget("mtu_spinbutton1")
self.hop_count_spinbutton1 = self.glade_xml.get_widget("hop_count_spinbutton1")
self.reliability_spinbutton1 = self.glade_xml.get_widget("reliability_spinbutton1")
self.load_spinbutton1 = self.glade_xml.get_widget("load_spinbutton1")
self.prefix_spinbutton1 = self.glade_xml.get_widget("prefix_spinbutton1")
self.destination_entry1 = self.glade_xml.get_widget("destination_entry1")
self.orig_router_entry = self.glade_xml.get_widget("orig_router_entry")
self.orig_as_spinbutton = self.glade_xml.get_widget("orig_as_spinbutton")
self.external_metric_spinbutton = self.glade_xml.get_widget("external_metric_spinbutton")
self.external_id_spinbutton = self.glade_xml.get_widget("external_id_spinbutton")
return self.glade_xml.get_widget("root")
def log(self, msg):
self.__log(msg, self.name)
def set_log(self, log):
self.__log = log
def get_ip_checks(self):
return (self.check_ip, self.input_ip)
def check_ip(self, ip):
if ip.p == dpkt.ip.IP_PROTO_EIGRP:
return (True, False)
return (False, False)
def set_ip(self, ip, mask):
self.address = dnet.ip_aton(ip)
self.mask = dnet.ip_aton(mask)
def set_fw(self, fw):
self.fw = fw
def set_int(self, interface):
self.interface = interface
self.eigrp_filter = { "device" : self.interface,
"op" : dnet.FW_OP_BLOCK,
"dir" : dnet.FW_DIR_IN,
"proto" : dpkt.ip.IP_PROTO_EIGRP,
"src" : dnet.addr("0.0.0.0/0", dnet.ADDR_TYPE_IP),
"dst" : dnet.addr("0.0.0.0/0", dnet.ADDR_TYPE_IP),
"sport" : [0, 0],
"dport" : [0, 0]
}
def set_dnet(self, dnet):
self.dnet = dnet
self.mac = dnet.eth.get()
# LISTENING #
def input_ip(self, eth, ip, timestamp):
if ip.dst == dnet.ip_aton("224.0.0.10"):
if ip.src != self.address and ip.src != self.spoof:
self.disp_multicast(str(ip.data), eth.src, ip.src)
if self.listen_for_auth and ip.src == self.address:
self.disp_auth(str(ip.data))
elif ip.dst == self.address or ip.dst == self.spoof:
self.disp_unicast(str(ip.data), eth.src, ip.src)
def disp_auth(self, data):
packet = eigrp_packet()
payload = packet.parse(data)
if packet.optcode == eigrp_packet.EIGRP_OPTCODE_HELLO:
tlv = eigrp_tlv()
while True:
payload = tlv.parse(payload)
if tlv.type == eigrp_tlv.EIGRP_TYPE_AUTH:
self.auth = tlv
self.log("EIGRP: Got authentication data from " + socket.inet_ntoa(self.address))
self.running = False
break
if not payload:
break
def disp_multicast(self, data, mac, src):
#print "disp_multicast from " + socket.inet_ntoa(src)
if src not in self.peers:
packet = eigrp_packet()
packet.parse(data)
#if self.hello_thread and self.hello_thread.is_alive():
self.add_peer(mac, src, packet.as_num)
else:
self.peers[src].input(data)
def disp_unicast(self, data, mac, src):
#print "disp_unicast from " + socket.inet_ntoa(src)
if src not in self.peers:
packet = eigrp_packet()
packet.parse(data)
#if self.hello_thread and self.hello_thread.is_alive():
self.add_peer(mac, src, packet.as_num)
else:
self.peers[src].input(data)
# PEER HANDLING #
def add_peer(self, mac, src, as_num, data=None):
self.log("EIGRP: Got new peer " + socket.inet_ntoa(src))
self.peers[src] = eigrp_peer(self, mac, src, as_num, self.auth)
self.peers[src].start()
if data:
self.peers[src].input(data)
# SIGNALS #
def on_hello_togglebutton_toggled(self, btn):
if btn.get_property("active"):
self.as_num = int(self.as_spinbutton.get_value())
self.as_spinbutton.set_property("sensitive", False)
if not self.filter:
self.log("EIGRP: Setting lokal packet filter for EIGRP")
if self.platform == "Linux":
os.system("iptables -A INPUT -i %s -p %i -j DROP" % (self.interface, dpkt.ip.IP_PROTO_EIGRP))
elif self.platform == "Darwin":
os.system("ipfw -q add 31335 deny eigrp from any to any")
elif self.platform == "Windows":
os.system("netsh advfirewall firewall add rule name=eigrp dir=in protocol=%i action=block" % dpkt.ip.IP_PROTO_EIGRP)
else:
self.fw.add(self.eigrp_filter)
self.filter = True
try:
self.spoof_togglebutton.set_property("sensitive", False)
if self.spoof_togglebutton.get_property("active"):
self.hello_thread = eigrp_hello_thread(self, self.interface, self.as_num, self.auth)
else:
self.hello_thread = eigrp_hello_thread(self, self.interface, self.as_num, self.auth)
            except Exception, e:
                self.log("EIGRP: Can't start hello thread on %s: %s" % (self.interface, e))
                self.spoof_togglebutton.set_property("sensitive", True)
                self.as_spinbutton.set_property("sensitive", True)
                return
self.hello_thread.start()
self.log("EIGRP: Hello thread on %s started" % (self.interface))
else:
if self.filter:
self.log("EIGRP: Removing lokal packet filter for EIGRP")
if self.platform =="Linux":
os.system("iptables -D INPUT -i %s -p %i -j DROP" % (self.interface, dpkt.ip.IP_PROTO_EIGRP))
elif self.platform == "Darwin":
os.system("ipfw -q delete 31335")
elif self.platform == "Windows":
os.system("netsh advfirewall firewall del rule name=eigrp")
else:
self.fw.delete(self.eigrp_filter)
self.filter = False
self.hello_thread.quit()
self.spoof_togglebutton.set_property("sensitive", True)
self.as_spinbutton.set_property("sensitive", True)
def on_spoof_togglebutton_toggled(self, btn):
if btn.get_property("active"):
self.spoof = dnet.ip_aton(self.spoof_entry.get_text())
self.spoof_entry.set_property("sensitive", False)
else:
self.spoof_entry.set_property("sensitive", True)
self.spoof = False
def on_goodbye_button_clicked(self, data):
select = self.treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) == 1:
parent = model.iter_parent(model.get_iter(paths[0]))
if not parent:
parent = model.get_iter(paths[0])
host = model.get_value(parent, self.TREE_HOST_ROW)
peer = dnet.ip_aton(host)
self.peers[peer].msg = None
self.goodbye_thread = eigrp_goodbye(self, peer, self.peers[peer].as_num)
self.goodbye_label.set_label("Sending Goodbye Messages to %s..." % (host))
self.goodbye_window.show_all()
self.goodbye_thread.start()
self.log("EIGRP: Goodbye thread started for %s" % (host))
def on_add_button_clicked(self, data):
dialog = gtk.MessageDialog(self.parent.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Enter IP Address to add:")
entry = gtk.Entry(0)
dialog.vbox.pack_start(entry)
entry.show()
ret = dialog.run()
dialog.destroy()
if ret == gtk.RESPONSE_OK:
try:
peer = entry.get_text()
arp = dnet.arp()
mac = arp.get(dnet.addr(peer))
if not mac:
raise Exception("Unable to get mac address")
self.add_peer(mac.data, dnet.ip_aton(peer), int(self.as_spinbutton.get_value()))
except Exception, e:
self.log("EIGRP: Cant add peer %s: %s" % (peer, e))
def on_del_button_clicked(self, data):
select = self.treeview.get_selection()
(model, paths) = select.get_selected_rows()
for i in paths:
parent = model.iter_parent(model.get_iter(i))
if not parent:
parent = model.get_iter(i)
host = model.get_value(parent, self.TREE_HOST_ROW)
peer = dnet.ip_aton(host)
self.peers[peer].quit()
def on_clear_button_clicked(self, data):
#self.treestore.clear()
for i in self.peers:
self.peers[i].quit()
def on_update_button_clicked(self, data):
page = self.notebook.get_current_page()
if page == 0:
msg = eigrp_packet( eigrp_packet.EIGRP_OPTCODE_UPDATE,
eigrp_packet.EIGRP_FLAGS_COND_RECV,
0,
0,
int(self.as_spinbutton.get_value()),
[ eigrp_internal_route(
self.next_hop_entry.get_text(),
int(self.delay_spinbutton.get_value()),
int(self.bandwidth_spinbutton.get_value()),
int(self.mtu_spinbutton.get_value()),
int(self.hop_count_spinbutton.get_value()),
int(self.reliability_spinbutton.get_value()),
int(self.load_spinbutton.get_value()),
int(self.prefix_spinbutton.get_value()),
self.destination_entry.get_text()
)
]
)
elif page == 1:
msg = eigrp_packet( eigrp_packet.EIGRP_OPTCODE_UPDATE,
eigrp_packet.EIGRP_FLAGS_COND_RECV,
0,
0,
int(self.as_spinbutton.get_value()),
[ eigrp_external_route(
self.next_hop_entry1.get_text(),
self.orig_router_entry.get_text(),
int(self.orig_as_spinbutton.get_value()),
0,
int(self.external_metric_spinbutton.get_value()),
int(self.external_id_spinbutton.get_value()),
0,
int(self.delay_spinbutton1.get_value()),
int(self.bandwidth_spinbutton1.get_value()),
int(self.mtu_spinbutton1.get_value()),
int(self.hop_count_spinbutton1.get_value()),
int(self.reliability_spinbutton1.get_value()),
int(self.load_spinbutton1.get_value()),
int(self.prefix_spinbutton1.get_value()),
self.destination_entry1.get_text()
)
]
)
elif page == 2:
buffer = self.update_textview.get_buffer()
text = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter())
if text == "":
return
exec("msg = " + text)
select = self.treeview.get_selection()
(model, paths) = select.get_selected_rows()
for i in paths:
parent = model.iter_parent(model.get_iter(i))
if not parent:
parent = model.get_iter(i)
host = model.get_value(parent, self.TREE_HOST_ROW)
self.log("EIGRP: Sending update to %s" % (host))
peer = dnet.ip_aton(host)
self.peers[peer].update(msg)
#~ #bcast
#~ ip_hdr = dpkt.ip.IP( ttl=2,
#~ p=dpkt.ip.IP_PROTO_EIGRP,
#~ src=self.address,
#~ dst=dnet.ip_aton(EIGRP_MULTICAST_ADDRESS),
#~ data=msg.render()
#~ )
#~ ip_hdr.len += len(ip_hdr.data)
#~ eth_hdr = dpkt.ethernet.Ethernet( dst=dnet.eth_aton(EIGRP_MULTICAST_MAC),
#~ src=self.mac,
#~ type=dpkt.ethernet.ETH_TYPE_IP,
#~ data=str(ip_hdr)
#~ )
#~ self.dnet.send(str(eth_hdr))
def on_stop_button_clicked(self, data):
self.goodbye_thread.quit()
self.goodbye_window.hide_all()
def get_config_dict(self):
return { "ios_ver" : { "value" : "0x%x" % self.ios_ver,
"type" : "str",
"min" : 3,
"max" : 6
},
"eigrp_ver" : { "value" : "0x%x" % self.eigrp_ver,
"type" : "str",
"min" : 3,
"max" : 6
}
}
def set_config_dict(self, dict):
if dict:
self.ios_ver = int(dict["ios_ver"]["value"], 0)
self.eigrp_ver = int(dict["eigrp_ver"]["value"], 0)
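# Informal round-trip example for the config handling above (hypothetical
# values, shown only as an illustration):
#
#     "0x%x" % 0xc04   -> "0xc04"   (stored by get_config_dict)
#     int("0xc04", 0)  -> 3076      (parsed back by set_config_dict)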
|
|
import json
import logging
import os
import shutil
import sys
import time
import urllib2
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter('ignore')
import MySQLdb
import environment
import utils
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
from vtdb import tablet
tablet_cell_map = {
62344: 'nj',
62044: 'nj',
41983: 'nj',
31981: 'ny',
}
def get_backup_storage_flags():
return ['-backup_storage_implementation', 'file',
'-file_backup_storage_root',
os.path.join(environment.tmproot, 'backupstorage')]
def get_all_extra_my_cnf(extra_my_cnf):
all_extra_my_cnf = [environment.vttop + "/config/mycnf/default-fast.cnf"]
flavor_my_cnf = mysql_flavor().extra_my_cnf()
if flavor_my_cnf:
all_extra_my_cnf.append(flavor_my_cnf)
if extra_my_cnf:
all_extra_my_cnf.append(extra_my_cnf)
return all_extra_my_cnf
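# Sketch of how the list built above is consumed (paths are illustrative only;
# see mysqlctl()/mysqlctld() below for the real call sites):
#
#     cnfs = get_all_extra_my_cnf('/tmp/extra.cnf')
#     # -> [vttop + '/config/mycnf/default-fast.cnf', <flavor cnf>, '/tmp/extra.cnf']
#     extra_env = {'EXTRA_MY_CNF': ':'.join(cnfs)}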
class Tablet(object):
"""This class helps manage a vttablet or vtocc instance.
To use it for vttablet, you need to use init_tablet and/or
start_vttablet. For vtocc, you can just call start_vtocc.
If you use it to start as vtocc, many of the support functions
that are meant for vttablet will not work."""
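  # A minimal usage sketch (hypothetical test code; keyspace/shard names are
  # made up) based on the methods defined below:
  #
  #     t = Tablet()
  #     proc = t.init_mysql()   # mysqlctl process from utils.run_bg
  #     proc.wait()
  #     t.init_tablet('replica', keyspace='test_keyspace', shard='0')
  #     t.start_vttablet(wait_for_state='SERVING')
  #     t.kill_vttablet()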
default_uid = 62344
seq = 0
tablets_running = 0
default_db_config = {
'app': {
'uname': 'vt_app',
'charset': 'utf8'
},
'dba': {
'uname': 'vt_dba',
'charset': 'utf8'
},
'filtered': {
'uname': 'vt_filtered',
'charset': 'utf8'
},
'repl': {
'uname': 'vt_repl',
'charset': 'utf8'
}
}
# this will eventually be coming from the proto3
tablet_type_value = {
"UNKNOWN": 0,
"IDLE": 1,
"MASTER": 2,
"REPLICA": 3,
"RDONLY": 4,
"BATCH": 4,
"SPARE": 5,
"EXPERIMENTAL": 6,
"SCHEMA_UPGRADE": 7,
"BACKUP": 8,
"RESTORE": 9,
"WORKER": 10,
"SCRAP": 11,
}
def __init__(self, tablet_uid=None, port=None, mysql_port=None, cell=None,
use_mysqlctld=False):
self.tablet_uid = tablet_uid or (Tablet.default_uid + Tablet.seq)
self.port = port or (environment.reserve_ports(1))
self.mysql_port = mysql_port or (environment.reserve_ports(1))
self.grpc_port = environment.reserve_ports(1)
self.use_mysqlctld = use_mysqlctld
Tablet.seq += 1
if cell:
self.cell = cell
else:
self.cell = tablet_cell_map.get(tablet_uid, 'nj')
self.proc = None
# filled in during init_tablet
self.keyspace = None
self.shard = None
# utility variables
self.tablet_alias = 'test_%s-%010d' % (self.cell, self.tablet_uid)
self.zk_tablet_path = (
'/zk/test_%s/vt/tablets/%010d' % (self.cell, self.tablet_uid))
def update_stream_python_endpoint(self):
protocol = protocols_flavor().binlog_player_python_protocol()
port = self.port
if protocol == "gorpc":
from vtdb import gorpc_update_stream
elif protocol == "grpc":
# import the grpc update stream client implementation, change the port
from vtdb import grpc_update_stream
port = self.grpc_port
return (protocol, 'localhost:%d' % port)
def mysqlctl(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctl') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid)]
if self.use_mysqlctld:
args.extend(['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if with_ports:
args.extend(['-port', str(self.port),
'-mysql_port', str(self.mysql_port)])
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def mysqlctld(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctld') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid),
'-mysql_port', str(self.mysql_port),
'-socket_file', os.path.join(self.tablet_dir, 'mysqlctl.sock')]
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def init_mysql(self, extra_my_cnf=None):
if self.use_mysqlctld:
return self.mysqlctld(
['-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf)
else:
return self.mysqlctl(
['init', '-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf, with_ports=True)
def start_mysql(self):
return self.mysqlctl(['start'], with_ports=True)
def shutdown_mysql(self):
return self.mysqlctl(['shutdown'], with_ports=True)
def teardown_mysql(self):
if utils.options.keep_logs:
return self.shutdown_mysql()
return self.mysqlctl(['teardown', '-force'])
def remove_tree(self):
if utils.options.keep_logs:
return
try:
shutil.rmtree(self.tablet_dir)
except OSError as e:
if utils.options.verbose == 2:
print >> sys.stderr, e, self.tablet_dir
def mysql_connection_parameters(self, dbname, user='vt_dba'):
return dict(user=user,
unix_socket=self.tablet_dir + '/mysql.sock',
db=dbname)
def connect(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, conn.cursor()
def connect_dict(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, MySQLdb.cursors.DictCursor(conn)
# Query the MySQL instance directly
def mquery(self, dbname, query, write=False, user='vt_dba', conn_params={}):
conn, cursor = self.connect(dbname, user=user, **conn_params)
if write:
conn.begin()
if isinstance(query, basestring):
query = [query]
for q in query:
# logging.debug("mysql(%s,%s): %s", self.tablet_uid, dbname, q)
cursor.execute(q)
if write:
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def assert_table_count(self, dbname, table, n, where=''):
result = self.mquery(dbname, 'select count(*) from ' + table + ' ' + where)
if result[0][0] != n:
raise utils.TestError('expected %d rows in %s' % (n, table), result)
def reset_replication(self):
self.mquery('', mysql_flavor().reset_replication_commands())
def populate(self, dbname, create_sql, insert_sqls=[]):
self.create_db(dbname)
if isinstance(create_sql, basestring):
create_sql = [create_sql]
for q in create_sql:
self.mquery(dbname, q)
for q in insert_sqls:
self.mquery(dbname, q, write=True)
def has_db(self, name):
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname == name:
return True
return False
def drop_db(self, name):
self.mquery('', 'drop database if exists %s' % name)
while self.has_db(name):
logging.debug('%s sleeping while waiting for database drop: %s',
self.tablet_alias, name)
time.sleep(0.3)
self.mquery('', 'drop database if exists %s' % name)
def create_db(self, name):
self.drop_db(name)
self.mquery('', 'create database %s' % name)
def clean_dbs(self):
logging.debug('mysql(%s): removing all databases', self.tablet_uid)
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname in ['information_schema', 'mysql']:
continue
self.drop_db(dbname)
def wait_check_db_var(self, name, value):
for _ in range(3):
try:
return self.check_db_var(name, value)
except utils.TestError as e:
print >> sys.stderr, 'WARNING: ', e
time.sleep(1.0)
raise e
def check_db_var(self, name, value):
row = self.get_db_var(name)
if row != (name, value):
raise utils.TestError('variable not set correctly', name, row)
def get_db_var(self, name):
conn, cursor = self.connect()
try:
cursor.execute("show variables like '%s'" % name)
return cursor.fetchone()
finally:
conn.close()
def update_addrs(self):
args = [
'UpdateTabletAddrs',
'-hostname', 'localhost',
'-ip-addr', '127.0.0.1',
'-mysql-port', '%d' % self.mysql_port,
'-vt-port', '%d' % self.port,
self.tablet_alias
]
return utils.run_vtctl(args)
def scrap(self, force=False, skip_rebuild=False):
args = ['ScrapTablet']
if force:
args.append('-force')
if skip_rebuild:
args.append('-skip-rebuild')
args.append(self.tablet_alias)
utils.run_vtctl(args, auto_log=True)
def init_tablet(self, tablet_type, keyspace=None, shard=None, force=True,
start=False, dbname=None, parent=True, wait_for_start=True,
include_mysql_port=True, **kwargs):
self.tablet_type = tablet_type
self.keyspace = keyspace
self.shard = shard
if dbname is None:
self.dbname = 'vt_' + (self.keyspace or 'database')
else:
self.dbname = dbname
args = ['InitTablet',
'-hostname', 'localhost',
'-port', str(self.port)]
if include_mysql_port:
args.extend(['-mysql_port', str(self.mysql_port)])
if force:
args.append('-force')
if parent:
args.append('-parent')
if dbname:
args.extend(['-db-name-override', dbname])
if keyspace:
args.extend(['-keyspace', keyspace])
if shard:
args.extend(['-shard', shard])
args.extend([self.tablet_alias, tablet_type])
utils.run_vtctl(args)
if start:
if not wait_for_start:
expected_state = None
elif tablet_type == 'master' or tablet_type == 'replica' or tablet_type == 'rdonly' or tablet_type == 'batch':
expected_state = 'SERVING'
else:
expected_state = 'NOT_SERVING'
self.start_vttablet(wait_for_state=expected_state, **kwargs)
def conn(self, user=None, password=None):
conn = tablet.TabletConnection(
'localhost:%d' % self.port, self.tablet_type, self.keyspace,
self.shard, 30, caller_id='dev')
conn.dial()
return conn
@property
def tablet_dir(self):
return '%s/vt_%010d' % (environment.vtdataroot, self.tablet_uid)
def grpc_enabled(self):
return protocols_flavor().tabletconn_protocol() == 'grpc' or \
protocols_flavor().tablet_manager_protocol() == 'grpc' or \
protocols_flavor().binlog_player_protocol() == 'grpc'
def flush(self):
utils.curl('http://localhost:%s%s' %
(self.port, environment.flush_logs_url),
stderr=utils.devnull, stdout=utils.devnull)
def _start_prog(self, binary, port=None, auth=False, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags={}, table_acl_config=None,
lameduck_period=None, security_policy=None,
extra_args=None, extra_env=None):
environment.prog_compile(binary)
args = environment.binary_args(binary)
args.extend(['-port', '%s' % (port or self.port),
'-log_dir', environment.vtlogroot])
self._add_dbconfigs(args, repl_extra_flags)
if memcache:
args.extend(['-rowcache-bin', environment.memcached_bin()])
memcache_socket = os.path.join(self.tablet_dir, 'memcache.sock')
args.extend(['-rowcache-socket', memcache_socket])
args.extend(['-enable-rowcache'])
if auth:
args.extend(
['-auth-credentials',
os.path.join(
environment.vttop, 'test', 'test_data',
'authcredentials_test.json')])
if filecustomrules:
args.extend(['-filecustomrules', filecustomrules])
if zkcustomrules:
args.extend(['-zkcustomrules', zkcustomrules])
if schema_override:
args.extend(['-schema-override', schema_override])
if table_acl_config:
args.extend(['-table-acl-config', table_acl_config])
args.extend(['-queryserver-config-strict-table-acl'])
if protocols_flavor().service_map():
args.extend(['-service_map', ",".join(protocols_flavor().service_map())])
if self.grpc_enabled():
args.extend(['-grpc_port', str(self.grpc_port)])
if lameduck_period:
args.extend(['-lameduck-period', lameduck_period])
if security_policy:
args.extend(['-security_policy', security_policy])
if extra_args:
args.extend(extra_args)
args.extend(['-enable-autocommit'])
stderr_fd = open(os.path.join(environment.vtlogroot, '%s-%d.stderr' % (binary, self.tablet_uid)), 'w')
# increment count only the first time
if not self.proc:
Tablet.tablets_running += 1
self.proc = utils.run_bg(args, stderr=stderr_fd, extra_env=extra_env)
log_message = "Started vttablet: %s (%s) with pid: %s - Log files: %s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s" % \
(self.tablet_uid, self.tablet_alias, self.proc.pid, environment.vtlogroot, self.proc.pid)
# This may race with the stderr output from the process (though that's usually empty).
stderr_fd.write(log_message + '\n')
stderr_fd.close()
logging.debug(log_message)
# wait for query service to be in the right state
if wait_for_state:
if binary == 'vttablet':
self.wait_for_vttablet_state(wait_for_state, port=port)
else:
self.wait_for_vtocc_state(wait_for_state, port=port)
return self.proc
def start_vttablet(self, port=None, auth=False, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags={}, table_acl_config=None,
lameduck_period=None, security_policy=None,
target_tablet_type=None, full_mycnf_args=False,
extra_args=None, extra_env=None, include_mysql_port=True,
init_tablet_type=None, init_keyspace=None,
init_shard=None, init_db_name_override=None,
supports_backups=False):
"""Starts a vttablet process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
args = []
# Use "localhost" as hostname because Travis CI worker hostnames are too long for MySQL replication.
args.extend(['-tablet_hostname', 'localhost'])
args.extend(['-tablet-path', self.tablet_alias])
args.extend(environment.topo_server().flags())
args.extend(['-binlog_player_protocol',
protocols_flavor().binlog_player_protocol()])
args.extend(['-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol()])
args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
if self.use_mysqlctld:
args.extend(['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if full_mycnf_args:
# this flag is used to specify all the mycnf_ flags, to make
# sure that code works and can fork actions.
relay_log_path = os.path.join(self.tablet_dir, 'relay-logs',
'vt-%010d-relay-bin' % self.tablet_uid)
args.extend([
'-mycnf_server_id', str(self.tablet_uid),
'-mycnf_data_dir', os.path.join(self.tablet_dir, 'data'),
'-mycnf_innodb_data_home_dir', os.path.join(self.tablet_dir,
'innodb', 'data'),
'-mycnf_innodb_log_group_home_dir', os.path.join(self.tablet_dir,
'innodb', 'logs'),
'-mycnf_socket_file', os.path.join(self.tablet_dir, 'mysql.sock'),
'-mycnf_error_log_path', os.path.join(self.tablet_dir, 'error.log'),
'-mycnf_slow_log_path', os.path.join(self.tablet_dir,
'slow-query.log'),
'-mycnf_relay_log_path', relay_log_path,
'-mycnf_relay_log_index_path', relay_log_path + '.index',
'-mycnf_relay_log_info_path', os.path.join(self.tablet_dir,
'relay-logs',
'relay-log.info'),
'-mycnf_bin_log_path', os.path.join(self.tablet_dir, 'bin-logs',
'vt-%010d-bin' % self.tablet_uid),
'-mycnf_master_info_file', os.path.join(self.tablet_dir,
'master.info'),
'-mycnf_pid_file', os.path.join(self.tablet_dir, 'mysql.pid'),
'-mycnf_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
'-mycnf_slave_load_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
])
if include_mysql_port:
args.extend(['-mycnf_mysql_port', str(self.mysql_port)])
if target_tablet_type:
self.tablet_type = target_tablet_type
args.extend(['-target_tablet_type', target_tablet_type,
'-health_check_interval', '2s',
'-enable_replication_lag_check',
'-degraded_threshold', '5s'])
# this is used to run InitTablet as part of the vttablet startup
if init_tablet_type:
self.tablet_type = init_tablet_type
args.extend(['-init_tablet_type', init_tablet_type])
if init_keyspace:
self.keyspace = init_keyspace
self.shard = init_shard
args.extend(['-init_keyspace', init_keyspace,
'-init_shard', init_shard])
if init_db_name_override:
self.dbname = init_db_name_override
args.extend(['-init_db_name_override', init_db_name_override])
else:
self.dbname = 'vt_' + init_keyspace
if supports_backups:
args.extend(['-restore_from_backup'] + get_backup_storage_flags())
args.extend(['-rpc-error-only-in-reply=true'])
if extra_args:
args.extend(extra_args)
return self._start_prog(binary='vttablet', port=port, auth=auth,
memcache=memcache, wait_for_state=wait_for_state,
filecustomrules=filecustomrules,
zkcustomrules=zkcustomrules,
schema_override=schema_override,
repl_extra_flags=repl_extra_flags,
table_acl_config=table_acl_config,
lameduck_period=lameduck_period, extra_args=args,
security_policy=security_policy, extra_env=extra_env)
def start_vtocc(self, port=None, auth=False, memcache=False,
wait_for_state='SERVING', filecustomrules=None,
schema_override=None,
repl_extra_flags={}, table_acl_config=None,
lameduck_period=None, security_policy=None,
keyspace=None, shard=False,
extra_args=None):
"""Starts a vtocc process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
self.keyspace = keyspace
self.shard = shard
self.dbname = 'vt_' + (self.keyspace or 'database')
args = []
args.extend(["-db-config-app-unixsocket", self.tablet_dir + '/mysql.sock'])
args.extend(["-db-config-dba-unixsocket", self.tablet_dir + '/mysql.sock'])
args.extend(["-db-config-app-keyspace", keyspace])
args.extend(["-db-config-app-shard", shard])
args.extend(["-binlog-path", "foo"])
if extra_args:
args.extend(extra_args)
return self._start_prog(binary='vtocc', port=port, auth=auth,
memcache=memcache, wait_for_state=wait_for_state,
filecustomrules=filecustomrules,
schema_override=schema_override,
repl_extra_flags=repl_extra_flags,
table_acl_config=table_acl_config,
lameduck_period=lameduck_period, extra_args=args,
security_policy=security_policy)
def wait_for_vttablet_state(self, expected, timeout=60.0, port=None):
self.wait_for_vtocc_state(expected, timeout=timeout, port=port)
def wait_for_vtocc_state(self, expected, timeout=60.0, port=None):
while True:
v = utils.get_vars(port or self.port)
last_seen_state = "?"
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError('vttablet died while test waiting for state %s' % expected)
logging.debug(
' vttablet %s not answering at /debug/vars, waiting...',
self.tablet_alias)
else:
if 'TabletStateName' not in v:
logging.debug(
' vttablet %s not exporting TabletStateName, waiting...',
self.tablet_alias)
else:
s = v['TabletStateName']
last_seen_state = s
if s != expected:
logging.debug(
' vttablet %s in state %s != %s', self.tablet_alias, s,
expected)
else:
break
timeout = utils.wait_step('waiting for state %s (last seen state: %s)' % (expected, last_seen_state),
timeout,
sleep_time=0.1)
def wait_for_mysqlctl_socket(self, timeout=30.0):
mysql_sock = os.path.join(self.tablet_dir, 'mysql.sock')
mysqlctl_sock = os.path.join(self.tablet_dir, 'mysqlctl.sock')
while True:
if os.path.exists(mysql_sock) and os.path.exists(mysqlctl_sock):
return
timeout = utils.wait_step('waiting for mysql and mysqlctl socket files: %s %s' % (mysql_sock, mysqlctl_sock), timeout)
def _add_dbconfigs(self, args, repl_extra_flags={}):
config = dict(self.default_db_config)
if self.keyspace:
config['app']['dbname'] = self.dbname
config['repl']['dbname'] = self.dbname
config['repl'].update(repl_extra_flags)
for key1 in config:
for key2 in config[key1]:
args.extend(['-db-config-' + key1 + '-' + key2, config[key1][key2]])
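  # For default_db_config above, _add_dbconfigs() expands to flags of the form
  # (ordering depends on dict iteration; shown purely as an illustration):
  #
  #     -db-config-app-uname vt_app -db-config-app-charset utf8
  #     -db-config-dba-uname vt_dba -db-config-dba-charset utf8 ...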
def get_status(self):
return utils.get_status(self.port)
def get_healthz(self):
return urllib2.urlopen('http://localhost:%d/healthz' % self.port).read()
def kill_vttablet(self, wait=True):
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias, str(wait))
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.terminate()
if wait:
self.proc.wait()
self.proc = None
def hard_kill_vttablet(self):
logging.debug('hard killing vttablet: %s', self.tablet_alias)
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.kill()
self.proc.wait()
self.proc = None
def wait_for_binlog_server_state(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError('vttablet died while test waiting for binlog state %s' % expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'UpdateStreamState' not in v:
logging.debug(
' vttablet not exporting BinlogServerState, waiting...')
else:
s = v['UpdateStreamState']
if s != expected:
logging.debug(" vttablet's binlog server in state %s != %s", s,
expected)
else:
break
timeout = utils.wait_step('waiting for binlog server state %s' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog service is in state %s',
self.tablet_alias, expected)
def wait_for_binlog_player_count(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError('vttablet died while test waiting for binlog count %s' % expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'BinlogPlayerMapSize' not in v:
logging.debug(
' vttablet not exporting BinlogPlayerMapSize, waiting...')
else:
s = v['BinlogPlayerMapSize']
if s != expected:
logging.debug(" vttablet's binlog player map has count %d != %d",
s, expected)
else:
break
timeout = utils.wait_step('waiting for binlog player count %d' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog player has %d players',
self.tablet_alias, expected)
@classmethod
def check_vttablet_count(klass):
if Tablet.tablets_running > 0:
raise utils.TestError('This test is not killing all its vttablets')
def execute(self, sql, bindvars=None, transaction_id=None, auto_log=True):
"""execute uses 'vtctl VtTabletExecute' to execute a command.
"""
args = [
'VtTabletExecute',
'-keyspace', self.keyspace,
'-shard', self.shard,
]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if transaction_id:
args.extend(['-transaction_id', str(transaction_id)])
args.extend([self.tablet_alias, sql])
return utils.run_vtctl_json(args, auto_log=auto_log)
def begin(self, auto_log=True):
"""begin uses 'vtctl VtTabletBegin' to start a transaction.
"""
args = [
'VtTabletBegin',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
]
result = utils.run_vtctl_json(args, auto_log=auto_log)
return result['transaction_id']
def commit(self, transaction_id, auto_log=True):
"""commit uses 'vtctl VtTabletCommit' to commit a transaction.
"""
args = [
'VtTabletCommit',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def rollback(self, transaction_id, auto_log=True):
"""rollback uses 'vtctl VtTabletRollback' to rollback a transaction.
"""
args = [
'VtTabletRollback',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def kill_tablets(tablets):
for t in tablets:
logging.debug('killing vttablet: %s', t.tablet_alias)
if t.proc is not None:
Tablet.tablets_running -= 1
t.proc.terminate()
for t in tablets:
if t.proc is not None:
t.proc.wait()
t.proc = None
|
|
from __future__ import annotations
import abc
from collections import defaultdict
from functools import partial
import inspect
import re
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
Iterator,
List,
Sequence,
cast,
)
import warnings
import numpy as np
from pandas._config import option_context
from pandas._libs import lib
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
NDFrameT,
)
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCNDFrame,
ABCSeries,
)
from pandas.core.algorithms import safe_sort
from pandas.core.base import (
DataError,
SelectionMixin,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
)
if TYPE_CHECKING:
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.groupby import GroupBy
from pandas.core.resample import Resampler
from pandas.core.window.rolling import BaseWindow
ResType = Dict[int, Any]
def frame_apply(
obj: DataFrame,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type: str | None = None,
args=None,
kwargs=None,
) -> FrameApply:
"""construct and return a row or column based frame apply object"""
axis = obj._get_axis_number(axis)
klass: type[FrameApply]
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
args=args,
kwargs=kwargs,
)
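# Rough sketch of how this factory is expected to be reached (illustrative
# only; the real entry point is DataFrame.apply in pandas/core/frame.py):
#
#     op = frame_apply(df, func=np.sqrt, axis=0, raw=False, result_type=None)
#     result = op.apply()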
class Apply(metaclass=abc.ABCMeta):
axis: int
def __init__(
self,
obj: AggObjType,
func,
raw: bool,
result_type: str | None,
args,
kwargs,
):
self.obj = obj
self.raw = raw
self.args = args or ()
self.kwargs = kwargs or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (
(kwargs or args)
and not isinstance(func, (np.ufunc, str))
and not is_list_like(func)
):
def f(x):
return func(x, *args, **kwargs)
else:
f = func
self.orig_f: AggFuncType = func
self.f: AggFuncType = f
@abc.abstractmethod
def apply(self) -> DataFrame | Series:
pass
def agg(self) -> DataFrame | Series | None:
"""
Provide an implementation for the aggregators.
Returns
-------
Result of aggregation, or None if agg cannot be performed by
this method.
"""
obj = self.obj
arg = self.f
args = self.args
kwargs = self.kwargs
if isinstance(arg, str):
return self.apply_str()
if is_dict_like(arg):
return self.agg_dict_like()
elif is_list_like(arg):
# we require a list, but not a 'str'
return self.agg_list_like()
if callable(arg):
f = com.get_cython_func(arg)
if f and not args and not kwargs:
return getattr(obj, f)()
# caller can react
return None
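    # Informal dispatch examples for agg() above (hypothetical frames):
    #
    #     df.agg("sum")            -> apply_str()
    #     df.agg(["sum", "mean"])  -> agg_list_like()
    #     df.agg({"A": "sum"})     -> agg_dict_like()
    #     df.agg(np.sum)           -> getattr(df, "sum")() via com.get_cython_func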
def transform(self) -> DataFrame | Series:
"""
Transform a DataFrame or Series.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
obj = self.obj
func = self.orig_f
axis = self.axis
args = self.args
kwargs = self.kwargs
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return obj.T.transform(func, 0, *args, **kwargs).T
if is_list_like(func) and not is_dict_like(func):
func = cast(List[AggFuncTypeBase], func)
# Convert func equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if is_dict_like(func):
func = cast(AggFuncTypeDict, func)
return self.transform_dict_like(func)
# func is either str or callable
func = cast(AggFuncTypeBase, func)
try:
result = self.transform_str_or_callable(func)
except TypeError:
raise
except Exception as err:
raise ValueError("Transform function failed") from err
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if (
isinstance(result, (ABCSeries, ABCDataFrame))
and result.empty
and not obj.empty
):
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
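    # For example (made-up data), transform() accepts shape-preserving
    # functions and rejects reducers:
    #
    #     df.transform(lambda x: x + 1)  # ok, same shape as df
    #     df.transform("abs")            # ok, string dispatch
    #     df.transform("sum")            # ValueError("Function did not transform")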
def transform_dict_like(self, func):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
obj = self.obj
args = self.args
kwargs = self.kwargs
# transform is currently only for Series/DataFrame
assert isinstance(obj, ABCNDFrame)
if len(func) == 0:
raise ValueError("No transform functions were provided")
func = self.normalize_dictlike_arg("transform", obj, func)
results: dict[Hashable, DataFrame | Series] = {}
failed_names = []
all_type_errors = True
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = colg.transform(how, 0, *args, **kwargs)
except Exception as err:
if str(err) in {
"Function did not transform",
"No transform functions were provided",
}:
raise err
else:
if not isinstance(err, TypeError):
all_type_errors = False
failed_names.append(name)
# combine results
if not results:
klass = TypeError if all_type_errors else ValueError
raise klass("Transform function failed")
if len(failed_names) > 0:
warnings.warn(
f"{failed_names} did not transform successfully. If any error is "
f"raised, this will raise in a future version of pandas. "
f"Drop these columns/ops to avoid this warning.",
FutureWarning,
stacklevel=find_stack_level(),
)
return concat(results, axis=1)
def transform_str_or_callable(self, func) -> DataFrame | Series:
"""
Compute transform in the case of a string or callable func
"""
obj = self.obj
args = self.args
kwargs = self.kwargs
if isinstance(func, str):
return self._try_aggregate_string_function(obj, func, *args, **kwargs)
if not args and not kwargs:
f = com.get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
def agg_list_like(self) -> DataFrame | Series:
"""
Compute aggregation in the case of a list-like argument.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(List[AggFuncTypeBase], self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
elif obj._selected_obj.ndim == 1:
# For SeriesGroupBy this matches _obj_with_exclusions
selected_obj = obj._selected_obj
else:
selected_obj = obj._obj_with_exclusions
results = []
keys = []
failed_names = []
depr_nuisance_columns_msg = (
"{} did not aggregate successfully. If any error is "
"raised this will raise in a future version of pandas. "
"Drop these columns/ops to avoid this warning."
)
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
try:
new_res = colg.aggregate(a)
except TypeError:
failed_names.append(com.get_callable_name(a) or a)
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
indices = []
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
try:
# Capture and suppress any warnings emitted by us in the call
# to agg below, but pass through any warnings that were
# generated otherwise.
# This is necessary because of https://bugs.python.org/issue29672
# See GH #43741 for more details
with warnings.catch_warnings(record=True) as record:
new_res = colg.aggregate(arg)
if len(record) > 0:
match = re.compile(depr_nuisance_columns_msg.format(".*"))
for warning in record:
if re.match(match, str(warning.message)):
failed_names.append(col)
else:
warnings.warn_explicit(
message=warning.message,
category=warning.category,
filename=warning.filename,
lineno=warning.lineno,
)
except (TypeError, DataError):
failed_names.append(col)
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
failed_names.append(col)
elif "no results" in str(err):
# reached in test_frame_apply.test_nuiscance_columns
# where the colg.aggregate(arg) ends up going through
# the selected_obj.ndim == 1 branch above with arg == ["sum"]
# on a datetime64[ns] column
failed_names.append(col)
else:
raise
else:
results.append(new_res)
indices.append(index)
keys = selected_obj.columns.take(indices)
# if we are empty
if not len(results):
raise ValueError("no results")
if len(failed_names) > 0:
warnings.warn(
depr_nuisance_columns_msg.format(failed_names),
FutureWarning,
stacklevel=find_stack_level(),
)
try:
concatenated = concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=obj.name)
if is_nested_object(result):
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result
else:
# Concat uses the first index to determine the final indexing order.
# The union of a shorter first index with the other indices causes
# the index sorting to be different from the order of the aggregating
# functions. Reindex if this is the case.
index_size = concatenated.index.size
full_ordered_index = next(
result.index for result in results if result.index.size == index_size
)
return concatenated.reindex(full_ordered_index, copy=False)
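    # Informal example of the list-like path (made-up frame):
    #
    #     df = DataFrame({"A": [1, 2], "B": [3, 4]})
    #     df.agg(["sum", "min"])
    #     #      A  B
    #     # sum  3  7
    #     # min  1  3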
def agg_dict_like(self) -> DataFrame | Series:
"""
Compute aggregation in the case of a dict-like argument.
Returns
-------
Result of aggregation.
"""
from pandas import Index
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(AggFuncTypeDict, self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
selection = None
else:
selected_obj = obj._selected_obj
selection = obj._selection
arg = self.normalize_dictlike_arg("agg", selected_obj, arg)
if selected_obj.ndim == 1:
# key only used for output
colg = obj._gotitem(selection, ndim=1)
results = {key: colg.agg(how) for key, how in arg.items()}
else:
# key used for column selection and output
results = {
key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()
}
# set the final keys
keys = list(arg.keys())
# Avoid making two isinstance calls in all and any below
is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
# combine results
if all(is_ndframe):
keys_to_use: Iterable[Hashable]
keys_to_use = [k for k in keys if not results[k].empty]
# Have to check, if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
if selected_obj.ndim == 2:
# keys are columns, so we can preserve names
ktu = Index(keys_to_use)
ktu._set_names(selected_obj.columns.names)
keys_to_use = ktu
axis = 0 if isinstance(obj, ABCSeries) else 1
result = concat(
{k: results[k] for k in keys_to_use}, axis=axis, keys=keys_to_use
)
elif any(is_ndframe):
# There is a mix of NDFrames and scalars
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
else:
from pandas import Series
# we have a dict of scalars
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
name = obj.name
else:
name = None
result = Series(results, name=name)
return result
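    # Informal example of the dict-like path (made-up frame):
    #
    #     df.agg({"A": "sum", "B": ["min", "max"]})
    #
    # normalize_dictlike_arg() first lifts "sum" to ["sum"], so every value is
    # list-like; the per-column results are then concatenated along axis=1 with
    # the dict keys as column labels.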
def apply_str(self) -> DataFrame | Series:
"""
Compute apply in case of a string.
Returns
-------
result: Series or DataFrame
"""
# Caller is responsible for checking isinstance(self.f, str)
f = cast(str, self.f)
obj = self.obj
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(obj, f, None)
if callable(func):
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
elif self.axis != 0:
raise ValueError(f"Operation {f} does not support axis=1")
return self._try_aggregate_string_function(obj, f, *self.args, **self.kwargs)
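    # e.g. (hypothetical call) df.apply("mean", axis=1): "mean" is found on the
    # DataFrame and accepts an "axis" argument, so self.axis is injected into
    # kwargs before delegating to _try_aggregate_string_function().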
def apply_multiple(self) -> DataFrame | Series:
"""
Compute apply in case of a list-like or dict-like.
Returns
-------
result: Series, DataFrame, or None
Result when self.f is a list-like or dict-like, None otherwise.
"""
return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwargs)
def normalize_dictlike_arg(
self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
) -> AggFuncTypeDict:
"""
Handler for dict-like argument.
Ensures that necessary columns exist if obj is a DataFrame, and
that a nested renamer is not passed. Also normalizes to all lists
        when the values consist of a mix of lists and non-lists.
"""
assert how in ("apply", "agg", "transform")
# Can't use func.values(); wouldn't work for a Series
if (
how == "agg"
and isinstance(obj, ABCSeries)
and any(is_list_like(v) for _, v in func.items())
) or (any(is_dict_like(v) for _, v in func.items())):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
if obj.ndim != 1:
# Check for missing columns on a frame
cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
cols_sorted = list(safe_sort(list(cols)))
raise KeyError(f"Column(s) {cols_sorted} do not exist")
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
# Cannot use func.values() because arg may be a Series
if any(is_aggregator(x) for _, x in func.items()):
new_func: AggFuncTypeDict = {}
for k, v in func.items():
if not is_aggregator(v):
# mypy can't realize v is not a list here
new_func[k] = [v] # type:ignore[list-item]
else:
new_func[k] = v
func = new_func
return func
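    # Normalization examples (informal):
    #
    #     {"A": "mean", "B": ["min"]}  -> {"A": ["mean"], "B": ["min"]}
    #     {"A": {"foo": "mean"}}       -> SpecificationError("nested renamer is not supported")
    #     {"Z": "mean"} on a frame without a "Z" column -> KeyError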
def _try_aggregate_string_function(self, obj, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(obj, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
return f
f = getattr(np, arg, None)
if f is not None and hasattr(obj, "__array__"):
# in particular exclude Window
return f(obj, *args, **kwargs)
raise AttributeError(
f"'{arg}' is not a valid function for '{type(obj).__name__}' object"
)
class NDFrameApply(Apply):
"""
Methods shared by FrameApply and SeriesApply but
not GroupByApply or ResamplerWindowApply
"""
@property
def index(self) -> Index:
return self.obj.index
@property
def agg_axis(self) -> Index:
return self.obj._get_agg_axis(self.axis)
class FrameApply(NDFrameApply):
obj: DataFrame
# ---------------------------------------------------------------
# Abstract Methods
@property
@abc.abstractmethod
def result_index(self) -> Index:
pass
@property
@abc.abstractmethod
def result_columns(self) -> Index:
pass
@property
@abc.abstractmethod
def series_generator(self) -> Iterator[Series]:
pass
@abc.abstractmethod
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
pass
# ---------------------------------------------------------------
@property
def res_columns(self) -> Index:
return self.result_columns
@property
def columns(self) -> Index:
return self.obj.columns
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self) -> Series:
return self.obj.dtypes
def apply(self) -> DataFrame | Series:
"""compute the results"""
# dispatch to agg
if is_list_like(self.f):
return self.apply_multiple()
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
return self.apply_str()
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._mgr.apply("apply", func=self.f)
# _constructor will retain self.index and self.columns
return self.obj._constructor(data=results)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast(self.obj)
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw:
return self.apply_raw()
return self.apply_standard()
def agg(self):
obj = self.obj
axis = self.axis
# TODO: Avoid having to change state
self.obj = self.obj if self.axis == 0 else self.obj.T
self.axis = 0
result = None
try:
result = super().agg()
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
finally:
self.obj = obj
self.axis = axis
if axis == 1:
result = result.T if result is not None else result
if result is None:
result = self.obj.apply(self.orig_f, axis, args=self.args, **self.kwargs)
return result
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
assert callable(self.f)
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([], dtype=np.float64))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([], dtype=np.float64))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
"""apply to the values as a numpy array"""
def wrap_function(func):
"""
Wrap user supplied function to work around numpy issue.
see https://github.com/numpy/numpy/issues/8352
"""
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, str):
result = np.array(result, dtype=object)
return result
return wrapper
result = np.apply_along_axis(wrap_function(self.f), self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target: DataFrame) -> DataFrame:
assert callable(self.f)
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
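    # With result_type="broadcast" each per-column result is written back into
    # a frame of the original shape, e.g. (made-up data):
    #
    #     df.apply(lambda col: col.mean(), result_type="broadcast")
    #
    # fills every row of each column with that column's mean while keeping
    # df.index and df.columns.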
def apply_standard(self):
results, res_index = self.apply_series_generator()
# wrap results
return self.wrap_results(results, res_index)
def apply_series_generator(self) -> tuple[ResType, Index]:
assert callable(self.f)
series_gen = self.series_generator
res_index = self.result_index
results = {}
with option_context("mode.chained_assignment", None):
for i, v in enumerate(series_gen):
# ignore SettingWithCopy here in case the user mutates
results[i] = self.f(v)
if isinstance(results[i], ABCSeries):
# If we have a view on v, we need to make a copy because
# series_generator will swap out the underlying data
results[i] = results[i].copy(deep=False)
return results, res_index
def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
from pandas import Series
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis(results, res_index)
# dict of scalars
# the default dtype of an empty Series will be `object`, but this
# code can be hit by df.mean() where the result should have dtype
# float64 even if it's an empty Series.
constructor_sliced = self.obj._constructor_sliced
if constructor_sliced is Series:
result = create_series_with_explicit_dtype(
results, dtype_if_empty=np.float64
)
else:
result = constructor_sliced(results)
result.index = res_index
return result
def apply_str(self) -> DataFrame | Series:
# Caller is responsible for checking isinstance(self.f, str)
# TODO: GH#39993 - Avoid special-casing by replacing with lambda
if self.f == "size":
# Special-cased because DataFrame.size returns a single scalar
obj = self.obj
value = obj.shape[self.axis]
return obj._constructor_sliced(value, index=self.agg_axis)
return super().apply_str()
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self, target: DataFrame) -> DataFrame:
return super().apply_broadcast(target)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self) -> Index:
return self.columns
@property
def result_columns(self) -> Index:
return self.index
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
"""return the results for the rows"""
if self.result_type == "reduce":
# e.g. test_apply_dict GH#8735
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
elif self.result_type is None and all(
isinstance(x, dict) for x in results.values()
):
# Our operation was a to_dict op e.g.
# test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
try:
result = self.obj._constructor(data=results)
except ValueError as err:
if "All arrays must be of the same length" in str(err):
# e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
# see test_agg_listlike_result GH#29587
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
else:
raise
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(res_index):
result.columns = res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self, target: DataFrame) -> DataFrame:
result = super().apply_broadcast(target.T)
return result.T
@property
def series_generator(self):
values = self.values
values = ensure_wrapped_if_datetimelike(values)
assert len(values) > 0
# We create one Series object, and will swap out the data inside
# of it. Kids: don't do this at home.
ser = self.obj._ixs(0, axis=0)
mgr = ser._mgr
if is_extension_array_dtype(ser.dtype):
# values will be incorrect for this block
# TODO(EA2D): special case would be unnecessary with 2D EAs
obj = self.obj
for i in range(len(obj)):
yield obj._ixs(i, axis=0)
else:
for (arr, name) in zip(values, self.index):
# GH#35462 re-pin mgr in case setitem changed it
ser._mgr = mgr
mgr.set_values(arr)
ser.name = name
yield ser
@property
def result_index(self) -> Index:
return self.index
@property
def result_columns(self) -> Index:
return self.columns
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
"""return the results for the columns"""
result: DataFrame | Series
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape(results, res_index)
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
result = self.obj._constructor_sliced(results)
result.index = res_index
# we may want to infer results
else:
result = self.infer_to_same_shape(results, res_index)
return result
def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
"""infer the results to the same shape as the input object"""
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = res_index
# infer dtypes
result = result.infer_objects()
return result
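    # Illustration (informal, not part of pandas): for df.apply(func, axis=1)
    # where func returns a Series per row, the per-row results are assembled
    # into a frame by the constructor, transposed back, re-labelled with the
    # original row index and passed through infer_objects so dtypes match the
    # inputs where possible.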
class SeriesApply(NDFrameApply):
obj: Series
axis = 0
def __init__(
self,
obj: Series,
func: AggFuncType,
convert_dtype: bool,
args,
kwargs,
):
self.convert_dtype = convert_dtype
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self) -> DataFrame | Series:
obj = self.obj
if len(obj) == 0:
return self.apply_empty_result()
# dispatch to agg
if is_list_like(self.f):
return self.apply_multiple()
if isinstance(self.f, str):
# if we are a string, try to dispatch
return self.apply_str()
return self.apply_standard()
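    # Dispatch sketch (informal illustration, not part of pandas): for a
    # Series ``ser``, ``ser.apply(np.sqrt)`` reaches apply_standard and calls
    # the ufunc directly, ``ser.apply(['min', 'max'])`` is list-like and goes
    # through apply_multiple, and ``ser.apply('min')`` is a string and is
    # routed via apply_str.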
def agg(self):
result = super().agg()
if result is None:
f = self.f
kwargs = self.kwargs
# string, list-like, and dict-like are entirely handled in super
assert callable(f)
# we can be called from an inner function which
# passes this meta-data
kwargs.pop("_level", None)
# try a regular apply, this evaluates lambdas
            # row-by-row; however if the lambda expects a whole Series,
            # e.g.: lambda x: x - x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.obj.apply(f)
except (ValueError, AttributeError, TypeError):
result = f(self.obj)
return result
def apply_empty_result(self) -> Series:
obj = self.obj
return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
obj, method="apply"
)
def apply_standard(self) -> DataFrame | Series:
f = self.f
obj = self.obj
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(obj)
# row-wise access
if is_extension_array_dtype(obj.dtype) and hasattr(obj._values, "map"):
# GH#23179 some EAs do not have `map`
mapped = obj._values.map(f)
else:
values = obj.astype(object)._values
# error: Argument 2 to "map_infer" has incompatible type
# "Union[Callable[..., Any], str, List[Union[Callable[..., Any], str]],
# Dict[Hashable, Union[Union[Callable[..., Any], str],
# List[Union[Callable[..., Any], str]]]]]"; expected
# "Callable[[Any], Any]"
mapped = lib.map_infer(
values,
f, # type: ignore[arg-type]
convert=self.convert_dtype,
)
if len(mapped) and isinstance(mapped[0], ABCSeries):
# GH 25959 use pd.array instead of tolist
# so extension arrays can be used
return obj._constructor_expanddim(pd_array(mapped), index=obj.index)
else:
return obj._constructor(mapped, index=obj.index).__finalize__(
obj, method="apply"
)
class GroupByApply(Apply):
def __init__(
self,
obj: GroupBy[NDFrameT],
func: AggFuncType,
args,
kwargs,
):
kwargs = kwargs.copy()
self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self):
raise NotImplementedError
def transform(self):
raise NotImplementedError
class ResamplerWindowApply(Apply):
axis = 0
obj: Resampler | BaseWindow
def __init__(
self,
obj: Resampler | BaseWindow,
func: AggFuncType,
args,
kwargs,
):
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self):
raise NotImplementedError
def transform(self):
raise NotImplementedError
def reconstruct_func(
func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType | None, list[str] | None, list[int] | None]:
"""
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
relabelling: bool, if there is relabelling or not
func: normalized and mangled func
columns: list of column names
order: list of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: list[str] | None = None
order: list[int] | None = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names "
"assigned"
)
elif func is None:
# nicer error message
            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc)'.")
if relabeling:
func, columns, order = normalize_keyword_aggregation(kwargs)
return relabeling, func, columns, order
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> tuple[dict, list[str], list[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
to the old Dict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
from pandas.core.indexes.base import Index
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for column, aggfunc in pairs:
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
# uniquified_aggspec will store uniquified order list and will compare it with order
# based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
# error: Incompatible return value type (got "Tuple[defaultdict[Any, Any],
# Any, ndarray]", expected "Tuple[Dict[Any, Any], List[str], List[int]]")
return aggspec, columns, col_idx_order # type: ignore[return-value]
def _make_unique_kwarg_list(
seq: Sequence[tuple[Any, Any]]
) -> Sequence[tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples:
--------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
def relabel_result(
result: DataFrame | Series,
func: dict[str, list[Callable | str]],
columns: Iterable[Hashable],
order: Iterable[int],
) -> dict[Hashable, Series]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Parameters:
----------
result: Result from aggregation
func: Dict of (column name, funcs)
columns: New columns name for relabelling
order: New order for relabelling
Examples:
---------
>>> result = DataFrame({"A": [np.nan, 2, np.nan],
... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
>>> order = [0, 1, 2, 3]
    >>> relabel_result(result, funcs, columns, order)  # doctest: +SKIP
dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
"""
from pandas.core.indexes.base import Index
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: dict[Hashable, Series] = {}
idx = 0
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
for col, fun in func.items():
s = result[col].dropna()
# In the `_aggregate`, the callable names are obtained and used in `result`, and
# these names are ordered alphabetically. e.g.
# C2 C1
# <lambda> 1 NaN
# amax NaN 4.0
# max NaN 4.0
# sum 18.0 6.0
# Therefore, the order of functions for each column could be shuffled
# accordingly so need to get the callable name if it is not parsed names, and
# reorder the aggregated result for each column.
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
# [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
# reorder so that aggregated values map to their functions regarding the order.
        # However, if there is only one column being used for aggregation,
        # there is no need to reorder since the index is not sorted; keep it
        # as is in `funcs`, e.g.
# A
# min 1.0
# mean 1.5
# mean 1.5
if reorder_mask:
fun = [
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
]
col_idx_order = Index(s.index).get_indexer(fun)
s = s[col_idx_order]
# assign the new user-provided "named aggregation" as index names, and reindex
# it based on the whole user-provided names.
s.index = reordered_indexes[idx : idx + len(fun)]
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
idx = idx + len(fun)
return reordered_result_in_dict
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
        Non-dict-like `agg_spec` are passed through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def validate_func_kwargs(
kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
raise TypeError(no_arg_message)
return columns, func
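# The following is a minimal usage sketch (not part of pandas itself): it
# shows how reconstruct_func ties the helpers above together when resolving
# named-aggregation kwargs such as df.groupby(...).agg(foo=("col", "min")).
# The name _named_agg_resolution_demo is illustrative only.
def _named_agg_resolution_demo():
    # kwargs exactly as a user would pass them to .agg(**kwargs)
    kwargs = {"foo": ("col", "min"), "bar": ("col", "max")}
    relabeling, func, columns, order = reconstruct_func(None, **kwargs)
    # relabeling is True, func is a defaultdict mapping "col" -> ["min", "max"],
    # columns is ("foo", "bar"), and order gives the positions later used by
    # relabel_result to reorder the aggregated output.
    return relabeling, func, columns, order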
# Copyright (c) 2013-2015 Centre for Advanced Internet Architectures,
# Swinburne University of Technology. All rights reserved.
#
# Author: Sebastian Zander (szander@swin.edu.au)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
## @package routersetup
# Router setup
#
# $Id: routersetup.py 1268 2015-04-22 07:04:19Z szander $
import config
from fabric.api import task, hosts, run, execute, abort, env, settings
from hostint import get_netint_cached, get_address_pair
from hosttype import get_type_cached
## Initialise single dummynet pipe
# Same queue but different delay/loss emulation
# @param counter Queue ID number
# @param source Source, can be an IP address or hostname or a subnet
# (e.g. 192.168.1.0/24)
# @param dest Destination, can be an IP address or hostname or a subnet
# (e.g. 192.168.1.0/24)
# @param rate Rate limit in bits, e.g. 100000000 (100Mbps in bits),
# 10kbit, 100mbit
# @param delay Emulated delay in milliseconds
# @param rtt Emulated rtt in milliseconds (needed only for determining
# queue size if not explicitly specified)
# @param loss Loss rate
# @param queue_size Queue size in slots (if a number) or bytes
# (e.g. specified as XKbytes, where X is a number)
# @param queue_size_mult Multiply 'bdp' queue size with this factor
# (must be a floating point)
# @param queue_disc Queueing discipline: fifo (default), red (RED)
# @param queue_disc_params: If queue_disc=='red' this must be set to:
# w_q/min_th/max_th/max_p (see ipfw man page for details)
# @param bidir If '0' pipe only in forward direction, if '1' two pipes (one
# in forward and one in backward direction)
def init_dummynet_pipe(counter='1', source='', dest='', rate='', delay='',
rtt='', loss='', queue_size='', queue_size_mult='1.0',
queue_disc='', queue_disc_params='', bidir='0'):
queue_size = str(queue_size)
if queue_size.lower() == 'bdp':
        # this only works if rate is specified as a number of bits/second
if rtt == '':
rtt = str(2 * int(delay))
queue_size = int(float(rate) * (float(rtt) / 1000.0) / 8)
if queue_size < 2048:
queue_size = 2048
if queue_size_mult != '1.0':
queue_size = int(float(queue_size) * float(queue_size_mult))
queue_size = str(queue_size)
if queue_disc != 'fifo' and queue_disc != 'red':
abort("Only queuing disciplines for Dummynet are 'fifo' and 'red'")
# ipfw rule number
rule_no = str(int(counter) * 100)
# configure pipe
config_pipe_cmd = 'ipfw pipe %s config' % counter
if rate != '':
config_pipe_cmd += ' bw %sbits/s' % rate
if delay != '':
config_pipe_cmd += ' delay %sms' % delay
if loss != "":
config_pipe_cmd += ' plr %s' % loss
if queue_size != "":
config_pipe_cmd += ' queue %s' % queue_size
if queue_disc == 'red':
config_pipe_cmd += ' red %s' % queue_disc_params
run(config_pipe_cmd)
# create pipe rule
create_pipe_cmd = 'ipfw add %s pipe %s ip from %s to %s out' % (
rule_no, counter, source, dest)
run(create_pipe_cmd)
if bidir == '1':
create_pipe_cmd = 'ipfw add %s pipe %s ip from %s to %s out' % (
rule_no, counter, dest, source)
run(create_pipe_cmd)
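# Worked example (illustration only; the addresses and values below are
# assumptions, not part of the original script):
#   init_dummynet_pipe(counter='1', source='172.16.10.0/24',
#                      dest='172.16.11.0/24', rate='10000000', delay='25',
#                      loss='0.01', queue_size='50', queue_disc='fifo')
# runs roughly the following commands on the FreeBSD router:
#   ipfw pipe 1 config bw 10000000bits/s delay 25ms plr 0.01 queue 50
#   ipfw add 100 pipe 1 ip from 172.16.10.0/24 to 172.16.11.0/24 out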
## Initialise tc (Linux)
## setup a class (htb qdisc) for each interface with rate limits
## setup actual qdisc (e.g. codel) as leaf qdisc for class
## then redirect traffic to pseudo interface and apply netem to emulate
## delay and/or loss
# @param counter Queue ID number
# @param source Source, can be an IP address or hostname or a subnet
# (e.g. 192.168.1.0/24)
# @param dest Destination, can be an IP address or hostname or a subnet
# (e.g. 192.168.1.0/24)
# @param rate Rate limit in bits, e.g. 100000000 (100Mbps in bits), 10kbit, 100mbit
# @param delay Emulated delay in milliseconds
# @param rtt Emulated rtt in milliseconds (needed only for determining
# queue size if not explicitly specified)
# @param loss Loss rate
# @param queue_size Can be in packets or bytes depending on queue_disc; if in bytes
# can use units, e.g. 1kb
# @param queue_size_mult Multiply 'bdp' queue size with this factor
# (must be a floating point)
# @param queue_disc fifo (mapped to pfifo, FreeBSD compatibility), fq_codel, codel, red,
# choke, pfifo, pie (only as patch), ...
# @param queue_disc_params Parameters for queuing discipline, see man pages for queuing
# disciplines
# @param bidir If '0' (pipe only in forward direction),
# if '1' (two pipes in both directions)
# @param attach_to_queue Specify number of existing queue to use, but emulate
# different delay/loss
def init_tc_pipe(counter='1', source='', dest='', rate='', delay='', rtt='', loss='',
queue_size='', queue_size_mult='1.0', queue_disc='',
queue_disc_params='', bidir='0', attach_to_queue=''):
# compatibility with FreeBSD
if queue_disc == 'fifo':
# pfifo is the default for HTB classes
queue_disc = 'pfifo'
queue_size = str(queue_size)
if queue_size.lower() == 'bdp':
_rate = rate.replace('kbit', '000')
_rate = _rate.replace('mbit', '000000')
if rtt == '':
rtt = str(2 * int(delay))
if queue_disc == 'pfifo' or queue_disc == 'codel' or \
queue_disc == 'fq_codel' or queue_disc == 'pie':
# queue size in packets
avg_packet = 600 # average packet size
queue_size = int(
float(_rate) * (float(rtt) / 1000.0) / 8 / avg_packet)
if queue_size_mult != '1.0':
queue_size = int(float(queue_size) * float(queue_size_mult))
if queue_size < 1:
queue_size = 1 # minimum 1 packet
queue_size = str(queue_size)
elif queue_disc == 'bfifo' or queue_disc == 'red':
# queue size in bytes
queue_size = int(float(_rate) * (float(rtt) / 1000.0) / 8)
if queue_size_mult != '1.0':
queue_size = int(float(queue_size) * float(queue_size_mult))
if queue_size < 2048:
queue_size = 2048 # minimum 2kB
queue_size = str(queue_size)
else:
abort(
'Can\'t specify \'bdp\' for queuing discipline %s' %
queue_disc)
# class/handle numbers
class_no = str(int(counter) + 0)
if attach_to_queue == '':
queue_class_no = class_no
else:
# if attach_to_queue is set we attach this to existing (previously
# configured pipe). this means packets will go through an existing htb
# and leaf qdisc, but a separate netem.
# so we can have different flows going through the same bottleneck
# queue, but with different emulated delays or loss rates
queue_class_no = attach_to_queue
netem_class_no = class_no
qdisc_no = str(int(counter) + 1000)
netem_no = str(int(counter) + 1000)
# disciplines: fq_codel, codel, red, choke, pfifo, pfifo_fast (standard
# magic), pie (only as patch), ...
if queue_disc == '':
queue_disc = 'pfifo'
# for pie we need to make sure the kernel module is loaded (for kernel pre
# 3.14 only, for new kernels it happens automatically via tc use!)
if queue_disc == 'pie':
with settings(warn_only=True):
run('modprobe pie')
if rate == '':
rate = '1000mbit'
if queue_size == '':
        # set default queue size to 1000 packets (massive but default for e.g.
# codel)
queue_size = '1000'
if loss != '':
# convert to percentage
loss = str(float(loss) * 100)
interfaces = get_netint_cached(env.host_string, int_no=-1)
# our approach works as follows:
# - shaping, aqm and delay/loss emulation is done on egress interface
# (as usual)
# - use htb qdisc for rate limiting with the aqm qdisc (e.g. pfifo, codel)
    #   as leaf node
# - after shaping and aqm, emulate loss and delay with netem
# - for each "pipe" we setup a new class on all (two) interfaces
# - if pipes are unidirectional a class is only used on one of the two ifaces;
# otherwise it is used on both interfaces (XXX could optimise the
# unidirectional case and omit unused pipes)
# - traffic flow is as follows:
# 1. packets are marked by iptables in mangle table POSTROUTING hook
# depending on defined source/dest (unique mark for each pipe)
# 2. marked packets are classified into appropriate class (1-1 mapping
# between marks and classes) and redirected to pseudo interface
# 3. pseudo interface does the shaping with htb and aqm (leaf qdisc)
# 4. packets go back to actual interface
# 5. actual interface does network emulation (delay/loss), here htb is set to
# max rate (1Gbps) and pfifo is used (effectively no shaping or aqm here)
    # note that according to my information the htb has a built-in buffer of 1
# packet as well (cannot be changed)
cnt = 0
for interface in interfaces:
pseudo_interface = 'ifb' + str(cnt)
# config rate limiting on pseudo interface
config_tc_cmd = 'tc class add dev %s parent 1: classid 1:%s htb rate %s ceil %s' % \
(pseudo_interface, queue_class_no, rate, rate)
if attach_to_queue == '':
run(config_tc_cmd)
# config queuing discipline and buffer limit on pseudo interface
config_tc_cmd = 'tc qdisc add dev %s parent 1:%s handle %s: %s limit %s %s' % \
(pseudo_interface,
queue_class_no,
qdisc_no,
queue_disc,
queue_size,
queue_disc_params)
if attach_to_queue == '':
run(config_tc_cmd)
# configure filter to classify traffic based on mark on pseudo device
config_tc_cmd = 'tc filter add dev %s protocol ip parent 1: ' \
'handle %s fw flowid 1:%s' % (
pseudo_interface, class_no, queue_class_no)
run(config_tc_cmd)
# configure class for actual interface with max rate
config_tc_cmd = 'tc class add dev %s parent 1: classid 1:%s ' \
'htb rate 1000mbit ceil 1000mbit' % \
(interface, netem_class_no)
run(config_tc_cmd)
# config netem on actual interface
config_tc_cmd = 'tc qdisc add dev %s parent 1:%s handle %s: ' \
'netem limit 1000' % (
interface, netem_class_no, netem_no)
if delay != "":
config_tc_cmd += " delay %sms" % delay
if loss != "":
config_tc_cmd += " loss %s%%" % loss
run(config_tc_cmd)
# configure filter to redirect traffic to pseudo device first and also
# classify traffic based on mark after leaving the pseudo interface traffic
# will go back to actual interface
config_tc_cmd = 'tc filter add dev %s protocol ip parent 1: handle %s ' \
'fw flowid 1:%s action mirred egress redirect dev %s' % \
(interface, class_no, netem_class_no, pseudo_interface)
run(config_tc_cmd)
cnt += 1
# filter on specific ips
config_it_cmd = 'iptables -t mangle -A POSTROUTING -s %s -d %s -j MARK --set-mark %s' % \
(source, dest, class_no)
run(config_it_cmd)
if bidir == '1':
config_it_cmd = 'iptables -t mangle -A POSTROUTING -s %s -d %s -j MARK --set-mark %s' % \
(dest, source, class_no)
run(config_it_cmd)
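# Worked 'bdp' sizing example (illustration only; the numbers are assumptions):
# with rate='10mbit' (so _rate='10000000'), delay='20' and no explicit rtt,
# rtt defaults to str(2 * 20) = '40'. For a packet-based qdisc such as pfifo
# the queue size becomes int(10000000 * (40 / 1000.0) / 8 / 600) = 83 packets,
# while for a byte-based qdisc such as bfifo or red it becomes
# int(10000000 * (40 / 1000.0) / 8) = 50000 bytes (the 2048 byte minimum
# does not kick in here).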
## Show dummynet pipes
def show_dummynet_pipes():
run('ipfw -a list')
run('ipfw -a pipe list')
## Show tc setup
def show_tc_setup():
interfaces = get_netint_cached(env.host_string, int_no=-1)
run('tc -d -s qdisc show')
cnt = 0
for interface in interfaces:
run('tc -d -s class show dev %s' % interface)
run('tc -d -s filter show dev %s' % interface)
pseudo_interface = 'ifb' + str(cnt)
run('tc -d -s class show dev %s' % pseudo_interface)
run('tc -d -s filter show dev %s' % pseudo_interface)
cnt += 1
run('iptables -t mangle -vL')
## Show pipe setup
@task
def show_pipes():
"Show pipe setup on router"
# get type of current host
htype = get_type_cached(env.host_string)
if htype == 'FreeBSD':
execute(show_dummynet_pipes)
elif htype == 'Linux':
execute(show_tc_setup)
else:
abort("Router must be running FreeBSD or Linux")
## Configure a pipe on the router, encompassing rate shaping, AQM,
## loss/delay emulation
## For parameter explanations see descriptions of init_dummynet_pipe() and init_tc_pipe()
## Note: attach_to_queue only works for Linux
@task
def init_pipe(counter='1', source='', dest='', rate='', delay='', rtt='', loss='',
queue_size='', queue_size_mult='1.0', queue_disc='',
queue_disc_params='', bidir='0', attach_to_queue=''):
"Configure pipe on router, including rate shaping, AQM, loss/delay emulation"
# get internal addresses
dummy, source_internal = get_address_pair(source)
dummy, dest_internal = get_address_pair(dest)
# get type of current host
htype = get_type_cached(env.host_string)
if htype == 'FreeBSD':
execute(
init_dummynet_pipe,
counter,
source_internal,
dest_internal,
rate,
delay,
rtt,
loss,
queue_size,
queue_size_mult,
queue_disc,
queue_disc_params,
bidir)
elif htype == 'Linux':
execute(
init_tc_pipe,
counter,
source_internal,
dest_internal,
rate,
delay,
rtt,
loss,
queue_size,
queue_size_mult,
queue_disc,
queue_disc_params,
bidir,
attach_to_queue)
else:
abort("Router must be running FreeBSD or Linux")
"""
Unspecified error handling tests
"""
import numpy as np
import os
from numba import jit, njit, typed, int64, types
from numba.core import errors
import numba.core.typing.cffi_utils as cffi_support
from numba.experimental import structref
from numba.extending import (overload, intrinsic, overload_method,
overload_attribute)
from numba.core.compiler import CompilerBase
from numba.core.untyped_passes import (TranslateByteCode, FixupArgs,
IRProcessing,)
from numba.core.typed_passes import (NopythonTypeInference, DeadCodeElimination,
NoPythonBackend, NativeLowering)
from numba.core.compiler_machinery import PassManager
from numba.core.types.functions import _err_reasons as error_reasons
from numba.tests.support import (skip_parfors_unsupported, override_config,
SerialMixin, skip_unless_scipy)
import unittest
# used in TestMiscErrorHandling::test_handling_of_write_to_*_global
_global_list = [1, 2, 3, 4]
_global_dict = typed.Dict.empty(int64, int64)
class TestErrorHandlingBeforeLowering(unittest.TestCase):
def test_unsupported_make_function_return_inner_func(self):
def func(x):
""" return the closure """
z = x + 1
def inner(x):
return x + z
return inner
for pipeline in jit, njit:
with self.assertRaises(errors.TypingError) as raises:
pipeline(func)(1)
expected = "Cannot capture the non-constant value"
self.assertIn(expected, str(raises.exception))
class TestUnsupportedReporting(unittest.TestCase):
def test_unsupported_numpy_function(self):
# np.asanyarray(list) currently unsupported
@njit
def func():
np.asanyarray([1,2,3])
with self.assertRaises(errors.TypingError) as raises:
func()
expected = "Use of unsupported NumPy function 'numpy.asanyarray'"
self.assertIn(expected, str(raises.exception))
class TestMiscErrorHandling(unittest.TestCase):
def test_use_of_exception_for_flow_control(self):
# constant inference uses exceptions with no Loc specified to determine
# flow control, this asserts that the construction of the lowering
# error context handler works in the case of an exception with no Loc
# specified. See issue #3135.
@njit
def fn(x):
return 10**x
a = np.array([1.0],dtype=np.float64)
fn(a) # should not raise
def test_commented_func_definition_is_not_a_definition(self):
# See issue #4056, the commented def should not be found as the
# definition for reporting purposes when creating the synthetic
# traceback because it is commented! Use of def in docstring would also
# cause this issue hence is tested.
def foo_commented():
#def commented_definition()
raise Exception('test_string')
def foo_docstring():
""" def docstring containing def might match function definition!"""
raise Exception('test_string')
for func in (foo_commented, foo_docstring):
with self.assertRaises(Exception) as raises:
func()
self.assertIn("test_string", str(raises.exception))
def test_use_of_ir_unknown_loc(self):
# for context see # 3390
class TestPipeline(CompilerBase):
def define_pipelines(self):
name = 'bad_DCE_pipeline'
pm = PassManager(name)
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
pm.add_pass(IRProcessing, "processing IR")
# remove dead before type inference so that the Arg node is
# removed and the location of the arg cannot be found
pm.add_pass(DeadCodeElimination, "DCE")
# typing
pm.add_pass(NopythonTypeInference, "nopython frontend")
pm.add_pass(NativeLowering, "native lowering")
pm.add_pass(NoPythonBackend, "nopython mode backend")
pm.finalize()
return [pm]
@njit(pipeline_class=TestPipeline)
def f(a):
return 0
with self.assertRaises(errors.TypingError) as raises:
f(iter([1,2])) # use a type that Numba doesn't recognize
expected = 'File "unknown location", line 0:'
self.assertIn(expected, str(raises.exception))
def check_write_to_globals(self, func):
with self.assertRaises(errors.TypingError) as raises:
func()
expected = ["The use of a", "in globals, is not supported as globals"]
for ex in expected:
self.assertIn(ex, str(raises.exception))
def test_handling_of_write_to_reflected_global(self):
@njit
def foo():
_global_list[0] = 10
self.check_write_to_globals(foo)
def test_handling_of_write_to_typed_dict_global(self):
@njit
def foo():
_global_dict[0] = 10
self.check_write_to_globals(foo)
@skip_parfors_unsupported
def test_handling_forgotten_numba_internal_import(self):
@njit(parallel=True)
def foo():
for i in prange(10): # noqa: F821 prange is not imported
pass
with self.assertRaises(errors.TypingError) as raises:
foo()
expected = ("'prange' looks like a Numba internal function, "
"has it been imported")
self.assertIn(expected, str(raises.exception))
def test_handling_unsupported_generator_expression(self):
def foo():
(x for x in range(10))
expected = "The use of yield in a closure is unsupported."
for dec in jit(forceobj=True), njit:
with self.assertRaises(errors.UnsupportedError) as raises:
dec(foo)()
self.assertIn(expected, str(raises.exception))
def test_handling_undefined_variable(self):
@njit
def foo():
return a # noqa: F821
expected = "NameError: name 'a' is not defined"
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn(expected, str(raises.exception))
class TestConstantInferenceErrorHandling(unittest.TestCase):
def test_basic_error(self):
# issue 3717
@njit
def problem(a,b):
if a == b:
raise Exception("Equal numbers: %i %i", a, b)
return a * b
with self.assertRaises(errors.ConstantInferenceError) as raises:
problem(1,2)
msg1 = "Constant inference not possible for: arg(0, name=a)"
msg2 = 'raise Exception("Equal numbers: %i %i", a, b)'
self.assertIn(msg1, str(raises.exception))
self.assertIn(msg2, str(raises.exception))
class TestErrorMessages(unittest.TestCase):
def test_specific_error(self):
given_reason = "specific_reason"
def foo():
pass
@overload(foo)
def ol_foo():
raise errors.NumbaValueError(given_reason)
@njit
def call_foo():
foo()
with self.assertRaises(errors.TypingError) as raises:
call_foo()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
self.assertIn(given_reason, excstr)
def test_no_match_error(self):
def foo():
pass
@overload(foo)
def ol_foo():
return None # emulate no impl available for type
@njit
def call_foo():
foo()
with self.assertRaises(errors.TypingError) as raises:
call_foo()
excstr = str(raises.exception)
self.assertIn("No match", excstr)
@skip_unless_scipy
def test_error_function_source_is_correct(self):
""" Checks that the reported source location for an overload is the
overload implementation source, not the actual function source from the
target library."""
@njit
def foo():
np.linalg.svd("chars")
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
expected_file = os.path.join("numba", "np", "linalg.py")
expected = f"Overload in function 'svd_impl': File: {expected_file}:"
self.assertIn(expected.format(expected_file), excstr)
def test_concrete_template_source(self):
# hits ConcreteTemplate
@njit
def foo():
return 'a' + 1
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'add'", excstr)
# there'll be numerous matched templates that don't work but as they
# are mostly "overload_glue"s they'll just appear as "No match".
self.assertIn("No match.", excstr)
def test_abstract_template_source(self):
# hits AbstractTemplate
@njit
def foo():
return len(1)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'len'", excstr)
def test_callable_template_source(self):
# hits CallableTemplate
@njit
def foo():
return np.angle(1)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("No implementation of function Function(<function angle",
excstr)
def test_overloadfunction_template_source(self):
# hits _OverloadFunctionTemplate
def bar(x):
pass
@overload(bar)
def ol_bar(x):
pass
@njit
def foo():
return bar(1)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
# there will not be "numerous" matched templates, there's just one,
# the one above, so assert it is reported
self.assertNotIn("<numerous>", excstr)
expected_file = os.path.join("numba", "tests",
"test_errorhandling.py")
expected_ol = f"Overload of function 'bar': File: {expected_file}:"
self.assertIn(expected_ol.format(expected_file), excstr)
self.assertIn("No match.", excstr)
def test_intrinsic_template_source(self):
# hits _IntrinsicTemplate
given_reason1 = "x must be literal"
given_reason2 = "array.ndim must be 1"
@intrinsic
def myintrin(typingctx, x, arr):
if not isinstance(x, types.IntegerLiteral):
raise errors.RequireLiteralValue(given_reason1)
if arr.ndim != 1:
raise errors.NumbaValueError(given_reason2)
sig = types.intp(x, arr)
def codegen(context, builder, signature, args):
pass
return sig, codegen
@njit
def call_intrin():
arr = np.zeros((2, 2))
myintrin(1, arr)
with self.assertRaises(errors.TypingError) as raises:
call_intrin()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
self.assertIn(given_reason1, excstr)
self.assertIn(given_reason2, excstr)
self.assertIn("Intrinsic in function", excstr)
def test_overloadmethod_template_source(self):
# doesn't hit _OverloadMethodTemplate for source as it's a nested
# exception
@overload_method(types.UnicodeType, 'isnonsense')
def ol_unicode_isnonsense(self):
pass
@njit
def foo():
"abc".isnonsense()
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr)
def test_overloadattribute_template_source(self):
# doesn't hit _OverloadMethodTemplate for source as it's a nested
# exception
@overload_attribute(types.UnicodeType, 'isnonsense')
def ol_unicode_isnonsense(self):
pass
@njit
def foo():
"abc".isnonsense
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr)
def test_external_function_pointer_template_source(self):
from numba.tests.ctypes_usecases import c_cos
@njit
def foo():
c_cos('a')
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Type Restricted Function in function 'unknown'", excstr)
@unittest.skipUnless(cffi_support.SUPPORTED, "CFFI not supported")
def test_cffi_function_pointer_template_source(self):
from numba.tests import cffi_usecases as mod
mod.init()
func = mod.cffi_cos
@njit
def foo():
func('a')
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Type Restricted Function in function 'unknown'", excstr)
def test_missing_source(self):
@structref.register
class ParticleType(types.StructRef):
pass
class Particle(structref.StructRefProxy):
def __new__(cls, pos, mass):
return structref.StructRefProxy.__new__(cls, pos)
# didn't provide the required mass argument ----^
structref.define_proxy(Particle, ParticleType, ["pos", "mass"])
with self.assertRaises(errors.TypingError) as raises:
Particle(pos=1, mass=2)
excstr = str(raises.exception)
self.assertIn("missing a required argument: 'mass'", excstr)
class TestDeveloperSpecificErrorMessages(SerialMixin, unittest.TestCase):
def test_bound_function_error_string(self):
# See PR #5952
def foo(x):
x.max(-1) # axis not supported
with override_config('DEVELOPER_MODE', 1):
with self.assertRaises(errors.TypingError) as raises:
njit("void(int64[:,:])")(foo)
excstr = str(raises.exception)
self.assertIn("args not supported", excstr)
class TestCapturedErrorHandling(SerialMixin, unittest.TestCase):
"""Checks that the way errors are captured changes depending on the env
var "NUMBA_CAPTURED_ERRORS".
"""
def test_error_in_overload(self):
def bar(x):
pass
@overload(bar)
def ol_bar(x):
x.some_invalid_attr # doesn't exist!
def impl(x):
pass
return impl
for style, err_class in (('new_style', AttributeError),
('old_style', errors.TypingError)):
with override_config('CAPTURED_ERRORS', style):
with self.assertRaises(err_class) as raises:
@njit('void(int64)')
def foo(x):
bar(x)
expected = "object has no attribute 'some_invalid_attr'"
self.assertIn(expected, str(raises.exception))
if __name__ == '__main__':
unittest.main()
# Seshdash imports
from seshdash.models import Sesh_User, Sesh_Site,Site_Weather_Data,BoM_Data_Point, Alert_Rule, Sesh_Alert, RMC_status, Slack_Channel
from seshdash.utils.send_mail import send_mail
from seshdash.utils.send_sms import send_sms
from seshdash.utils.model_tools import get_model_from_string, get_latest_instance
from seshdash.utils.model_tools import get_model_first_reference, get_measurement_from_rule, get_measurement_verbose_name
from seshdash.utils.reporting import get_measurement_unit
from seshdash.utils.time_utils import get_epoch_from_datetime
from seshdash.utils.send_slack import Slack
# django helper imports
from django.utils import timezone
from guardian.shortcuts import get_users_with_perms, get_groups_with_perms
from django.conf import settings
from dateutil import parser
from django.template.loader import get_template
from django.template import TemplateDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
# Influx relates clients
from seshdash.data.db import influx
from seshdash.data.db.kapacitor import Kapacitor
# Misc
import logging
# Instantiating the logger
logger = logging.getLogger(__name__)
#initialize global kapacitor
# kap = Kapacitor()
# Sends an email if the received data point fails to pass the defined rules for its site.
# New Alert Rules can be defined as below:
# Alert_Rule.objects.create(site = site, check_field="soc", value=30, operator="gt")
# Alert_Rule.objects.create(site = site, check_field="soc", value=35.5, operator="eq")
# Alert_Rule.objects.create(site = site, check_field="battery_voltage", value=25, operator="lt")
# Rules are defined per site, so you need to define a rule for each site if you want to check the same configuration on several sites.
# A Sesh_Alert object is created for each alert triggered and an email is sent if the rule has the send_mail option set to true
def alert_generator():
""" Generates alerts for a given site """
mails = []
sms_numbers = []
rules = Alert_Rule.objects.all()
for rule in rules:
site = rule.site
site_groups = get_groups_with_perms(site)
# Get datapoint and real value
data_point, real_value = get_alert_check_value(rule)
if data_point is not None and real_value is not None:
if check_alert(rule, real_value):
alert_obj = alert_factory(site, rule, data_point)
# if alert_obj is created
if alert_obj is not None:
content = get_alert_content(site, rule, data_point, real_value, alert_obj)
mails, sms_numbers = get_recipients_for_site(site)
# reporting
logging.debug("Alert triggered sending alerts out %s"%mails)
alert_obj.emailSent = send_mail("Alert Mail", mails, content)
alert_obj.smsSent = send_sms(sms_numbers, content)
slack_msg = get_slack_alert_msg("Alert Triggered", alert_obj)
alert_obj.slackSent = send_alert_slack(site_groups, slack_msg)
alert_obj.save()
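# Flow sketch (informal summary of alert_generator above): for every
# Alert_Rule, the latest data point for the rule's site is fetched (from MySQL
# or InfluxDB depending on the check_field format), check_alert() compares its
# value against the rule, and if the rule trips and the previous alert was
# silenced (or no alert exists yet), a Sesh_Alert is created and email, SMS and
# Slack notifications are sent out.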
def render_alert_script(data_for_alert):
    """
    Utility function to find and render the kapacitor alert template
    @params data_for_alert - dictionary containing data to render in alert
    """
    rendered = ""
    template_file = "%s/%s.tick" % (
        settings.KAPACITOR_TEMPLATE_FOLDER,
        settings.ALERT_TEMPLATE_NAME)
    try:
        template = get_template(template_file)
    except TemplateDoesNotExist:
        logging.exception("Unable to find template %s" % template_file)
        return rendered
    # render the tick script with the alert specific values filled in
    rendered = template.render(data_for_alert)
    return rendered
def create_alert(site, alert):
    """
    Wrapper function to create alerts in kapacitor using django alert templates
    @params site - site which the alert is getting created for
    @params alert - Alert Rule object
    """
    data_for_alert = {}
    # Generate a unique ID for alert
    alert_id = "%s#%s" % (site.site_name, alert.pk)
    alert_opr = alert.OPERATOR_MAPPING[alert.operator]
    data_for_alert['id'] = alert_id
    data_for_alert['where_filter_lambda'] = 'lambda: \'site\'=%s' % site.site_name
    data_for_alert['error_lambda'] = 'lambda: \'value\' %s %s' % (alert_opr, alert.value)
    # TODO this is hard coded, bake this into the model; 5m is 5 minutes
    data_for_alert['time_window'] = '5m'
    alert_script = render_alert_script(data_for_alert)
    # the module level kapacitor client is commented out above, so create one
    # here (assumes the default Kapacitor() connection settings)
    kap = Kapacitor()
    res = kap.create_task(alert_id, dbrps=settings.KAPACITOR_DBRPS, script=alert_script)
    return res
def send_alert_slack(site_groups, content):
"""
Sends the alert message to specific channels in slack for organisations
"""
for site_group in site_groups:
        try:
            sesh_organisation = site_group.sesh_organisation
        except ObjectDoesNotExist:
            # RelatedObjectDoesNotExist is a subclass of ObjectDoesNotExist
            logging.error("There is no associated sesh organisation for group %s " % site_group)
return False
if sesh_organisation.send_slack:
channels = sesh_organisation.slack_channel.all().filter(is_alert_channel=True)
slack = Slack(sesh_organisation.slack_token) # instantiate the api for the organisation
for channel in channels:
response = slack.send_message_to_channel(channel.name, content)
if not response:
logging.error('Failed to send message for %s in %s' % (sesh_organisation, channel))
return False
else:
logger.debug("Slack reports disabled for %s organisation " % sesh_organisation)
return False
return True
def get_slack_alert_msg(subject, alert):
"""
Function to generate alert messages provided the
subject and the alert obj
"""
msg = ''
data_point, value = get_alert_check_value(alert.alert)
msg += subject
msg += '\n'
msg += 'rule: ' + str(alert.alert)
msg += '\n'
msg += 'found: ' + str(value)
msg += '\n'
msg += 'At site: ' + str(alert.site)
return msg
def get_alert_check_value(rule):
"""
    Returns the latest data point and its value to be checked for an alert.
    These are taken from the latest data point for the rule's site and check_field.
"""
site = rule.site
if is_mysql_rule(rule):
model, field_name = rule.check_field.split('#')
latest_data_point = get_latest_data_point_mysql(site, rule)
if latest_data_point is not None:
data_point_value = getattr(latest_data_point, field_name)
else:
data_point_value = None
logger.error("No data points for %s", model)
return latest_data_point, data_point_value
elif is_influx_rule(rule):
# Getting the datapoint from influx
latest_data_point = get_latest_point_influx(site, rule)
if latest_data_point is not None:
data_point_value = latest_data_point['value']
return latest_data_point, data_point_value
else:
return None, None
else:
return None, None
def get_latest_point_influx(site, rule):
latest_data_point = influx.get_latest_point_site(site, rule.check_field, settings.INFLUX_DB)
return latest_data_point
def get_latest_point_value_influx(site, rule):
latest_data_point_value = get_latest_point_influx(site,rule)
return latest_data_point_value['value']
def is_influx_rule(rule):
"""
    A function that detects if the alert rule defined uses influx.
    Influx rules do not contain a '#', so splitting the check_field on '#'
    returns a single element.
"""
if len(rule.check_field.split('#')) == 1:
return True
else:
return False
def is_mysql_rule(rule):
"""
    A function that detects if the alert rule defined uses mysql
"""
if len(rule.check_field.split('#')) == 2:
return True
else:
return False
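# Illustrative check_field formats (the model and field names below are
# assumptions):
#   'BoM_Data_Point#soc'  -> mysql rule, i.e. '<model name>#<field name>'
#   'battery_voltage'     -> influx rule, i.e. a bare measurement name, no '#'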
def check_alert(rule, data_point_value):
""" Checks the alert and returns boolean value true if there is alert and false otherwise """
ops = {'lt': lambda x,y: x<y,
'gt': lambda x,y: x>y,
'eq' : lambda x,y: x==y,
}
""" If there is an alert """
if ops[rule.operator](data_point_value,rule.value):
return True
else:
return False
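# Example (illustrative): for a rule with operator='lt' and value=30,
# check_alert(rule, 25.0) evaluates ops['lt'](25.0, 30) -> True, so an alert
# would be raised for that data point.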
def get_message_alert(alert):
"""
    Returns an alert message representation
"""
measurement = get_measurement_from_rule(alert.alert)
return "At site %s \n %s is %s %s%s" % (alert.site, get_measurement_verbose_name(measurement),
alert.alert.get_operator_display(), alert.alert.value,
get_measurement_unit(measurement))
def get_alert_content(site, rule, data_point, value, alert):
""" Returns a dictionary containing information about the alert """
content = {}
content_str = get_message_alert(alert)
# Get ready content for email
content['site'] = site.site_name
content['alert_str'] = content_str
content['alert'] = alert
# Handling content for influx
if is_influx_rule(rule):
content['time'] = data_point['time']
else:
content['time'] = data_point.time
content['data_point'] = data_point
return content
def get_recipients_for_site(site):
""" Returns mails and sms of users with allowance to recieve messages for site """
users = get_users_with_perms(site)
mails = []
sms_numbers = []
for user in users:
mails.append(user.email)
if user.on_call and user.send_sms and user.phone_number:
sms_numbers.append(user.phone_number)
return mails, sms_numbers
def alert_factory(site, rule, data_point):
""" Creating an alert object """
# Getting the last alert for rule
point = rule.alert_point.last()
alert_obj = None
    # If the last alert does not exist
if point is None:
alert_obj = create_alert_instance(site, rule, data_point)
# if there is a last alert check if it is silenced
else:
# if the last alert is silenced create an alert
if point.isSilence is True:
alert_obj = create_alert_instance(site, rule, data_point)
return alert_obj
def create_alert_instance(site, rule, data_point):
if is_mysql_rule(rule):
alert_obj = Sesh_Alert.objects.create(
site = site,
alert=rule,
date=timezone.now(),
isSilence=False,
emailSent=False,
slackSent=False,
smsSent=False,
point_model=type(data_point).__name__,
point_id= str(data_point.id ))
alert_obj.save()
# Set data point to point to alert
data_point.target_alert = alert_obj
data_point.save()
elif is_influx_rule(rule):
alert_obj = Sesh_Alert.objects.create(
site = site,
alert = rule,
date = timezone.now(),
isSilence=False,
emailSent=False,
slackSent=False,
smsSent=False,
point_model='influx',
point_id = get_epoch_from_datetime(parser.parse(data_point['time'])))
alert_obj.save()
return alert_obj
def get_unsilenced_alerts():
""" Return the unsilenced alerts of a site if any, otherwiser returns false """
unsilenced_alerts = Sesh_Alert.objects.filter(isSilence=False)
if unsilenced_alerts:
return unsilenced_alerts
else:
return []
def get_latest_instance_site(site, model):
""" Returns latest instance for models with site """
latest_instance_site = model.objects.filter(site=site).order_by('-id')
if latest_instance_site:
return latest_instance_site[0]
else:
return None
def get_latest_data_point_mysql(site, rule):
""" Returns the latest point in the model specified in the rule checkfield"""
model, field_name = rule.check_field.strip().split('#') # Get model and field names
# Getting the model name and the latest value of the model field
model = get_model_from_string(model) # returns a model class ex 'BoM_Data_Point'
latest_data_point = get_latest_instance_site(site, model)
return latest_data_point
def get_latest_data_point_value_mysql(site, rule):
""" Returns the value to check for the value of the latest point for model in the rule checkfield """
model, field_name = rule.check_field.strip().split('#')
# Getting the model name and the latest value of the model field
model = get_model_from_string(model)
latest_data_point = get_latest_instance_site(site, model)
latest_data_point_value = getattr(latest_data_point, field_name)
return latest_data_point_value
def get_alert_point(alert):
""" Returns a point that triggers the alert """
model_name = alert.point_model
rule = alert.alert
check_field = alert.alert.check_field
if is_influx_rule(rule):
point = influx.get_point(check_field, alert.point_id)
else:
point = get_model_first_reference(model_name, alert)
return point
def get_alert_point_value(alert, point=None):
""" Returns the value that triggers the alert """
rule = alert.alert
if not point:
point = get_alert_point(alert)
# Handle if alert has no point, (no puns intended)
if not point:
return None
# Get the alert data
if is_mysql_rule(rule):
model, field_name = rule.check_field.strip().split('#')
value = getattr(point, field_name)
elif is_influx_rule(rule):
value = point['value']
return value
def alert_status_check():
"""
Checks if the alert is still valid and silences it if it is invalid
"""
unsilenced_alerts = get_unsilenced_alerts()
logger.debug("Running alert status check")
for alert in unsilenced_alerts:
site = alert.site
rule = alert.alert
if is_mysql_rule(rule):
latest_data_point_value = get_latest_data_point_value_mysql(site, rule)
elif is_influx_rule(rule):
latest_data_point_value = get_latest_point_value_influx(site, rule)
else:
raise Exception("Invalid alert Rule")
if check_alert(rule, latest_data_point_value):
logger.debug("Alert is still valid")
else:
# Silencing the alert and generating email content
logger.debug("Alert is not valid, silencing alert")
alert.isSilence = True
alert.save()
data_point, data_point_value = get_alert_check_value(alert.alert)
# Handle no data point getting returned
if not data_point_value:
logger.warning("Now DP found for alert skipping ")
return None
content = get_alert_content(site, rule, data_point, data_point_value, alert)
mails, sms_numbers = get_recipients_for_site(site)
site_groups = get_groups_with_perms(site)
# Reporting
if mails:
send_mail('Alert Silenced', mails, content)
if sms_numbers:
                # argument order matches the send_sms call in alert_generator()
                send_sms(sms_numbers, content)
slack_msg = get_slack_alert_msg("Alert silenced", alert)
send_alert_slack(site_groups, slack_msg)
"""
Support for Telldus Live.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tellduslive/
"""
from datetime import datetime, timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_BATTERY_LEVEL, DEVICE_DEFAULT_NAME,
CONF_TOKEN, CONF_HOST,
EVENT_HOMEASSISTANT_START)
from homeassistant.helpers import discovery
from homeassistant.components.discovery import SERVICE_TELLDUSLIVE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util.json import load_json, save_json
APPLICATION_NAME = 'Home Assistant'
DOMAIN = 'tellduslive'
REQUIREMENTS = ['tellduslive==0.10.4']
_LOGGER = logging.getLogger(__name__)
TELLLDUS_CONFIG_FILE = 'tellduslive.conf'
KEY_CONFIG = 'tellduslive_config'
CONF_TOKEN_SECRET = 'token_secret'
CONF_UPDATE_INTERVAL = 'update_interval'
PUBLIC_KEY = 'THUPUNECH5YEQA3RE6UYUPRUZ2DUGUGA'
NOT_SO_PRIVATE_KEY = 'PHES7U2RADREWAFEBUSTUBAWRASWUTUS'
MIN_UPDATE_INTERVAL = timedelta(seconds=5)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_UPDATE_INTERVAL, default=DEFAULT_UPDATE_INTERVAL): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)))
}),
}, extra=vol.ALLOW_EXTRA)
ATTR_LAST_UPDATED = 'time_last_updated'
CONFIG_INSTRUCTIONS = """
To link your TelldusLive account:
1. Click the link below
2. Login to Telldus Live
3. Authorize {app_name}.
4. Click the Confirm button.
[Link TelldusLive account]({auth_url})
"""
def setup(hass, config, session=None):
"""Set up the Telldus Live component."""
from tellduslive import Session, supports_local_api
config_filename = hass.config.path(TELLLDUS_CONFIG_FILE)
conf = load_json(config_filename)
def request_configuration(host=None):
"""Request TelldusLive authorization."""
configurator = hass.components.configurator
hass.data.setdefault(KEY_CONFIG, {})
data_key = host or DOMAIN
# Configuration already in progress
if hass.data[KEY_CONFIG].get(data_key):
return
_LOGGER.info('Configuring TelldusLive %s',
'local client: {}'.format(host) if host else
'cloud service')
session = Session(public_key=PUBLIC_KEY,
private_key=NOT_SO_PRIVATE_KEY,
host=host,
application=APPLICATION_NAME)
auth_url = session.authorize_url
if not auth_url:
_LOGGER.warning('Failed to retrieve authorization URL')
return
_LOGGER.debug('Got authorization URL %s', auth_url)
def configuration_callback(callback_data):
"""Handle the submitted configuration."""
session.authorize()
res = setup(hass, config, session)
if not res:
configurator.notify_errors(
hass.data[KEY_CONFIG].get(data_key),
'Unable to connect.')
return
conf.update(
{host: {CONF_HOST: host,
CONF_TOKEN: session.access_token}} if host else
{DOMAIN: {CONF_TOKEN: session.access_token,
CONF_TOKEN_SECRET: session.access_token_secret}})
save_json(config_filename, conf)
# Close all open configurators: for now, we only support one
# tellstick device, and configuration via either cloud service
# or via local API, not both at the same time
for instance in hass.data[KEY_CONFIG].values():
configurator.request_done(instance)
hass.data[KEY_CONFIG][data_key] = \
configurator.request_config(
'TelldusLive ({})'.format(
'LocalAPI' if host
else 'Cloud service'),
configuration_callback,
description=CONFIG_INSTRUCTIONS.format(
app_name=APPLICATION_NAME,
auth_url=auth_url),
submit_caption='Confirm',
entity_picture='/static/images/logo_tellduslive.png',
)
def tellstick_discovered(service, info):
"""Run when a Tellstick is discovered."""
_LOGGER.info('Discovered tellstick device')
if DOMAIN in hass.data:
_LOGGER.debug('Tellstick already configured')
return
host, device = info[:2]
if not supports_local_api(device):
_LOGGER.debug('Tellstick does not support local API')
# Configure the cloud service
hass.async_add_job(request_configuration)
return
_LOGGER.debug('Tellstick does support local API')
# Ignore any known devices
if conf and host in conf:
_LOGGER.debug('Discovered already known device: %s', host)
return
# Offer configuration of both live and local API
request_configuration()
request_configuration(host)
discovery.listen(hass, SERVICE_TELLDUSLIVE, tellstick_discovered)
if session:
_LOGGER.debug('Continuing setup configured by configurator')
elif conf and CONF_HOST in next(iter(conf.values())):
# For now, only one local device is supported
_LOGGER.debug('Using Local API pre-configured by configurator')
session = Session(**next(iter(conf.values())))
elif DOMAIN in conf:
_LOGGER.debug('Using TelldusLive cloud service '
'pre-configured by configurator')
session = Session(PUBLIC_KEY, NOT_SO_PRIVATE_KEY,
application=APPLICATION_NAME, **conf[DOMAIN])
elif config.get(DOMAIN):
_LOGGER.info('Found entry in configuration.yaml. '
'Requesting TelldusLive cloud service configuration')
request_configuration()
if CONF_HOST in config.get(DOMAIN, {}):
_LOGGER.info('Found TelldusLive host entry in configuration.yaml. '
'Requesting Telldus Local API configuration')
request_configuration(config.get(DOMAIN).get(CONF_HOST))
return True
else:
_LOGGER.info('Tellstick discovered, awaiting discovery callback')
return True
if not session.is_authorized:
_LOGGER.error(
'Authentication Error')
return False
client = TelldusLiveClient(hass, config, session)
hass.data[DOMAIN] = client
if session:
client.update()
else:
hass.bus.listen(EVENT_HOMEASSISTANT_START, client.update)
return True
class TelldusLiveClient:
"""Get the latest data and update the states."""
def __init__(self, hass, config, session):
"""Initialize the Tellus data object."""
self.entities = []
self._hass = hass
self._config = config
self._interval = config.get(DOMAIN, {}).get(
CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL)
_LOGGER.debug('Update interval %s', self._interval)
self._client = session
def update(self, *args):
"""Periodically poll the servers for current state."""
_LOGGER.debug('Updating')
try:
self._sync()
finally:
track_point_in_utc_time(
self._hass, self.update, utcnow() + self._interval)
def _sync(self):
"""Update local list of devices."""
if not self._client.update():
_LOGGER.warning('Failed request')
def identify_device(device):
"""Find out what type of HA component to create."""
from tellduslive import (DIM, UP, TURNON)
if device.methods & DIM:
return 'light'
if device.methods & UP:
return 'cover'
if device.methods & TURNON:
return 'switch'
if device.methods == 0:
return 'binary_sensor'
_LOGGER.warning(
"Unidentified device type (methods: %d)", device.methods)
return 'switch'
def discover(device_id, component):
"""Discover the component."""
discovery.load_platform(
self._hass, component, DOMAIN, [device_id], self._config)
known_ids = {entity.device_id for entity in self.entities}
for device in self._client.devices:
if device.device_id in known_ids:
continue
if device.is_sensor:
for item in device.items:
discover((device.device_id, item.name, item.scale),
'sensor')
else:
discover(device.device_id,
identify_device(device))
for entity in self.entities:
entity.changed()
def device(self, device_id):
"""Return device representation."""
return self._client.device(device_id)
def is_available(self, device_id):
"""Return device availability."""
return device_id in self._client.device_ids
class TelldusLiveEntity(Entity):
"""Base class for all Telldus Live entities."""
def __init__(self, hass, device_id):
"""Initialize the entity."""
self._id = device_id
self._client = hass.data[DOMAIN]
self._client.entities.append(self)
self._name = self.device.name
_LOGGER.debug('Created device %s', self)
def changed(self):
"""Return the property of the device might have changed."""
if self.device.name:
self._name = self.device.name
self.schedule_update_ha_state()
@property
def device_id(self):
"""Return the id of the device."""
return self._id
@property
def device(self):
"""Return the representation of the device."""
return self._client.device(self.device_id)
@property
def _state(self):
"""Return the state of the device."""
return self.device.state
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def name(self):
"""Return name of device."""
return self._name or DEVICE_DEFAULT_NAME
@property
def available(self):
"""Return true if device is not offline."""
return self._client.is_available(self.device_id)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
if self._battery_level:
attrs[ATTR_BATTERY_LEVEL] = self._battery_level
if self._last_updated:
attrs[ATTR_LAST_UPDATED] = self._last_updated
return attrs
@property
def _battery_level(self):
"""Return the battery level of a device."""
from tellduslive import (BATTERY_LOW,
BATTERY_UNKNOWN,
BATTERY_OK)
if self.device.battery == BATTERY_LOW:
return 1
if self.device.battery == BATTERY_UNKNOWN:
return None
if self.device.battery == BATTERY_OK:
return 100
return self.device.battery # Percentage
@property
def _last_updated(self):
"""Return the last update of a device."""
return str(datetime.fromtimestamp(self.device.lastUpdated)) \
if self.device.lastUpdated else None
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script that deploys a Chrome build to a device.
The script supports deploying Chrome from these sources:
1. A local build output directory, such as chromium/src/out/[Debug|Release].
2. A Chrome tarball uploaded by a trybot/official-builder to GoogleStorage.
3. A Chrome tarball existing locally.
The script copies the necessary contents of the source location (tarball or
build directory) and rsyncs the contents of the staging directory onto your
device's rootfs.
"""
import collections
import contextlib
import functools
import logging
import multiprocessing
import os
import optparse
import shlex
import time
from chromite.buildbot import constants
from chromite.buildbot import cbuildbot_results as results_lib
from chromite.cros.commands import cros_chrome_sdk
from chromite.lib import chrome_util
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import remote_access as remote
from chromite.lib import stats
from chromite.scripts import lddtree
_USAGE = "deploy_chrome [--]\n\n %s" % __doc__
KERNEL_A_PARTITION = 2
KERNEL_B_PARTITION = 4
KILL_PROC_MAX_WAIT = 10
POST_KILL_WAIT = 2
MOUNT_RW_COMMAND = 'mount -o remount,rw /'
LSOF_COMMAND = 'lsof %s/chrome'
_CHROME_DIR = '/opt/google/chrome'
def _UrlBaseName(url):
"""Return the last component of the URL."""
return url.rstrip('/').rpartition('/')[-1]
class DeployFailure(results_lib.StepFailure):
"""Raised whenever the deploy fails."""
DeviceInfo = collections.namedtuple(
'DeviceInfo', ['target_dir_size', 'target_fs_free'])
class DeployChrome(object):
"""Wraps the core deployment functionality."""
def __init__(self, options, tempdir, staging_dir):
"""Initialize the class.
Arguments:
options: Optparse result structure.
tempdir: Scratch space for the class. Caller has responsibility to clean
it up.
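      staging_dir: Directory in which Chrome is staged before deployment.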
"""
self.tempdir = tempdir
self.options = options
self.staging_dir = staging_dir
self.host = remote.RemoteAccess(options.to, tempdir, port=options.port)
self._rootfs_is_still_readonly = multiprocessing.Event()
def _GetRemoteMountFree(self, remote_dir):
result = self.host.RemoteSh('df -k %s' % remote_dir)
line = result.output.splitlines()[1]
return int(line.split()[3])
def _GetRemoteDirSize(self, remote_dir):
result = self.host.RemoteSh('du -ks %s' % remote_dir)
return int(result.output.split()[0])
def _GetStagingDirSize(self):
result = cros_build_lib.DebugRunCommand(['du', '-ks', self.staging_dir],
redirect_stdout=True)
return int(result.output.split()[0])
def _ChromeFileInUse(self):
result = self.host.RemoteSh(LSOF_COMMAND % (self.options.target_dir,),
error_code_ok=True)
return result.returncode == 0
def _DisableRootfsVerification(self):
if not self.options.force:
logging.error('Detected that the device has rootfs verification enabled.')
logging.info('This script can automatically remove the rootfs '
'verification, which requires that it reboot the device.')
logging.info('Make sure the device is in developer mode!')
logging.info('Skip this prompt by specifying --force.')
      if not cros_build_lib.BooleanPrompt('Remove rootfs verification?', False):
# Since we stopped Chrome earlier, it's good form to start it up again.
if self.options.startui:
logging.info('Starting Chrome...')
self.host.RemoteSh('start ui')
raise DeployFailure('Need rootfs verification to be disabled. '
'Aborting.')
logging.info('Removing rootfs verification from %s', self.options.to)
    # Running in VMs causes make_dev_ssd's firmware sanity checks to fail.
# Use --force to bypass the checks.
cmd = ('/usr/share/vboot/bin/make_dev_ssd.sh --partitions %d '
'--remove_rootfs_verification --force')
for partition in (KERNEL_A_PARTITION, KERNEL_B_PARTITION):
self.host.RemoteSh(cmd % partition, error_code_ok=True)
# A reboot in developer mode takes a while (and has delays), so the user
# will have time to read and act on the USB boot instructions below.
logging.info('Please remember to press Ctrl-U if you are booting from USB.')
self.host.RemoteReboot()
# Now that the machine has been rebooted, we need to kill Chrome again.
self._KillProcsIfNeeded()
# Make sure the rootfs is writable now.
self._MountRootfsAsWritable(error_code_ok=False)
def _CheckUiJobStarted(self):
# status output is in the format:
# <job_name> <status> ['process' <pid>].
# <status> is in the format <goal>/<state>.
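    # e.g. 'ui start/running, process 2934' (illustrative upstart output).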
try:
result = self.host.RemoteSh('status ui')
except cros_build_lib.RunCommandError as e:
if 'Unknown job' in e.result.error:
return False
else:
raise e
return result.output.split()[1].split('/')[0] == 'start'
def _KillProcsIfNeeded(self):
if self._CheckUiJobStarted():
logging.info('Shutting down Chrome...')
self.host.RemoteSh('stop ui')
# Developers sometimes run session_manager manually, in which case we'll
# need to help shut the chrome processes down.
try:
with cros_build_lib.SubCommandTimeout(KILL_PROC_MAX_WAIT):
while self._ChromeFileInUse():
logging.warning('The chrome binary on the device is in use.')
logging.warning('Killing chrome and session_manager processes...\n')
self.host.RemoteSh("pkill 'chrome|session_manager'",
error_code_ok=True)
# Wait for processes to actually terminate
time.sleep(POST_KILL_WAIT)
logging.info('Rechecking the chrome binary...')
except cros_build_lib.TimeoutError:
msg = ('Could not kill processes after %s seconds. Please exit any '
'running chrome processes and try again.' % KILL_PROC_MAX_WAIT)
raise DeployFailure(msg)
def _MountRootfsAsWritable(self, error_code_ok=True):
"""Mount the rootfs as writable.
If the command fails, and error_code_ok is True, then this function sets
self._rootfs_is_still_readonly.
Arguments:
error_code_ok: See remote.RemoteAccess.RemoteSh for details.
"""
result = self.host.RemoteSh(MOUNT_RW_COMMAND, error_code_ok=error_code_ok)
if result.returncode:
self._rootfs_is_still_readonly.set()
def _GetDeviceInfo(self):
steps = [
functools.partial(self._GetRemoteDirSize, self.options.target_dir),
functools.partial(self._GetRemoteMountFree, self.options.target_dir)
]
return_values = parallel.RunParallelSteps(steps, return_values=True)
return DeviceInfo(*return_values)
def _CheckDeviceFreeSpace(self, device_info):
"""See if target device has enough space for Chrome.
Arguments:
device_info: A DeviceInfo named tuple.
"""
effective_free = device_info.target_dir_size + device_info.target_fs_free
staging_size = self._GetStagingDirSize()
if effective_free < staging_size:
raise DeployFailure(
'Not enough free space on the device. Required: %s MB, '
'actual: %s MB.' % (staging_size/1024, effective_free/1024))
if device_info.target_fs_free < (100 * 1024):
logging.warning('The device has less than 100MB free. deploy_chrome may '
'hang during the transfer.')
def _Deploy(self):
logging.info('Copying Chrome to %s on device...', self.options.target_dir)
# Show the output (status) for this command.
self.host.Rsync('%s/' % os.path.abspath(self.staging_dir),
self.options.target_dir,
inplace=True, debug_level=logging.INFO,
verbose=self.options.verbose)
if self.options.startui:
logging.info('Starting Chrome...')
self.host.RemoteSh('start ui')
def _CheckConnection(self):
try:
logging.info('Testing connection to the device...')
self.host.RemoteSh('true')
except cros_build_lib.RunCommandError as ex:
logging.error('Error connecting to the test device.')
raise DeployFailure(ex)
def _PrepareStagingDir(self):
_PrepareStagingDir(self.options, self.tempdir, self.staging_dir)
def Perform(self):
# If requested, just do the staging step.
if self.options.staging_only:
self._PrepareStagingDir()
return 0
# Run setup steps in parallel. If any step fails, RunParallelSteps will
# stop printing output at that point, and halt any running steps.
steps = [self._GetDeviceInfo, self._PrepareStagingDir,
self._CheckConnection, self._KillProcsIfNeeded,
self._MountRootfsAsWritable]
ret = parallel.RunParallelSteps(steps, halt_on_error=True,
return_values=True)
self._CheckDeviceFreeSpace(ret[0])
# If we failed to mark the rootfs as writable, try disabling rootfs
# verification.
if self._rootfs_is_still_readonly.is_set():
self._DisableRootfsVerification()
# Actually deploy Chrome to the device.
self._Deploy()
def ValidateGypDefines(_option, _opt, value):
"""Convert GYP_DEFINES-formatted string to dictionary."""
return chrome_util.ProcessGypDefines(value)
class CustomOption(commandline.Option):
"""Subclass Option class to implement path evaluation."""
TYPES = commandline.Option.TYPES + ('gyp_defines',)
TYPE_CHECKER = commandline.Option.TYPE_CHECKER.copy()
TYPE_CHECKER['gyp_defines'] = ValidateGypDefines
def _CreateParser():
"""Create our custom parser."""
parser = commandline.OptionParser(usage=_USAGE, option_class=CustomOption,
caching=True)
# TODO(rcui): Have this use the UI-V2 format of having source and target
# device be specified as positional arguments.
parser.add_option('--force', action='store_true', default=False,
help='Skip all prompts (i.e., for disabling of rootfs '
'verification). This may result in the target '
'machine being rebooted.')
sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)
parser.add_option('--board', default=sdk_board_env,
help="The board the Chrome build is targeted for. When in "
"a 'cros chrome-sdk' shell, defaults to the SDK "
"board.")
parser.add_option('--build-dir', type='path',
help='The directory with Chrome build artifacts to deploy '
'from. Typically of format <chrome_root>/out/Debug. '
'When this option is used, the GYP_DEFINES '
'environment variable must be set.')
parser.add_option('--target-dir', type='path',
help='Target directory on device to deploy Chrome into.',
default=_CHROME_DIR)
parser.add_option('-g', '--gs-path', type='gs_path',
help='GS path that contains the chrome to deploy.')
parser.add_option('--nostartui', action='store_false', dest='startui',
default=True,
help="Don't restart the ui daemon after deployment.")
parser.add_option('--nostrip', action='store_false', dest='dostrip',
default=True,
help="Don't strip binaries during deployment. Warning: "
"the resulting binaries will be very large!")
parser.add_option('-p', '--port', type=int, default=remote.DEFAULT_SSH_PORT,
help='Port of the target device to connect to.')
parser.add_option('-t', '--to',
help='The IP address of the CrOS device to deploy to.')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Show more debug output.')
group = optparse.OptionGroup(parser, 'Advanced Options')
group.add_option('-l', '--local-pkg-path', type='path',
help='Path to local chrome prebuilt package to deploy.')
group.add_option('--sloppy', action='store_true', default=False,
help='Ignore when mandatory artifacts are missing.')
group.add_option('--staging-flags', default=None, type='gyp_defines',
help='Extra flags to control staging. Valid flags are - %s'
% ', '.join(chrome_util.STAGING_FLAGS))
group.add_option('--strict', action='store_true', default=False,
help='Stage artifacts based on the GYP_DEFINES environment '
'variable and --staging-flags, if set. Enforce that '
'all optional artifacts are deployed.')
group.add_option('--strip-flags', default=None,
help="Flags to call the 'strip' binutil tool with. "
"Overrides the default arguments.")
parser.add_option_group(group)
# GYP_DEFINES that Chrome was built with. Influences which files are staged
# when --build-dir is set. Defaults to reading from the GYP_DEFINES
  # environment variable.
parser.add_option('--gyp-defines', default=None, type='gyp_defines',
help=optparse.SUPPRESS_HELP)
# Path of an empty directory to stage chrome artifacts to. Defaults to a
# temporary directory that is removed when the script finishes. If the path
# is specified, then it will not be removed.
parser.add_option('--staging-dir', type='path', default=None,
help=optparse.SUPPRESS_HELP)
# Only prepare the staging directory, and skip deploying to the device.
parser.add_option('--staging-only', action='store_true', default=False,
help=optparse.SUPPRESS_HELP)
# Path to a binutil 'strip' tool to strip binaries with. The passed-in path
# is used as-is, and not normalized. Used by the Chrome ebuild to skip
# fetching the SDK toolchain.
parser.add_option('--strip-bin', default=None, help=optparse.SUPPRESS_HELP)
return parser
def _ParseCommandLine(argv):
"""Parse args, and run environment-independent checks."""
parser = _CreateParser()
(options, args) = parser.parse_args(argv)
if not any([options.gs_path, options.local_pkg_path, options.build_dir]):
parser.error('Need to specify either --gs-path, --local-pkg-path, or '
'--build-dir')
if options.build_dir and any([options.gs_path, options.local_pkg_path]):
    parser.error('Cannot specify both --build-dir and '
                 '--gs-path/--local-pkg-path')
if options.build_dir and not options.board:
parser.error('--board is required when --build-dir is specified.')
if options.gs_path and options.local_pkg_path:
parser.error('Cannot specify both --gs-path and --local-pkg-path')
if not (options.staging_only or options.to):
parser.error('Need to specify --to')
if (options.strict or options.staging_flags) and not options.build_dir:
parser.error('--strict and --staging-flags require --build-dir to be '
'set.')
if options.staging_flags and not options.strict:
parser.error('--staging-flags requires --strict to be set.')
if options.sloppy and options.strict:
parser.error('Cannot specify both --strict and --sloppy.')
return options, args
def _PostParseCheck(options, _args):
"""Perform some usage validation (after we've parsed the arguments
Args:
options/args: The options/args object returned by optparse
"""
if options.local_pkg_path and not os.path.isfile(options.local_pkg_path):
cros_build_lib.Die('%s is not a file.', options.local_pkg_path)
if not options.gyp_defines:
gyp_env = os.getenv('GYP_DEFINES', None)
if gyp_env is not None:
options.gyp_defines = chrome_util.ProcessGypDefines(gyp_env)
logging.debug('GYP_DEFINES taken from environment: %s',
options.gyp_defines)
if options.strict and not options.gyp_defines:
cros_build_lib.Die('When --strict is set, the GYP_DEFINES environment '
'variable must be set.')
if options.build_dir:
chrome_path = os.path.join(options.build_dir, 'chrome')
if os.path.isfile(chrome_path):
deps = lddtree.ParseELF(chrome_path)
if 'libbase.so' in deps['libs']:
cros_build_lib.Warning(
'Detected a component build of Chrome. component build is '
'not working properly for Chrome OS. See crbug.com/196317. '
'Use at your own risk!')
def _FetchChromePackage(cache_dir, tempdir, gs_path):
"""Get the chrome prebuilt tarball from GS.
Returns: Path to the fetched chrome tarball.
"""
gs_ctx = gs.GSContext.Cached(cache_dir, init_boto=True)
files = gs_ctx.LS(gs_path).output.splitlines()
files = [found for found in files if
_UrlBaseName(found).startswith('%s-' % constants.CHROME_PN)]
if not files:
raise Exception('No chrome package found at %s' % gs_path)
elif len(files) > 1:
# - Users should provide us with a direct link to either a stripped or
# unstripped chrome package.
# - In the case of being provided with an archive directory, where both
# stripped and unstripped chrome available, use the stripped chrome
# package.
# - Stripped chrome pkg is chromeos-chrome-<version>.tar.gz
# - Unstripped chrome pkg is chromeos-chrome-<version>-unstripped.tar.gz.
    files = [f for f in files if 'unstripped' not in f]
assert len(files) == 1
logging.warning('Multiple chrome packages found. Using %s', files[0])
filename = _UrlBaseName(files[0])
logging.info('Fetching %s...', filename)
gs_ctx.Copy(files[0], tempdir, print_cmd=False)
chrome_path = os.path.join(tempdir, filename)
assert os.path.exists(chrome_path)
return chrome_path
@contextlib.contextmanager
def _StripBinContext(options):
if not options.dostrip:
yield None
elif options.strip_bin:
yield options.strip_bin
else:
sdk = cros_chrome_sdk.SDKFetcher(options.cache_dir, options.board)
components = (sdk.TARGET_TOOLCHAIN_KEY, constants.CHROME_ENV_TAR)
with sdk.Prepare(components=components) as ctx:
env_path = os.path.join(ctx.key_map[constants.CHROME_ENV_TAR].path,
constants.CHROME_ENV_FILE)
strip_bin = osutils.SourceEnvironment(env_path, ['STRIP'])['STRIP']
strip_bin = os.path.join(ctx.key_map[sdk.TARGET_TOOLCHAIN_KEY].path,
'bin', os.path.basename(strip_bin))
yield strip_bin
def _PrepareStagingDir(options, tempdir, staging_dir):
"""Place the necessary files in the staging directory.
The staging directory is the directory used to rsync the build artifacts over
to the device. Only the necessary Chrome build artifacts are put into the
staging directory.
"""
osutils.SafeMakedirs(staging_dir)
os.chmod(staging_dir, 0755)
if options.build_dir:
with _StripBinContext(options) as strip_bin:
strip_flags = (None if options.strip_flags is None else
shlex.split(options.strip_flags))
chrome_util.StageChromeFromBuildDir(
staging_dir, options.build_dir, strip_bin, strict=options.strict,
sloppy=options.sloppy, gyp_defines=options.gyp_defines,
staging_flags=options.staging_flags,
strip_flags=strip_flags)
else:
pkg_path = options.local_pkg_path
if options.gs_path:
pkg_path = _FetchChromePackage(options.cache_dir, tempdir,
options.gs_path)
assert pkg_path
logging.info('Extracting %s...', pkg_path)
# Extract only the ./opt/google/chrome contents, directly into the staging
# dir, collapsing the directory hierarchy.
cros_build_lib.DebugRunCommand(
['tar', '--strip-components', '4', '--extract',
'--preserve-permissions', '--file', pkg_path, '.%s' % _CHROME_DIR],
cwd=staging_dir)
def main(argv):
options, args = _ParseCommandLine(argv)
_PostParseCheck(options, args)
# Set cros_build_lib debug level to hide RunCommand spew.
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
with stats.UploadContext() as queue:
cmd_stats = stats.Stats.SafeInit(cmd_line=argv, cmd_base='deploy_chrome')
if cmd_stats:
queue.put([cmd_stats, stats.StatsUploader.URL, 1])
with osutils.TempDir(set_global=True) as tempdir:
staging_dir = options.staging_dir
if not staging_dir:
staging_dir = os.path.join(tempdir, 'chrome')
deploy = DeployChrome(options, tempdir, staging_dir)
try:
deploy.Perform()
except results_lib.StepFailure as ex:
raise SystemExit(str(ex).strip())
|
|
# -*- coding: utf-8 -*-
"""This file contains the wifi.log (Mac OS X) parser."""
import logging
import re
import pyparsing
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class MacWifiLogEvent(time_events.TimestampEvent):
"""Convenience class for a Mac Wifi log line event."""
DATA_TYPE = u'mac:wifilog:line'
def __init__(self, timestamp, agent, function, text, action):
"""Initializes the event object.
Args:
timestamp: the timestamp, contains the number of microseconds from
January 1, 1970 00:00:00 UTC.
      agent: The device or process that generated the entry, e.g. airportd.
      function: The function or action called by the agent.
text: The log message
      action: A string containing known WiFi actions, e.g. connected to
an AP, configured, etc. If the action is not known,
the value is the message of the log (text variable).
"""
super(MacWifiLogEvent, self).__init__(
timestamp, eventdata.EventTimestamp.ADDED_TIME)
self.agent = agent
self.function = function
self.text = text
self.action = action
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parse text based on wifi.log file."""
NAME = u'macwifi'
DESCRIPTION = u'Parser for Mac OS X wifi.log files.'
_ENCODING = u'utf-8'
# Regular expressions for known actions.
RE_CONNECTED = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
RE_WIFI_PARAMETERS = re.compile(
r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
  # Define what a log line should look like.
WIFI_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
pyparsing.Literal(u'<') +
pyparsing.CharsNotIn(u'>').setResultsName(u'agent') +
pyparsing.Literal(u'>') +
pyparsing.CharsNotIn(u':').setResultsName(u'function') +
pyparsing.Literal(u':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName(u'text'))
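  # Illustrative log line that WIFI_LINE should match (values are made up;
  # wrapped over two comment lines here but a single line in the log):
  #   Thu Nov  8 20:36:37.222 <airportd[88]> _doAutoJoin: Already associated
  #   to "SomeNetwork". Bailing on auto-join.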
WIFI_HEADER = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
pyparsing.Literal(u'***Starting Up***'))
# Define the available log line structures.
LINE_STRUCTURES = [
(u'logline', WIFI_LINE),
(u'header', WIFI_HEADER)]
def __init__(self):
"""Initializes a parser object."""
super(MacWifiLogParser, self).__init__()
self._last_month = None
self._year_use = 0
def _GetAction(self, agent, function, text):
"""Parse the well know actions for easy reading.
Args:
      agent: The device that generated the entry.
function: The function or action called by the agent.
text: Mac Wifi log text.
Returns:
      known_action: A formatted string representing the known (or common) action.
"""
if not agent.startswith(u'airportd'):
return text
# TODO: replace "x in y" checks by startswith if possible.
if u'airportdProcessDLILEvent' in function:
interface = text.split()[0]
      return u'Interface {0:s} turned up.'.format(interface)
if u'doAutoJoin' in function:
match = re.match(self.RE_CONNECTED, text)
if match:
ssid = match.group(1)[1:-1]
else:
ssid = u'Unknown'
return u'Wifi connected to SSID {0:s}'.format(ssid)
if u'processSystemPSKAssoc' in function:
wifi_parameters = self.RE_WIFI_PARAMETERS.search(text)
if wifi_parameters:
ssid = wifi_parameters.group(1)
bssid = wifi_parameters.group(2)
security = wifi_parameters.group(3)
if not ssid:
ssid = u'Unknown'
if not bssid:
bssid = u'Unknown'
if not security:
security = u'Unknown'
return (
u'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
u'Security: {2:s}.').format(bssid, ssid, security)
return text
def _ConvertToTimestamp(self, day, month, year, time):
"""Converts date and time values into a timestamp.
This is a timestamp_string as returned by using
text_parser.PyparsingConstants structures:
      [08, Nov, [20, 36, 37], 222]
Args:
day: an integer representing the day.
month: an integer representing the month.
year: an integer representing the year.
time: a list containing integers with the number of
hours, minutes and seconds.
Returns:
    The timestamp, which is an integer containing the number of microseconds
since January 1, 1970, 00:00:00 UTC.
Raises:
TimestampError: if the timestamp cannot be created from the date and
time values.
"""
time_values, milliseconds = time
hours, minutes, seconds = time_values
microseconds = milliseconds * 1000
return timelib.Timestamp.FromTimeParts(
year, month, day, hours, minutes, seconds, microseconds=microseconds)
def _ParseLogLine(self, parser_mediator, structure):
"""Parse a single log line and produce an event object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if not self._year_use:
self._year_use = parser_mediator.GetEstimatedYear()
# Gap detected between years.
month = timelib.MONTH_DICT.get(structure.month.lower())
if not self._last_month:
self._last_month = month
if month < self._last_month:
self._year_use += 1
try:
timestamp = self._ConvertToTimestamp(
structure.day, month, self._year_use, structure.time)
except errors.TimestampError as exception:
parser_mediator.ProduceExtractionError(
u'unable to determine timestamp with error: {0:s}'.format(
exception))
return
self._last_month = month
text = structure.text
# Due to the use of CharsNotIn pyparsing structure contains whitespaces
# that need to be removed.
function = structure.function.strip()
action = self._GetAction(structure.agent, function, text)
event_object = MacWifiLogEvent(
timestamp, structure.agent, function, text, action)
parser_mediator.ProduceEvent(event_object)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if key == u'logline':
self._ParseLogLine(parser_mediator, structure)
elif key != u'header':
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac Wifi log file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
_ = self.WIFI_HEADER.parseString(line)
except pyparsing.ParseException:
logging.debug(u'Not a Mac Wifi log file')
return False
return True
manager.ParsersManager.RegisterParser(MacWifiLogParser)
|
|
#!/usr/bin/env python3
# Copyright (C) 2013, 2014 by Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import codecs
import sys
from distutils.core import Command, setup
from unittest import TestLoader, TextTestRunner
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
bdist_wheel = None
# scripts to be excluded from checking
EXCLUDE_SCRIPTS = ()
script_name = '1to001'
# ============================================================================
# https://groups.google.com/d/msg/comp.lang.python/pAeiF0qwtY0/H9Ki0WOctBkJ
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
func = lambda name, enc=ascii: {True: enc}.get(name == 'mbcs')
codecs.register(func)
# ============================================================================
class cmd_isort(Command):
description = 'run isort'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import isort
except ImportError:
      print(('Cannot import isort; did you forget to install it?\n'
             'Run `pip install isort` to install.'), file=sys.stderr)
sys.exit(1)
print()
print('Options')
print('=======')
print()
print('Exclude:', EXCLUDE_SCRIPTS)
print()
files = ['setup.py', script_name]
print('Results')
print('=======')
print()
fails = 0
for f in files:
# unfortunately, we have to do it twice
if isort.SortImports(f, check=True).incorrectly_sorted:
fails += 1
print()
isort.SortImports(f, show_diff=True)
print()
print()
print('Statistics')
print('==========')
print()
print('%d files failed to pass' % fails)
class cmd_pep8(Command):
description = 'run pep8'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import pep8
except ImportError:
      print(('Cannot import pep8; did you forget to install it?\n'
             'Run `pip install pep8` to install.'), file=sys.stderr)
sys.exit(1)
p8 = pep8.StyleGuide()
    # do not check code in the excluded scripts
p8.options.exclude += EXCLUDE_SCRIPTS
# ignore four-space indentation error
p8.options.ignore += ('E111', 'E121')
print()
print('Options')
print('=======')
print()
print('Exclude:', p8.options.exclude)
print('Ignore :', p8.options.ignore)
print()
print('Results')
print('=======')
print()
report = p8.check_files('.')
print()
print('Statistics')
print('==========')
print()
report.print_statistics()
print('%-7d Total errors and warnings' % report.get_count())
class cmd_pyflakes(Command):
description = 'run Pyflakes'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
from pyflakes import api
from pyflakes import reporter as modReporter
except ImportError:
      print(('Cannot import pyflakes; did you forget to install it?\n'
             'Run `pip install pyflakes` to install.'), file=sys.stderr)
sys.exit(1)
from os.path import basename
reporter = modReporter._makeDefaultReporter()
    # monkey patch for exclusion of paths
api_iterSourceCode = api.iterSourceCode
def _iterSourceCode(paths):
for path in api_iterSourceCode(paths):
if basename(path) not in EXCLUDE_SCRIPTS:
yield path
api.iterSourceCode = _iterSourceCode
print()
print('Options')
print('=======')
print()
print('Exclude:', EXCLUDE_SCRIPTS)
print()
print('Results')
print('=======')
print()
warnings = api.checkRecursive('.', reporter)
print()
print('Total warnings: %d' % warnings)
class cmd_pylint(Command):
description = 'run Pylint'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
from pylint import lint
except ImportError:
      print(('Cannot import pylint; did you forget to install it?\n'
             'Run `pip install pylint` to install.'), file=sys.stderr)
sys.exit(1)
print()
print('Options')
print('=======')
print()
print('Exclude:', EXCLUDE_SCRIPTS)
    files = ['setup.py', script_name]
args = [
'--ignore=%s' % ','.join(EXCLUDE_SCRIPTS),
'--output-format=colorized',
'--include-ids=y',
'--indent-string=" "',
] + files
print()
lint.Run(args)
class cmd_test(Command):
description = 'run tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
loader = TestLoader()
tests = loader.discover(start_dir='tests')
runner = TextTestRunner(verbosity=2)
runner.run(tests)
# ============================================================================
with open(script_name) as f:
meta = dict(
(k.strip(' _'), eval(v)) for k, v in
# There will be a '\n', with eval(), it's safe to ignore
(line.split('=') for line in f if line.startswith('__'))
)
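# The block above expects dunder metadata assignments near the top of the
# script file, for example (illustrative values only):
#
#   __program__ = '1to001'
#   __version__ = '0.1.0'
#   __license__ = 'MIT'
#   __author__ = 'Some Author'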
# renaming meta-data keys
meta_renames = [
('program', 'name'),
('website', 'url'),
('email', 'author_email'),
]
for old, new in meta_renames:
if old in meta:
meta[new] = meta[old]
del meta[old]
# keep these
meta_keys = ['name', 'description', 'version', 'license', 'url', 'author',
'author_email']
meta = dict([m for m in meta.items() if m[0] in meta_keys])
with open('README.rst') as f:
long_description = f.read()
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
]
setup_d = dict(
long_description=long_description,
cmdclass={
'isort': cmd_isort,
'pep8': cmd_pep8,
'pyflakes': cmd_pyflakes,
'pylint': cmd_pylint,
'test': cmd_test,
},
classifiers=classifiers,
scripts=[script_name],
**meta
)
if bdist_wheel:
setup_d['cmdclass']['bdist_wheel'] = bdist_wheel
if __name__ == '__main__':
setup(**setup_d)
|
|
from cornice import Service
from pyramid.security import Everyone
from daybed.permissions import (
invert_permissions_matrix, merge_permissions, default_model_permissions
)
from daybed.backends.exceptions import ModelNotFound
from daybed.views.errors import forbidden_view
from daybed.schemas.validators import (
model_validator, permissions_validator, definition_validator
)
models = Service(name='models', path='/models', description='Models')
model = Service(name='model',
path='/models/{model_id}',
description='Model',
renderer="jsonp",
cors_origins=('*',))
definition = Service(name='model-definition',
path='/models/{model_id}/definition',
description='Model Definitions',
renderer="jsonp",
cors_origins=('*',))
permissions = Service(name='model-permissions',
path='/models/{model_id}/permissions',
description='Model permissions',
renderer="jsonp",
cors_origins=('*',))
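# Reading aid: routes wired up by the view decorators below.
#   GET, POST          /models
#   GET, PUT, DELETE   /models/{model_id}
#   GET, PUT           /models/{model_id}/definition
#   GET, PUT, PATCH    /models/{model_id}/permissions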
@definition.get(permission='get_definition')
def get_definition(request):
"""Retrieves a model definition."""
model_id = request.matchdict['model_id']
try:
return request.db.get_model_definition(model_id)
except ModelNotFound:
request.errors.add('path', model_id, "model not found")
request.errors.status = "404 Not Found"
@definition.put(validators=(definition_validator,), permission='put_model')
def put_definition(request):
"""Create or update a model definition."""
model_id = request.matchdict['model_id']
try:
permissions = request.db.get_model_permissions(model_id)
permissions = invert_permissions_matrix(permissions)
except ModelNotFound:
permissions = {}
model = {
'permissions': permissions,
'definition': request.data_clean,
'records': [] # Won't erase existing records
}
request.data_clean = model
handle_put_model(request, create=(not permissions))
return model['definition']
@permissions.get(permission='get_permissions')
def get_permissions(request):
"""Retrieves a model permissions."""
model_id = request.matchdict['model_id']
try:
permissions = request.db.get_model_permissions(model_id)
return invert_permissions_matrix(permissions)
except ModelNotFound:
request.errors.add('path', model_id, "model not found")
request.errors.status = "404 Not Found"
@permissions.patch(permission='put_permissions',
validators=(permissions_validator,))
def patch_permissions(request):
"""Update a model permissions."""
model_id = request.matchdict['model_id']
definition = request.db.get_model_definition(model_id)
current_permissions = request.db.get_model_permissions(model_id)
permissions = merge_permissions(current_permissions, request.data_clean)
request.db.put_model(definition, permissions, model_id)
return invert_permissions_matrix(permissions)
@permissions.put(permission='put_permissions',
validators=(permissions_validator,))
def put_permissions(request):
"""Update a model permissions."""
model_id = request.matchdict['model_id']
definition = request.db.get_model_definition(model_id)
permissions = merge_permissions({}, request.data_clean)
request.db.put_model(definition, permissions, model_id)
return invert_permissions_matrix(permissions)
@models.get(permission='get_models')
def get_models(request):
"""Return the list of modelname readable by the user."""
return {"models": request.db.get_models(request.principals)}
@models.post(permission='post_model', validators=(model_validator,))
def post_models(request):
"""Creates a model with the given definition and records, if any."""
if request.credentials_id:
credentials_id = request.credentials_id
else:
credentials_id = Everyone
specified_perms = request.data_clean['permissions']
default_perms = default_model_permissions(credentials_id)
permissions = merge_permissions(default_perms, specified_perms)
model_id = request.db.put_model(
definition=request.data_clean['definition'],
permissions=permissions)
request.notify('ModelCreated', model_id)
for record in request.data_clean['records']:
record_id = request.db.put_record(model_id, record, [credentials_id])
request.notify('RecordCreated', model_id, record_id)
request.response.status = "201 Created"
location = '%s/models/%s' % (request.application_url, model_id)
request.response.headers['location'] = str(location)
return {'id': model_id}
@model.delete(permission='delete_model')
def delete_model(request):
"""Deletes a model and its records."""
model_id = request.matchdict['model_id']
try:
model = request.db.delete_model(model_id)
except ModelNotFound:
request.errors.status = "404 Not Found"
request.errors.add('path', model_id, "model not found")
return
request.notify('ModelDeleted', model_id)
model["permissions"] = invert_permissions_matrix(model["permissions"])
return model
@model.get(permission='get_model')
def get_model(request):
"""Retrieves the full model, definition and records."""
model_id = request.matchdict['model_id']
try:
definition = request.db.get_model_definition(model_id)
except ModelNotFound:
request.errors.add('path', model_id, "model not found")
request.errors.status = "404 Not Found"
return
if "read_all_records" not in request.permissions:
records = request.db.get_records_with_authors(model_id)
records = [r["record"] for r in records
if set(request.principals).intersection(r["authors"])]
else:
records = request.db.get_records(model_id)
permissions = request.db.get_model_permissions(model_id)
return {'definition': definition,
'records': records,
'permissions': invert_permissions_matrix(permissions)}
@model.put(validators=(model_validator,), permission='post_model')
def put_model(request):
model_id = request.matchdict['model_id']
try:
request.db.get_model_definition(model_id)
if request.has_permission('put_model'):
try:
request.db.delete_model(model_id)
except ModelNotFound:
pass
return handle_put_model(request)
except ModelNotFound:
return handle_put_model(request, create=True)
return forbidden_view(request)
def handle_put_model(request, create=False):
model_id = request.matchdict['model_id']
if request.credentials_id:
credentials_id = request.credentials_id
else:
credentials_id = Everyone
specified_perms = request.data_clean['permissions']
default_perms = default_model_permissions(credentials_id)
permissions = merge_permissions(default_perms, specified_perms)
request.db.put_model(request.data_clean['definition'],
permissions,
model_id)
event = 'ModelCreated' if create else 'ModelUpdated'
request.notify(event, model_id)
for record in request.data_clean['records']:
record_id = request.db.put_record(model_id, record, [credentials_id])
request.notify('RecordCreated', model_id, record_id)
return {"id": model_id}
|
|
from DOJO.dojo import Dojo
from PERSON.person import Person, Staff, Fellow
from ROOM.room import Room, Office, LivingSpace
from room_allocator import InteractiveRoomAllocator
import unittest
from cmd import Cmd
from sqlalchemy import create_engine
from status import State
from sqlalchemy.orm import sessionmaker
import pickle
import sys
from io import StringIO
class CreateRoomTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
# Test interactive session inherits from CMD
def test_interactive_session_inherits_Cmd(self):
self.assertTrue(issubclass(InteractiveRoomAllocator, Cmd),
msg='InteractiveRoomAllocator class must inherit Cmd')
# Test intro is correct
def test_correct_intro(self):
self.assertEqual(self.interactive_session.intro,
"\n\n>>>>>>>>>>>>>>>>> Eugene's random room allocator for Andela <<<<<<<<<<<<<<<<<<\n",
msg='Wrong intro')
# Test prompt is correct
def test_correct_prompt(self):
self.assertEqual(self.interactive_session.prompt, "\nRoom_Allocator: ", msg='Wrong prompt')
# Test create_room command creates a Room in some_dojo
def test_create_room_creates_Room(self):
arg = {'<room_type>': 'office', '<room_name>': ['Yellow']}
# Unwrap the do_create_room function to pass arg directly to the function called by create_room command
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg)
self.assertTrue(isinstance(self.interactive_session.andela_dojo['office_spaces'][arg['<room_name>'][0].lower()], Room),
msg='create_room command must create a Room')
# Test create_room command creates an Office in some_dojo for type = 'office'
def test_create_room_creates_Office(self):
arg = {'<room_type>': 'office', '<room_name>': ['Yellow']}
# Unwrap the do_create_room function to pass arg directly to the function called by create_room command
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['office_spaces'][arg['<room_name>'][0].lower()], Office),
msg='create_room command must create an Office for type equal to "office"')
# Test create_room command creates a Living Space in some_dojo for type being 'living'
def test_create_room_creates_LivingSpace(self):
arg = {'<room_type>': 'living', '<room_name>': ['Purple']}
# Unwrap the do_create_room function to pass arg directly to the function called by create_room command
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['living_spaces'][arg['<room_name>'][0].lower()], LivingSpace),
msg='create_room command must create a LivingSpace for type equal to "living"')
def test_create_room_can_create_multiple_Office(self):
arg = {'<room_type>': 'office', '<room_name>': ['Yellow', 'Black', 'Red']}
# Unwrap the do_create_room function to pass arg directly to the function called by create_room command
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['office_spaces'][arg['<room_name>'][0].lower()], Office),
msg='create_room command must be able to create several Offices at once')
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['office_spaces'][arg['<room_name>'][1].lower()], Office),
msg='create_room command must be able to create several Offices at once')
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['office_spaces'][arg['<room_name>'][2].lower()], Office),
msg='create_room command must be able to create several Offices at once')
def test_create_room_can_create_multiple_LivingSpace(self):
arg = {'<room_type>': 'living', '<room_name>': ['Orange', 'Blue', 'Cream']}
# Unwrap the do_create_room function to pass arg directly to the function called by create_room command
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['living_spaces'][arg['<room_name>'][0].lower()], LivingSpace),
msg='create_room command must be able to create several LivingSpaces at once')
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['living_spaces'][arg['<room_name>'][1].lower()], LivingSpace),
msg='create_room command must be able to create several LivingSpaces at once')
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['living_spaces'][arg['<room_name>'][2].lower()], LivingSpace),
msg='create_room command must be able to create several LivingSpaces at once')
def test_create_room_reallocates_unallocated_people(self):
arg_office = {'<room_type>': 'office', '<room_name>': ['Cream']}
arg_living = {'<room_type>': 'living', '<room_name>': ['Green']}
arg_fellow = {'<first_name>': 'Aretha', '<last_name>': 'Franklin', '<Fellow_or_Staff>': 'felLOW', '<wants_accommodation>': 'Y'}
arg_staff = {'<first_name>': 'Ella', '<last_name>': 'Fitz', '<Fellow_or_Staff>': 'Staff', '<wants_accommodation>': None}
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_fellow)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_staff)
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Office']) == 2,
                        msg='Added person not placed in unallocated office list before room is created')
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Living_Space']) == 1,
                        msg='Added person not placed in unallocated living space list before room is created')
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living)
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Office']) == 0,
msg='Reallocated person not removed from unallocated office')
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Living_Space']) == 0,
msg='Reallocated person not removed from unallocated living_space')
class AddPersonTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
def test_unallocated_Person(self):
arg = {'<first_name>': 'Aretha', '<last_name>': 'Franklin', '<Fellow_or_Staff>': 'Staff', '<wants_accommodation>': 'N'}
        # Unwrap the do_add_person function to pass arg directly to the function called by add_person command
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['unallocated']['Office']['af0'], Person),
            msg='add_person command must create Person with unallocated office if there is no free office space')
arg = {'<first_name>': 'Thelonius', '<last_name>': 'Monk', '<Fellow_or_Staff>': 'Fellow', '<wants_accommodation>': 'Y'}
        # Unwrap the do_add_person function to pass arg directly to the function called by add_person command
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg)
self.assertTrue(
isinstance(self.interactive_session.andela_dojo['unallocated']['Living_Space']['tm0'],
Person),
            msg='add_person command must create Person with unallocated living space if there is no free living space')
def test_staff_is_allocated_office(self):
arg_person = {'<first_name>': 'John', '<last_name>': 'Hopkins', '<Fellow_or_Staff>': 'Staff', '<wants_accommodation>': 'N'}
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_person)
self.assertTrue(isinstance(
self.interactive_session.andela_dojo['office_spaces'][arg_office['<room_name>'][0].lower()].occupants['Staff']['jh0'], Staff),
msg='add_person command must create Staff and assign them an office.')
    def test_fellow_is_allocated_office_and_living_space_when_desired(self):
arg_person = {'<first_name>': 'Larry', '<last_name>': 'King', '<Fellow_or_Staff>': 'Fellow',
'<wants_accommodation>': 'Y'}
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
arg_living = {'<room_type>': 'living', '<room_name>': ['Black']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_person)
self.assertTrue(isinstance(
self.interactive_session.andela_dojo['office_spaces'][arg_office['<room_name>'][0].lower()].occupants['Fellows']['lk0'], Fellow),
msg='add_person command must create Fellow and assign them an office and living room if they wish.')
def test_fellow_is_allocated_office_if_living_space_not_desired(self):
arg_person = {'<first_name>': 'Larry', '<last_name>': 'King', '<Fellow_or_Staff>': 'Fellow',
'<wants_accommodation>': None}
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_person)
self.assertTrue(isinstance(
self.interactive_session.andela_dojo['office_spaces'][arg_office['<room_name>'][0].lower()].occupants[
'Fellows']['lk0'], Fellow),
            msg='add_person command must create Fellow and assign them only an office when accommodation is not requested.')
class PrintRoomTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
self.original_print = sys.stdout
arg_fellow = {'<first_name>': 'Larry', '<last_name>': 'King', '<Fellow_or_Staff>': 'Fellow',
'<wants_accommodation>': 'Y'}
arg_staff = {'<first_name>': 'Jimmy', '<last_name>': 'Kimmel', '<Fellow_or_Staff>': 'staff',
'<wants_accommodation>': None}
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
arg_living = {'<room_type>': 'living', '<room_name>': ['Black']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_fellow)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_staff)
def test_print_room_prints_room_occupants(self):
sys.stdout = StringIO()
self.test_print = sys.stdout
self.interactive_session.do_print_room.__wrapped__(self.interactive_session, {'<room_name>':'black'})
output = "Fellows in living space: Black\n" \
"----------------------------------------\n" \
"Larry king\n\n\n" \
"Office space with such name does not exist\n\n"
self.assertEqual(self.test_print.getvalue(), output, msg='Print_room not printing correct information')
sys.stdout = self.original_print
def test_print_for_non_existent_room(self):
sys.stdout = StringIO()
self.test_print = sys.stdout
self.interactive_session.do_print_room.__wrapped__(self.interactive_session, {'<room_name>':'blue'})
output = "Living space with such name does not exist\n\nOffice space with such name does not exist\n\n"
self.assertEqual(self.test_print.getvalue(), output, msg="Print_room does not give correct output for non-existent rooms")
sys.stdout = self.original_print
def test_print_for_office_allocations(self):
sys.stdout = StringIO()
self.test_print = sys.stdout
self.interactive_session.do_print_room.__wrapped__(self.interactive_session, {'<room_name>':'orange'})
output = "Living space with such name does not exist\n\n" \
"Staff in office space: Orange\n" \
"----------------------------------------\n" \
"Jimmy kimmel\n\n\n" \
"Fellows in office space: Orange\n" \
"----------------------------------------\n" \
"Larry king\n\n\n"
self.assertEqual(self.test_print.getvalue(), output, msg="Print_room does not give correct output for offices")
sys.stdout = self.original_print
class PrintAllocationsTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
self.original_print = sys.stdout
arg_fellow = {'<first_name>': 'Larry', '<last_name>': 'King', '<Fellow_or_Staff>': 'Fellow',
'<wants_accommodation>': 'Y'}
arg_staff = {'<first_name>': 'Jimmy', '<last_name>': 'Kimmel', '<Fellow_or_Staff>': 'staff',
'<wants_accommodation>': None}
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
arg_living = {'<room_type>': 'living', '<room_name>': ['Black']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_fellow)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_staff)
def test_printed_allocations(self):
sys.stdout = StringIO()
test_print = sys.stdout
self.interactive_session.do_print_allocations.__wrapped__(self.interactive_session, {"<output>": None})
output = "Fellows in living space: Black\n" \
"----------------------------------------\n" \
"(lk0)Larry king, \n\n\n" \
"Occupants of office space: Orange\n" \
"----------------------------------------\n" \
"(lk0)Larry king, (jk1)Jimmy kimmel, \n\n"
self.assertEqual(test_print.getvalue(), output, msg="Print_allocations gives incorrect output")
sys.stdout = self.original_print
class SaveStateTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
def test_data_saved_by_save_state(self):
self.interactive_session.do_save_state.__wrapped__(self.interactive_session, {'<output>': None})
engine = create_engine('sqlite:///database/interactive_status.db', echo=False)
Session = sessionmaker(bind=engine)
session = Session()
for back in session.query(State).filter(State.state_name == 'default'):
requested_state = pickle.loads(back.state_file)
self.assertTrue(isinstance(requested_state, Dojo), msg='save_state does not save the dojo object ')
class PrintUnallocatedTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
self.original_print = sys.stdout
self.arg_fellow = {'<first_name>': 'Larry', '<last_name>': 'King', '<Fellow_or_Staff>': 'Fellow',
'<wants_accommodation>': 'Y'}
def test_print_unallocated_living_space(self):
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, self.arg_fellow)
sys.stdout = StringIO()
test_print = sys.stdout
self.interactive_session.do_print_unallocated.__wrapped__(self.interactive_session, {'<output>': None})
output = "\nPersons with unallocated living space:\n" \
"----------------------------------------\n" \
"Larry king, \n\n" \
"\nPersons with unallocated office space:\n" \
"----------------------------------------\n" \
"Larry king, \n\n"
self.assertEqual(test_print.getvalue(), output, msg='print_unallocated command is malfunctioning')
sys.stdout = self.original_print
class LoadStateTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
arg_office = {'<room_type>': 'office', '<room_name>': ['Orange']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office)
self.interactive_session.do_save_state.__wrapped__(self.interactive_session, {'<output>': 'test_run'})
def test_load_state(self):
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']) == 1,
msg='Object has not been saved before resetting')
self.interactive_session = InteractiveRoomAllocator(Dojo())
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']) == 0,
msg='Object has not been reset')
self.interactive_session.do_load_state.__wrapped__(self.interactive_session, {'<output>': 'test_run'})
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']) == 1,
msg='Object not reloaded after being reset')
class LoadPeopleTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
def test_load_people(self):
self.interactive_session.do_load_people.__wrapped__(self.interactive_session, {'<load_file>': None})
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Office']) == 7,
msg='load_people failed to load people into offices')
self.assertTrue(len(self.interactive_session.andela_dojo['unallocated']['Living_Space']) == 4,
msg='load_people failed to load people into living spaces')
class ReallocatePersonTest(unittest.TestCase):
def setUp(self):
self.interactive_session = InteractiveRoomAllocator(Dojo())
arg_fellow = {'<first_name>': 'Jimmy', '<last_name>': 'Kimmel', '<Fellow_or_Staff>': 'fellow',
'<wants_accommodation>': 'Y'}
arg_staff = {'<first_name>': 'Larry', '<last_name>': 'kING', '<Fellow_or_Staff>': 'STAFF',
'<wants_accommodation>': None}
arg_office_1 = {'<room_type>': 'office', '<room_name>': ['Brown']}
arg_office_2 = {'<room_type>': 'office', '<room_name>': ['Yellow']}
arg_living_space_1 = {'<room_type>': 'living', '<room_name>': ['White']}
arg_living_space_2 = {'<room_type>': 'living', '<room_name>': ['Red']}
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office_1)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living_space_1)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_fellow)
self.interactive_session.do_add_person.__wrapped__(self.interactive_session, arg_staff)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_office_2)
self.interactive_session.do_create_room.__wrapped__(self.interactive_session, arg_living_space_2)
def test_reallocate_person(self):
self.interactive_session.do_reallocate_person.__wrapped__(self.interactive_session,
{'<person_identifier>': 'jk0',
'<new_room_name>': 'yellow'})
self.interactive_session.do_reallocate_person.__wrapped__(self.interactive_session,
{'<person_identifier>': 'lk1',
'<new_room_name>': 'yellow'})
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']['brown'].occupants['Fellows']) == 0,
msg='reallocate_person does not remove person from original office.')
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']['yellow'].occupants['Fellows']) == 1,
msg='reallocate_person does not move fellow to new office.')
self.assertTrue(len(self.interactive_session.andela_dojo['office_spaces']['yellow'].occupants['Staff']) == 1,
msg='reallocate_person does not move staff to new office.')
self.interactive_session.do_reallocate_person.__wrapped__(self.interactive_session,
{'<person_identifier>': 'jk0',
'<new_room_name>': 'red'})
self.assertTrue(len(self.interactive_session.andela_dojo['living_spaces']['white'].occupants) == 0,
msg='reallocate_person does not remove person from original living space.')
self.assertTrue(len(self.interactive_session.andela_dojo['living_spaces']['red'].occupants) == 1,
msg='reallocate_person does not move person to new living space.')
if __name__ == '__main__':
unittest.main()
|
|
import json
import dcos
import pytest
import shakedown
from tests.command import (
cassandra_api_url,
check_health,
get_cassandra_config,
install,
marathon_api_url,
request,
spin,
uninstall,
unset_ssl_verification,
)
from tests.defaults import DEFAULT_NODE_COUNT, PACKAGE_NAME
def bump_cpu_count_config():
config = get_cassandra_config()
config['env']['CASSANDRA_CPUS'] = str(
float(config['env']['CASSANDRA_CPUS']) + 0.1
)
return request(
dcos.http.put,
marathon_api_url('apps/cassandra'),
json=config
)
counter = 0
def get_and_verify_plan(predicate=lambda r: True):
global counter
def fn():
return dcos.http.get(cassandra_api_url('plan'))
def success_predicate(result):
global counter
message = 'Request to /plan failed'
try:
body = result.json()
except:
return False, message
if counter < 3:
counter += 1
if predicate(body): counter = 0
return predicate(body), message
return spin(fn, success_predicate).json()
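# Illustrative note, assuming the contract of the imported spin() helper: it
# retries fn() until success_predicate(result) yields a truthy first element,
# then returns the last result. For example, the call used further below,
#
#     get_and_verify_plan(lambda p: p['status'] == 'Complete')
#
# blocks until the /plan endpoint reports a completed plan.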
def get_node_host():
def fn():
try:
return shakedown.get_service_ips(PACKAGE_NAME)
except IndexError:
return set()
def success_predicate(result):
return len(result) == DEFAULT_NODE_COUNT, 'Nodes failed to return'
return spin(fn, success_predicate).pop()
def get_scheduler_host():
return shakedown.get_service_ips('marathon').pop()
def kill_task_with_pattern(pattern, host=None):
command = (
"sudo kill -9 "
"$(ps ax | grep {} | grep -v grep | tr -s ' ' | sed 's/^ *//g' | "
"cut -d ' ' -f 1)".format(pattern)
)
if host is None:
result = shakedown.run_command_on_master(command)
else:
result = shakedown.run_command_on_agent(host, command)
if not result:
raise RuntimeError(
'Failed to kill task with pattern "{}"'.format(pattern)
)
def run_cleanup():
payload = {'nodes': ['*']}
request(
dcos.http.put,
cassandra_api_url('cleanup/start'),
json=payload,
)
def run_planned_operation(operation, failure=lambda: None):
    plan = get_and_verify_plan()
    operation()
    # The operation is considered registered once a phase id changes, the
    # plan gains new phases, or the plan reports InProgress.
    pred = lambda p: (
        plan['phases'][1]['id'] != p['phases'][1]['id'] or
        len(plan['phases']) < len(p['phases']) or
        p['status'] == 'InProgress'
    )
    get_and_verify_plan(pred)
    failure()
    get_and_verify_plan(lambda p: p['status'] == 'Complete')
def run_repair():
payload = {'nodes': ['*']}
request(
dcos.http.put,
cassandra_api_url('repair/start'),
json=payload,
)
def _block_on_adminrouter():
def get_master_ip():
return shakedown.master_ip()
def is_up(ip):
return ip, "Failed to fetch master ip"
# wait for adminrouter to recover
print("Ensuring adminrouter is up...")
ip = spin(get_master_ip, is_up)
print("Adminrouter is up. Master IP: {}".format(ip))
# install once up-front, reuse install for tests (MUCH FASTER):
def setup_module():
unset_ssl_verification()
uninstall()
install()
check_health()
def teardown_module():
uninstall()
@pytest.mark.recovery
def test_kill_task_in_node():
kill_task_with_pattern('CassandraDaemon', get_node_host())
check_health()
@pytest.mark.recovery
def test_kill_all_task_in_node():
for host in shakedown.get_service_ips(PACKAGE_NAME):
kill_task_with_pattern('CassandraDaemon', host)
check_health()
@pytest.mark.recovery
def test_scheduler_died():
kill_task_with_pattern('cassandra.scheduler.Main', get_scheduler_host())
check_health()
@pytest.mark.recovery
def test_executor_killed():
kill_task_with_pattern('cassandra.executor.Main', get_node_host())
check_health()
@pytest.mark.recovery
def test_all_executors_killed():
for host in shakedown.get_service_ips(PACKAGE_NAME):
kill_task_with_pattern('cassandra.executor.Main', host)
check_health()
@pytest.mark.recovery
def test_master_killed():
kill_task_with_pattern('mesos-master')
check_health()
_block_on_adminrouter()
@pytest.mark.recovery
def test_zk_killed():
kill_task_with_pattern('zookeeper')
check_health()
_block_on_adminrouter()
@pytest.mark.recovery
def test_partition():
host = get_node_host()
_block_on_adminrouter()
shakedown.partition_agent(host)
shakedown.reconnect_agent(host)
check_health()
@pytest.mark.recovery
def test_partition_master_both_ways():
shakedown.partition_master()
shakedown.reconnect_master()
check_health()
@pytest.mark.recovery
def test_partition_master_incoming():
shakedown.partition_master(incoming=True, outgoing=False)
shakedown.reconnect_master()
check_health()
@pytest.mark.recovery
def test_partition_master_outgoing():
shakedown.partition_master(incoming=False, outgoing=True)
shakedown.reconnect_master()
check_health()
@pytest.mark.recovery
def test_all_partition():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
for host in hosts:
shakedown.partition_agent(host)
for host in hosts:
shakedown.reconnect_agent(host)
check_health()
@pytest.mark.recovery
def test_config_update_then_kill_task_in_node():
host = get_node_host()
run_planned_operation(
bump_cpu_count_config,
lambda: kill_task_with_pattern('CassandraDaemon', host)
)
check_health()
@pytest.mark.recovery
def test_config_update_then_kill_all_task_in_node():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
bump_cpu_count_config,
lambda: [kill_task_with_pattern('CassandraDaemon', h) for h in hosts]
)
check_health()
@pytest.mark.recovery
def test_config_update_then_scheduler_died():
host = get_scheduler_host()
run_planned_operation(
bump_cpu_count_config,
lambda: kill_task_with_pattern('cassandra.scheduler.Main', host)
)
check_health()
@pytest.mark.recovery
def test_config_update_then_executor_killed():
host = get_node_host()
run_planned_operation(
bump_cpu_count_config,
lambda: kill_task_with_pattern('cassandra.executor.Main', host)
)
check_health()
@pytest.mark.recovery
def test_config_update_then_all_executors_killed():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
bump_cpu_count_config,
lambda: [
kill_task_with_pattern('cassandra.executor.Main', h) for h in hosts
]
)
check_health()
@pytest.mark.recovery
def test_config_update_then_master_killed():
run_planned_operation(
bump_cpu_count_config, lambda: kill_task_with_pattern('mesos-master')
)
check_health()
@pytest.mark.recovery
def test_config_update_then_zk_killed():
run_planned_operation(
bump_cpu_count_config, lambda: kill_task_with_pattern('zookeeper')
)
check_health()
@pytest.mark.recovery
def test_config_update_then_partition():
host = get_node_host()
def partition():
shakedown.partition_agent(host)
shakedown.reconnect_agent(host)
run_planned_operation(bump_cpu_count_config, partition)
check_health()
@pytest.mark.recovery
def test_config_update_then_all_partition():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
def partition():
for host in hosts:
shakedown.partition_agent(host)
for host in hosts:
shakedown.reconnect_agent(host)
run_planned_operation(bump_cpu_count_config, partition)
check_health()
@pytest.mark.recovery
def test_cleanup_then_kill_task_in_node():
host = get_node_host()
run_planned_operation(
run_cleanup,
lambda: kill_task_with_pattern('CassandraDaemon', host)
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_kill_all_task_in_node():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
run_cleanup,
lambda: [kill_task_with_pattern('CassandraDaemon', h) for h in hosts]
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_scheduler_died():
host = get_scheduler_host()
run_planned_operation(
run_cleanup,
lambda: kill_task_with_pattern('cassandra.scheduler.Main', host)
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_executor_killed():
host = get_node_host()
run_planned_operation(
run_cleanup,
lambda: kill_task_with_pattern('cassandra.executor.Main', host)
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_all_executors_killed():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
        run_cleanup,
lambda: [
kill_task_with_pattern('cassandra.executor.Main', h) for h in hosts
]
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_master_killed():
run_planned_operation(
        run_cleanup, lambda: kill_task_with_pattern('mesos-master')
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_zk_killed():
run_planned_operation(
        run_cleanup, lambda: kill_task_with_pattern('zookeeper')
)
check_health()
@pytest.mark.recovery
def test_cleanup_then_partition():
host = get_node_host()
def partition():
shakedown.partition_agent(host)
shakedown.reconnect_agent(host)
run_planned_operation(run_cleanup, partition)
check_health()
@pytest.mark.recovery
def test_cleanup_then_all_partition():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
def partition():
for host in hosts:
shakedown.partition_agent(host)
for host in hosts:
shakedown.reconnect_agent(host)
run_planned_operation(run_cleanup, partition)
check_health()
@pytest.mark.recovery
def test_repair_then_kill_task_in_node():
host = get_node_host()
run_planned_operation(
run_repair,
lambda: kill_task_with_pattern('CassandraDaemon', host)
)
check_health()
@pytest.mark.recovery
def test_repair_then_kill_all_task_in_node():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
run_repair,
lambda: [kill_task_with_pattern('CassandraDaemon', h) for h in hosts]
)
check_health()
@pytest.mark.recovery
def test_repair_then_scheduler_died():
host = get_scheduler_host()
run_planned_operation(
run_repair,
lambda: kill_task_with_pattern('cassandra.scheduler.Main', host)
)
check_health()
@pytest.mark.recovery
def test_repair_then_executor_killed():
host = get_node_host()
run_planned_operation(
run_repair,
lambda: kill_task_with_pattern('cassandra.executor.Main', host)
)
check_health()
@pytest.mark.recovery
def test_repair_then_all_executors_killed():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
run_planned_operation(
run_repair,
lambda: [
kill_task_with_pattern('cassandra.executor.Main', h) for h in hosts
]
)
check_health()
@pytest.mark.recovery
def test_repair_then_master_killed():
run_planned_operation(
run_repair,
lambda: kill_task_with_pattern('mesos-master')
)
check_health()
@pytest.mark.recovery
def test_repair_then_zk_killed():
run_planned_operation(
run_repair,
lambda: kill_task_with_pattern('zookeeper')
)
check_health()
@pytest.mark.recovery
def test_repair_then_partition():
host = get_node_host()
def partition():
shakedown.partition_agent(host)
shakedown.reconnect_agent(host)
run_planned_operation(run_repair, partition)
check_health()
@pytest.mark.recovery
def test_repair_then_all_partition():
hosts = shakedown.get_service_ips(PACKAGE_NAME)
def partition():
for host in hosts:
shakedown.partition_agent(host)
for host in hosts:
shakedown.reconnect_agent(host)
run_planned_operation(run_repair, partition)
check_health()
|
|
""" Defines a KernelClient that provides thread-safe sockets with async callbacks on message replies.
"""
import atexit
import errno
import sys
from threading import Thread, Event
import time
# import ZMQError in top-level namespace, to avoid ugly attribute-error messages
# during garbage collection of threads at exit:
from zmq import ZMQError
from zmq.eventloop import ioloop, zmqstream
# Local imports
from traitlets import Type, Instance
from jupyter_client.channels import HBChannel
from jupyter_client import KernelClient
class ThreadedZMQSocketChannel(object):
"""A ZMQ socket invoking a callback in the ioloop"""
session = None
socket = None
ioloop = None
stream = None
_inspect = None
def __init__(self, socket, session, loop):
"""Create a channel.
Parameters
----------
socket : :class:`zmq.Socket`
The ZMQ socket to use.
session : :class:`session.Session`
The session to use.
loop
A pyzmq ioloop to connect the socket to using a ZMQStream
"""
super(ThreadedZMQSocketChannel, self).__init__()
self.socket = socket
self.session = session
self.ioloop = loop
evt = Event()
def setup_stream():
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
evt.set()
self.ioloop.add_callback(setup_stream)
evt.wait()
_is_alive = False
def is_alive(self):
return self._is_alive
def start(self):
self._is_alive = True
def stop(self):
self._is_alive = False
def close(self):
if self.socket is not None:
try:
self.socket.close(linger=0)
except Exception:
pass
self.socket = None
def send(self, msg):
"""Queue a message to be sent from the IOLoop's thread.
Parameters
----------
msg : message to send
This is threadsafe, as it uses IOLoop.add_callback to give the loop's
thread control of the action.
"""
def thread_send():
self.session.send(self.stream, msg)
self.ioloop.add_callback(thread_send)
def _handle_recv(self, msg):
"""Callback for stream.on_recv.
Unpacks message, and calls handlers with it.
"""
        ident, smsg = self.session.feed_identities(msg)
msg = self.session.deserialize(smsg)
# let client inspect messages
if self._inspect:
self._inspect(msg)
self.call_handlers(msg)
def call_handlers(self, msg):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
        It is important to remember that this method runs in the ioloop
        thread, so some hand-off logic is needed to ensure that the
        application-level handlers are called in the application thread
        (see the illustrative sketch at the end of this module).
"""
pass
def process_events(self):
"""Subclasses should override this with a method
processing any pending GUI events.
"""
pass
def flush(self, timeout=1.0):
"""Immediately processes all pending messages on this channel.
This is only used for the IOPub channel.
Callers should use this method to ensure that :meth:`call_handlers`
has been called for all messages that have been received on the
0MQ SUB socket of this channel.
This method is thread safe.
Parameters
----------
timeout : float, optional
The maximum amount of time to spend flushing, in seconds. The
default is one second.
"""
# We do the IOLoop callback process twice to ensure that the IOLoop
# gets to perform at least one full poll.
stop_time = time.time() + timeout
for i in range(2):
self._flushed = False
self.ioloop.add_callback(self._flush)
while not self._flushed and time.time() < stop_time:
time.sleep(0.01)
def _flush(self):
"""Callback for :method:`self.flush`."""
self.stream.flush()
self._flushed = True
class IOLoopThread(Thread):
"""Run a pyzmq ioloop in a thread to send and receive messages
"""
_exiting = False
ioloop = None
def __init__(self):
super(IOLoopThread, self).__init__()
self.daemon = True
@staticmethod
@atexit.register
def _notice_exit():
# Class definitions can be torn down during interpreter shutdown.
# We only need to set _exiting flag if this hasn't happened.
if IOLoopThread is not None:
IOLoopThread._exiting = True
def start(self):
"""Start the IOLoop thread
Don't return until self.ioloop is defined,
which is created in the thread
"""
self._start_event = Event()
Thread.start(self)
self._start_event.wait()
def run(self):
"""Run my loop, ignoring EINTR events in the poller"""
if 'asyncio' in sys.modules:
# tornado may be using asyncio,
# ensure an eventloop exists for this thread
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
self.ioloop = ioloop.IOLoop()
# signal that self.ioloop is defined
self._start_event.set()
while True:
try:
self.ioloop.start()
except ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
def stop(self):
"""Stop the channel's event loop and join its thread.
This calls :meth:`~threading.Thread.join` and returns when the thread
terminates. :class:`RuntimeError` will be raised if
:meth:`~threading.Thread.start` is called again.
"""
if self.ioloop is not None:
self.ioloop.add_callback(self.ioloop.stop)
self.join()
self.close()
self.ioloop = None
def close(self):
if self.ioloop is not None:
try:
self.ioloop.close(all_fds=True)
except Exception:
pass
class ThreadedKernelClient(KernelClient):
""" A KernelClient that provides thread-safe sockets with async callbacks on message replies.
"""
@property
def ioloop(self):
return self.ioloop_thread.ioloop
ioloop_thread = Instance(IOLoopThread, allow_none=True)
def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True):
self.ioloop_thread = IOLoopThread()
self.ioloop_thread.start()
if shell:
self.shell_channel._inspect = self._check_kernel_info_reply
super(ThreadedKernelClient, self).start_channels(shell, iopub, stdin, hb, control)
def _check_kernel_info_reply(self, msg):
"""This is run in the ioloop thread when the kernel info reply is received
"""
if msg['msg_type'] == 'kernel_info_reply':
self._handle_kernel_info_reply(msg)
self.shell_channel._inspect = None
def stop_channels(self):
super(ThreadedKernelClient, self).stop_channels()
if self.ioloop_thread.is_alive():
self.ioloop_thread.stop()
iopub_channel_class = Type(ThreadedZMQSocketChannel)
shell_channel_class = Type(ThreadedZMQSocketChannel)
stdin_channel_class = Type(ThreadedZMQSocketChannel)
hb_channel_class = Type(HBChannel)
control_channel_class = Type(ThreadedZMQSocketChannel)
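# Illustrative sketch (not part of the upstream class above), assuming the
# usual consumption pattern: an application subclasses ThreadedZMQSocketChannel
# and overrides call_handlers() so that messages received in the ioloop thread
# are handed to the application thread through a thread-safe queue. The names
# ExampleAppChannel and example_app_queue are hypothetical.
import queue

example_app_queue = queue.Queue()

class ExampleAppChannel(ThreadedZMQSocketChannel):
    """Hypothetical channel that forwards each message to an application queue."""
    def call_handlers(self, msg):
        # Runs in the ioloop thread; only thread-safe hand-off belongs here.
        example_app_queue.put(msg)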
|
|
"""Unit tests for contextlib.py, and other context managers."""
import io
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_except_stopiter(self):
stop_exc = StopIteration('spam')
@contextmanager
def woohoo():
yield
try:
with self.assertWarnsRegex(PendingDeprecationWarning,
"StopIteration"):
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_except_pep479(self):
code = """\
from __future__ import generator_stop
from contextlib import contextmanager
@contextmanager
def woohoo():
yield
"""
locals = {}
exec(code, locals, locals)
woohoo = locals['woohoo']
stop_exc = StopIteration('spam')
try:
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_do_not_unchain_non_stopiteration_exceptions(self):
@contextmanager
def test_issue29692():
try:
yield
except Exception as exc:
raise RuntimeError('issue29692:Chained') from exc
try:
with test_issue29692():
raise ZeroDivisionError
except Exception as ex:
self.assertIs(type(ex), RuntimeError)
self.assertEqual(ex.args[0], 'issue29692:Chained')
self.assertIsInstance(ex.__cause__, ZeroDivisionError)
try:
with test_issue29692():
raise StopIteration('issue29692:Unchained')
except Exception as ex:
self.assertIs(type(ex), StopIteration)
self.assertEqual(ex.args[0], 'issue29692:Unchained')
self.assertIsNone(ex.__cause__)
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
def test_keywords(self):
# Ensure no keyword arguments are inhibited
@contextmanager
def woohoo(self, func, args, kwds):
yield (self, func, args, kwds)
with woohoo(self=11, func=22, args=33, kwds=44) as target:
self.assertEqual(target, (11, 22, 33, 44))
class ClosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = closing.__doc__
obj = closing(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 / 0
self.assertTrue(f.closed)
finally:
support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
"""Example decoration-compatible context manager for testing"""
started = False
exc = None
catch = False
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
return self.catch
class TestContextDecorator(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = mycontext.__doc__
obj = mycontext()
self.assertEqual(obj.__doc__, cm_docstring)
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
self.assertEqual(test.a, 'a')
self.assertEqual(test.b, 'b')
self.assertEqual(test.c, 'c')
test = Test()
test.method(a=1, b=2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
def test_typo_enter(self):
class mycontext(ContextDecorator):
def __unter__(self):
pass
def __exit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_typo_exit(self):
class mycontext(ContextDecorator):
def __enter__(self):
pass
def __uxit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_contextdecorator_as_mixin(self):
class somecontext(object):
started = False
exc = None
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
class mycontext(somecontext, ContextDecorator):
pass
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_contextmanager_as_decorator(self):
@contextmanager
def woohoo(y):
state.append(y)
yield
state.append(999)
state = []
@woohoo(1)
def test(x):
self.assertEqual(state, [1])
state.append(x)
test('something')
self.assertEqual(state, [1, 'something', 999])
# Issue #11647: Ensure the decorated function is 'reusable'
state = []
test('something else')
self.assertEqual(state, [1, 'something else', 999])
class TestExitStack(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = ExitStack.__doc__
obj = ExitStack()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_resources(self):
with ExitStack():
pass
def test_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
def _exit(*args, **kwds):
"""Test metadata propagation"""
result.append((args, kwds))
with ExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.callback(_exit, *args, **kwds)
elif args:
f = stack.callback(_exit, *args)
elif kwds:
f = stack.callback(_exit, **kwds)
else:
f = stack.callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper.__wrapped__, _exit)
self.assertNotEqual(wrapper.__name__, _exit.__name__)
self.assertIsNone(wrapper.__doc__, _exit.__doc__)
self.assertEqual(result, expected)
def test_push(self):
exc_raised = ZeroDivisionError
def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
def _suppress_exc(*exc_details):
return True
def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
def __enter__(self):
self.fail("Should not be called!")
def __exit__(self, *exc_details):
self.check_exc(*exc_details)
with ExitStack() as stack:
stack.push(_expect_ok)
self.assertIs(stack._exit_callbacks[-1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
1/0
def test_enter_context(self):
class TestCM(object):
def __enter__(self):
result.append(1)
def __exit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
with ExitStack() as stack:
@stack.callback # Registered first => cleaned up last
def _exit():
result.append(4)
self.assertIsNotNone(_exit)
stack.enter_context(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
def test_close(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(1)
self.assertIsNotNone(_exit)
stack.close()
result.append(2)
self.assertEqual(result, [1, 2])
def test_pop_all(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(3)
self.assertIsNotNone(_exit)
new_stack = stack.pop_all()
result.append(1)
result.append(2)
new_stack.close()
self.assertEqual(result, [1, 2, 3])
def test_exit_raise(self):
with self.assertRaises(ZeroDivisionError):
with ExitStack() as stack:
stack.push(lambda *exc: False)
1/0
def test_exit_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
1/0
def test_exit_exception_chaining_reference(self):
# Sanity check to make sure that ExitStack chaining matches
# actual nested with statements
class RaiseExc:
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, *exc_details):
raise self.exc
class RaiseExcWithContext:
def __init__(self, outer, inner):
self.outer = outer
self.inner = inner
def __enter__(self):
return self
def __exit__(self, *exc_details):
try:
raise self.inner
except:
raise self.outer
class SuppressExc:
def __enter__(self):
return self
def __exit__(self, *exc_details):
type(self).saved_details = exc_details
return True
try:
with RaiseExc(IndexError):
with RaiseExcWithContext(KeyError, AttributeError):
with SuppressExc():
with RaiseExc(ValueError):
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = SuppressExc.saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
def raise_exc(exc):
raise exc
saved_details = None
def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
with ExitStack() as stack:
stack.callback(raise_exc, IndexError)
stack.callback(raise_exc, KeyError)
stack.callback(raise_exc, AttributeError)
stack.push(suppress_exc)
stack.callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_non_suppressing(self):
# http://bugs.python.org/issue19092
def raise_exc(exc):
raise exc
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.callback(lambda: None)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, IndexError)
else:
self.fail("Expected IndexError, but no exception was raised")
try:
with ExitStack() as stack:
stack.callback(raise_exc, KeyError)
stack.push(suppress_exc)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, KeyError)
else:
self.fail("Expected KeyError, but no exception was raised")
def test_exit_exception_with_correct_context(self):
# http://bugs.python.org/issue20317
@contextmanager
def gets_the_context_right(exc):
try:
yield
finally:
raise exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
# The contextmanager already fixes the context, so prior to the
# fix, ExitStack would try to fix it *again* and get into an
# infinite self-referential loop
try:
with ExitStack() as stack:
stack.enter_context(gets_the_context_right(exc4))
stack.enter_context(gets_the_context_right(exc3))
stack.enter_context(gets_the_context_right(exc2))
raise exc1
except Exception as exc:
self.assertIs(exc, exc4)
self.assertIs(exc.__context__, exc3)
self.assertIs(exc.__context__.__context__, exc2)
self.assertIs(exc.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__)
def test_exit_exception_with_existing_context(self):
# Addresses a lack of test coverage discovered after checking in a
# fix for issue 20317 that still contained debugging code.
def raise_nested(inner_exc, outer_exc):
try:
raise inner_exc
finally:
raise outer_exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
exc5 = Exception(5)
try:
with ExitStack() as stack:
stack.callback(raise_nested, exc4, exc5)
stack.callback(raise_nested, exc2, exc3)
raise exc1
except Exception as exc:
self.assertIs(exc, exc5)
self.assertIs(exc.__context__, exc4)
self.assertIs(exc.__context__.__context__, exc3)
self.assertIs(exc.__context__.__context__.__context__, exc2)
self.assertIs(
exc.__context__.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__.__context__)
def test_body_exception_suppress(self):
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.push(suppress_exc)
1/0
except IndexError as exc:
self.fail("Expected no exception, got IndexError")
def test_exit_exception_chaining_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
stack.push(lambda *exc: 1/0)
stack.push(lambda *exc: {}[1])
def test_excessive_nesting(self):
# The original implementation would die with RecursionError here
with ExitStack() as stack:
for i in range(10000):
stack.callback(int)
def test_instance_bypass(self):
class Example(object): pass
cm = Example()
cm.__exit__ = object()
stack = ExitStack()
self.assertRaises(AttributeError, stack.enter_context, cm)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1], cm)
def test_dont_reraise_RuntimeError(self):
# https://bugs.python.org/issue27122
class UniqueException(Exception): pass
class UniqueRuntimeError(RuntimeError): pass
@contextmanager
def second():
try:
yield 1
except Exception as exc:
raise UniqueException("new exception") from exc
@contextmanager
def first():
try:
yield 1
except Exception as exc:
raise exc
# The UniqueRuntimeError should be caught by second()'s exception
# handler which chain raised a new UniqueException.
with self.assertRaises(UniqueException) as err_ctx:
with ExitStack() as es_ctx:
es_ctx.enter_context(second())
es_ctx.enter_context(first())
raise UniqueRuntimeError("please no infinite loop.")
exc = err_ctx.exception
self.assertIsInstance(exc, UniqueException)
self.assertIsInstance(exc.__context__, UniqueRuntimeError)
self.assertIsNone(exc.__context__.__context__)
self.assertIsNone(exc.__context__.__cause__)
self.assertIs(exc.__cause__, exc.__context__)
class TestRedirectStream:
redirect_stream = None
orig_stream = None
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = self.redirect_stream.__doc__
obj = self.redirect_stream(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_redirect_in_init(self):
orig_stdout = getattr(sys, self.orig_stream)
self.redirect_stream(None)
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
def test_redirect_to_string_io(self):
f = io.StringIO()
msg = "Consider an API like help(), which prints directly to stdout"
orig_stdout = getattr(sys, self.orig_stream)
with self.redirect_stream(f):
print(msg, file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue().strip()
self.assertEqual(s, msg)
def test_enter_result_is_target(self):
f = io.StringIO()
with self.redirect_stream(f) as enter_result:
self.assertIs(enter_result, f)
def test_cm_is_reusable(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
def test_cm_is_reentrant(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
class TestRedirectStdout(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stdout
orig_stream = "stdout"
class TestRedirectStderr(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stderr
orig_stream = "stderr"
class TestSuppress(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = suppress.__doc__
obj = suppress()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_result_from_enter(self):
with suppress(ValueError) as enter_result:
self.assertIsNone(enter_result)
def test_no_exception(self):
with suppress(ValueError):
self.assertEqual(pow(2, 5), 32)
def test_exact_exception(self):
with suppress(TypeError):
len(5)
def test_exception_hierarchy(self):
with suppress(LookupError):
'Hello'[50]
def test_other_exception(self):
with self.assertRaises(ZeroDivisionError):
with suppress(TypeError):
1/0
def test_no_args(self):
with self.assertRaises(ZeroDivisionError):
with suppress():
1/0
def test_multiple_exception_args(self):
with suppress(ZeroDivisionError, TypeError):
1/0
with suppress(ZeroDivisionError, TypeError):
len(5)
def test_cm_is_reentrant(self):
ignore_exceptions = suppress(Exception)
with ignore_exceptions:
pass
with ignore_exceptions:
len(5)
with ignore_exceptions:
with ignore_exceptions: # Check nested usage
len(5)
outer_continued = True
1/0
self.assertTrue(outer_continued)
if __name__ == "__main__":
unittest.main()
|
|
r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, or macpath
- os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "extsep"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'mac' in _names:
name = 'mac'
linesep = '\r'
from mac import *
try:
from mac import _exit
except ImportError:
pass
import macpath as path
import mac
__all__.extend(_get_exports_list(mac))
del mac
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
elif 'efi' in _names:
name = 'efi'
linesep = '\r\n'
from efi import *
try:
from efi import _exit
except ImportError:
pass
import efipath
__all__.extend(_get_exports_list(efipath))
path = efipath
del efipath
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
makedirs(head, mode)
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and empty all intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
from os.path import join, getsize
for root, dirs, files in walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not islink(path):
for x in walk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
from errno import ENOENT, ENOTDIR
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != ENOENT and e.errno != ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import popen2
stdout, stdin = popen2.popen2(cmd, bufsize)
return stdin, stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import popen2
stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
return stdin, stdout, stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import popen2
stdout, stdin = popen2.popen4(cmd, bufsize)
return stdin, stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except:
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
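# Usage sketch (editor's addition): this fallback reads n bytes from
# /dev/urandom, e.g. for generating an unguessable token (Python 2 str API):
#
#   token = urandom(16).encode("hex")   # 32 hex characters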
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_queue_build_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/queueBuild')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_build_source_upload_url_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/getBuildSourceUploadUrl')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class RegistriesOperations(object):
"""RegistriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2018_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _queue_build_initial(
self,
resource_group_name: str,
registry_name: str,
build_request: "_models.QueueBuildRequest",
**kwargs: Any
) -> Optional["_models.Build"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Build"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(build_request, 'QueueBuildRequest')
request = build_queue_build_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
content_type=content_type,
json=_json,
template_url=self._queue_build_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Build', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_queue_build_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/queueBuild'} # type: ignore
@distributed_trace
def begin_queue_build(
self,
resource_group_name: str,
registry_name: str,
build_request: "_models.QueueBuildRequest",
**kwargs: Any
) -> LROPoller["_models.Build"]:
"""Creates a new build based on the request parameters and add it to the build queue.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param build_request: The parameters of a build that needs to be queued.
:type build_request: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.QueueBuildRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Build or the result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Build"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._queue_build_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
build_request=build_request,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Build', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_queue_build.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/queueBuild'} # type: ignore
@distributed_trace
def get_build_source_upload_url(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> "_models.SourceUploadDefinition":
"""Get the upload location for the user to be able to upload the source.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceUploadDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.SourceUploadDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceUploadDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_build_source_upload_url_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.get_build_source_upload_url.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SourceUploadDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_build_source_upload_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/getBuildSourceUploadUrl'} # type: ignore
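# Illustrative usage sketch (editor's addition, not generated code).  Assuming a
# management client that exposes this operation group as `client.registries`
# (for example ContainerRegistryManagementClient pinned to api-version
# 2018-02-01-preview), and a prepared QueueBuildRequest model `build_request`;
# the `upload_url` attribute is assumed from the SourceUploadDefinition model:
#
#   poller = client.registries.begin_queue_build(
#       resource_group_name="my-rg",
#       registry_name="myregistry",
#       build_request=build_request,
#   )
#   build = poller.result()   # blocks until the long-running operation finishes
#
#   upload = client.registries.get_build_source_upload_url("my-rg", "myregistry")
#   print(upload.upload_url)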
|
|
#==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import sys
import unittest
import SimpleITK as sitk
import numpy as np
sizeX = 4
sizeY = 5
sizeZ = 3
class TestNumpySimpleITKInterface(unittest.TestCase):
""" This tests numpy array <-> SimpleITK Image conversion. """
def setUp(self):
pass
def _helper_check_sitk_to_numpy_type(self, sitkType, numpyType):
image = sitk.Image( (9, 10), sitkType, 1 )
a = sitk.GetArrayFromImage( image )
self.assertEqual( numpyType, a.dtype )
self.assertEqual( (10, 9), a.shape )
def test_type_to_numpy(self):
"try all sitk pixel type to convert to numpy"
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt32, np.uint32)
if sitk.sitkUInt64 != sitk.sitkUnknown:
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt64, np.uint64)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt32, np.int32)
if sitk.sitkInt64 != sitk.sitkUnknown:
self._helper_check_sitk_to_numpy_type(sitk.sitkInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat64, np.float64)
self._helper_check_sitk_to_numpy_type(sitk.sitkComplexFloat32, np.complex64)
self._helper_check_sitk_to_numpy_type(sitk.sitkComplexFloat64, np.complex128)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt32, np.uint32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt32, np.int32)
if sitk.sitkVectorUInt64 != sitk.sitkUnknown:
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt64, np.uint64)
if sitk.sitkVectorInt64 != sitk.sitkUnknown:
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat64, np.float64)
#self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt8, np.uint8)
#self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt16, np.uint16)
#self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt32, np.uint32)
#self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt64, np.uint64)
def test_to_numpy_and_back(self):
"""Test converting an image to numpy and back"""
img = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[10]*3, mean = [50,50] )
h = sitk.Hash( img )
# convert the image to and fro a numpy array
img = sitk.GetImageFromArray( sitk.GetArrayFromImage( img ) )
self.assertEqual( h, sitk.Hash( img ))
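# Editor's note (illustrative, not part of the test suite): GetArrayFromImage
# reverses the index order, so an image of size (x, y, z) becomes an array of
# shape (z, y, x), and GetImageFromArray reverses it back:
#
#   img = sitk.Image(4, 5, 3, sitk.sitkUInt8)    # size (4, 5, 3)
#   arr = sitk.GetArrayFromImage(img)            # shape (3, 5, 4)
#   assert sitk.GetImageFromArray(arr).GetSize() == img.GetSize()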
def test_isVector(self):
""" Test Behavior of isVector option. """
# Check 2D
nda = np.arange(6, dtype=np.float32).reshape([2,3])
img = sitk.GetImageFromArray(nda)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
self.assertEqual(img.GetPixel([1,0]), 1)
img = sitk.GetImageFromArray(nda, isVector=False)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
self.assertEqual(img.GetPixel([1,0]), 1)
img = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
self.assertEqual(img.GetNumberOfComponentsPerPixel(), 1)
self.assertEqual(img.GetPixel([1,0]), (1,))
# Check 3D
nda = np.arange(30, dtype=np.float32).reshape([2,3,5])
img = sitk.GetImageFromArray(nda)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
self.assertEqual(img.GetPixel([1,0,0]), 1)
self.assertEqual(img.GetPixel([0,1,0]), 5)
img = sitk.GetImageFromArray(nda, isVector=False)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
self.assertEqual(img.GetPixel([1,0,0]), 1)
self.assertEqual(img.GetPixel([0,1,0]), 5)
img = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(img.GetSize(), (3,2))
self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
self.assertEqual(img.GetNumberOfComponentsPerPixel(), 5)
self.assertEqual(img.GetPixel([1,0,0]), (5,6,7,8,9))
self.assertEqual(img.GetPixel([0,1,0]), (15,16,17,18,19))
# Check 4D
nda = np.arange(210, dtype=np.float32).reshape([3,5,7,2])
# Special case to default to VectorImage
img = sitk.GetImageFromArray(nda)
self.assertEqual(img.GetSize(), (7,5,3))
self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
self.assertEqual(img.GetNumberOfComponentsPerPixel(), 2)
self.assertEqual(img.GetPixel([1,0,0]), (2,3))
self.assertEqual(img.GetPixel([0,1,0]), (14,15))
img = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(img.GetSize(), (7,5,3))
self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
self.assertEqual(img.GetNumberOfComponentsPerPixel(), 2)
self.assertEqual(img.GetPixel([1,0,0]), (2,3))
self.assertEqual(img.GetPixel([0,1,0]), (14,15))
# 4D Image may not be supported by SimpleITK
try:
sitk.Image([1]*4, sitk.sitkUInt8)
except RuntimeError:
return
img = sitk.GetImageFromArray(nda, isVector=False)
self.assertEqual(img.GetSize(), nda.shape[::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
self.assertEqual(img.GetPixel([1,0,0,0]), 1)
self.assertEqual(img.GetPixel([0,1,0,0]), 2)
nda = np.arange(210*9, dtype=np.float32).reshape([3,5,7,9,2])
img = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(img.GetSize(), nda.shape[-2::-1])
self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
self.assertEqual(img.GetPixel([1,0,0,0]), (2,3))
self.assertEqual(img.GetPixel([0,1,0,0]), (18,19))
self.assertEqual(img.GetPixel([0,0,1,0]), (126,127))
def test_complex_image_to_numpy(self):
"""Test converting back and forth between numpy and SimpleITK
images where the SimpleITK image is of complex type."""
# Check 2D
img_real = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[10]*3, mean = [50,50] )
img_imaginary = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[20]*3, mean = [10,10] )
img = sitk.RealAndImaginaryToComplex(img_real, img_imaginary);
h = sitk.Hash( img )
nda = sitk.GetArrayFromImage(img)
self.assertEqual(nda.shape, (100,100))
self.assertEqual(nda[0,0], img.GetPixel([0,0]))
self.assertEqual(nda[2,1], img.GetPixel([1,2]))
img2 = sitk.GetImageFromArray( nda )
self.assertEqual( h, sitk.Hash(img2) )
self.assertEqual( img.GetSize(), (100,100) )
# check 4D
img = sitk.Image( [10, 9, 8, 7], sitk.sitkComplexFloat64 )
h = sitk.Hash( img )
nda = sitk.GetArrayFromImage(img)
self.assertEqual(nda.shape, (7,8,9,10))
self.assertEqual(nda[0,0,0,0], 0+0j)
img2 = sitk.GetImageFromArray(nda)
self.assertEqual(img2.GetSize(), img.GetSize())
self.assertEqual(img2.GetNumberOfComponentsPerPixel(), img.GetNumberOfComponentsPerPixel())
self.assertEqual(h, sitk.Hash(img2))
def test_vector_image_to_numpy(self):
"""Test converting back and forth between numpy and SimpleITK
images where the SimpleITK image has multiple components and
is stored as a VectorImage."""
# Check 2D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4])
h = sitk.Hash( img )
nda = sitk.GetArrayFromImage(img)
self.assertEqual(nda.shape, (4,3,2))
self.assertEqual(nda[0,0].tolist(), [0,0])
self.assertEqual(nda[2,1].tolist(), [1,2])
self.assertEqual(nda[0,:,0].tolist(), [0,1,2])
img2 = sitk.GetImageFromArray( nda, isVector=True)
self.assertEqual( h, sitk.Hash(img2) )
# check 3D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4,5])
h = sitk.Hash( img )
nda = sitk.GetArrayFromImage(img)
self.assertEqual(nda.shape, (5,4,3,3))
self.assertEqual(nda[0,0,0].tolist(), [0,0,0])
self.assertEqual(nda[0,0,:,0].tolist(), [0,1,2])
self.assertEqual(nda[0,:,1,1].tolist(), [0,1,2,3])
img2 = sitk.GetImageFromArray(nda)
self.assertEqual(img2.GetSize(), img.GetSize())
self.assertEqual(img2.GetNumberOfComponentsPerPixel(), img.GetNumberOfComponentsPerPixel())
self.assertEqual(h, sitk.Hash(img2))
def test_non_contiguous(self):
"""Test converting non-contiguous numpy arrays to SimpleITK Image"""
arr = np.arange(5*7*11, dtype=np.int32)
arr.shape = (5,7,11)
img = sitk.GetImageFromArray(arr[::2,...])
self.assertEqual(img.GetSize(), (11, 7, 3))
farr = np.asarray(arr, order='F')
img = sitk.GetImageFromArray(arr)
self.assertEqual(img.GetSize(), (11, 7, 5))
img = sitk.GetImageFromArray(arr[2:,:,::3])
self.assertEqual(img.GetSize(), (4, 7, 3))
def test_legacy(self):
"""Test SimpleITK Image to numpy array."""
# self.assertRaises(TypeError, sitk.GetArrayFromImage, 3)
# 2D image
image = sitk.Image(sizeX, sizeY, sitk.sitkInt32)
for j in range(sizeY):
for i in range(sizeX):
image[i, j] = j*sizeX + i
print(sitk.GetArrayFromImage(image))
self.assertEqual( type (sitk.GetArrayFromImage(image)), np.ndarray )
# 3D image
image = sitk.Image(sizeX, sizeY, sizeZ, sitk.sitkFloat32)
for k in range(sizeZ):
for j in range(sizeY):
for i in range(sizeX):
image[i, j, k] = (sizeY*k +j)*sizeX + i
print(sitk.GetArrayFromImage(image))
self.assertEqual( type (sitk.GetArrayFromImage(image)), np.ndarray )
def test_legacy_array2sitk(self):
"""Test numpy array to SimpleITK Image."""
arr = np.arange(20, dtype=np.float64)
arr.shape = (sizeY, sizeX)
image = sitk.GetImageFromArray(arr)
self.assertEqual(image.GetSize(), (sizeX, sizeY))
self.assertEqual(image[0,0], 0.0)
self.assertEqual(image[1,1], 5.0)
self.assertEqual(image[2,2], 10.0)
arr = np.arange(60, dtype=np.int16)
arr.shape = (sizeZ, sizeY, sizeX)
image = sitk.GetImageFromArray(arr)
self.assertEqual(image.GetSize(), (sizeX, sizeY, sizeZ))
self.assertEqual(image[0,0,0], 0)
self.assertEqual(image[1,1,1], 25)
self.assertEqual(image[2,2,2], 50)
if __name__ == '__main__':
unittest.main()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cachepolicy_cachepolicylabel_binding(base_resource) :
""" Binding class showing the cachepolicylabel that can be bound to cachepolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._policyname = ""
self.___count = 0
@property
def policyname(self) :
ur"""Name of the cache policy about which to display details.<br/>Minimum length = 1.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the cache policy about which to display details.<br/>Minimum length = 1
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def boundto(self) :
ur"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
ur"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
ur"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
ur"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cachepolicy_cachepolicylabel_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cachepolicy_cachepolicylabel_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.policyname is not None :
return str(self.policyname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, policyname) :
ur""" Use this API to fetch cachepolicy_cachepolicylabel_binding resources.
"""
try :
obj = cachepolicy_cachepolicylabel_binding()
obj.policyname = policyname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, policyname, filter_) :
ur""" Use this API to fetch filtered set of cachepolicy_cachepolicylabel_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = cachepolicy_cachepolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, policyname) :
ur""" Use this API to count cachepolicy_cachepolicylabel_binding resources configued on NetScaler.
"""
try :
obj = cachepolicy_cachepolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, policyname, filter_) :
ur""" Use this API to count the filtered set of cachepolicy_cachepolicylabel_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cachepolicy_cachepolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class cachepolicy_cachepolicylabel_binding_response(base_response) :
def __init__(self, length=1) :
self.cachepolicy_cachepolicylabel_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cachepolicy_cachepolicylabel_binding = [cachepolicy_cachepolicylabel_binding() for _ in range(length)]
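# Illustrative usage sketch (editor's addition).  Assumes `client` is an
# already-authenticated nitro_service session (construction and login omitted)
# and "my_cache_policy" is a hypothetical policy name:
#
#   bindings = cachepolicy_cachepolicylabel_binding.get(client, "my_cache_policy")
#   for b in bindings:
#       print("%s -> %s (priority %s)" % (b.policyname, b.labelname, b.priority))
#   total = cachepolicy_cachepolicylabel_binding.count(client, "my_cache_policy")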
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetDlSrcAction,
SetDlDstAction,
SetFieldAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_IPv4
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_38():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 38 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify source and destination MAC addresses example1"
priority = 700
cookie = 1100
match_in_port = 109
match_eth_type = ETH_TYPE_IPv4
match_ipv4_src_addr = "10.0.0.4/32"
act_mod_src_mac_addr = "00:00:00:11:23:ae"
act_mod_dst_mac_addr = "a0:ff:29:01:19:61"
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IPv4 Source Address (%s)" %
(match_in_port,
hex(match_eth_type),
match_ipv4_src_addr))
print (" Actions: Modify Ethernet Source MAC Address (%s)\n"
" Modify Ethernet Destination MAC Address (%s)\n"
" Output (%s)" %
(act_mod_src_mac_addr, act_mod_dst_mac_addr, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetDlSrcAction(action_order)
action.set_dl_src(act_mod_src_mac_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetDlDstAction(action_order)
action.set_dl_dst(act_mod_dst_mac_addr)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ipv4_src(match_ipv4_src_addr)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify source and destination MAC addresses example2"
priority = 700
cookie = 1100
match_in_port = 109
match_eth_type = ETH_TYPE_IPv4
match_ipv4_src_addr = "192.1.0.11/32"
act_mod_src_mac_addr = "00:1c:42:80:bd:66"
act_mod_dst_mac_addr = "aa:1d:40:60:7c:9f"
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IPv4 Source Address (%s)" %
(match_in_port,
hex(match_eth_type),
match_ipv4_src_addr))
print (" Actions: Set Field (Ethernet Source MAC Address %s)\n"
" Set Field (Ethernet Destination MAC Address %s)\n"
" Output (%s)" %
(act_mod_src_mac_addr, act_mod_dst_mac_addr, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetFieldAction(action_order)
action.set_eth_src(act_mod_src_mac_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_eth_dst(act_mod_dst_mac_addr)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ipv4_src(match_ipv4_src_addr)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_38()
|
|
'''
Created on Jul 1, 2009
This module contains tests for the face recognition algorithms.
@author: bolme
'''
import unittest
import pyvision as pv
import numpy as np
#from optic_flow import *
#from distance import *
#import cv2.cv as cv
import os.path
class _TestNormalize(unittest.TestCase):
def setUp(self):
# Eye coordinates generated automatically
#leye = pv.Point(250.336538,174.074519)
#reye = pv.Point(343.828125,180.042067)
fname = os.path.join(pv.__path__[0],'data','misc','lena.jpg')
im = pv.Image(fname,bw_annotate=True)
#affine = pv.AffineFromPoints(leye,reye,pv.Point(48.0,64.0),pv.Point(144.0,64.0),(192,192))
self.tile = im
def test_1_meanStd(self):
'''meanStd Normalization: norm.mean() = 0.0 and norm.std() = 1.0....'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.meanStd(self.tile)
if ilog != None:
ilog.log(norm,label="meanStd_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=3)
self.assertAlmostEqual(mat.std(),1.0,places=3)
def test_2_meanUnit(self):
'''meanUnit Normalization: norm.mean() = 0.0 and ||norm|| = 1.0....'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.meanUnit(self.tile)
if ilog != None:
ilog.log(norm,label="meanUnit_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0)
length = np.sqrt((mat**2).sum())
self.assertAlmostEqual(length,1.0,places=4)
def test_3_unit(self):
'''unit Normalization: ||norm|| = 1.0 and dot(norm,im)/||im|| = 1.0.'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.unit(self.tile)
if ilog != None:
ilog.log(norm,label="unit_Normalization")
mat = norm.asMatrix2D()
length = np.sqrt((mat**2).sum())
self.assertAlmostEqual(length,1.0,places=3)
mat = norm.asMatrix2D()
mat = mat.flatten()
im = self.tile.asMatrix2D().flatten()
proj = np.dot(mat,im)
length = np.sqrt((im**2).sum())
self.assertAlmostEqual(proj/length,1.0,places=3)
def test_4_bandPass(self):
'''bandPassFilter Normalization: ...................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.bandPassFilter(self.tile,10.0,4.0)
if ilog != None:
ilog.log(norm,label="bandPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=4)
self.assertAlmostEqual(mat.std(),12.090113839874826,places=3)
def test_5_lowPass(self):
'''lowPassFilter Normalization: ....................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.lowPassFilter(self.tile,10.0)
if ilog != None:
ilog.log(norm,label="lowPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),123.69997406005859,places=3)
self.assertAlmostEqual(mat.std(),36.886999835117216,places=3)
def test_6_highPass(self):
'''highPassFilter Normalization: ...................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.highPassFilter(self.tile,10.0)
if ilog != None:
ilog.log(norm,label="highPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=4)
self.assertAlmostEqual(mat.std(),22.936873341661158,places=3)
def test_7_veryHighPass(self):
'''highPassFilter Normalization: sigma = 1.5........................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
# This setting corresponds to the default gaussian in selfQuotient
norm = pv.highPassFilter(self.tile,1.5)
if ilog != None:
ilog.log(norm,label="veryHighPass_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.0,places=4)
self.assertAlmostEqual(mat.std(),8.0027218003238687,places=3)
def test_8_selfQuotient(self):
'''selfQuotient Normalization: .....................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
norm = pv.selfQuotientImage(self.tile)
if ilog != None:
ilog.log(norm,label="selfQuotient_Normalization")
mat = norm.asMatrix2D()
self.assertAlmostEqual(mat.mean(),0.98861616849899292,places=3)
self.assertAlmostEqual(mat.std(),0.1647989569275968,places=3)
class _TestSURF(unittest.TestCase):
def test_1_SURF(self):
'''SURF Lena: ......................................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
filename = os.path.join(pv.__path__[0],'data','misc','lena.jpg')
im = pv.Image(filename)
timer = pv.Timer()
keypoints,descriptors = pv.surf(im)
timer.mark("LenaSurf")
if ilog != None:
ilog(timer,"SURFLena")
for each in keypoints:
im.annotateCircle(pv.Point(each[0][0],each[0][1]), each[2])
if ilog != None:
ilog(im,'SurfKeypoints')
self.assertEqual(len(keypoints),len(descriptors))
self.assertEqual(len(keypoints),774)
#print descriptors
def test_2_SURF(self):
'''SURF Taz: .......................................................'''
ilog = None
if 'ilog' in globals().keys():
ilog = globals()['ilog']
filename = os.path.join(pv.__path__[0],'data','test','TAZ_0010.jpg')
im = pv.Image(filename)
timer = pv.Timer()
keypoints,descriptors = pv.surf(im)
timer.mark("TazSurf")
if ilog != None:
ilog(timer,"SURFTaz")
for each in keypoints:
im.annotateCircle(pv.Point(each[0][0],each[0][1]), each[2])
if ilog != None:
ilog(im,'SurfKeypoints')
self.assertEqual(len(keypoints),len(descriptors))
self.assertEqual(len(keypoints),367)
class _TestDistance(unittest.TestCase):
def setUp(self):
'''Initialize the tests'''
def test_1_bool2Ubyte(self):
'''distance::boolToUbyte ...........................................'''
a = np.random.randint(2,size=16) > 0
b = pv.boolToUbyte(a)
c = pv.ubyteToBool(b)
d = pv.boolToUbyte(c)
self.assert_((a == c).sum() == 16)
self.assert_((b == d).sum() == 2)
a = np.random.randint(2,size=5000) > 0
b = pv.boolToUbyte(a)
c = pv.ubyteToBool(b)
d = pv.boolToUbyte(c)
self.assert_((a == c).sum() == 5000)
self.assert_((b == d).sum() == 625)
def test_2_hamming(self):
'''distance::hamming 1..............................................'''
a = np.random.randint(2,size=16) > 0
b = np.random.randint(2,size=16) > 0
bin_hamming = pv.hamming(a,b)
a = pv.boolToUbyte(a)
b = pv.boolToUbyte(b)
byte_hamming = pv.hamming(a,b)
self.assertEquals(bin_hamming,byte_hamming)
def test_3_hamming(self):
'''distance::hamming 2..............................................'''
a = np.random.randint(2,size=1769472) > 0
b = np.random.randint(2,size=1769472) > 0
bin_hamming = pv.hamming(a,b)
a = pv.boolToUbyte(a)
b = pv.boolToUbyte(b)
byte_hamming = pv.hamming(a,b)
self.assertEquals(bin_hamming,byte_hamming)
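# Editor's note (illustrative): boolToUbyte packs 8 booleans per byte, which is
# why 16 bits round-trip to 2 bytes and 5000 bits to 625 bytes above, and why
# hamming() agrees on either representation:
#
#   bits = np.random.randint(2, size=32) > 0
#   packed = pv.boolToUbyte(bits)                       # 4 packed bytes
#   assert pv.hamming(bits, bits) == pv.hamming(packed, packed) == 0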
def test():
'''Run the face test suite.'''
pv.disableCommercialUseWarnings()
normalize_suite = unittest.TestLoader().loadTestsFromTestCase(_TestNormalize)
surf_suite = unittest.TestLoader().loadTestsFromTestCase(_TestSURF)
dist_suite = unittest.TestLoader().loadTestsFromTestCase(_TestDistance)
test_suites = [
normalize_suite,
surf_suite,
dist_suite
]
pyvision_suite = unittest.TestSuite(test_suites)
unittest.TextTestRunner(verbosity=2).run(pyvision_suite)
if __name__ == '__main__':
# By default run the test suite
unittest.main(testRunner = unittest.TextTestRunner(verbosity=2))
|
|
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, UserPresence, get_recipient, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, \
internal_send_message, update_user_presence, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, \
do_set_realm_invite_required, do_set_realm_invite_by_admins_only, get_default_subs, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_set_muted_topics, clear_followup_emails_queue, do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin
from zerver.lib.avatar import avatar_url
from zerver.lib.upload import upload_message_image_through_web_client, \
get_signed_upload_url, get_realm_for_filename
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, generate_random_token
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
import urllib
import base64
import time
import logging
import jwt
import hashlib
import hmac
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def name_changes_disabled(realm):
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
if not mit_beta_user and prereg_user.referred_by:
realm = prereg_user.referred_by.realm
domain = realm.domain
if realm.restricted_to_domain and domain != resolve_email_to_domain(email):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
# SSO users don't need no passwords
password = None
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = not UserProfile.objects.filter(realm=realm, is_bot=False).exists()
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
try:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
except UserProfile.DoesNotExist:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
else:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
# This logs you in using the ZulipDummyBackend, since honestly nothing
# more fancy than this is required.
login(request, authenticate(username=user_profile.email, use_dummy_backend=True))
if first_in_realm:
do_change_is_admin(user_profile, True)
return HttpResponseRedirect(reverse('zerver.views.initial_invite_page'))
else:
return HttpResponseRedirect(reverse('zerver.views.home'))
return render_to_response('zerver/register.html',
{'form': form,
'company_name': domain,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'password_auth_enabled': password_auth_enabled(realm),
},
context_instance=RequestContext(request))
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def accounts_accept_terms(request):
email = request.user.email
domain = resolve_email_to_domain(email)
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
send_mail('Terms acceptance for ' + full_name,
loader.render_to_string('zerver/tos_accept_body.txt',
{'name': full_name,
'email': email,
'ip': request.META['REMOTE_ADDR'],
'browser': request.META['HTTP_USER_AGENT']}),
settings.EMAIL_HOST_USER,
["all@zulip.com"])
do_change_full_name(request.user, full_name)
return redirect(home)
else:
form = ToSForm()
return render_to_response('zerver/accounts_accept_terms.html',
{ 'form': form, 'company_name': domain, 'email': email },
context_instance=RequestContext(request))
from zerver.lib.ccache import make_ccache
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request, user_profile,
cred=REQ(default=None)):
if cred is None:
return json_error("Could not find Kerberos credential")
if not user_profile.realm.domain == "mit.edu":
return json_error("Webathena login only for mit.edu realm")
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user == "golem":
# Hack for an mit.edu user whose Kerberos username doesn't
# match what he zephyrs as
user = "ctl"
assert(user == user_profile.email.split("@")[0])
ccache = make_ccache(parsed_cred)
except Exception:
return json_error("Invalid Kerberos cache")
# TODO: Send these data via (say) rabbitmq
try:
subprocess.check_call(["ssh", "zulip@zmirror2.zulip.net", "--",
"/home/zulip/zulip/bots/process_ccache",
user,
user_profile.api_key,
base64.b64encode(ccache)])
except Exception:
logging.exception("Error updating the user's ccache")
return json_error("We were unable to setup mirroring for you")
return json_success()
def api_endpoint_docs(request):
raw_calls = open('templates/zerver/api_content.json', 'r').read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (settings.EXTERNAL_API_URI, call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com", settings.EXTERNAL_API_URI)
response = call['example_response']
if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", "default")
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
context_instance=RequestContext(request))
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails=REQ):
if not invitee_emails:
return json_error("You must specify at least one email address.")
invitee_emails = set(re.split(r'[, \n]', invitee_emails))
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error("You must specify at least one stream for invitees to join.")
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = []
for stream_name in stream_names:
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
return json_error("Stream does not exist: %s. No invites were sent." % stream_name)
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def create_homepage_form(request, user_info=None):
if user_info:
return HomepageForm(user_info, domain=request.session.get("domain"))
# An empty fields dict is not treated the same way as not
# providing it.
return HomepageForm(domain=request.session.get("domain"))
def maybe_send_to_registration(request, email, full_name=''):
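# The remote-auth flows below land here when no account exists yet: create
# (or reuse) a PreregistrationUser and redirect to its confirmation link.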
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
# urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.quote_plus(full_name.encode('utf8')))))
else:
return render_to_response('zerver/accounts_home.html', {'form': form},
context_instance=RequestContext(request))
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError("No REMOTE_USER set.")
user_profile = authenticate(remote_user=remote_user)
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
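# Log in via a JSON web token: the 'user' and 'realm' claims form the email
# address, and the signature is verified against the per-domain key in
# settings.JWT_AUTH_KEYS before the dummy backend is used to fetch the user.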
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError("No JSON web token passed in request")
except jwt.DecodeError:
raise JsonableError("Bad JSON web token")
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError("No user specified in JSON web token claims")
domain = payload.get('realm', None)
if domain is None:
raise JsonableError("No domain specified in JSON web token claims")
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
user_profile = authenticate(username=email, use_dummy_backend=True)
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError("Bad JSON web token signature")
except KeyError:
raise JsonableError("Realm not authorized for JWT login")
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request, value):
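# HMAC the given value with Django's per-session CSRF token so the OAuth2
# 'state' parameter can be checked when Google redirects back to us.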
return hmac.new(get_token(request).encode('utf-8'), value, hashlib.sha256).hexdigest()
def start_google_oauth2(request):
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
return redirect(uri + urllib.urlencode(params))
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
if requests_json_is_function:
return resp.json()
else:
return resp.json
def finish_google_oauth2(request):
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.error('Error from google oauth2 login %r', request.GET)
return HttpResponse(status=400)
value, hmac_value = request.GET.get('state').split(':')
if hmac_value != google_oauth2_csrf(request, value):
raise Exception('Google oauth2 CSRF error')
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code != 200:
raise Exception('Could not convert google oauth2 code to access_token\r%r' % resp.text)
access_token = extract_json_response(resp)['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code != 200:
raise Exception('Google login failed making API call\r%r' % resp.text)
body = extract_json_response(resp)
try:
full_name = body['name']['formatted']
except KeyError:
# Only Google+ users have a formatted name. I am ignoring i18n here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
raise Exception('Google oauth2 account email not found %r' % body)
email_address = email['value']
user_profile = authenticate(username=email_address, use_dummy_backend=True)
return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
users = UserProfile.objects.filter(is_bot=False, is_active=True)
extra_context['direct_admins'] = sorted([u.email for u in users if u.is_admin()])
extra_context['direct_users'] = sorted([u.email for u in users if not u.is_admin()])
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email)
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
invitee_emails=REQ(validator=check_list(check_string))):
invitee_emails = set(invitee_emails)
streams = get_default_subs(user_profile)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
# Report bulk invites to internal Zulip.
invited = PreregistrationUser.objects.filter(referred_by=user_profile)
internal_message = "%s <`%s`> invited %d people to Zulip." % (
user_profile.full_name, user_profile.email, invited.count())
internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
user_profile.realm.domain, internal_message)
return json_success()
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def initial_invite_page(request):
user = request.user
# Only show the bulk-invite page for the first user in a realm
domain_count = len(UserProfile.objects.filter(realm=user.realm))
if domain_count > 1:
return redirect('zerver.views.home')
params = {'company_name': user.realm.domain}
if (user.realm.restricted_to_domain):
params['invite_suffix'] = user.realm.domain
return render_to_response('zerver/initial_invite_page.html', params,
context_instance=RequestContext(request))
@require_post
def logout_then_login(request, **kwargs):
return django_logout_then_login(request, kwargs)
def create_preregistration_user(email, request):
domain = request.session.get("domain")
if completely_open(domain):
# Clear the "domain" from the session object; it's no longer needed
request.session["domain"] = None
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(domain))
# MIT users who are not explicitly signing up for an open realm
# require special handling (They may already have an (inactive)
# account, for example)
if split_email_to_domain(email) == "mit.edu":
return MitUser.objects.get_or_create(email=email)[0]
return PreregistrationUser.objects.create(email=email)
def accounts_home_with_domain(request, domain):
if completely_open(domain):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["domain"] = domain
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
def send_registration_completion_email(email, request):
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'voyager': settings.VOYAGER}
Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context)
def accounts_home(request):
# First we populate request.session with a domain if
# there is a single realm, which is open.
# This is then used in HomepageForm and in creating a PreregistrationUser
unique_realm = get_unique_open_realm()
if unique_realm:
request.session['domain'] = unique_realm.domain
if request.method == 'POST':
form = create_homepage_form(request, user_info=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.quote_plus(email))
else:
form = create_homepage_form(request)
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': request.get_full_path},
context_instance=RequestContext(request))
def approximate_unread_count(user_profile):
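# Approximate the number of unread messages past the user's pointer,
# excluding muted topics, already-read messages and streams that are not in
# the home view; used to seed page_params['unread_count'] in home().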
not_in_home_view_recipients = [sub.recipient.id for sub in \
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
muted_topics = ujson.loads(user_profile.muted_topics)
# If muted_topics is empty, it looks like []. If it is non-empty, it looks
# like [[u'devel', u'test']]. We should switch to a consistent envelope, but
# until we do we still have both in the database.
if muted_topics:
muted_topics = muted_topics[0]
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
message__subject__in=muted_topics).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def home(request):
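# The main web-app view: register an event queue for the user, assemble the
# page_params object consumed by the client-side JavaScript, and render
# zerver/index.html.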
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
request._email = request.user.email
request.client = get_client("website")
narrow = []
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
assert(narrow_stream is not None)
assert(narrow_stream.is_public())
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
voyager = settings.VOYAGER,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
password_auth_enabled = password_auth_enabled(user_profile.realm),
have_initial_messages = user_has_messages,
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
email_dict = register_ret['email_dict'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
initial_servertime = time.time(), # Used for calculating relative presence age
fullname = user_profile.full_name,
email = user_profile.email,
domain = user_profile.realm.domain,
realm_name = register_ret['realm_name'],
realm_invite_required = register_ret['realm_invite_required'],
realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
enter_sends = user_profile.enter_sends,
left_side_userlist = register_ret['left_side_userlist'],
referrals = register_ret['referrals'],
realm_emoji = register_ret['realm_emoji'],
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
# Stream message notification settings:
stream_desktop_notifications_enabled =
user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled =
user_profile.enable_sounds,
enable_offline_email_notifications =
user_profile.enable_offline_email_notifications,
enable_offline_push_notifications =
user_profile.enable_offline_push_notifications,
twenty_four_hour_time = register_ret['twenty_four_hour_time'],
enable_digest_emails = user_profile.enable_digest_emails,
event_queue_id = register_ret['queue_id'],
last_event_id = register_ret['last_event_id'],
max_message_id = register_ret['max_message_id'],
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
staging = settings.ZULIP_COM_STAGING or settings.DEVELOPMENT,
alert_words = register_ret['alert_words'],
muted_topics = register_ret['muted_topics'],
realm_filters = register_ret['realm_filters'],
is_admin = user_profile.is_admin(),
can_create_streams = user_profile.can_create_streams(),
name_changes_disabled = name_changes_disabled(user_profile.realm),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
)
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_admin():
show_invites = False
product_name = "Zulip"
page_params['product_name'] = product_name
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META["HTTP_USER_AGENT"]),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE,
'show_invites': show_invites,
'is_admin': user_profile.is_admin(),
'show_webathena': user_profile.realm.domain == "mit.edu",
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
'product_name': product_name
},
context_instance=RequestContext(request))
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def desktop_home(request):
return HttpResponseRedirect(reverse('zerver.views.home'))
def is_buggy_ua(agent):
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
not "Mac" in agent
def get_pointer_backend(request, user_profile):
return json_success({'pointer': user_profile.pointer})
@authenticated_json_post_view
def json_update_pointer(request, user_profile):
return update_pointer_backend(request, user_profile)
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError("Invalid message ID")
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', 'zulipandroid'])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
return generate_random_token(32)
@authenticated_json_post_view
def json_get_profile(request, user_profile):
return get_profile_backend(request, user_profile)
# The order of creation of the various dictionaries is important.
# We filter on {userprofile,stream,subscription_recipient}_ids.
@require_realm_admin
def export(request, user_profile):
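# Dump this realm's public data as a dict of table name -> list of rows,
# built in dependency order: realm, user profiles, public streams, user
# messages, recipients, subscriptions, huddles, messages, then the small
# per-realm tables.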
if (Message.objects.filter(sender__realm=user_profile.realm).count() > 1000000 or
UserMessage.objects.filter(user_profile__realm=user_profile.realm).count() > 3000000):
return json_error("Realm has too much data for non-batched export.")
response = {}
response['zerver_realm'] = [model_to_dict(x)
for x in Realm.objects.select_related().filter(id=user_profile.realm.id)]
response['zerver_userprofile'] = [model_to_dict(x, exclude=["password", "api_key"])
for x in UserProfile.objects.select_related().filter(realm=user_profile.realm)]
userprofile_ids = set(userprofile["id"] for userprofile in response['zerver_userprofile'])
response['zerver_stream'] = [model_to_dict(x, exclude=["email_token"])
for x in Stream.objects.select_related().filter(realm=user_profile.realm, invite_only=False)]
stream_ids = set(x["id"] for x in response['zerver_stream'])
response['zerver_usermessage'] = [model_to_dict(x) for x in UserMessage.objects.select_related()
if x.user_profile_id in userprofile_ids]
user_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=1)
if x.type_id in userprofile_ids]
stream_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=2)
if x.type_id in stream_ids]
stream_recipient_ids = set(x["id"] for x in stream_recipients)
# only check for subscriptions to streams
response['zerver_subscription'] = [model_to_dict(x) for x in Subscription.objects.select_related()
if x.user_profile_id in userprofile_ids
and x.recipient_id in stream_recipient_ids]
subscription_recipient_ids = set(x["recipient"] for x in response['zerver_subscription'])
huddle_recipients = [model_to_dict(r)
for r in Recipient.objects.select_related().filter(type=3)
if r.type_id in subscription_recipient_ids]
huddle_ids = set(x["type_id"] for x in huddle_recipients)
response["zerver_recipient"] = user_recipients + stream_recipients + huddle_recipients
response['zerver_huddle'] = [model_to_dict(h)
for h in Huddle.objects.select_related()
if h.id in huddle_ids]
recipient_ids = set(x["id"] for x in response['zerver_recipient'])
response["zerver_message"] = [model_to_dict(m) for m in Message.objects.select_related()
if m.recipient_id in recipient_ids
and m.sender_id in userprofile_ids]
for (table, model) in [("defaultstream", DefaultStream),
("realmemoji", RealmEmoji),
("realmalias", RealmAlias),
("realmfilter", RealmFilter)]:
response["zerver_"+table] = [model_to_dict(x) for x in
model.objects.select_related().filter(realm_id=user_profile.realm.id)]
return json_success(response)
def get_profile_backend(request, user_profile):
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None)):
realm = user_profile.realm
data = {}
if name is not None and realm.name != name:
do_set_realm_name(realm, name)
data['name'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_restricted_to_domain(realm, restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_invite_required(realm, invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_invite_by_admins_only(realm, invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
return json_success(data)
@authenticated_json_post_view
@has_request_variables
def json_upload_file(request, user_profile):
if len(request.FILES) == 0:
return json_error("You must specify a file to upload")
if len(request.FILES) != 1:
return json_error("You may only upload one file at a time")
user_file = request.FILES.values()[0]
if (settings.MAX_FILE_UPLOAD_SIZE * 1024 * 1024) < user_file._get_size():
return json_error("File upload is larger than the allowed limit")
uri = upload_message_image_through_web_client(request, user_file, user_profile)
return json_success({'uri': uri})
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
@has_request_variables
def get_uploaded_file(request, realm_id, filename,
redir=REQ(validator=check_bool, default=True)):
if settings.LOCAL_UPLOADS_DIR is not None:
return HttpResponseForbidden() # Should have been served by nginx
user_profile = request.user
url_path = "%s/%s" % (realm_id, filename)
if realm_id == "unk":
realm_id = get_realm_for_filename(url_path)
if realm_id is None:
# File does not exist
return json_error("That file does not exist.", status=404)
# Internal users can access all uploads so we can receive attachments in cross-realm messages
if user_profile.realm.id == int(realm_id) or user_profile.realm.domain == 'zulip.com':
uri = get_signed_upload_url(url_path)
if redir:
return redirect(uri)
else:
return json_success({'uri': uri})
else:
return HttpResponseForbidden()
@require_realm_admin
@has_request_variables
def create_user_backend(request, user_profile, email=REQ, password=REQ,
full_name=REQ, short_name=REQ):
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error('Bad name or username')
# Check that the new user's email address belongs to the admin's realm
realm = user_profile.realm
domain = resolve_email_to_domain(email)
if realm.domain != domain:
return json_error("Email '%s' does not belong to domain '%s'" % (email, realm.domain))
try:
get_user_profile_by_email(email)
return json_error("Email '%s' already in use" % (email,))
except UserProfile.DoesNotExist:
pass
do_create_user(email, password, realm, full_name, short_name)
return json_success()
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ, password=REQ):
return_data = {}
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password, return_data=return_data)
else:
user_profile = authenticate(username=username, password=password)
if user_profile is None:
if return_data.get("valid_attestation") is True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error("This user is not registered; do so from a browser.", data={"reason": "unregistered"}, status=403)
return json_error("Your username or password is incorrect.", data={"reason": "incorrect_creds"}, status=403)
if not user_profile.is_active:
return json_error("Your account has been disabled.", data={"reason": "disabled"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
if password_auth_enabled(user_profile.realm) and not user_profile.check_password(password):
return json_error("Your username or password is incorrect.")
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
if not settings.GOOGLE_CLIENT_ID:
return json_error("GOOGLE_CLIENT_ID is not configured", status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
def get_status_list(requesting_user_profile):
return {'presences': get_status_dict(requesting_user_profile),
'server_timestamp': time.time()}
@has_request_variables
def update_active_status_backend(request, user_profile, status=REQ,
new_user_input=REQ(validator=check_bool, default=False)):
status_val = UserPresence.status_from_string(status)
if status_val is None:
raise JsonableError("Invalid presence status: %s" % (status,))
else:
update_user_presence(user_profile, request.client, now(), status_val,
new_user_input)
ret = get_status_list(user_profile)
if user_profile.realm.domain == "mit.edu":
try:
activity = UserActivity.objects.get(user_profile = user_profile,
query="get_events_backend",
client__name="zephyr_mirror")
ret['zephyr_mirror_active'] = \
(activity.last_visit.replace(tzinfo=None) >
datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
except UserActivity.DoesNotExist:
ret['zephyr_mirror_active'] = False
return json_success(ret)
@authenticated_json_post_view
def json_update_active_status(request, user_profile):
return update_active_status_backend(request, user_profile)
@authenticated_json_post_view
def json_get_active_statuses(request, user_profile):
return json_success(get_status_list(user_profile))
@authenticated_json_post_view
def json_events_register(request, user_profile):
return events_register_backend(request, user_profile)
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [('stream', default_stream.name)]
return narrow
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ):
if not email:
return json_error("No email address specified")
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error("Insufficient invites")
do_refer_friend(user_profile, email)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
do_set_muted_topics(user_profile, muted_topics)
return json_success()
def add_push_device_token(request, user_profile, token, kind, ios_app_id=None):
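# Register a push notification token for this user. Any existing row for the
# same token (possibly belonging to a previously logged-in user on the same
# device) is deleted first, then the token is created or refreshed.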
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token,
kind=kind,
ios_app_id=ios_app_id)
if not created:
token.last_updated = now()
token.save(update_fields=['last_updated'])
return json_success()
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ, appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
return add_push_device_token(request, user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
@has_request_variables
def add_android_reg_id(request, user_profile, token=REQ):
return add_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def remove_push_device_token(request, user_profile, token, kind):
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
try:
token = PushDeviceToken.objects.get(token=token, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
return json_error("Token does not exist")
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.APNS)
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def generate_204(request):
return HttpResponse(content=None, status=204)
def process_unsubscribe(token, type, unsubscribe_function):
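# Shared handler for one-click unsubscribe links: resolve the confirmation
# token to a user profile and apply the supplied unsubscribe_function to it.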
try:
confirmation = Confirmation.objects.get(confirmation_key=token)
except Confirmation.DoesNotExist:
return render_to_response('zerver/unsubscribe_link_error.html')
user_profile = confirmation.content_object
unsubscribe_function(user_profile)
return render_to_response('zerver/unsubscribe_success.html',
{"subscription_type": type,
"external_host": settings.EXTERNAL_HOST})
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
do_change_enable_digest_emails(user_profile, False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe)
}
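# For example, a request of type 'digest' resolves to do_digest_unsubscribe,
# which simply turns off enable_digest_emails for that user.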
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
if type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[type]
return process_unsubscribe(token, display_name, unsubscribe_function)
return render_to_response('zerver/unsubscribe_link_error.html', {},
context_instance=RequestContext(request))
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import iso8601
import netaddr
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova.objects import base
from nova.objects import utils
from nova.openstack.common import timeutils
from nova import test
class MyObj(base.NovaPersistentObject, base.NovaObject):
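# Minimal NovaObject used throughout these tests: it exercises typed fields,
# lazy attribute loading, change tracking and remotable instance/class methods.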
version = '1.5'
fields = {'foo': int,
'bar': str,
'missing': str,
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls()
obj.foo = 1
obj.bar = 'bar'
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self, context):
return 'polo'
@base.remotable
def update_test(self, context):
if context.project_id == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@base.remotable
def save(self, context):
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self, context):
self.bar = 'meow'
self.save()
self.foo = 42
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': str}
class TestMetaclass(test.TestCase):
def test_obj_tracking(self):
class NewBaseClass(object):
__metaclass__ = base.NovaObjectMetaclass
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Test1(NewBaseClass):
@staticmethod
def obj_name():
return 'fake1'
class Test2(NewBaseClass):
pass
class Test2v2(NewBaseClass):
@staticmethod
def obj_name():
return 'Test2'
expected = {'fake1': [Test1], 'Test2': [Test2, Test2v2]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Test1._obj_classes)
self.assertEqual(expected, Test2._obj_classes)
class TestUtils(test.TestCase):
def test_datetime_or_none(self):
naive_dt = timeutils.utcnow()
dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
self.assertEqual(utils.datetime_or_none(dt), dt)
self.assertEqual(utils.datetime_or_none(dt),
naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
microsecond=0))
self.assertEqual(utils.datetime_or_none(None), None)
self.assertRaises(ValueError, utils.datetime_or_none, 'foo')
def test_datetime_or_str_or_none(self):
dts = timeutils.isotime()
dt = timeutils.parse_isotime(dts)
self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
self.assertEqual(utils.datetime_or_str_or_none(None), None)
self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo')
def test_int_or_none(self):
self.assertEqual(utils.int_or_none(1), 1)
self.assertEqual(utils.int_or_none('1'), 1)
self.assertEqual(utils.int_or_none(None), None)
self.assertRaises(ValueError, utils.int_or_none, 'foo')
def test_str_or_none(self):
class Obj(object):
pass
self.assertEqual(utils.str_or_none('foo'), 'foo')
self.assertEqual(utils.str_or_none(1), '1')
self.assertEqual(utils.str_or_none(None), None)
def test_ip_or_none(self):
ip4 = netaddr.IPAddress('1.2.3.4', 4)
ip6 = netaddr.IPAddress('1::2', 6)
self.assertEqual(utils.ip_or_none(4)('1.2.3.4'), ip4)
self.assertEqual(utils.ip_or_none(6)('1::2'), ip6)
self.assertEqual(utils.ip_or_none(4)(None), None)
self.assertEqual(utils.ip_or_none(6)(None), None)
self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(4), 'foo')
self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(6), 'foo')
def test_list_of_strings_or_none(self):
self.assertEqual(utils.list_of_strings_or_none(None), None)
self.assertEqual(utils.list_of_strings_or_none(['1', '2']),
['1', '2'])
self.assertRaises(ValueError,
utils.list_of_strings_or_none, 'foo')
self.assertRaises(ValueError,
utils.list_of_strings_or_none, [1, 2])
self.assertRaises(ValueError,
utils.list_of_strings_or_none, ['1', 2])
def test_dict_of_strings_or_none(self):
self.assertEqual(utils.dict_of_strings_or_none(None), None)
self.assertEqual(utils.dict_of_strings_or_none({'1': '2'}),
{'1': '2'})
self.assertRaises(ValueError,
utils.dict_of_strings_or_none, {'1': '2', '3': 4})
self.assertRaises(ValueError,
utils.dict_of_strings_or_none, {'1': '2', 3: '4'})
self.assertRaises(ValueError,
utils.dict_of_strings_or_none, {'1': '2', 3: '4'})
self.assertRaises(ValueError,
utils.dict_of_strings_or_none, 'foo')
def test_dt_serializer(self):
class Obj(object):
foo = utils.dt_serializer('bar')
obj = Obj()
obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z')
self.assertEqual(obj.foo(), '1955-11-05T00:00:00Z')
obj.bar = None
self.assertEqual(obj.foo(), None)
obj.bar = 'foo'
self.assertRaises(AttributeError, obj.foo)
def test_dt_deserializer(self):
dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
self.assertEqual(utils.dt_deserializer(None, timeutils.isotime(dt)),
dt)
self.assertEqual(utils.dt_deserializer(None, None), None)
self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo')
def test_obj_to_primitive_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
pass
mylist = MyList()
mylist.objects = [1, 2, 3]
self.assertEqual([1, 2, 3], base.obj_to_primitive(mylist))
def test_obj_to_primitive_dict(self):
myobj = MyObj()
myobj.foo = 1
myobj.bar = 'foo'
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
pass
mylist = MyList()
mylist.objects = [MyObj(), MyObj()]
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
pass
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param test: The TestCase doing the comparison
:param obj: The NovaObject to examine
:param db_obj: The dict-like database object to use as reference
:param subs: A dict of objkey=dbkey field substitutions
:param allow_missing: A list of fields that may not be in db_obj
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = obj[key]
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
test.assertEqual(db_val, obj_val)
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.context = context.RequestContext('fake-user', 'fake-project')
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
base.NovaObject.indirection_api = None
def assertRemotes(self):
self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
def assertRemotes(self):
self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
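# NovaObject behaviour tests (hydration, change tracking, dict-style access,
# remoted calls), written as a plain mixin so they can run against both the
# local and remote base test cases.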
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
obj = MyObj()
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj()
obj.foo = 1
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_object_dict_syntax(self):
obj = MyObj()
obj.foo = 123
obj.bar = 'bar'
self.assertEqual(obj['foo'], 123)
self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': int}
obj = Foo()
# NOTE(danms): Can't use assertRaisesRegexp() because of py26
raised = False
try:
obj.foobar
except NotImplementedError as ex:
raised = True
self.assertTrue(raised)
self.assertTrue('foobar' in str(ex))
def test_loaded_in_primitive(self):
obj = MyObj()
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj()
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertTrue('nova_object.changes' in primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext('bar', 'alternate')
obj = MyObj.query(ctxt1)
obj.update_test(ctxt2)
self.assertEqual(obj.bar, 'alternate-context')
self.assertRemotes()
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj.update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.update_test(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save(self.context)
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh(self.context)
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertRemotes()
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj.update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj()
obj.created_at = dt
obj.updated_at = dt
obj.deleted_at = None
obj.deleted = False
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_contains(self):
obj = MyObj()
self.assertFalse('foo' in obj)
obj.foo = 1
self.assertTrue('foo' in obj)
self.assertFalse('does_not_exist' in obj)
def test_obj_attr_is_set(self):
obj = MyObj()
obj.foo = 1
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj()
obj.foo = 1
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = ['foo', 'bar', 'missing'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': int}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
class TestObject(_LocalTest, _TestObject):
pass
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.version = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.version = '1.6'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.version = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
self.assertRemotes()
class TestObjectListBase(test.TestCase):
def test_list_like_operations(self):
class Foo(base.ObjectListBase, base.NovaObject):
pass
objlist = Foo()
objlist._context = 'foo'
objlist.objects = [1, 2, 3]
self.assertEqual(list(objlist), objlist.objects)
self.assertEqual(len(objlist), 3)
self.assertIn(2, objlist)
self.assertEqual(list(objlist[:1]), [1])
self.assertEqual(objlist[:1]._context, 'foo')
self.assertEqual(objlist[2], 3)
self.assertEqual(objlist.count(1), 1)
self.assertEqual(objlist.index(2), 1)
def test_serialization(self):
class Foo(base.ObjectListBase, base.NovaObject):
pass
class Bar(base.NovaObject):
fields = {'foo': str}
obj = Foo()
obj.objects = []
for i in 'abc':
bar = Bar()
bar.foo = i
obj.objects.append(bar)
obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
self.assertFalse(obj is obj2)
self.assertEqual([x.foo for x in obj],
[y.foo for y in obj2])
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertTrue('nova_object.name' in primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertTrue(isinstance(obj2, MyObj))
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertFalse(isinstance(item, base.NovaObject))
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertTrue(isinstance(item, MyObj))
|
|
#TODO add bitstamp, CampBx
"""
Active Markets
NOTE:
-> regarding the asks/bids list orientation:
- bids: the next imminent bid in the orderbook comes first, then falling prices
- asks: the next imminent ask in the orderbook comes first, then rising prices
-> on HTML parsing:
1. get absolute path to the elements in Firefox with firebug
get_bids = '/html/body/div/div[2]/div/section/div/div/table/tr/td/table/tr[2]/td[2]/strong//text()'
2. REMOVE the "/tbody/" tags from get_bids - the browser inserts them, the server never sends them
3. loop over fields
4. raise an error if the values look strange (it means the HTML page layout changed)
"""
import sys
import urllib2
import string
import json
import lxml.html
from lxml import etree
import re
BASE_CURRENCY = "USD"
DEPOSIT_BITINSTANT = 0.01 #%
DEPOSIT_MtGoxCode = 0
DEPOSIT_LIBERTY_RESERVE = 1 #%
DEPOSIT_PAXUM = 0
WITHDRAWAL_LIBERTY_RESERVE = 1 #%
WITHDRAWAL_MtGoxCode = 0
WITHDRAWAL_PAXUM = 0
WITHDRAWAL_PAYPAL = 5 #%
def errorFunction(sys_error, generalMsg):
sys.stderr.write("\n" + generalMsg + "%s\n" % str(sys_error))
def getHtml(url):
try:
website = urllib2.urlopen(url)
return website
except urllib2.HTTPError, e:
sys.stderr.write("\nCannot retrieve URL: %s\n" % url)
sys.stderr.write("\nHTTP Error Code: %s" % str(e.code))
except urllib2.URLError, e:
sys.stderr.write("\nCannot retrieve URL: %s\n" % url)
sys.stderr.write("\nHTTP Error Code: %s" % str(e.reason[1]))
def getExchangeRates(sourceCurrency):
try:
if sourceCurrency != BASE_CURRENCY:
baseUrl = 'http://www.google.com/ig/calculator?hl=en&q='
website = getHtml(baseUrl + '1' + sourceCurrency + '=?' + BASE_CURRENCY)
website_html = website.read()
reObject = re.search(".*rhs: \"(\d\.\d*)", website_html)
isInBASE_CURRENCY = reObject.group(1)
return float(isInBASE_CURRENCY)
else:
return 1
except:
e = sys.exc_info()[1]
errorFunction(e, "<getExchangeRates>: ")
sys.stderr.write("\nArg: ")
sys.stderr.write(sourceCurrency)
sys.exit()
#expecting list of the form [[price1, amount1],[price2, amount2], ...]
def convertToBASE_CURRENCY(myObject, sourceCurrency):
if sourceCurrency != BASE_CURRENCY:
try:
isInBASE_CURRENCY = getExchangeRates(sourceCurrency)
lenBids = len(myObject.bids)
lenAsks = len(myObject.asks)
for i in xrange(lenBids):
myObject.bids[i][0] = myObject.bids[i][0] * isInBASE_CURRENCY
for i in xrange(lenAsks):
myObject.asks[i][0] = myObject.asks[i][0] * isInBASE_CURRENCY
except:
e = sys.exc_info()[1]
errorFunction(e, "<convertToBASE_CURRENCY>: ")
sys.exit()
def getPriceFromString(stringData, id_start_tag, offset_start, id_end_tag, offset_end):
try:
id_price_field_position = string.find(stringData, id_start_tag)
price_field_starts = id_price_field_position + offset_start
price_field_could_end = id_price_field_position + offset_end
curr_price_field = stringData[price_field_starts: price_field_could_end]
#print "field:", curr_price_field
id_price_field_end_position = string.find(curr_price_field, id_end_tag)
if id_price_field_end_position > 0:
myPriceString = stringData[price_field_starts: price_field_starts + id_price_field_end_position]
else:
myPriceString = stringData[price_field_starts:]
#print "priceString:", myPriceString
#check if decimals separated by comma
comma_position = string.find(myPriceString, ",")
if comma_position > 0:
head = myPriceString[0: comma_position]
tail = myPriceString[comma_position + 1:]
myPriceString = head + "." + tail
curr_price = float(myPriceString)
return curr_price
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getCurrentPriceFromString>: ")
sys.exit()
"""
falling sequence for bids
rising sequence for asks
"""
def checkOrientation(myObject):
try:
myObject.bids = sorted(myObject.bids, key=lambda item: item[0], reverse=True) #largest first
myObject.asks = sorted(myObject.asks, key=lambda item: item[0]) #smallest first
except:
e = sys.exc_info()[1]
errorFunction(e, "<checkOrientation>: ")
#format of input is [[price1, amount1], [price2, amount2]]
def jsonGetBidAskFields(myObject):
try:
website = getHtml(myObject.baseUrl + myObject.currency)
website_html = website.read()
data = json.loads(website_html)
asks_string = data['asks']
bids_string = data['bids']
#convert string to float
lenAsks = len(asks_string)
lenBids = len(bids_string)
for i in xrange(lenAsks):
price = float(asks_string[i][0])
amount = float(asks_string[i][1])
myObject.asks.append([price, amount])
for i in xrange(lenBids):
price = float(bids_string[i][0])
amount = float(bids_string[i][1])
myObject.bids.append([price, amount])
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
problemMaker = myObject.__class__.__name__ #myObject is an instance, so __name__ lives on its class
errorFunction(e, "<jsonGetBidAskFields>: " + problemMaker)
## exchanges
#####################################################
#####################################################
def getBCHTML(myObject):
try:
website = getHtml(myObject.baseUrl + myObject.currency)
website_html = website.read()
html = lxml.html.fromstring(website_html)
#fill myObject.bids/asks
bids_html = '/html/body/div/div[3]/div[2]/table/tr[*]/td//text()'
asks_html = '/html/body/div/div[3]/div[3]/table/tr[*]/td//text()'
bids_data = html.xpath(bids_html) #[u'6.2600 EUR\xa0\xa0', '0.892 BTC', u'5.583 EUR\xa0\xa0', u'6.2500 EUR\xa0\xa0', '1.500 BTC', u'9.375
#[price, vol, total]
asks_data = html.xpath(asks_html)
#get bids
index_pr = 0
index_vol = 1
while index_pr < len(bids_data) - 2:
field_price = bids_data[index_pr]
index_pr_stop = string.find(field_price, "EUR")
price_string = field_price[0:index_pr_stop].replace(',', '')
price = float(price_string)
index_pr += 3
field_vol = bids_data[index_vol]
index_vol_stop = string.find(field_vol, "BTC")
vol_string = field_vol[0:index_vol_stop].replace(',', '')
vol = float(vol_string)
index_vol += 3
myObject.bids.append([price, vol])
#get asks
index_pr = 0
index_vol = 1
while index_pr < len(asks_data) - 2:
field_price = asks_data[index_pr]
index_pr_stop = string.find(field_price, "EUR")
price_string = field_price[0:index_pr_stop].replace(',', '')
price = float(price_string)
index_pr += 3
field_vol = asks_data[index_vol]
index_vol_stop = string.find(field_vol, "BTC")
vol_string = field_vol[0:index_vol_stop].replace(',', '')
vol = float(vol_string)
index_vol += 3
myObject.asks.append([price, vol])
#check if orientation right, else list.reverse
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getBCHTML>: ")
sys.exit()
class BitcoinCentral:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005}
feeWithdrawal = {"BTC": 0.01}
feeTransaction = 0 #free
feeDepositDiffCurrency = 0 #0.25 %
limitsFunds = 1000000 #no limits
baseUrl = "https://bitcoin-central.net/order_book"
class bcEUR(BitcoinCentral):
def __init__(self):
self.asks = []
self.bids = []
self.currency = ""
getBCHTML(self)
convertToBASE_CURRENCY(self, "EUR")
""" #escrow, not exchange
def getbtcdeEUROrderbook(myObject):
try:
orderbookAsks = "fleft w450"
orderbookBids = "fright w450"
website = getHtml(myObject.baseUrl)
website_html = website.read()
html = lxml.html.fromstring(website_html)
#fill myObject.bids
get_bids = '//article[@class = "' + orderbookBids + '"]//table//tr//node()[text()]'
fields = html.xpath(get_bids)
lenFields = len(fields)
for i in xrange(4, lenFields - 2, 7):
amountEl = fields[i + 1]
priceEl = fields[i + 2]
#format: 5,04 euro_symbol. Ignoring the euro_symbol:
decoded_from_utf = priceEl.text.encode('ascii', 'ignore')
amount = getPriceFromString(amountEl.text, "", 0, "(", 100)
price = getPriceFromString(decoded_from_utf, "", 0, " ", 20)
myObject.bids.append([price, amount])
#fill myObject.asks
get_asks = '//article[@class = "' + orderbookAsks + '"]//table//tr//node()[text()]'
fields = html.xpath(get_asks)
lenFields = len(fields)
for i in xrange(4, lenFields - 2, 7):
amountEl = fields[i + 1]
priceEl = fields[i + 2]
#format: 5,04 euro_symbol. Ignoring the euro_symbol:
decoded_from_utf = priceEl.text.encode('ascii', 'ignore')
amount = getPriceFromString(amountEl.text, "", 0, "(", 100)
price = getPriceFromString(decoded_from_utf, "", 0, " ", 20)
myObject.asks.append([price, amount])
#check if orientation right, else list.reverse
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getbtcdeEUROrderbook>: ")
sys.exit()
class BitcoinDe:
transferDuration = 3 #number of confirmations
feeTransaction = 0.005
feeWithdrawal = 0.01 #BTC's
feeDepositDiffCurrency = 0 #%
limitsFunds = 0 #$
baseUrl = "https://www.bitcoin.de/en/market"
class btcdeEUR(BitcoinDe):
def __init__(self):
self.asks = []
self.bids = []
getbtcdeEUROrderbook(self)
convertToBASE_CURRENCY(self, "EUR")
"""
#Bitmarket #escrow, not exchange
#https://btc-e.com/page/2
class Btce:
transferDuration = 1 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"LibertyReserve": DEPOSIT_LIBERTY_RESERVE,
"Paxum": DEPOSIT_PAXUM}
feeWithdrawal = {"BTC": 0.01, #BTC's
"LibertyReserve": WITHDRAWAL_LIBERTY_RESERVE,
"Paxum": WITHDRAWAL_PAXUM,
"PayPal": WITHDRAWAL_PAYPAL}
feeTransaction = 0.002 #1 = 100%
feeDepositDiffCurrency = 0 #%
limitsFundsBTC = 501 #??
baseUrl = "https://btc-e.com/api/"
class btceUSD(Btce):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "2/1/depth"
jsonGetBidAskFields(self)
#https://bitnz.com/fees
class Bitnz:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005}
feeWithdrawal = {"BTC": 0.0005}
feeTransaction = 0.005 #0.5 %
feeDepositDiffCurrency = 0 #NZD free
limitsFunds = 1000 #$ #??
baseUrl = "https://bitnz.com/api/0/orderbook"
class bitNZ(Bitnz):
def __init__(self):
self.asks = []
self.bids = []
self.currency = ""
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "NZD")
#API: view funds, view orders, make order, cancel order #https://btcex.com/site/page/api
#https://btcex.com/site/page/rules?language=en
class BTCex:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"LibertyReserve": DEPOSIT_LIBERTY_RESERVE}
feeWithdrawal = {"BTC": 0.01, #BTC's
"LibertyReserve": WITHDRAWAL_LIBERTY_RESERVE}
feeTransaction = 0.0055
feeDepositDiffCurrency = 0 #%
limitsFundsBTC = 0
accBTCaddr = "13UfCmQeJPetKPaMZCbcrMtpr3nQzr1jBy"
baseUrl = "https://btcex.com/site/orderbooksjson/"
class btcexUSD(BTCex):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "id/2"
jsonGetBidAskFields(self)
#https://www.cavirtex.com/home
def getVirtexOrderbook(myObject):
try:
website = getHtml(myObject.baseUrl)
website_html = website.read()
html = lxml.html.fromstring(website_html)
orderbookBids = "orderbook_buy"
orderbookAsks = "orderbook_sell"
"""
do bids first
"""
#quirky HTML: the most recent row carries an extra </b> tag, so it needs its own XPath
get_most_recent = '//div[contains(@id, "' + orderbookBids + '")]//table/tr/td/node()[text()]'
get_others = '//div[contains(@id, "' + orderbookBids + '")]//table/tr/node()[text()]'
fields_most_recent = html.xpath(get_most_recent)
fields_others = html.xpath(get_others)
#first field
amountEl = fields_most_recent[1]
priceEl = fields_most_recent[2]
price = float(priceEl.text)
amount = getPriceFromString(amountEl.text, "", 0, "/", 15)
myObject.bids.append([price, amount])
#0: "Created", 1: "Amount", 2: "Price", 3: "Value"; so we're starting from 4
docLen = len(fields_others)
for i in xrange(4, docLen - 2, 4):
amountEl = fields_others[i + 1]
priceEl = fields_others[i + 2]
price = float(priceEl.text)
amount = getPriceFromString(amountEl.text, "", 0, "/", 15)
myObject.bids.append([price, amount])
"""
now the same for asks
"""
get_most_recent = '//div[contains(@id, "' + orderbookAsks + '")]//table/tr/td/node()[text()]'
get_others = '//div[contains(@id, "' + orderbookAsks + '")]//table/tr/node()[text()]'
fields_most_recent = html.xpath(get_most_recent)
fields_others = html.xpath(get_others)
#first field
amountEl = fields_most_recent[1]
priceEl = fields_most_recent[2]
price = float(priceEl.text)
amount = getPriceFromString(amountEl.text, "", 0, "/", 15)
myObject.asks.append([price, amount])
#other fields
docLen = len(fields_others)
for i in xrange(4, docLen - 2, 4):
amountEl = fields_others[i + 1]
priceEl = fields_others[i + 2]
price = float(priceEl.text)
amount = getPriceFromString(amountEl.text, "", 0, "/", 15)
myObject.asks.append([price, amount])
#check if orientation right, else list.reverse
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getVirtexOrderbook>: ")
sys.exit()
class CaVirtex:
transferDuration = 6 #number of confirmations
feeDeposit = {"BTC": 0.0005}
feeWithdrawal = {"BTC": 0.0005}
feeTransaction = 0.0059
feeDepositDiffCurrency = 0 #%
limitsFunds = 5000 #$
accBTCaddr = "1NC7thtNC3o76L68zEmuwxdjotTrRC1Vch"
baseUrl = "https://www.cavirtex.com/orderbook"
class virtexCAD(CaVirtex):
def __init__(self):
self.asks = []
self.bids = []
getVirtexOrderbook(self)
convertToBASE_CURRENCY(self, "CAD")
#https://cryptoxchange.com/Plan/PlanSelection
#trading API: https://cryptoxchange.com/t/cryptoapi
#is like a dictionary: {price:2332, amount:87689}
def jsonGetCryptoBidAskFields(myObject, currency):
try:
website = getHtml(myObject.baseUrl + currency)
website_html = website.read()
data = json.loads(website_html)
asks_string = data['asks']
bids_string = data['bids']
#fill asks
for dictionary in asks_string:
price_string = dictionary['price']
amount_string = dictionary['amount']
price = float(price_string)
amount = float(amount_string)
myObject.asks.append([price, amount])
#fill bids
for dictionary in bids_string:
price_string = dictionary['price']
amount_string = dictionary['amount']
price = float(price_string)
amount = float(amount_string)
myObject.bids.append([price, amount])
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><jsonGetCryptoBidAskFields>: ")
sys.exit()
class CryptoXchange:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"Mt.Gox code": 0.006, #%
"Bitinstant_LibertyReserve": DEPOSIT_BITINSTANT,
"Bitinstant_Paxum": DEPOSIT_BITINSTANT}
feeWithdrawal = {"BTC": 0.0005,
"Mt.Gox code": 0.006} #%
feeTransaction = 0.005
feeDepositDiffCurrency = 0 #%
limitsFunds = 560 #$
accBTCaddr = "14bMFCJ2C11bVxdrCkRZZevbBtMVB7Smtg"
baseUrl = "https://cryptoxchange.com/api/v0/"
currencyUSD = "data/BTCUSD/orderbook.json"
currencyAUD = "data/BTCAUD/orderbook.json"
currencyBTCNMC = "data/BTCNMC/orderbook.json"
currencyBTCLTC = "data/BTCLTC/orderbook.json"
class cryptoxUSD(CryptoXchange):
def __init__(self):
self.asks = []
self.bids = []
jsonGetCryptoBidAskFields(self, self.currencyUSD)
class cryptoxAUD(CryptoXchange):
def __init__(self):
self.asks = []
self.bids = []
jsonGetCryptoBidAskFields(self, self.currencyAUD)
convertToBASE_CURRENCY(self, "AUD")
#https://intersango.com/fees.php
class Intersango:
transferDuration = 5 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"Paxum": DEPOSIT_PAXUM}
feeWithdrawal = {"BTC": 0.0005,
"Paxum": WITHDRAWAL_PAXUM} #%
feeTransaction = 0.0095 #for takers = matched trade
feeDepositDiffCurrency = 0 #%
limitsFunds = 0 #$
accBTCaddr = "1LVsQDYiMxKJ9FZzM8bSEWdqYM94UmTF7h"
baseUrl = "https://intersango.com/api/depth.php/"
"""
currency_pair_id is an optional GET parameter to all data api calls
1 = BTC:GBP
2 = BTC:EUR
3 = BTC:USD
4 = BTC:PLN
"""
class intrsngGBP(Intersango):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?currency_pair_id=1"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "GBP")
class intrsngEUR(Intersango):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?currency_pair_id=2"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "EUR")
class intrsngUSD(Intersango):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?currency_pair_id=3"
jsonGetBidAskFields(self)
class intrsngPLN(Intersango):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?currency_pair_id=4"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "PLN")
#https://imcex.com/
def getImcexHTML(myObject):
try:
website = getHtml(myObject.baseUrl + myObject.currency)
website_html = website.read()
html = lxml.html.fromstring(website_html)
#fill myObject.bids
#get_bids = '//article[@class = "' + orderbookBids + '"]//table//tr//node()[text()]'
vol_html = '/html/body/div/div[2]/div/section/div/div/table/tr/td/table/tr[*]/td[2]/strong//text()'
price_html = ''
vol_list = html.xpath(vol_html)
#since vol_list returns vol of bids and asks
bids_vol = vol_list[: len(vol_list) / 2]
asks_vol = vol_list[len(vol_list) / 2:]
startpos = 2
for index in xrange(startpos, 22):
price_html = '/html/body/div/div[2]/div/section/div/div/table/tr/td/table/tr[' + str(index) + ']/td[4]//text()'
price_list = html.xpath(price_html) #['\n4.1400 LREUR\n', '\n8.9900 ', 'LREUR', '\n']
price_bid = float(price_list[0][1:7])
price_ask = float(price_list[1][1:7])
myObject.bids.append([price_bid, float(bids_vol[index - startpos])])
myObject.asks.append([price_ask, float(asks_vol[index - startpos])])
#check if orientation right, else list.reverse
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getImcexHTML>: ")
sys.exit()
class Imcex:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005}
feeWithdrawal = {"BTC": 0.0005}
feeTransaction = 0.001
feeDepositDiffCurrency = 0
limitsFunds = 1000 #$ #??
accBTCaddr = ""
baseUrl = "https://imcex.com/en/charts/BTC/"
class imcexEUR(Imcex):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "LREUR"
getImcexHTML(self)
convertToBASE_CURRENCY(self, "EUR")
class imcexUSD(Imcex):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "LRUSD"
getImcexHTML(self)
convertToBASE_CURRENCY(self, "USD")
#http://www.mercadobitcoin.com.br/taxas/
def getMrcdHTML(myObject):
try:
website = getHtml(myObject.baseUrl + myObject.currency)
website_html = website.read()
html = lxml.html.fromstring(website_html)
#fill myObject.bids/asks
vol_html = '/html/body/div[3]/div[2]/table/tr/td/div/div[2]/table//text()'
data_string = html.xpath(vol_html) #['Volume (BTC)', u'Pre\xe7o (R$)', '3,61005599', '16,48000', '15,32411000', '16,11012', '130,00000000', '16,11010', '0,40540000', '15,75000', '12,00000000', '15,60011', '8,19583300', '15,60000', '4,00000000', '15,15000', '0,10000000', '15,00010', '30,88633790', '14,87100', '0,10000000', '14,50000', 'Volume (BTC)', u'Pre\xe7o (R$)', '85,30000000', '16,49000',
#check where data for bids/asks begins/ends
index_start_bidask_field = [] #positions of the 'Volume (BTC)' headers: [0] bids table, [1] asks table, any further header marks the end
for index, item in enumerate(data_string):
if item == 'Volume (BTC)':
index_start_bidask_field.append(index)
"""
print "STOPS: ", index_start_bidask_field
"""
index = 0
bids_vol_index = index_start_bidask_field[0] + 2
bids_price_index = 0
asks_vol_index = index_start_bidask_field[1] + 2
asks_price_index = 0
end_index = index_start_bidask_field[2] if len(index_start_bidask_field) > 2 else len(data_string) #fall back to the end of the data if there is no third header
while bids_vol_index < asks_vol_index - 2: #since we take two fields at the time
bids_price_index = bids_vol_index + 1 #the price field directly follows the volume field
vol = float(data_string[bids_vol_index].replace(',', '.'))
price = float(data_string[bids_price_index].replace(',', '.'))
myObject.bids.append([price, vol])
bids_vol_index += 2 #advance to the next volume field (two entries per row)
"""
print "\nbids_", asks_vol_index
print "vol: ", vol
print "pr: ", price
"""
while asks_vol_index < end_index - 2:
asks_price_index = asks_vol_index + 1
vol = float(data_string[asks_vol_index].replace(',', '.'))
price = float(data_string[asks_price_index].replace(',', '.'))
myObject.asks.append([price, vol])
asks_vol_index += 2
"""
print "asks_", asks_vol_index
print "\nvol: ", data_string[asks_vol_index]
print "pr: ", data_string[asks_price_index]
"""
#check if orientation right, else list.reverse
checkOrientation(myObject)
except:
e = sys.exc_info()[1]
errorFunction(e, "<EXITING><getMrcdHTML>: ")
sys.exit()
class Mrcd:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005}
feeWithdrawal = {"BTC": 0.0005}
feeTransaction = 0.006 #0.6 %
feeDepositDiffCurrency = 0.016
limitsFunds = 0
accBTCaddr = ""
baseUrl = "http://www.mercadobitcoin.com.br/mercado/"
class mrcdBRL(Mrcd):
def __init__(self):
self.asks = []
self.bids = []
self.currency = ""
getMrcdHTML(self)
convertToBASE_CURRENCY(self, "BRL")
#https://mtgox.com/fee-schedule
class Mtgox:
transferDuration = 0 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"Mt.Gox code": DEPOSIT_MtGoxCode, #Mt.Gox redeem code via Bitinstant
"LibertyReserve": DEPOSIT_LIBERTY_RESERVE,
"Paxum": DEPOSIT_PAXUM}
feeWithdrawal = {"BTC": 0.0005,
"Mt.Gox code": WITHDRAWAL_MtGoxCode,
"Paxum": WITHDRAWAL_PAXUM}
feeTransaction = 0.006 #0.6 %
feeDepositDiffCurrency = 0.0025 #0.25 %
limitsFunds = 10000 #$
baseUrl = "https://mtgox.com/api/0/data/getDepth.php"
class mtgoxAUD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=AUD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "AUD")
class mtgoxCAD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=CAD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "CAD")
class mtgoxCHF(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=CHF"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "CHF")
class mtgoxCNY(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=CNY"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "CNY")
class mtgoxDKK(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=DKK"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "DKK")
class mtgoxEUR(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=EUR"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "EUR")
class mtgoxGBP(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=GBP"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "GBP")
class mtgoxHKD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=HKD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "HKD")
class mtgoxJPY(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=JPY"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "JPY")
class mtgoxNZD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=NZD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "NZD")
class mtgoxPLN(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=PLN"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "PLN")
class mtgoxRUB(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=RUB"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "RUB")
class mtgoxUSD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=USD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "USD")
class mtgoxSEK(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=SEK"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "SEK")
class mtgoxSGD(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=SGD"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "SGD")
class mtgoxTHB(Mtgox):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?Currency=THB"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "THB")
""" DEAD!
class TradeHill:
transferDuration = 5 #10min-1hr, number of confirmations
feeDeposit = {"Bitinstant_MtGox":DEPOSIT_BITINSTANT, #Mt.Gox redeem code via Bitinstant
"Bitinstant_LibertyReserve":DEPOSIT_LIBERTY_RESERVE,
"Paxum":DEPOSIT_PAXUM}
feeWithdrawal = {"Paxum":WITHDRAWAL_PAXUM}
feeTransaction = 0
feeDepositDiffCurrency = 0 #%
limitsFunds = 0 #$
accBTCaddr = "1ASqVSG9dpCDACpRyMap7sSAXjqsLxjLbE"
baseUrl = "https://api.tradehill.com/APIv1/"
currencyUSD = "USD/Orderbook"
currencyEUR = "EUR/Orderbook"
class thUSD(TradeHill):
def __init__(self):
self.bids = []
self.asks = []
jsonGetBidAskFields(self, self.currencyUSD)
#orderbook isn't sorted
self.bids = sorted(self.bids, key = lambda field: field[0])
self.asks = sorted(self.asks, key = lambda field: field[0])
class thEUR(TradeHill):
def __init__(self):
self.bids = []
self.asks = []
jsonGetBidAskFields(self, self.currencyEUR)
#orderbook isn't sorted
self.bids = sorted(self.bids, key = lambda field: field[0])
self.asks = sorted(self.asks, key = lambda field: field[0])
convertToBASE_CURRENCY(self, "EUR")
"""
#https://vircurex.com/welcome/help?locale=en
#trading API: https://vircurex.com/welcome/api?locale=en
class Vicurex:
transferDuration = 6 #number of confirmations
feeDeposit = {"BTC": 0.0005,
"LibertyReserve": DEPOSIT_LIBERTY_RESERVE}
feeWithdrawal = {"BTC": 0.01,
"LibertyReserve": WITHDRAWAL_LIBERTY_RESERVE}
feeTransaction = 0.005
feeWithdrawal = 0.01 #BTC
feeDepositDiffCurrency = 0
limitsFunds = 0
accBTCaddr = "17JXELyTiq7XtJZpe8P61whwGCHMxBtWUH"
baseUrl = "https://vircurex.com/api/orderbook.json"
class vcxEUR(Vicurex):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?base=BTC&alt=eur"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "EUR")
class vcxUSD(Vicurex):
def __init__(self):
self.asks = []
self.bids = []
self.currency = "?base=BTC&alt=usd"
jsonGetBidAskFields(self)
convertToBASE_CURRENCY(self, "USD")
|
|
#!/usr/bin/env python
import sys
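#Hypothetical example of the input format these parsers expect, inferred from the
#parser functions below rather than taken from a real input file (all port, pin,
#event and workflow names are made up):
#
#   events { button_pressed -> handle_press; }
#   condition idle { PORTB:0 low; }
#   on idle emit button_pressed;
#   workflow blink { led_on -> wait -> led_off; }
#   configure { input { PORTB:0; } output { PORTD:5; } peripherals { uart on; } }
#   run { blink; }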
class Feed( object ):
def __init__( self, entries ):
self.entries = entries
self.pos = 0
def peek( self ):
return self.entries[self.pos]
def skip( self ):
self.pos += 1
def next( self ):
out = self.entries[self.pos]
self.pos += 1
return out
def add( self, entry ):
self.entries.append( entry )
def empty( self ):
return not ( self.pos < len( self.entries ) )
def EventsParser( feed ):
events_feed = Feed([])
entry = feed.next()
if entry != "{":
raise SyntaxError
entry = feed.next()
while entry != "}":
events_feed.add( entry )
entry = feed.next()
out = {}
while not events_feed.empty():
event_name = events_feed.next()
arrow = events_feed.next()
func = events_feed.next()
if arrow != "->":
raise SyntaxError
if not func.endswith(";"):
if events_feed.peek() != ';':
raise SyntaxError
else:
events_feed.skip()
func = func.replace( ";", "" )
out[event_name] = func
#print out
return out
def ConditionParser( feed ):
cond_feed = Feed([])
name = feed.next()
entry = feed.next()
if entry != "{":
raise SyntaxError
entry = feed.next()
while entry != "}":
cond_feed.add( entry )
entry = feed.next()
out = {}
while not cond_feed.empty():
addr = cond_feed.next()
state = cond_feed.next()
if not state.endswith( ";" ):
if cond_feed.peek() != ";":
raise SyntaxError
else:
cond_feed.skip()
state = state.replace( ";", "" )
(port, pin) = addr.split( ":" )
if port not in out:
out[port] = {}
if state not in out[port]:
out[port][state] = set()
out[port][state].add( pin )
return (name, out)
def OnEmitParser( feed ):
name = feed.next()
emit = feed.next()
if emit != "emit":
raise SyntaxError
events = set()
event = feed.next()
events.add( event.replace( ";", "" ) )
#print "event", event
if not event.endswith( ";" ):
event = feed.next()
#print "event", event
if event != ";":
events.add( event.replace( ";", "" ) )
return (name, events)
def WorkflowParser( feed ):
out = []
name = feed.next()
wf_feed = Feed([])
entry = feed.next()
if entry != "{":
raise SyntaxError
entry = feed.next()
while entry != "}":
wf_feed.add( entry )
entry = feed.next()
while not wf_feed.empty():
step = wf_feed.next()
if step == ";":
break
elif step.endswith( ";" ):
if not wf_feed.empty():
raise SyntaxError
out.append( step.replace( ";", "" ) )
break
out.append( step.replace( ";", "" ) )
arrow = wf_feed.next()
if arrow != "->":
raise SyntaxError
else:
continue
return (name, out)
def InputConfigParser( feed ):
out = {}
entry = feed.next()
if entry != "{":
raise SyntaxError
while True:
entry = feed.next()
if entry == "}":
break
if not entry.endswith( ";" ):
if entry.peek() != ";":
raise SyntaxError
else:
feed.skip()
(port, pin) = entry.split(":")
if port not in out:
out[port] = set()
out[port].add( pin.replace( ";", "" ) )
return out
def OutputConfigParser( feed ):
out = {}
entry = feed.next()
if entry != "{":
raise SyntaxError
while True:
entry = feed.next()
if entry == "}":
break
if not entry.endswith( ";" ):
if feed.peek() != ";":
raise SyntaxError
feed.skip()
(port, pin) = entry.split(":")
if port not in out:
out[port] = set()
out[port].add( pin.replace( ";", "") )
return out
def PeripheralsConfigParser( feed ):
out = {}
entry = feed.next()
if entry != "{":
raise SyntaxError
while True:
entry = feed.next()
if entry == "}":
break
value = feed.next()
if not value.endswith( ";" ):
if feed.peek() != ";":
raise SyntaxError
else:
feed.skip()
out[entry] = value.replace( ";", "" ) == "on"
return out
def ConfigureParser( feed ):
out = {}
c_feed = Feed([])
entry = feed.next()
if entry != "{":
raise SyntaxError
cnt = 1
while cnt > 0:
entry = feed.next()
if entry == "{":
cnt += 1
if entry == "}":
cnt -= 1
if cnt > 0:
c_feed.add( entry )
while not c_feed.empty():
section = c_feed.next()
if section == "input":
out['input'] = InputConfigParser( c_feed )
elif section == "output":
out['output'] = OutputConfigParser( c_feed )
elif section == "peripherals":
out['peripherals'] = PeripheralsConfigParser( c_feed )
else:
raise SyntaxError
return out
def RunParser( feed ):
out = []
entry = feed.next()
if entry != "{":
raise SyntaxError
entry = feed.next()
while entry != "}":
out.append( entry.replace( ";", "" ) )
entry = feed.next()
return out
lines = []
tokens = []
for line in open( sys.argv[1] ):
if "//" in line:
line = line[:line.find("//")]
line = line.strip()
if len( line ) < 1:
continue
#print line
lines.append( line )
tokens.extend( line.split() )
#print tokens
feed = Feed( tokens )
out = {}
while not feed.empty():
entry = feed.next()
print "main", entry
if entry == "events":
out['events'] = EventsParser( feed )
elif entry == "condition":
if 'conditions' not in out:
out['conditions'] = {}
(key, cond) = ConditionParser( feed )
out['conditions'][key] = cond
elif entry == "on":
if 'emissions' not in out:
out['emissions'] = {}
(key, events) = OnEmitParser( feed )
if key not in out['emissions']:
out['emissions'][key] = []
out['emissions'][key].append( events )
elif entry == "workflow":
if "workflows" not in out:
out['workflows'] = {}
(key, flow) = WorkflowParser( feed )
out['workflows'][key] = flow
elif entry == "configure":
out['configuration'] = ConfigureParser( feed )
elif entry == "run":
out['main_loop'] = RunParser( feed )
else:
break
import pprint
pprint.pprint( out )
|
|
import time
from django.http import HttpResponse
from django.core.cache import cache
from django import get_version as django_version
from django.core.mail import send_mail, mail_admins
from django.conf import settings
from django.utils.translation import ugettext as _
from django.template import loader, TemplateDoesNotExist
from django.contrib.sites.models import Site
from piston3.decorator import decorator
__version__ = '0.3dev'
def get_version():
return __version__
def format_error(error):
return u"Piston/{0!s} (Django {1!s}) crash report:\n\n{2!s}".format(get_version(), django_version(), error)
class RCFactory(object):
"""
Status codes.
"""
CODES = dict(ALL_OK=('OK', 200),
CREATED=('Created', 201),
DELETED=('', 204), # 204 says "Don't send a body!"
BAD_REQUEST=('Bad Request', 400),
FORBIDDEN=('Forbidden', 401),
NOT_FOUND=('Not Found', 404),
DUPLICATE_ENTRY=('Conflict/Duplicate', 409),
NOT_HERE=('Gone', 410),
INTERNAL_ERROR=('Internal Error', 500),
NOT_IMPLEMENTED=('Not Implemented', 501),
THROTTLED=('Throttled', 503))
def __getattr__(self, attr):
"""
Returns a fresh `HttpResponse` when getting
an "attribute". This is backwards compatible
with 0.2, which is important.
"""
try:
(r, c) = self.CODES.get(attr)
except TypeError:
raise AttributeError(attr)
return HttpResponse(r, content_type='text/plain', status=c)
rc = RCFactory()
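#Illustrative sketch (not part of the original module): handlers typically return
#one of these canned responses, e.g.
#   return rc.NOT_FOUND   # -> HttpResponse('Not Found', content_type='text/plain', status=404)
#   return rc.CREATED     # -> HttpResponse('Created', content_type='text/plain', status=201)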
class FormValidationError(Exception):
def __init__(self, form):
self.form = form
class HttpStatusCode(Exception):
def __init__(self, response):
self.response = response
def validate(v_form, operation='POST'):
@decorator
def wrap(f, self, request, *a, **kwa):
form = v_form(getattr(request, operation))
if form.is_valid():
setattr(request, 'form', form)
return f(self, request, *a, **kwa)
else:
raise FormValidationError(form)
return wrap
def throttle(max_requests, timeout=60 * 60, extra=''):
"""
Simple throttling decorator, caches
the amount of requests made in cache.
If used on a view where users are required to
log in, the username is used, otherwise the
IP address of the originating request is used.
Parameters::
- `max_requests`: The maximum number of requests
- `timeout`: The timeout for the cache entry (default: 1 hour)
"""
@decorator
def wrap(f, self, request, *args, **kwargs):
if request.user.is_authenticated():
ident = request.user.username
else:
ident = request.META.get('REMOTE_ADDR')
if hasattr(request, 'throttle_extra'):
"""
Since we want to be able to throttle on a per-
application basis, it's important that we realize
that `throttle_extra` might be set on the request
object. If so, append the identifier name with it.
"""
ident += ':{0!s}'.format(str(request.throttle_extra))
if ident:
"""
Preferably we'd use incr/decr here, since they're
atomic in memcached, but it's in django-trunk so we
can't use it yet. If someone sees this after it's in
stable, you can change it here.
"""
ident += ':{0!s}'.format(extra)
now = time.time()
count, expiration = cache.get(ident, (1, None))
if expiration is None:
expiration = now + timeout
if count >= max_requests and expiration > now:
t = rc.THROTTLED
wait = int(expiration - now)
t.content = 'Throttled, wait {0:d} seconds.'.format(wait)
t['Retry-After'] = wait
return t
cache.set(ident, (count + 1, expiration), (expiration - now))
return f(self, request, *args, **kwargs)
return wrap
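#Illustrative sketch of applying the decorator to a handler method; BaseHandler and
#the read() method are hypothetical names, not defined in this module:
#   class SomeHandler(BaseHandler):
#       @throttle(max_requests=100, timeout=60 * 60, extra='read')
#       def read(self, request):
#           ...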
def coerce_put_post(request):
"""
Django doesn't particularly understand REST.
In case we send data over PUT, Django won't
actually look at the data and load it. We need
to twist its arm here.
The try/except abomination here is due to a bug
in mod_python. This should fix it.
"""
if request.method == "PUT":
# Bug fix: if _load_post_and_files has already been called, for
# example by middleware accessing request.POST, the below code to
# pretend the request is a POST instead of a PUT will be too late
# to make a difference. Also calling _load_post_and_files will result
# in the following exception:
# AttributeError: You cannot set the upload handlers after the upload has been processed.
# The fix is to check for the presence of the _post field which is set
# the first time _load_post_and_files is called (both by wsgi.py and
# modpython.py). If it's set, the request has to be 'reset' to redo
# the query value parsing in POST mode.
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = "PUT"
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST
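#Illustrative sketch of a hypothetical call site: a resource dispatching a PUT
#request would call this before touching the form data, e.g.
#   coerce_put_post(request)
#   data = request.PUT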
class MimerDataException(Exception):
"""
Raised if the content_type and data don't match
"""
pass
class Mimer(object):
TYPES = dict()
def __init__(self, request):
self.request = request
def is_multipart(self):
content_type = self.content_type()
if content_type is not None:
return content_type.lstrip().startswith('multipart')
return False
def loader_for_type(self, ctype):
"""
Gets a function ref to deserialize content
for a certain mimetype.
"""
for loadee, mimes in Mimer.TYPES.iteritems():
for mime in mimes:
if ctype.startswith(mime):
return loadee
def content_type(self):
"""
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
"""
type_formencoded = "application/x-www-form-urlencoded"
ctype = self.request.META.get('CONTENT_TYPE', type_formencoded)
if type_formencoded in ctype:
return None
return ctype
def translate(self):
"""
Will look at the `Content-type` sent by the client, and maybe
deserialize the contents into the format they sent. This will
work for JSON, YAML, XML and Pickle. Since the data is not just
key-value (and maybe just a list), the data will be placed on
`request.data` instead, and the handler will have to read from
there.
It will also set `request.content_type` so the handler has an easy
way to tell what's going on. `request.content_type` will always be
None for form-encoded and/or multipart form data (what your browser sends.)
"""
ctype = self.content_type()
self.request.content_type = ctype
if not self.is_multipart() and ctype:
loadee = self.loader_for_type(ctype)
if loadee:
try:
self.request.data = loadee(self.request.body)
# Reset both POST and PUT from request, as its
# misleading having their presence around.
self.request.POST = self.request.PUT = dict()
except (TypeError, ValueError):
# This also catches if loadee is None.
raise MimerDataException
else:
self.request.data = None
return self.request
@classmethod
def register(cls, loadee, types):
cls.TYPES[loadee] = types
@classmethod
def unregister(cls, loadee):
return cls.TYPES.pop(loadee)
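#Illustrative sketch (assuming the standard json module): a deserializer is attached
#to one or more content types via Mimer.register, e.g.
#   import json
#   Mimer.register(json.loads, ('application/json',))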
def translate_mime(request):
request = Mimer(request).translate()
def require_mime(*mimes):
"""
Decorator requiring a certain mimetype. There's a nifty
helper called `require_extended` below which requires everything
we support except for post-data via form.
"""
@decorator
def wrap(f, self, request, *args, **kwargs):
m = Mimer(request)
realmimes = set()
rewrite = {'json': 'application/json',
'yaml': 'application/x-yaml',
'xml': 'text/xml',
'pickle': 'application/python-pickle'}
for idx, mime in enumerate(mimes):
realmimes.add(rewrite.get(mime, mime))
if not m.content_type() in realmimes:
return rc.BAD_REQUEST
return f(self, request, *args, **kwargs)
return wrap
require_extended = require_mime('json', 'yaml', 'xml', 'pickle')
def send_consumer_mail(consumer):
"""
Send a consumer an email depending on what their status is.
"""
try:
subject = settings.PISTON_OAUTH_EMAIL_SUBJECTS[consumer.status]
except AttributeError:
subject = "Your API Consumer for {0!s} ".format(Site.objects.get_current().name)
if consumer.status == "accepted":
subject += "was accepted!"
elif consumer.status == "canceled":
subject += "has been canceled."
elif consumer.status == "rejected":
subject += "has been rejected."
else:
subject += "is awaiting approval."
template = "piston/mails/consumer_{0!s}.txt".format(consumer.status)
try:
body = loader.render_to_string(template,
{'consumer': consumer, 'user': consumer.user})
except TemplateDoesNotExist:
"""
They haven't set up the templates, which means they might not want
these emails sent.
"""
return
try:
sender = settings.PISTON_FROM_EMAIL
except AttributeError:
sender = settings.DEFAULT_FROM_EMAIL
if consumer.user:
send_mail(
_(subject), body, sender, [consumer.user.email], fail_silently=True)
if consumer.status == 'pending' and len(settings.ADMINS):
mail_admins(_(subject), body, fail_silently=True)
if settings.DEBUG and consumer.user:
print "Mail being sent, to={0!s}".format(consumer.user.email)
print "Subject: {0!s}".format(_(subject))
print body
|
|
"""Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1, use_statcache=0) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
import statcache
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1, use_statcache=0):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to 1.
use_statcache -- Do not stat() each file directly: go through
the statcache module for more efficiency.
Return value:
integer -- 1 if the files are the same, 0 otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
Of course, if 'use_statcache' is true, this mechanism is defeated,
and the cache will never grow stale.
"""
if use_statcache:
stat_function = statcache.stat
else:
stat_function = os.stat
s1 = _sig(stat_function(f1))
s2 = _sig(stat_function(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return 0
if shallow and s1 == s2:
return 1
if s1[1] != s2[1]:
return 0
result = _cache.get((f1, f2))
if result and (s1, s2) == result[:2]:
return result[2]
outcome = _do_cmp(f1, f2)
_cache[f1, f2] = s1, s2, outcome
return outcome
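# Illustrative sketch (file names are made up): cmp('spam.txt', 'spam_copy.txt')
# returns 1 when the files compare equal and 0 otherwise.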
def _sig(st):
return (stat.S_IFMT(st[stat.ST_MODE]),
st[stat.ST_SIZE],
st[stat.ST_MTIME])
def _do_cmp(f1, f2):
bufsize = BUFSIZE
fp1 = open(f1, 'rb')
fp2 = open(f2, 'rb')
while 1:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return 0
if not b1:
return 1
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a,b,ignore=None,hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
__p4_attrs = ('subdirs',)
__p3_attrs = ('same_files', 'diff_files', 'funny_files')
__p2_attrs = ('common_dirs', 'common_files', 'common_funny')
__p1_attrs = ('common', 'left_only', 'right_only')
__p0_attrs = ('left_list', 'right_list')
def __getattr__(self, attr):
if attr in self.__p4_attrs:
self.phase4()
elif attr in self.__p3_attrs:
self.phase3()
elif attr in self.__p2_attrs:
self.phase2()
elif attr in self.__p1_attrs:
self.phase1()
elif attr in self.__p0_attrs:
self.phase0()
else:
raise AttributeError, attr
return getattr(self, attr)
def phase1(self): # Compute common names
a_only, b_only = [], []
common = {}
b = {}
for fnm in self.right_list:
b[fnm] = 1
for x in self.left_list:
if b.get(x, 0):
common[x] = 1
else:
a_only.append(x)
for x in self.right_list:
if common.get(x, 0):
pass
else:
b_only.append(x)
self.common = common.keys()
self.left_only = a_only
self.right_only = b_only
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = statcache.stat(a_path)
except os.error, why:
# print 'Can\'t stat', a_path, ':', why[1]
ok = 0
try:
b_stat = statcache.stat(b_path)
except os.error, why:
# print 'Can\'t stat', b_path, ':', why[1]
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat[stat.ST_MODE])
b_type = stat.S_IFMT(b_stat[stat.ST_MODE])
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for x in self.subdirs.keys():
self.subdirs[x].phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for x in self.subdirs.keys():
print
self.subdirs[x].report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for x in self.subdirs.keys():
print
self.subdirs[x].report_full_closure()
def cmpfiles(a, b, common, shallow=1, use_statcache=0):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
use_statcache -- if true, use statcache.stat() instead of os.stat()
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow, use_statcache)].append(x)
return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, st):
try:
return not abs(cmp(a, b, sh, st))
except os.error:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(list, skip):
result = []
for item in list:
if item not in skip: result.append(item)
return result
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.error, 'need exactly two args'
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
|
|
# -*- coding: utf-8 -*-
import pytest
from marshmallow import fields
from marshmallow.marshalling import Marshaller, Unmarshaller, missing
from marshmallow.exceptions import ValidationError
from tests.base import User
def test_missing_is_falsy():
assert bool(missing) is False
class TestMarshaller:
@pytest.fixture()
def marshal(self):
return Marshaller()
def test_prefix(self):
u = User("Foo", email="foo@bar.com")
marshal = Marshaller(prefix='usr_')
result = marshal(u, {"email": fields.Email(), 'name': fields.String()})
assert result['usr_name'] == u.name
assert result['usr_email'] == u.email
def test_marshalling_generator(self, marshal):
gen = (u for u in [User("Foo"), User("Bar")])
res = marshal(gen, {"name": fields.String()}, many=True)
assert len(res) == 2
def test_default_to_missing(self, marshal):
u = {'name': 'Foo'}
res = marshal(u, {'name': fields.String(),
'email': fields.Email(default=missing)})
assert res['name'] == u['name']
assert 'email' not in res
def test_serialize_fields_with_load_only_param(self, marshal):
u = User('Foo', email='foo@bar.com')
fields_dict = {
'name': fields.String(),
'email': fields.Email(load_only=True),
}
result = marshal(u, fields_dict)
assert result['name'] == 'Foo'
assert 'email' not in result
def test_serialize_with_load_only_doesnt_validate(self, marshal):
fields_dict = {
'email': fields.Email(load_only=True)
}
marshal({'email': 'invalid'}, fields_dict)
assert 'email' not in marshal.errors
def test_stores_indices_of_errors_when_many_equals_true(self, marshal):
users = [
{'email': 'bar@example.com'},
{'email': 'foobar'},
{'email': 'invalid'},
]
marshal(users, {'email': fields.Email()}, many=True)
# 2nd and 3rd elements have an error
assert 1 in marshal.errors
assert 2 in marshal.errors
assert 'email' in marshal.errors[1]
assert 'email' in marshal.errors[2]
def test_doesnt_store_errors_when_index_errors_equals_false(self, marshal):
users = [
{'email': 'bar@example.com'},
{'email': 'foobar'},
{'email': 'invalid'},
]
marshal(users, {'email': fields.Email()}, many=True, index_errors=False)
assert 1 not in marshal.errors
assert 'email' in marshal.errors
class TestUnmarshaller:
@pytest.fixture
def unmarshal(self):
return Unmarshaller()
def test_extra_data_is_ignored(self, unmarshal):
fields_ = {'name': fields.Str()}
ret = unmarshal({'extra': 42, 'name': 'Steve'}, fields_)
assert 'extra' not in ret
def test_strict_mode_many(self, unmarshal):
users = [
{'email': 'foobar'},
{'email': 'bar@example.com'}
]
with pytest.raises(ValidationError) as excinfo:
unmarshal(users, {'email': fields.Email()}, strict=True, many=True)
assert 'Not a valid email address.' in str(excinfo)
def test_stores_errors(self, unmarshal):
data = {'email': 'invalid-email'}
unmarshal(data, {"email": fields.Email()})
assert "email" in unmarshal.errors
def test_stores_indices_of_errors_when_many_equals_true(self, unmarshal):
users = [
{'email': 'bar@example.com'},
{'email': 'foobar'},
{'email': 'invalid'},
]
unmarshal(users, {'email': fields.Email()}, many=True)
# 2nd and 3rd elements have an error
assert 1 in unmarshal.errors
assert 2 in unmarshal.errors
assert 'email' in unmarshal.errors[1]
assert 'email' in unmarshal.errors[2]
def test_doesnt_store_errors_when_index_errors_equals_false(self, unmarshal):
users = [
{'email': 'bar@example.com'},
{'email': 'foobar'},
{'email': 'invalid'},
]
unmarshal(users, {'email': fields.Email()}, many=True, index_errors=False)
assert 1 not in unmarshal.errors
assert 'email' in unmarshal.errors
def test_deserialize(self, unmarshal):
user_data = {
'age': '12'
}
result = unmarshal.deserialize(user_data, {'age': fields.Integer()})
assert result['age'] == 12
def test_extra_fields(self, unmarshal):
data = {'name': 'Mick'}
fields_dict = {'name': fields.String(), 'age': fields.Integer()}
# data doesn't have to have all the fields in the schema
result = unmarshal(data, fields_dict)
assert result['name'] == data['name']
assert 'age' not in result
def test_deserialize_many(self, unmarshal):
users_data = [
{'name': 'Mick', 'age': '71'},
{'name': 'Keith', 'age': '70'}
]
fields_dict = {
'name': fields.String(),
'age': fields.Integer(),
}
result = unmarshal.deserialize(users_data, fields_dict, many=True)
assert isinstance(result, list)
user = result[0]
assert user['age'] == 71
def test_deserialize_strict_raises_error(self, unmarshal):
with pytest.raises(ValidationError):
unmarshal(
{'email': 'invalid', 'name': 'Mick'},
{'email': fields.Email(), 'name': fields.String()},
strict=True
)
def test_deserialize_stores_errors(self, unmarshal):
user_data = {
'email': 'invalid',
'age': 'nan',
'name': 'Valid Name',
}
fields_dict = {
'email': fields.Email(),
'age': fields.Integer(),
'name': fields.String(),
}
unmarshal(user_data, fields_dict)
errors = unmarshal.errors
assert 'email' in errors
assert 'age' in errors
assert 'name' not in errors
def test_deserialize_fields_with_attribute_param(self, unmarshal):
data = {
'username': 'mick@stones.com',
'name': 'Mick'
}
fields_dict = {
'username': fields.Email(attribute='email'),
'name': fields.String(attribute='firstname'),
}
result = unmarshal.deserialize(data, fields_dict)
assert result['email'] == 'mick@stones.com'
assert result['firstname'] == 'Mick'
def test_deserialize_fields_with_load_from_param(self, unmarshal):
data = {
'Name': 'Mick',
'UserName': 'foo@bar.com',
'years': '42'
}
fields_dict = {
'name': fields.String(load_from='Name'),
'username': fields.Email(attribute='email', load_from='UserName'),
'years': fields.Integer(attribute='age', load_from='Years')
}
result = unmarshal.deserialize(data, fields_dict)
assert result['name'] == 'Mick'
assert result['email'] == 'foo@bar.com'
assert result['age'] == 42
def test_deserialize_fields_with_dump_only_param(self, unmarshal):
data = {
'name': 'Mick',
'years': '42',
}
fields_dict = {
'name': fields.String(),
'years': fields.Integer(dump_only=True),
'always_invalid': fields.Field(validate=lambda f: False, dump_only=True)
}
result = unmarshal.deserialize(data, fields_dict)
assert result['name'] == 'Mick'
assert 'years' not in result
assert 'always_invalid' not in unmarshal.errors
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import os
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from docker.utils.ports import build_port_bindings, split_port
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash, parallel_execute
from .config.validation import VALID_NAME_CHARS
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'log_opt',
'mem_limit',
'memswap_limit',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
    def containers(self, stopped=False, one_off=False, filters=None):
        # avoid a shared mutable default argument
        filters = filters or {}
        filters.update({'label': self.labels(one_off=one_off)})
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters=filters)])
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions, project takes care of starting/stopping,
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def pause(self, **options):
for c in self.containers(filters={'status': 'running'}):
log.info("Pausing %s..." % c.name)
c.pause(**options)
def unpause(self, **options):
for c in self.containers(filters={'status': 'paused'}):
log.info("Unpausing %s..." % c.name)
c.unpause()
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
force_recreate=False):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if not allow_recreate:
return ConvergencePlan('start', containers)
if force_recreate or self._containers_have_diverged(containers):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s...' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
@property
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
])
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return None
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
else:
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
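        # For example, hostname "web.example.com" with no explicit domainname is
        # split into hostname "web" and domainname "example.com".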
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
port = str(port)
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', 'json-file'),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = six.binary_type(self.options['build'])
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=False,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self):
if 'image' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
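# For example (illustrative, following the logic above):
#   build_container_name('myproject', 'web', 1)               -> 'myproject_web_1'
#   build_container_name('myproject', 'web', 1, one_off=True) -> 'myproject_web_run_1'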
# Images
def parse_repository_tag(repo_path):
"""Splits image identification into base image path, tag/digest
    and its separator.
Example:
>>> parse_repository_tag('user/repo@sha256:digest')
('user/repo', 'sha256:digest', '@')
>>> parse_repository_tag('user/repo:v1')
('user/repo', 'v1', ':')
"""
tag_separator = ":"
digest_separator = "@"
if digest_separator in repo_path:
repo, tag = repo_path.rsplit(digest_separator, 1)
return repo, tag, digest_separator
repo, tag = repo_path, ""
if tag_separator in repo_path:
repo, tag = repo_path.rsplit(tag_separator, 1)
if "/" in tag:
repo, tag = repo_path, ""
return repo, tag, tag_separator
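# Two more illustrative cases for the parser above:
#   parse_repository_tag('user/repo')           -> ('user/repo', '', ':')
#   parse_repository_tag('localhost:5000/repo') -> ('localhost:5000/repo', '', ':')
# (a colon followed by a "/" is treated as a registry port, not a tag separator)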
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return volume_bindings.values()
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
for volume in set(volumes_option + image_volumes.keys()):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = parts[2] if len(parts) == 3 else 'rw'
return VolumeSpec(external, internal, mode)
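# Illustrative results for the volume helpers above:
#   parse_volume_spec('/host/logs:/var/log:ro') -> VolumeSpec('/host/logs', '/var/log', 'ro')
#   parse_volume_spec('/var/log')               -> VolumeSpec(None, '/var/log', 'rw')
#   build_volume_binding(parse_volume_spec('/host/logs:/var/log:ro'))
#       -> ('/var/log', '/host/logs:/var/log:ro')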
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
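# For example (illustrative):
#   parse_restart_spec('always')       -> {'Name': 'always', 'MaximumRetryCount': 0}
#   parse_restart_spec('on-failure:5') -> {'Name': 'on-failure', 'MaximumRetryCount': 5}
#   parse_restart_spec(None)           -> None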
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
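# For example (illustrative), both forms below yield the same mapping:
#   build_extra_hosts(['somehost:162.242.195.82', 'otherhost:50.31.209.229'])
#   build_extra_hosts({'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'})
#       -> {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}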
|
|
#!/usr/bin/env python3
# Copyright 2014 Alex K (wtwf.com)
"""
Fixes up some posts imported from blogger with permalinks
posts imported with: https://gist.github.com/ngauthier/1506614
make all.txt with this (it finds all the blogger posts' urls):
for yr in $(seq 2005 2014); do \
for mo in $(seq -w 1 12); do \
sleep 1; wget -q -O - \
"http://blog.wtwf.com/?action=getTitles&widgetId=BlogArchive1&widgetType=BlogArchive&responseType=js&path=http%3A%2F%2Fblog.wtwf.com%2F${yr}_${mo}_01_archive.html" \
| egrep -o "http://blog.wtwf.com[^']*"; done; done | tee all.txt
make rpc.json from the blogger_rpc call that looks like this:
https://draft.blogger.com/blogger_rpc?blogID=10...44
"""
__author__ = "wtwf.com (Alex K)"
import collections
import getopt
import logging
import os
import sys
import urllib.parse
import yaml
import json
YAML_SEP = "---\n"
FILE_PREFIX_LENGTH = 5
POSTS_DIR = "_posts"
LABELS_DIR = "search/label"
def Usage(code, msg=""):
"""Show a usage message."""
if code:
fd = sys.stderr
else:
fd = sys.stdout
PROGRAM = os.path.basename(
sys.argv[0]
) # pylint: disable=invalid-name,unused-variable
print(__doc__, file=fd)
if msg:
print(msg, file=fd)
sys.exit(code)
def Main():
"""Run."""
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
try:
opts, args = getopt.getopt(
sys.argv[1:], "h", "help,permalinks,tags,import_labels".split(",")
)
except getopt.error as msg:
Usage(1, msg)
if args or not opts:
Usage(1)
for opt, arg in opts:
if opt in ("-h", "--help"):
Usage(0)
if opt == "--permalinks":
FixPermalinks()
if opt == "--import_labels":
FixLabels()
MakeTagsFiles()
if opt == "--tags":
MakeTagsFiles()
def FixPermalinks():
"""fix it to add permalinks."""
urls = LoadExistingUrls()
for file_name in os.listdir(POSTS_DIR):
key = KeyFromFileName(file_name)
if key:
perm = urls.get(key)
if perm:
SetJekyllVariable(file_name, "permalink", perm)
else:
logging.error("unable to find permalink for %r", file_name)
def LoadExistingUrls():
"""Load the list of existing urls."""
urls = {}
for url in open("all.txt"):
url = urllib.parse.urlparse(url.strip())
key = KeyFromPath(url.path)
if key:
urls[key] = url.path
return urls
def SetJekyllVariable(file_name, key, value):
"""Update a variable in the jenkins section of a post file."""
file_name = os.path.join(POSTS_DIR, file_name)
contents = open(file_name).read()
sections = contents.split(YAML_SEP)
if len(sections) < 2:
logging.fatal("invalid file format: %r", file_name)
jenky = yaml.safe_load(sections[1])
jenky[key] = value
sections[1] = yaml.dump(jenky, default_flow_style=False)
open(file_name, "w").write(YAML_SEP.join(sections))
def GetJekyllVariable(file_name, key):
"""Update a variable in the jenkins section of a post file."""
file_name = os.path.join(POSTS_DIR, file_name)
if not os.path.isfile(file_name):
return
contents = open(file_name).read()
sections = contents.split(YAML_SEP)
if len(sections) < 2:
logging.fatal("invalid file format: %r", file_name)
jenky = yaml.safe_load(sections[1])
return jenky.get(key)
def FixLabels():
logging.info("Fixing Labels")
blogger = json.load(open("rpc.json"))
posts = blogger["result"]["2"]
file_map = GetFileMap()
for post in posts:
title = post["2"]
url = post["9"]
date = post["6"]
labels = post.get("8")
state = post["7"]
file_name = FindPostFileFromUrl(file_map, url, date)
missing = []
if file_name and labels:
logging.info("%s: %s", title, file_name)
labels = list(map(str, labels)) # builtin map pylint: disable=W0141
SetJekyllVariable(file_name, "tags", labels)
else:
missing.append("Unable to find file for: %s %s" % (state, title))
if missing:
logging.warn("\n".join(missing))
def GetFileMap():
file_map = {}
for file_name in os.listdir(POSTS_DIR):
key = KeyFromFileName(file_name)
if key:
file_map[key] = file_name
key = KeyFromFileNameDate(file_name)
if key:
if key in file_map:
# Collision - two posts on the same day, use neither!
file_map[key] = "_ambiguous post_"
else:
file_map[key] = file_name
return file_map
def KeyFromFileName(file_name):
parts = file_name.split("-", 3)
if len(parts) < 3:
return None
del parts[2]
parts[-1] = parts[-1][0:FILE_PREFIX_LENGTH]
return "-".join(parts)
def UrlFromFilename(file_name):
parts = file_name.split("-", 3)
if len(parts) < 3:
return None
del parts[2]
if parts[-1].endswith(".md"):
parts[-1] = parts[-1][0:-3] + ".html"
return "/" + "/".join(parts)
def KeyFromFileNameDate(file_name):
    parts = file_name.split("-", 3)
    if len(parts) < 3:
        return None
    # Keep only the YYYY-MM-DD prefix; slice deletion avoids an IndexError
    # when the file name has no title component.
    del parts[3:]
    return "-".join(parts)
def KeyFromPath(path):
paths = path.lstrip("/").split("/", 2)
if len(paths) > 2:
paths[-1] = paths[-1][0:FILE_PREFIX_LENGTH]
return "-".join(paths)
return None
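# Illustrative example (not part of the original script) of how the two key
# functions line up: a post file and its blogger URL reduce to the same key.
#   KeyFromFileName('2014-03-02-my-first-post.md') -> '2014-03-my-fi'
#   KeyFromPath('/2014/03/my-first-post.html')     -> '2014-03-my-fi'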
def FindPostFileFromUrl(file_map, url, date):
url = urllib.parse.urlparse(url)
key = KeyFromPath(url.path)
file_name = file_map.get(key)
if not file_name:
date_parts = list(
map(int, date.split("/"))
) # builtin map pylint: disable=W0141
if len(date_parts) == 3:
key = "20%02d-%02d-%02d" % (date_parts[2], date_parts[0], date_parts[1])
file_name = file_map.get(key)
return file_name
def MakeTagsFiles():
MakeTagsFilesForLabels(*GetAllTags())
def GetAllTags():
all_labels = set()
posts = collections.defaultdict(list)
for file_name in os.listdir(POSTS_DIR):
labels = GetJekyllVariable(file_name, "tags")
permalink = GetJekyllVariable(file_name, "permalink")
title = GetJekyllVariable(file_name, "title")
if labels:
for label in labels:
if not permalink:
permalink = UrlFromFilename(file_name)
posts[label].append({"url": permalink, "title": title})
all_labels.update(labels)
return (sorted(list(all_labels)), posts)
def MakeTagsFilesForLabels(labels, posts):
template = """---
layout: blog_by_tag
tag: %(tag)s
permalink: %(url)s
---
"""
tags = open("_data/tags.yaml", "w")
logging.info(sorted(posts.keys()))
for label in labels:
base = os.path.join(LABELS_DIR, label)
url = "/%s.html" % base
file_name = "%s.md" % base
label_file = open(file_name, "w")
label_file.write(template % {"url": url, "tag": label})
tags.write(
yaml.dump(
[{"slug": label, "url": url, "name": label, "posts": posts[label]}]
)
)
if __name__ == "__main__":
Main()
|
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import logging
from owslib.util import log
# function to save writing out WCS namespace in full each time
def ns(tag):
return '{http://www.opengis.net/wcs}' + tag
class WebCoverageService_1_0_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 1.0.0
Implements IWebCoverageService.
"""
def __getitem__(self, name):
        '''Check the contents dictionary to allow dict-like access to service layers.'''
if name in list(self.__getattribute__('contents').keys()):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None):
super(WebCoverageService_1_0_0, self).__init__(auth)
self.version = '1.0.0'
self.url = url
self.cookies = cookies
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
# check for exceptions
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
self.updateSequence = self._capabilities.attrib.get('updateSequence')
# serviceIdentification metadata
subelem = self._capabilities.find(ns('Service'))
self.identification = ServiceIdentification(subelem)
# serviceProvider metadata
subelem = self._capabilities.find(ns('Service/') + ns('responsibleParty'))
self.provider = ServiceProvider(subelem)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns('Capability/') + ns('Request'))[:]:
self.operations.append(OperationMetadata(elem))
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('CoverageOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
        # Some WCS servers (wrongly) advertise 'ContentOfferingBrief' instead of 'CoverageOfferingBrief'.
if self.contents == {}:
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('ContentOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def __makeString(self, value):
# using repr unconditionally breaks things in some circumstances if a value is already a string
if type(value) is not str:
sval = repr(value)
else:
sval = value
return sval
def getCoverage(self, identifier=None, bbox=None, time=None, format=None, crs=None, width=None, height=None,
resx=None, resy=None, resz=None, parameter=None, method='Get', **kwargs):
"""Request and return a coverage from the WCS as a file-like object
        Note: additional **kwargs help with the multi-version implementation;
        core keyword arguments should be supported across versions.
example:
        cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], time=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
                            format='cf-netcdf')
        is roughly equivalent to:
        http://myhost/mywcs?service=WCS&version=1.0.0&request=GetCoverage&Coverage=TuMYrRQ4&time=2792-06-01T00:00:00.0&BBox=-112,36,-106,41&format=cf-netcdf
"""
if log.isEnabledFor(logging.DEBUG):
msg = 'WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier={}, bbox={}, time={}, format={}, crs={}, width={}, height={}, resx={}, resy={}, resz={}, parameter={}, method={}, other_arguments={}' # noqa
log.debug(msg.format(
identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetCoverage').methods
if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
log.debug('WCS 1.0.0 DEBUG: base url of server: %s' % base_url)
# process kwargs
request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
assert len(identifier) > 0
request['Coverage'] = identifier
# request['identifier'] = ','.join(identifier)
if bbox:
request['BBox'] = ','.join([self.__makeString(x) for x in bbox])
else:
request['BBox'] = None
if time:
request['time'] = ','.join(time)
if crs:
request['crs'] = crs
request['format'] = format
if width:
request['width'] = width
if height:
request['height'] = height
if resx:
request['resx'] = resx
if resy:
request['resy'] = resy
if resz:
request['resz'] = resz
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s' % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class OperationMetadata(object):
"""Abstraction for WCS metadata.
Implements IMetadata.
"""
def __init__(self, elem):
"""."""
self.name = elem.tag.split('}')[1]
# self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')] # noqa
self.methods = []
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Get/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Get', 'url': url})
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Post/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Post', 'url': url})
class ServiceIdentification(object):
""" Abstraction for ServiceIdentification metadata """
def __init__(self, elem):
# properties
self.type = 'OGC:WCS'
self.version = '1.0.0'
self.service = testXMLValue(elem.find(ns('name')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.title = testXMLValue(elem.find(ns('label')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
# note: differs from 'rights' in interface
self.fees = elem.find(ns('fees')).text
self.accessConstraints = elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
""" Abstraction for WCS ResponsibleParty
Implements IServiceProvider"""
def __init__(self, elem):
# it's not uncommon for the service provider info to be missing
# so handle case where None is passed in
if elem is None:
self.name = None
self.url = None
self.contact = None
else:
self.name = testXMLValue(elem.find(ns('organisationName')))
            self.url = self.name  # there is no definitive place for a URL in WCS capabilities, so repeat organisationName
self.contact = ContactMetadata(elem)
class ContactMetadata(object):
''' implements IContactMetadata'''
def __init__(self, elem):
try:
self.name = elem.find(ns('individualName')).text
except AttributeError:
self.name = None
try:
self.organization = elem.find(ns('organisationName')).text
except AttributeError:
self.organization = None
try:
self.address = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('deliveryPoint')).text
except AttributeError:
self.address = None
try:
self.city = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('city')).text
except AttributeError:
self.city = None
try:
self.region = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('administrativeArea')).text
except AttributeError:
self.region = None
try:
self.postcode = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('postalCode')).text
except AttributeError:
self.postcode = None
try:
self.country = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('country')).text
except AttributeError:
self.country = None
try:
self.email = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('electronicMailAddress')).text
except AttributeError:
self.email = None
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
# self._parent=parent
self._elem = elem
self._service = service
self.id = elem.find(ns('name')).text
self.title = testXMLValue(elem.find(ns('label')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns('lonLatEnvelope'))
if b is not None:
gmlpositions = b.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]), float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}RectifiedGrid') # noqa
if gridelem is not None:
grid = RectifiedGrid(gridelem)
else:
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Grid') # noqa
grid = Grid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints.
# WCS servers can declare one or both or neither of these.
def _getTimeLimits(self):
timepoints, timelimits = [], []
b = self._elem.find(ns('lonLatEnvelope'))
if b is not None:
timepoints = b.findall('{http://www.opengis.net/gml}timePosition')
else:
# have to make a describeCoverage request...
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepoints.append(pos)
if timepoints:
timelimits = [timepoints[0].text, timepoints[1].text]
return timelimits
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepositions.append(pos.text)
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
''' incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod.'''
bboxes = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Envelope'): # noqa
bbox = {}
bbox['nativeSrs'] = envelope.attrib['srsName']
gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text.split()
uc = gmlpositions[1].text.split()
bbox['bbox'] = (
float(lc[0]), float(lc[1]),
float(uc[0]), float(uc[1])
)
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('responseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('requestResponseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('nativeCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedFormats/') + ns('formats')):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('rangeSet/') + ns('RangeSet/') + ns('axisDescription/') + ns('AxisDescription')): # noqa
axisDescs.append(AxisDescription(elem)) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide access
# to the information in the GML.
class Grid(object):
''' Simple grid class to provide axis and value information for a gml grid '''
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get('dimension'))
self.lowlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}low').text.split(' ') # noqa
self.highlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}high').text.split(' ') # noqa
for axis in grid.findall('{http://www.opengis.net/gml}axisName'):
self.axislabels.append(axis.text)
class RectifiedGrid(Grid):
''' RectifiedGrid class, extends Grid with additional offset vector information '''
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
'{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos').text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall('{http://www.opengis.net/gml}offsetVector'):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
''' Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels'''
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
        for elem in axisdescElem:
if elem.tag == ns('name'):
self.name = elem.text
elif elem.tag == ns('label'):
self.label = elem.text
elif elem.tag == ns('values'):
                for child in elem:
self.values.append(child.text)
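# Minimal usage sketch (illustrative, not part of the original module); the
# endpoint URL, coverage id and output format below are placeholders:
#   wcs = WebCoverageService_1_0_0('http://example.com/wcs', None, None)
#   print(list(wcs.contents))         # available coverage identifiers
#   cvg = wcs['some_coverage_id']     # dict-like access via __getitem__
#   output = wcs.getCoverage(identifier='some_coverage_id',
#                            bbox=(-180, -90, 180, 90), crs='EPSG:4326',
#                            format='GeoTIFF', width=400, height=200)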
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectAccessReview(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1SelfSubjectAccessReviewSpec',
'status': 'V1beta1SubjectAccessReviewStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta1SelfSubjectAccessReview - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta1SelfSubjectAccessReview.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1SelfSubjectAccessReview.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1SelfSubjectAccessReview.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1SelfSubjectAccessReview.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1SelfSubjectAccessReview.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1SelfSubjectAccessReview.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1SelfSubjectAccessReview.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1SelfSubjectAccessReview.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1SelfSubjectAccessReview.
:return: The metadata of this V1beta1SelfSubjectAccessReview.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1SelfSubjectAccessReview.
:param metadata: The metadata of this V1beta1SelfSubjectAccessReview.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta1SelfSubjectAccessReview.
Spec holds information about the request being evaluated. user and groups must be empty
:return: The spec of this V1beta1SelfSubjectAccessReview.
:rtype: V1beta1SelfSubjectAccessReviewSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta1SelfSubjectAccessReview.
Spec holds information about the request being evaluated. user and groups must be empty
:param spec: The spec of this V1beta1SelfSubjectAccessReview.
:type: V1beta1SelfSubjectAccessReviewSpec
"""
if spec is None:
raise ValueError("Invalid value for `spec`, must not be `None`")
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta1SelfSubjectAccessReview.
Status is filled in by the server and indicates whether the request is allowed or not
:return: The status of this V1beta1SelfSubjectAccessReview.
:rtype: V1beta1SubjectAccessReviewStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta1SelfSubjectAccessReview.
Status is filled in by the server and indicates whether the request is allowed or not
:param status: The status of this V1beta1SelfSubjectAccessReview.
:type: V1beta1SubjectAccessReviewStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1SelfSubjectAccessReview):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
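# Illustrative note (not part of the generated code): `spec` is required and the
# setter rejects None, so construct the model with a spec instance from the same
# package, e.g.
#   review = V1beta1SelfSubjectAccessReview(spec=V1beta1SelfSubjectAccessReviewSpec(...))
#   review.to_dict()   # plain-dict form suitable for serialization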
|
|
""" A stylish alternative for caching your map tiles.
TileStache is a Python-based server application that can serve up map tiles
based on rendered geographic data. You might be familiar with TileCache
(http://tilecache.org), the venerable open source WMS server from MetaCarta.
TileStache is similar, but we hope simpler and better-suited to the needs of
designers and cartographers.
Documentation available at http://tilestache.org/doc/
"""
from __future__ import print_function
import os.path
__version__ = open(os.path.join(os.path.dirname(__file__), 'VERSION')).read().strip()
import re
from sys import stdout
try:
    from urllib.parse import parse_qs
except ImportError:
    # Python 2
    from urlparse import parse_qs
try:
from io import StringIO
except ImportError:
# Python 2
from StringIO import StringIO
from os.path import dirname, join as pathjoin, realpath
from datetime import datetime, timedelta
try:
from urllib.parse import urljoin, urlparse
except ImportError:
# Python 2
from urlparse import urljoin, urlparse
from wsgiref.headers import Headers
try:
from urllib.request import urlopen
except ImportError:
# Python 2
from urllib import urlopen
from os import getcwd
from time import time
try:
import http.client as httplib
except ImportError:
# Python 2
import httplib
import logging
try:
from json import load as json_load
from json import loads as json_loads
except ImportError:
from simplejson import load as json_load
from simplejson import loads as json_loads
from ModestMaps.Core import Coordinate
# dictionary of configuration objects for requestLayer().
_previous_configs = {}
from . import Core
from . import Config
# regular expression for PATH_INFO
_pathinfo_pat = re.compile(r'^/?(?P<l>\w.+)/(?P<z>\d+)/(?P<x>-?\d+)/(?P<y>-?\d+)\.(?P<e>\w+)$')
_preview_pat = re.compile(r'^/?(?P<l>\w.+)/(preview\.html)?$')
def getTile(layer, coord, extension, ignore_cached=False):
''' Get a type string and tile binary for a given request layer tile.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#tilestache-gettile
Arguments:
- layer: instance of Core.Layer to render.
- coord: one ModestMaps.Core.Coordinate corresponding to a single tile.
- extension: filename extension to choose response type, e.g. "png" or "jpg".
- ignore_cached: always re-render the tile, whether it's in the cache or not.
This is the main entry point, after site configuration has been loaded
and individual tiles need to be rendered.
'''
status_code, headers, body = layer.getTileResponse(coord, extension, ignore_cached)
mime = headers.get('Content-Type')
return mime, body
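# Illustrative sketch only, not part of TileStache's public API: one way
# getTile() is typically driven once a configuration has been parsed. The
# config path argument and the "example" layer name are placeholders.
def _example_render_single_tile(config_path, layer_name='example'):
    """ Render the single tile at row 0, column 0, zoom 0 from a named layer
        and return its mime-type string and binary body.
    """
    config = parseConfig(config_path)
    coord = Coordinate(0, 0, 0) # row, column, zoom
    return getTile(config.layers[layer_name], coord, 'png')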
def getPreview(layer):
""" Get a type string and dynamic map viewer HTML for a given layer.
"""
return 200, Headers([('Content-Type', 'text/html')]), Core._preview(layer)
def parseConfig(configHandle):
""" Parse a configuration file and return a Configuration object.
Configuration could be a Python dictionary or a file formatted as JSON. In both cases
it needs to be formatted with two sections, "cache" and "layers":
{
"cache": { ... },
"layers": {
"layer-1": { ... },
"layer-2": { ... },
...
}
}
The full path to the file is significant, used to
resolve any relative paths found in the configuration.
See the Caches module for more information on the "caches" section,
and the Core and Providers modules for more information on the
"layers" section.
"""
if isinstance(configHandle, dict):
config_dict = configHandle
dirpath = '.'
else:
scheme, host, path, p, q, f = urlparse(configHandle)
if scheme == '':
scheme = 'file'
path = realpath(path)
if scheme == 'file':
with open(path) as file:
config_dict = json_load(file)
else:
config_dict = json_load(urlopen(configHandle))
dirpath = '%s://%s%s' % (scheme, host, dirname(path).rstrip('/') + '/')
return Config.buildConfiguration(config_dict, dirpath)
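# Illustrative sketch only: parseConfig() also accepts an in-memory dictionary.
# The cache and provider settings below are placeholders; see the Caches and
# Providers documentation for the options supported by your version.
#
#   config = parseConfig({
#       "cache": {"name": "Test"},
#       "layers": {
#           "example": {
#               "provider": {"name": "proxy",
#                            "url": "http://tile.example.com/{Z}/{X}/{Y}.png"}
#           }
#       }
#   })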
parseConfigfile = parseConfig  # Deprecated alias, retained for backwards compatibility.
def splitPathInfo(pathinfo):
""" Converts a PATH_INFO string to layer name, coordinate, and extension parts.
Example: "/layer/0/0/0.png", leading "/" optional.
"""
if pathinfo == '/':
return None, None, None
if _pathinfo_pat.match(pathinfo or ''):
path = _pathinfo_pat.match(pathinfo)
layer, row, column, zoom, extension = [path.group(p) for p in 'lyxze']
coord = Coordinate(int(row), int(column), int(zoom))
elif _preview_pat.match(pathinfo or ''):
path = _preview_pat.match(pathinfo)
layer, extension = path.group('l'), 'html'
coord = None
else:
raise Core.KnownUnknown('Bad path: "%s". I was expecting something more like "/example/0/0/0.png"' % pathinfo)
return layer, coord, extension
def mergePathInfo(layer, coord, extension):
""" Converts layer name, coordinate and extension back to a PATH_INFO string.
See also splitPathInfo().
"""
z = coord.zoom
x = coord.column
y = coord.row
return '/%(layer)s/%(z)d/%(x)d/%(y)d.%(extension)s' % locals()
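# Example round trip between the two helpers (values are illustrative):
#   splitPathInfo('/example/3/1/2.png') -> ('example', Coordinate(2, 1, 3), 'png')
#   mergePathInfo('example', Coordinate(2, 1, 3), 'png') -> '/example/3/1/2.png'
# Coordinate takes (row, column, zoom), so y=2, x=1, z=3 in this case.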
def requestLayer(config, path_info):
""" Return a Layer.
Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
Config parameter can be a file path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
"""
if type(config) in (str, unicode):
#
# Should be a path to a configuration file we can load;
# build a tuple key into previously-seen config objects.
#
key = hasattr(config, '__hash__') and (config, getcwd())
if key in _previous_configs:
config = _previous_configs[key]
else:
config = parseConfig(config)
if key:
_previous_configs[key] = config
else:
assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
assert hasattr(config, 'layers'), 'Configuration object must have layers.'
assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
# ensure that path_info is at least a single "/"
path_info = '/' + (path_info or '').lstrip('/')
if path_info == '/':
return Core.Layer(config, None, None)
layername = splitPathInfo(path_info)[0]
if layername not in config.layers:
raise Core.KnownUnknown('"%s" is not a layer I know about. Here are some that I do know about: %s.' % (layername, ', '.join(sorted(config.layers.keys()))))
return config.layers[layername]
def requestHandler(config_hint, path_info, query_string=None):
""" Generate a mime-type and response body for a given request.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#tilestache-requesthandler
TODO: replace this with requestHandler2() in TileStache 2.0.0.
Calls requestHandler2().
"""
status_code, headers, content = requestHandler2(config_hint, path_info, query_string)
mimetype = headers.get('Content-Type')
return mimetype, content
def requestHandler2(config_hint, path_info, query_string=None, script_name=''):
""" Generate a set of headers and response body for a given request.
TODO: Replace requestHandler() with this function in TileStache 2.0.0.
Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
Config_hint parameter can be a path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
Query string is optional, currently used for JSON callbacks.
Calls Layer.getTileResponse() to render actual tiles, and getPreview() to render preview.html.
"""
headers = Headers([])
try:
# ensure that path_info is at least a single "/"
path_info = '/' + (path_info or '').lstrip('/')
layer = requestLayer(config_hint, path_info)
query = parse_qs(query_string or '')
try:
callback = query['callback'][0]
except KeyError:
callback = None
#
# Special case for index page.
#
if path_info == '/':
mimetype, content = getattr(layer.config, 'index', ('text/plain', 'TileStache says hello.'))
return 200, Headers([('Content-Type', mimetype)]), content
coord, extension = splitPathInfo(path_info)[1:]
if extension == 'html' and coord is None:
status_code, headers, content = getPreview(layer)
elif extension.lower() in layer.redirects:
other_extension = layer.redirects[extension.lower()]
redirect_uri = script_name
redirect_uri += mergePathInfo(layer.name(), coord, other_extension)
if query_string:
redirect_uri += '?' + query_string
headers['Location'] = redirect_uri
headers['Content-Type'] = 'text/plain'
return 302, headers, 'You are being redirected to %s\n' % redirect_uri
else:
status_code, headers, content = layer.getTileResponse(coord, extension)
if layer.allowed_origin:
headers.setdefault('Access-Control-Allow-Origin', layer.allowed_origin)
if callback and 'json' in headers['Content-Type']:
headers['Content-Type'] = 'application/javascript; charset=utf-8'
content = '%s(%s)' % (callback, content)
if layer.max_cache_age is not None:
expires = datetime.utcnow() + timedelta(seconds=layer.max_cache_age)
headers.setdefault('Expires', expires.strftime('%a, %d %b %Y %H:%M:%S GMT'))
headers.setdefault('Cache-Control', 'public, max-age=%d' % layer.max_cache_age)
except Core.KnownUnknown as e:
out = StringIO()
        print('Known unknown!', file=out)
        print(e, file=out)
        print('', file=out)
        print('\n'.join(Core._rummy()), file=out)
headers['Content-Type'] = 'text/plain'
status_code, content = 500, out.getvalue()
return status_code, headers, content
def cgiHandler(environ, config='./tilestache.cfg', debug=False):
""" Read environment PATH_INFO, load up configuration, talk to stdout by CGI.
This function is documented as part of TileStache's public API:
http://tilestache.org/doc/#cgi
Calls requestHandler().
Config parameter can be a file path string for a JSON configuration file
or a configuration object with 'cache', 'layers', and 'dirpath' properties.
"""
if debug:
import cgitb
cgitb.enable()
path_info = environ.get('PATH_INFO', None)
query_string = environ.get('QUERY_STRING', None)
script_name = environ.get('SCRIPT_NAME', None)
status_code, headers, content = requestHandler2(config, path_info, query_string, script_name)
headers.setdefault('Content-Length', str(len(content)))
# output the status code as a header
stdout.write('Status: %d\n' % status_code)
# output gathered headers
for k, v in headers.items():
stdout.write('%s: %s\n' % (k, v))
stdout.write('\n')
stdout.write(content)
class WSGITileServer:
""" Create a WSGI application that can handle requests from any server that talks WSGI.
This class is documented as part of TileStache's public API:
http://tilestache.org/doc/#wsgi
The WSGI application is an instance of this class. Example:
app = WSGITileServer('/path/to/tilestache.cfg')
werkzeug.serving.run_simple('localhost', 8080, app)
"""
def __init__(self, config, autoreload=False):
""" Initialize a callable WSGI instance.
Config parameter can be a file path string for a JSON configuration
file or a configuration object with 'cache', 'layers', and
'dirpath' properties.
Optional autoreload boolean parameter causes config to be re-read
on each request, applicable only when config is a JSON file.
"""
if type(config) in (str, unicode, dict):
self.autoreload = autoreload
self.config_path = config
try:
self.config = parseConfig(config)
except:
print("Error loading Tilestache config:")
raise
else:
assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
assert hasattr(config, 'layers'), 'Configuration object must have layers.'
assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
self.autoreload = False
self.config_path = None
self.config = config
def __call__(self, environ, start_response):
"""
"""
if self.autoreload: # re-parse the config file on every request
try:
self.config = parseConfig(self.config_path)
except Exception as e:
raise Core.KnownUnknown("Error loading Tilestache config file:\n%s" % str(e))
try:
layer, coord, ext = splitPathInfo(environ['PATH_INFO'])
except Core.KnownUnknown as e:
return self._response(start_response, 400, str(e))
#
# WSGI behavior is different from CGI behavior, because we may not want
# to return a chatty rummy for likely-deployed WSGI vs. testing CGI.
#
if layer and layer not in self.config.layers:
return self._response(start_response, 404)
path_info = environ.get('PATH_INFO', None)
query_string = environ.get('QUERY_STRING', None)
script_name = environ.get('SCRIPT_NAME', None)
status_code, headers, content = requestHandler2(self.config, path_info, query_string, script_name)
return self._response(start_response, status_code, str(content), headers)
def _response(self, start_response, code, content='', headers=None):
"""
"""
headers = headers or Headers([])
if content:
headers.setdefault('Content-Length', str(len(content)))
start_response('%d %s' % (code, httplib.responses[code]), headers.items())
return [content]
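    # Illustrative alternative to the werkzeug example in the class docstring
    # (the config path is a placeholder): WSGITileServer is a plain WSGI
    # callable, so the standard library's wsgiref server can host it locally.
    #
    #   from wsgiref.simple_server import make_server
    #   app = WSGITileServer('/path/to/tilestache.cfg')
    #   make_server('localhost', 8080, app).serve_forever()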
def modpythonHandler(request):
""" Handle a mod_python request.
TODO: Upgrade to new requestHandler() so this can return non-200 HTTP.
Calls requestHandler().
Example Apache configuration for TileStache:
<Directory /home/migurski/public_html/TileStache>
AddHandler mod_python .py
PythonHandler TileStache::modpythonHandler
PythonOption config /etc/tilestache.cfg
</Directory>
Configuration options, using PythonOption directive:
- config: path to configuration file, defaults to "tilestache.cfg",
using request.filename as the current working directory.
"""
from mod_python import apache
config_path = request.get_options().get('config', 'tilestache.cfg')
config_path = realpath(pathjoin(dirname(request.filename), config_path))
path_info = request.path_info
query_string = request.args
mimetype, content = requestHandler(config_path, path_info, query_string)
request.status = apache.HTTP_OK
request.content_type = mimetype
request.set_content_length(len(content))
request.send_http_header()
request.write(content)
return apache.OK
|
|
import sys
class LinkedListCode:
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __init__(self):
pass
'''Linked List Cycle'''
def hasCycle(self, head):
if not head or not head.next:
return False
        fast = slow = head
        # Floyd's cycle detection: fast advances two nodes per step, so the
        # guard must check fast.next (not slow.next) before reading fast.next.next.
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
if slow == fast:
return True
return False
'''Linked List Cycle II'''
def detectCycle(self, head):
if not head or not head.next:
return None
slow = fast = head
head2 = None
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return fast
return None
'''Intersection of Two Linked Lists'''
def getIntersectionNode(self, headA, headB):
p1 = headA
p2 = headB
while p1 != p2:
p1 = p1.next if p1 else headB
p2 = p2.next if p2 else headA
return p1
'''Remove Duplicates from Sorted List'''
def deleteDuplicates(self, head):
if not head or not head.next:
return head
last = head
p = head.next
while p:
if last.val != p.val:
last = p
else:
last.next = p.next
p = p.next
return head
'''Remove Duplicates from Sorted List II'''
def deleteDuplicates2(self, head):
if not head or not head.next:
return head
p1 = head
cur = None
result = None
last = None
while p1:
if (not p1.next or p1.val != p1.next.val) and (not last or p1.val != last.val):
if cur:
cur.next = p1
cur = p1
else:
result = p1
cur = p1
last = p1
p1 = p1.next
if cur:
cur.next = None
return result
'''Merge Two Sorted Lists'''
def mergeTwoLists(self, l1, l2):
p1 = l1
p2 = l2
cur = result = None
while p1 or p2:
if not p1 or (p2 and p2.val <= p1.val):
if not result:
cur = p2
result = cur
else:
cur.next = p2
cur = cur.next
p2 = p2.next
else:
if not result:
cur = p1
result = cur
else:
cur.next = p1
cur = cur.next
p1 = p1.next
return result
'''Merge k Sorted Lists'''
def mergeKLists(self, lists):
if len(lists) < 1:
return None
temp = lists[:]
while len(temp) != 1:
next = temp[:]
for i in range(0, len(temp), 2):
if i+1 > len(temp) - 1:
continue
newList = self.mergeTwoLists(temp[i], temp[i+1])
next.remove(temp[i])
next.remove(temp[i+1])
next.append(newList)
temp = next
return temp[0]
'''Reverse Linked List'''
def reverseList(self, head):
pre = None
while head:
temp = head
head = head.next
temp.next = pre
pre = temp
return pre
'''Reverse Linked List II'''
def reverseBetween(self, head, m, n):
i = 1
cur = head
preStart = None
start = None
result = head
pre = None
while cur:
if i < m:
if i == m - 1:
preStart = cur
cur = cur.next
elif i > n:
start.next = cur
break
else:
if i == n and preStart:
preStart.next = cur
temp = cur
cur = temp.next
temp.next = pre
if start == None:
start = temp
pre = temp
i += 1
return head if m > 1 else pre
'''Swap Nodes in Pairs'''
def swapPairs(self, head):
if not head or not head.next:
return head
i = 0
pre = None
cur = head
result = None
preStart = None
while cur:
if i % 2 == 1:
temp = cur
cur = temp.next
pre.next = temp.next
temp.next = pre
if preStart:
preStart.next = temp
preStart = pre
if not result:
result = temp
else:
pre = cur
cur = cur.next
i += 1
return result if result else head
    '''Sort List'''
    def sortList(self, head):
        if not head or not head.next:
            return head
        slow = fast = head
        # Find the middle node: the fast pointer must advance two steps per
        # iteration, otherwise the split is unbalanced and the recursion depth
        # grows linearly with the list length.
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
middle = slow.next
slow.next = None
return self.mergeTwoLists(self.sortList(head), self.sortList(middle))
''' Rotate List'''
def rotateRight(self, head, k):
if k == 0 or not head:
return head
cur = head
total = 0
result = None
while cur:
cur = cur.next
total += 1
k = k % total
if k == 0:
return head
cur = head
i = 1
while cur:
if total - i == k:
temp = cur
cur = cur.next
result = cur
temp.next = None
elif i == total:
cur.next = head
break
else:
cur = cur.next
i += 1
return result
'''Reorder List'''
def reorderList(self, head):
if not head:
return
fast = slow = head
while fast.next and fast.next.next:
fast = fast.next.next
slow = slow.next
if fast == head:
return
        # Split the list after the middle node.
        middle = slow.next
        slow.next = None
cur = middle
pre = None
while cur:
temp = cur
cur = cur.next
temp.next = pre
pre = temp
middle = pre
cur1 = head
cur2 = middle
while cur1 and cur2:
temp1 = cur1
temp2 = cur2
cur1 = cur1.next
cur2 = cur2.next
temp1.next = temp2
temp2.next = cur1
return
'''Partition List'''
def partition(self, head, x):
if not head or not head.next:
return head
small = smallHead = None
big = bigHead = None
cur = head
while cur:
temp = cur
cur = cur.next
if temp.val < x:
if smallHead:
small.next = temp
small = temp
else:
small = smallHead = temp
else:
if bigHead:
big.next = temp
big = temp
else:
big = bigHead = temp
temp.next = None
if not smallHead:
return bigHead
else:
small.next = bigHead
return smallHead
'''Add Two Numbers'''
def addTwoNumbers(self, l1, l2):
cur1 = l1
cur2 = l2
cur = head = None
carry = 0
while cur1 or cur2:
value = 0
if cur1 and cur2:
value = cur1.val + cur2.val
cur1 = cur1.next
cur2 = cur2.next
elif not cur1:
value = cur2.val
cur2 = cur2.next
else:
value = cur1.val
cur1 = cur1.next
value += carry
remain = value % 10
            carry = value // 10  # integer division keeps the carry an int
            if not cur:
                head = cur = self.ListNode(remain)
            else:
                cur.next = self.ListNode(remain)
                cur = cur.next
        if carry == 1:
            cur.next = self.ListNode(1)
return head
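    # Worked example for addTwoNumbers (digits are stored least-significant
    # digit first): 342 + 465 = 807 is represented as
    # (2 -> 4 -> 3) + (5 -> 6 -> 4) and the method returns 7 -> 0 -> 8.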
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
'''Copy List with Random Pointer'''
    def copyRandomList(self, head):
        if not head:
            return None
        # First pass: create a copy of every node, keyed by its original.
        mapping = {}
        cur = head
        while cur:
            mapping[cur] = self.RandomListNode(cur.label)
            cur = cur.next
        # Second pass: wire up next and random pointers between the copies.
        cur = head
        while cur:
            mapping[cur].next = mapping.get(cur.next)
            mapping[cur].random = mapping.get(cur.random)
            cur = cur.next
        return mapping[head]
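# Minimal self-check sketch (not part of the original solutions): builds a
# short list and exercises a few of the methods above.
def _demo_linked_list_code():
    solver = LinkedListCode()
    head = LinkedListCode.ListNode(1) # build 1 -> 2 -> 3 -> 4
    node = head
    for val in (2, 3, 4):
        node.next = LinkedListCode.ListNode(val)
        node = node.next
    assert solver.hasCycle(head) is False
    head = solver.reverseList(head) # 4 -> 3 -> 2 -> 1
    head = solver.sortList(head)    # back to 1 -> 2 -> 3 -> 4
    values = []
    while head:
        values.append(head.val)
        head = head.next
    return values # expected: [1, 2, 3, 4]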
|
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import math
import os
import re
import shutil
import threading
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class NetAppNfsDriver(nfs.NfsDriver):
"""Base class for NetApp NFS driver for Data ONTAP."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
def __init__(self, *args, **kwargs):
na_utils.validate_instantiation(**kwargs)
self._execute = None
self._context = None
self._app_version = kwargs.pop("app_version", "unknown")
super(NetAppNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
def set_execute(self, execute):
self._execute = execute
def do_setup(self, context):
super(NetAppNfsDriver, self).do_setup(context)
self._context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.zapi_client = None
self.ssc_enabled = False
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
super(NetAppNfsDriver, self).check_for_setup_error()
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
return volume['provider_location']
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
pool_name = volume_utils.extract_host(volume['host'], level='pool')
if pool_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
try:
volume['provider_location'] = pool_name
LOG.debug('Using pool %s.', pool_name)
self._do_create_volume(volume)
self._do_qos_for_volume(volume, extra_specs)
return {'provider_location': volume['provider_location']}
except Exception:
LOG.exception(_LE("Exception creating vol %(name)s on "
"pool %(pool)s."),
{'name': volume['name'],
'pool': volume['provider_location']})
# We need to set this for the model update in order for the
# manager to behave correctly.
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(pool_name))
msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
raise exception.VolumeBackendAPIException(data=msg % {
'vol': volume['name'], 'pool': pool_name})
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
'id': snapshot['volume_id'],
}
return self._clone_source_to_destination_volume(source, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
source = {'name': src_vref['name'],
'size': src_vref['size'],
'id': src_vref['id']}
return self._clone_source_to_destination_volume(source, volume)
def _clone_source_to_destination_volume(self, source, destination_volume):
share = self._get_volume_location(source['id'])
extra_specs = na_utils.get_volume_extra_specs(destination_volume)
try:
destination_volume['provider_location'] = share
self._clone_with_extension_check(
source, destination_volume)
self._do_qos_for_volume(destination_volume, extra_specs)
return {'provider_location': destination_volume[
'provider_location']}
except Exception:
LOG.exception(_LE("Exception creating volume %(name)s from source "
"%(source)s on share %(share)s."),
{'name': destination_volume['id'],
'source': source['name'],
'share': destination_volume['provider_location']})
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (
destination_volume['id']))
def _clone_with_extension_check(self, source, destination_volume):
source_size = source['size']
source_id = source['id']
source_name = source['name']
destination_volume_size = destination_volume['size']
self._clone_backing_file_for_volume(source_name,
destination_volume['name'],
source_id)
path = self.local_path(destination_volume)
if self._discover_file_till_timeout(path):
self._set_rw_permissions(path)
if destination_volume_size != source_size:
try:
self.extend_volume(destination_volume,
destination_volume_size)
except Exception:
LOG.error(_LE("Resizing %s failed. Cleaning "
"volume."), destination_volume['name'])
self._cleanup_volume_on_failure(destination_volume)
raise exception.CinderException(
_("Resizing clone %s failed.")
% destination_volume['name'])
else:
raise exception.CinderException(_("NFS file %s not discovered.")
% destination_volume['name'])
def _cleanup_volume_on_failure(self, volume):
LOG.debug('Cleaning up, failed operation on %s', volume['name'])
vol_path = self.local_path(volume)
if os.path.exists(vol_path):
LOG.debug('Found %s, deleting ...', vol_path)
self._delete_file_at_path(vol_path)
else:
LOG.debug('Could not find %s, continuing ...', vol_path)
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
raise NotImplementedError()
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self._clone_backing_file_for_volume(snapshot['volume_name'],
snapshot['name'],
snapshot['volume_id'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
nfs_mount = self._get_provider_location(snapshot.volume_id)
if self._volume_not_present(nfs_mount, snapshot.name):
return True
self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
run_as_root=self._execute_as_root)
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
nfs_server_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
return nfs_server_ip + ':' + export_path
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clone backing file for Cinder volume."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume."""
return self._get_provider_location(volume_id).rsplit(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
return self._get_provider_location(volume_id).rsplit(':')[1]
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists."""
try:
self._try_execute('ls', self._get_volume_path(nfs_mount,
volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError:
tries += 1
if tries >= self.configuration.num_shell_tries:
raise
LOG.exception(_LE("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path.
Get volume path (local fs path) for given volume name on given nfs
share.
:param nfs_share: string, example 172.18.194.100:/var/nfs
:param volume_name: string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
super(NetAppNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info(_LI('Copied image to volume %s using regular download.'),
volume['id'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
file_name = 'img-cache-%s' % image_id
LOG.info(_LI("Registering image in cache %s"), file_name)
try:
self._do_clone_rel_img_cache(
volume['name'], file_name,
volume['provider_location'], file_name)
except Exception as e:
LOG.warning(_LW('Exception while registering image %(image_id)s'
' in cache. Exception: %(exc)s'),
{'image_id': image_id, 'exc': e})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
result = []
if getattr(self, '_mounted_shares', None):
for share in self._mounted_shares:
dir = self._get_mount_point_for_share(share)
file_name = 'img-cache-%s' % image_id
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s',
{'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
"""Do clone operation w.r.t image cache file."""
@utils.synchronized(cache_file, external=True)
def _do_clone():
dir = self._get_mount_point_for_share(share)
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
LOG.info(_LI('Cloning from cache to destination %s'), dst)
self._clone_backing_file_for_volume(src, dst, volume_id=None,
share=share)
_do_clone()
@utils.synchronized('clean_cache')
def _spawn_clean_cache_job(self):
"""Spawns a clean task if not running."""
if getattr(self, 'cleaning', None):
LOG.debug('Image cache cleaning in progress. Returning... ')
return
else:
# Set cleaning to True
self.cleaning = True
t = threading.Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
try:
LOG.debug('Image cache cleaning in progress.')
thres_size_perc_start =\
self.configuration.thres_avl_size_perc_start
thres_size_perc_stop = \
self.configuration.thres_avl_size_perc_stop
for share in getattr(self, '_mounted_shares', []):
try:
total_size, total_avl = \
self._get_capacity_info(share)
avl_percent = int((total_avl / total_size) * 100)
if avl_percent <= thres_size_perc_start:
LOG.info(_LI('Cleaning cache for share %s.'), share)
eligible_files = self._find_old_cache_files(share)
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
bytes_to_free = int(threshold_size - total_avl)
LOG.debug('Files to be queued for deletion %s',
eligible_files)
self._delete_files_till_bytes_free(
eligible_files, share, bytes_to_free)
else:
continue
except Exception as e:
LOG.warning(_LW('Exception during cache cleaning'
' %(share)s. Message - %(ex)s'),
{'share': share, 'ex': e})
continue
finally:
LOG.debug('Image cache cleaning done.')
self.cleaning = False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
raise NotImplementedError()
def _find_old_cache_files(self, share):
"""Finds the old files in cache."""
mount_fs = self._get_mount_point_for_share(share)
threshold_minutes = self.configuration.expiry_thres_minutes
cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+%s' % threshold_minutes]
res, _err = self._execute(*cmd, run_as_root=self._execute_as_root)
if res:
old_file_paths = res.strip('\n').split('\n')
mount_fs_len = len(mount_fs)
old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
eligible_files = self._shortlist_del_eligible_files(
share, old_files)
return eligible_files
return []
def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
"""Delete files from disk till bytes are freed or list exhausted."""
LOG.debug('Bytes to free %s', bytes_to_free)
if file_list and bytes_to_free > 0:
sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
mount_fs = self._get_mount_point_for_share(share)
for f in sorted_files:
if f:
file_path = '%s/%s' % (mount_fs, f[0])
LOG.debug('Delete file path %s', file_path)
@utils.synchronized(f[0], external=True)
def _do_delete():
if self._delete_file_at_path(file_path):
return True
return False
if _do_delete():
bytes_to_free -= int(f[1])
if bytes_to_free <= 0:
return
def _delete_file_at_path(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
LOG.warning(_LW('Exception during deleting %s'), ex)
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred.
"""
image_id = image_meta['id']
cloned = False
post_clone = False
extra_specs = na_utils.get_volume_extra_specs(volume)
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
cloned = self._clone_from_cache(volume, image_id, cache_result)
else:
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
if cloned:
self._do_qos_for_volume(volume, extra_specs)
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e
LOG.info(_LI('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s'),
{'image_id': image_id, 'msg': msg})
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_cache(self, volume, image_id, cache_result):
"""Clones a copy from image cache."""
cloned = False
LOG.info(_LI('Cloning image %s from cache'), image_id)
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
LOG.debug('Cache share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
self._do_clone_rel_img_cache(
file_name, volume['name'], share, file_name)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warning(_LW('Unexpected exception during'
' image cloning in share %s'), share)
return cloned
def _direct_nfs_clone(self, volume, image_location, image_id):
"""Clone directly in nfs share."""
LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
cloned = False
image_locations = self._construct_image_nfs_url(image_location)
run_as_root = self._execute_as_root
for loc in image_locations:
share = self._is_cloneable_share(loc)
if share and self._is_share_vol_compatible(volume, share):
LOG.debug('Share is cloneable %s', share)
volume['provider_location'] = share
(__, ___, img_file) = loc.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path,
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_backing_file_for_volume(
img_file, volume['name'],
volume_id=None, share=share)
cloned = True
break
else:
LOG.info(
_LI('Image will locally be converted to raw %s'),
image_id)
dst = '%s/%s' % (dir_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but"
" format is now %s") % data.file_format)
else:
cloned = True
self._register_image_in_cache(
volume, image_id)
break
return cloned
def _post_clone_image(self, volume):
"""Do operations post image cloning."""
LOG.info(_LI('Performing post clone for %s'), volume['name'])
vol_path = self.local_path(volume)
if self._discover_file_till_timeout(vol_path):
self._set_rw_permissions(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
raise exception.InvalidResults(
_("NFS file could not be discovered."))
def _resize_image_file(self, path, new_size):
"""Resize the image file on share to new size."""
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info(_LI('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path,
run_as_root=self._execute_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
return False
def _discover_file_till_timeout(self, path, timeout=45):
"""Checks if file size at path is equal to size."""
# Sometimes nfs takes time to discover file
# Retrying in case any unexpected situation occurs
retry_seconds = timeout
sleep_interval = 2
while True:
if os.path.exists(path):
return True
else:
if retry_seconds <= 0:
LOG.warning(_LW('Discover file retries exhausted.'))
return False
else:
time.sleep(sleep_interval)
retry_seconds -= sleep_interval
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_get_nfs_path_segs(image_location)
return self._check_share_in_use(conn, dr)
def _check_get_nfs_path_segs(self, image_location):
"""Checks if the nfs path format is matched.
WebNFS url format with relative-path is supported.
Accepting all characters in path-names and checking
against the mounted shares which will contain only
allowed path segments. Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern = \
('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
'*(/[^\/\\\\]+)$)')
matched = re.match(nfs_loc_pattern, image_location, flags=0)
if not matched:
LOG.debug('Image location not in the'
' expected format %s', image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return conn, dr
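    # Worked example for _check_get_nfs_path_segs (illustrative values only):
    #   'nfs://192.168.1.10/exports/glance/img-id'
    # yields conn = '192.168.1.10' and dr = '/exports/glance', while a bare
    # 'nfs://host/img-id' yields dr = '/'.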
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
raise NotImplementedError()
def _check_share_in_use(self, conn, dir):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = na_utils.resolve_hostname(host)
share_candidates = []
for sh in self._mounted_shares:
sh_exp = sh.split(':')[1]
if sh_exp == dir:
share_candidates.append(sh)
if share_candidates:
LOG.debug('Found possible share matches %s',
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
LOG.warning(_LW("Unexpected exception while "
"short listing used share."))
return None
def _construct_image_nfs_url(self, image_location):
"""Construct direct url for nfs backend.
It creates direct url from image_location
which is a tuple with direct_url and locations.
Returns array of urls with nfs scheme if nfs store
else returns url. It needs to be verified
by backend before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
urls = []
if not locations:
urls.append(direct_url)
else:
for location in locations:
url = location['url']
if not location['metadata']:
urls.append(url)
break
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
urls.append(url)
break
share_location = location['metadata'].get('share_location')
mountpoint = location['metadata'].get('mountpoint')
if not share_location or not mountpoint:
urls.append(url)
break
url_parse = urllib.parse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mountpoint)
direct_url = "%s/%s" % (share_location, rel_path)
urls.append(direct_url)
return urls
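    # Worked example for _construct_image_nfs_url (illustrative values only):
    # a Glance location entry such as
    #   {'url': 'file:///mnt/glance/images/img-id',
    #    'metadata': {'type': 'nfs',
    #                 'share_location': 'nfs://192.168.1.10/exports/glance',
    #                 'mountpoint': '/mnt/glance'}}
    # produces the direct URL 'nfs://192.168.1.10/exports/glance/images/img-id'.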
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info(_LI('Extending volume %s.'), volume['name'])
path = self.local_path(volume)
self._resize_image_file(path, new_size)
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
raise NotImplementedError()
def _check_share_can_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
_tot_size, tot_available = self._get_capacity_info(
share)
if tot_available < size:
msg = _("Container size smaller than required file size.")
raise exception.VolumeDriverException(msg)
def _move_nfs_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _move_file(src, dst):
if os.path.exists(dst):
LOG.warning(_LW("Destination %s already exists."), dst)
return False
self._execute('mv', src, dst, run_as_root=self._execute_as_root)
return True
try:
return _move_file(source_path, dest_path)
except Exception as e:
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'),
{'src': source_path, 'e': e})
return False
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
host_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
elif share:
host_ip = share.split(':')[0]
export_path = share.split(':')[1]
else:
raise exception.InvalidInput(
'A volume ID or share was not specified.')
return host_ip, export_path
def _get_share_capacity_info(self, nfs_share):
"""Returns the share capacity metrics needed by the scheduler."""
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
# The scheduler's capacity filter will reduce the amount of
# free space that we report to it by the reserved percentage.
reserved_ratio = 1 - used_ratio
reserved_percentage = round(100 * reserved_ratio)
total_size, total_available = self._get_capacity_info(nfs_share)
apparent_size = total_size * oversub_ratio
apparent_size_gb = na_utils.round_down(
apparent_size / units.Gi, '0.01')
apparent_free_size = total_available * oversub_ratio
apparent_free_gb = na_utils.round_down(
float(apparent_free_size) / units.Gi, '0.01')
capacity = dict()
capacity['reserved_percentage'] = reserved_percentage
capacity['total_capacity_gb'] = apparent_size_gb
capacity['free_capacity_gb'] = apparent_free_gb
return capacity
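    # Worked example for _get_share_capacity_info (illustrative numbers): with
    # nfs_used_ratio = 0.95 and nfs_oversub_ratio = 1.0, a share reporting
    # 100 GiB total and 30 GiB free yields reserved_percentage = 5,
    # total_capacity_gb = 100.0 and free_capacity_gb = 30.0.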
def _get_capacity_info(self, nfs_share):
"""Get total capacity and free capacity in bytes for an nfs share."""
export_path = nfs_share.rsplit(':', 1)[1]
return self.zapi_client.get_flexvol_capacity(export_path)
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Match volume type for share file."""
raise NotImplementedError()
def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
"""Converts the share point name to an IP address
The volume reference may have a DNS name portion in the share name.
Convert that to an IP address and then restore the entire path.
:param vol_ref: Driver-specific information used to identify a volume
:return: A volume reference where share is in IP format.
"""
# First strip out share and convert to IP format.
share_split = vol_ref.rsplit(':', 1)
vol_ref_share_ip = na_utils.resolve_hostname(share_split[0])
# Now place back into volume reference.
vol_ref_share = vol_ref_share_ip + ':' + share_split[1]
return vol_ref_share
def _get_share_mount_and_vol_from_vol_ref(self, vol_ref):
"""Get the NFS share, the NFS mount, and the volume from reference
Determine the NFS share point, the NFS mount point, and the volume
(with possible path) from the given volume reference. Raise exception
if unsuccessful.
:param vol_ref: Driver-specific information used to identify a volume
:return: NFS Share, NFS mount, volume path or raise error
"""
# Check that the reference is valid.
if 'source-name' not in vol_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=reason)
vol_ref_name = vol_ref['source-name']
self._ensure_shares_mounted()
# If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config
# file, but the admin tries to manage the file located at
# 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below
# when searching self._mounted_shares to see if we have an existing
# mount that would work to access the volume-to-be-managed (a string
# comparison is done instead of IP comparison).
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip(
vol_ref_name)
for nfs_share in self._mounted_shares:
cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share)
(orig_share, work_share, file_path) = \
vol_ref_share.partition(cfg_share)
if work_share == cfg_share:
file_path = file_path[1:] # strip off leading path divider
LOG.debug("Found possible share %s; checking mount.",
work_share)
nfs_mount = self._get_mount_point_for_share(nfs_share)
vol_full_path = os.path.join(nfs_mount, file_path)
if os.path.isfile(vol_full_path):
LOG.debug("Found share %(share)s and vol %(path)s on "
"mount %(mnt)s",
{'share': nfs_share, 'path': file_path,
'mnt': nfs_mount})
return nfs_share, nfs_mount, file_path
else:
LOG.debug("vol_ref %(ref)s not on share %(share)s.",
{'ref': vol_ref_share, 'share': nfs_share})
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref,
reason=_('Volume not found on configured storage backend.'))
def manage_existing(self, volume, existing_vol_ref):
"""Manages an existing volume.
The specified Cinder volume is to be taken into Cinder management.
The driver will verify its existence and then rename it to the
new Cinder volume name. It is expected that the existing volume
reference is an NFS share point and some [/path]/volume;
e.g., 10.10.32.1:/openstack/vol_to_manage
or 10.10.32.1:/openstack/some_directory/vol_to_manage
:param volume: Cinder volume to manage
:param existing_vol_ref: Driver-specific information used to identify a
volume
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s",
{'vol': volume['id'],
'ref': existing_vol_ref['source-name']})
extra_specs = na_utils.get_volume_extra_specs(volume)
self._check_volume_type(volume, nfs_share, vol_path, extra_specs)
if vol_path == volume['name']:
LOG.debug("New Cinder volume %s name matches reference name: "
"no need to rename.", volume['name'])
else:
src_vol = os.path.join(nfs_mount, vol_path)
dst_vol = os.path.join(nfs_mount, volume['name'])
try:
shutil.move(src_vol, dst_vol)
LOG.debug("Setting newly managed Cinder volume name to %s",
volume['name'])
self._set_rw_permissions_for_all(dst_vol)
except (OSError, IOError) as err:
exception_msg = (_("Failed to manage existing volume %(name)s,"
" because rename operation failed:"
" Error msg: %(msg)s."),
{'name': existing_vol_ref['source-name'],
'msg': err})
raise exception.VolumeBackendAPIException(data=exception_msg)
try:
self._do_qos_for_volume(volume, extra_specs, cleanup=False)
except Exception as err:
exception_msg = (_("Failed to set QoS for existing volume "
"%(name)s, Error msg: %(msg)s.") %
{'name': existing_vol_ref['source-name'],
'msg': six.text_type(err)})
raise exception.VolumeBackendAPIException(data=exception_msg)
return {'provider_location': nfs_share}
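    # Illustrative call sketch (values are placeholders): the operator-supplied
    # reference is a dict with a 'source-name' key, e.g.
    #   {'source-name': '10.10.32.1:/openstack/vol_to_manage'}
    # after which the backing file is renamed on the share to the Cinder
    # volume name and QoS is applied.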
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Returns the size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_vol_ref: Existing volume to take under management
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
(nfs_share, nfs_mount, vol_path) = \
self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
try:
LOG.debug("Asked to get size of NFS vol_ref %s.",
existing_vol_ref['source-name'])
file_path = os.path.join(nfs_mount, vol_path)
file_size = float(utils.get_file_size(file_path)) / units.Gi
vol_size = int(math.ceil(file_size))
except (OSError, ValueError):
exception_message = (_("Failed to manage existing volume "
"%(name)s, because of error in getting "
"volume size."),
{'name': existing_vol_ref['source-name']})
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.",
{'ref': existing_vol_ref['source-name'], 'size': vol_size})
return vol_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. A log entry
will be made to notify the Admin that the volume is no longer being
managed.
:param volume: Cinder volume to unmanage
"""
vol_str = CONF.volume_name_template % volume['id']
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
"no longer being managed."), {'cr': vol_path})
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
raise NotImplementedError
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
raise NotImplementedError
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
test_abstract_ingester.py - tests for the top level ingestion algorithm
"""
import re
import os
import logging
import unittest
import subprocess
from agdc import dbutil
from agdc.cube_util import DatasetError
from agdc.abstract_ingester import AbstractIngester
#
# Set up logger.
#
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
#
# Constants
#
TEMP_TILE_DIR = 'temp_tile_dir'
DATASET_PATH_DICT = {
'single_path': ['path1'],
'multi_path': ['path1', 'path2', 'path3'],
'skip_one': ['skip1'],
'skip_two': ['path1', 'skip2', 'path3'],
'skip_three': ['path1', 'path2', 'skip3'],
'skip_four': ['skip1', 'skip2', 'path3', 'path4'],
'rollback_one': ['rollback1'],
'rollback_two': ['path1', 'rollback2', 'path3'],
'rollback_three': ['path1', 'path2', 'rollback3'],
'rollback_four': ['path1', 'path2', 'rollback3', 'rollback4'],
'mixed_ops': ['rollback1', 'rollback2', 'path3', 'path4',
'skip5', 'skip6'],
'no_paths': ['rollback1', 'skip2'],
'empty': []
}
DATASET_DICT = {
'path1': 'dataset1',
'path2': 'dataset2',
'path3': 'dataset3',
'path4': 'dataset4',
}
TILE_TYPE_DICT = {
'dataset1': [1],
'dataset2': [1, 2],
'dataset3': [1, 2, 3],
'dataset4': [4]
}
BANDS_DICT = {
('dataset1', 1): 'bands1.1',
('dataset2', 1): 'bands2.1',
('dataset2', 2): 'bands2.2',
('dataset3', 1): 'bands3.1',
('dataset3', 2): 'bands3.2',
('dataset3', 3): 'bands3.3',
('dataset4', 4): 'bands4.4'
}
COVERAGE_DICT = {
('dataset1', 1): ['tile1', 'empty2', 'tile3'],
('dataset2', 1): ['tile4'],
('dataset2', 2): ['tile5', 'tile6'],
('dataset3', 1): ['tile1', 'tile2', 'tile3', 'tile4', 'empty5', 'empty6'],
('dataset3', 2): ['tile7', 'empty8'],
('dataset3', 3): ['empty9'],
('dataset4', 4): ['tile4']
}
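# How these fixtures fit together: a source directory name in DATASET_PATH_DICT
# lists dataset paths; DATASET_DICT maps paths to dataset ids; TILE_TYPE_DICT
# lists the tile types for each dataset; BANDS_DICT and COVERAGE_DICT are keyed
# by (dataset id, tile type id) and supply the band stack name and the tile
# footprints. Paths starting with 'skip' or 'rollback' and footprints starting
# with 'empty' trigger the error-handling branches in the dummy classes below.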
#
# Database Classes
#
# pylint: disable = missing-docstring
#
# Many of the methods are simple and self documenting and so do not need
# docstrings.
#
class DummyCollection(object):
"""Dummy collection class for testing."""
def __init__(self):
self.tiles = []
# pylint: disable = no-self-use
#
# These methods do not use object data because this is a dummy
# class for testing, but the methods in a real implementation will,
    # so these take self as a parameter for consistency.
def check_metadata(self, dataset):
"""Raise a DatasetError if the dataset path starts with 'skip'."""
LOGGER.info("Check metadata.")
if re.match(r'^skip', dataset.dataset_path):
raise DatasetError("Testing skip dataset.")
def get_temp_tile_directory(self):
LOGGER.info("Get temporary tile directory.")
LOGGER.info(" returning: '%s'", TEMP_TILE_DIR)
return TEMP_TILE_DIR
def begin_transaction(self):
LOGGER.info("Begin transaction.")
def commit_transaction(self):
LOGGER.info("Commit transaction.")
def rollback_transaction(self):
LOGGER.info("Rollback transaction.")
def create_acquisition_record(self, dataset):
LOGGER.info("Create acquistion record:")
LOGGER.info(" dataset = %s", dataset)
acquisition_record = DummyAcquisitionRecord(self, dataset)
LOGGER.info(" returning: %s", acquisition_record)
return acquisition_record
def create_tile_contents(self, tile_type_id, tile_footprint, band_stack):
LOGGER.info("Create tile contents:")
LOGGER.info(" tile_type_id = %s", tile_type_id)
LOGGER.info(" tile_footprint = %s", tile_footprint)
LOGGER.info(" band_stack = %s", band_stack)
tile_contents = DummyTileContents(tile_type_id,
tile_footprint,
band_stack)
LOGGER.info(" returning: %s", tile_contents)
return tile_contents
def print_tiles(self):
"""Print the final tile list to the log file."""
print_tiles("output tiles", self.tiles)
# pylint: enable = no-self-use
class DummyAcquisitionRecord(object):
"""Dummy aquisition record class for testing."""
def __init__(self, collection, dataset):
self.collection = collection
self.dataset = dataset
def __str__(self):
return "[AcquisitionRecord %s]" % self.dataset
def create_dataset_record(self, dataset):
"""Raise a DatasetError if the dataset path starts with 'rollback'."""
LOGGER.info("Create dataset record:")
LOGGER.info(" dataset = %s", dataset)
if re.match(r'^rollback', dataset.dataset_path):
raise DatasetError("Testing transaction rollback.")
assert self.dataset is dataset, \
"Mismatched datasets in acquisition record."
dataset_record = DummyDatasetRecord(self.collection, self.dataset)
LOGGER.info(" returning: %s", dataset_record)
return dataset_record
class DummyDatasetRecord(object):
"""Dummy dataset record class for testing."""
def __init__(self, collection, dataset):
self.collection = collection
self.dataset_id = DATASET_DICT[dataset.dataset_path]
def __str__(self):
return "[DatasetRecord %s]" % self.dataset_id
def mark_as_tiled(self):
LOGGER.info("%s: mark as tiled.", self)
def list_tile_types(self):
LOGGER.info("%s: list tile types.", self)
tile_types = TILE_TYPE_DICT[self.dataset_id]
LOGGER.info(" returning: %s", tile_types)
return tile_types
def get_tile_bands(self, tile_type_id):
LOGGER.info("%s: get tile bands:", self)
LOGGER.info(" tile_type_id = %s", tile_type_id)
tile_bands = BANDS_DICT[(self.dataset_id, tile_type_id)]
LOGGER.info(" returning: %s", tile_bands)
return tile_bands
def get_coverage(self, tile_type_id):
LOGGER.info("%s: get_coverage:", self)
LOGGER.info(" tile_type_id = %s", tile_type_id)
coverage = COVERAGE_DICT[(self.dataset_id, tile_type_id)]
LOGGER.info(" returning: %s", coverage)
return coverage
def create_tile_record(self, tile_contents):
LOGGER.info("%s: create tile record:", self)
LOGGER.info(" tile_contents = %s", tile_contents)
return DummyTileRecord(self.collection,
self.dataset_id,
tile_contents)
class DummyTileRecord(object):
"""Dummy tile record class for testing."""
def __init__(self, collection, dataset_id, tile_contents):
"""Creates a dummy tile record, and adds the tile to the
collection tile list."""
self.collection = collection
self.dataset_id = dataset_id
self.tile_footprint = tile_contents.tile_footprint
self.band_list = tile_contents.band_stack.band_list
assert tile_contents.reprojected, \
"Expected tile_contents to have been reprojected."
tile_tuple = (self.dataset_id, self.tile_footprint, self.band_list)
self.collection.tiles.append(tile_tuple)
def __str__(self):
return "[TileRecord %s %s %s]" % \
(self.dataset_id, self.tile_footprint, self.band_list)
def make_mosaics(self):
LOGGER.info("%s: make mosaics", self)
class DummyTileContents(object):
"""Dummy tile contents class for testing."""
def __init__(self, tile_type_id, tile_footprint, band_stack):
self.tile_type_id = tile_type_id
self.tile_footprint = tile_footprint
self.band_stack = band_stack
self.reprojected = False
self.removed = False
assert band_stack.vrt_built, \
"Expected band_stack to have had a vrt built."
def __str__(self):
return ("[TileContents %s %s %s]" %
(self.tile_type_id, self.tile_footprint, self.band_stack))
def reproject(self):
LOGGER.info("%s: reproject", self)
self.reprojected = True
def has_data(self):
"""Returns False if the tile footprint starts with 'empty',
True otherwise."""
LOGGER.info("%s: has_data", self)
assert not self.removed, "%s: has been removed." % self
result = bool(not re.match(r'^empty', self.tile_footprint))
LOGGER.info(" returning: %s", result)
return result
def remove(self):
LOGGER.info("%s: remove", self)
self.removed = True
#
# Dataset Classes
#
class DummyDataset(object):
"""Dummy dataset class for testing."""
def __init__(self, dataset_path):
self.dataset_path = dataset_path
def __str__(self):
return "[Dataset %s]" % self.dataset_path
#pylint:disable=no-self-use
def get_x_ref(self):
return None
def get_y_ref(self):
return None
def get_start_datetime(self):
return None
#pylint:enable=no-self-use
def stack_bands(self, band_list):
LOGGER.info("%s: stack_bands:", self)
LOGGER.info(" band_list = %s", band_list)
band_stack = DummyBandStack(band_list)
LOGGER.info(" returning: %s", band_stack)
return band_stack
class DummyBandStack(object):
"""Dummy band stack class for testing."""
def __init__(self, band_list):
self.band_list = band_list
self.vrt_built = False
def __str__(self):
return "[BandStack %s]" % self.band_list
def buildvrt(self, temp_dir):
LOGGER.info("%s: buildvrt:", self)
LOGGER.info(" temp_dir = '%s'", temp_dir)
assert temp_dir == TEMP_TILE_DIR, \
"Unexpected temp_dir, should be '%s'." % TEMP_TILE_DIR
self.vrt_built = True
# pylint: enable = missing-docstring
#
# DummyIngester class
#
class DummyIngester(AbstractIngester):
"""Dummy Ingester subclass for testing."""
def __init__(self, collection):
"""Initialise the source_dir cache then call Ingester init"""
self.source_dir = None
AbstractIngester.__init__(self, collection=collection)
def find_datasets(self, source_dir):
"""Cache source directory then return dummy dataset paths."""
LOGGER.info("Ingester: find datasets")
LOGGER.info(" source_dir = %s", source_dir)
self.source_dir = source_dir
dataset_list = DATASET_PATH_DICT[source_dir]
LOGGER.info(" returning: %s", dataset_list)
return dataset_list
def open_dataset(self, dataset_path):
"""Check dataset_path then return dummy dataset object."""
LOGGER.info("Ingester: open dataset")
LOGGER.info(" dataset_path = %s", dataset_path)
assert dataset_path in DATASET_PATH_DICT[self.source_dir], \
"Unexpected dataset path while opening dataset."
dataset = DummyDataset(dataset_path)
LOGGER.info(" returning: %s", dataset)
return dataset
#
# Utility functions
#
def print_tiles(title, tiles):
"""Print a list of tiles to the log file."""
LOGGER.info("")
LOGGER.info("%s:", title)
for tile in tiles:
LOGGER.info(" %s", tile)
#
# Test suite
#
# pylint: disable=too-many-public-methods
#
# Disabled to avoid complaints about the unittest.TestCase class (which has too
# many public methods according to pylint).
#
class TestIngester(unittest.TestCase):
"""Unit test for the AbstractIngester class.
This is a partially abstract class, so the DummyIngester subclass
(defined above) is actually under test here."""
MODULE = 'abstract_ingester'
SUITE = 'TestIngester'
OUTPUT_DIR = dbutil.output_directory(MODULE, SUITE)
EXPECTED_DIR = dbutil.expected_directory(MODULE, SUITE)
def setUp(self):
#
# Parse out the name of the test case and use it to name a logfile
#
match = re.search(r'\.([^\.]+)$', self.id())
if match:
name = match.group(1)
else:
name = 'TestIngester'
logfile_name = "%s.log" % name
self.logfile_path = os.path.join(self.OUTPUT_DIR, logfile_name)
self.expected_path = os.path.join(self.EXPECTED_DIR, logfile_name)
#
# Set up a handler to log to the logfile, and attach it to the
# root logger.
#
self.handler = logging.FileHandler(self.logfile_path, mode='w')
self.handler.setLevel(logging.INFO)
self.handler.setFormatter(logging.Formatter('%(message)s'))
root_logger = logging.getLogger()
root_logger.addHandler(self.handler)
root_logger.setLevel(logging.DEBUG)
#
# Create the collection and ingester
#
self.collection = DummyCollection()
self.ingester = DummyIngester(self.collection)
def tearDown(self):
#
# Flush the handler and remove it from the root logger.
#
self.handler.flush()
root_logger = logging.getLogger()
root_logger.removeHandler(self.handler)
def check_log_file(self):
"""If an expected logfile exists, check to see if it matches."""
self.handler.flush()
if not os.path.isfile(self.expected_path):
self.skipTest("Expected log file not found.")
else:
try:
subprocess.check_output(['diff',
self.logfile_path,
self.expected_path])
except subprocess.CalledProcessError as err:
self.fail("Log file does not match the expected log file:\n" +
err.output)
def remove_log_file(self):
"""Remove the logfile from the output directory."""
os.remove(self.logfile_path)
def check_tiles(self, source_dir):
"""Check the tiles recorded in the collection against expectations."""
output_tiles = self.collection.tiles
expected_tiles = self.generate_tiles(source_dir)
self.assertEqual(set(output_tiles), set(expected_tiles))
@staticmethod
def generate_tiles(source_dir):
"""Generate the expected tiles for a given source directory.
This replicates the ingest algorithm, only it is much simpler
because it only has to deal with the test data."""
tiles = []
for dataset_path in DATASET_PATH_DICT[source_dir]:
if not re.match(r'(skip)|(rollback)', dataset_path):
dataset_id = DATASET_DICT[dataset_path]
for tile_type_id in TILE_TYPE_DICT[dataset_id]:
tup = (dataset_id, tile_type_id)
bands = BANDS_DICT[tup]
for tile_footprint in COVERAGE_DICT[tup]:
if not re.match(r'empty', tile_footprint):
tiles.append((dataset_id, tile_footprint, bands))
return tiles
def test_single_path_tiles(self):
"""Test for a single dataset path: check tiles."""
self.ingester.ingest('single_path')
self.check_tiles('single_path')
self.remove_log_file()
def test_multi_path_tiles(self):
"""Test for multiple dataset paths: check tiles."""
self.ingester.ingest('multi_path')
self.check_tiles('multi_path')
self.remove_log_file()
def test_skip_one_tiles(self):
"""Test for skipped datasets, test one: check tiles."""
self.ingester.ingest('skip_one')
self.check_tiles('skip_one')
self.remove_log_file()
def test_skip_two_tiles(self):
"""Test for skipped datasets, test two: check tiles."""
self.ingester.ingest('skip_two')
self.check_tiles('skip_two')
self.remove_log_file()
def test_skip_three_tiles(self):
"""Test for skipped datasets, test three: check tiles."""
self.ingester.ingest('skip_three')
self.check_tiles('skip_three')
self.remove_log_file()
def test_skip_four_tiles(self):
"""Test for skipped datasets, test four: check tiles."""
self.ingester.ingest('skip_four')
self.check_tiles('skip_four')
self.remove_log_file()
def test_rollback_one_tiles(self):
"""Test for transaction rollback, test one: check tiles."""
self.ingester.ingest('rollback_one')
self.check_tiles('rollback_one')
self.remove_log_file()
def test_rollback_two_tiles(self):
"""Test for transaction rollback, test two: check tiles."""
self.ingester.ingest('rollback_two')
self.check_tiles('rollback_two')
self.remove_log_file()
def test_rollback_three_tiles(self):
"""Test for transaction rollback, test three: check tiles."""
self.ingester.ingest('rollback_three')
self.check_tiles('rollback_three')
self.remove_log_file()
def test_rollback_four_tiles(self):
"""Test for transaction rollback, test four: check tiles."""
self.ingester.ingest('rollback_four')
self.check_tiles('rollback_four')
self.remove_log_file()
def test_mixed_ops_tiles(self):
"""Test for mixed dataset operations: check tiles."""
self.ingester.ingest('mixed_ops')
self.check_tiles('mixed_ops')
self.remove_log_file()
def test_no_paths_tiles(self):
"""Test for source directory with no valid datasets: check tiles."""
self.ingester.ingest('no_paths')
self.check_tiles('no_paths')
self.remove_log_file()
def test_empty_tiles(self):
"""Test for source directory with no datasets: check tiles."""
self.ingester.ingest('empty')
self.check_tiles('empty')
self.remove_log_file()
def test_single_path_log(self):
"""Test for a single dataset path: check tiles."""
self.ingester.ingest('single_path')
self.collection.print_tiles()
self.check_log_file()
def test_multi_path_log(self):
"""Test for multiple dataset paths: check log file."""
self.ingester.ingest('multi_path')
self.collection.print_tiles()
self.check_log_file()
def test_skip_one_log(self):
"""Test for skipped datasets, test one: check log file."""
self.ingester.ingest('skip_one')
self.collection.print_tiles()
self.check_log_file()
def test_skip_two_log(self):
"""Test for skipped datasets, test two: check log file."""
self.ingester.ingest('skip_two')
self.collection.print_tiles()
self.check_log_file()
def test_skip_three_log(self):
"""Test for skipped datasets, test three: check log file."""
self.ingester.ingest('skip_three')
self.collection.print_tiles()
self.check_log_file()
def test_skip_four_log(self):
"""Test for skipped datasets, test four: check log file."""
self.ingester.ingest('skip_four')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_one_log(self):
"""Test for transaction rollback, test one: check log file."""
self.ingester.ingest('rollback_one')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_two_log(self):
"""Test for transaction rollback, test two: check log file."""
self.ingester.ingest('rollback_two')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_three_log(self):
"""Test for transaction rollback, test three: check log file."""
self.ingester.ingest('rollback_three')
self.collection.print_tiles()
self.check_log_file()
def test_rollback_four_log(self):
"""Test for transaction rollback, test four: check log file."""
self.ingester.ingest('rollback_four')
self.collection.print_tiles()
self.check_log_file()
def test_mixed_ops_log(self):
"""Test for mixed dataset operations: check log file."""
self.ingester.ingest('mixed_ops')
self.collection.print_tiles()
self.check_log_file()
def test_no_paths_log(self):
"""Test for source directory with no valid datasets: check log file."""
self.ingester.ingest('no_paths')
self.collection.print_tiles()
self.check_log_file()
def test_empty_log(self):
"""Test for source directory with no datasets: check log file."""
self.ingester.ingest('empty')
self.collection.print_tiles()
self.check_log_file()
#
# Define test suites
#
def the_suite():
"""Returns a test suite of all the tests in this module."""
test_classes = [TestIngester]
suite_list = map(unittest.defaultTestLoader.loadTestsFromTestCase,
test_classes)
suite = unittest.TestSuite(suite_list)
return suite
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
|
|
from pyiso import client_factory, BALANCING_AUTHORITIES
from pyiso.base import BaseClient
from pyiso.eu import EUClient
from unittest import TestCase
from datetime import datetime, timedelta
import unittest
import pytz
import mock
import libfaketime
libfaketime.reexec_if_needed()
class TestBaseLoad(TestCase):
def setUp(self):
# set up expected values from base client
bc = BaseClient()
self.MARKET_CHOICES = bc.MARKET_CHOICES
self.FREQUENCY_CHOICES = bc.FREQUENCY_CHOICES
# set up other expected values
self.BA_CHOICES = BALANCING_AUTHORITIES.keys()
def _run_test(self, ba_name, expect_data=True, tol_min=0, **kwargs):
# set up
c = client_factory(ba_name)
# get data
data = c.get_load(**kwargs)
# test number
if expect_data:
self.assertGreaterEqual(len(data), 1)
else:
self.assertEqual(data, [])
# test contents
for dp in data:
# test key names
self.assertEqual(set(['load_MW', 'ba_name',
'timestamp', 'freq', 'market']),
set(dp.keys()))
# test values
self.assertEqual(dp['timestamp'].tzinfo, pytz.utc)
self.assertIn(dp['ba_name'], self.BA_CHOICES)
            # test that the load value is numeric (this comparison fails for NaN)
self.assertGreaterEqual(dp['load_MW']+1, dp['load_MW'])
# test correct temporal relationship to now
if c.options['forecast']:
self.assertGreaterEqual(dp['timestamp'],
pytz.utc.localize(datetime.utcnow())-timedelta(minutes=tol_min))
else:
self.assertLess(dp['timestamp'], pytz.utc.localize(datetime.utcnow()))
# test within date range
start_at = c.options.get('start_at', False)
end_at = c.options.get('end_at', False)
if start_at and end_at:
self.assertGreaterEqual(dp['timestamp'], start_at)
self.assertLessEqual(dp['timestamp'], end_at)
# return
return data
def _run_notimplemented_test(self, ba_name, **kwargs):
# set up
c = client_factory(ba_name)
# method not implemented yet
self.assertRaises(NotImplementedError, c.get_load)
    def _run_null_response_test(self, ba_name, **kwargs):
# set up
c = client_factory(ba_name)
# mock request
with mock.patch.object(c, 'request') as mock_request:
mock_request.return_value = None
# get data
data = c.get_load(**kwargs)
# test
self.assertEqual(data, [])
class TestBPALoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('BPA', latest=True)
def test_latest(self):
# basic test
data = self._run_test('BPA', latest=True, market=self.MARKET_CHOICES.fivemin)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('BPA', start_at=today-timedelta(days=2),
end_at=today-timedelta(days=1))
        # test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_date_range_strings(self):
# basic test
self._run_test('BPA', start_at='2016-05-01', end_at='2016-05-03')
def test_date_range_farpast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('BPA', start_at=today-timedelta(days=20),
end_at=today-timedelta(days=10))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
class TestCAISOLoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('CAISO', latest=True)
def test_latest(self):
# basic test
data = self._run_test('CAISO', latest=True, market=self.MARKET_CHOICES.fivemin)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('CAISO', start_at=today-timedelta(days=2),
end_at=today-timedelta(days=1),
tol_min=1)
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_date_range_strings(self):
# basic test
self._run_test('CAISO', start_at='2016-05-01', end_at='2016-05-03')
# @freezegun.freeze_time('2015-05-20 14:30', tz_offset=0, tick=True)
# @requests_mock.mock()
# def test_forecast(self, mocker):
# url = 'http://oasis.caiso.com/oasisapi/SingleZip'
# with open('responses/SLD_FCST.zip', 'rb') as ffile:
# mocker.get(url, content=ffile.read())
#
def test_forecast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('CAISO', start_at=today+timedelta(hours=4),
end_at=today+timedelta(days=2),
tol_min=4*60)
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
class TestERCOTLoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('ERCOT', latest=True)
def test_null_response_forecast(self):
today = datetime.today().replace(tzinfo=pytz.utc)
        self._run_null_response_test('ERCOT', start_at=today + timedelta(hours=20),
end_at=today+timedelta(days=2))
def test_latest(self):
# basic test
data = self._run_test('ERCOT', latest=True, market=self.MARKET_CHOICES.fivemin)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_forecast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('ERCOT', start_at=today + timedelta(hours=20),
end_at=today+timedelta(days=2))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# test timestamps in range
self.assertGreaterEqual(min(timestamps), today+timedelta(hours=20))
self.assertLessEqual(min(timestamps), today+timedelta(days=2))
class TestISONELoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('ISONE', latest=True)
def test_latest(self):
# basic test
data = self._run_test('ISONE', latest=True)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('ISONE', start_at=today-timedelta(days=2),
end_at=today-timedelta(days=1))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_date_range_strings(self):
# basic test
self._run_test('ISONE', start_at='2016-05-01', end_at='2016-05-03')
def test_forecast(self):
# basic test
data = self._run_test('ISONE', forecast=True, market='DAHR', freq='1hr')
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
class TestMISOLoad(TestBaseLoad):
def test_null_response_forecast(self):
today = pytz.utc.localize(datetime.utcnow())
        self._run_null_response_test('MISO', start_at=today + timedelta(hours=2),
end_at=today+timedelta(days=2))
def test_forecast(self):
# basic test
today = pytz.utc.localize(datetime.utcnow())
data = self._run_test('MISO', start_at=today + timedelta(hours=2),
end_at=today+timedelta(days=2))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# test timestamps in range
self.assertGreaterEqual(min(timestamps), today+timedelta(hours=2))
self.assertLessEqual(min(timestamps), today+timedelta(days=2))
class TestNEVPLoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('NEVP', latest=True)
def test_latest(self):
# basic test
data = self._run_test('NEVP', latest=True)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.hourly)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.hourly)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('NEVP', start_at=today-timedelta(days=1),
end_at=today)
        # test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_date_range_strings(self):
# basic test
self._run_test('NEVP', start_at='2016-05-01', end_at='2016-05-03')
# @libfaketime.fake_time('2016-05-20 14:45')
# @requests_mock.mock()
# def test_date_range_farpast(self, mocker):
# url = ('http://www.oasis.oati.com/NEVP/NEVPdocs/inetloading/'
# 'Monthly_Ties_and_Loads_L_from_04_01_2016_to_04_30_2016_.html')
# with open('responses/NEVP_load_farpast.htm', 'r') as ffile:
# mocker.get(url, content=ffile.read())
#
def test_date_range_farpast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('NEVP', start_at=today-timedelta(days=35),
end_at=today-timedelta(days=33))
self.assertEqual(len(data), 2*24)
class TestNYISOLoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('NYISO', latest=True)
def test_latest(self):
# basic test
data = self._run_test('NYISO', latest=True, market=self.MARKET_CHOICES.fivemin)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('NYISO', start_at=today-timedelta(days=2),
end_at=today-timedelta(days=1))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_date_range_strings(self):
# basic test
self._run_test('NYISO', start_at='2016-05-01', end_at='2016-05-03')
def test_forecast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('NYISO', start_at=today + timedelta(hours=20),
end_at=today+timedelta(days=2))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# test timestamps in range
self.assertGreaterEqual(min(timestamps), today+timedelta(hours=20))
self.assertLessEqual(min(timestamps), today+timedelta(days=2))
class TestPJMLoad(TestBaseLoad):
def test_null_response_latest(self):
        self._run_null_response_test('PJM', latest=True)
def test_latest(self):
# basic test
data = self._run_test('PJM', latest=True, market=self.MARKET_CHOICES.fivemin)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def test_forecast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('PJM', start_at=today + timedelta(hours=20),
end_at=today+timedelta(days=2))
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# test timestamps in range
self.assertGreaterEqual(min(timestamps), today+timedelta(hours=20))
self.assertLessEqual(min(timestamps), today+timedelta(days=2))
def test_historical(self):
start_at = datetime(2015, 1, 2, 0, tzinfo=pytz.utc)
end_at = datetime(2015, 12, 31, 23, tzinfo=pytz.utc)
data = self._run_test('PJM', start_at=start_at, end_at=end_at)
timestamps = [d['timestamp'] for d in data]
# 364 days, except for DST transition hours
# TODO handle DST transitions instead of dropping them
self.assertEqual(len(set(timestamps)), 364*24-2)
def test_date_range_strings(self):
data = self._run_test('PJM', start_at='2016-06-10', end_at='2016-06-11')
timestamps = [d['timestamp'] for d in data]
        # 1 day plus 1 hr
self.assertEqual(len(set(timestamps)), 24 + 1)
class TestSPPLoad(TestBaseLoad):
def test_failing(self):
self._run_notimplemented_test('SPP')
class TestSPPCLoad(TestBaseLoad):
def test_null_response(self):
        self._run_null_response_test('SPPC', latest=True)
def test_latest(self):
# basic test
data = self._run_test('SPPC', latest=True)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.hourly)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.hourly)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('SPPC', start_at=today-timedelta(days=1),
end_at=today)
        # test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# @freezegun.freeze_time('2015-05-20 11:30', tz_offset=0, tick=True)
# @requests_mock.mock()
# def test_date_range_farpast(self, mocker):
# url = ('http://www.oasis.oati.com/NEVP/NEVPdocs/inetloading/'
# 'Monthly_Ties_and_Loads_L_from_04_01_2015_to_04_30_2015_.html')
# with open('responses/SPPC_load_farpast.htm', 'r') as ffile:
# mocker.get(url, content=ffile.read())
def test_date_range_farpast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('SPPC', start_at=today-timedelta(days=35),
end_at=today-timedelta(days=33))
def test_date_range_strings(self):
# basic test
self._run_test('SPPC', start_at='2016-05-01', end_at='2016-05-03')
class TestSVERILoad(TestBaseLoad):
def setUp(self):
super(TestSVERILoad, self).setUp()
self.bas = [k for k, v in BALANCING_AUTHORITIES.items() if v['module'] == 'sveri']
def test_null_response(self):
        self._run_null_response_test(self.bas[0], latest=True)
def test_latest_all(self):
for ba in self.bas:
self._test_latest(ba)
def test_date_range_all(self):
for ba in self.bas:
self._test_date_range(ba)
def _test_latest(self, ba):
# basic test
data = self._run_test(ba, latest=True)
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
def _test_date_range(self, ba):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test(ba, start_at=today - timedelta(days=3),
end_at=today - timedelta(days=2), market=self.MARKET_CHOICES.fivemin)
# test timestamps are different
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.fivemin)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.fivemin)
@unittest.skip('Not ready')
class TestEULoad(TestBaseLoad):
def setUp(self):
super(TestEULoad, self).setUp()
self.BA_CHOICES = EUClient.CONTROL_AREAS.keys()
def test_latest(self):
# basic test
data = self._run_test('EU', latest=True, market=self.MARKET_CHOICES.hourly,
control_area='IT')
# test all timestamps are equal
timestamps = [d['timestamp'] for d in data]
self.assertEqual(len(set(timestamps)), 1)
# test flags
for dp in data:
self.assertEqual(dp['market'], self.MARKET_CHOICES.hourly)
self.assertEqual(dp['freq'], self.FREQUENCY_CHOICES.hourly)
def test_date_range(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('EU', start_at=today-timedelta(days=2),
end_at=today-timedelta(days=1),
control_area='IT')
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
def test_forecast(self):
# basic test
today = datetime.today().replace(tzinfo=pytz.utc)
data = self._run_test('EU', start_at=today+timedelta(hours=20),
end_at=today+timedelta(days=1),
control_area='IT')
# test timestamps are not equal
timestamps = [d['timestamp'] for d in data]
self.assertGreater(len(set(timestamps)), 1)
|
|
import os
import random
import string
from sqlalchemy import (
    orm,
)
from flask_mail import (
Message,
)
from werkzeug.security import (
check_password_hash,
generate_password_hash,
)
from chatschoolette import (
db,
mail,
login_manager,
)
from chatschoolette.mod_account.models import (
Interest,
Profile,
profile_interests,
)
from chatschoolette.mod_chat.models import (
ChatMessage,
ChatQueue,
PrivateChat,
)
@login_manager.user_loader
def user_loader(user_id):
return User.query.get(user_id)
friends_table = db.Table(
'friends_table',
db.Column('friend1_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('friend2_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
)
private_chats_table = db.Table(
'private_chats_table',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('private_chat_id', db.Integer, db.ForeignKey('private_chat.id'), primary_key=True),
)
DANK_MEMES = [
"Who is Champ?",
"DAE LOVE COMIC SANS?",
"ayy lmao",
"Kappa",
"Wow. Such notification. Very alert.",
]
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
chatroom_id = db.Column(db.Integer, db.ForeignKey('chatroom.id'))
textchatroom_id = db.Column(db.Integer, db.ForeignKey('textchatroom.id'))
username = db.Column(db.String(32), index=True, unique=True)
email = db.Column(db.String(64), index=True, unique=True)
password = db.Column(db.String(64))
is_admin = db.Column(db.Boolean)
_is_active = db.Column(db.Boolean)
banned = False
activation_key = db.relationship(
'ActivationKey',
uselist=False,
backref='user',
)
pw_reset = db.relationship(
'PasswordReset',
uselist=False,
backref='user',
)
profile = db.relationship(
'Profile',
uselist=False,
backref='user',
)
messages = db.relationship(
'ChatMessage',
backref='user',
)
queue_position = db.relationship(
'ChatQueue',
uselist=False,
backref='user',
)
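    # Self-referential many-to-many: friends_table stores (friend1_id, friend2_id)
    # pairs, and the primaryjoin/secondaryjoin below pick which column refers to
    # this user and which to the friend.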
friends = db.relationship(
'User',
secondary=friends_table,
primaryjoin=id==friends_table.c.friend1_id,
secondaryjoin=id==friends_table.c.friend2_id,
)
private_chats = db.relationship(
'PrivateChat',
secondary=private_chats_table,
backref='users',
)
notifications = db.relationship(
'Notification',
backref='user',
)
def __init__(self, username, email, password, is_admin=True):
self.username = username
self.email = email
self.password = generate_password_hash(password)
self.is_admin = is_admin
self.friends = User.query.all()
self.private_chats = []
self.notifications = []
self._is_active = False
self.banned = False
self.activation_key = ActivationKey()
self.send_activation_key()
# Call the method to load local variables NOT stored in the db
self.init_on_load()
@orm.reconstructor
def init_on_load(self):
# Any user that is logged in is automatically authenticated.
self._is_authenticated = True
@property
def is_authenticated(self):
return self._is_authenticated
@property
def is_active(self):
return self._is_active
@is_active.setter
def is_active(self, value):
if self.activation_key:
db.session.delete(self.activation_key)
self._is_active = value
@property
def is_anonymous(self):
return not self.is_authenticated
def check_password(self, password):
return check_password_hash(self.password, password)
def get_id(self):
return self.id
def notify(self, text, url=None):
self.notifications.append(Notification(user=self, text=text, url=url))
db.session.commit()
def __repr__(self):
return '<User %r>' % self.username
def send_activation_key(self):
title = 'Activate your ChatSchoolette account NOW!'
content = 'Please go to this link: '
url = '{site}/auth/activate/{key}'.format(
site=os.environ['SITE_URL'],
key=self.activation_key.key,
)
sender = 'cs490testing@gmail.com'
recipient = self.email
msg = Message(title, sender=sender, recipients=[recipient])
msg.body = content + url
try:
mail.send(msg)
        except Exception:
print('COULD NOT SEND EMAIL!')
def send_password_reset(self):
pw_reset = PasswordReset(
user_id=self.id,
key=''.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(60)
),
)
db.session.add(pw_reset)
db.session.commit()
title = 'Reset your ChatSchoolette Password NOW!'
content = 'Please go to this link: '
url = '{site}/auth/register/{key}'.format(
site=os.environ['SITE_URL'],
key=pw_reset.key,
)
sender = 'cs490testing@gmail.com'
recipient = self.email
msg = Message(title, sender=sender, recipients=[recipient])
msg.body = content + url
try:
mail.send(msg)
        except Exception:
print('COULD NOT SEND EMAIL!')
def reset_password(self, new_pw):
pw_reset = PasswordReset.query.filter_by(user_id=self.id).first()
if pw_reset is not None:
db.session.delete(pw_reset)
self.password = generate_password_hash(new_pw)
db.session.commit()
def update_account(self, form):
if form.password.data != '':
self.reset_password(form.password.data)
self.profile.gender = form.gender.data
self.profile.body = form.profile_description.data
# Update the user's interests
self.profile.interests = [
Interest.get_or_create(interest)
for interest in form.interests
]
if form.profile_picture.has_file():
self.profile.set_profile_picture(form.profile_picture)
db.session.commit()
@classmethod
def get_by_username(cls, username):
return User.query.filter_by(username=username).first()
@classmethod
def get_by_email(cls, email):
return User.query.filter_by(email=email).first()
class PasswordReset(db.Model):
__tablename__ = "password_reset"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
key = db.Column(db.String(64), index=True)
def __init__(self, user_id, key):
self.user_id = user_id
self.key = key
def __repr__(self):
return '<PasswordReset for %r>' % self.user.username
class ActivationKey(db.Model):
__tablename__ = "activation_key"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
key = db.Column(db.String(64), index=True)
def __init__(self):
self.key = ''.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(60)
)
def __repr__(self):
return '<ActivationKey for %r>' % self.user.username
class Notification(db.Model):
__tablename__ = 'notification'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
text = db.Column(db.String(256))
url = db.Column(db.String(128))
def __init__(self, user, text, url=None):
self.user = user
self.text = random.choice(DANK_MEMES)
self.url = url
def __repr__(self):
return '<Notification: %r>' % self.text
|
|
################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `concatenate` module.
"""
import warnings
warnings.simplefilter("error")
import numpy as np
from bayespy.nodes import (Concatenate,
GaussianARD,
Gamma)
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestConcatenate(TestCase):
"""
Unit tests for Concatenate node.
"""
def test_init(self):
"""
Test the creation of Concatenate node
"""
# One parent only
X = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X)
self.assertEqual(Y.plates, (3,))
self.assertEqual(Y.dims, ( (), () ))
X = GaussianARD(0, 1, plates=(3,), shape=(2,4))
Y = Concatenate(X)
self.assertEqual(Y.plates, (3,))
self.assertEqual(Y.dims, ( (2,4), (2,4,2,4) ))
# Two parents
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (), () ))
# Two parents with shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=(4,6))
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (4,6), (4,6,4,6) ))
# Two parents with non-default axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Y = Concatenate(X1, X2, axis=-2)
self.assertEqual(Y.plates, (5,4))
self.assertEqual(Y.dims, ( (), () ))
# Three parents
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
X3 = GaussianARD(0, 1, plates=(4,), shape=())
Y = Concatenate(X1, X2, X3)
self.assertEqual(Y.plates, (9,))
self.assertEqual(Y.dims, ( (), () ))
# Constant parent
X1 = [7.2, 3.5]
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (), () ))
# Different moments
X1 = GaussianARD(0, 1, plates=(3,))
X2 = Gamma(1, 1, plates=(4,))
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
# Incompatible shapes
X1 = GaussianARD(0, 1, plates=(3,), shape=(2,))
X2 = GaussianARD(0, 1, plates=(2,), shape=())
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
# Incompatible plates
X1 = GaussianARD(0, 1, plates=(4,3), shape=())
X2 = GaussianARD(0, 1, plates=(5,2,), shape=())
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
pass
def test_message_to_child(self):
"""
Test the message to child of Concatenate node.
"""
# Two parents without shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X1, X2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,)))[:2],
u1[0]*np.ones((2,)))
self.assertAllClose((u[1]*np.ones((5,)))[:2],
u1[1]*np.ones((2,)))
self.assertAllClose((u[0]*np.ones((5,)))[2:],
u2[0]*np.ones((3,)))
self.assertAllClose((u[1]*np.ones((5,)))[2:],
u2[1]*np.ones((3,)))
# Two parents with shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=(4,))
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,))
Y = Concatenate(X1, X2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[:2],
u1[1]*np.ones((2,4,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[2:],
u2[1]*np.ones((3,4,4)))
# Test with non-constant axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Y = Concatenate(X1, X2, axis=-2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4)))[:2],
u1[1]*np.ones((2,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4)))[2:],
u2[1]*np.ones((3,4)))
# Test with constant parent
X1 = np.random.randn(2, 4)
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,))
Y = Concatenate(X1, X2)
u1 = Y.parents[0].get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[:2],
u1[1]*np.ones((2,4,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[2:],
u2[1]*np.ones((3,4,4)))
pass
def test_message_to_parent(self):
"""
Test the message to parents of Concatenate node.
"""
# Two parents without shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,)))[:2],
m1[0]*np.ones((2,)))
self.assertAllClose((m[1]*np.ones((5,)))[:2],
m1[1]*np.ones((2,)))
self.assertAllClose((m[0]*np.ones((5,)))[2:],
m2[0]*np.ones((3,)))
self.assertAllClose((m[1]*np.ones((5,)))[2:],
m2[1]*np.ones((3,)))
# Two parents with shapes
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
X1 = GaussianARD(0, 1, plates=(2,), shape=(4,6))
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
m1[0]*np.ones((2,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
m1[1]*np.ones((2,4,6,4,6)))
self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
m2[0]*np.ones((3,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
m2[1]*np.ones((3,4,6,4,6)))
# Two parents with non-default concatenation axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Z = Concatenate(X1, X2, axis=-2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4)))[:2],
m1[0]*np.ones((2,4)))
self.assertAllClose((m[1]*np.ones((5,4)))[:2],
m1[1]*np.ones((2,4)))
self.assertAllClose((m[0]*np.ones((5,4)))[2:],
m2[0]*np.ones((3,4)))
self.assertAllClose((m[1]*np.ones((5,4)))[2:],
m2[1]*np.ones((3,4)))
# Constant parent
X1 = np.random.randn(2,4,6)
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = Z._message_to_parent(0)
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
m1[0]*np.ones((2,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
m1[1]*np.ones((2,4,6,4,6)))
self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
m2[0]*np.ones((3,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
m2[1]*np.ones((3,4,6,4,6)))
pass
def test_mask_to_parent(self):
"""
Test the mask handling in Concatenate node
"""
pass
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE debug servlet, to visualize the state of the framework in a browser
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import logging
import sys
import threading
import traceback
# Pelix framework
from pelix.ipopo.decorators import ComponentFactory, Provides, \
Validate, Invalidate, Property, Requires
import pelix.constants
import pelix.framework
import pelix.http
import pelix.ipopo.constants
import pelix.shell
from pelix.shell.ipopo import ipopo_state_to_str
# Cohorte
import cohorte
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
DEFAULT_DEBUG_PATH = '/debug'
""" Default servlet path """
CSS_STYLE = """
body {
font-family: sans;
}
h1 {
text-align: center;
}
h2 {
margin-top: 2em;
}
dt {
font-style: italic;
}
dd {
margin-bottom: 1em;
}
table {
font-size: 80%;
}
table tr th {
background: #DDD;
}
table tr td {
vertical-align: top;
}
table tr:nth-child(odd) td {
background: #EFF;
}
table tr:nth-child(even) td {
background: #CFF;
}
table tr td pre {
font-family: courier;
font-size: 100%;
}
"""
# ------------------------------------------------------------------------------
@ComponentFactory('cohorte-debug-servlet-factory')
@Provides(pelix.http.HTTP_SERVLET)
@Requires("_ipopo", pelix.ipopo.constants.SERVICE_IPOPO)
@Requires("_utils", pelix.shell.SERVICE_SHELL_UTILS)
@Property('_path', pelix.http.HTTP_SERVLET_PATH, DEFAULT_DEBUG_PATH)
class DebugServlet(object):
"""
COHORTE HTTP Signals receiver servlet
"""
def __init__(self):
"""
Constructor
"""
# Servlet path
self._path = DEFAULT_DEBUG_PATH
# Injected services
self._ipopo = None
self._utils = None
# Bundle context
self._context = None
def make_all(self, request):
"""
Aggregates all content makers
:param request: The HTTP request
:return: The aggregation of the result of all other make_* methods
"""
# Get the list of makers
makers = sorted(member for member in dir(self)
if member.startswith('make') and member != 'make_all')
lines = []
errors = []
for maker_name in makers:
maker = getattr(self, maker_name)
try:
# Store the result of the maker, if any
content = maker(request)
if content:
lines.append(content)
except Exception as ex:
errors.append('<li>Error calling {0}: {1}</li>'
.format(maker_name, ex))
if errors:
# Add the errors part only if needed
lines.append("<h2>Page generation errors</h2>\n<ul>"
"\n{errors}\n</ul>".format(errors=errors))
return '\n'.join(lines)
def make_basic(self, request):
"""
Prints basic isolate information
"""
lines = ['<dl>']
for prop_var in sorted(dir(cohorte)):
if prop_var.startswith('PROP'):
key = getattr(cohorte, prop_var)
lines.append('<dt>{0}</dt>\n<dd>{1}</dd>'
.format(key, self._context.get_property(key)))
lines.append('</dl>')
return "<h2>Isolate information</h2>\n{body}\n" \
.format(body='\n'.join(lines))
def make_instances(self, request):
"""
Prints iPOPO components instances details
"""
headers = ('Name', 'Factory', 'State')
instances = self._ipopo.get_instances()
# Lines are already sorted
lines = ((name, factory, ipopo_state_to_str(state))
for name, factory, state in instances)
table = self._utils.make_table(headers, lines)
return '<h2>iPOPO Instances</h2><pre>' + table + '</pre>'
def make_bundles(self, request):
"""
Lists the bundles installed
"""
lines = ['<table>',
'<tr>',
'<th>Bundle ID</th>',
'<th>Bundle Name</th>',
'<th>Bundle State</th>',
'</tr>']
states = {pelix.framework.Bundle.ACTIVE: 'ACTIVE',
pelix.framework.Bundle.INSTALLED: 'INSTALLED',
pelix.framework.Bundle.RESOLVED: 'RESOLVED',
pelix.framework.Bundle.STARTING: 'STARTING',
pelix.framework.Bundle.STOPPING: 'STOPPING',
pelix.framework.Bundle.UNINSTALLED: 'UNINSTALLED'}
for bundle in self._context.get_bundles():
# New line
lines.append('<tr>')
lines.append('<td>{0}</td>'.format(bundle.get_bundle_id()))
lines.append('<td>{0}</td>'.format(bundle.get_symbolic_name()))
lines.append('<td>{0}</td>'.format(
states.get(bundle.get_state(), '<UNKNOWN>')))
lines.append('</tr>')
lines.append('</table>\n')
return "<h2>Pelix bundles</h2>\n{table}\n" \
.format(table='\n'.join(lines))
def make_services(self, request):
"""
Lists the services registered
"""
lines = ['<table>',
'<tr>',
'<th>Service ID</th>',
'<th>Service Ranking</th>',
'<th>Specifications</th>',
'<th>Bundle</th>',
'<th>Properties</th>',
'</tr>']
for svc_ref in self._context.get_all_service_references(None, None):
# New line
lines.append('<tr>')
# Important properties
for name in (pelix.constants.SERVICE_ID,
pelix.constants.SERVICE_RANKING,
pelix.constants.OBJECTCLASS):
lines.append('<td>{0}</td>'.format(svc_ref.get_property(name)))
# Bundle
bundle = svc_ref.get_bundle()
lines.append('<td>{0} ({1})</td>'.format(
bundle.get_symbolic_name(), bundle.get_bundle_id()))
# All properties
lines.append('<td><dl>')
for key, value in svc_ref.get_properties().items():
lines.append('<dt>{0}</dt>\n<dd>{1}</dd>'.format(key, value))
lines.append('</dl></td>')
lines.append('</tr>')
lines.append('</table>\n')
return "<h2>Pelix services</h2>\n{table}\n" \
.format(table='\n'.join(lines))
@staticmethod
def make_threads(request):
"""
Prepares the section about process threads
"""
# Get the current thread ID
current_id = threading.current_thread().ident
lines = ['<table>',
'<tr>',
'<th>Thread ID</th>',
'<th>Thread Stack</th>',
'</tr>']
for thread_id, stack in sys._current_frames().items():
# New line
lines.append('<tr>')
# Prepare the thread ID string
if thread_id == current_id:
suffix = " (current)"
else:
suffix = ""
# Thread ID
lines.append('<td>')
lines.append("{0}{1}".format(thread_id, suffix))
lines.append('</td>')
# Prepare the stack string
lines.append('<td><dl>')
for filename, lineno, name, line in traceback.extract_stack(stack):
# Line position
stack_line = '<dt>{0}@{1}'.format(filename, lineno)
if name:
stack_line += ' :: {0}(...)'.format(name)
stack_line += '</dt>\n<dd>'
if line:
# Line content
stack_line += '<pre>{0}</pre>'.format(line.strip())
lines.append(stack_line + '</dd>')
            lines.append('</dl></td></tr>')
lines.append('</table>')
return "<h2>Threads</h2>\n{table}\n".format(table='\n'.join(lines))
def do_GET(self, request, response):
"""
Handles a GET request
:param request: The HTTP request bean
        :param response: The HTTP response handler
"""
query = request.get_path()[len(self._path) + 1:].split('/')
action = query[0].lower()
# To complete the page title
subtitle = ""
# Make the body
if not action:
content = self.make_all(request)
else:
maker = getattr(self, 'make_' + action, None)
if not maker:
content = self.make_all(request)
else:
subtitle = " - {0}".format(action)
content = maker(request)
# Make the HTML result
page = """<html>
<head>
<title>COHORTE Debug{title_suffix}</title>
<style type="text/css">
{css}
</style>
</head>
<body>
<h1>COHORTE Debug{title_suffix}</h1>
{body}
</body>
</html>""".format(title_suffix=subtitle, css=CSS_STYLE, body=content)
# Send the result
response.send_content(200, page)
@Validate
def validate(self, context):
"""
Component validated
:param context: The bundle context
"""
# Store the framework access
self._context = context
_logger.info("Debug servlet Ready")
@Invalidate
def invalidate(self, context):
"""
Component invalidated
:param context: The bundle context
"""
# Clear the framework access
self._context = None
_logger.info("Debug servlet Gone")
|
|
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import dask.array as da
from dask.utils import ignoring
from dask.array.reductions import arg_aggregate
import numpy as np
def eq(a, b):
if isinstance(a, da.Array):
a = a.compute()
if isinstance(b, da.Array):
b = b.compute()
if isinstance(a, (np.generic, np.ndarray)):
return np.allclose(a, b)
else:
return a == b
def test_arg_reduction():
pairs = [([4, 3, 5], [10, 11, 12]),
([3, 5, 1], [1, 2, 3])]
result = arg_aggregate(np.min, np.argmin, (100, 100), pairs)
assert eq(result, np.array([101, 11, 103]))
def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True):
assert eq(da_func(darr), np_func(narr))
assert eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
if use_dtype:
assert eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
def test_reductions_1D_float():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
reduction_1d_test(da.sum, a, np.sum, x)
reduction_1d_test(da.prod, a, np.prod, x)
reduction_1d_test(da.mean, a, np.mean, x)
reduction_1d_test(da.var, a, np.var, x)
reduction_1d_test(da.std, a, np.std, x)
reduction_1d_test(da.min, a, np.min, x, False)
reduction_1d_test(da.max, a, np.max, x, False)
reduction_1d_test(da.any, a, np.any, x, False)
reduction_1d_test(da.all, a, np.all, x, False)
reduction_1d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_1d_test(da.nanprod, a, np.nanprod, x)
reduction_1d_test(da.nanmean, a, np.mean, x)
reduction_1d_test(da.nanvar, a, np.var, x)
reduction_1d_test(da.nanstd, a, np.std, x)
reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def test_reductions_1D_int():
x = np.arange(5).astype('i4')
a = da.from_array(x, chunks=(2,))
reduction_1d_test(da.sum, a, np.sum, x)
reduction_1d_test(da.prod, a, np.prod, x)
reduction_1d_test(da.mean, a, np.mean, x)
reduction_1d_test(da.var, a, np.var, x)
reduction_1d_test(da.std, a, np.std, x)
reduction_1d_test(da.min, a, np.min, x, False)
reduction_1d_test(da.max, a, np.max, x, False)
reduction_1d_test(da.any, a, np.any, x, False)
reduction_1d_test(da.all, a, np.all, x, False)
reduction_1d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_1d_test(da.nanprod, a, np.nanprod, x)
reduction_1d_test(da.nanmean, a, np.mean, x)
reduction_1d_test(da.nanvar, a, np.var, x)
reduction_1d_test(da.nanstd, a, np.std, x)
reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True):
assert eq(da_func(darr), np_func(narr))
assert eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
assert eq(da_func(darr, axis=0), np_func(narr, axis=0))
assert eq(da_func(darr, axis=1), np_func(narr, axis=1))
assert eq(da_func(darr, axis=1, keepdims=True),
np_func(narr, axis=1, keepdims=True))
assert eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))
if use_dtype:
assert eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
assert eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
def test_reductions_2D_float():
x = np.arange(1, 122).reshape((11, 11)).astype('f4')
a = da.from_array(x, chunks=(4, 4))
b = a.sum(keepdims=True)
assert b._keys() == [[(b.name, 0, 0)]]
reduction_2d_test(da.sum, a, np.sum, x)
reduction_2d_test(da.prod, a, np.prod, x)
reduction_2d_test(da.mean, a, np.mean, x)
reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo
reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo
reduction_2d_test(da.min, a, np.min, x, False)
reduction_2d_test(da.max, a, np.max, x, False)
reduction_2d_test(da.any, a, np.any, x, False)
reduction_2d_test(da.all, a, np.all, x, False)
reduction_2d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_2d_test(da.nanprod, a, np.nanprod, x)
reduction_2d_test(da.nanmean, a, np.mean, x)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo
reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
def test_reductions_2D_int():
x = np.arange(1, 122).reshape((11, 11)).astype('i4')
a = da.from_array(x, chunks=(4, 4))
reduction_2d_test(da.sum, a, np.sum, x)
reduction_2d_test(da.prod, a, np.prod, x)
reduction_2d_test(da.mean, a, np.mean, x)
reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo
reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo
reduction_2d_test(da.min, a, np.min, x, False)
reduction_2d_test(da.max, a, np.max, x, False)
reduction_2d_test(da.any, a, np.any, x, False)
reduction_2d_test(da.all, a, np.all, x, False)
reduction_2d_test(da.nansum, a, np.nansum, x)
with ignoring(AttributeError):
reduction_2d_test(da.nanprod, a, np.nanprod, x)
reduction_2d_test(da.nanmean, a, np.mean, x)
reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo
reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo
reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
assert eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
assert eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
assert eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
assert eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
def test_moment():
def moment(x, n, axis=None):
return ((x - x.mean(axis=axis, keepdims=True))**n).sum(
axis=axis)/np.ones_like(x).sum(axis=axis)
# Poorly conditioned
x = np.array([1., 2., 3.]*10).reshape((3, 10)) + 1e8
a = da.from_array(x, chunks=5)
assert eq(a.moment(2), moment(x, 2))
assert eq(a.moment(3), moment(x, 3))
assert eq(a.moment(4), moment(x, 4))
x = np.arange(1, 122).reshape((11, 11)).astype('f8')
a = da.from_array(x, chunks=(4, 4))
assert eq(a.moment(4, axis=1), moment(x, 4, axis=1))
assert eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))
def test_reductions_with_negative_axes():
x = np.random.random((4, 4, 4))
a = da.from_array(x, chunks=2)
assert eq(a.argmin(axis=-1), x.argmin(axis=-1))
assert eq(a.sum(axis=-1), x.sum(axis=-1))
assert eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))
def test_nan():
x = np.array([[1, np.nan, 3, 4],
[5, 6, 7, np.nan],
[9, 10, 11, 12]])
d = da.from_array(x, chunks=(2, 2))
assert eq(np.nansum(x), da.nansum(d))
assert eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
assert eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
assert eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
assert eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
assert eq(np.nanvar(x), da.nanvar(d))
assert eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
assert eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
assert eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
with ignoring(AttributeError):
assert eq(np.nanprod(x), da.nanprod(d))
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
"""
Utilities for running block server and related ioc's.
"""
import datetime
import threading
import six
import time
import zlib
import re
import json
import codecs
import binascii
from xml.etree import ElementTree
from server_common.loggers.logger import Logger
from server_common.common_exceptions import MaxAttemptsExceededException
# Default to base class - does not actually log anything
LOGGER = Logger()
_LOGGER_LOCK = threading.RLock() # To prevent message interleaving between different threads.
class SEVERITY(object):
"""
Standard message severities.
"""
INFO = "INFO"
MINOR = "MINOR"
MAJOR = "MAJOR"
def char_waveform(length):
"""
Helper function for creating a char waveform PV.
Args:
length: The length of the array.
Return:
The dictionary to add to the PVDB.
"""
return {'type': 'char', 'count': length, 'value': [0]}
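# Minimal usage sketch (not part of the original module): a PVDB entry for a
# 256-character status PV. The PV name is invented purely for illustration.
def _example_char_waveform_pvdb():
    return {"BLOCKSVR:STATUS": char_waveform(256)}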
def set_logger(logger):
"""Sets the logger used by the print_and_log function.
Args:
logger (Logger): The logger to use. Must inherit from Logger.
"""
global LOGGER
LOGGER = logger
def print_and_log(message, severity=SEVERITY.INFO, src="BLOCKSVR"):
"""Prints the specified message to the console and writes it to the log.
Args:
message (string|exception): The message to log
        severity (string, optional): Gives the severity of the message. Expected severities are MAJOR, MINOR and INFO.
Default severity is INFO.
src (string, optional): Gives the source of the message. Default source is BLOCKSVR.
"""
with _LOGGER_LOCK:
message = "[{}] {}: {}".format(datetime.datetime.now(), severity, message)
print(message)
LOGGER.write_to_log(message, severity, src)
def compress_and_hex(value):
"""Compresses the inputted string and encodes it as hex.
Args:
value (str): The string to be compressed
Returns:
bytes : A compressed and hexed version of the inputted string
"""
assert type(value) == str, \
"Non-str argument passed to compress_and_hex, maybe Python 2/3 compatibility issue\n" \
"Argument was type {} with value {}".format(value.__class__.__name__, value)
compr = zlib.compress(bytes(value) if six.PY2 else bytes(value, "utf-8"))
return binascii.hexlify(compr)
def dehex_and_decompress(value):
"""Decompresses the inputted string, assuming it is in hex encoding.
Args:
value (bytes): The string to be decompressed, encoded in hex
Returns:
bytes : A decompressed version of the inputted string
"""
assert type(value) == bytes, \
"Non-bytes argument passed to dehex_and_decompress, maybe Python 2/3 compatibility issue\n" \
"Argument was type {} with value {}".format(value.__class__.__name__, value)
return zlib.decompress(binascii.unhexlify(value))
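# Illustrative round trip (not part of the original module): compressing a
# JSON string to hex and recovering it again with the two helpers above.
def _example_compress_round_trip():
    hexed = compress_and_hex('{"blocks": []}')
    recovered = dehex_and_decompress(hexed).decode("utf-8")
    assert recovered == '{"blocks": []}'
    return recovered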
def dehex_and_decompress_waveform(value):
"""Decompresses the inputted waveform, assuming it is a array of integers representing characters (null terminated).
Args:
value (list[int]): The string to be decompressed
Returns:
bytes : A decompressed version of the inputted string
"""
assert type(value) == list, \
"Non-list argument passed to dehex_and_decompress_waveform\n" \
"Argument was type {} with value {}".format(value.__class__.__name__, value)
unicode_rep = waveform_to_string(value)
bytes_rep = unicode_rep.encode("ascii")
return dehex_and_decompress(bytes_rep)
def convert_to_json(value):
"""Converts the inputted object to JSON format.
Args:
value (obj): The object to be converted
Returns:
string : The JSON representation of the inputted object
"""
return json.dumps(value)
def convert_from_json(value):
"""Converts the inputted string into a JSON object.
Args:
value (string): The JSON representation of an object
Returns:
obj : An object corresponding to the given string
"""
return json.loads(value)
def parse_boolean(string):
"""Parses an xml true/false value to boolean
Args:
string (string): String containing the xml representation of true/false
Returns:
bool : A python boolean representation of the string
Raises:
ValueError : If the supplied string is not "true" or "false"
"""
if string.lower() == "true":
return True
elif string.lower() == "false":
return False
else:
raise ValueError(str(string) + ': Attribute must be "true" or "false"')
def value_list_to_xml(value_list, grp, group_tag, item_tag):
"""Converts a list of values to corresponding xml.
Args:
        value_list (dict[str, dict[object, object]]): The dictionary of names and their values; each value is in turn a
            dictionary of names and values {name: {parameter : value, parameter : value}}
grp (ElementTree.SubElement): The SubElement object to append the list on to
group_tag (string): The tag that corresponds to the group for the items given in the list e.g. macros
item_tag (string): The tag that corresponds to each item in the list e.g. macro
"""
xml_list = ElementTree.SubElement(grp, group_tag)
if len(value_list) > 0:
for n, c in value_list.items():
xml_item = ElementTree.SubElement(xml_list, item_tag)
xml_item.set("name", n)
for cn, cv in c.items():
xml_item.set(str(cn), str(cv))
def check_pv_name_valid(name):
"""Checks that text conforms to the ISIS PV naming standard
Args:
name (string): The text to be checked
Returns:
bool : True if text conforms to standard, False otherwise
"""
    # The pattern must cover the whole name, otherwise invalid characters slip through
    if re.match(r"[A-Za-z0-9_]*$", name) is None:
        return False
    return True
def create_pv_name(name, current_pvs, default_pv, limit=6, allow_colon=False):
"""Uses the given name as a basis for a valid PV.
Args:
name (string): The basis for the PV
current_pvs (list): List of already allocated pvs
default_pv (string): Basis for the PV if name is unreasonable, must be a valid PV name
limit (integer): Character limit for the PV
allow_colon (bool): If True, pv name is allowed to contain colons; if False, remove the colons
Returns:
string : A valid PV
"""
pv_text = name.upper().replace(" ", "_")
replacement_string = r'[^:a-zA-Z0-9_]' if allow_colon else r'\W'
pv_text = re.sub(replacement_string, '', pv_text)
# Check some edge cases of unreasonable names
if re.search(r"[^0-9_]", pv_text) is None or pv_text == '':
pv_text = default_pv
# Cut down pvs to limit
pv_text = pv_text[0:limit]
# Make sure PVs are unique
i = 1
pv = pv_text
# Append a number if the PV already exists
while pv in current_pvs:
if len(pv) > limit - 2:
pv = pv[0:limit - 2]
pv += format(i, '02d')
i += 1
return pv
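# Usage sketch (not part of the original module): deriving a unique PV name
# from a free-text block name; the inputs are invented for illustration.
def _example_create_pv_name():
    # "Temp 1" upper-cases to "TEMP_1", which clashes with the existing PV,
    # so a two-digit counter is appended within the 6-character limit.
    return create_pv_name("Temp 1", ["TEMP_1"], "BLOCK")  # -> "TEMP01"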
def parse_xml_removing_namespace(file_path):
"""Creates an Element object from a given xml file, removing the namespace.
Args:
file_path (string): The location of the xml file
Returns:
Element : A object holding all the xml information
"""
it = ElementTree.iterparse(file_path)
for _, el in it:
if ':' in el.tag:
el.tag = el.tag.split('}', 1)[1]
return it.root
def waveform_to_string(data):
"""
Args:
data: waveform as null terminated string
    Returns: waveform as a string
"""
output = six.text_type()
for i in data:
if i == 0:
break
output += six.unichr(i)
return output
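# Tiny example (not part of the original module): characters after the first
# zero terminator are ignored.
def _example_waveform_to_string():
    assert waveform_to_string([72, 105, 0, 120]) == "Hi"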
def ioc_restart_pending(ioc_pv, channel_access):
"""Check if a particular IOC is restarting. Assumes it has suitable restart PV
Args:
ioc_pv: The base PV for the IOC with instrument PV prefix
channel_access (ChannelAccess): The channel access object to be used for accessing PVs
    Returns:
bool: True if restarting, else False
"""
return channel_access.caget(ioc_pv + ":RESTART", as_string=True) == "Busy"
def retry(max_attempts, interval, exception):
"""
Attempt to perform a function a number of times in specified intervals before failing.
Args:
max_attempts: The maximum number of tries to execute the function
interval: The retry interval
exception: The type of exception to handle by retrying
Returns:
The input function wrapped in a retry loop
"""
def _tags_decorator(func):
def _wrapper(*args, **kwargs):
            attempts = 0
            last_error = ValueError("Max attempts should be > 0, it is {}".format(max_attempts))
            while attempts < max_attempts:
                try:
                    return func(*args, **kwargs)
                except exception as ex:
                    # Keep a reference; Python 3 clears 'ex' when the except block ends
                    last_error = ex
                    attempts += 1
                    time.sleep(interval)
            raise MaxAttemptsExceededException(last_error)
return _wrapper
return _tags_decorator
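# Usage sketch (not part of the original module): retry a flaky call three
# times, half a second apart, before giving up. The wrapped function and the
# exception type are invented for illustration.
def _example_retry_usage():
    @retry(3, 0.5, IOError)
    def _read_flaky_resource():
        raise IOError("resource not ready")
    try:
        return _read_flaky_resource()
    except MaxAttemptsExceededException:
        return "gave up after 3 attempts"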
def remove_from_end(string, text_to_remove):
"""
Remove a String from the end of a string if it exists
Args:
string (str): string to edit
text_to_remove (str): the text to remove
Returns: the string with the text removed
"""
if string is not None and string.endswith(text_to_remove):
return string[:-len(text_to_remove)]
return string
def lowercase_and_make_unique(in_list):
"""
Takes a collection of strings, and returns it with all strings lowercased and with duplicates removed.
Args:
in_list (List[str]): the collection of strings to operate on
Returns:
set[str]: the lowercased unique set of strings.
"""
return {x.lower() for x in in_list}
def parse_date_time_arg_exit_on_fail(date_arg, error_code=1):
"""
Parse a date argument and exit the program with an error code if that argument is not a date
Args:
date_arg: date argument to parse
error_code: the error code to exit with if it is not a date
Returns:
a date time of the argument
"""
try:
return datetime.datetime.strptime(date_arg, "%Y-%m-%dT%H:%M:%S")
except (ValueError, TypeError) as ex:
print(f"Can not interpret date '{date_arg}' error: {ex}")
exit(error_code)
|
|
""" The configuration bits of TileStache.
TileStache configuration is stored in JSON files, and is composed of two main
top-level sections: "cache" and "layers". There are examples of both in this
minimal sample configuration:
{
"cache": {"name": "Test"},
"layers": {
"example": {
"provider": {"name": "mapnik", "mapfile": "examples/style.xml"},,
"projection": "spherical mercator"
}
}
}
The contents of the "cache" section are described in greater detail in the
TileStache.Caches module documentation. Here is a different sample:
"cache": {
"name": "Disk",
"path": "/tmp/stache",
"umask": "0000"
}
The "layers" section is a dictionary of layer names which are specified in the
URL of an individual tile. More detail on the configuration of individual layers
can be found in the TileStache.Core module documentation. Another sample:
{
"cache": ...,
"layers":
{
"example-name":
{
"provider": { ... },
"metatile": { ... },
"preview": { ... },
"stale lock timeout": ...,
"projection": ...
}
}
}
Configuration also supports these additional settings:
- "logging": one of "debug", "info", "warning", "error" or "critical", as
described in Python's logging module: http://docs.python.org/howto/logging.html
- "index": configurable index pages for the front page of an instance.
A custom index can be specified as a filename relative to the configuration
location. Typically an HTML document would be given here, but other kinds of
files such as images can be used, with MIME content-type headers determined
by mimetypes.guess_type. A simple text greeting is displayed if no index
is provided.
In-depth explanations of the layer components can be found in the module
documentation for TileStache.Providers, TileStache.Core, and TileStache.Geography.
"""
import sys
import logging
from os.path import join as pathjoin
from mimetypes import guess_type
from json import dumps
try:
from json import dumps as json_dumps
except ImportError:
from simplejson import dumps as json_dumps
from ModestMaps.Geo import Location
from ModestMaps.Core import Coordinate
from . import Core
from . import Caches
from . import Providers
from . import Geography
from . import PixelEffects
from .py3_compat import reduce, urljoin, urlparse, urlopen
class Configuration:
""" A complete site configuration, with a collection of Layer objects.
Attributes:
cache:
Cache instance, e.g. TileStache.Caches.Disk etc.
See TileStache.Caches for details on what makes
a usable cache.
layers:
Dictionary of layers keyed by name.
When creating a custom layers dictionary, e.g. for dynamic
layer collections backed by some external configuration,
these dictionary methods must be provided for a complete
collection of layers:
keys():
Return list of layer name strings.
items():
Return list of (name, layer) pairs.
__contains__(key):
Return boolean true if given key is an existing layer.
__getitem__(key):
Return existing layer object for given key or raise KeyError.
dirpath:
Local filesystem path for this configuration,
useful for expanding relative paths.
Optional attribute:
index:
Mimetype, content tuple for default index response.
"""
def __init__(self, cache, dirpath):
self.cache = cache
self.dirpath = dirpath
self.layers = {}
self.index = 'text/plain', 'TileStache bellows hello.'
class Bounds:
""" Coordinate bounding box for tiles.
"""
def __init__(self, upper_left_high, lower_right_low):
""" Two required Coordinate objects defining tile pyramid bounds.
Boundaries are inclusive: upper_left_high is the left-most column,
upper-most row, and highest zoom level; lower_right_low is the
            right-most column, furthest-down row, and lowest zoom level.
"""
self.upper_left_high = upper_left_high
self.lower_right_low = lower_right_low
def excludes(self, tile):
""" Check a tile Coordinate against the bounds, return true/false.
"""
if tile.zoom > self.upper_left_high.zoom:
# too zoomed-in
return True
if tile.zoom < self.lower_right_low.zoom:
# too zoomed-out
return True
# check the top-left tile corner against the lower-right bound
_tile = tile.zoomTo(self.lower_right_low.zoom)
if _tile.column > self.lower_right_low.column:
# too far right
return True
if _tile.row > self.lower_right_low.row:
# too far down
return True
# check the bottom-right tile corner against the upper-left bound
__tile = tile.right().down().zoomTo(self.upper_left_high.zoom)
if __tile.column < self.upper_left_high.column:
# too far left
return True
if __tile.row < self.upper_left_high.row:
# too far up
return True
return False
def __str__(self):
return 'Bound %s - %s' % (self.upper_left_high, self.lower_right_low)
class BoundsList:
""" Multiple coordinate bounding boxes for tiles.
"""
def __init__(self, bounds):
""" Single argument is a list of Bounds objects.
"""
self.bounds = bounds
def excludes(self, tile):
""" Check a tile Coordinate against the bounds, return false if none match.
"""
for bound in self.bounds:
if not bound.excludes(tile):
return False
# Nothing worked.
return True
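# Illustrative sketch (not part of the original module): a Bounds whose
# upper-left corner sits at zoom 4 and whose lower-right corner sits at
# zoom 2, so only tiles between those zooms and inside the box pass.
# ModestMaps' Coordinate takes (row, column, zoom).
def _example_bounds_usage():
    bounds = Bounds(Coordinate(0, 0, 4), Coordinate(3, 3, 2))
    assert not bounds.excludes(Coordinate(1, 1, 3))   # inside the pyramid
    assert bounds.excludes(Coordinate(0, 0, 9))       # too zoomed-in
    assert bounds.excludes(Coordinate(0, 10, 2))      # too far right
    return bounds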
def buildConfiguration(config_dict, dirpath='.'):
""" Build a configuration dictionary into a Configuration object.
The second argument is an optional dirpath that specifies where in the
local filesystem the parsed dictionary originated, to make it possible
to resolve relative paths. It might be a path or more likely a full
URL including the "file://" prefix.
"""
scheme, h, path, p, q, f = urlparse(dirpath)
if scheme in ('', 'file'):
sys.path.insert(0, path)
cache_dict = config_dict.get('cache', {})
cache = _parseConfigCache(cache_dict, dirpath)
config = Configuration(cache, dirpath)
for (name, layer_dict) in config_dict.get('layers', {}).items():
config.layers[name] = _parseConfigLayer(layer_dict, config, dirpath)
if 'index' in config_dict:
index_href = urljoin(dirpath, config_dict['index'])
index_body = urlopen(index_href).read()
index_type = guess_type(index_href)
config.index = index_type[0], index_body
if 'logging' in config_dict:
level = config_dict['logging'].upper()
if hasattr(logging, level):
logging.basicConfig(level=getattr(logging, level))
return config
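# Minimal sketch (not part of the original module): building a Configuration
# from an in-memory dictionary, mirroring the "Test" cache sample in the
# module docstring; no layers are defined, so only the cache is exercised.
def _example_build_configuration():
    config = buildConfiguration({"cache": {"name": "Test"}, "layers": {}}, dirpath='.')
    assert isinstance(config.cache, Caches.Test)
    return config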
def enforcedLocalPath(relpath, dirpath, context='Path'):
""" Return a forced local path, relative to a directory.
Throw an error if the combination of path and directory seems to
specify a remote path, e.g. "/path" and "http://example.com".
Although a configuration file can be parsed from a remote URL, some
paths (e.g. the location of a disk cache) must be local to the server.
In cases where we mix a remote configuration location with a local
cache location, e.g. "http://example.com/tilestache.cfg", the disk path
must include the "file://" prefix instead of an ambiguous absolute
path such as "/tmp/tilestache".
"""
parsed_dir = urlparse(dirpath)
parsed_rel = urlparse(relpath)
if parsed_rel.scheme not in ('file', ''):
raise Core.KnownUnknown('%s path must be a local file path, absolute or "file://", not "%s".' % (context, relpath))
if parsed_dir.scheme not in ('file', '') and parsed_rel.scheme != 'file':
raise Core.KnownUnknown('%s path must start with "file://" in a remote configuration ("%s" relative to %s)' % (context, relpath, dirpath))
if parsed_rel.scheme == 'file':
# file:// is an absolute local reference for the disk cache.
return parsed_rel.path
if parsed_dir.scheme == 'file':
# file:// is an absolute local reference for the directory.
return urljoin(parsed_dir.path, parsed_rel.path)
# nothing has a scheme, it's probably just a bunch of
# dumb local paths, so let's see what happens next.
return pathjoin(dirpath, relpath)
def _parseConfigCache(cache_dict, dirpath):
""" Used by parseConfig() to parse just the cache parts of a config.
"""
if 'name' in cache_dict:
_class = Caches.getCacheByName(cache_dict['name'])
kwargs = {}
def add_kwargs(*keys):
""" Populate named keys in kwargs from cache_dict.
"""
for key in keys:
if key in cache_dict:
kwargs[key] = cache_dict[key]
if _class is Caches.Test:
if cache_dict.get('verbose', False):
kwargs['logfunc'] = lambda msg: sys.stderr.write(msg + '\n')
elif _class is Caches.Disk:
kwargs['path'] = enforcedLocalPath(cache_dict['path'], dirpath, 'Disk cache path')
if 'umask' in cache_dict:
kwargs['umask'] = int(cache_dict['umask'], 8)
add_kwargs('dirs', 'gzip')
elif _class is Caches.Multi:
kwargs['tiers'] = [_parseConfigCache(tier_dict, dirpath)
for tier_dict in cache_dict['tiers']]
elif _class is Caches.Memcache.Cache:
if 'key prefix' in cache_dict:
kwargs['key_prefix'] = cache_dict['key prefix']
add_kwargs('servers', 'lifespan', 'revision')
elif _class is Caches.Redis.Cache:
if 'key prefix' in cache_dict:
kwargs['key_prefix'] = cache_dict['key prefix']
add_kwargs('host', 'port', 'db')
elif _class is Caches.S3.Cache:
add_kwargs('bucket', 'access', 'secret', 'use_locks', 'path', 'reduced_redundancy', 'policy')
else:
raise Exception('Unknown cache: %s' % cache_dict['name'])
elif 'class' in cache_dict:
_class = Core.loadClassPath(cache_dict['class'])
kwargs = cache_dict.get('kwargs', {})
kwargs = dict( [(str(k), v) for (k, v) in kwargs.items()] )
else:
raise Exception('Missing required cache name or class: %s' % json_dumps(cache_dict))
cache = _class(**kwargs)
return cache
def _parseLayerBounds(bounds_dict, projection):
"""
"""
north, west = bounds_dict.get('north', 89), bounds_dict.get('west', -180)
south, east = bounds_dict.get('south', -89), bounds_dict.get('east', 180)
high, low = bounds_dict.get('high', 31), bounds_dict.get('low', 0)
try:
ul_hi = projection.locationCoordinate(Location(north, west)).zoomTo(high)
lr_lo = projection.locationCoordinate(Location(south, east)).zoomTo(low)
except TypeError:
raise Core.KnownUnknown('Bad bounds for layer, need north, south, east, west, high, and low: ' + dumps(bounds_dict))
return Bounds(ul_hi, lr_lo)
def _parseConfigLayer(layer_dict, config, dirpath):
""" Used by parseConfig() to parse just the layer parts of a config.
"""
projection = layer_dict.get('projection', 'spherical mercator')
projection = Geography.getProjectionByName(projection)
#
# Add cache lock timeouts and preview arguments
#
layer_kwargs = {}
if 'cache lifespan' in layer_dict:
layer_kwargs['cache_lifespan'] = int(layer_dict['cache lifespan'])
if 'stale lock timeout' in layer_dict:
layer_kwargs['stale_lock_timeout'] = int(layer_dict['stale lock timeout'])
if 'write cache' in layer_dict:
layer_kwargs['write_cache'] = bool(layer_dict['write cache'])
if 'allowed origin' in layer_dict:
layer_kwargs['allowed_origin'] = str(layer_dict['allowed origin'])
if 'maximum cache age' in layer_dict:
layer_kwargs['max_cache_age'] = int(layer_dict['maximum cache age'])
if 'redirects' in layer_dict:
layer_kwargs['redirects'] = dict(layer_dict['redirects'])
if 'tile height' in layer_dict:
layer_kwargs['tile_height'] = int(layer_dict['tile height'])
if 'preview' in layer_dict:
preview_dict = layer_dict['preview']
for (key, func) in zip(('lat', 'lon', 'zoom', 'ext'), (float, float, int, str)):
if key in preview_dict:
layer_kwargs['preview_' + key] = func(preview_dict[key])
#
# Do the bounds
#
if 'bounds' in layer_dict:
if type(layer_dict['bounds']) is dict:
layer_kwargs['bounds'] = _parseLayerBounds(layer_dict['bounds'], projection)
elif type(layer_dict['bounds']) is list:
bounds = [_parseLayerBounds(b, projection) for b in layer_dict['bounds']]
layer_kwargs['bounds'] = BoundsList(bounds)
else:
raise Core.KnownUnknown('Layer bounds must be a dictionary, not: ' + dumps(layer_dict['bounds']))
#
# Do the metatile
#
meta_dict = layer_dict.get('metatile', {})
metatile_kwargs = {}
for k in ('buffer', 'rows', 'columns'):
if k in meta_dict:
metatile_kwargs[k] = int(meta_dict[k])
metatile = Core.Metatile(**metatile_kwargs)
#
# Do the per-format options
#
jpeg_kwargs = {}
png_kwargs = {}
if 'jpeg options' in layer_dict:
jpeg_kwargs = dict([(str(k), v) for (k, v) in layer_dict['jpeg options'].items()])
if 'png options' in layer_dict:
png_kwargs = dict([(str(k), v) for (k, v) in layer_dict['png options'].items()])
#
# Do pixel effect
#
pixel_effect = None
if 'pixel effect' in layer_dict:
pixel_effect_dict = layer_dict['pixel effect']
pixel_effect_name = pixel_effect_dict.get('name')
if pixel_effect_name in PixelEffects.all:
pixel_effect_kwargs = {}
for k, v in pixel_effect_dict.items():
if k != 'name':
pixel_effect_kwargs[str(k)] = float(v)
PixelEffectClass = PixelEffects.all[pixel_effect_name]
pixel_effect = PixelEffectClass(**pixel_effect_kwargs)
#
# Do the provider
#
provider_dict = layer_dict['provider']
if 'name' in provider_dict:
_class = Providers.getProviderByName(provider_dict['name'])
provider_kwargs = _class.prepareKeywordArgs(provider_dict)
elif 'class' in provider_dict:
_class = Core.loadClassPath(provider_dict['class'])
provider_kwargs = provider_dict.get('kwargs', {})
provider_kwargs = dict( [(str(k), v) for (k, v) in provider_kwargs.items()] )
else:
raise Exception('Missing required provider name or class: %s' % json_dumps(provider_dict))
#
# Finish him!
#
layer = Core.Layer(config, projection, metatile, **layer_kwargs)
layer.provider = _class(layer, **provider_kwargs)
layer.setSaveOptionsJPEG(**jpeg_kwargs)
layer.setSaveOptionsPNG(**png_kwargs)
layer.pixel_effect = pixel_effect
return layer
|
|
#!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
import argparse
import textwrap
import json
from os.path import join
from urllib2 import URLError
from dateutil.parser import parse
import pycurl
from boto.exception import NoAuthHandlerFound
from downloader import Downloader, IncorrectSceneId
from search import Search
from uploader import Uploader
from utils import reformat_date, convert_to_integer_list, timer, exit, get_file
from mixins import VerbosityMixin
from image import Process, FileDoesNotExist
from __init__ import __version__
import settings
DESCRIPTION = """Landsat-util is a command line utility that makes it easy to
search, download, and process Landsat imagery.
Commands:
Search:
landsat.py search [-p --pathrow] [--lat] [--lon] [-l LIMIT] [-s START] [-e END] [-c CLOUD] [-h]
optional arguments:
-p, --pathrow Paths and Rows in order separated by comma. Use quotes "001,003".
Example: path,row,path,row 001,001,190,204
--lat Latitude
--lon Longitude
-l LIMIT, --limit LIMIT
Search return results limit default is 10
-s START, --start START
Start Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-e END, --end END End Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-c CLOUD, --cloud CLOUD
                        Maximum cloud percentage. Default: 20 percent
-h, --help Show this help message and exit
Download:
landsat download sceneID [sceneID ...] [-h] [-b --bands]
positional arguments:
sceneID Provide Full sceneIDs. You can add as many sceneIDs as you wish
                Example: landsat download LC81660392014196LGN00
optional arguments:
-b --bands If you specify bands, landsat-util will try to download the band from S3.
If the band does not exist, an error is returned
-h, --help Show this help message and exit
-d, --dest Destination path
-p, --process Process the image after download
--pansharpen Whether to also pansharpen the processed image.
Pansharpening requires larger memory
-u --upload Upload to S3 after the image processing completed
    --key              Amazon S3 Access Key (You can also set AWS_ACCESS_KEY_ID as an
                       environment variable)
    --secret           Amazon S3 Secret Key (You can also set AWS_SECRET_ACCESS_KEY as an
                       environment variable)
--bucket Bucket name (required if uploading to s3)
--region URL to S3 region e.g. s3-us-west-2.amazonaws.com
--force-unzip Force unzip tar file
Process:
landsat.py process path [-h] [-b --bands] [-p --pansharpen]
positional arguments:
path Path to the landsat image folder or zip file
optional arguments:
-b --bands Specify bands. The bands should be written in sequence with no spaces
Default: Natural colors (432)
Example --bands 432
    --pansharpen       Whether to also pansharpen the processed image.
Pansharpening requires larger memory
-v, --verbose Show verbose output
-h, --help Show this help message and exit
-u --upload Upload to S3 after the image processing completed
    --key              Amazon S3 Access Key (You can also set AWS_ACCESS_KEY_ID as an
                       environment variable)
    --secret           Amazon S3 Secret Key (You can also set AWS_SECRET_ACCESS_KEY as an
                       environment variable)
--bucket Bucket name (required if uploading to s3)
--region URL to S3 region e.g. s3-us-west-2.amazonaws.com
--force-unzip Force unzip tar file
"""
def args_options():
""" Generates an arugment parser.
:returns:
Parser object
"""
parser = argparse.ArgumentParser(prog='landsat',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
subparsers = parser.add_subparsers(help='Landsat Utility',
dest='subs')
parser.add_argument('--version', action='version', version='%(prog)s version ' + __version__)
# Search Logic
parser_search = subparsers.add_parser('search',
help='Search Landsat metdata')
# Global search options
parser_search.add_argument('-l', '--limit', default=10, type=int,
help='Search return results limit\n'
                                    'default is 10')
parser_search.add_argument('-s', '--start',
help='Start Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-e', '--end',
help='End Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-c', '--cloud', type=float, default=20.0,
help='Maximum cloud percentage '
                                    'default is 20 percent')
parser_search.add_argument('-p', '--pathrow',
                               help='Paths and Rows in order separated by comma. Use quotes ("001"). '
'Example: path,row,path,row 001,001,190,204')
parser_search.add_argument('--lat', type=float, help='The latitude')
parser_search.add_argument('--lon', type=float, help='The longitude')
parser_download = subparsers.add_parser('download',
help='Download images from Google Storage')
parser_download.add_argument('scenes',
metavar='sceneID',
nargs="+",
help="Provide Full sceneID, e.g. LC81660392014196LGN00")
parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download '
'the band from S3. If the band does not exist, an error is returned')
parser_download.add_argument('-d', '--dest', help='Destination path')
parser_download.add_argument('-p', '--process', help='Process the image after download', action='store_true')
parser_download.add_argument('--pansharpen', action='store_true',
                                 help='Whether to also pansharpen the processed '
'image. Pansharpening requires larger memory')
parser_download.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
    parser_download.add_argument('--key', help='Amazon S3 Access Key (You can also set AWS_ACCESS_KEY_ID as an '
                                               'environment variable)')
    parser_download.add_argument('--secret', help='Amazon S3 Secret Key (You can also set AWS_SECRET_ACCESS_KEY '
                                                  'as an environment variable)')
parser_download.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_download.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_download.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
parser_process = subparsers.add_parser('process', help='Process Landsat imagery')
parser_process.add_argument('path',
help='Path to the compressed image file')
parser_process.add_argument('--pansharpen', action='store_true',
                                help='Whether to also pansharpen the processed '
'image. Pansharpening requires larger memory')
    parser_process.add_argument('-b', '--bands', help='Specify band combinations. Default is 432. '
                                                      'Example: --bands 321')
parser_process.add_argument('-v', '--verbose', action='store_true',
help='Turn on verbosity')
parser_process.add_argument('-u', '--upload', action='store_true',
help='Upload to S3 after the image processing completed')
    parser_process.add_argument('--key', help='Amazon S3 Access Key (You can also set AWS_ACCESS_KEY_ID as an '
                                              'environment variable)')
    parser_process.add_argument('--secret', help='Amazon S3 Secret Key (You can also set AWS_SECRET_ACCESS_KEY '
                                                 'as an environment variable)')
parser_process.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
parser_process.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
parser_process.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
return parser
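# Usage sketch (not part of the original module): parsing a search command
# line without touching the network; the coordinates are arbitrary.
def _example_parse_search_args():
    parser = args_options()
    args = parser.parse_args(['search', '--lat', '41.0', '--lon', '-105.2',
                              '--cloud', '10'])
    return args.subs, args.lat, args.lon, args.cloud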
def main(args):
"""
Main function - launches the program.
:param args:
The Parser arguments
:type args:
Parser object
:returns:
List
:example:
>>> ["The latitude and longitude values must be valid numbers", 1]
"""
v = VerbosityMixin()
if args:
if args.subs == 'process':
verbose = True if args.verbose else False
force_unzip = True if args.force_unzip else False
stored = process_image(args.path, args.bands, verbose, args.pansharpen, force_unzip)
if args.upload:
u = Uploader(args.key, args.secret, args.region)
u.run(args.bucket, get_file(stored), stored)
return ["The output is stored at %s" % stored]
elif args.subs == 'search':
try:
if args.start:
args.start = reformat_date(parse(args.start))
if args.end:
args.end = reformat_date(parse(args.end))
except (TypeError, ValueError):
return ["You date format is incorrect. Please try again!", 1]
s = Search()
try:
lat = float(args.lat) if args.lat else None
lon = float(args.lon) if args.lon else None
except ValueError:
return ["The latitude and longitude values must be valid numbers", 1]
result = s.search(paths_rows=args.pathrow,
lat=lat,
lon=lon,
limit=args.limit,
start_date=args.start,
end_date=args.end,
cloud_max=args.cloud)
if result['status'] == 'SUCCESS':
v.output('%s items were found' % result['total'], normal=True, arrow=True)
if result['total'] > 100:
return ['Over 100 results. Please narrow your search', 1]
else:
v.output(json.dumps(result, sort_keys=True, indent=4), normal=True, color='green')
return ['Search completed!']
elif result['status'] == 'error':
return [result['message'], 1]
elif args.subs == 'download':
d = Downloader(download_dir=args.dest)
try:
bands = convert_to_integer_list(args.bands)
if args.pansharpen:
bands.append(8)
downloaded = d.download(args.scenes, bands)
if args.process:
force_unzip = True if args.force_unzip else False
for scene, src in downloaded.iteritems():
if args.dest:
path = join(args.dest, scene)
else:
path = join(settings.DOWNLOAD_DIR, scene)
# Keep using Google if the image is before 2015
if src == 'google':
path = path + '.tar.bz'
stored = process_image(path, args.bands, False, args.pansharpen, force_unzip)
if args.upload:
try:
u = Uploader(args.key, args.secret, args.region)
except NoAuthHandlerFound:
return ["Could not authenticate with AWS", 1]
except URLError:
return ["Connection timeout. Probably the region parameter is incorrect", 1]
u.run(args.bucket, get_file(stored), stored)
v.output("The output is stored at %s" % stored, normal=True, arrow=True)
return ['Image Processing Completed', 0]
else:
return ['Download Completed', 0]
except IncorrectSceneId:
return ['The SceneID provided was incorrect', 1]
def process_image(path, bands=None, verbose=False, pansharpen=False, force_unzip=None):
""" Handles constructing and image process.
:param path:
The path to the image that has to be processed
:type path:
String
:param bands:
List of bands that has to be processed. (optional)
:type bands:
List
:param verbose:
Sets the level of verbosity. Default is False.
:type verbose:
boolean
:param pansharpen:
Whether to pansharpen the image. Default is False.
:type pansharpen:
boolean
:returns:
(String) path to the processed image
"""
try:
bands = convert_to_integer_list(bands)
p = Process(path, bands=bands, verbose=verbose, force_unzip=force_unzip)
except IOError:
exit("Zip file corrupted", 1)
except FileDoesNotExist as e:
exit(e.message, 1)
return p.run(pansharpen)
def __main__():
global parser
parser = args_options()
args = parser.parse_args()
with timer():
exit(*main(args))
if __name__ == "__main__":
try:
__main__()
except (KeyboardInterrupt, pycurl.error):
exit('Received Ctrl + C... Exiting! Bye.', 1)
|
|
"""
This script reads the compact workflows.yml file, and generates files in
.github/workflows/ suitable for the limited expressivity of GitHub's workflow
definition language.
The point is that we had/have a lot of duplications between files in
.github/workflows/, so we use this script to make it easier to update them
and keep them in sync.
"""
import enum
import pathlib
import yaml
ROOT_PATH = pathlib.Path(__file__).parent
DEFINITION_PATH = ROOT_PATH / "workflows.yml"
GH_WORKFLOW_DIR = ROOT_PATH / ".github" / "workflows"
class script:
def __init__(self, *lines):
self.data = "\n".join(lines)
def script_representer(dumper, data: script):
return dumper.represent_scalar("tag:yaml.org,2002:str", data.data, style="|")
class Dumper(yaml.Dumper):
pass
Dumper.add_representer(script, script_representer)
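# Small sketch (not part of the original script): dumping a step whose "run"
# value is a ``script`` shows why the custom representer exists: the commands
# are emitted as a YAML literal block ("run: |-") instead of a quoted one-liner.
def _example_script_dump():
    step = {"name": "Build", "run": script("./configure", "make -j 4")}
    return yaml.dump(step, Dumper=Dumper)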
class VersionFlavor(enum.Enum):
STABLE = "stable"
"""A statically defined version, that we already tested irctest on.
This is ran on PRs and master, because failure guarantees it's a bug in
the new irctest commit/PR."""
RELEASE = "release"
"""The last release of the project. This should usually pass.
We don't currently use this."""
DEVEL = "devel"
"""The last commit of the project. This allows us to catch bugs in other
software early in their development process."""
DEVEL_RELEASE = "devel_release"
"""Ditto, but if the project uses a specific branch for their current
release series, it uses that branch instead"""
def get_install_steps(*, software_config, software_id, version_flavor):
name = software_config["name"]
if "install_steps" in software_config:
path = "placeholder" # TODO: remove this
install_steps = software_config["install_steps"][version_flavor.value]
if install_steps is None:
return None
else:
ref = software_config["refs"][version_flavor.value]
if ref is None:
return None
path = software_config["path"]
install_steps = [
{
"name": f"Checkout {name}",
"uses": "actions/checkout@v2",
"with": {
"repository": software_config["repository"],
"ref": ref,
"path": path,
},
},
*software_config.get("pre_deps", []),
{
"name": f"Build {name}",
"run": script(software_config["build_script"]),
},
]
return install_steps
def get_build_job(*, software_config, software_id, version_flavor):
if not software_config["separate_build_job"]:
return None
if "install_steps" in software_config:
path = "placeholder" # TODO: remove this
else:
path = software_config["path"]
if software_config.get("cache", True):
cache = [
{
"name": "Cache dependencies",
"uses": "actions/cache@v2",
"with": {
"path": f"~/.cache\n${{ github.workspace }}/{path}\n",
"key": "3-${{ runner.os }}-"
+ software_id
+ "-"
+ version_flavor.value,
},
}
]
else:
cache = []
install_steps = get_install_steps(
software_config=software_config,
software_id=software_id,
version_flavor=version_flavor,
)
if install_steps is None:
return None
return {
"runs-on": "ubuntu-latest",
"steps": [
{
"name": "Create directories",
"run": "cd ~/; mkdir -p .local/ go/",
},
*cache,
{"uses": "actions/checkout@v2"},
{
"name": "Set up Python 3.7",
"uses": "actions/setup-python@v2",
"with": {"python-version": 3.7},
},
*install_steps,
*upload_steps(software_id),
],
}
def get_test_job(*, config, test_config, test_id, version_flavor, jobs):
if version_flavor.value in test_config.get("exclude_versions", []):
return None
env = ""
needs = []
downloads = []
install_steps = []
for software_id in test_config.get("software", []):
if software_id == "anope":
# TODO: don't hardcode anope here
software_config = {"separate_build_job": True}
else:
software_config = config["software"][software_id]
env += test_config.get("env", {}).get(version_flavor.value, "") + " "
if "prefix" in software_config:
env += f"PATH={software_config['prefix']}/bin:$PATH "
if software_config["separate_build_job"]:
needs.append(f"build-{software_id}")
downloads.append(
{
"name": "Download build artefacts",
"uses": "actions/download-artifact@v2",
"with": {"name": f"installed-{software_id}", "path": "~"},
}
)
else:
new_install_steps = get_install_steps(
software_config=software_config,
software_id=software_id,
version_flavor=version_flavor,
)
if new_install_steps is None:
# This flavor does not need to be built
return None
install_steps.extend(new_install_steps)
if not set(needs) <= jobs:
# One of the dependencies does not exist for this flavor
assert version_flavor != VersionFlavor.STABLE, set(needs) - jobs
return None
if downloads:
unpack = [
{
"name": "Unpack artefacts",
"run": r"cd ~; find -name 'artefacts-*.tar.gz' -exec tar -xzf '{}' \;",
},
]
else:
# All the software is built in the same job, nothing to unpack
unpack = []
return {
"runs-on": "ubuntu-latest",
"needs": needs,
"steps": [
{"uses": "actions/checkout@v2"},
{
"name": "Set up Python 3.7",
"uses": "actions/setup-python@v2",
"with": {"python-version": 3.7},
},
*downloads,
*unpack,
*install_steps,
{
"name": "Install Atheme",
"run": "sudo apt-get install atheme-services",
},
{
"name": "Install irctest dependencies",
"run": script(
"python -m pip install --upgrade pip",
"pip install pytest pytest-xdist -r requirements.txt",
*(
software_config["extra_deps"]
if "extra_deps" in software_config
else []
),
),
},
{
"name": "Test with pytest",
"run": (
f"PYTEST_ARGS='--junit-xml pytest.xml' "
f"PATH=$HOME/.local/bin:$PATH "
f"{env}make {test_id}"
),
},
{
"name": "Publish results",
"if": "always()",
"uses": "actions/upload-artifact@v2",
"with": {
"name": f"pytest results {test_id} ({version_flavor.value})",
"path": "pytest.xml",
},
},
],
}
def get_build_job_anope():
return {
"runs-on": "ubuntu-latest",
"steps": [
{"uses": "actions/checkout@v2"},
{
"name": "Create directories",
"run": "cd ~/; mkdir -p .local/ go/",
},
{
"name": "Cache Anope",
"uses": "actions/cache@v2",
"with": {
"path": "~/.cache\n${{ github.workspace }}/anope\n",
"key": "3-${{ runner.os }}-anope-2.0.9",
},
},
{
"name": "Checkout Anope",
"uses": "actions/checkout@v2",
"with": {
"repository": "anope/anope",
"ref": "2.0.9",
"path": "anope",
},
},
{
"name": "Build Anope",
"run": script(
"cd $GITHUB_WORKSPACE/anope/",
"cp $GITHUB_WORKSPACE/data/anope/* .",
"CFLAGS=-O0 ./Config -quick",
"make -C build -j 4",
"make -C build install",
),
},
*upload_steps("anope"),
],
}
def upload_steps(software_id):
"""Make a tarball (to preserve permissions) and upload"""
return [
{
"name": "Make artefact tarball",
"run": f"cd ~; tar -czf artefacts-{software_id}.tar.gz .local/ go/",
},
{
"name": "Upload build artefacts",
"uses": "actions/upload-artifact@v2",
"with": {
"name": f"installed-{software_id}",
"path": "~/artefacts-*.tar.gz",
# We only need it for the next step of the workflow, so let's
# just delete it ASAP to avoid wasting resources
"retention-days": 1,
},
},
]
def generate_workflow(config: dict, version_flavor: VersionFlavor):
on: dict
if version_flavor == VersionFlavor.STABLE:
on = {"push": None, "pull_request": None}
else:
        # Run every Saturday and Sunday at 8:51 UTC, and every day at 17:51
        # (minute chosen at random; hours and days are chosen so that I'm
        # available to fix bugs it detects)
on = {
"schedule": [
{"cron": "51 8 * * 6"},
{"cron": "51 8 * * 0"},
{"cron": "51 17 * * *"},
],
"workflow_dispatch": None,
}
jobs = {}
jobs["build-anope"] = get_build_job_anope()
for software_id in config["software"]:
software_config = config["software"][software_id]
build_job = get_build_job(
software_config=software_config,
software_id=software_id,
version_flavor=version_flavor,
)
if build_job is not None:
jobs[f"build-{software_id}"] = build_job
for test_id in config["tests"]:
test_config = config["tests"][test_id]
test_job = get_test_job(
config=config,
test_config=test_config,
test_id=test_id,
version_flavor=version_flavor,
jobs=set(jobs),
)
if test_job is not None:
jobs[f"test-{test_id}"] = test_job
jobs["publish-test-results"] = {
"name": "Publish Unit Tests Results",
"needs": sorted({f"test-{test_id}" for test_id in config["tests"]} & set(jobs)),
"runs-on": "ubuntu-latest",
# the build-and-test job might be skipped, we don't need to run
# this job then
"if": "success() || failure()",
"steps": [
{
"name": "Download Artifacts",
"uses": "actions/download-artifact@v2",
"with": {"path": "artifacts"},
},
{
"name": "Publish Unit Test Results",
"uses": "EnricoMi/publish-unit-test-result-action@v1",
"with": {"files": "artifacts/**/*.xml"},
},
],
}
workflow = {
"name": f"irctest with {version_flavor.value} versions",
"on": on,
"jobs": jobs,
}
workflow_filename = GH_WORKFLOW_DIR / f"test-{version_flavor.value}.yml"
with open(workflow_filename, "wt") as fd:
fd.write("# This file was auto-generated by make_workflows.py.\n")
fd.write("# Do not edit it manually, modifications will be lost.\n\n")
fd.write(yaml.dump(workflow, Dumper=Dumper))
def main():
with open(DEFINITION_PATH) as fd:
config = yaml.load(fd, Loader=yaml.Loader)
generate_workflow(config, version_flavor=VersionFlavor.STABLE)
generate_workflow(config, version_flavor=VersionFlavor.DEVEL)
generate_workflow(config, version_flavor=VersionFlavor.DEVEL_RELEASE)
if __name__ == "__main__":
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Point DNS Driver
"""
__all__ = [
'PointDNSException',
'Redirect',
'MailRedirect',
'PointDNSDriver'
]
import sys
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.types import ProviderError
from libcloud.common.types import MalformedResponseError
from libcloud.common.pointdns import PointDNSConnection
from libcloud.common.exceptions import BaseHTTPError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
class PointDNSException(ProviderError):
def __init__(self, value, http_code, driver=None):
super(PointDNSException, self).__init__(value=value,
http_code=http_code,
driver=driver)
self.args = (http_code, value)
class Redirect(object):
"""
Point DNS redirect.
"""
def __init__(self, id, name, data, type, driver, zone, iframe=None,
query=False):
"""
:param id: Redirect id.
:type id: ``str``
:param name: The FQDN for the record.
:type name: ``str``
:param data: The data field. (redirect_to)
:type data: ``str``
:param type: The type of redirects 301, 302 or 0 for iframes.
:type type: ``str``
:param driver: DNSDriver instance.
:type driver: :class:`DNSDriver`
:param zone: Zone where redirect belongs.
:type zone: :class:`Zone`
:param iframe: Title of iframe (optional).
:type iframe: ``str``
:param query: boolean Information about including query string when
redirecting. (optional).
:type query: ``bool``
"""
self.id = str(id) if id else None
self.name = name
self.data = data
self.type = str(type) if type else None
self.driver = driver
self.zone = zone
self.iframe = iframe
self.query = query
def update(self, data, name=None, type=None, iframe=None, query=None):
return self.driver.ex_update_redirect(redirect=self, name=name,
data=data, type=type,
iframe=iframe, query=query)
def delete(self):
return self.driver.ex_delete_redirect(redirect=self)
def __repr__(self):
return ('<PointDNSRedirect: name=%s, data=%s, type=%s ...>' %
(self.name, self.data, self.type))
class MailRedirect(object):
"""
Point DNS mail redirect.
"""
def __init__(self, id, source, destination, zone, driver):
"""
:param id: MailRedirect id.
:type id: ``str``
:param source: The source address of mail redirect.
:type source: ``str``
:param destination: The destination address of mail redirect.
:type destination: ``str``
:param zone: Zone where mail redirect belongs.
:type zone: :class:`Zone`
:param driver: DNSDriver instance.
:type driver: :class:`DNSDriver`
"""
self.id = str(id) if id else None
self.source = source
self.destination = destination
self.zone = zone
self.driver = driver
def update(self, destination, source=None):
return self.driver.ex_update_mail_redirect(mail_r=self,
destination=destination,
                                                   source=source)
def delete(self):
return self.driver.ex_delete_mail_redirect(mail_r=self)
def __repr__(self):
return ('<PointDNSMailRedirect: source=%s, destination=%s,zone=%s ...>'
% (self.source, self.destination, self.zone.id))
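# Usage sketch (not part of the original driver): listing zones and their
# record counts. The credentials are placeholders and the calls require
# network access to the Point DNS API.
def _example_point_dns_usage(key='user@example.com', secret='api-token'):
    driver = PointDNSDriver(key, secret)
    return [(zone.domain, len(driver.list_records(zone)))
            for zone in driver.list_zones()]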
class PointDNSDriver(DNSDriver):
type = Provider.POINTDNS
name = 'Point DNS'
website = 'https://pointhq.com/'
connectionCls = PointDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.ALIAS: 'ALIAS',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SRV: 'SRV',
RecordType.SSHFP: 'SSHFP',
RecordType.TXT: 'TXT'
}
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
response = self.connection.request('/zones')
zones = self._to_zones(response.object)
return zones
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
response = self.connection.request('/zones/%s/records' % zone.id)
records = self._to_records(response.object, zone)
return records
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
try:
response = self.connection.request('/zones/%s' % zone_id)
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise ZoneDoesNotExistError(driver=self,
value="The zone doesn't exists",
zone_id=zone_id)
raise e
zone = self._to_zone(response.object)
return zone
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
try:
response = self.connection.request('/zones/%s/records/%s' %
(zone_id, record_id))
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
                raise RecordDoesNotExistError(value="Record doesn't exist",
driver=self,
record_id=record_id)
raise e
record = self._to_record(response.object, zone_id=zone_id)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
r_json = {'name': domain}
if ttl is not None:
r_json['ttl'] = ttl
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone': r_json})
try:
response = self.connection.request('/zones', method='POST',
data=r_data)
except BaseHTTPError:
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
zone = self._to_zone(response.object)
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
r_json = {'name': name, 'data': data, 'record_type': type}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone_record': r_json})
try:
response = self.connection.request('/zones/%s/records' % zone.id,
method='POST', data=r_data)
except BaseHTTPError:
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
record = self._to_record(response.object, zone=zone)
return record
def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
"""
        Update an existing zone.
:param zone: Zone to update.
:type zone: :class:`Zone`
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (group, user-id). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
r_json = {'name': domain}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone': r_json})
try:
response = self.connection.request('/zones/%s' % zone.id,
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise ZoneDoesNotExistError(value="Zone doesn't exist",
driver=self,
zone_id=zone.id)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
zone = self._to_zone(response.object)
return zone
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
"""
zone = record.zone
r_json = {'name': name, 'data': data, 'record_type': type}
if extra is not None:
r_json.update(extra)
r_data = json.dumps({'zone_record': r_json})
try:
response = self.connection.request('/zones/%s/records/%s' %
(zone.id, record.id),
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise RecordDoesNotExistError(value="Record doesn't exist",
driver=self,
record_id=record.id)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
record = self._to_record(response.object, zone=zone)
return record
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
try:
self.connection.request('/zones/%s' % zone.id, method='DELETE')
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
raise ZoneDoesNotExistError(driver=self,
value="The zone doesn't exists",
zone_id=zone.id)
raise e
return True
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
zone_id = record.zone.id
record_id = record.id
try:
self.connection.request('/zones/%s/records/%s' % (zone_id,
record_id),
method='DELETE')
except MalformedResponseError:
e = sys.exc_info()[1]
if e.body == 'Not found':
                raise RecordDoesNotExistError(value="Record doesn't exist",
driver=self,
record_id=record_id)
raise e
return True
def ex_list_redirects(self, zone):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
        :rtype: ``list`` of :class:`Redirect`
"""
response = self.connection.request('/zones/%s/redirects' % zone.id)
redirects = self._to_redirects(response.object, zone)
return redirects
def ex_list_mail_redirects(self, zone):
"""
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:rtype: ``list`` of :class:`MailRedirect`
"""
response = self.connection.request('/zones/%s/mail_redirects' %
zone.id)
mail_redirects = self._to_mail_redirects(response.object, zone)
return mail_redirects
def ex_create_redirect(self, redirect_to, name, type, zone, iframe=None,
query=None):
"""
:param redirect_to: The data field. (redirect_to)
:type redirect_to: ``str``
:param name: The FQDN for the record.
:type name: ``str``
:param type: The type of redirects 301, 302 or 0 for iframes.
:type type: ``str``
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
:param iframe: Title of iframe (optional).
:type iframe: ``str``
:param query: boolean Information about including query string when
redirecting. (optional).
:type query: ``bool``
        :rtype: :class:`Redirect`
"""
r_json = {'name': name, 'redirect_to': redirect_to}
if type is not None:
r_json['redirect_type'] = type
if iframe is not None:
r_json['iframe_title'] = iframe
if query is not None:
r_json['redirect_query_string'] = query
r_data = json.dumps({'zone_redirect': r_json})
try:
response = self.connection.request('/zones/%s/redirects' % zone.id,
method='POST', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
redirect = self._to_redirect(response.object, zone=zone)
return redirect
def ex_create_mail_redirect(self, destination, source, zone):
"""
:param destination: The destination address of mail redirect.
:type destination: ``str``
:param source: The source address of mail redirect.
:type source: ``str``
:param zone: Zone to list redirects for.
:type zone: :class:`Zone`
        :rtype: :class:`MailRedirect`
"""
r_json = {'destination_address': destination, 'source_address': source}
r_data = json.dumps({'zone_mail_redirect': r_json})
try:
response = self.connection.request('/zones/%s/mail_redirects' %
zone.id, method='POST',
data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
mail_redirect = self._to_mail_redirect(response.object, zone=zone)
return mail_redirect
def ex_get_redirect(self, zone_id, redirect_id):
"""
        :param zone_id: ID of the zone to get the redirect from.
        :type zone_id: ``str``
        :param redirect_id: Redirect id.
        :type redirect_id: ``str``
        :rtype: :class:`Redirect`
"""
try:
response = self.connection.request('/zones/%s/redirects/%s' %
(zone_id, redirect_id))
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
redirect = self._to_redirect(response.object, zone_id=zone_id)
return redirect
def ex_get_mail_redirects(self, zone_id, mail_r_id):
"""
        :param zone_id: ID of the zone the mail redirect belongs to.
        :type zone_id: ``str``
        :param mail_r_id: Mail redirect id.
        :type mail_r_id: ``str``
        :rtype: :class:`MailRedirect`
"""
try:
response = self.connection.request('/zones/%s/mail_redirects/%s' %
(zone_id, mail_r_id))
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find mail redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
mail_redirect = self._to_mail_redirect(response.object,
zone_id=zone_id)
return mail_redirect
def ex_update_redirect(self, redirect, redirect_to=None, name=None,
type=None, iframe=None, query=None):
"""
        :param redirect: Redirect to update
        :type redirect: :class:`Redirect`
:param redirect_to: The data field. (optional).
:type redirect_to: ``str``
:param name: The FQDN for the record.
:type name: ``str``
:param type: The type of redirects 301, 302 or 0 for iframes.
(optional).
:type type: ``str``
:param iframe: Title of iframe (optional).
:type iframe: ``str``
:param query: boolean Information about including query string when
redirecting. (optional).
:type query: ``bool``
        :rtype: :class:`Redirect`
"""
zone_id = redirect.zone.id
r_json = {}
if redirect_to is not None:
r_json['redirect_to'] = redirect_to
if name is not None:
r_json['name'] = name
if type is not None:
            r_json['redirect_type'] = type
if iframe is not None:
r_json['iframe_title'] = iframe
if query is not None:
r_json['redirect_query_string'] = query
r_data = json.dumps({'zone_redirect': r_json})
try:
response = self.connection.request('/zones/%s/redirects/%s' %
(zone_id, redirect.id),
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
redirect = self._to_redirect(response.object, zone=redirect.zone)
return redirect
def ex_update_mail_redirect(self, mail_r, destination, source=None):
"""
:param mail_r: Mail redirect to update
:type mail_r: :class:`MailRedirect`
:param destination: The destination address of mail redirect.
:type destination: ``str``
:param source: The source address of mail redirect. (optional)
:type source: ``str``
        :rtype: :class:`MailRedirect`
"""
zone_id = mail_r.zone.id
r_json = {'destination_address': destination}
if source is not None:
r_json['source_address'] = source
        r_data = json.dumps({'zone_mail_redirect': r_json})
try:
response = self.connection.request('/zones/%s/mail_redirects/%s' %
(zone_id, mail_r.id),
method='PUT', data=r_data)
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find mail redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
mail_redirect = self._to_mail_redirect(response.object,
zone=mail_r.zone)
return mail_redirect
def ex_delete_redirect(self, redirect):
"""
        :param redirect: Redirect to delete
        :type redirect: :class:`Redirect`
:rtype: ``bool``
"""
zone_id = redirect.zone.id
redirect_id = redirect.id
try:
self.connection.request('/zones/%s/redirects/%s' % (zone_id,
redirect_id), method='DELETE')
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
return True
def ex_delete_mail_redirect(self, mail_r):
"""
        :param mail_r: Mail redirect to delete
:type mail_r: :class:`MailRedirect`
:rtype: ``bool``
"""
zone_id = mail_r.zone.id
mail_r_id = mail_r.id
try:
self.connection.request('/zones/%s/mail_redirects/%s' % (zone_id,
mail_r_id), method='DELETE')
except (BaseHTTPError, MalformedResponseError):
e = sys.exc_info()[1]
if isinstance(e, MalformedResponseError) and e.body == 'Not found':
                raise PointDNSException(value='Couldn\'t find mail redirect',
http_code=httplib.NOT_FOUND,
driver=self)
raise PointDNSException(value=e.message, http_code=e.code,
driver=self)
return True
def _to_zones(self, data):
zones = []
for zone in data:
_zone = self._to_zone(zone)
zones.append(_zone)
return zones
def _to_zone(self, data):
zone = data.get('zone')
id = zone.get('id')
name = zone.get('name')
ttl = zone.get('ttl')
extra = {'group': zone.get('group'),
'user-id': zone.get('user-id')}
        # All zones are primary by design, so we assume each zone is the
        # master source of info about itself, which is the case when the
        # domain's DNS records point to PointDNS nameservers.
type = 'master'
return Zone(id=id, domain=name, type=type, ttl=ttl, driver=self,
extra=extra)
def _to_records(self, data, zone):
records = []
for item in data:
record = self._to_record(item, zone=zone)
records.append(record)
return records
def _to_record(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_record')
id = record.get('id')
name = record.get('name')
type = record.get('record_type')
data = record.get('data')
extra = {'ttl': record.get('ttl'),
'zone_id': record.get('zone_id'),
'aux': record.get('aux')}
return Record(id, name, type, data, zone, self, extra=extra)
def _to_redirects(self, data, zone):
redirects = []
for item in data:
redirect = self._to_redirect(item, zone=zone)
redirects.append(redirect)
return redirects
def _to_redirect(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_redirect')
id = record.get('id')
name = record.get('name')
redirect_to = record.get('redirect_to')
type = record.get('redirect_type')
iframe = record.get('iframe_title')
query = record.get('redirect_query_string')
return Redirect(id, name, redirect_to, type, self, zone,
iframe=iframe, query=query)
def _to_mail_redirects(self, data, zone):
mail_redirects = []
for item in data:
mail_redirect = self._to_mail_redirect(item, zone=zone)
mail_redirects.append(mail_redirect)
return mail_redirects
def _to_mail_redirect(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('zone_mail_redirect')
id = record.get('id')
destination = record.get('destination_address')
source = record.get('source_address')
return MailRedirect(id, source, destination, zone, self)
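# Illustrative usage sketch (not part of the driver API): a possible
# mail-redirect lifecycle built only from the ex_* helpers above. `driver` is
# assumed to be an authenticated Point DNS driver instance and `zone` one of
# its zones; the addresses below are placeholders.
def _example_mail_redirect_lifecycle(driver, zone):
    # forward mail for sales@<zone> to an external mailbox
    mail_redirect = driver.ex_create_mail_redirect(
        destination='[email protected]', source='sales', zone=zone)
    # point the same source address at a different mailbox
    mail_redirect = driver.ex_update_mail_redirect(
        mail_redirect, destination='[email protected]')
    # remove the redirect once it is no longer needed; returns True on success
    return driver.ex_delete_mail_redirect(mail_redirect)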
|
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the naming tests."""
import yaml
import collections
import hashlib
import json
_LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353'
_TARGET_RECORDS_TO_SKIP_AGAINST_GCE = [
# TODO: enable this once able to upload the very large TXT record
# in this group to GCE DNS.
'ipv4-config-causing-fallback-to-tcp',
]
def _append_zone_name(name, zone_name):
return '%s.%s' % (name, zone_name)
def _build_expected_addrs_cmd_arg(expected_addrs):
out = []
for addr in expected_addrs:
out.append('%s,%s' % (addr['address'], str(addr['is_balancer'])))
return ';'.join(out)
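# For example, _build_expected_addrs_cmd_arg([{'address': '1.2.3.4:443',
# 'is_balancer': True}]) renders '1.2.3.4:443,True'; multiple addresses are
# joined with ';'.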
def _data_for_type(r_type, r_data, common_zone_name):
if r_type in ['A', 'AAAA']:
return ' '.join(map(lambda x: '\"%s\"' % x, r_data))
if r_type == 'SRV':
assert len(r_data) == 1
target = r_data[0].split(' ')[3]
uploadable_target = '%s.%s' % (target, common_zone_name)
uploadable = r_data[0].split(' ')
uploadable[3] = uploadable_target
return '\"%s\"' % ' '.join(uploadable)
if r_type == 'TXT':
assert len(r_data) == 1
chunks = []
all_data = r_data[0]
cur = 0
# Split TXT records that span more than 255 characters (the single
# string length-limit in DNS) into multiple strings. Each string
# needs to be wrapped with double-quotes, and all inner double-quotes
# are escaped. The wrapping double-quotes and inner backslashes can be
# counted towards the 255 character length limit (as observed with gcloud),
# so make sure all strings fit within that limit.
while len(all_data[cur:]) > 0:
next_chunk = '\"'
while len(next_chunk) < 254 and len(all_data[cur:]) > 0:
if all_data[cur] == '\"':
if len(next_chunk) < 253:
next_chunk += '\\\"'
else:
break
else:
next_chunk += all_data[cur]
cur += 1
next_chunk += '\"'
if len(next_chunk) > 255:
raise Exception('Bug: next chunk is too long.')
chunks.append(next_chunk)
# Wrap the whole record in single quotes to make sure all strings
# are associated with the same TXT record (to make it one bash token for
# gcloud)
return '\'%s\'' % ' '.join(chunks)
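# Illustrative sketch (not called by the generator): what the TXT chunking in
# _data_for_type produces for an oversized record. The record value and zone
# name below are made up for the example.
def _example_txt_chunking():
  long_value = 'grpc_config=' + 'x' * 600  # longer than one 255-char string
  chunked = _data_for_type('TXT', [long_value], 'test.example.com.')
  # the result is one single-quoted bash token made of several double-quoted
  # strings, each at most 255 characters long including its quotes
  assert chunked.startswith('\'"') and chunked.endswith('"\'')
  for chunk in chunked[1:-1].split(' '):
    assert len(chunk) <= 255
  return chunked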
# Convert DNS records from their "within a test group" format
# of the yaml file to an easier form for the templates to use.
def _gcloud_uploadable_form(test_cases, common_zone_name):
out = []
for group in test_cases:
if group['record_to_resolve'] in _TARGET_RECORDS_TO_SKIP_AGAINST_GCE:
continue
for record_name in group['records'].keys():
r_ttl = None
all_r_data = {}
for r_data in group['records'][record_name]:
        # for simplicity, require all records in a group to share the same TTL
if r_ttl is None:
r_ttl = r_data['TTL']
assert r_ttl == r_data['TTL'], '%s and %s differ' % (r_ttl, r_data['TTL'])
r_type = r_data['type']
if all_r_data.get(r_type) is None:
all_r_data[r_type] = []
all_r_data[r_type].append(r_data['data'])
for r_type in all_r_data.keys():
for r in out:
assert r['name'] != record_name or r['type'] != r_type, 'attempt to add a duplicate record'
out.append({
'name': record_name,
'ttl': r_ttl,
'type': r_type,
'data': _data_for_type(r_type, all_r_data[r_type], common_zone_name)
})
return out
def _gce_dns_zone_id(resolver_component_data):
dns_name = resolver_component_data['resolver_tests_common_zone_name']
return dns_name.replace('.', '-') + 'zone-id'
def _resolver_test_cases(resolver_component_data, records_to_skip):
out = []
for test_case in resolver_component_data['resolver_component_tests']:
if test_case['record_to_resolve'] in records_to_skip:
continue
out.append({
'target_name': _append_zone_name(test_case['record_to_resolve'],
resolver_component_data['resolver_tests_common_zone_name']),
'expected_addrs': _build_expected_addrs_cmd_arg(test_case['expected_addrs']),
'expected_chosen_service_config': (test_case['expected_chosen_service_config'] or ''),
'expected_lb_policy': (test_case['expected_lb_policy'] or ''),
})
return out
def main():
resolver_component_data = ''
with open('test/cpp/naming/resolver_test_record_groups.yaml') as f:
resolver_component_data = yaml.load(f)
  json_out = {
'resolver_tests_common_zone_name': resolver_component_data['resolver_tests_common_zone_name'],
'resolver_gce_integration_tests_zone_id': _gce_dns_zone_id(resolver_component_data),
'all_integration_test_records': _gcloud_uploadable_form(resolver_component_data['resolver_component_tests'],
resolver_component_data['resolver_tests_common_zone_name']),
'resolver_gce_integration_test_cases': _resolver_test_cases(resolver_component_data, _TARGET_RECORDS_TO_SKIP_AGAINST_GCE),
'resolver_component_test_cases': _resolver_test_cases(resolver_component_data, []),
'targets': [
{
'name': 'resolver_component_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': False,
'src': ['test/cpp/naming/resolver_component_test.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'gpr_test_util',
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'resolver_component_tests_runner_invoker' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': True,
'src': ['test/cpp/naming/resolver_component_tests_runner_invoker.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util',
'grpc_test_util',
'gpr_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
'args': [
'--test_bin_name=resolver_component_test%s' % unsecure_build_config_suffix,
'--running_under_bazel=false',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'address_sorting_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': True,
'run': True,
'src': ['test/cpp/naming/address_sorting_test.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'gpr_test_util',
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
]
}
  print(yaml.dump(json_out))
if __name__ == '__main__':
main()
|
|
"""
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x)
assert_equal(x, np.ones(5))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]])
y = tools.add_constant(x)
assert_equal(x,y)
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
            self.assertEqual(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
            self.assertEqual(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
            self.assertEqual(Y.shape, (40,9))
            self.assertEqual(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
            self.assertEqual(Y.shape, (40,8))
warnings.simplefilter("ignore")
            self.assertEqual(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test__des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
|
|
#
# Copyright 2014-2016 Vinay Vasista, Ravi Teja Mullapudi, Uday Bondhugula,
# and others from Multicore Computing Lab, Department of Computer Science
# and Automation, Indian Institute of Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# storage_mapping.py : Mapping from logical functions to physical arrays,
# exploration of reuse opportunities.
#
from __future__ import absolute_import, division, print_function
import logging
import targetc as genc
from expression import *
from pipe import *
from liveness import *
# LOG CONFIG #
storage_logger = logging.getLogger("storage_mapping.py")
storage_logger.setLevel(logging.DEBUG)
LOG = storage_logger.log
class TypeSizeMap(object):
_type_size_map = { "void":1,
"int8":1, "uint8":1,
"int16":2, "uint16":2,
"int32":4, "uint32":4,
"int64":8, "uint64":8,
"float":4, "double":8 }
@classmethod
def getsize(cls, typ):
typ_name = typ.c_type_name()
assert typ_name in cls._type_size_map
return cls._type_size_map[typ_name]
def get_dim_size(dim_storage, const=None):
    if const is None:
const = dim_storage.const
if isinstance(dim_storage.coeff, Fraction):
numr = dim_storage.coeff.numerator
denr = dim_storage.coeff.denominator
param_part = numr * dim_storage.orig_param // denr
else:
param_part = dim_storage.coeff * dim_storage.orig_param
size = param_part + const
size = simplify_expr(size)
return size
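# For instance (under the expression types used above), a dimension whose size
# expression is N/2 + 3 has coeff == Fraction(1, 2), orig_param == N and
# const == 3, so get_dim_size returns the simplified form of 1*N//2 + 3.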
class Dimension:
def __init__(self, size_map):
_param = size_map[0]
self._orig_param = _param
if _param == 0: # constant
self._param = '0'
else: # Parameter
self._param = _param.name
self._size_expr = size_map[1]
coeff_map = get_affine_var_and_param_coeff(self._size_expr)
self._const = int(get_constant_from_expr(self._size_expr))
self._coeff = 1
if not self.is_constant:
self._coeff = coeff_map[_param]
else:
self._coeff = self._const
@property
def orig_param(self):
return self._orig_param
@property
def param(self):
return self._param
@property
def size(self):
return self._size_expr
@property
def coeff(self):
return self._coeff
@property
def const(self):
return self._const
@property
def is_constant(self):
return self._param == '0'
def __str__(self):
'''
const = str(self.const)
if self.param == '0':
return '['+const+']'
coeff = str(self.coeff)
'''
dim_str = '['+str(get_dim_size(self))+']'
return dim_str
class Storage:
def __init__(self, _typ, _dims, _dim_sizes):
self._typ = _typ
self._dims = _dims
self._dim_sizes = _dim_sizes
self._id = None
self._dimension = []
for dim in range(0, self._dims):
self._dimension.append(Dimension(self._dim_sizes[dim]))
self._lookup_key = self.generate_key()
self._offsets = self.gen_param_offsets()
@property
def typ(self):
return self._typ
@property
def dims(self):
return self._dims
@property
def dim_sizes(self):
return self._dim_sizes
@property
def id_(self):
return self._id
@property
def lookup_key(self):
return self._lookup_key
@property
def offsets(self):
return self._offsets
def get_dim(self, dim):
assert dim < self._dims
return self._dimension[dim]
def generate_key(self):
'''
        To create the class mapping, we generate keys this way -
        - Field 0 : size, in bytes, of data type of the compute object
        - Field 1 : dimensionality 'dim' of the compute object
        - Following 'dim' fields are tuples of Parameter names with their
          respective coefficients. The fields are sorted using the parameter
          names.
'''
key = [TypeSizeMap.getsize(self.typ), self.dims]
# get (param, coeff) key from each dim
param_keys = []
for dim in range(0, self.dims):
storage_dim = self.get_dim(dim)
param_keys.append((storage_dim.param, storage_dim.coeff))
param_keys = sorted(param_keys, key=lambda x:x[0])
key.extend(param_keys)
# convert to string because list as a dict key is not allowed
key = str(key)
return key
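    # Example (illustrative) of a key produced by generate_key: a 2-D 'float'
    # function with both dimensions sized by a parameter N with unit
    # coefficients yields "[4, 2, ('N', 1), ('N', 1)]"; any function with the
    # same element size, dimensionality and (param, coeff) pairs shares it.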
def gen_param_offsets(self):
# get (param, const) from each dim
param_offsets = []
for dim in range(0, self.dims):
storage_dim = self.get_dim(dim)
offset_tuple = (storage_dim.param, storage_dim.const)
param_offsets.append(offset_tuple)
return param_offsets
def compute_total_size(self):
total_size = 1
for size in self.dim_sizes:
total_size *= size
return total_size
def generate_id(self):
self._id = IdGen.get_stg_id()
def __str__(self):
typ_str = str(self.typ.c_type_name())
ndims_str = str(self.dims)
dims_str = ''
for i in range(0, self.dims):
dim = self.get_dim(i)
dims_str += str(dim)
if i < self.dims-1:
dims_str += ' :: '
stg_str = '{'+typ_str+', '+ndims_str+', '+dims_str+'}'
return stg_str
def classify_storage(pipeline):
'''
Classifies the compute objects into separate groups based on their storage
sizes.
'''
def find_storage_equivalence(comps):
'''
        Create a mapping to the compute object from its size properties.
        The classification can be further improved with the knowledge of param
        constraints or estimates, by not differentiating between dimensions of
equal sizes.
NOTE: This module is unaware of whether the pipeline outputs must be
excluded from classification with other compute objects.
'''
storage_class_map = {}
for comp in comps:
storage = comp.orig_storage_class
key = storage.lookup_key
if key not in storage_class_map:
storage_class_map[key] = [comp]
else:
storage_class_map[key].append(comp)
return storage_class_map
def maximal_storage(comps, storage_class_map):
'''
Compute the maximal storage needed at each dimension individually and
        over-approximate the total storage as the product of the maximal storage
of all dimensions. This can further be improved with the knowledge of
param constraints (or estimates) which suggests an exact (or
approximate) measure of the size of each dimension.
'''
# ***
log_level = logging.DEBUG
LOG(log_level, "_______")
LOG(log_level, "Storage classes:")
# ***
new_storage_class_map = {}
for key in storage_class_map:
class_comps = storage_class_map[key] # a list
# pick a dummy comp to get the total number of dimensions and the
# original parameter associated with each dimension
helper_comp = class_comps[0]
typ = helper_comp.func.typ
dims = helper_comp.func.ndims
helper_storage = helper_comp.orig_storage_class
offsets = helper_storage.offsets
# this list holds the maximal offset value for each dimension
max_offset = [offsets[dim][1] for dim in range(0, dims)]
for comp in class_comps:
storage = comp.orig_storage_class
offsets = storage.offsets
for dim in range(0, dims):
                    dim_off = offsets[dim][1]  # offsets[dim] is a (param, const) tuple
max_offset[dim] = int(max(max_offset[dim], dim_off))
# collect the dim storage info and update with the new maximal
# offset
dim_sizes = []
for dim in range(0, dims):
dim_storage = helper_storage.get_dim(dim)
new_size = get_dim_size(dim_storage, max_offset[dim])
dim_sizes.append((dim_storage.orig_param, new_size))
# final maximal storage for this class
max_storage = Storage(typ, dims, dim_sizes)
max_storage.generate_id()
# all comps of this class now have identical storage
new_storage_class_map[max_storage] = []
for comp in class_comps:
comp.set_storage_class(max_storage)
new_storage_class_map[max_storage].append(comp)
# ***
log_level = logging.DEBUG
LOG(log_level, key)
LOG(log_level, "\t%-*s" % \
(15, [comp.func.name for comp in class_comps]))
LOG(log_level, "\t%-*s" % (15, str(max_storage)))
# ***
# clear the temporary mappings
storage_class_map.clear()
return new_storage_class_map
def naive_classification(comps):
'''
        For each comp, use its original storage class as its storage
        class.
'''
storage_class_map = {}
for comp in comps:
storage_class = comp.orig_storage_class
storage_class.generate_id()
storage_class_map[comp] = storage_class
comp.set_storage_class(storage_class)
return storage_class_map
def set_input_objects_storage(pipeline):
'''
Collect compute objects of functions of type input, and return a naive
classification map for them.
'''
inp_comps = [pipeline.func_map[inp] for inp in pipeline.inputs]
storage_class_map = \
naive_classification(inp_comps)
return storage_class_map
def classify_storage_for_comps(comps, opt=False):
'''
If storage optimization is enabled, classify storage based on certain
equivalence criteria.
'''
if opt:
# find equivalence in size between storage objects and create
# classes of storage objects
storage_class_map = find_storage_equivalence(comps)
# compute the maximal offsets in each dimension of the compute
# objects, and compute the total_size of the storage for each
# storage class
storage_class_map = maximal_storage(comps, storage_class_map)
else:
storage_class_map = naive_classification(comps)
return storage_class_map
# ''' main '''
opt = 'optimize_storage' in pipeline.options
storage_class_map = {}
# storage classification for pipeline inputs
storage_class_map['inputs'] = set_input_objects_storage(pipeline)
# storage classification for group compute objects
for group in pipeline.groups:
g_comps = [comp for comp in group.comps if not comp.is_liveout]
storage_class_map[group] = classify_storage_for_comps(g_comps, opt)
# storage classification for outputs
out_comps = [pipeline.func_map[func] for func in pipeline.outputs]
storage_class_map['liveouts'] = classify_storage_for_comps(out_comps,
opt=False)
# storage classification for other liveouts
live_comps = list(set(pipeline.liveouts).difference(set(out_comps)))
liveout_stg_class_map = classify_storage_for_comps(live_comps,
opt)
storage_class_map['liveouts'].update(liveout_stg_class_map)
return storage_class_map
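# Illustrative sketch (plain data, no pipeline objects): the grouping plus
# per-dimension max-offset idea that maximal_storage applies above. Each entry
# is (lookup_key, per_dim_const_offsets); entries sharing a key are merged by
# taking the element-wise maximum of their offsets.
def _example_maximal_offsets(entries):
    classes = {}
    for key, offsets in entries:
        classes.setdefault(key, []).append(offsets)
    return dict((key, [max(dim_offs) for dim_offs in zip(*members)])
                for key, members in classes.items())
# _example_maximal_offsets([("[4, 2, ('N', 1), ('N', 1)]", [2, 4]),
#                           ("[4, 2, ('N', 1), ('N', 1)]", [6, 1])])
# == {"[4, 2, ('N', 1), ('N', 1)]": [6, 4]}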
def log_schedule(comps, schedule):
log_level = logging.DEBUG-2
LOG(log_level, "\n=======")
LOG(log_level, "Schedules:")
for comp in comps:
LOG(log_level, "\t%-*s" % (15, comp.func.name) + \
": "+str(schedule[comp]))
return
def log_storage_mapping(comps, storage_map):
log_level = logging.DEBUG-1
LOG(log_level, "")
LOG(log_level, "Storage mapping:")
for comp in comps:
LOG(log_level, "\t%-*s" % (15, comp.func.name) + \
": "+str(storage_map[comp]))
return
def remap_storage_for_comps(comps, storage_class_map, schedule,
liveness_map, storage_map, opt=False):
'''
If storage optimization is enabled, enable reuse by setting array numbers
for comps which can use the same array for computation.
'''
array_count = 0
if not opt:
for comp in comps:
array_count += 1
storage_map[comp] = array_count
return
# sort comps according to their schedule
sorted_comps = get_sorted_objs(schedule)
# initialize a pool of arrays for each storage class
stg_classes = list(set([comp.storage_class for comp in sorted_comps]))
array_pool = {}
for stg_class in stg_classes:
array_pool[stg_class] = []
for comp in sorted_comps:
stg_class = comp.storage_class
# if no array of stg_class is free as of now
if not array_pool[stg_class]:
array_count += 1
storage_map[comp] = array_count
# there is a free array of stg_class in the pool
else:
storage_map[comp] = array_pool[stg_class].pop()
# return free arrays to pool
time = schedule[comp]
# if any comp is not live after this point
if time in liveness_map:
free_comps = liveness_map[time]
for free_comp in free_comps:
comp_stg_class = free_comp.storage_class
storage_index = storage_map[free_comp]
array_pool[comp_stg_class].append(storage_index)
# ***
log_schedule(sorted_comps, schedule)
log_storage_mapping(sorted_comps, storage_map)
# ***
return
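# Illustrative sketch (toy data, no pipeline objects): the pool-based reuse
# policy implemented above. `order` lists (comp, storage_class) pairs in
# schedule order; `frees_after` maps a comp to the comps whose arrays become
# free once it has been scheduled (a stand-in for the liveness map).
def _example_array_reuse(order, frees_after):
    mapping, classes, pools, count = {}, {}, {}, 0
    for comp, stg_class in order:
        classes[comp] = stg_class
        pool = pools.setdefault(stg_class, [])
        if pool:
            mapping[comp] = pool.pop()
        else:
            count += 1
            mapping[comp] = count
        for freed in frees_after.get(comp, []):
            pools[classes[freed]].append(mapping[freed])
    return mapping
# Three comps of one class, where 'f' is dead once 'g' has run, need only two
# arrays, since 'h' reuses the array of 'f':
# _example_array_reuse([('f', 'c0'), ('g', 'c0'), ('h', 'c0')], {'g': ['f']})
# == {'f': 1, 'g': 2, 'h': 1}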
def remap_storage(pipeline):
'''
    Map logical storage objects to representative physical arrays.
The mapping can be switched between naive and optimized (with reuse)
versions, given a schedule for the comps within its group.
'''
opt = 'optimize_storage' in pipeline.options
# a mapping from comp -> index of array of comp's storage class:
storage_map = {}
storage_class_map = pipeline.storage_class_map
# 1. remap for group
for group in pipeline.groups:
remap_storage_for_comps(group.comps, storage_class_map[group],
group.comps_schedule, group.liveness_map,
storage_map, opt)
# 2. remap for liveouts
remap_storage_for_comps(pipeline.liveouts, storage_class_map['liveouts'],
pipeline.liveouts_schedule, pipeline.liveness_map,
storage_map, opt)
return storage_map
def create_physical_arrays(pipeline):
'''
Create cgen CArrays for compute objects using the storage mapping from
logical storage object of the comp (assumed to be available at this point).
'''
opt = 'optimize_storage' in pipeline.options
def create_new_array(comp, flat_scratch=False):
'''
Creates CArray for a given comp
'''
stg_class = comp.storage_class
# array attributes
array_layout = 'contiguous'
if comp.is_output or comp.is_image_typ: # inputs and outputs
array_name = comp.func.name
else:
tag = str(stg_class.id_)
# array naming
if opt:
array_name = genc.CNameGen.get_array_name(comp.is_liveout, tag)
else:
array_name = comp.func.name
            if not comp.is_liveout:  # not a live-out, i.e. a scratchpad
if flat_scratch: # linearized array
array_layout = 'contiguous_static'
else:
array_layout = 'multidim'
array_type = genc.TypeMap.convert(comp.func.typ)
array_sizes = []
for dim in range(0, stg_class.dims):
dim_storage = stg_class.get_dim(dim)
array_sizes.append(get_dim_size(dim_storage))
# create CArray object
array = genc.CArray(array_type, array_name, array_sizes)
array.layout = array_layout
return array
def set_array_for_comp(comp, array_id, created, flat_scratch=False):
'''
Set CArray for comp by newly creating it or finding the already created
corresponding object.
'''
if array_id in created:
array = created[array_id]
else:
array = create_new_array(comp, flat_scratch)
# record the array creation
created[array_id] = array
return array
def set_arrays_for_inputs(pipeline):
'''
Representative CArray objects for inputs. Should not allocate.
'''
func_map = pipeline.func_map
inputs = pipeline.inputs
for inp in inputs:
inp_comp = func_map[inp]
array = create_new_array(inp_comp)
inp_comp.set_storage_object(array)
return
def set_arrays_for_outputs(pipeline, created):
'''
Representative CArray objects for outputs. Should not allocate.
'''
func_map = pipeline.func_map
outputs = pipeline.outputs
for out in outputs:
out_comp = func_map[out]
array = create_new_array(out_comp)
out_comp.set_storage_object(array)
# record array creation. Outputs may collide with non-output
# liveouts for reuse.
array_id = pipeline.storage_map[out_comp]
created[array_id] = array
return
def set_arrays_for_comps(pipeline, created_arrays, flat_scratch):
'''
CArray objects for intermediate and non-output liveout compute objects.
'''
for group in pipeline.groups:
# place where created scratchpads are recorded
created_scratch = {}
# create / map CArray objects to comps
for comp in group.comps:
if comp.is_output:
continue
array_id = pipeline.storage_map[comp]
if comp.is_liveout:
array = set_array_for_comp(comp, array_id, created_arrays)
else:
array = set_array_for_comp(comp, array_id, created_scratch,
flat_scratch)
comp.set_storage_object(array)
return
flat_scratch = 'flatten_scratchpad' in pipeline.options
# place where created arrays are recorded
created_arrays = {}
# first create arrays for pipeline inputs
set_arrays_for_inputs(pipeline)
# create arrays for pipeline outputs.
# doing this first will open doors for using output arrays, that will be
# allocated outside the pipeline function, for other liveouts.
set_arrays_for_outputs(pipeline, created_arrays)
# create arrays for the rest of the comps
set_arrays_for_comps(pipeline, created_arrays, flat_scratch)
# collect users for each array created
array_writers = {}
for comp in pipeline.comps:
if comp.array not in array_writers:
array_writers[comp.array] = []
array_writers[comp.array].append(comp)
return array_writers
def map_reverse(map_):
'''
Assumes map_[key] = val, where val is a list
'''
rmap = {}
for key in map_:
for map_val in map_[key]:
rmap[map_val] = key
return rmap
def create_array_freelist(pipeline):
'''
Create a list of arrays for each time in the group schedule, at which
these arrays have their last use.
'''
def logs(liveness_map2, array_writers, last_use, free_arrays):
# ***
log_level = logging.DEBUG-2
LOG(log_level, "\n_______")
LOG(log_level, "Reverse liveness map for Liveouts:")
for comp in liveness_map2:
LOG(log_level, "\t%-*s" % (15, comp.func.name) + \
": " + str(liveness_map2[comp]))
# ***
log_level = logging.DEBUG-2
LOG(log_level, "\n_______")
LOG(log_level, "Array Users:")
for array in array_writers:
if True in [comp.is_liveout for comp in array_writers[array]]:
LOG(log_level, "\t%-*s" % (15, array.name) + ": " + \
str([comp.func.name for comp in array_writers[array]]))
# ***
log_level = logging.DEBUG-1
LOG(log_level, "\n_______")
LOG(log_level, "Last use map for arrays:")
for array in last_use:
LOG(log_level, "\t%-*s" % (15, array.name) + \
": " + str(last_use[array]))
# ***
log_level = logging.DEBUG-1
LOG(log_level, "\n_______")
LOG(log_level, "Free arrays :")
for g in free_arrays:
LOG(log_level,
"\t%-*s" % (15, g.name+" ("+str(g_schedule[g])+")") + \
": " + str([arr.name for arr in free_arrays[g]]))
return
array_writers = pipeline.array_writers
g_schedule = pipeline.group_schedule
liveness_map = pipeline.liveness_map
# get a map-reverse
liveness_map2 = map_reverse(liveness_map)
# ignore all arrays used by pipeline outputs
out_comps = [pipeline.func_map[func] for func in pipeline.outputs]
output_arrays = [comp.array for comp in out_comps]
for array in output_arrays:
array_writers.pop(array)
    # find the scheduled time (of group) at which arrays have their last use
last_use = {}
for array in array_writers:
        # only consider full arrays, i.e. arrays written by live-out comps
if True in [comp.is_liveout for comp in array_writers[array]]:
writer_sched = {}
for writer in array_writers[array]:
writer_sched[writer] = g_schedule[writer.group]
last_writer = max(array_writers[array],
key=lambda x:writer_sched[x])
if last_writer in liveness_map2: # not pipeline outputs
last_use[array] = liveness_map2[last_writer]
# reverse-map from group_schedule -> group
schedule_g = dict((v, k) for k, v in g_schedule.items())
# create a direct mapping from groups to arrays that are not live after
# the group's execution is complete
free_arrays = {}
for group in g_schedule:
free_arrays[group] = []
for array in last_use:
user_sched = last_use[array]
# find the group with schedule time = user_sched
group = schedule_g[user_sched]
free_arrays[group].append(array)
# ***
logs(liveness_map2, array_writers, last_use, free_arrays)
return free_arrays
|
|
from __future__ import absolute_import
from sfepy import data_dir
import six
filename_mesh = data_dir + '/meshes/3d/special/cube_cylinder.mesh'
if 0:
from sfepy.discrete.fem.utils import refine_mesh
refinement_level = 1
filename_mesh = refine_mesh(filename_mesh, refinement_level)
material_2 = {
'name' : 'coef',
'values' : {'val' : 1.0},
}
field_1 = {
'name' : 'temperature',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
regions = {
'Omega' : 'all',
'Gamma_Left' : ('vertices in (x < 0.0001)', 'facet'),
'Gamma_Right' : ('vertices in (x > 0.999)', 'facet'),
}
ebcs = {
't1' : ('Gamma_Left', {'t.0' : 2.0}),
't2' : ('Gamma_Right', {'t.0' : -2.0}),
}
integral_1 = {
'name' : 'i',
'order' : 1,
}
equations = {
'Temperature' : """dw_laplace.i.Omega(coef.val, s, t) = 0"""
}
class DiagPC(object):
"""
Diagonal (Jacobi) preconditioner.
Equivalent to setting `'precond' : 'jacobi'`.
"""
def setUp(self, pc):
A = pc.getOperators()[0]
self.idiag = 1.0 / A.getDiagonal()
def apply(self, pc, x, y):
y.pointwiseMult(x, self.idiag)
def setup_petsc_precond(mtx, problem):
return DiagPC()
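# Note (illustrative): the 'i11' entry below passes setup_petsc_precond as a
# user-defined preconditioner, so PETSc ends up applying y = diag(A)^-1 * x,
# i.e. plain Jacobi, which should match the behaviour of the 'i12' entry that
# uses the built-in 'jacobi' preconditioner.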
solvers = {
'd00' : ('ls.scipy_direct',
{}
),
'd01' : ('ls.scipy_direct',
{'method' : 'umfpack',
'warn' : True,}
),
'd02' : ('ls.scipy_direct',
{'method' : 'superlu',
'warn' : True,}
),
'd10' : ('ls.mumps', {}),
'i00' : ('ls.pyamg',
{'method' : 'ruge_stuben_solver',
'accel' : 'cg',
'eps_r' : 1e-12,
'method:max_levels' : 5,
'solve:cycle' : 'V',}
),
'i01' : ('ls.pyamg',
{'method' : 'smoothed_aggregation_solver',
'accel' : 'cg',
'eps_r' : 1e-12,}
),
'i02' : ('ls.pyamg_krylov',
{'method' : 'cg',
'eps_r' : 1e-12,
'i_max' : 1000,}
),
'i10' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'none', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i11' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'python', # just for output (unused)
'setup_precond' : setup_petsc_precond, # user-defined pc
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i12' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'jacobi', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i13' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i20' : ('ls.scipy_iterative',
{'method' : 'cg',
'i_max' : 1000,
'eps_a' : 1e-12,
'eps_r' : 1e-12,}
),
'i21' : ('ls.scipy_iterative',
{'method' : 'bicgstab',
'i_max' : 1000,
'eps_a' : 1e-12,
'eps_r' : 1e-12,}
),
'i22' : ('ls.scipy_iterative',
{'method' : 'qmr',
'i_max' : 1000,
'eps_a' : 1e-12,
'eps_r' : 1e-12,}
),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
options = {
'nls' : 'newton',
}
from sfepy.base.testing import TestCommon
output_name = 'test_linear_solvers_%s.vtk'
class Test(TestCommon):
can_fail = ['ls.pyamg', 'ls.pyamg_krylov', 'ls.petsc', 'ls.mumps',
'ls.scipy_direct']
@staticmethod
def from_conf(conf, options):
from sfepy.discrete import Problem
problem = Problem.from_conf(conf)
problem.time_update()
test = Test(problem=problem, conf=conf, options=options)
return test
def _list_linear_solvers(self, confs):
d = []
for key, val in six.iteritems(confs):
if val.kind.find('ls.') == 0:
d.append(val)
d.sort(key=lambda a: a.name)
return d
def test_solvers(self):
from sfepy.base.base import IndexedStruct
import os.path as op
solver_confs = self._list_linear_solvers(self.problem.solver_confs)
ok = True
tt = []
for solver_conf in solver_confs:
method = solver_conf.get('method', '')
precond = solver_conf.get('precond', '')
name = ' '.join((solver_conf.name, solver_conf.kind,
method, precond)).rstrip()
self.report(name)
self.report('matrix size:', self.problem.mtx_a.shape)
self.report(' nnz:', self.problem.mtx_a.nnz)
status = IndexedStruct()
try:
self.problem.init_solvers(status=status,
ls_conf=solver_conf,
force=True)
state = self.problem.solve()
failed = status.nls_status.condition != 0
except Exception as aux:
failed = True
status = None
exc = aux
ok = ok and ((not failed) or (solver_conf.kind in self.can_fail))
if status is not None:
status = status.nls_status
for kv in six.iteritems(status.time_stats):
self.report('%10s: %7.2f [s]' % kv)
self.report('condition: %d, err0: %.3e, err: %.3e'
% (status.condition, status.err0, status.err))
tt.append([name,
status.time_stats['solve'],
status.ls_n_iter,
status.err])
aux = name.replace(' ', '_')
fname = op.join(self.options.out_dir,
op.split(self.conf.output_name)[1]) % aux
self.problem.save_state(fname, state)
else:
self.report('solver failed:')
self.report(exc)
tt.append([name, -1, 1e10, 1e10])
tt.sort(key=lambda a: a[1])
self.report('solution times / numbers of iterations (residual norms):')
for row in tt:
self.report('%.2f [s] / % 4d' % (row[1], row[2]),
'(%.3e)' % row[3], ':', row[0])
return ok
def test_ls_reuse(self):
import numpy as nm
from sfepy.solvers import Solver
self.problem.init_solvers(ls_conf=self.problem.solver_confs['d00'])
nls = self.problem.get_nls()
state0 = self.problem.get_initial_state()
state0.apply_ebc()
vec0 = state0.get_state(self.problem.active_only)
self.problem.update_materials()
rhs = nls.fun(vec0)
mtx = nls.fun_grad(vec0)
ok = True
for name in ['i12', 'i01']:
solver_conf = self.problem.solver_confs[name]
method = solver_conf.get('method', '')
precond = solver_conf.get('precond', '')
name = ' '.join((solver_conf.name, solver_conf.kind,
method, precond)).rstrip()
self.report(name)
try:
ls = Solver.any_from_conf(solver_conf)
            except Exception:
self.report('skipped!')
continue
conf = ls.conf.copy()
conf.force_reuse = True
sol00 = ls(rhs, mtx=mtx, conf=conf)
digest00 = ls.mtx_digest
sol0 = ls(rhs, mtx=mtx)
digest0 = ls.mtx_digest
sol1 = ls(rhs, mtx=2*mtx, conf=conf)
digest1 = ls.mtx_digest
sol2 = ls(rhs, mtx=2*mtx)
digest2 = ls.mtx_digest
ls(rhs, mtx=2*mtx)
digest3 = ls.mtx_digest
_ok = digest00 != digest0
self.report(digest00, '!=', digest0, ':', _ok); ok = ok and _ok
_ok = digest0 == digest1
self.report(digest0, '==', digest1, ':', _ok); ok = ok and _ok
_ok = digest1 != digest2
self.report(digest1, '!=', digest2, ':', _ok); ok = ok and _ok
_ok = digest2[1] == digest3[1]
self.report(digest2[1], '==', digest3[1], ':', _ok); ok = ok and _ok
_ok = nm.allclose(sol00, sol0, atol=1e-12, rtol=0.0)
self.report('sol00 == sol0:', _ok); ok = ok and _ok
_ok = nm.allclose(sol0, sol1, atol=1e-12, rtol=0.0)
self.report('sol0 == sol1:', _ok); ok = ok and _ok
_ok = nm.allclose(sol0, 2 * sol2, atol=1e-12, rtol=0.0)
self.report('sol0 == 2 * sol2:', _ok); ok = ok and _ok
return ok
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str,
availability_set_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"availabilitySetName": _SERIALIZER.url("availability_set_name", availability_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request(
resource_group_name: str,
availability_set_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"availabilitySetName": _SERIALIZER.url("availability_set_name", availability_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
availability_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-12-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"availabilitySetName": _SERIALIZER.url("availability_set_name", availability_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
availability_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"availabilitySetName": _SERIALIZER.url("availability_set_name", availability_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_subscription_request(
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_available_sizes_request(
resource_group_name: str,
availability_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"availabilitySetName": _SERIALIZER.url("availability_set_name", availability_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
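# A minimal sketch of how one of these builders is used on its own (the resource
# names and the all-zero subscription id are placeholders, not real resources):
#
#   request = build_get_request(
#       resource_group_name="my-rg",
#       availability_set_name="my-avset",
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )
#   assert request.method == "GET"
#   # request.url carries the formatted path plus the api-version=2020-12-01 query string.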
class AvailabilitySetsOperations(object):
"""AvailabilitySetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySet",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySet')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: "_models.AvailabilitySetUpdate",
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation.
:type parameters: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilitySetUpdate')
request = build_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> None:
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> "_models.AvailabilitySet":
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.AvailabilitySet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilitySet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
expand: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation. Allowed values are
'instanceView'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.AvailabilitySetListResult"]:
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySetListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_12_01.models.AvailabilitySetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilitySetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'} # type: ignore
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
availability_set_name: str,
**kwargs: Any
) -> Iterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_12_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'} # type: ignore
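# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated module): how this operation group is
# typically reached through ComputeManagementClient. The credential, resource
# names and availability-set body below are illustrative placeholders; a plain
# dict stands in for the AvailabilitySet model.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute import ComputeManagementClient
#
#   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   avset = client.availability_sets.create_or_update(
#       "my-rg", "my-avset",
#       {"location": "eastus", "platform_fault_domain_count": 2},
#   )
#   for size in client.availability_sets.list_available_sizes("my-rg", "my-avset"):
#       print(size.name)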
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
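# BIP125 opt-in RBF: a transaction signals replaceability when at least one of its
# inputs has nSequence <= 0xfffffffd. The run_test checks below compare the sequence
# numbers assigned by walletcreatefundedpsbt against this threshold.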
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# TODO: Re-enable this test with segwit v1
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
# Topology of test network is linear, so this one call is enough
self.disconnect_nodes(0, 1)
# Create watchonly on online_node
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
self.sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
self.sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
"""Assert that the given PSBT has a change output with the given type."""
# The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
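    # run_test below walks the PSBT workflow end to end: funding with
    # walletcreatefundedpsbt, fee-rate option validation, signing and combining via
    # walletprocesspsbt/combinepsbt, finalization and extraction, the BIP 174 test
    # vectors, and the utxoupdatepsbt/joinpsbts/analyzepsbt helpers.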
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# If inputs are specified, do not automatically add more:
utxo1 = self.nodes[0].listunspent()[0]
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
# Inputs argument can be null
self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
        # Node 1 should not be able to add anything to it, and should return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Manually selected inputs can be locked:
assert_equal(len(self.nodes[0].listlockunspent()), 0)
utxo1 = self.nodes[0].listunspent()[0]
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
assert_equal(len(self.nodes[0].listlockunspent()), 1)
# Locks are ignored for manually selected inputs
self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
# Setup watchonly wallets
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
# Create all the addresses
p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
if not self.options.descriptors:
wmulti.importaddress(p2sh)
wmulti.importaddress(p2wsh)
wmulti.importaddress(p2sh_p2wsh)
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
outputs = [{self.nodes[1].getnewaddress(): 29.99}]
# spend single key from node 1
created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
# Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
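        # Fee-rate options: fee_rate is denominated in sat/vB, feeRate in BTC/kvB;
        # both spellings are exercised below to cover the new and legacy interfaces.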
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.99999999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
for param in ["fee_rate", "feeRate"]:
assert_equal(self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: 0, "add_inputs": True})["fee"], 0)
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: "", "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
self.log.info("- raises RPC error with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("- raises RPC error with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
# previously this was silently capped at -maxtxfee
for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
self.log.info("Test various PSBT operations")
# partially sign multisig things with node 1
psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# Unload wmulti, we don't need it anymore
wmulti.unloadwallet()
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
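        # walletprocesspsbt positional arguments here: sign=False (update only),
        # sighashtype="ALL", and bip32derivs (left at its default of true for psbt1,
        # explicitly disabled for psbt2).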
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
        # Creating the funded PSBT should still succeed when the change address comes
        # from another wallet (so this wallet has no P2SH innerscript access for it),
        # which matters when BnB coin selection is attempted
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Make sure the wallet's change type is respected by default
small_output = {self.nodes[0].getnewaddress():0.1}
psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_native, "witness_v0_keyhash")
psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_legacy, "pubkeyhash")
# Make sure the change type of the wallet can also be overwritten
psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
self.assert_change_type(psbtx_np2wkh, "scripthash")
# Make sure the change type cannot be specified if a change address is given
invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
# TODO: Re-enable this for segwit v1
# self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
def test_psbt_input_keys(psbt_input, keys):
"""Check that the psbt input has only the expected keys."""
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
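        # analyzepsbt reports the next role in the BIP 174 workflow
        # (updater -> signer -> finalizer -> extractor) along with per-input status.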
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
if __name__ == '__main__':
PSBTTest().main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
from tvm import relay
from tvm.relay import testing
import tvm
from tvm.contrib import graph_runtime
from tvm.contrib.debugger import debug_runtime
import tvm.testing
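# These tests exercise the module-based runtime interface: a Relay workload is built
# with relay.build_module.build, then executed either through the raw factory-module
# API (lib["default"](ctx) plus packed functions) or through the
# graph_runtime.GraphModule wrapper, including export/load and RPC code paths.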
def input_shape(mod):
return [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
def verify(data):
if not tvm.runtime.enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
ctx = tvm.cpu()
module = graph_runtime.create(graph, lib, ctx)
module.set_input("data", data)
module.set_input(**graph_params)
module.run()
out = module.get_output(0).asnumpy()
return out
def test_legacy_compatibility():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = tvm.cpu()
module = graph_runtime.create(graph, lib, ctx)
module.set_input("data", data)
module.set_input(**graph_params)
module.run()
out = module.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def test_cpu():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
# raw api
ctx = tvm.cpu()
gmod = complied_graph_lib["default"](ctx)
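    # "default" looks up the graph-executor factory function packed into the compiled
    # library; calling it with a device context returns a runtime module whose packed
    # functions are fetched below.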
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(complied_graph_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_gpu():
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = tvm.gpu()
# raw api
gmod = complied_graph_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(complied_graph_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.uses_gpu
def test_mod_export():
def verify_cpu_export(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
ctx = tvm.cpu(0)
gmod = loaded_lib["default"](ctx)
# raw api
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_export(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
loaded_lib = tvm.runtime.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = tvm.gpu()
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_cpu_export(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = remote.cpu()
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data, ctx=ctx))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_export(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
from tvm import rpc
server = rpc.Server("localhost", use_popen=True, port=9094)
remote = rpc.connect(server.host, server.port)
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = remote.gpu()
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data, ctx=ctx))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
for obj_format in [".so", ".tar"]:
verify_cpu_export(obj_format)
verify_gpu_export(obj_format)
verify_rpc_cpu_export(obj_format)
verify_rpc_gpu_export(obj_format)
@tvm.testing.uses_gpu
def test_remove_package_params():
def verify_cpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(relay.save_param_dict(complied_graph_lib.get_params()))
loaded_lib = tvm.runtime.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = tvm.cpu(0)
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
set_input("data", tvm.nd.array(data))
load_params(loaded_params)
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(relay.save_param_dict(complied_graph_lib.get_params()))
loaded_lib = tvm.runtime.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = tvm.gpu(0)
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
set_input("data", tvm.nd.array(data))
load_params(loaded_params)
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_cpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
path_params = temp.relpath("deploy_param.params")
with open(path_params, "wb") as fo:
fo.write(relay.save_param_dict(complied_graph_lib.get_params()))
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = remote.cpu()
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(path_params, "rb").read())
set_input("data", tvm.nd.array(data, ctx=ctx))
load_params(loaded_params)
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
loaded_params = bytearray(open(path_params, "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import util
temp = util.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
complied_graph_lib_no_params.export_library(path_lib)
path_params = temp.relpath("deploy_param.params")
with open(path_params, "wb") as fo:
fo.write(relay.save_param_dict(complied_graph_lib.get_params()))
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
ctx = remote.gpu()
# raw api
gmod = loaded_lib["default"](ctx)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
load_params = gmod["load_params"]
loaded_params = bytearray(open(path_params, "rb").read())
set_input("data", tvm.nd.array(data, ctx=ctx))
load_params(loaded_params)
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph runtime wrapper
gmod = graph_runtime.GraphModule(loaded_lib["default"](ctx))
loaded_params = bytearray(open(path_params, "rb").read())
gmod.set_input("data", data)
gmod.load_params(loaded_params)
gmod.run()
out = gmod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
for obj_format in [".so", ".tar"]:
verify_cpu_remove_package_params(obj_format)
verify_gpu_remove_package_params(obj_format)
verify_rpc_cpu_remove_package_params(obj_format)
verify_rpc_gpu_remove_package_params(obj_format)
def test_debug_graph_runtime():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
# raw api
ctx = tvm.cpu()
try:
gmod = complied_graph_lib["debug_create"]("default", ctx)
    except Exception:
print("Skip because debug graph_runtime not enabled")
return
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# debug graph runtime wrapper
debug_g_mod = debug_runtime.GraphModuleDebug(
complied_graph_lib["debug_create"]("default", ctx),
[ctx],
complied_graph_lib.get_json(),
None,
)
debug_g_mod.set_input("data", data)
debug_g_mod.run()
out = debug_g_mod.get_output(0).asnumpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
if __name__ == "__main__":
test_legacy_compatibility()
test_cpu()
test_gpu()
test_mod_export()
test_remove_package_params()
test_debug_graph_runtime()
|
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_common.py"""
import binascii
import collections
import os
import signal
import sys
import tempfile
import time
import unittest
DEPOT_TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, DEPOT_TOOLS_ROOT)
from testing_support import coverage_utils
from testing_support import git_test_utils
class GitCommonTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(GitCommonTestBase, cls).setUpClass()
import git_common
cls.gc = git_common
cls.gc.TEST_MODE = True
class Support(GitCommonTestBase):
def _testMemoizeOneBody(self, threadsafe):
calls = collections.defaultdict(int)
def double_if_even(val):
calls[val] += 1
return val * 2 if val % 2 == 0 else None
# Use this explicitly as a wrapper fn instead of a decorator. Otherwise
# pylint crashes (!!)
double_if_even = self.gc.memoize_one(threadsafe=threadsafe)(double_if_even)
self.assertEqual(4, double_if_even(2))
self.assertEqual(4, double_if_even(2))
self.assertEqual(None, double_if_even(1))
self.assertEqual(None, double_if_even(1))
self.assertDictEqual({1: 2, 2: 1}, calls)
double_if_even.set(10, 20)
self.assertEqual(20, double_if_even(10))
self.assertDictEqual({1: 2, 2: 1}, calls)
double_if_even.clear()
self.assertEqual(4, double_if_even(2))
self.assertEqual(4, double_if_even(2))
self.assertEqual(None, double_if_even(1))
self.assertEqual(None, double_if_even(1))
self.assertEqual(20, double_if_even(10))
self.assertDictEqual({1: 4, 2: 2, 10: 1}, calls)
def testMemoizeOne(self):
self._testMemoizeOneBody(threadsafe=False)
def testMemoizeOneThreadsafe(self):
self._testMemoizeOneBody(threadsafe=True)
def testOnce(self):
testlist = []
# This works around a bug in pylint
once = self.gc.once
@once
def add_to_list():
testlist.append('dog')
add_to_list()
add_to_list()
add_to_list()
add_to_list()
self.assertEquals(testlist, ['dog'])
def slow_square(i):
"""Helper for ScopedPoolTest.
  Must be global because non-top-level functions aren't pickleable.
"""
return i ** 2
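# Illustrative sketch, not part of the original file: multiprocessing can only
# hand a callable to a worker process if it is picklable, i.e. reachable by
# its qualified name. A module-level helper such as slow_square pickles fine;
# a function defined inside another function does not.
def _pickling_sketch():
  import pickle

  def nested_square(i):
    return i ** 2

  pickle.dumps(slow_square)      # works: found as an attribute of this module
  try:
    pickle.dumps(nested_square)  # fails: not reachable by qualified name
  except (pickle.PicklingError, AttributeError):
    pass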
class ScopedPoolTest(GitCommonTestBase):
CTRL_C = signal.CTRL_C_EVENT if sys.platform == 'win32' else signal.SIGINT
def testThreads(self):
result = []
with self.gc.ScopedPool(kind='threads') as pool:
result = list(pool.imap(slow_square, xrange(10)))
self.assertEqual([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], result)
def testThreadsCtrlC(self):
result = []
with self.assertRaises(KeyboardInterrupt):
with self.gc.ScopedPool(kind='threads') as pool:
# Make sure this pool is interrupted in mid-swing
for i in pool.imap(slow_square, xrange(20)):
if i > 32:
os.kill(os.getpid(), self.CTRL_C)
result.append(i)
self.assertEqual([0, 1, 4, 9, 16, 25], result)
def testProcs(self):
result = []
with self.gc.ScopedPool() as pool:
result = list(pool.imap(slow_square, xrange(10)))
self.assertEqual([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], result)
def testProcsCtrlC(self):
result = []
with self.assertRaises(KeyboardInterrupt):
with self.gc.ScopedPool() as pool:
# Make sure this pool is interrupted in mid-swing
for i in pool.imap(slow_square, xrange(20)):
if i > 32:
os.kill(os.getpid(), self.CTRL_C)
result.append(i)
self.assertEqual([0, 1, 4, 9, 16, 25], result)
class ProgressPrinterTest(GitCommonTestBase):
class FakeStream(object):
def __init__(self):
self.data = set()
self.count = 0
def write(self, line):
self.data.add(line)
def flush(self):
self.count += 1
@unittest.expectedFailure
def testBasic(self):
"""This test is probably racy, but I don't have a better alternative."""
fmt = '%(count)d/10'
stream = self.FakeStream()
pp = self.gc.ProgressPrinter(fmt, enabled=True, fout=stream, period=0.01)
with pp as inc:
for _ in xrange(10):
time.sleep(0.02)
inc()
filtered = {x.strip() for x in stream.data}
rslt = {fmt % {'count': i} for i in xrange(11)}
self.assertSetEqual(filtered, rslt)
self.assertGreaterEqual(stream.count, 10)
class GitReadOnlyFunctionsTest(git_test_utils.GitRepoReadOnlyTestBase,
GitCommonTestBase):
REPO_SCHEMA = """
A B C D
B E D
"""
COMMIT_A = {
'some/files/file1': {'data': 'file1'},
'some/files/file2': {'data': 'file2'},
'some/files/file3': {'data': 'file3'},
'some/other/file': {'data': 'otherfile'},
}
COMMIT_C = {
'some/files/file2': {
'mode': 0755,
'data': 'file2 - vanilla'},
}
COMMIT_E = {
'some/files/file2': {'data': 'file2 - merged'},
}
COMMIT_D = {
'some/files/file2': {'data': 'file2 - vanilla\nfile2 - merged'},
}
def testHashes(self):
ret = self.repo.run(
self.gc.hash_multi, *[
'master',
'master~3',
self.repo['E']+'~',
self.repo['D']+'^2',
'tag_C^{}',
]
)
self.assertEqual([
self.repo['D'],
self.repo['A'],
self.repo['B'],
self.repo['E'],
self.repo['C'],
], ret)
self.assertEquals(
self.repo.run(self.gc.hash_one, 'branch_D'),
self.repo['D']
)
self.assertTrue(self.repo['D'].startswith(
self.repo.run(self.gc.hash_one, 'branch_D', short=True)))
def testStream(self):
items = set(self.repo.commit_map.itervalues())
def testfn():
for line in self.gc.run_stream('log', '--format=%H').xreadlines():
line = line.strip()
self.assertIn(line, items)
items.remove(line)
self.repo.run(testfn)
def testCurrentBranch(self):
def cur_branch_out_of_git():
os.chdir('..')
return self.gc.current_branch()
self.assertIsNone(self.repo.run(cur_branch_out_of_git))
self.repo.git('checkout', 'branch_D')
self.assertEqual(self.repo.run(self.gc.current_branch), 'branch_D')
def testBranches(self):
self.assertEqual(self.repo.run(set, self.gc.branches()),
{'master', 'branch_D', 'root_A'})
def testDormant(self):
self.assertFalse(self.repo.run(self.gc.is_dormant, 'master'))
self.repo.git('config', 'branch.master.dormant', 'true')
self.assertTrue(self.repo.run(self.gc.is_dormant, 'master'))
def testParseCommitrefs(self):
ret = self.repo.run(
self.gc.parse_commitrefs, *[
'master',
'master~3',
self.repo['E']+'~',
self.repo['D']+'^2',
'tag_C^{}',
]
)
self.assertEqual(ret, map(binascii.unhexlify, [
self.repo['D'],
self.repo['A'],
self.repo['B'],
self.repo['E'],
self.repo['C'],
]))
with self.assertRaisesRegexp(Exception, r"one of \('master', 'bananas'\)"):
self.repo.run(self.gc.parse_commitrefs, 'master', 'bananas')
def testTags(self):
self.assertEqual(set(self.repo.run(self.gc.tags)),
{'tag_'+l for l in 'ABCDE'})
def testTree(self):
tree = self.repo.run(self.gc.tree, 'master:some/files')
file1 = self.COMMIT_A['some/files/file1']['data']
file2 = self.COMMIT_D['some/files/file2']['data']
file3 = self.COMMIT_A['some/files/file3']['data']
self.assertEquals(
tree['file1'],
('100644', 'blob', git_test_utils.git_hash_data(file1)))
self.assertEquals(
tree['file2'],
('100755', 'blob', git_test_utils.git_hash_data(file2)))
self.assertEquals(
tree['file3'],
('100644', 'blob', git_test_utils.git_hash_data(file3)))
tree = self.repo.run(self.gc.tree, 'master:some')
self.assertEquals(len(tree), 2)
# Don't check the tree hash because we're lazy :)
self.assertEquals(tree['files'][:2], ('040000', 'tree'))
tree = self.repo.run(self.gc.tree, 'master:wat')
self.assertEqual(tree, None)
def testTreeRecursive(self):
tree = self.repo.run(self.gc.tree, 'master:some', recurse=True)
file1 = self.COMMIT_A['some/files/file1']['data']
file2 = self.COMMIT_D['some/files/file2']['data']
file3 = self.COMMIT_A['some/files/file3']['data']
other = self.COMMIT_A['some/other/file']['data']
self.assertEquals(
tree['files/file1'],
('100644', 'blob', git_test_utils.git_hash_data(file1)))
self.assertEquals(
tree['files/file2'],
('100755', 'blob', git_test_utils.git_hash_data(file2)))
self.assertEquals(
tree['files/file3'],
('100644', 'blob', git_test_utils.git_hash_data(file3)))
self.assertEquals(
tree['other/file'],
('100644', 'blob', git_test_utils.git_hash_data(other)))
class GitMutableFunctionsTest(git_test_utils.GitRepoReadWriteTestBase,
GitCommonTestBase):
REPO_SCHEMA = ''
def _intern_data(self, data):
with tempfile.TemporaryFile() as f:
f.write(data)
f.seek(0)
return self.repo.run(self.gc.intern_f, f)
def testInternF(self):
data = 'CoolBobcatsBro'
data_hash = self._intern_data(data)
self.assertEquals(git_test_utils.git_hash_data(data), data_hash)
self.assertEquals(data, self.repo.git('cat-file', 'blob', data_hash).stdout)
def testMkTree(self):
tree = {}
for i in 1, 2, 3:
name = 'file%d' % i
tree[name] = ('100644', 'blob', self._intern_data(name))
tree_hash = self.repo.run(self.gc.mktree, tree)
self.assertEquals('37b61866d6e061c4ba478e7eb525be7b5752737d', tree_hash)
def testConfig(self):
self.repo.git('config', '--add', 'happy.derpies', 'food')
self.assertEquals(self.repo.run(self.gc.config_list, 'happy.derpies'),
['food'])
self.assertEquals(self.repo.run(self.gc.config_list, 'sad.derpies'), [])
self.repo.git('config', '--add', 'happy.derpies', 'cat')
self.assertEquals(self.repo.run(self.gc.config_list, 'happy.derpies'),
['food', 'cat'])
self.assertEquals('cat', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.repo.run(self.gc.set_config, 'dude.bob', 'dog')
self.assertEquals('dog', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.repo.run(self.gc.del_config, 'dude.bob')
# This should work without raising an exception
self.repo.run(self.gc.del_config, 'dude.bob')
self.assertEquals('cat', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.assertEquals('origin/master', self.repo.run(self.gc.root))
self.repo.git('config', 'depot-tools.upstream', 'catfood')
self.assertEquals('catfood', self.repo.run(self.gc.root))
def testUpstream(self):
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.assertEquals(self.repo.run(self.gc.upstream, 'bobly'), None)
self.assertEquals(self.repo.run(self.gc.upstream, 'master'), None)
self.repo.git('checkout', '-tb', 'happybranch', 'master')
self.assertEquals(self.repo.run(self.gc.upstream, 'happybranch'),
'master')
def testNormalizedVersion(self):
self.assertTrue(all(
isinstance(x, int) for x in self.repo.run(self.gc.get_git_version)))
@unittest.expectedFailure
def testGetBranchesInfo(self):
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.repo.git('checkout', '-tb', 'happybranch', 'master')
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.repo.git('checkout', '-tb', 'child', 'happybranch')
self.repo.git('checkout', '-tb', 'to_delete', 'master')
self.repo.git('checkout', '-tb', 'parent_gone', 'to_delete')
self.repo.git('branch', '-D', 'to_delete')
supports_track = (
self.repo.run(self.gc.get_git_version)
>= self.gc.MIN_UPSTREAM_TRACK_GIT_VERSION)
actual = self.repo.run(self.gc.get_branches_info, supports_track)
expected = {
'happybranch': (
self.repo.run(self.gc.hash_one, 'happybranch', short=True),
'master',
1 if supports_track else None,
None
),
'child': (
self.repo.run(self.gc.hash_one, 'child', short=True),
'happybranch',
None,
None
),
'master': (
self.repo.run(self.gc.hash_one, 'master', short=True),
'',
None,
None
),
'': None,
'parent_gone': (
self.repo.run(self.gc.hash_one, 'parent_gone', short=True),
'to_delete',
1 if supports_track else None,
None
),
'to_delete': None
}
self.assertEquals(expected, actual)
class GitMutableStructuredTest(git_test_utils.GitRepoReadWriteTestBase,
GitCommonTestBase):
REPO_SCHEMA = """
A B C D E F G
B H I J K
J L
X Y Z
CAT DOG
"""
COMMIT_B = {'file': {'data': 'B'}}
COMMIT_H = {'file': {'data': 'H'}}
COMMIT_I = {'file': {'data': 'I'}}
COMMIT_J = {'file': {'data': 'J'}}
COMMIT_K = {'file': {'data': 'K'}}
COMMIT_L = {'file': {'data': 'L'}}
def setUp(self):
super(GitMutableStructuredTest, self).setUp()
self.repo.git('branch', '--set-upstream-to', 'root_X', 'branch_Z')
self.repo.git('branch', '--set-upstream-to', 'branch_G', 'branch_K')
self.repo.git('branch', '--set-upstream-to', 'branch_K', 'branch_L')
self.repo.git('branch', '--set-upstream-to', 'root_A', 'branch_G')
self.repo.git('branch', '--set-upstream-to', 'root_X', 'root_A')
def testTooManyBranches(self):
for i in xrange(30):
self.repo.git('branch', 'a'*i)
_, rslt = self.repo.capture_stdio(list, self.gc.branches())
self.assertIn('too many branches (39/20)', rslt)
self.repo.git('config', 'depot-tools.branch-limit', 'cat')
_, rslt = self.repo.capture_stdio(list, self.gc.branches())
self.assertIn('too many branches (39/20)', rslt)
self.repo.git('config', 'depot-tools.branch-limit', '100')
# should not raise
self.assertEqual(38, len(self.repo.run(list, self.gc.branches())))
def testMergeBase(self):
self.repo.git('checkout', 'branch_K')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
self.assertEqual(
self.repo['J'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_L', 'branch_K')
)
self.assertEqual(
self.repo['B'], self.repo.run(self.gc.config, 'branch.branch_K.base')
)
self.assertEqual(
'branch_G', self.repo.run(self.gc.config, 'branch.branch_K.base-upstream')
)
# deadbeef is a bad hash, so this will result in repo['B']
self.repo.run(self.gc.manual_merge_base, 'branch_K', 'deadbeef', 'branch_G')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
# but if we pick a real ancestor, then it'll work
self.repo.run(self.gc.manual_merge_base, 'branch_K', self.repo['I'],
'branch_G')
self.assertEqual(
self.repo['I'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
self.assertEqual({'branch_K': self.repo['I'], 'branch_L': self.repo['J']},
self.repo.run(self.gc.branch_config_map, 'base'))
self.repo.run(self.gc.remove_merge_base, 'branch_K')
self.repo.run(self.gc.remove_merge_base, 'branch_L')
self.assertEqual(None,
self.repo.run(self.gc.config, 'branch.branch_K.base'))
self.assertEqual({}, self.repo.run(self.gc.branch_config_map, 'base'))
# if it's too old, then it caps at merge-base
self.repo.run(self.gc.manual_merge_base, 'branch_K', self.repo['A'],
'branch_G')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
# If the user does --set-upstream-to something else, then we discard the
# base and recompute it.
self.repo.run(self.gc.run, 'branch', '-u', 'root_A')
self.assertEqual(
self.repo['A'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K')
)
self.assertIsNone(
self.repo.run(self.gc.get_or_create_merge_base, 'branch_DOG'))
def testGetBranchTree(self):
skipped, tree = self.repo.run(self.gc.get_branch_tree)
self.assertEqual(skipped, {'master', 'root_X', 'branch_DOG', 'root_CAT'})
self.assertEqual(tree, {
'branch_G': 'root_A',
'root_A': 'root_X',
'branch_K': 'branch_G',
'branch_L': 'branch_K',
'branch_Z': 'root_X'
})
topdown = list(self.gc.topo_iter(tree))
bottomup = list(self.gc.topo_iter(tree, top_down=False))
self.assertEqual(topdown, [
('branch_Z', 'root_X'),
('root_A', 'root_X'),
('branch_G', 'root_A'),
('branch_K', 'branch_G'),
('branch_L', 'branch_K'),
])
self.assertEqual(bottomup, [
('branch_L', 'branch_K'),
('branch_Z', 'root_X'),
('branch_K', 'branch_G'),
('branch_G', 'root_A'),
('root_A', 'root_X'),
])
def testIsGitTreeDirty(self):
self.assertEquals(False, self.repo.run(self.gc.is_dirty_git_tree, 'foo'))
self.repo.open('test.file', 'w').write('test data')
self.repo.git('add', 'test.file')
self.assertEquals(True, self.repo.run(self.gc.is_dirty_git_tree, 'foo'))
def testSquashBranch(self):
self.repo.git('checkout', 'branch_K')
self.assertEquals(True, self.repo.run(self.gc.squash_current_branch,
'cool message'))
lines = ['cool message', '']
for l in 'HIJK':
lines.extend((self.repo[l], l, ''))
lines.pop()
msg = '\n'.join(lines)
self.assertEquals(self.repo.run(self.gc.run, 'log', '-n1', '--format=%B'),
msg)
self.assertEquals(
self.repo.git('cat-file', 'blob', 'branch_K:file').stdout,
'K'
)
def testSquashBranchEmpty(self):
self.repo.git('checkout', 'branch_K')
self.repo.git('checkout', 'branch_G', '.')
self.repo.git('commit', '-m', 'revert all changes no branch')
    # Should return False since the squash would result in an empty commit
stdout = self.repo.capture_stdio(self.gc.squash_current_branch)[0]
self.assertEquals(stdout, 'Nothing to commit; squashed branch is empty\n')
def testRebase(self):
self.assertSchema("""
A B C D E F G
B H I J K
J L
X Y Z
CAT DOG
""")
rslt = self.repo.run(
self.gc.rebase, 'branch_G', 'branch_K~4', 'branch_K')
self.assertTrue(rslt.success)
self.assertSchema("""
A B C D E F G H I J K
B H I J L
X Y Z
CAT DOG
""")
rslt = self.repo.run(
self.gc.rebase, 'branch_K', 'branch_L~1', 'branch_L', abort=True)
self.assertFalse(rslt.success)
self.assertFalse(self.repo.run(self.gc.in_rebase))
rslt = self.repo.run(
self.gc.rebase, 'branch_K', 'branch_L~1', 'branch_L', abort=False)
self.assertFalse(rslt.success)
self.assertTrue(self.repo.run(self.gc.in_rebase))
self.assertEqual(self.repo.git('status', '--porcelain').stdout, 'UU file\n')
self.repo.git('checkout', '--theirs', 'file')
self.repo.git('add', 'file')
self.repo.git('rebase', '--continue')
self.assertSchema("""
A B C D E F G H I J K L
X Y Z
CAT DOG
""")
class GitFreezeThaw(git_test_utils.GitRepoReadWriteTestBase):
@classmethod
def setUpClass(cls):
super(GitFreezeThaw, cls).setUpClass()
import git_common
cls.gc = git_common
cls.gc.TEST_MODE = True
REPO_SCHEMA = """
A B C D
B E D
"""
COMMIT_A = {
'some/files/file1': {'data': 'file1'},
'some/files/file2': {'data': 'file2'},
'some/files/file3': {'data': 'file3'},
'some/other/file': {'data': 'otherfile'},
}
COMMIT_C = {
'some/files/file2': {
'mode': 0755,
'data': 'file2 - vanilla'},
}
COMMIT_E = {
'some/files/file2': {'data': 'file2 - merged'},
}
COMMIT_D = {
'some/files/file2': {'data': 'file2 - vanilla\nfile2 - merged'},
}
def testNothing(self):
self.assertIsNotNone(self.repo.run(self.gc.thaw)) # 'Nothing to thaw'
self.assertIsNotNone(self.repo.run(self.gc.freeze)) # 'Nothing to freeze'
def testAll(self):
def inner():
with open('some/files/file2', 'a') as f2:
print >> f2, 'cool appended line'
os.mkdir('some/other_files')
with open('some/other_files/subdir_file', 'w') as f3:
print >> f3, 'new file!'
with open('some/files/file5', 'w') as f5:
print >> f5, 'New file!1!one!'
STATUS_1 = '\n'.join((
' M some/files/file2',
'A some/files/file5',
'?? some/other_files/'
)) + '\n'
self.repo.git('add', 'some/files/file5')
# Freeze group 1
self.assertEquals(self.repo.git('status', '--porcelain').stdout, STATUS_1)
self.assertIsNone(self.gc.freeze())
self.assertEquals(self.repo.git('status', '--porcelain').stdout, '')
# Freeze group 2
with open('some/files/file2', 'a') as f2:
print >> f2, 'new! appended line!'
self.assertEquals(self.repo.git('status', '--porcelain').stdout,
' M some/files/file2\n')
self.assertIsNone(self.gc.freeze())
self.assertEquals(self.repo.git('status', '--porcelain').stdout, '')
# Thaw it out!
self.assertIsNone(self.gc.thaw())
self.assertIsNotNone(self.gc.thaw()) # One thaw should thaw everything
self.assertEquals(self.repo.git('status', '--porcelain').stdout, STATUS_1)
self.repo.run(inner)
if __name__ == '__main__':
sys.exit(coverage_utils.covered_main(
os.path.join(DEPOT_TOOLS_ROOT, 'git_common.py')
))
|
|
import traceback
import sys
import ctypes
import comtypes
from comtypes.hresult import *
import comtypes.automation
import comtypes.typeinfo
import comtypes.connectionpoints
from comtypes.client import wrap
from comtypes.client.dynamic import Dispatch
from comtypes.gen import MSHTML
import logging
logger = logging.getLogger(__name__)
class _AdviseConnection(object):
def __init__(self, source, interface, receiver):
cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
self.cp = cpc.FindConnectionPoint(ctypes.byref(interface._iid_))
logger.debug("Start advise %s", interface)
self.cookie = self.cp.Advise(receiver)
self.receiver = receiver
def disconnect(self):
if self.cookie:
self.cp.Unadvise(self.cookie)
logger.debug("Unadvised %s", self.cp)
self.cp = None
self.cookie = None
del self.receiver
def __del__(self):
try:
if self.cookie is not None:
self.cp.Unadvise(self.cookie)
except (comtypes.COMError, WindowsError):
# Are we sure we want to ignore errors here?
pass
def FindOutgoingInterface(source):
"""XXX Describe the strategy that is used..."""
# If the COM object implements IProvideClassInfo2, it is easy to
    # find the default outgoing interface.
try:
pci = source.QueryInterface(comtypes.typeinfo.IProvideClassInfo2)
guid = pci.GetGUID(1)
except comtypes.COMError:
pass
else:
# another try: block needed?
try:
interface = comtypes.com_interface_registry[str(guid)]
except KeyError:
tinfo = pci.GetClassInfo()
tlib, index = tinfo.GetContainingTypeLib()
from comtypes.client import GetModule
GetModule(tlib)
interface = comtypes.com_interface_registry[str(guid)]
logger.debug("%s using sinkinterface %s", source, interface)
return interface
# If we can find the CLSID of the COM object, we can look for a
# registered outgoing interface (__clsid has been set by
# comtypes.client):
clsid = source.__dict__.get('__clsid')
try:
interface = comtypes.com_coclass_registry[clsid]._outgoing_interfaces_[0]
except KeyError:
pass
else:
logger.debug("%s using sinkinterface from clsid %s", source, interface)
return interface
## interface = find_single_connection_interface(source)
## if interface:
## return interface
raise TypeError("cannot determine source interface")
def find_single_connection_interface(source):
# Enumerate the connection interfaces. If we find a single one,
# return it, if there are more, we give up since we cannot
# determine which one to use.
cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
enum = cpc.EnumConnectionPoints()
iid = enum.next().GetConnectionInterface()
try:
enum.next()
except StopIteration:
try:
interface = comtypes.com_interface_registry[str(iid)]
except KeyError:
return None
else:
logger.debug("%s using sinkinterface from iid %s", source, interface)
return interface
else:
logger.debug("%s has nore than one connection point", source)
return None
from comtypes._comobject import _MethodFinder
class _SinkMethodFinder(_MethodFinder):
def __init__(self, inst, sink):
super(_SinkMethodFinder, self).__init__(inst)
self.sink = sink
def find_method(self, fq_name, mthname):
try:
return super(_SinkMethodFinder, self).find_method(fq_name, mthname)
except AttributeError:
try:
return getattr(self.sink, fq_name)
except AttributeError:
return getattr(self.sink, mthname)
def CreateEventReceiver(interface, sink):
class Sink(comtypes.COMObject):
_com_interfaces_ = [interface]
def _get_method_finder_(self, itf):
# Use a special MethodFinder that will first try 'self',
# then the sink.
return _SinkMethodFinder(self, sink)
return Sink()
def GetEvents(source, sink, interface=None):
"""Receive COM events from 'source'. Events will call methods on
the 'sink' object. 'interface' is the source interface to use.
"""
# When called from CreateObject, the sourceinterface has already
# been determined by the coclass. Otherwise, the only thing that
# makes sense is to use IProvideClassInfo2 to get the default
# source interface.
if interface is None:
interface = FindOutgoingInterface(source)
rcv = CreateEventReceiver(interface, sink)
return _AdviseConnection(source, interface, rcv)
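# Illustrative sketch, not part of the original module: minimal client-side
# wiring for GetEvents. The ProgID and the handler name below are hypothetical
# placeholders, not a real API contract.
def _example_get_events_usage():
    from comtypes.client import CreateObject

    class Sink(object):
        # Handler names mirror the methods of the source interface; this
        # particular name is only an assumed example.
        def OnSomething(self, this, *args):
            logger.debug("OnSomething fired with %r", args)

    source = CreateObject("Some.Hypothetical.ProgID")
    # Keep the returned connection object alive; the advise is dropped as soon
    # as it is garbage collected (see _AdviseConnection above).
    connection = GetEvents(source, Sink())
    # ... run a message loop (e.g. PumpEvents, defined below) so that events
    # can actually be delivered, then tear the connection down ...
    connection.disconnect()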
class EventDumper(object):
"""Universal sink for COM events."""
def __getattr__(self, name):
"Create event handler methods on demand"
if name.startswith("__") and name.endswith("__"):
raise AttributeError(name)
#print "# event found:", name
def handler(self, this, *args, **kw):
# XXX handler is called with 'this'. Should we really print "None" instead?
args = (None,) + args
#print "Event %s(%s)" % (name, ", ".join([repr(a) for a in args]))
return comtypes.instancemethod(handler, EventDumper, self)
def ShowEvents(source, interface=None):
"""Receive COM events from 'source'. A special event sink will be
used that first prints the names of events that are found in the
outgoing interface, and will also print out the events when they
are fired.
"""
return GetEvents(source, sink=EventDumper(), interface=interface)
def PumpEvents(timeout):
"""This following code waits for 'timeout' seconds in the way
required for COM, internally doing the correct things depending
on the COM appartment of the current thread. It is possible to
terminate the message loop by pressing CTRL+C, which will raise
a KeyboardInterrupt.
"""
# XXX Should there be a way to pass additional event handles which
# can terminate this function?
# XXX XXX XXX
#
# It may be that I misunderstood the CoWaitForMultipleHandles
# function. Is a message loop required in a STA? Seems so...
#
# MSDN says:
#
# If the caller resides in a single-thread apartment,
# CoWaitForMultipleHandles enters the COM modal loop, and the
# thread's message loop will continue to dispatch messages using
# the thread's message filter. If no message filter is registered
# for the thread, the default COM message processing is used.
#
# If the calling thread resides in a multithread apartment (MTA),
# CoWaitForMultipleHandles calls the Win32 function
# MsgWaitForMultipleObjects.
hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None)
handles = (ctypes.c_void_p * 1)(hevt)
RPC_S_CALLPENDING = -2147417835
@ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)
def HandlerRoutine(dwCtrlType):
if dwCtrlType == 0: # CTRL+C
ctypes.windll.kernel32.SetEvent(hevt)
return 1
return 0
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1)
try:
try:
res = ctypes.oledll.ole32.CoWaitForMultipleHandles(0,
int(timeout * 1000),
len(handles), handles,
ctypes.byref(ctypes.c_ulong()))
except WindowsError, details:
if details.args[0] != RPC_S_CALLPENDING: # timeout expired
raise
else:
raise KeyboardInterrupt
finally:
ctypes.windll.kernel32.CloseHandle(hevt)
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
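# Illustrative sketch, not part of the original module: ShowEvents plus
# PumpEvents is the usual way to observe which events a COM object fires.
# The ProgID below is a hypothetical placeholder.
def _example_show_events_usage():
    from comtypes.client import CreateObject

    source = CreateObject("Some.Hypothetical.ProgID")
    connection = ShowEvents(source)  # uses EventDumper as a catch-all sink
    try:
        # Pump COM messages for ten seconds; CTRL+C raises KeyboardInterrupt
        # as described in the PumpEvents docstring above.
        PumpEvents(10.0)
    except KeyboardInterrupt:
        pass
    connection.disconnect()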
class _DispEventReceiver(comtypes.COMObject):
_com_interfaces_ = [comtypes.automation.IDispatch]
# Hrm. If the receiving interface is implemented as a dual interface,
# the methods implementations expect 'out, retval' parameters in their
# argument list.
#
# What would happen if we call ITypeInfo::Invoke() ?
# If we call the methods directly, shouldn't we pass pVarResult
# as last parameter?
def IDispatch_Invoke(self, this, memid, riid, lcid, wFlags, pDispParams,
pVarResult, pExcepInfo, puArgErr):
#print "IDispatch_Invoke", memid, this, riid, lcid, pDispParams
mth = self.dispmap.get(memid, None)
if mth is None:
return S_OK
dp = pDispParams[0]
#print "num args", dp.cArgs
# DISPPARAMS contains the arguments in reverse order
args = [dp.rgvarg[i].value for i in range(dp.cArgs)]
#print "Event", self, memid, mth, args
event = None
if len(args) > 0:
event = wrap(args[0])
try:
result = mth(self.sender, event, None)
except:
            sys.stderr.write(traceback.format_exc())
sys.stderr.flush()
if pVarResult:
pVarResult[0].value = result
return S_OK
def GetTypeInfoCount(self, this, presult):
if not presult:
return E_POINTER
presult[0] = 0
return S_OK
def GetTypeInfo(self, this, itinfo, lcid, pptinfo):
return E_NOTIMPL
def GetIDsOfNames(self, this, riid, rgszNames, cNames, lcid, rgDispId):
return E_NOTIMPL
# XXX move into comtypes
def _getmemid(idlflags):
# get the dispid from the idlflags sequence
return [memid for memid in idlflags if isinstance(memid, int)][0]
# XXX move into comtypes?
def _get_dispmap(interface):
# return a dictionary mapping dispid numbers to method names
assert issubclass(interface, comtypes.automation.IDispatch)
dispmap = {}
if "dual" in interface._idlflags_:
# It would be nice if that would work:
## for info in interface._methods_:
## mth = getattr(interface, info.name)
## memid = mth.im_func.memid
# See also MSDN docs for the 'defaultvtable' idl flag, or
# IMPLTYPEFLAG_DEFAULTVTABLE. This is not a flag of the
# interface, but of the coclass!
#
# Use the _methods_ list
assert not hasattr(interface, "_disp_methods_")
for restype, name, argtypes, paramflags, idlflags, helpstring in interface._methods_:
memid = _getmemid(idlflags)
dispmap[memid] = name
else:
# Use _disp_methods_
# tag, name, idlflags, restype(?), argtypes(?)
for tag, name, idlflags, restype, argtypes in interface._disp_methods_:
memid = _getmemid(idlflags)
dispmap[memid] = name
return dispmap
def GetDispEventReceiver(interface, sink, sink_name=None):
methods = {} # maps memid to function
interfaces = interface.mro()[:-3] # skip IDispatch, IUnknown, object
interface_names = [itf.__name__ for itf in interfaces]
for itf in interfaces:
for memid, name in _get_dispmap(itf).iteritems():
if name == sink_name:
#print "GetDispEventReceiver", memid, name
methods[0] = sink
continue
# find methods to call, if not found ignore event
for itf_name in interface_names:
mth = getattr(sink, "%s_%s" % (itf_name, name), None)
if mth is not None:
break
else:
mth = getattr(sink, name, lambda *args: S_OK)
methods[memid] = mth
# XX Move this stuff into _DispEventReceiver.__init__() ?
rcv = _DispEventReceiver()
rcv.dispmap = methods
rcv._com_pointers_[interface._iid_] = rcv._com_pointers_[comtypes.automation.IDispatch._iid_]
return rcv
def GetCustomEventReceiver(interface, sink):
class EventReceiver(comtypes.COMObject):
_com_interfaces_ = [interface]
for itf in interface.mro()[:-2]: # skip object and IUnknown
for info in itf._methods_:
restype, name, argtypes, paramflags, idlflags, docstring = info
mth = getattr(sink, name, lambda self, this, *args: S_OK)
setattr(EventReceiver, name, mth)
rcv = EventReceiver()
return rcv
|
|
"""
:synopsis: Unit Tests for Windows IIS Module 'module.win_iis'
:platform: Windows
:maturity: develop
    .. versionadded:: 2016.11.0
"""
import salt.modules.win_iis as win_iis
import salt.utils.json
from salt.exceptions import SaltInvocationError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, call, patch
from tests.support.unit import TestCase
APP_LIST = {
"testApp": {
"apppool": "MyTestPool",
"path": "/testApp",
"preload": False,
"protocols": ["http"],
"sourcepath": r"C:\inetpub\apps\testApp",
}
}
APPPOOL_LIST = {"MyTestPool": {"applications": ["MyTestSite"], "state": "Started"}}
BINDING_LIST = {
"*:80:": {
"certificatehash": None,
"certificatestorename": None,
"hostheader": None,
"ipaddress": "*",
"port": 80,
"protocol": "http",
"sslflags": 0,
},
"*:443:mytestsite.local": {
"certificatehash": "9988776655443322111000AAABBBCCCDDDEEEFFF",
"certificatestorename": "My",
"hostheader": "mytestsite.local",
"ipaddress": "*",
"port": 443,
"protocol": "https",
"sslflags": 0,
},
}
SITE_LIST = {
"MyTestSite": {
"apppool": "MyTestPool",
"bindings": BINDING_LIST,
"id": 1,
"sourcepath": r"C:\inetpub\wwwroot",
"state": "Started",
}
}
VDIR_LIST = {"TestVdir": {"sourcepath": r"C:\inetpub\vdirs\TestVdir"}}
NESTED_VDIR_LIST = {
"Test/Nested/Vdir": {"sourcepath": r"C:\inetpub\vdirs\NestedTestVdir"}
}
LIST_APPS_SRVMGR = {
"retcode": 0,
"stdout": salt.utils.json.dumps(
[
{
"applicationPool": "MyTestPool",
"name": "testApp",
"path": "/testApp",
"PhysicalPath": r"C:\inetpub\apps\testApp",
"preloadEnabled": False,
"protocols": "http",
}
]
),
}
LIST_APPPOOLS_SRVMGR = {
"retcode": 0,
"stdout": salt.utils.json.dumps(
[
{
"name": "MyTestPool",
"state": "Started",
"Applications": {"value": ["MyTestSite"], "Count": 1},
}
]
),
}
LIST_VDIRS_SRVMGR = {
"retcode": 0,
"stdout": salt.utils.json.dumps(
[{"name": "TestVdir", "physicalPath": r"C:\inetpub\vdirs\TestVdir"}]
),
}
LIST_MORE_VDIRS_SRVMGR = {
"retcode": 0,
"stdout": salt.utils.json.dumps(
[
{"name": "TestVdir", "physicalPath": r"C:\inetpub\vdirs\TestVdir"},
{
"name": "Test/Nested/Vdir",
"physicalPath": r"C:\inetpub\vdirs\NestedTestVdir",
},
]
),
}
CONTAINER_SETTING = {
"retcode": 0,
"stdout": salt.utils.json.dumps([{"managedPipelineMode": "Integrated"}]),
}
CERT_BINDING_INFO = "*:443:mytestsite.local"
class WinIisTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.win_iis
"""
def setup_loader_modules(self):
return {win_iis: {}}
def test_create_apppool(self):
"""
Test - Create an IIS application pool.
"""
with patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_apppools", MagicMock(return_value=dict())
), patch.dict(
win_iis.__salt__
):
self.assertTrue(win_iis.create_apppool("MyTestPool"))
def test_list_apppools(self):
"""
Test - List all configured IIS application pools.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value=LIST_APPPOOLS_SRVMGR)
):
self.assertEqual(win_iis.list_apppools(), APPPOOL_LIST)
def test_remove_apppool(self):
"""
Test - Remove an IIS application pool.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_apppools",
MagicMock(
return_value={
"MyTestPool": {"applications": list(), "state": "Started"}
}
),
):
self.assertTrue(win_iis.remove_apppool("MyTestPool"))
def test_restart_apppool(self):
"""
Test - Restart an IIS application pool.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
):
self.assertTrue(win_iis.restart_apppool("MyTestPool"))
def test_create_site(self):
"""
Test - Create a basic website in IIS.
"""
kwargs = {
"name": "MyTestSite",
"sourcepath": r"C:\inetpub\wwwroot",
"apppool": "MyTestPool",
"hostheader": "mytestsite.local",
"ipaddress": "*",
"port": 80,
"protocol": "http",
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_sites", MagicMock(return_value=dict())
), patch(
"salt.modules.win_iis.list_apppools", MagicMock(return_value=dict())
):
self.assertTrue(win_iis.create_site(**kwargs))
def test_create_site_failed(self):
"""
Test - Create a basic website in IIS using invalid data.
"""
kwargs = {
"name": "MyTestSite",
"sourcepath": r"C:\inetpub\wwwroot",
"apppool": "MyTestPool",
"hostheader": "mytestsite.local",
"ipaddress": "*",
"port": 80,
"protocol": "invalid-protocol-name",
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_sites", MagicMock(return_value=dict())
), patch(
"salt.modules.win_iis.list_apppools", MagicMock(return_value=dict())
):
self.assertRaises(SaltInvocationError, win_iis.create_site, **kwargs)
def test_remove_site(self):
"""
Test - Delete a website from IIS.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch("salt.modules.win_iis.list_sites", MagicMock(return_value=SITE_LIST)):
self.assertTrue(win_iis.remove_site("MyTestSite"))
def test_create_app(self):
"""
Test - Create an IIS application.
"""
kwargs = {
"name": "testApp",
"site": "MyTestSite",
"sourcepath": r"C:\inetpub\apps\testApp",
"apppool": "MyTestPool",
}
with patch.dict(win_iis.__salt__), patch(
"os.path.isdir", MagicMock(return_value=True)
), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_apps", MagicMock(return_value=APP_LIST)
):
self.assertTrue(win_iis.create_app(**kwargs))
def test_list_apps(self):
"""
Test - Get all configured IIS applications for the specified site.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value=LIST_APPS_SRVMGR)
):
self.assertEqual(win_iis.list_apps("MyTestSite"), APP_LIST)
def test_remove_app(self):
"""
Test - Remove an IIS application.
"""
kwargs = {"name": "otherApp", "site": "MyTestSite"}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch("salt.modules.win_iis.list_apps", MagicMock(return_value=APP_LIST)):
self.assertTrue(win_iis.remove_app(**kwargs))
def test_create_binding(self):
"""
Test - Create an IIS binding.
"""
kwargs = {
"site": "MyTestSite",
"hostheader": "",
"ipaddress": "*",
"port": 80,
"protocol": "http",
"sslflags": 0,
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_bindings", MagicMock(return_value=BINDING_LIST)
):
self.assertTrue(win_iis.create_binding(**kwargs))
def test_create_binding_failed(self):
"""
Test - Create an IIS binding using invalid data.
"""
kwargs = {
"site": "MyTestSite",
"hostheader": "",
"ipaddress": "*",
"port": 80,
"protocol": "invalid-protocol-name",
"sslflags": 999,
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_bindings", MagicMock(return_value=BINDING_LIST)
):
self.assertRaises(SaltInvocationError, win_iis.create_binding, **kwargs)
def test_list_bindings(self):
"""
Test - Get all configured IIS bindings for the specified site.
"""
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis.list_sites", MagicMock(return_value=SITE_LIST)
):
self.assertEqual(win_iis.list_bindings("MyTestSite"), BINDING_LIST)
def test_remove_binding(self):
"""
Test - Remove an IIS binding.
"""
kwargs = {
"site": "MyTestSite",
"hostheader": "myothertestsite.local",
"ipaddress": "*",
"port": 443,
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_bindings", MagicMock(return_value=BINDING_LIST)
):
self.assertTrue(win_iis.remove_binding(**kwargs))
def test_create_vdir(self):
"""
Test - Create an IIS virtual directory.
"""
kwargs = {
"name": "TestVdir",
"site": "MyTestSite",
"sourcepath": r"C:\inetpub\vdirs\TestVdir",
}
with patch.dict(win_iis.__salt__), patch(
"os.path.isdir", MagicMock(return_value=True)
), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_vdirs", MagicMock(return_value=VDIR_LIST)
):
self.assertTrue(win_iis.create_vdir(**kwargs))
def test_list_vdirs(self):
"""
Test - Get configured IIS virtual directories.
"""
vdirs = {"TestVdir": {"sourcepath": r"C:\inetpub\vdirs\TestVdir"}}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value=LIST_VDIRS_SRVMGR)
):
self.assertEqual(win_iis.list_vdirs("MyTestSite"), vdirs)
def test_remove_vdir(self):
"""
Test - Remove an IIS virtual directory.
"""
kwargs = {"name": "TestOtherVdir", "site": "MyTestSite"}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch("salt.modules.win_iis.list_vdirs", MagicMock(return_value=VDIR_LIST)):
self.assertTrue(win_iis.remove_vdir(**kwargs))
def test_create_nested_vdir(self):
"""
Test - Create a nested IIS virtual directory.
"""
kwargs = {
"name": "Test/Nested/Vdir",
"site": "MyTestSite",
"sourcepath": r"C:\inetpub\vdirs\NestedTestVdir",
}
with patch.dict(win_iis.__salt__), patch(
"os.path.isdir", MagicMock(return_value=True)
), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_vdirs", MagicMock(return_value=NESTED_VDIR_LIST)
):
self.assertTrue(win_iis.create_vdir(**kwargs))
def test_list_nested_vdirs(self):
"""
Test - Get configured IIS virtual directories.
"""
vdirs = {
"TestVdir": {"sourcepath": r"C:\inetpub\vdirs\TestVdir"},
"Test/Nested/Vdir": {"sourcepath": r"C:\inetpub\vdirs\NestedTestVdir"},
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value=LIST_MORE_VDIRS_SRVMGR),
):
self.assertEqual(win_iis.list_vdirs("MyTestSite"), vdirs)
def test_create_cert_binding(self):
"""
Test - Assign a certificate to an IIS binding.
"""
kwargs = {
"name": "9988776655443322111000AAABBBCCCDDDEEEFFF",
"site": "MyTestSite",
"hostheader": "mytestsite.local",
"ipaddress": "*",
"port": 443,
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._list_certs",
MagicMock(return_value={"9988776655443322111000AAABBBCCCDDDEEEFFF": None}),
), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value={"retcode": 0, "stdout": 10}),
), patch(
"salt.utils.json.loads",
MagicMock(return_value=[{"MajorVersion": 10, "MinorVersion": 0}]),
), patch(
"salt.modules.win_iis.list_bindings", MagicMock(return_value=BINDING_LIST)
), patch(
"salt.modules.win_iis.list_cert_bindings",
MagicMock(
return_value={CERT_BINDING_INFO: BINDING_LIST[CERT_BINDING_INFO]}
),
):
self.assertTrue(win_iis.create_cert_binding(**kwargs))
def test_list_cert_bindings(self):
"""
Test - List certificate bindings for an IIS site.
"""
key = "*:443:mytestsite.local"
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis.list_sites", MagicMock(return_value=SITE_LIST)
):
self.assertEqual(
win_iis.list_cert_bindings("MyTestSite"), {key: BINDING_LIST[key]}
)
def test_remove_cert_binding(self):
"""
Test - Remove a certificate from an IIS binding.
"""
kwargs = {
"name": "FFFEEEDDDCCCBBBAAA0001112233445566778899",
"site": "MyOtherTestSite",
"hostheader": "myothertestsite.local",
"ipaddress": "*",
"port": 443,
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.list_cert_bindings",
MagicMock(
return_value={CERT_BINDING_INFO: BINDING_LIST[CERT_BINDING_INFO]}
),
):
self.assertTrue(win_iis.remove_cert_binding(**kwargs))
def test_get_container_setting(self):
"""
Test - Get the value of the setting for the IIS container.
"""
kwargs = {
"name": "MyTestSite",
"container": "AppPools",
"settings": ["managedPipelineMode"],
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value=CONTAINER_SETTING)
):
self.assertEqual(
win_iis.get_container_setting(**kwargs),
{"managedPipelineMode": "Integrated"},
)
def test_set_container_setting(self):
"""
Test - Set the value of the setting for an IIS container.
"""
kwargs = {
"name": "MyTestSite",
"container": "AppPools",
"settings": {"managedPipelineMode": "Integrated"},
}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._srvmgr", MagicMock(return_value={"retcode": 0})
), patch(
"salt.modules.win_iis.get_container_setting",
MagicMock(return_value={"managedPipelineMode": "Integrated"}),
):
self.assertTrue(win_iis.set_container_setting(**kwargs))
def test__collection_match_to_index(self):
bad_match = {"key_0": "value"}
first_match = {"key_1": "value"}
second_match = {"key_2": "value"}
collection = [first_match, second_match]
settings = [{"name": "enabled", "value": collection}]
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis.get_webconfiguration_settings",
MagicMock(return_value=settings),
):
ret = win_iis._collection_match_to_index(
"pspath", "colfilter", "name", bad_match
)
self.assertEqual(ret, -1)
ret = win_iis._collection_match_to_index(
"pspath", "colfilter", "name", first_match
)
self.assertEqual(ret, 0)
ret = win_iis._collection_match_to_index(
"pspath", "colfilter", "name", second_match
)
self.assertEqual(ret, 1)
def test__prepare_settings(self):
simple_setting = {"name": "value", "filter": "value"}
collection_setting = {"name": "Collection[{yaml:\n\tdata}]", "filter": "value"}
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._collection_match_to_index", MagicMock(return_value=0)
):
ret = win_iis._prepare_settings(
"pspath",
[
simple_setting,
collection_setting,
{"invalid": "setting"},
{"name": "filter-less_setting"},
],
)
self.assertEqual(ret, [simple_setting, collection_setting])
@patch("salt.modules.win_iis.log")
def test_get_webconfiguration_settings_empty(self, mock_log):
ret = win_iis.get_webconfiguration_settings("name", settings=[])
mock_log.warning.assert_called_once_with("No settings provided")
self.assertEqual(ret, {})
def test_get_webconfiguration_settings(self):
# Setup
name = "IIS"
collection_setting = {"name": "Collection[{yaml:\n\tdata}]", "filter": "value"}
filter_setting = {
"name": "enabled",
"filter": (
"system.webServer / security / authentication / anonymousAuthentication"
),
}
settings = [collection_setting, filter_setting]
ps_cmd = [
"$Settings = New-Object System.Collections.ArrayList;",
]
for setting in settings:
ps_cmd.extend(
[
"$Property = Get-WebConfigurationProperty -PSPath '{}'".format(
name
),
"-Name '{name}' -Filter '{filter}' -ErrorAction Stop;".format(
filter=setting["filter"], name=setting["name"]
),
"if (([String]::IsNullOrEmpty($Property) -eq $False) -and",
"($Property.GetType()).Name -eq 'ConfigurationAttribute') {",
"$Property = $Property | Select-Object",
"-ExpandProperty Value };",
"$Settings.add(@{{filter='{filter}';name='{name}';value=[String]"
" $Property}})| Out-Null;".format(
filter=setting["filter"], name=setting["name"]
),
"$Property = $Null;",
]
)
ps_cmd.append("$Settings")
# Execute
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._prepare_settings", MagicMock(return_value=settings)
), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value={"retcode": 0, "stdout": "{}"}),
):
ret = win_iis.get_webconfiguration_settings(name, settings=settings)
# Verify
win_iis._srvmgr.assert_called_with(cmd=ps_cmd, return_json=True)
self.assertEqual(ret, {})
@patch("salt.modules.win_iis.log")
def test_set_webconfiguration_settings_empty(self, mock_log):
ret = win_iis.set_webconfiguration_settings("name", settings=[])
mock_log.warning.assert_called_once_with("No settings provided")
self.assertEqual(ret, False)
@patch("salt.modules.win_iis.log")
def test_set_webconfiguration_settings_no_changes(self, mock_log):
# Setup
name = "IIS"
setting = {
"name": "Collection[{yaml:\n\tdata}]",
"filter": (
"system.webServer / security / authentication / anonymousAuthentication"
),
"value": [],
}
settings = [setting]
# Execute
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._prepare_settings", MagicMock(return_value=settings)
), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value={"retcode": 0, "stdout": "{}"}),
), patch(
"salt.modules.win_iis.get_webconfiguration_settings",
MagicMock(return_value=settings),
):
ret = win_iis.set_webconfiguration_settings(name, settings=settings)
# Verify
mock_log.debug.assert_called_with(
"Settings already contain the provided values."
)
self.assertEqual(ret, True)
@patch("salt.modules.win_iis.log")
def test_set_webconfiguration_settings_failed(self, mock_log):
# Setup
name = "IIS"
setting = {
"name": "Collection[{yaml:\n\tdata}]",
"filter": (
"system.webServer / security / authentication / anonymousAuthentication"
),
"value": [],
}
settings = [setting]
# Execute
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._prepare_settings", MagicMock(return_value=settings)
), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value={"retcode": 0, "stdout": "{}"}),
), patch(
"salt.modules.win_iis.get_webconfiguration_settings",
MagicMock(side_effect=[[], [{"value": "unexpected_change!"}]]),
):
ret = win_iis.set_webconfiguration_settings(name, settings=settings)
# Verify
self.assertEqual(ret, False)
mock_log.error.assert_called_with("Failed to change settings: %s", settings)
@patch("salt.modules.win_iis.log")
def test_set_webconfiguration_settings(self, mock_log):
# Setup
name = "IIS"
setting = {
"name": "Collection[{yaml:\n\tdata}]",
"filter": (
"system.webServer / security / authentication / anonymousAuthentication"
),
"value": [],
}
settings = [setting]
# Execute
with patch.dict(win_iis.__salt__), patch(
"salt.modules.win_iis._prepare_settings", MagicMock(return_value=settings)
), patch(
"salt.modules.win_iis._srvmgr",
MagicMock(return_value={"retcode": 0, "stdout": "{}"}),
), patch(
"salt.modules.win_iis.get_webconfiguration_settings",
MagicMock(side_effect=[[], settings]),
):
ret = win_iis.set_webconfiguration_settings(name, settings=settings)
# Verify
self.assertEqual(ret, True)
mock_log.debug.assert_called_with(
"Settings configured successfully: %s", settings
)
def test_get_webconfiguration_settings_no_settings(self):
self.assertEqual(win_iis.get_webconfiguration_settings("salt", {}), {})
def test_get_webconfiguration_settings_pass(self):
settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
}
]
ps_cmd_validate = [
"Get-WebConfigurationProperty",
"-PSPath",
"'salt'",
"-Filter",
"'system.webServer/security/authentication/anonymousAuthentication'",
"-Name",
"'enabled'",
"-ErrorAction",
"Stop",
"|",
"Out-Null;",
]
ps_cmd = [
"$Settings = New-Object System.Collections.ArrayList;",
"$Property = Get-WebConfigurationProperty -PSPath 'salt'",
"-Name 'enabled' -Filter"
" 'system.webServer/security/authentication/anonymousAuthentication'"
" -ErrorAction Stop;",
"if (([String]::IsNullOrEmpty($Property) -eq $False) -and",
"($Property.GetType()).Name -eq 'ConfigurationAttribute') {",
"$Property = $Property | Select-Object",
"-ExpandProperty Value };",
"$Settings.add(@{filter='system.webServer/security/authentication/anonymousAuthentication';name='enabled';value=[String]"
" $Property})| Out-Null;",
"$Property = $Null;",
"$Settings",
]
func_ret = {"name": "enabled", "value": True}
with patch.object(
win_iis, "_srvmgr", return_value={"retcode": 0, "stdout": "json data"}
) as _srvmgr:
with patch.object(
win_iis.salt.utils.json, "loads", return_value=func_ret
) as loads:
ret = win_iis.get_webconfiguration_settings("salt", settings)
self.assertEqual(_srvmgr.call_count, 2)
self.assertEqual(
_srvmgr.mock_calls[0], call(cmd=ps_cmd_validate, return_json=True)
)
self.assertEqual(
_srvmgr.mock_calls[1], call(cmd=ps_cmd, return_json=True)
)
loads.assert_called_once_with("json data", strict=False)
self.assertEqual(func_ret, ret)
def test_set_webconfiguration_settings_no_settings(self):
self.assertEqual(win_iis.set_webconfiguration_settings("salt", {}), False)
def test_set_webconfiguration_settings_pass(self):
settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": False,
}
]
current_settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": True,
}
]
new_settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": False,
}
]
ps_cmd = [
"Set-WebConfigurationProperty",
"-PSPath",
"'salt'",
"-Filter",
"'system.webServer/security/authentication/anonymousAuthentication'",
"-Name",
"'enabled'",
"-Value",
"'False';",
]
with patch.object(
win_iis,
"get_webconfiguration_settings",
side_effect=[current_settings, new_settings],
) as get_webconfiguration_settings:
with patch.object(
win_iis, "_srvmgr", return_value={"retcode": 0}
) as _srvmgr:
ret = win_iis.set_webconfiguration_settings("salt", settings)
self.assertEqual(get_webconfiguration_settings.call_count, 2)
self.assertEqual(
get_webconfiguration_settings.mock_calls[0],
call(name="salt", settings=settings),
)
self.assertEqual(
get_webconfiguration_settings.mock_calls[1],
call(name="salt", settings=settings),
)
_srvmgr.assert_called_once_with(ps_cmd)
self.assertTrue(ret)
def test_set_webconfiguration_settings_fail(self):
settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": False,
}
]
current_settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": True,
}
]
new_settings = [
{
"name": "enabled",
"filter": (
"system.webServer/security/authentication/anonymousAuthentication"
),
"value": True,
}
]
ps_cmd = [
"Set-WebConfigurationProperty",
"-PSPath",
"'salt'",
"-Filter",
"'system.webServer/security/authentication/anonymousAuthentication'",
"-Name",
"'enabled'",
"-Value",
"'False';",
]
with patch.object(
win_iis,
"get_webconfiguration_settings",
side_effect=[current_settings, new_settings],
) as get_webconfiguration_settings:
with patch.object(
win_iis, "_srvmgr", return_value={"retcode": 0}
) as _srvmgr:
ret = win_iis.set_webconfiguration_settings("salt", settings)
self.assertEqual(get_webconfiguration_settings.call_count, 2)
self.assertEqual(
get_webconfiguration_settings.mock_calls[0],
call(name="salt", settings=settings),
)
self.assertEqual(
get_webconfiguration_settings.mock_calls[1],
call(name="salt", settings=settings),
)
_srvmgr.assert_called_once_with(ps_cmd)
self.assertFalse(ret)
|
|
"""
The module for psi-statistics for RBF kernel for Spike-and-Slab GPLVM
"""
import numpy as np
from ....util.caching import Cache_this
from . import PSICOMP_RBF
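# The psi statistics assembled below follow the usual variational-GPLVM
# conventions (stated here as an orienting assumption, not taken from this
# file): with q(X) the spike-and-slab posterior of mean mu, variance S and
# inclusion probability gamma,
#   psi0[n]     = <k(x_n, x_n)>_q(X)                      shape (N,)
#   psi1[n, m]  = <k(x_n, z_m)>_q(X)                      shape (N, M)
#   psi2[m, m'] = sum_n <k(x_n, z_m) k(x_n, z_m')>_q(X)   shape (M, M)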
gpu_code = """
// define THREADNUM
#define IDX_NMQ(n,m,q) ((q*M+m)*N+n)
#define IDX_NMM(n,m1,m2) ((m2*M+m1)*N+n)
#define IDX_NQ(n,q) (q*N+n)
#define IDX_NM(n,m) (m*N+n)
#define IDX_MQ(m,q) (q*M+m)
#define IDX_MM(m1,m2) (m2*M+m1)
#define IDX_NQB(n,q,b) ((b*Q+q)*N+n)
#define IDX_QB(q,b) (b*Q+q)
// Divide data evenly
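// e.g. divide_data(10, 3, pidx, &start, &end) assigns the ranges [0,4),
// [4,7) and [7,10) to pidx = 0, 1, 2: the remainder goes to the leading workers.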
__device__ void divide_data(int total_data, int psize, int pidx, int *start, int *end) {
int residue = (total_data)%psize;
if(pidx<residue) {
int size = total_data/psize+1;
*start = size*pidx;
*end = *start+size;
} else {
int size = total_data/psize;
*start = size*pidx+residue;
*end = *start+size;
}
}
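// In-place parallel sum of array[0..array_size): elements beyond blockDim.x
// are first folded into the leading slots, the tail beyond the largest power
// of two is folded next, and a tree reduction leaves the total in array[0].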
__device__ void reduce_sum(double* array, int array_size) {
int s;
if(array_size >= blockDim.x) {
for(int i=blockDim.x+threadIdx.x; i<array_size; i+= blockDim.x) {
array[threadIdx.x] += array[i];
}
array_size = blockDim.x;
}
__syncthreads();
for(int i=1; i<=array_size;i*=2) {s=i;}
if(threadIdx.x < array_size-s) {array[threadIdx.x] += array[s+threadIdx.x];}
__syncthreads();
for(s=s/2;s>=1;s=s/2) {
if(threadIdx.x < s) {array[threadIdx.x] += array[s+threadIdx.x];}
__syncthreads();
}
}
__global__ void compDenom(double *log_denom1, double *log_denom2, double *log_gamma, double*log_gamma1, double *gamma, double *l, double *S, int N, int Q)
{
int n_start, n_end;
divide_data(N, gridDim.x, blockIdx.x, &n_start, &n_end);
for(int i=n_start*Q+threadIdx.x; i<n_end*Q; i+=blockDim.x) {
int n=i/Q;
int q=i%Q;
double Snq = S[IDX_NQ(n,q)];
double lq = l[q]*l[q];
double gnq = gamma[IDX_NQ(n,q)];
log_denom1[IDX_NQ(n,q)] = log(Snq/lq+1.);
log_denom2[IDX_NQ(n,q)] = log(2.*Snq/lq+1.);
log_gamma[IDX_NQ(n,q)] = log(gnq);
log_gamma1[IDX_NQ(n,q)] = log(1.-gnq);
}
}
__global__ void psi1computations(double *psi1, double *log_denom1, double *log_gamma, double*log_gamma1, double var, double *l, double *Z, double *mu, double *S, int N, int M, int Q)
{
int m_start, m_end;
divide_data(M, gridDim.x, blockIdx.x, &m_start, &m_end);
for(int m=m_start; m<m_end; m++) {
for(int n=threadIdx.x; n<N; n+= blockDim.x) {
double log_psi1 = 0;
for(int q=0;q<Q;q++) {
double Zmq = Z[IDX_MQ(m,q)];
double muZ = mu[IDX_NQ(n,q)]-Zmq;
double Snq = S[IDX_NQ(n,q)];
double lq = l[q]*l[q];
double exp1 = log_gamma[IDX_NQ(n,q)]-(muZ*muZ/(Snq+lq)+log_denom1[IDX_NQ(n,q)])/(2.);
double exp2 = log_gamma1[IDX_NQ(n,q)]-Zmq*Zmq/(2.*lq);
log_psi1 += (exp1>exp2)?exp1+log1p(exp(exp2-exp1)):exp2+log1p(exp(exp1-exp2));
}
psi1[IDX_NM(n,m)] = var*exp(log_psi1);
}
}
}
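// psi2computations enumerates the lower-triangular pairs (m1, m2), writes the
// per-data-point terms var^2 * exp(sum_q logaddexp(exp1, exp2)) into psi2n,
// accumulates their sum over n into psi2, and mirrors the symmetric entries.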
__global__ void psi2computations(double *psi2, double *psi2n, double *log_denom2, double *log_gamma, double*log_gamma1, double var, double *l, double *Z, double *mu, double *S, int N, int M, int Q)
{
int psi2_idx_start, psi2_idx_end;
__shared__ double psi2_local[THREADNUM];
divide_data((M+1)*M/2, gridDim.x, blockIdx.x, &psi2_idx_start, &psi2_idx_end);
for(int psi2_idx=psi2_idx_start; psi2_idx<psi2_idx_end; psi2_idx++) {
int m1 = int((sqrt(8.*psi2_idx+1.)-1.)/2.);
int m2 = psi2_idx - (m1+1)*m1/2;
psi2_local[threadIdx.x] = 0;
for(int n=threadIdx.x;n<N;n+=blockDim.x) {
double log_psi2_n = 0;
for(int q=0;q<Q;q++) {
double Zm1q = Z[IDX_MQ(m1,q)];
double Zm2q = Z[IDX_MQ(m2,q)];
double dZ = Zm1q - Zm2q;
double muZhat = mu[IDX_NQ(n,q)]- (Zm1q+Zm2q)/2.;
double Z2 = Zm1q*Zm1q+Zm2q*Zm2q;
double Snq = S[IDX_NQ(n,q)];
double lq = l[q]*l[q];
double exp1 = dZ*dZ/(-4.*lq)-muZhat*muZhat/(2.*Snq+lq) - log_denom2[IDX_NQ(n,q)]/2. + log_gamma[IDX_NQ(n,q)];
double exp2 = log_gamma1[IDX_NQ(n,q)] - Z2/(2.*lq);
log_psi2_n += (exp1>exp2)?exp1+log1p(exp(exp2-exp1)):exp2+log1p(exp(exp1-exp2));
}
double exp_psi2_n = exp(log_psi2_n);
psi2n[IDX_NMM(n,m1,m2)] = var*var*exp_psi2_n;
if(m1!=m2) { psi2n[IDX_NMM(n,m2,m1)] = var*var*exp_psi2_n;}
psi2_local[threadIdx.x] += exp_psi2_n;
}
__syncthreads();
reduce_sum(psi2_local, THREADNUM);
if(threadIdx.x==0) {
psi2[IDX_MM(m1,m2)] = var*var*psi2_local[0];
if(m1!=m2) { psi2[IDX_MM(m2,m1)] = var*var*psi2_local[0]; }
}
__syncthreads();
}
}
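// psi1compDer accumulates the gradients of the psi1 statistic w.r.t. the
// kernel variance, lengthscales, Z, mu, S and gamma; per-block partial sums go
// to dvar/dl/dmu/dS/dgamma and are combined on the host in
// psiDerivativecomputations below, while dZ is accumulated directly.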
__global__ void psi1compDer(double *dvar, double *dl, double *dZ, double *dmu, double *dS, double *dgamma, double *dL_dpsi1, double *psi1, double *log_denom1, double *log_gamma, double*log_gamma1, double var, double *l, double *Z, double *mu, double *S, double *gamma, int N, int M, int Q)
{
int m_start, m_end;
__shared__ double g_local[THREADNUM];
divide_data(M, gridDim.x, blockIdx.x, &m_start, &m_end);
int P = int(ceil(double(N)/THREADNUM));
double dvar_local = 0;
for(int q=0;q<Q;q++) {
double lq_sqrt = l[q];
double lq = lq_sqrt*lq_sqrt;
double dl_local = 0;
for(int p=0;p<P;p++) {
int n = p*THREADNUM + threadIdx.x;
double dmu_local = 0;
double dS_local = 0;
double dgamma_local = 0;
double Snq,mu_nq,gnq,log_gnq,log_gnq1,log_de;
if(n<N) {Snq = S[IDX_NQ(n,q)]; mu_nq=mu[IDX_NQ(n,q)]; gnq = gamma[IDX_NQ(n,q)];
log_gnq = log_gamma[IDX_NQ(n,q)]; log_gnq1 = log_gamma1[IDX_NQ(n,q)];
log_de = log_denom1[IDX_NQ(n,q)];}
for(int m=m_start; m<m_end; m++) {
if(n<N) {
double lpsi1 = psi1[IDX_NM(n,m)]*dL_dpsi1[IDX_NM(n,m)];
if(q==0) {dvar_local += lpsi1;}
double Zmq = Z[IDX_MQ(m,q)];
double Zmu = Zmq - mu_nq;
double denom = Snq+lq;
double Zmu2_denom = Zmu*Zmu/denom;
double exp1 = log_gnq-(Zmu*Zmu/(Snq+lq)+log_de)/(2.);
double exp2 = log_gnq1-Zmq*Zmq/(2.*lq);
double d_exp1,d_exp2;
if(exp1>exp2) {
d_exp1 = 1.;
d_exp2 = exp(exp2-exp1);
} else {
d_exp1 = exp(exp1-exp2);
d_exp2 = 1.;
}
double exp_sum = d_exp1+d_exp2;
dmu_local += lpsi1*Zmu*d_exp1/(denom*exp_sum);
dS_local += lpsi1*(Zmu2_denom-1.)*d_exp1/(denom*exp_sum);
dgamma_local += lpsi1*(d_exp1/gnq-d_exp2/(1.-gnq))/exp_sum;
dl_local += lpsi1*((Zmu2_denom+Snq/lq)/denom*d_exp1+Zmq*Zmq/(lq*lq)*d_exp2)/(2.*exp_sum);
g_local[threadIdx.x] = lpsi1*(-Zmu/denom*d_exp1-Zmq/lq*d_exp2)/exp_sum;
}
__syncthreads();
reduce_sum(g_local, p<P-1?THREADNUM:N-(P-1)*THREADNUM);
if(threadIdx.x==0) {dZ[IDX_MQ(m,q)] += g_local[0];}
}
if(n<N) {
dmu[IDX_NQB(n,q,blockIdx.x)] += dmu_local;
dS[IDX_NQB(n,q,blockIdx.x)] += dS_local/2.;
dgamma[IDX_NQB(n,q,blockIdx.x)] += dgamma_local;
}
__threadfence_block();
}
g_local[threadIdx.x] = dl_local*2.*lq_sqrt;
__syncthreads();
reduce_sum(g_local, THREADNUM);
if(threadIdx.x==0) {dl[IDX_QB(q,blockIdx.x)] += g_local[0];}
}
g_local[threadIdx.x] = dvar_local;
__syncthreads();
reduce_sum(g_local, THREADNUM);
if(threadIdx.x==0) {dvar[blockIdx.x] += g_local[0]/var;}
}
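// psi2compDer is the analogous gradient kernel for the psi2 statistic; note
// the factor of 2 on the dvar accumulation and the -2 factor on dmu relative
// to psi1compDer.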
__global__ void psi2compDer(double *dvar, double *dl, double *dZ, double *dmu, double *dS, double *dgamma, double *dL_dpsi2, double *psi2n, double *log_denom2, double *log_gamma, double*log_gamma1, double var, double *l, double *Z, double *mu, double *S, double *gamma, int N, int M, int Q)
{
int m_start, m_end;
__shared__ double g_local[THREADNUM];
divide_data(M, gridDim.x, blockIdx.x, &m_start, &m_end);
int P = int(ceil(double(N)/THREADNUM));
double dvar_local = 0;
for(int q=0;q<Q;q++) {
double lq_sqrt = l[q];
double lq = lq_sqrt*lq_sqrt;
double dl_local = 0;
for(int p=0;p<P;p++) {
int n = p*THREADNUM + threadIdx.x;
double dmu_local = 0;
double dS_local = 0;
double dgamma_local = 0;
double Snq,mu_nq,gnq,log_gnq,log_gnq1,log_de;
if(n<N) {Snq = S[IDX_NQ(n,q)]; mu_nq=mu[IDX_NQ(n,q)]; gnq = gamma[IDX_NQ(n,q)];
log_gnq = log_gamma[IDX_NQ(n,q)]; log_gnq1 = log_gamma1[IDX_NQ(n,q)];
log_de = log_denom2[IDX_NQ(n,q)];}
for(int m1=m_start; m1<m_end; m1++) {
g_local[threadIdx.x] = 0;
for(int m2=0;m2<M;m2++) {
if(n<N) {
double lpsi2 = psi2n[IDX_NMM(n,m1,m2)]*dL_dpsi2[IDX_MM(m1,m2)];
if(q==0) {dvar_local += lpsi2;}
double Zm1q = Z[IDX_MQ(m1,q)];
double Zm2q = Z[IDX_MQ(m2,q)];
double dZ = Zm1q - Zm2q;
double Z2 = Zm1q*Zm1q+Zm2q*Zm2q;
double muZhat = mu_nq - (Zm1q + Zm2q)/2.;
double denom = 2.*Snq+lq;
double muZhat2_denom = muZhat*muZhat/denom;
double exp1 = dZ*dZ/(-4.*lq)-muZhat*muZhat/(2.*Snq+lq) - log_de/2. + log_gnq;
double exp2 = log_gnq1 - Z2/(2.*lq);
double d_exp1,d_exp2;
if(exp1>exp2) {
d_exp1 = 1.;
d_exp2 = exp(exp2-exp1);
} else {
d_exp1 = exp(exp1-exp2);
d_exp2 = 1.;
}
double exp_sum = d_exp1+d_exp2;
dmu_local += lpsi2*muZhat/denom*d_exp1/exp_sum;
dS_local += lpsi2*(2.*muZhat2_denom-1.)/denom*d_exp1/exp_sum;
dgamma_local += lpsi2*(d_exp1/gnq-d_exp2/(1.-gnq))/exp_sum;
dl_local += lpsi2*(((Snq/lq+muZhat2_denom)/denom+dZ*dZ/(4.*lq*lq))*d_exp1+Z2/(2.*lq*lq)*d_exp2)/exp_sum;
g_local[threadIdx.x] += 2.*lpsi2*((muZhat/denom-dZ/(2*lq))*d_exp1-Zm1q/lq*d_exp2)/exp_sum;
}
}
__syncthreads();
reduce_sum(g_local, p<P-1?THREADNUM:N-(P-1)*THREADNUM);
if(threadIdx.x==0) {dZ[IDX_MQ(m1,q)] += g_local[0];}
}
if(n<N) {
dmu[IDX_NQB(n,q,blockIdx.x)] += -2.*dmu_local;
dS[IDX_NQB(n,q,blockIdx.x)] += dS_local;
dgamma[IDX_NQB(n,q,blockIdx.x)] += dgamma_local;
}
__threadfence_block();
}
g_local[threadIdx.x] = dl_local*2.*lq_sqrt;
__syncthreads();
reduce_sum(g_local, THREADNUM);
if(threadIdx.x==0) {dl[IDX_QB(q,blockIdx.x)] += g_local[0];}
}
g_local[threadIdx.x] = dvar_local;
__syncthreads();
reduce_sum(g_local, THREADNUM);
if(threadIdx.x==0) {dvar[blockIdx.x] += g_local[0]*2/var;}
}
"""
class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
def __init__(self, threadnum=128, blocknum=15, GPU_direct=False):
from pycuda.compiler import SourceModule
from ....util.gpu_init import initGPU
initGPU()
self.GPU_direct = GPU_direct
self.gpuCache = None
self.threadnum = threadnum
self.blocknum = blocknum
module = SourceModule("#define THREADNUM "+str(self.threadnum)+"\n"+gpu_code)
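        # The prepare() format strings below are assumed to follow pycuda's
        # struct-style argument codes: 'P' a device pointer, 'd' a double and
        # 'i' a 32-bit int, matching how the kernels are later invoked with
        # .gpudata / np.float64 / np.int32 arguments.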
self.g_psi1computations = module.get_function('psi1computations')
self.g_psi1computations.prepare('PPPPdPPPPiii')
self.g_psi2computations = module.get_function('psi2computations')
self.g_psi2computations.prepare('PPPPPdPPPPiii')
self.g_psi1compDer = module.get_function('psi1compDer')
self.g_psi1compDer.prepare('PPPPPPPPPPPdPPPPPiii')
self.g_psi2compDer = module.get_function('psi2compDer')
self.g_psi2compDer.prepare('PPPPPPPPPPPdPPPPPiii')
self.g_compDenom = module.get_function('compDenom')
self.g_compDenom.prepare('PPPPPPPii')
def __deepcopy__(self, memo):
s = PSICOMP_SSRBF_GPU(threadnum=self.threadnum, blocknum=self.blocknum, GPU_direct=self.GPU_direct)
memo[id(self)] = s
return s
def _initGPUCache(self, N, M, Q):
import pycuda.gpuarray as gpuarray
        if self.gpuCache is None:
self.gpuCache = {
'l_gpu' :gpuarray.empty((Q,),np.float64,order='F'),
'Z_gpu' :gpuarray.empty((M,Q),np.float64,order='F'),
'mu_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'S_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'gamma_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'psi1_gpu' :gpuarray.empty((N,M),np.float64,order='F'),
'psi2_gpu' :gpuarray.empty((M,M),np.float64,order='F'),
'psi2n_gpu' :gpuarray.empty((N,M,M),np.float64,order='F'),
'dL_dpsi1_gpu' :gpuarray.empty((N,M),np.float64,order='F'),
'dL_dpsi2_gpu' :gpuarray.empty((M,M),np.float64,order='F'),
'log_denom1_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'log_denom2_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'log_gamma_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
'log_gamma1_gpu' :gpuarray.empty((N,Q),np.float64,order='F'),
# derivatives
'dvar_gpu' :gpuarray.empty((self.blocknum,),np.float64, order='F'),
'dl_gpu' :gpuarray.empty((Q,self.blocknum),np.float64, order='F'),
'dZ_gpu' :gpuarray.empty((M,Q),np.float64, order='F'),
'dmu_gpu' :gpuarray.empty((N,Q,self.blocknum),np.float64, order='F'),
'dS_gpu' :gpuarray.empty((N,Q,self.blocknum),np.float64, order='F'),
'dgamma_gpu' :gpuarray.empty((N,Q,self.blocknum),np.float64, order='F'),
# grad
'grad_l_gpu' :gpuarray.empty((Q,),np.float64, order='F'),
'grad_mu_gpu' :gpuarray.empty((N,Q,),np.float64, order='F'),
'grad_S_gpu' :gpuarray.empty((N,Q,),np.float64, order='F'),
'grad_gamma_gpu' :gpuarray.empty((N,Q,),np.float64, order='F'),
}
else:
assert N==self.gpuCache['mu_gpu'].shape[0]
assert M==self.gpuCache['Z_gpu'].shape[0]
assert Q==self.gpuCache['l_gpu'].shape[0]
def sync_params(self, lengthscale, Z, mu, S, gamma):
if len(lengthscale)==1:
self.gpuCache['l_gpu'].fill(lengthscale)
else:
self.gpuCache['l_gpu'].set(np.asfortranarray(lengthscale))
self.gpuCache['Z_gpu'].set(np.asfortranarray(Z))
self.gpuCache['mu_gpu'].set(np.asfortranarray(mu))
self.gpuCache['S_gpu'].set(np.asfortranarray(S))
self.gpuCache['gamma_gpu'].set(np.asfortranarray(gamma))
N,Q = self.gpuCache['S_gpu'].shape
self.g_compDenom.prepared_call((self.blocknum,1),(self.threadnum,1,1), self.gpuCache['log_denom1_gpu'].gpudata,self.gpuCache['log_denom2_gpu'].gpudata,self.gpuCache['log_gamma_gpu'].gpudata,self.gpuCache['log_gamma1_gpu'].gpudata,self.gpuCache['gamma_gpu'].gpudata,self.gpuCache['l_gpu'].gpudata,self.gpuCache['S_gpu'].gpudata, np.int32(N), np.int32(Q))
def reset_derivative(self):
self.gpuCache['dvar_gpu'].fill(0.)
self.gpuCache['dl_gpu'].fill(0.)
self.gpuCache['dZ_gpu'].fill(0.)
self.gpuCache['dmu_gpu'].fill(0.)
self.gpuCache['dS_gpu'].fill(0.)
self.gpuCache['dgamma_gpu'].fill(0.)
self.gpuCache['grad_l_gpu'].fill(0.)
self.gpuCache['grad_mu_gpu'].fill(0.)
self.gpuCache['grad_S_gpu'].fill(0.)
self.gpuCache['grad_gamma_gpu'].fill(0.)
def get_dimensions(self, Z, variational_posterior):
return variational_posterior.mean.shape[0], Z.shape[0], Z.shape[1]
@Cache_this(limit=1, ignore_args=(0,))
def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
"""
Z - MxQ
mu - NxQ
S - NxQ
"""
variance, lengthscale = kern.variance, kern.lengthscale
N,M,Q = self.get_dimensions(Z, variational_posterior)
self._initGPUCache(N,M,Q)
self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)
psi1_gpu = self.gpuCache['psi1_gpu']
psi2_gpu = self.gpuCache['psi2_gpu']
psi2n_gpu = self.gpuCache['psi2n_gpu']
l_gpu = self.gpuCache['l_gpu']
Z_gpu = self.gpuCache['Z_gpu']
mu_gpu = self.gpuCache['mu_gpu']
S_gpu = self.gpuCache['S_gpu']
log_denom1_gpu = self.gpuCache['log_denom1_gpu']
log_denom2_gpu = self.gpuCache['log_denom2_gpu']
log_gamma_gpu = self.gpuCache['log_gamma_gpu']
log_gamma1_gpu = self.gpuCache['log_gamma1_gpu']
psi0 = np.empty((N,))
psi0[:] = variance
self.g_psi1computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi1_gpu.gpudata, log_denom1_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
self.g_psi2computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi2_gpu.gpudata, psi2n_gpu.gpudata, log_denom2_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
if self.GPU_direct:
return psi0, psi1_gpu, psi2_gpu
else:
return psi0, psi1_gpu.get(), psi2_gpu.get()
@Cache_this(limit=1, ignore_args=(0,2,3,4))
def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
variance, lengthscale = kern.variance, kern.lengthscale
        from ....util.linalg_gpu import sum_axis
        import pycuda.gpuarray as gpuarray  # needed for gpuarray.sum below
ARD = (len(lengthscale)!=1)
N,M,Q = self.get_dimensions(Z, variational_posterior)
psi1_gpu = self.gpuCache['psi1_gpu']
psi2n_gpu = self.gpuCache['psi2n_gpu']
l_gpu = self.gpuCache['l_gpu']
Z_gpu = self.gpuCache['Z_gpu']
mu_gpu = self.gpuCache['mu_gpu']
S_gpu = self.gpuCache['S_gpu']
gamma_gpu = self.gpuCache['gamma_gpu']
dvar_gpu = self.gpuCache['dvar_gpu']
dl_gpu = self.gpuCache['dl_gpu']
dZ_gpu = self.gpuCache['dZ_gpu']
dmu_gpu = self.gpuCache['dmu_gpu']
dS_gpu = self.gpuCache['dS_gpu']
dgamma_gpu = self.gpuCache['dgamma_gpu']
grad_l_gpu = self.gpuCache['grad_l_gpu']
grad_mu_gpu = self.gpuCache['grad_mu_gpu']
grad_S_gpu = self.gpuCache['grad_S_gpu']
grad_gamma_gpu = self.gpuCache['grad_gamma_gpu']
log_denom1_gpu = self.gpuCache['log_denom1_gpu']
log_denom2_gpu = self.gpuCache['log_denom2_gpu']
log_gamma_gpu = self.gpuCache['log_gamma_gpu']
log_gamma1_gpu = self.gpuCache['log_gamma1_gpu']
if self.GPU_direct:
dL_dpsi1_gpu = dL_dpsi1
dL_dpsi2_gpu = dL_dpsi2
dL_dpsi0_sum = gpuarray.sum(dL_dpsi0).get()
else:
dL_dpsi1_gpu = self.gpuCache['dL_dpsi1_gpu']
dL_dpsi2_gpu = self.gpuCache['dL_dpsi2_gpu']
dL_dpsi1_gpu.set(np.asfortranarray(dL_dpsi1))
dL_dpsi2_gpu.set(np.asfortranarray(dL_dpsi2))
dL_dpsi0_sum = dL_dpsi0.sum()
self.reset_derivative()
# t=self.g_psi1compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi1_gpu,psi1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi1compDer '+str(t)
# t=self.g_psi2compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi2_gpu,psi2n_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi2compDer '+str(t)
self.g_psi1compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi1_gpu.gpudata,psi1_gpu.gpudata, log_denom1_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
self.g_psi2compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi2_gpu.gpudata,psi2n_gpu.gpudata, log_denom2_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
dL_dvar = dL_dpsi0_sum + gpuarray.sum(dvar_gpu).get()
sum_axis(grad_mu_gpu,dmu_gpu,N*Q,self.blocknum)
dL_dmu = grad_mu_gpu.get()
sum_axis(grad_S_gpu,dS_gpu,N*Q,self.blocknum)
dL_dS = grad_S_gpu.get()
sum_axis(grad_gamma_gpu,dgamma_gpu,N*Q,self.blocknum)
dL_dgamma = grad_gamma_gpu.get()
dL_dZ = dZ_gpu.get()
if ARD:
sum_axis(grad_l_gpu,dl_gpu,Q,self.blocknum)
dL_dlengscale = grad_l_gpu.get()
else:
dL_dlengscale = gpuarray.sum(dl_gpu).get()
return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ParameterizedTruncatedNormalOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import timeit
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _get_stddev_inside_bounds_before_using_randn(gpu):
# The boundary where the randn sampler is used varies between CPU and GPU.
if gpu:
return 1.3
else:
return 1.7
class TruncatedNormalMoments(object):
memoized_moments = None
mean = None
stddev = None
minval = None
maxval = None
def __init__(self, mean, stddev, minval, maxval):
self.memoized_moments = [1.0] # 0th moment
self.mean = np.double(mean)
self.stddev = np.double(stddev)
# NOTE(ringwalt): The formula doesn't handle infinite values.
self.minval = np.double(max(-10, minval))
self.maxval = np.double(min(10, maxval))
def __getitem__(self, moment):
"""Calculates the truncated normal moments.
Args:
moment: The number for the moment.
Returns:
The value for the given moment.
Uses the recurrence relation described in:
http://www.smp.uq.edu.au/people/YoniNazarathy/teaching_projects
/studentWork/EricOrjebin_TruncatedNormalMoments.pdf
"""
assert moment > 0
# The test case must ensure it can import scipy.stats before this point.
import scipy.stats # pylint: disable=g-import-not-at-top
dist = scipy.stats.norm(loc=self.mean, scale=self.stddev)
for k in range(len(self.memoized_moments), moment + 1):
m_k_minus_2 = self.memoized_moments[k - 2] if k > 1 else np.double(0.0)
m_k_minus_1 = self.memoized_moments[k - 1]
numerator = (np.power(self.maxval, k - 1) * dist.pdf(self.maxval) -
np.power(self.minval, k - 1) * dist.pdf(self.minval))
denominator = dist.cdf(self.maxval) - dist.cdf(self.minval)
m = ((k - 1) * self.stddev**2 * m_k_minus_2 + self.mean * m_k_minus_1 -
self.stddev * numerator / denominator)
assert abs(m) < 1e50 # ensure numerical accuracy
self.memoized_moments.append(m)
return self.memoized_moments[moment]
def calculate_moments(samples, max_moment):
moments = [0.0] * (max_moment + 1)
for sample in samples:
value = 1.0
for k in range(len(moments)):
moments[k] += value
value *= sample
for i in range(len(moments)):
moments[i] /= len(samples)
return moments
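# z_test returns the absolute z-score of the i-th empirical moment against the
# theoretical one; the sample moment's variance is estimated as
# (E[X^(2i)] - E[X^i]^2) / num_samples plus a small numerical-error allowance
# that grows with i.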
def z_test(real, expected, i, num_samples):
numerical_error = 1e-6 # per-operation error
moment_mean = expected[i]
moment_squared = expected[2 * i]
moment_var = moment_squared - moment_mean * moment_mean
error_per_moment = i * numerical_error
total_variance = moment_var / float(num_samples) + error_per_moment
return abs((real[i] - moment_mean) / math.sqrt(total_variance))
class ParameterizedTruncatedNormalTest(test.TestCase):
z_limit = 6.0
# Stop at moment 10 to avoid numerical errors in the theoretical moments.
max_moment = 10
def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618):
try:
# TruncatedNormalMoments requires scipy.stats.
# Give up early if we are unable to import it.
import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
maxval).eval()
assert (~np.isnan(samples)).all()
moments = calculate_moments(samples, self.max_moment)
expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval)
num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
for i in range(1, len(moments)):
self.assertLess(
z_test(moments, expected_moments, i, num_samples), self.z_limit)
except ImportError as e:
tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
def validateKolmogorovSmirnov(self,
shape,
mean,
stddev,
minval,
maxval,
seed=1618):
try:
import scipy.stats # pylint: disable=g-import-not-at-top
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
minval,
maxval).eval()
assert (~np.isnan(samples)).all()
minval = max(mean - stddev * 10, minval)
maxval = min(mean + stddev * 10, maxval)
dist = scipy.stats.norm(loc=mean, scale=stddev)
cdf_min = dist.cdf(minval)
cdf_max = dist.cdf(maxval)
def truncated_cdf(x):
return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)
pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
self.assertGreater(pvalue, 1e-10)
except ImportError as e:
tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
@test_util.run_deprecated_v1
def testDefaults(self):
self.validateMoments([10**5], 0.0, 1.0, -2.0, 2.0)
@test_util.run_deprecated_v1
def testShifted(self):
self.validateMoments([10**5], -1.0, 1.0, -2.0, 2.0)
@test_util.run_deprecated_v1
def testRightTail(self):
self.validateMoments([10**5], 0.0, 1.0, 4.0, np.infty)
@test_util.run_deprecated_v1
def testLeftTail(self):
self.validateMoments([10**5], 0.0, 1.0, -np.infty, -4.0)
@test_util.run_deprecated_v1
def testLeftTailTwoSidedBounds(self):
self.validateMoments([10**5], 0.0, 1.0, -6.0, -3.0)
@test_util.run_deprecated_v1
def testTwoSidedLeftTailShifted(self):
self.validateKolmogorovSmirnov([10**5], 6.0, 1.0, -1.0, 1.0)
@test_util.run_deprecated_v1
def testRightTailShifted(self):
self.validateMoments([10**5], -5.0, 1.0, 2.0, np.infty)
@test_util.run_deprecated_v1
def testSmallStddev(self):
self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10)
@test_util.run_deprecated_v1
def testSamplingWithSmallStdDevFarFromBound(self):
sample_op = random_ops.parameterized_truncated_normal(
shape=(int(1e5),), means=0.8, stddevs=0.05, minvals=-1., maxvals=1.)
with self.session(use_gpu=True) as sess:
samples = sess.run(sample_op)
# 0. is more than 16 standard deviations from the mean, and
# should have a likelihood < 1e-57.
assert (~np.isnan(samples)).all()
no_neg_samples = np.sum(samples < 0.)
self.assertEqual(no_neg_samples, 0.)
@test_util.run_deprecated_v1
def testSamplingAtRandnSwitchover(self):
# The randn sampler is used as the bounds are moved farther from the mean,
# and the probability of accepting a sample increases the farther the
# bounds are from the mean.
# This test asserts that at the point of switchover, both samplers are
# working (not raising an error or returning nan) and returning the
# expected moments.
use_gpu = test.is_gpu_available()
stddev_inside_bounds_before_using_randn = (
_get_stddev_inside_bounds_before_using_randn(use_gpu))
epsilon = 0.001
self.validateMoments(
shape=[10**6],
mean=0.,
stddev=1.0,
minval=-epsilon,
maxval=stddev_inside_bounds_before_using_randn - epsilon)
self.validateMoments(
shape=[10**6],
mean=0.,
stddev=1.0,
minval=-epsilon,
maxval=stddev_inside_bounds_before_using_randn + epsilon)
# Benchmarking code
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
np.random.seed(1618) # Make it reproducible.
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
with ops.device("/cpu:0" if not use_gpu else None):
param_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(shape))
naive_op = control_flow_ops.group(random_ops.truncated_normal(shape))
# Burn-in to avoid session setup costs in the timing.
sess.run(param_op)
sess.run(param_op)
param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
sess.run(naive_op)
sess.run(naive_op)
naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
return param_dt, naive_dt
def randn_sampler_switchover(shape, num_iters, use_gpu=False):
# Benchmark by constructing samplers on the threshold of using the randn
# rejection sampling and check that this threshold is set correctly by
# benchmarking with bounds just above and below this threshold.
# The uniform and randn samplers should have about the same performance
# at this point.
stddev_inside_bounds_before_using_randn = (
_get_stddev_inside_bounds_before_using_randn(use_gpu))
epsilon = 0.001
np.random.seed(1618) # Make it reproducible.
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
uniform_sampler_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(
shape,
means=0.,
stddevs=1.0,
minvals=-stddev_inside_bounds_before_using_randn + epsilon,
maxvals=0.01))
randn_sampler_op = control_flow_ops.group(
random_ops.parameterized_truncated_normal(
shape,
means=0.,
stddevs=1.0,
minvals=-stddev_inside_bounds_before_using_randn - epsilon,
maxvals=0.01))
# Burn-in to avoid session setup costs in the timing.
sess.run(uniform_sampler_op)
sess.run(uniform_sampler_op)
uniform_dt = timeit.timeit(
lambda: sess.run(uniform_sampler_op), number=num_iters)
sess.run(randn_sampler_op)
sess.run(randn_sampler_op)
randn_dt = timeit.timeit(
lambda: sess.run(randn_sampler_op), number=num_iters)
return randn_dt, uniform_dt
class TruncatedNormalBenchmark(test.Benchmark):
def benchmarkParameterizedOpVsNaiveOpCpu(self):
self._benchmarkParameterizedOpVsNaiveOp(False)
def benchmarkParameterizedOpVsNaiveOpGpu(self):
self._benchmarkParameterizedOpVsNaiveOp(True)
def _benchmarkParameterizedOpVsNaiveOp(self, use_gpu):
num_iters = 50
print(("Composition of new ParameterizedTruncatedNormalOp vs. "
"naive TruncatedNormalOp [%d iters]") % num_iters)
print("Shape\tsec(parameterized)\tsec(naive)\tspeedup")
for shape in [[10000, 100], [1000, 1000], [1000000], [100, 100, 100],
[20, 20, 20, 20]]:
p_dt, n_dt = parameterized_vs_naive(shape, num_iters, use_gpu)
print("%s\t%.3f\t%.3f\t%.2f" % (shape, p_dt, n_dt, p_dt / n_dt))
shape_str = "-".join(map(str, shape))
self.report_benchmark(
name="parameterized_shape" + shape_str,
iters=num_iters,
wall_time=p_dt)
self.report_benchmark(
name="naive_shape" + shape_str, iters=num_iters, wall_time=n_dt)
def benchmarkRandnSamplerCPU(self):
self._benchmarkRandnSampler(False)
def benchmarkRandnSamplerGPU(self):
self._benchmarkRandnSampler(True)
def _benchmarkRandnSampler(self, use_gpu):
num_iters = 100
shape = [int(1e6)]
randn_dt, uniform_dt = randn_sampler_switchover(shape, num_iters, use_gpu)
print(("Randn Sampler vs uniform samplers [%d iters]\t%.4f\t%.4f") %
(num_iters, randn_dt, uniform_dt))
gpu_str = "_gpu" if use_gpu else "_cpu"
self.report_benchmark(
name="randn_sampler" + gpu_str, iters=num_iters, wall_time=randn_dt)
self.report_benchmark(
name="uniform_sampler" + gpu_str, iters=num_iters, wall_time=uniform_dt)
if __name__ == "__main__":
test.main()
|
|
import os
import shutil
import stat
import unittest
from common import TestCase
import pyuv
# Make stat return integers
try:
os.stat_float_times(False)
except AttributeError:
pass
pyuv.fs.stat_float_times(False)
BAD_FILE = 'test_file_bad'
TEST_FILE = 'test_file_1234'
TEST_FILE2 = 'test_file_1234_2'
TEST_LINK = 'test_file_1234_link'
TEST_DIR = 'test-dir'
TEST_DIR2 = 'test-dir_2'
BAD_DIR = 'test-dir-bad'
MAX_INT32_VALUE = 2 ** 31 - 1
OFFSET_VALUE = MAX_INT32_VALUE if not os.name == 'nt' else 2 ** 8 - 1
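# Offsets around MAX_INT32_VALUE exercise 64-bit file offsets in the read,
# write and sendfile tests below; a much smaller offset is used on Windows.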
class FileTestCase(TestCase):
TEST_FILE_CONTENT = 'test'
def setUp(self):
super(FileTestCase, self).setUp()
with open(TEST_FILE, 'w') as f:
f.write(self.TEST_FILE_CONTENT)
def tearDown(self):
try:
os.remove(TEST_FILE)
except OSError:
pass
super(FileTestCase, self).tearDown()
class FSTestRequestDict(FileTestCase):
def stat_cb(self, req):
self.errorno = req.error
self.assertEqual(req.test, 'test123')
def test_request_dict(self):
self.errorno = None
req = pyuv.fs.stat(self.loop, TEST_FILE, self.stat_cb)
req.test = 'test123'
self.loop.run()
self.assertEqual(self.errorno, None)
class FSTestStat(FileTestCase):
def stat_cb(self, req):
self.errorno = req.error
def test_stat_error(self):
self.errorno = None
pyuv.fs.stat(self.loop, BAD_FILE, self.stat_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def test_stat(self):
self.errorno = None
pyuv.fs.stat(self.loop, TEST_FILE, self.stat_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
def test_stat_sync(self):
self.stat_data = pyuv.fs.stat(self.loop, TEST_FILE)
self.assertNotEqual(self.stat_data, None)
def test_stat_error_sync(self):
try:
pyuv.fs.stat(self.loop, BAD_FILE)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestLstat(FileTestCase):
def setUp(self):
super(FSTestLstat, self).setUp()
try:
pyuv.fs.symlink(self.loop, TEST_FILE, TEST_LINK, 0)
except pyuv.error.FSError as e:
if e.args[0] == pyuv.errno.UV_EPERM:
raise unittest.SkipTest("Symlinks not permitted")
def tearDown(self):
try:
os.remove(TEST_LINK)
except OSError:
pass
super(FSTestLstat, self).tearDown()
def stat_cb(self, req):
self.errorno = req.error
def test_lstat(self):
self.errorno = None
pyuv.fs.lstat(self.loop, TEST_LINK, self.stat_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
class FSTestFstat(FileTestCase):
def fstat_cb(self, req):
self.errorno = req.error
def test_fstat(self):
self.errorno = None
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.fstat(self.loop, fd, self.fstat_cb)
self.loop.run()
pyuv.fs.close(self.loop, fd)
self.assertEqual(self.errorno, None)
def test_fstat_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
self.stat_data = pyuv.fs.fstat(self.loop, fd)
pyuv.fs.close(self.loop, fd)
self.assertNotEqual(self.stat_data, None)
class FSTestUnlink(FileTestCase):
def bad_unlink_cb(self, req):
self.errorno = req.error
def test_bad_unlink(self):
self.errorno = None
pyuv.fs.unlink(self.loop, BAD_FILE, self.bad_unlink_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def unlink_cb(self, req):
self.errorno = req.error
def test_unlink(self):
self.errorno = None
pyuv.fs.unlink(self.loop, TEST_FILE, self.unlink_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
def test_unlink_sync(self):
pyuv.fs.unlink(self.loop, TEST_FILE)
def test_unlink_error_sync(self):
try:
pyuv.fs.unlink(self.loop, BAD_FILE)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestMkdir(TestCase):
def setUp(self):
super(FSTestMkdir, self).setUp()
os.mkdir(BAD_DIR, 0o755)
def tearDown(self):
os.rmdir(BAD_DIR)
try:
os.rmdir(TEST_DIR)
except OSError:
pass
super(FSTestMkdir, self).tearDown()
def mkdir_cb(self, req):
self.errorno = req.error
def test_bad_mkdir(self):
self.errorno = None
pyuv.fs.mkdir(self.loop, BAD_DIR, 0o755, self.mkdir_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_EEXIST)
def test_mkdir(self):
self.errorno = None
pyuv.fs.mkdir(self.loop, TEST_DIR, 0o755, self.mkdir_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertTrue(os.path.isdir(TEST_DIR))
def test_mkdir_sync(self):
pyuv.fs.mkdir(self.loop, TEST_DIR, 0o755)
self.assertTrue(os.path.isdir(TEST_DIR))
def test_mkdir_error_sync(self):
try:
pyuv.fs.mkdir(self.loop, BAD_DIR, 0o755)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_EEXIST)
class FSTestRmdir(TestCase):
def setUp(self):
super(FSTestRmdir, self).setUp()
os.mkdir(TEST_DIR, 0o755)
def tearDown(self):
try:
os.rmdir(TEST_DIR)
except OSError:
pass
super(FSTestRmdir, self).tearDown()
def rmdir_cb(self, req):
self.errorno = req.error
def test_bad_rmdir(self):
self.errorno = None
pyuv.fs.rmdir(self.loop, BAD_DIR, self.rmdir_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def test_rmdir(self):
self.errorno = None
pyuv.fs.rmdir(self.loop, TEST_DIR, self.rmdir_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertFalse(os.path.isdir(TEST_DIR))
def test_rmdir_sync(self):
pyuv.fs.rmdir(self.loop, TEST_DIR)
self.assertFalse(os.path.isdir(TEST_DIR))
def test_rmdir_error_sync(self):
try:
pyuv.fs.rmdir(self.loop, BAD_DIR)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestRename(FileTestCase):
def tearDown(self):
try:
os.remove(TEST_FILE2)
except OSError:
pass
super(FSTestRename, self).tearDown()
def rename_cb(self, req):
self.errorno = req.error
def test_rename(self):
self.errorno = None
pyuv.fs.rename(self.loop, TEST_FILE, TEST_FILE2, self.rename_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertFalse(os.path.exists(TEST_FILE))
self.assertTrue(os.path.exists(TEST_FILE2))
def test_rename_sync(self):
pyuv.fs.rename(self.loop, TEST_FILE, TEST_FILE2)
self.assertFalse(os.path.exists(TEST_FILE))
self.assertTrue(os.path.exists(TEST_FILE2))
class FSTestChmod(FileTestCase):
def chmod_cb(self, req):
self.errorno = req.error
def test_chmod(self):
self.errorno = None
pyuv.fs.chmod(self.loop, TEST_FILE, 0o777, self.chmod_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
mode = os.stat(TEST_FILE).st_mode
self.assertTrue(bool(mode & stat.S_IRWXU) and bool(mode & stat.S_IRWXG) and bool(mode & stat.S_IRWXO))
def test_chmod_sync(self):
pyuv.fs.chmod(self.loop, TEST_FILE, 0o777)
mode = os.stat(TEST_FILE).st_mode
self.assertTrue(bool(mode & stat.S_IRWXU) and bool(mode & stat.S_IRWXG) and bool(mode & stat.S_IRWXO))
class FSTestFchmod(FileTestCase):
def fchmod_cb(self, req):
self.errorno = req.error
def test_fchmod(self):
self.errorno = None
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD | stat.S_IWRITE)
pyuv.fs.fchmod(self.loop, fd, 0o777, self.fchmod_cb)
self.loop.run()
pyuv.fs.close(self.loop, fd)
self.assertEqual(self.errorno, None)
mode = os.stat(TEST_FILE).st_mode
self.assertTrue(bool(mode & stat.S_IRWXU) and bool(mode & stat.S_IRWXG) and bool(mode & stat.S_IRWXO))
def test_fchmod_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD | stat.S_IWRITE)
pyuv.fs.fchmod(self.loop, fd, 0o777)
pyuv.fs.close(self.loop, fd)
mode = os.stat(TEST_FILE).st_mode
self.assertTrue(bool(mode & stat.S_IRWXU) and bool(mode & stat.S_IRWXG) and bool(mode & stat.S_IRWXO))
class FSTestLink(FileTestCase):
def tearDown(self):
os.remove(TEST_LINK)
super(FSTestLink, self).tearDown()
def link_cb(self, req):
self.errorno = req.error
def test_link(self):
self.errorno = None
pyuv.fs.link(self.loop, TEST_FILE, TEST_LINK, self.link_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertEqual(os.stat(TEST_FILE).st_ino, os.stat(TEST_LINK).st_ino)
def test_link_sync(self):
pyuv.fs.link(self.loop, TEST_FILE, TEST_LINK)
self.assertEqual(os.stat(TEST_FILE).st_ino, os.stat(TEST_LINK).st_ino)
class FSTestSymlink(FileTestCase):
def tearDown(self):
try:
os.remove(TEST_LINK)
except OSError:
pass
super(FSTestSymlink, self).tearDown()
def symlink_cb(self, req):
self.errorno = req.error
def test_symlink(self):
self.errorno = None
pyuv.fs.symlink(self.loop, TEST_FILE, TEST_LINK, 0, self.symlink_cb)
self.loop.run()
if self.errorno == pyuv.errno.UV_EPERM:
raise unittest.SkipTest("Symlinks not permitted")
self.assertEqual(self.errorno, None)
self.assertTrue(os.stat(TEST_LINK).st_mode & stat.S_IFLNK)
def test_symlink_sync(self):
try:
pyuv.fs.symlink(self.loop, TEST_FILE, TEST_LINK, 0)
except pyuv.error.FSError as e:
if e.args[0] == pyuv.errno.UV_EPERM:
raise unittest.SkipTest("Symlinks not permitted")
self.assertTrue(os.stat(TEST_LINK).st_mode & stat.S_IFLNK)
class FSTestReadlink(FileTestCase):
def setUp(self):
super(FSTestReadlink, self).setUp()
try:
pyuv.fs.symlink(self.loop, TEST_FILE, TEST_LINK, 0)
except pyuv.error.FSError as e:
if e.args[0] == pyuv.errno.UV_EPERM:
raise unittest.SkipTest("Symlinks not permitted")
def tearDown(self):
try:
os.remove(TEST_LINK)
except OSError:
pass
super(FSTestReadlink, self).tearDown()
def readlink_cb(self, req):
self.errorno = req.error
self.link_path = req.result
def test_readlink(self):
self.errorno = None
self.link_path = None
pyuv.fs.readlink(self.loop, TEST_LINK, self.readlink_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertEqual(self.link_path, TEST_FILE)
def test_readlink_sync(self):
self.link_path = pyuv.fs.readlink(self.loop, TEST_LINK)
self.assertEqual(self.link_path, TEST_FILE)
class FSTestChown(FileTestCase):
def chown_cb(self, req):
self.errorno = req.error
def test_chown(self):
self.errorno = None
pyuv.fs.chown(self.loop, TEST_FILE, -1, -1, self.chown_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
def test_chown_sync(self):
pyuv.fs.chown(self.loop, TEST_FILE, -1, -1)
class FSTestFchown(FileTestCase):
def fchown_cb(self, req):
self.errorno = req.error
def test_fchown(self):
self.errorno = None
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD | stat.S_IWRITE)
pyuv.fs.fchown(self.loop, fd, -1, -1, self.fchown_cb)
self.loop.run()
pyuv.fs.close(self.loop, fd)
self.assertEqual(self.errorno, None)
def test_fchown_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD | stat.S_IWRITE)
pyuv.fs.fchown(self.loop, fd, -1, -1)
pyuv.fs.close(self.loop, fd)
class FSTestOpen(TestCase):
def setUp(self):
super(FSTestOpen, self).setUp()
try:
os.remove(TEST_FILE)
except OSError:
pass
def tearDown(self):
try:
os.remove(TEST_FILE)
except OSError:
pass
super(FSTestOpen, self).tearDown()
def close_cb(self, req):
self.errorno = req.error
def open_cb(self, req):
fd = req.result
self.assertNotEqual(fd, None)
self.assertEqual(req.error, None)
pyuv.fs.close(self.loop, fd, self.close_cb)
def test_open_create(self):
self.errorno = None
pyuv.fs.open(self.loop, TEST_FILE, os.O_WRONLY|os.O_CREAT, stat.S_IREAD|stat.S_IWRITE, self.open_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
def open_noent_cb(self, req):
self.fd = req.result
self.errorno = req.error
def test_open_noent(self):
self.fd = None
self.errorno = None
pyuv.fs.open(self.loop, BAD_FILE, os.O_RDONLY, 0, self.open_noent_cb)
self.loop.run()
self.assertEqual(self.fd, None)
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def test_open_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.close(self.loop, fd)
def test_open_error_sync(self):
try:
pyuv.fs.open(self.loop, BAD_FILE, os.O_RDONLY, 0)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestRead(FileTestCase):
TEST_FILE_CONTENT = 'test1234567890'
def read_cb(self, req):
self.errorno = req.error
self.data = req.result
pyuv.fs.close(self.loop, self.fd)
def test_read(self):
self.data = None
self.errorno = None
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDONLY, stat.S_IREAD)
pyuv.fs.read(self.loop, self.fd, 4, -1, self.read_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertEqual(self.data, b'test')
def test_read_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDONLY, stat.S_IREAD)
self.data = pyuv.fs.read(self.loop, fd, 4, -1)
pyuv.fs.close(self.loop, fd)
self.assertEqual(self.data, b'test')
def test_read_offset(self):
with open(TEST_FILE, 'w') as f:
f.seek(OFFSET_VALUE)
f.write('test1234567890')
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDONLY, stat.S_IREAD)
data = pyuv.fs.read(self.loop, fd, 4, OFFSET_VALUE + 4)
pyuv.fs.close(self.loop, fd)
self.assertEqual(data, b'1234')
class FSTestWrite(TestCase):
def setUp(self):
super(FSTestWrite, self).setUp()
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, stat.S_IWRITE|stat.S_IREAD)
def tearDown(self):
os.remove(TEST_FILE)
super(FSTestWrite, self).tearDown()
def write_cb(self, req):
pyuv.fs.close(self.loop, self.fd)
self.bytes_written = req.result
self.errorno = req.error
def test_write(self):
self.bytes_written = None
self.errorno = None
pyuv.fs.write(self.loop, self.fd, b"TEST", -1, self.write_cb)
self.loop.run()
self.assertEqual(self.bytes_written, 4)
self.assertEqual(self.errorno, None)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "TEST")
def test_write_null(self):
self.bytes_written = None
self.errorno = None
pyuv.fs.write(self.loop, self.fd, b"TES\x00T", -1, self.write_cb)
self.loop.run()
self.assertEqual(self.bytes_written, 5)
self.assertEqual(self.errorno, None)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "TES\x00T")
def test_write_sync(self):
self.bytes_written = pyuv.fs.write(self.loop, self.fd, b"TEST", -1)
pyuv.fs.close(self.loop, self.fd)
self.assertEqual(self.bytes_written, 4)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "TEST")
def test_write_offset(self):
offset = OFFSET_VALUE + 4
self.bytes_written = pyuv.fs.write(self.loop, self.fd, b"TEST", offset)
pyuv.fs.close(self.loop, self.fd)
with open(TEST_FILE, 'r') as fobj:
fobj.seek(offset)
self.assertEqual(fobj.read(), "TEST")
class FSTestFsync(TestCase):
def write_cb(self, req):
self.assertEqual(req.result, 4)
self.assertEqual(req.error, None)
pyuv.fs.fdatasync(self.loop, self.fd, self.fdatasync_cb)
def fdatasync_cb(self, req):
self.assertEqual(req.error, None)
pyuv.fs.fsync(self.loop, self.fd, self.fsync_cb)
def fsync_cb(self, req):
self.assertEqual(req.error, None)
def test_fsync(self):
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR|os.O_CREAT|os.O_TRUNC, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.write(self.loop, self.fd, b"TEST", -1, self.write_cb)
self.loop.run()
pyuv.fs.close(self.loop, self.fd)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "TEST")
def test_fsync_sync(self):
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR|os.O_CREAT|os.O_TRUNC, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.write(self.loop, self.fd, b"TEST", -1)
pyuv.fs.fdatasync(self.loop, self.fd)
pyuv.fs.fsync(self.loop, self.fd)
pyuv.fs.close(self.loop, self.fd)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "TEST")
class FSTestFtruncate(FileTestCase):
TEST_FILE_CONTENT = "test-data"
def ftruncate_cb(self, req):
self.errorno = req.error
pyuv.fs.close(self.loop, self.fd)
def test_ftruncate1(self):
self.errorno = None
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.ftruncate(self.loop, self.fd, 4, self.ftruncate_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "test")
def test_ftruncate2(self):
self.errorno = None
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.ftruncate(self.loop, self.fd, 0, self.ftruncate_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "")
def test_ftruncate_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.ftruncate(self.loop, fd, 0)
pyuv.fs.close(self.loop, fd)
with open(TEST_FILE, 'r') as fobj:
self.assertEqual(fobj.read(), "")
class FSTestScandir(TestCase):
def setUp(self):
super(FSTestScandir, self).setUp()
os.mkdir(TEST_DIR, 0o755)
os.mkdir(os.path.join(TEST_DIR, TEST_DIR2), 0o755)
with open(os.path.join(TEST_DIR, TEST_FILE), 'w') as f:
f.write('test')
with open(os.path.join(TEST_DIR, TEST_FILE2), 'w') as f:
f.write('test')
def tearDown(self):
shutil.rmtree(TEST_DIR)
super(FSTestScandir, self).tearDown()
def scandir_cb(self, req):
self.errorno = req.error
self.files = req.result
def test_bad_scandir(self):
self.errorno = None
self.files = None
pyuv.fs.scandir(self.loop, BAD_DIR, self.scandir_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def test_scandir(self):
self.errorno = None
self.files = None
pyuv.fs.scandir(self.loop, TEST_DIR, self.scandir_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertTrue(TEST_FILE in [f.name for f in self.files])
self.assertTrue(TEST_FILE2 in [f.name for f in self.files])
self.assertTrue(TEST_DIR2 in [f.name for f in self.files])
def test_scandir_sync(self):
self.files = pyuv.fs.scandir(self.loop, TEST_DIR)
self.assertNotEqual(self.files, None)
self.assertTrue(TEST_FILE in [f.name for f in self.files])
self.assertTrue(TEST_FILE2 in [f.name for f in self.files])
self.assertTrue(TEST_DIR2 in [f.name for f in self.files])
def test_scandir_error_sync(self):
try:
pyuv.fs.scandir(self.loop, BAD_DIR)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestSendfile(TestCase):
def setUp(self):
super(FSTestSendfile, self).setUp()
with open(TEST_FILE, 'w') as f:
f.write("begin\n")
os.lseek(f.fileno(), 65536, os.SEEK_CUR)
f.write("end\n")
f.flush()
def tearDown(self):
os.remove(TEST_FILE)
os.remove(TEST_FILE2)
super(FSTestSendfile, self).tearDown()
def sendfile_cb(self, req):
self.bytes_written = req.result
self.errorno = req.error
def test_sendfile(self):
self.result = None
self.errorno = None
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
fd2 = pyuv.fs.open(self.loop, TEST_FILE2, os.O_RDWR|os.O_CREAT, stat.S_IREAD|stat.S_IWRITE)
pyuv.fs.sendfile(self.loop, fd2, fd, 0, 131072, self.sendfile_cb)
self.loop.run()
pyuv.fs.close(self.loop, fd)
pyuv.fs.close(self.loop, fd2)
self.assertEqual(self.errorno, None)
with open(TEST_FILE, 'r') as fobj1:
with open(TEST_FILE2, 'r') as fobj2:
self.assertEqual(fobj1.read(), fobj2.read())
def test_sendfile_sync(self):
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
fd2 = pyuv.fs.open(self.loop, TEST_FILE2, os.O_RDWR|os.O_CREAT, stat.S_IREAD|stat.S_IWRITE)
self.bytes_written = pyuv.fs.sendfile(self.loop, fd2, fd, 0, 131072)
pyuv.fs.close(self.loop, fd)
pyuv.fs.close(self.loop, fd2)
with open(TEST_FILE, 'r') as fobj1:
with open(TEST_FILE2, 'r') as fobj2:
self.assertEqual(fobj1.read(), fobj2.read())
def test_sendfile_offset(self):
offset = OFFSET_VALUE + 1
with open(TEST_FILE, 'w') as f:
f.seek(offset)
f.write("test")
f.flush()
fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IREAD|stat.S_IWRITE)
fd2 = pyuv.fs.open(self.loop, TEST_FILE2, os.O_RDWR|os.O_CREAT, stat.S_IREAD|stat.S_IWRITE)
self.bytes_written = pyuv.fs.sendfile(self.loop, fd2, fd, offset, 4)
pyuv.fs.close(self.loop, fd)
pyuv.fs.close(self.loop, fd2)
with open(TEST_FILE, 'r') as fobj1:
fobj1.seek(offset)
with open(TEST_FILE2, 'r') as fobj2:
self.assertEqual(fobj1.read(), fobj2.read())
class FSTestUtime(FileTestCase):
def setUp(self):
super(FSTestUtime, self).setUp()
self.fd = None
def utime_cb(self, req):
self.errorno = req.error
if self.fd is not None:
pyuv.fs.close(self.loop, self.fd)
def test_utime(self):
self.errorno = None
atime = mtime = 400497753
pyuv.fs.utime(self.loop, TEST_FILE, atime, mtime, self.utime_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
s = os.stat(TEST_FILE)
self.assertEqual(s.st_atime, atime)
self.assertEqual(s.st_mtime, mtime)
def test_futime(self):
self.errorno = None
atime = mtime = 400497753
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IWRITE|stat.S_IREAD)
pyuv.fs.futime(self.loop, self.fd, atime, mtime, self.utime_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
s = os.stat(TEST_FILE)
self.assertTrue(s.st_atime == atime and s.st_mtime == mtime)
def test_utime_sync(self):
atime = mtime = 400497753
pyuv.fs.utime(self.loop, TEST_FILE, atime, mtime)
s = os.stat(TEST_FILE)
self.assertEqual(s.st_atime, atime)
self.assertEqual(s.st_mtime, mtime)
def test_futime_sync(self):
atime = mtime = 400497753
self.fd = pyuv.fs.open(self.loop, TEST_FILE, os.O_RDWR, stat.S_IWRITE|stat.S_IREAD)
pyuv.fs.futime(self.loop, self.fd, atime, mtime)
pyuv.fs.close(self.loop, self.fd)
s = os.stat(TEST_FILE)
self.assertEqual(s.st_atime, atime)
self.assertEqual(s.st_mtime, mtime)
class FSTestAccess(TestCase):
def setUp(self):
super(FSTestAccess, self).setUp()
with open(TEST_FILE, 'w') as f:
f.write("test")
def tearDown(self):
try:
os.remove(TEST_FILE)
except OSError:
pass
super(FSTestAccess, self).tearDown()
def access_cb(self, req):
self.errorno = req.error
def test_bad_access(self):
self.errorno = None
pyuv.fs.access(self.loop, BAD_FILE, os.F_OK, self.access_cb)
self.loop.run()
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
def test_access(self):
self.errorno = None
pyuv.fs.access(self.loop, TEST_FILE, os.F_OK, self.access_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
def test_access_sync(self):
pyuv.fs.access(self.loop, TEST_FILE, os.F_OK)
def test_access_error_sync(self):
try:
pyuv.fs.access(self.loop, BAD_FILE, os.F_OK)
except pyuv.error.FSError as e:
self.errorno = e.args[0]
else:
self.errorno = None
self.assertEqual(self.errorno, pyuv.errno.UV_ENOENT)
class FSTestRealpath(TestCase):
def realpath_cb(self, req):
self.errorno = req.error
self.result = req.result
def test_realpath(self):
self.errorno = None
self.result = None
pyuv.fs.realpath(self.loop, '.', self.realpath_cb)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertNotEqual(self.result, '.')
def test_realpath_sync(self):
result = pyuv.fs.realpath(self.loop, '.')
self.assertNotEqual(result, '.')
class FSEventTestBasic(FileTestCase):
def tearDown(self):
try:
os.remove(TEST_FILE2)
except OSError:
pass
super(FSEventTestBasic, self).tearDown()
def on_fsevent_cb(self, handle, filename, events, errorno):
handle.stop()
handle.close()
self.filename = filename
self.events = events
self.errorno = errorno
def timer_cb(self, timer):
timer.close()
os.rename(TEST_FILE, TEST_FILE2)
def test_fsevent_basic(self):
self.errorno = None
self.events = None
self.filename = None
fs_event = pyuv.fs.FSEvent(self.loop)
fs_event.start(TEST_FILE, 0, self.on_fsevent_cb)
self.assertEqual(fs_event.path, TEST_FILE)
timer = pyuv.Timer(self.loop)
timer.start(self.timer_cb, 1, 0)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertTrue(self.filename in (None, TEST_FILE, TEST_FILE2))
self.assertTrue(self.events in (pyuv.fs.UV_CHANGE, pyuv.fs.UV_RENAME))
class FSEventTest(FileTestCase):
def setUp(self):
super(FSEventTest, self).setUp()
os.mkdir(TEST_DIR, 0o755)
with open(os.path.join(TEST_DIR, TEST_FILE), 'w') as f:
f.write("test")
def tearDown(self):
shutil.rmtree(TEST_DIR)
try:
os.remove(TEST_FILE2)
except OSError:
pass
super(FSEventTest, self).tearDown()
def on_fsevent_cb(self, handle, filename, events, errorno):
handle.stop()
handle.close()
self.filename = filename
self.events = events
self.errorno = errorno
def timer_cb2(self, timer):
timer.close()
os.rename(os.path.join(TEST_DIR, TEST_FILE), os.path.join(TEST_DIR, TEST_FILE2))
def test_fsevent_dir(self):
self.errorno = None
self.events = None
self.filename = None
fs_event = pyuv.fs.FSEvent(self.loop)
fs_event.start(TEST_DIR, 0, self.on_fsevent_cb)
timer = pyuv.Timer(self.loop)
timer.start(self.timer_cb2, 1, 0)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertTrue(self.filename == None or self.filename == TEST_FILE)
self.assertTrue(self.events & pyuv.fs.UV_RENAME)
def timer_cb3(self, timer):
timer.close()
os.utime(os.path.join(TEST_DIR, TEST_FILE), None)
def test_fsevent_nrefile(self):
self.errorno = None
self.events = None
self.filename = None
fs_event = pyuv.fs.FSEvent(self.loop)
fs_event.start(os.path.join(TEST_DIR, TEST_FILE), 0, self.on_fsevent_cb)
timer = pyuv.Timer(self.loop)
timer.start(self.timer_cb3, 1, 0)
self.loop.run()
self.assertEqual(self.errorno, None)
self.assertTrue(self.filename == None or self.filename == TEST_FILE)
self.assertTrue(self.events & pyuv.fs.UV_CHANGE)
class FSPollTest(TestCase):
def tearDown(self):
try:
os.remove(TEST_FILE)
except OSError:
pass
super(FSPollTest, self).tearDown()
def _touch_file(self):
with open(TEST_FILE, 'w+') as f:
self.count += 1
for i in range(self.count+1):
f.write('*')
def on_timer(self, timer):
self._touch_file()
def on_fspoll(self, handle, prev_stat, curr_stat, error):
if self.poll_cb_called == 0:
self.assertEqual(error, pyuv.errno.UV_ENOENT)
self._touch_file()
elif self.poll_cb_called == 1:
self.timer.start(self.on_timer, 0.02, 0.0)
elif self.poll_cb_called == 2:
self.timer.start(self.on_timer, 0.2, 0.0)
elif self.poll_cb_called == 3:
os.remove(TEST_FILE)
elif self.poll_cb_called == 4:
self.assertEqual(error, pyuv.errno.UV_ENOENT)
self.fs_poll.close()
self.timer.close()
else:
self.fail('This should not happen')
self.poll_cb_called += 1
def test_fspoll1(self):
self.count = 0
self.poll_cb_called = 0
self.timer = pyuv.Timer(self.loop)
self.fs_poll = pyuv.fs.FSPoll(self.loop)
self.fs_poll.start(TEST_FILE, 0.1, self.on_fspoll)
self.assertEqual(self.fs_poll.path, TEST_FILE)
self.loop.run()
self.assertEqual(self.poll_cb_called, 5)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import sys
import threading
import IECore
import IECoreRI
class SXRendererTest( unittest.TestCase ) :
def __loadImage( self, fileName ) :
i = IECore.Reader.create( fileName ).read()
r = i["R"].data
g = i["G"].data
b = i["B"].data
result = IECore.V3fVectorData()
v = IECore.V3f
for i in range( 0, len( r ) ) :
result.append( v( r[i], g[i], b[i] ) )
return result
def __saveImage( self, data, dataWindow, fileName ) :
image = IECore.ImagePrimitive( dataWindow, dataWindow )
if isinstance( data, IECore.FloatVectorData ) :
image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
else :
r = IECore.FloatVectorData()
g = IECore.FloatVectorData()
b = IECore.FloatVectorData()
for c in data :
r.append( c[0] )
g.append( c[1] )
b.append( c[2] )
image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, r )
image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, g )
image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, b )
IECore.Writer.create( image, fileName ).write()
def __rectanglePoints( self, box ) :
p = IECore.V3fVectorData()
n = IECore.V3fVectorData()
i = IECore.V3fVectorData()
dPdu = IECore.V3fVectorData()
dPdv = IECore.V3fVectorData()
s = IECore.FloatVectorData()
t = IECore.FloatVectorData()
for y in range( box.min.y, box.max.y + 1 ) :
for x in range( box.min.x, box.max.x + 1 ) :
p.append( IECore.V3f( x, y, 0 ) )
n.append( IECore.V3f( 0, 0, 1 ) )
i.append( IECore.V3f( 0, 0, -1 ) )
dPdu.append( IECore.V3f( 2, 0, 0 ) )
dPdv.append( IECore.V3f( 0, 2, 0 ) )
s.append( float( x ) / box.size().x )
t.append( float( y ) / box.size().y )
return IECore.CompoundData( {
"P" : p,
"N" : n,
"Ng" : n,
"I" : i,
"dPdu" : dPdu,
"dPdv" : dPdv,
"s" : s,
"t" : t,
} )
def __assertVectorDataAlmostEqual( self, data1, data2 ) :
self.assertEqual( len( data1 ), len( data2 ) )
self.assertEqual( data1.typeName(), data2.typeName() )
if isinstance( data1, IECore.Color3fVectorData ) :
for i in range( 0, len( data1 ) ) :
self.failUnless( data1[i].equalWithAbsError( data2[i], 0.000001 ) )
else :
for i in range( 0, len( data1 ) ) :
self.assertAlmostEqual( data1[i], data2[i], 6 )
def test( self ) :
r = IECoreRI.SXRenderer()
points = IECore.CompoundData( {
"N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ),
"I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ),
} )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } )
s = r.shade( points )
self.assertEqual( len( s ), 6 )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.__assertVectorDataAlmostEqual( s["outputFloat"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["outputColor"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["Oi"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read() )
def testSplineParameter( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {
"spl" : IECore.SplinefColor3fData(
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1, 0, 0 ) ),
( 0, IECore.Color3f( 1, 0, 0 ) ),
( 1, IECore.Color3f( 0, 0, 1 ) ),
( 1, IECore.Color3f( 0, 0, 1 ) ),
)
)
)
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/spline.cob" ).read() )
# make sure that users don't have to provide values for every varying shader parameter if
# they don't want to. this used to crash.
def testMissingPredefinedVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
del points["t"] # remove information the shader requires
s = r.shade( points )
def testParameterTypes( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : 1.0,
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testFloat3PrimitiveVariable( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : 1.0,
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )
points = self.__rectanglePoints( b )
points["mustBeOneTwoThree"] = IECore.V3fVectorData( [ IECore.V3f( 1, 2, 3 ) ] * len( points["P"] ) )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) )
def testIntParameterSupport( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : IECore.IntData( 1 ),
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testBoolParameterSupport( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : IECore.BoolData( True ),
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testStack( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStackTest.sdl test/IECoreRI/shaders/sxStackTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
with IECore.WorldBlock( r ) :
r.setAttribute( "color", IECore.Color3f( 1, 0, 0 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 1.0 } )
with IECore.AttributeBlock( r ) :
r.setAttribute( "color", IECore.Color3f( 0, 1, 0 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) )
r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 0.5 } )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 0, 0.5 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 0, 1 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
def testNoShader( self ) :
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
self.assertRaises( RuntimeError, r.shade, self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) ) )
def testCoshaders( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() )
def testCoshadersWithGetVar( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 4 ) )
points = self.__rectanglePoints( b )
points["forGetVar"] = IECore.Color3fVectorData( [ IECore.Color3f( x[0], x[1], x[2] ) for x in points["P"] ] )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "primVarName" : "forGetVar", "__handle" : "cs1" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], points["forGetVar"] )
def testGrids( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxGridTest.sdl test/IECoreRI/shaders/sxGridTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxGridTest", {} )
# not providing enough points for the grid should raise
self.assertRaises( RuntimeError, r.shade, points, IECore.V2i( 100, 500 ) )
s = r.shade( points )
del s["P"] # test data on disk was created before we supported P as an output
del s["N"] # test data on disk was created before we supported N as an output
self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/noGrid.cob" ).read() )
s = r.shade( points, IECore.V2i( 21, 11 ) )
del s["P"] # test data on disk was created before we supported P as an output
del s["N"] # test data on disk was created before we supported N as an output
self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/grid.cob" ).read() )
def testPlaneShade( self ) :
r = IECoreRI.SXRenderer()
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStTest.sdl test/IECoreRI/shaders/sxStTest.sl" ), 0 )
r.shader( "surface", "test/IECoreRI/shaders/sxStTest.sdl", {} )
data = r.shadePlane( IECore.V2i( 64, 64 ) )
del data["P"]
del data["N"]
self.assertEqual( data, IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneCompoundData.cob" ).read() )
image = r.shadePlaneToImage( IECore.V2i( 64, 64 ) )
expectedImage = IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneImage.exr" ).read()
self.assertEqual( IECore.ImageDiffOp()( imageA=image, imageB=expectedImage, maxError=0 ), IECore.BoolData( False ) )
def testWrongType( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
p["t"] = p["P"]
self.assertRaises( RuntimeError, r.shade, p )
def testWrongSize( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
del p["t"][-10:]
self.assertRaises( RuntimeError, r.shade, p )
def testDisplacementShader( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
## need to use a grid topology if we want calculatenormal() to work
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( len( s ), 2 )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
for i in range( 0, len( points["P"] ) ) :
self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) )
self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) )
def testDisplacementAndSurfaceShaders( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} )
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
## need to use a grid topology if we want calculatenormal() to work
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( len( s ), 6 )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
for i in range( 0, len( points["P"] ) ) :
self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) )
self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) )
def testLights( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} )
r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = s["Ci"][i]
self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) )
def testPredefinedPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 1, 1, 1 ) )
def testNonPredefinedPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] )
points["floatPrimVar"] = points["s"]
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = points["colorPrimVar"][i]
c[0] = points["s"][i]
self.assertEqual( s["Ci"][i], c )
def testNonPredefinedPrimitiveVariablesForCoshaders( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( s["Ci"], points["colorPrimVar"] )
def testUniformPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) )
points["floatPrimVar"] = IECore.FloatData( 16.0 )
points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) )
points["stringPrimVar"] = IECore.StringData( "hello shader!" )
points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.75 ) )
def testUniformPrimitiveVariableShaderParameters( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) )
points["floatPrimVar"] = IECore.FloatData( 16.0 )
points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) )
points["stringPrimVar"] = IECore.StringData( "hello shader!" )
points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.5 ) )
def testThreading( self ) :
# set up a renderer with a shader in it
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } )
# and get some points to shade
points = IECore.CompoundData( {
"N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ),
"I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ),
} )
# shade in lots of different threads at the same time
def s( i ) :
results[i] = r.shade( points )
threads = []
results = []
for i in range( 0, 300 ) :
threads.append( threading.Thread( target = IECore.curry( s, i ) ) )
results.append( None )
for t in threads :
t.start()
for t in threads :
t.join()
# and check that it all worked
cowFloat = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read()
cowColor = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read()
cowCI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read()
cowOI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read()
# check that the first set of results is close enough to the expected results.
# we allow some small variation as 3delight's noise routines seem to yield
# very small differences between some versions.
self.__assertVectorDataAlmostEqual( results[0]["outputFloat"], cowFloat )
self.__assertVectorDataAlmostEqual( results[0]["outputColor"], cowColor )
self.__assertVectorDataAlmostEqual( results[0]["Ci"], cowCI )
self.__assertVectorDataAlmostEqual( results[0]["Oi"], cowOI )
# check that all results are exactly equal to the first set. even if we
# accept small variations between different 3delight versions we don't accept
# variation within one version.
for s in results :
self.assertEqual( len( s ), 6 )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.assertEqual( s["outputFloat"], results[0]["outputFloat"] )
self.assertEqual( s["outputColor"], results[0]["outputColor"] )
self.assertEqual( s["Ci"], results[0]["Ci"] )
self.assertEqual( s["Oi"], results[0]["Oi"] )
def testGetVar( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetVarTest.sdl test/IECoreRI/shaders/sxGetVarTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["floatValue1"] = points["s"]
points["floatValue2"] = points["t"]
r.shader( "surface", "test/IECoreRI/shaders/sxGetVarTest", { } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( s["Ci"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["floatValue1"][i], points["floatValue2"][i] ) )
def testGetShaderInConstruct( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetShaderTest.sdl test/IECoreRI/shaders/sxGetShaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1", "sColor" : IECore.Color3f( 0, 1, 0 ), } )
r.shader( "surface", "test/IECoreRI/shaders/sxGetShaderTest", { "coshader" : IECore.StringData( "cs1" ) } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["s"][i], 0 ) )
def testCoshadersStack( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } )
with IECore.AttributeBlock( r ) :
# these guys should be popped and therefore not affect the result
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 1, 1 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 1, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0.5, 0, 0.25 ), "__handle" : "cs3" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() )
def testLightsStack( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} )
r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} )
with IECore.AttributeBlock( r ) :
# this guy should be popped and therefore not affect the result
r.light( "test/IECoreRI/shaders/sxLightTest", "light1", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = s["Ci"][i]
self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) )
def testZeroLength( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
for k, v in p.items() :
del v[:]
self.assertRaises( RuntimeError, r.shade, p )
def testThreadedTextureLookups( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTextureTest.sdl test/IECoreRI/shaders/sxTextureTest.sl" ), 0 )
points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 255 ) ) )
# by default you should be able to run as many threads as the hardware will support
# concurrently.
for i in range( 0, 10 ) :
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", {
"fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ),
} )
# note the -1 when determining the number of threads. 3delight behaviour changed around
# 10.0.35, such that render:nthreads (which defaults to hardwareConcurrency()) is the
# number of threads that will be making Sx calls of any sort, whereas prior to that it
# was the number of threads that would actually call SxCallShader. because we've set up
# the renderer on this thread, it's taken one off the count for the number of threads we
# can spawn to do the shading.
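# (illustrative arithmetic only: with IECore.hardwareConcurrency() == 8, the
# setup thread accounts for one slot, leaving 7 shading threads below)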
threads = []
for i in range( 0, IECore.hardwareConcurrency() - 1 ) :
threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) )
for t in threads :
t.start()
for t in threads :
t.join()
# but if you want to use more then you need to let the library know about it
# by calling setOption( "ri:render:nthreads" )
for i in range( 0, 10 ) :
r = IECoreRI.SXRenderer()
# see above - we're adding one to the number of threads we'll be using to do the shading,
# because we've also used a thread (the current thread) to perform the setup.
r.setOption( "ri:render:nthreads", IECore.IntData( IECore.hardwareConcurrency() * 2 + 1 ) )
r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", {
"fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ),
} )
threads = []
for i in range( 0, IECore.hardwareConcurrency() * 2 ) :
threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) )
for t in threads :
t.start()
for t in threads :
t.join()
def tearDown( self ) :
files = [
"test/IECoreRI/shaders/sxTest.sdl",
"test/IECoreRI/shaders/splineTest.sdl",
"test/IECoreRI/shaders/sxParameterTest.sdl",
"test/IECoreRI/shaders/sxStackTest.sdl",
"test/IECoreRI/shaders/sxCoshaderTest.sdl",
"test/IECoreRI/shaders/sxCoshaderTestMain.sdl",
"test/IECoreRI/shaders/sxGridTest.sdl",
"test/IECoreRI/shaders/sxDisplacementTest.sdl",
"test/IECoreRI/shaders/sxIlluminanceTest.sdl",
"test/IECoreRI/shaders/sxLightTest.sdl",
"test/IECoreRI/shaders/sxStTest.sdl",
"test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl",
"test/IECoreRI/shaders/sxGetVarTest.sdl",
"test/IECoreRI/shaders/sxGetShaderTest.sdl",
"test/IECoreRI/shaders/sxTextureTest.sdl",
]
for f in files :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
|
"""Manage Fake OF tables for unit tests"""
# pylint: disable=too-many-lines
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import json
import ast
import heapq
import pprint
from collections import OrderedDict
from bitstring import Bits
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
from ryu.ofproto import ofproto_parser as ofp_parser
from ryu.lib import addrconv
CONTROLLER_PORT = 4294967293
IN_PORT = 4294967288
class FakeOFTableException(Exception):
"""Indicates an erroneous flow or group mod"""
class DFS:
"""Provides a way of tracking the search through the FakeOFNetwork"""
visited = None
heap = None
def __init__(self):
self.visited = {}
self.heap = []
def visit(self, dp_id, pkt):
"""
Notifies the DFS that a packet has visited the dp_id
Args:
dp_id: The DP ID for the node that is being visited
pkt: The packet that is visiting the node
"""
self.visited.setdefault(dp_id, [])
if pkt not in self.visited[dp_id]:
self.visited[dp_id].append(pkt)
def has_visited(self, dp_id, pkt):
"""
Returns true if the packet has visited the node DP ID before
Args:
dp_id: The DP ID for the node is being visited
pkt: The packet that is visiting the node
"""
if dp_id in self.visited:
if pkt in self.visited[dp_id]:
return True
return False
def peek(self):
"""
Returns the first item in the heap (the one with the highest priority,
i.e. the smallest value) without popping it from the heap
Returns:
dp_id, pkt
"""
if not self.heap:
return None, None
item = self.heap[0]
return item[1][0], item[1][1]
def push(self, dp_id, pkt, priority):
"""
Pushes the dp_id and pkt onto the heap with priority
Args:
dp_id:
pkt:
priority:
"""
heapq.heappush(self.heap, (priority, (dp_id, tuple(pkt.items()))))
def pop(self):
"""
Obtains the item with the highest priority
Returns:
dp_id, pkt
"""
if not self.heap:
return None, None
item = heapq.heappop(self.heap)
return item[1][0], item[1][1]
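# A minimal usage sketch of the DFS helper above (illustrative only; the DP IDs
# and packet fields are hypothetical, not taken from any real test):
#
#     dfs = DFS()
#     pkt = {'in_port': 1, 'eth_dst': '0e:00:00:00:00:01'}
#     dfs.push(0x2, pkt, 3)        # further from the destination
#     dfs.push(0x1, pkt, 1)        # closer, so it is popped first
#     dfs.visit(0x1, pkt)
#     dp_id, pkt_items = dfs.pop() # -> (0x1, (('in_port', 1), ('eth_dst', ...)))
#     pkt = dict(pkt_items)        # pop()/peek() return the packet as a tuple of items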
class FakeOFNetwork:
"""
FakeOFNetwork is a virtual openflow pipeline used for testing openflow controllers
The network contains multiple FakeOFTables to represent multiple switches in a network
"""
def __init__(self, valves_manager, num_tables, requires_tfm=True):
"""
Args:
valves_manager (ValvesManager): Valves manager class to resolve stack traversals
num_tables (int): The number of tables to configure in each FakeOFTable
requires_tfm (bool): Whether TFMs are required
"""
self.valves_manager = valves_manager
self.tables = {}
for dp_id in self.valves_manager.valves:
self.tables[dp_id] = FakeOFTable(dp_id, num_tables, requires_tfm)
def apply_ofmsgs(self, dp_id, ofmsgs, ignore_errors=False):
"""Applies ofmsgs to a FakeOFTable for DP ID"""
self.tables[dp_id].apply_ofmsgs(ofmsgs, ignore_errors=ignore_errors)
def print_table(self, dp_id):
"""Prints the table in string format to STDERR"""
sys.stderr.write('TABLE %x\n' % dp_id)
sys.stderr.write(str(self.tables[dp_id]) + '\n')
sys.stderr.write('======================\n\n')
def shortest_path_len(self, src_dpid, dst_dpid):
"""Returns the length of the shortest path from the source to the destination"""
if src_dpid == dst_dpid:
return 1
src_valve = self.valves_manager.valves[src_dpid]
dst_valve = self.valves_manager.valves[dst_dpid]
if src_valve.dp.stack and dst_valve.dp.stack:
return len(src_valve.dp.stack.shortest_path(dst_valve.dp.name))
return 2
def is_output(self, match, src_dpid, dst_dpid, port=None, vid=None, trace=False):
"""
Traverses a packet through the network until we have searched everything
or successfully output a packet to the destination with expected port and vid
If port is None return True if output to any port (including special ports)
regardless of VLAN tag.
If vid is None return True if output to specified port regardless of VLAN tag.
If the vid OFPVID_PRESENT bit is 0, return True if the output packet does not
have a VLAN tag OR its OFPVID_PRESENT bit is 0
Args:
match (dict): A dictionary keyed by header field names with values
src_dpid: The source DP ID of the match packet entering the Fake OF network
dst_dpid: The expected destination DP ID of the packet match
port: The expected output port on the destination DP
vid: The expected output vid on the destination DP
trace (bool): Print the trace of traversing the tables
Returns:
true if packets with match fields is output to port with correct VLAN
"""
found = False
dfs = DFS()
priority = self.shortest_path_len(src_dpid, dst_dpid)
pkt = match.copy()
dfs.push(src_dpid, pkt, priority)
dfs.visit(src_dpid, pkt)
while not found:
# Search through the packet paths until we have searched everything or
# successfully output the packet to the destination in the expected format
dp_id, pkt = dfs.pop()
if dp_id is None or pkt is None:
break
pkt = dict(pkt)
if dp_id == dst_dpid:
# A packet has reached the destination, so test for the output
found = self.tables[dp_id].is_full_output(pkt, port, vid, trace=trace)
if not found and trace:
# A packet on the destination DP is not output in the expected state so
# continue searching (flood reflection)
sys.stderr.write('Output is away from destination\n')
if not found:
# Packet not reached destination, so continue traversing
if trace:
sys.stderr.write('FakeOFTable %s: %s\n' % (dp_id, pkt))
port_outputs = self.tables[dp_id].get_port_outputs(pkt, trace=trace)
valve = self.valves_manager.valves[dp_id]
for out_port, out_pkts in port_outputs.items():
if out_port == IN_PORT:
# Rebind output to the packet in_port value
out_port = pkt['in_port']
if out_port not in valve.dp.ports:
# Ignore output to improper ports & controller
# TODO: Here we should actually send the packet to the
# controller, and maybe install necessary rules to
# help testing routing implementations
continue
for out_pkt in out_pkts:
port_obj = valve.dp.ports[out_port]
if port_obj.stack:
# Need to continue traversing through the FakeOFNetwork
adj_port = port_obj.stack['port']
adj_dpid = port_obj.stack['dp'].dp_id
new_pkt = out_pkt.copy()
new_pkt['in_port'] = adj_port.number
if not dfs.has_visited(adj_dpid, new_pkt):
# Add packet to the heap if we have not visited the node with
# this packet before
priority = self.shortest_path_len(adj_dpid, dst_dpid)
dfs.push(adj_dpid, new_pkt, priority)
dfs.visit(adj_dpid, new_pkt)
elif trace:
# Output to non-stack port, can ignore this output
sys.stderr.write(
'Ignoring non-stack output %s:%s\n' % (valve.dp.name, out_port))
if trace:
sys.stderr.write('\n')
return found
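# Hedged usage example: a test assertion against a FakeOFNetwork instance might
# look like the following (the network object, DP IDs, port and VLAN are made up
# for illustration; 0x1000 is the OFPVID_PRESENT bit):
#
#     match = {'in_port': 1, 'eth_dst': '0e:00:00:00:00:02', 'vlan_vid': 0}
#     # True if the packet entering DP 0x1 is output on port 2 of DP 0x2
#     # tagged with VLAN 100
#     assert network.is_output(match, 0x1, 0x2, port=2, vid=0x1000 | 100)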
def table_state(self, dp_id):
"""Return tuple of table hash & table str"""
return self.tables[dp_id].table_state()
def hash_table(self, dp_id):
"""Return a hash of a single FakeOFTable"""
return self.tables[dp_id].__hash__()
class FakeOFTable:
"""Fake OFTable is a virtual openflow pipeline used for testing openflow
controllers.
The tables are populated using apply_ofmsgs and can be queried with
is_output.
"""
def __init__(self, dp_id, num_tables=1, requires_tfm=True):
self.dp_id = dp_id
self.tables = [[] for _ in range(0, num_tables)]
self.groups = {}
self.requires_tfm = requires_tfm
self.tfm = {}
def table_state(self):
"""Return tuple of table hash & table str"""
table_str = str(self.tables)
return (hash(frozenset(table_str)), table_str)
def __hash__(self):
"""Return a host of the tables"""
return hash(frozenset(str(self.tables)))
def _apply_groupmod(self, ofmsg):
"""Maintain group table."""
def _del(_ofmsg, group_id):
if group_id == ofp.OFPG_ALL:
self.groups = {}
return
if group_id in self.groups:
del self.groups[group_id]
def _add(ofmsg, group_id):
if group_id in self.groups:
raise FakeOFTableException(
'group already in group table: %s' % ofmsg)
self.groups[group_id] = ofmsg
def _modify(ofmsg, group_id):
if group_id not in self.groups:
raise FakeOFTableException(
'group not in group table: %s' % ofmsg)
self.groups[group_id] = ofmsg
_groupmod_handlers = {
ofp.OFPGC_DELETE: _del,
ofp.OFPGC_ADD: _add,
ofp.OFPGC_MODIFY: _modify,
}
_groupmod_handlers[ofmsg.command](ofmsg, ofmsg.group_id)
def _apply_flowmod(self, ofmsg):
"""Adds, Deletes and modify flow modification messages are applied
according to section 6.4 of the OpenFlow 1.3 specification."""
def _validate_flowmod_tfm(table_id, tfm_body, ofmsg):
if not self.requires_tfm:
return
if table_id == ofp.OFPTT_ALL:
if ofmsg.match.items() and not self.tfm:
raise FakeOFTableException(
'got %s with matches before TFM that defines tables'
% ofmsg)
return
if tfm_body is None:
raise FakeOFTableException(
'got %s before TFM that defines table %u' % (
ofmsg, table_id
)
)
def _add(table, flowmod):
# From the 1.3 spec, section 6.4:
# For add requests (OFPFC_ADD) with the
# OFPFF_CHECK_OVERLAP flag set, the switch must first
# check for any overlapping flow entries in the
# requested table. Two flow entries overlap if a
# single packet may match both, and both flow entries
# have the same priority, but the two flow entries
# don't have the exact same match. If an overlap
# conflict exists between an existing flow entry and
# the add request, the switch must refuse the addition
# and respond with an ofp_error_msg with
# OFPET_FLOW_MOD_FAILED type and OFPFMFC_OVERLAP code.
#
# Without the check overlap flag it is possible to have
# overlapping flow table entries, which would cause ambiguous
# behaviour. This is obviously unacceptable, so we assume the
# flag is always set.
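#
# Illustrative overlap (hypothetical flowmods): at the same priority,
# match(in_port=1) and match(vlan_vid=0x1064) overlap, because a single
# packet with in_port=1 and vlan_vid=0x1064 matches both, yet the two
# matches are not identical.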
for fte in table:
if flowmod.fte_matches(fte, strict=True):
table.remove(fte)
break
if flowmod.overlaps(fte):
raise FakeOFTableException(
'Overlapping flowmods {} and {}'.format(
flowmod, fte))
table.append(flowmod)
def _del(table, flowmod):
removals = [fte for fte in table if flowmod.fte_matches(fte)]
for fte in removals:
table.remove(fte)
def _del_strict(table, flowmod):
for fte in table:
if flowmod.fte_matches(fte, strict=True):
table.remove(fte)
break
def _modify(table, flowmod):
for fte in table:
if flowmod.fte_matches(fte):
fte.instructions = flowmod.instructions
def _modify_strict(table, flowmod):
for fte in table:
if flowmod.fte_matches(fte, strict=True):
fte.instructions = flowmod.instructions
break
_flowmod_handlers = {
ofp.OFPFC_ADD: _add,
ofp.OFPFC_DELETE: _del,
ofp.OFPFC_DELETE_STRICT: _del_strict,
ofp.OFPFC_MODIFY: _modify,
ofp.OFPFC_MODIFY_STRICT: _modify_strict,
}
table_id = ofmsg.table_id
tfm_body = self.tfm.get(table_id, None)
if table_id == ofp.OFPTT_ALL or table_id is None:
tables = self.tables
else:
tables = [self.tables[table_id]]
_validate_flowmod_tfm(table_id, tfm_body, ofmsg)
flowmod = FlowMod(ofmsg)
for table in tables:
_flowmod_handlers[ofmsg.command](table, flowmod)
if tfm_body:
for table in tables:
entries = len(table)
if entries > tfm_body.max_entries:
tfm_table_details = '%s : table %u %s full (%u/%u)' % (
self.dp_id, table_id, tfm_body.name, entries, tfm_body.max_entries)
flow_dump = '\n\n'.join(
(tfm_table_details, str(ofmsg), str(tfm_body)))
raise FakeOFTableException(flow_dump)
def _apply_tfm(self, ofmsg):
self.tfm = {body.table_id: body for body in ofmsg.body}
def _apply_flowstats(self, ofmsg):
"""Update state of flow tables to match an OFPFlowStatsReply message.
This assumes a tfm is not required."""
self.tables = []
self.requires_tfm = False
self.tfm = {}
for stat in ofmsg.body:
while len(self.tables) <= stat.table_id:
self.tables.append([])
self.tables[stat.table_id].append(FlowMod(stat))
def apply_ofmsgs(self, ofmsgs, ignore_errors=False):
"""Update state of test flow tables."""
for ofmsg in ofmsgs:
try:
if isinstance(ofmsg, parser.OFPBarrierRequest):
continue
if isinstance(ofmsg, parser.OFPPacketOut):
continue
if isinstance(ofmsg, parser.OFPSetConfig):
continue
if isinstance(ofmsg, parser.OFPSetAsync):
continue
if isinstance(ofmsg, parser.OFPDescStatsRequest):
continue
if isinstance(ofmsg, parser.OFPMeterMod):
# TODO: handle OFPMeterMod
continue
if isinstance(ofmsg, parser.OFPTableFeaturesStatsRequest):
self._apply_tfm(ofmsg)
continue
if isinstance(ofmsg, parser.OFPGroupMod):
self._apply_groupmod(ofmsg)
continue
if isinstance(ofmsg, parser.OFPFlowMod):
self._apply_flowmod(ofmsg)
self.sort_tables()
continue
if isinstance(ofmsg, parser.OFPFlowStatsReply):
self._apply_flowstats(ofmsg)
self.sort_tables()
continue
except FakeOFTableException:
if not ignore_errors:
raise
if not ignore_errors:
raise FakeOFTableException('Unsupported flow %s' % str(ofmsg))
def single_table_lookup(self, match, table_id, trace=False):
"""
Searches through a single table with `table_id` for entries
that will be applied to the packet with fields represented by match
Args:
match (dict): A dictionary keyed by header field names with values
table_id (int): The table ID to send the match packet through
trace (bool): Print the trace of traversing the table
Returns:
matching_fte: First matching flowmod in the table
"""
packet_dict = match.copy()
table = self.tables[table_id]
matching_fte = None
# Find matching flowmods
for fte in table:
if fte.pkt_matches(packet_dict):
matching_fte = fte
break
if trace:
sys.stderr.write('%s: %s\n' % (table_id, matching_fte))
return matching_fte
def _process_instruction(self, match, instruction):
"""
Process an instruction's actions into an output dictionary
Args:
match (dict): A dictionary keyed by header field names with values
instruction: The instruction being applied to the packet match
Returns:
outputs: OrderedDict of an output port to list of output packets
packet_dict: final dictionary of the packet
"""
outputs = OrderedDict()
packet_dict = match.copy()
pending_actions = []
for action in instruction.actions:
if action.type == ofp.OFPAT_OUTPUT:
# Save the packet that is output to a port
outputs.setdefault(action.port, [])
outputs[action.port].append(packet_dict.copy())
pending_actions = []
continue
pending_actions.append(action)
if action.type == ofp.OFPAT_SET_FIELD:
# Set field, modify a packet header
packet_dict[action.key] = action.value
elif action.type == ofp.OFPAT_PUSH_VLAN:
if 'vlan_vid' in packet_dict and packet_dict['vlan_vid'] & ofp.OFPVID_PRESENT:
# Pushing on another tag, so create another
# field for the encapsulated VID
packet_dict['encap_vid'] = packet_dict['vlan_vid']
# Push the VLAN header to the packet
packet_dict['vlan_vid'] = ofp.OFPVID_PRESENT
elif action.type == ofp.OFPAT_POP_VLAN:
# Remove VLAN header from the packet
packet_dict.pop('vlan_vid')
if 'vlan_pcp' in packet_dict:
# Also make sure to pop off any VLAN header information too
packet_dict.pop('vlan_pcp')
if 'encap_vid' in packet_dict:
# Move the encapsulated VID to the front
packet_dict['vlan_vid'] = packet_dict['encap_vid']
packet_dict.pop('encap_vid')
else:
packet_dict['vlan_vid'] = 0
elif action.type == ofp.OFPAT_GROUP:
# Group mod so make sure that we process the group buckets
if action.group_id not in self.groups:
raise FakeOFTableException('output group not in group table: %s' % action)
buckets = self.groups[action.group_id].buckets
for bucket in buckets:
bucket_outputs, _, _ = self._process_instruction(packet_dict, bucket)
for out_port, out_pkts in bucket_outputs.items():
outputs.setdefault(out_port, [])
outputs[out_port].extend(out_pkts)
pending_actions = []
return outputs, packet_dict, pending_actions
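# Worked sketch of the VLAN handling above (values are illustrative only):
#
#     pkt = {'in_port': 1, 'vlan_vid': 0x1000 | 100}
#     # OFPAT_PUSH_VLAN: the existing tag is saved as 'encap_vid' and
#     #   vlan_vid becomes the bare OFPVID_PRESENT bit (0x1000)
#     # OFPAT_SET_FIELD(vlan_vid=0x1000 | 200): the outer tag becomes VLAN 200
#     # OFPAT_POP_VLAN: the outer tag is dropped and 'encap_vid' is restored,
#     #   leaving {'in_port': 1, 'vlan_vid': 0x1000 | 100} again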
def get_table_output(self, match, table_id, trace=False):
"""
Send a packet through a single table and return the output
ports mapped to the output packet
Args:
match (dict): A dictionary keyed by header field names with values
table_id (int): The table ID to send the packet match through
trace (bool): Print the trace of traversing the table
Returns:
outputs: OrderedDict of an output port to output packet map
packet_dict: The last version of the packet
next_table: Table ID of the next table
"""
next_table = None
packet_dict = match.copy()
outputs = OrderedDict()
matching_fte = self.single_table_lookup(match, table_id, trace)
pending_actions = []
if matching_fte:
for instruction in matching_fte.instructions:
if instruction.type == ofp.OFPIT_GOTO_TABLE:
if table_id < instruction.table_id:
next_table = instruction.table_id
else:
raise FakeOFTableException('goto to lower table ID')
elif instruction.type == ofp.OFPIT_APPLY_ACTIONS:
if not instruction.actions:
raise FakeOFTableException('no-op instruction actions')
instruction_outputs, packet_dict, pending_actions = self._process_instruction(
packet_dict, instruction)
for out_port, out_pkts in instruction_outputs.items():
outputs.setdefault(out_port, [])
outputs[out_port].extend(out_pkts)
elif instruction.type == ofp.OFPIT_WRITE_METADATA:
metadata = packet_dict.get('metadata', 0)
mask = instruction.metadata_mask
mask_compl = mask ^ 0xFFFFFFFFFFFFFFFF
packet_dict['metadata'] = (metadata & mask_compl)\
| (instruction.metadata & mask)
if next_table:
pending_actions = []
if pending_actions:
raise FakeOFTableException('flow performs actions on packet after \
output with no goto: %s' % matching_fte)
return outputs, packet_dict, next_table
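# Hedged example of the OFPIT_WRITE_METADATA arithmetic above: with packet
# metadata 0x00ff, instruction.metadata 0x0a00 and metadata_mask 0x0f00, only
# the masked bits are rewritten:
#     (0x00ff & ~0x0f00) | (0x0a00 & 0x0f00) == 0x0aff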
def get_output(self, match, trace=False):
"""
Get all of the outputs of the tables with the output packets
for each table in the FakeOFTable that match progresses through
Args:
match (dict): A dictionary keyed by header field names with values
trace (bool): Print the trace of traversing the table
Returns:
table_outputs: map from table_id output to output ports & packets
for that table
"""
table_outputs = {}
table_id = 0
next_table = True
packet_dict = match.copy()
while next_table:
next_table = False
outputs, packet_dict, next_table_id = self.get_table_output(
packet_dict, table_id, trace)
table_outputs[table_id] = outputs
next_table = next_table_id is not None
table_id = next_table_id
return table_outputs
def get_port_outputs(self, match, trace=False):
"""
Get all of the outputs of the tables with the output packets
for each table in the FakeOFTable that match progresses through
Args:
match (dict): A dictionary keyed by header field names with value
trace (bool): Print the trace of traversing the table
Returns:
table_outputs: Map from output port number to a list of unique output packets
"""
port_outputs = {}
table_id = 0
next_table = True
packet_dict = match.copy()
while next_table:
next_table = False
outputs, packet_dict, next_table_id = self.get_table_output(
packet_dict, table_id, trace)
for out_port, out_pkts in outputs.items():
port_outputs.setdefault(out_port, [])
# Remove duplicate entries from the list
for out_pkt in out_pkts:
if out_pkt not in port_outputs[out_port]:
port_outputs[out_port].append(out_pkt)
next_table = next_table_id is not None
table_id = next_table_id
return port_outputs
def is_full_output(self, match, port=None, vid=None, trace=False):
"""
If port is None return True if output to any port (including special ports)
regardless of VLAN tag.
If vid is None return True if output to specified port regardless of VLAN tag.
If vid OFPVID_PRESENT bit is 0, return True if output packet does not have
a VLAN tag OR packet OFPVID_PRESENT is 0
Args:
match (dict): A dictionary keyed by header field names with values
port: The expected output port
vid: The expected output vid
trace (bool): Print the trace of traversing the tables
Returns:
true if packets with match fields is output to port with correct VLAN
"""
table_outputs = self.get_output(match, trace)
if trace:
sys.stderr.write(pprint.pformat(table_outputs) + '\n')
in_port = match.get('in_port')
        for single_table_outputs in table_outputs.values():
            for out_port, out_pkts in single_table_outputs.items():
for out_pkt in out_pkts:
if port == out_port and port == out_pkt['in_port']:
continue
if port is None:
# Port is None & outputting so return true
return True
if vid is None:
# Vid is None, return true if output to specified port
if port == out_port:
return True
if out_port == ofp.OFPP_IN_PORT and port == in_port:
# In some cases we want to match to specifically ofp.OFPP_IN_PORT
# otherwise we treat ofp.OFPP_IN_PORT as the match in_port
return True
if port == out_port or (out_port == ofp.OFPP_IN_PORT and port == in_port):
# Matching port, so check matching VID
if vid & ofp.OFPVID_PRESENT == 0:
# If OFPVID_PRESENT bit is 0 then packet should not have a VLAN tag
return ('vlan_vid' not in out_pkt
or out_pkt['vlan_vid'] & ofp.OFPVID_PRESENT == 0)
# VID specified, check if matching expected
return 'vlan_vid' in out_pkt and vid == out_pkt['vlan_vid']
return False
def lookup(self, match, trace=False):
"""Return the entries from flowmods that matches match.
Searches each table in the pipeline for the entries that will be
applied to the packet with fields represented by match.
Arguments:
match: a dictionary keyed by header field names with values.
header fields not provided in match must be wildcarded for the
entry to be considered matching.
Returns: a list of the flowmods that will be applied to the packet
represented by match
"""
packet_dict = match.copy() # Packet headers may be modified
instructions = []
table_id = 0
goto_table = True
while goto_table:
goto_table = False
table = self.tables[table_id]
matching_fte = None
# find a matching flowmod
for fte in table:
if fte.pkt_matches(packet_dict):
matching_fte = fte
break
# if a flowmod is found, make modifications to the match values and
# determine if another lookup is necessary
if trace:
sys.stderr.write('%d: %s\n' % (table_id, matching_fte))
if matching_fte:
for instruction in matching_fte.instructions:
instructions.append(instruction)
if instruction.type == ofp.OFPIT_GOTO_TABLE:
if table_id < instruction.table_id:
table_id = instruction.table_id
goto_table = True
elif instruction.type == ofp.OFPIT_APPLY_ACTIONS:
for action in instruction.actions:
if action.type == ofp.OFPAT_SET_FIELD:
packet_dict[action.key] = action.value
elif instruction.type == ofp.OFPIT_WRITE_METADATA:
metadata = packet_dict.get('metadata', 0)
mask = instruction.metadata_mask
mask_compl = mask ^ 0xFFFFFFFFFFFFFFFF
packet_dict['metadata'] = (metadata & mask_compl)\
| (instruction.metadata & mask)
return (instructions, packet_dict)
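    # Usage sketch (not part of the original class; assumes `table` is a
    # FakeOFTable already populated via apply_ofmsgs, and `ofp` is the ryu
    # ofproto module imported by this file):
    #   instructions, final_pkt = table.lookup({'in_port': 1, 'eth_type': 0x0800})
    #   output_actions = [action
    #                     for instruction in instructions
    #                     if instruction.type == ofp.OFPIT_APPLY_ACTIONS
    #                     for action in instruction.actions
    #                     if action.type == ofp.OFPAT_OUTPUT]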
def flow_count(self):
"""Return number of flow tables rules"""
return sum(map(len, self.tables))
def is_output(self, match, port=None, vid=None, trace=False):
"""Return true if packets with match fields is output to port with
correct vlan.
If port is none it will return true if output to any port (including
special ports) regardless of vlan tag.
If vid is none it will return true if output to specified port
regardless of vlan tag.
To specify checking that the packet should not have a vlan tag, set the
OFPVID_PRESENT bit in vid to 0.
Arguments:
Match: a dictionary keyed by header field names with values.
"""
full_output = self.is_full_output(match.copy(), port, vid, trace)
def _output_result(action, vid_stack, port, vid):
if port is None:
return True
in_port = match.get('in_port')
result = None
if action.port == port:
if port == in_port:
result = None
elif vid is None:
result = True
elif vid & ofp.OFPVID_PRESENT == 0:
result = not vid_stack
else:
result = bool(vid_stack and vid == vid_stack[-1])
elif action.port == ofp.OFPP_IN_PORT and port == in_port:
result = True
return result
def _process_vid_stack(action, vid_stack):
if action.type == ofp.OFPAT_PUSH_VLAN:
vid_stack.append(ofp.OFPVID_PRESENT)
elif action.type == ofp.OFPAT_POP_VLAN:
vid_stack.pop()
elif action.type == ofp.OFPAT_SET_FIELD:
if action.key == 'vlan_vid':
vid_stack[-1] = action.value
return vid_stack
if trace:
sys.stderr.write(
'tracing packet flow %s matching to port %s, vid %s\n' % (match, port, vid))
# vid_stack represents the packet's vlan stack, innermost label listed
# first
match_vid = match.get('vlan_vid', 0)
vid_stack = []
if match_vid & ofp.OFPVID_PRESENT != 0:
vid_stack.append(match_vid)
instructions, _ = self.lookup(match, trace=trace)
for instruction in instructions:
if instruction.type != ofp.OFPIT_APPLY_ACTIONS:
continue
for action in instruction.actions:
vid_stack = _process_vid_stack(action, vid_stack)
if action.type == ofp.OFPAT_OUTPUT:
output_result = _output_result(
action, vid_stack, port, vid)
if output_result is not None:
if output_result != full_output:
raise FakeOFTableException('Output functions do not match')
return output_result
elif action.type == ofp.OFPAT_GROUP:
if action.group_id not in self.groups:
raise FakeOFTableException(
'output group not in group table: %s' % action)
buckets = self.groups[action.group_id].buckets
for bucket in buckets:
bucket_vid_stack = vid_stack
for bucket_action in bucket.actions:
bucket_vid_stack = _process_vid_stack(
bucket_action, bucket_vid_stack)
if bucket_action.type == ofp.OFPAT_OUTPUT:
                                output_result = _output_result(
                                    bucket_action, bucket_vid_stack, port, vid)
if output_result is not None:
if output_result != full_output:
raise FakeOFTableException('Output functions do not match')
return output_result
if full_output is not False:
raise FakeOFTableException('Output functions do not match')
return False
def apply_instructions_to_packet(self, match):
"""
Send packet through the fake OF table pipeline
Args:
match (dict): A dict keyed by header fields with values, represents
a packet
Returns:
dict: Modified match dict, represents packet that has been through
the pipeline with values possibly altered
"""
_, packet_dict = self.lookup(match)
return packet_dict
def __str__(self):
string = ''
for table_id, table in enumerate(self.tables):
string += '\n----- Table %u -----\n' % (table_id)
string += '\n'.join(sorted([str(flowmod) for flowmod in table]))
return string
def sort_tables(self):
"""Sort flows in tables by priority order."""
self.tables = [sorted(table, reverse=True) for table in self.tables]
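# Usage sketch for the table helpers above (a minimal sketch, not part of the
# original module; assumes `flowmods` is a list of ryu OFPFlowMod messages and
# that one of them pushes VLAN 100 and outputs to port 2):
#   table = FakeOFTable(2)              # two tables in the pipeline
#   table.apply_ofmsgs(flowmods)
#   tagged_vid = 100 | ofp.OFPVID_PRESENT
#   assert table.is_output({'in_port': 1, 'eth_dst': '0e:00:00:00:00:01'},
#                          port=2, vid=tagged_vid)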
class FlowMod:
"""Represents a flow modification message and its corresponding entry in
the flow table.
"""
MAC_MATCH_FIELDS = (
'eth_src', 'eth_dst', 'arp_sha', 'arp_tha', 'ipv6_nd_sll',
'ipv6_nd_tll'
)
IPV4_MATCH_FIELDS = ('ipv4_src', 'ipv4_dst', 'arp_spa', 'arp_tpa')
IPV6_MATCH_FIELDS = ('ipv6_src', 'ipv6_dst', 'ipv6_nd_target')
    HEX_FIELDS = ('eth_type',)
def __init__(self, flowmod):
"""flowmod is a ryu flow modification message object"""
self.priority = flowmod.priority
self.cookie = flowmod.cookie
self.instructions = flowmod.instructions
self.validate_instructions()
self.match_values = {}
self.match_masks = {}
self.out_port = None
# flowmod can be an OFPFlowMod or an OFPStats
if isinstance(flowmod, parser.OFPFlowMod):
if flowmod.command in (ofp.OFPFC_DELETE, ofp.OFPFC_DELETE_STRICT)\
and flowmod.out_port != ofp.OFPP_ANY:
self.out_port = flowmod.out_port
for key, val in flowmod.match.items():
if isinstance(val, tuple):
val, mask = val
else:
mask = -1
mask = self.match_to_bits(key, mask)
val = self.match_to_bits(key, val) & mask
self.match_values[key] = val
self.match_masks[key] = mask
def validate_instructions(self):
instruction_types = set()
for instruction in self.instructions:
if instruction.type in instruction_types:
raise FakeOFTableException(
'FlowMod with Multiple instructions of the '
'same type: {}'.format(self.instructions))
instruction_types.add(instruction.type)
def out_port_matches(self, other):
"""returns True if other has an output action to this flowmods
output_port"""
if self.out_port is None or self.out_port == ofp.OFPP_ANY:
return True
for instruction in other.instructions:
if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
for action in instruction.actions:
if action.type == ofp.OFPAT_OUTPUT:
if action.port == self.out_port:
return True
return False
def pkt_matches(self, pkt_dict):
"""returns True if pkt_dict matches this flow table entry.
args:
pkt_dict - a dictionary keyed by flow table match fields with
values
if an element is included in the flow table entry match fields but not
        in the pkt_dict, that is assumed to indicate a failed match
"""
# TODO: add cookie and out_group
for key, val in self.match_values.items():
if key not in pkt_dict:
return False
val_bits = self.match_to_bits(key, pkt_dict[key])
if val_bits != (val & self.match_masks[key]):
return False
return True
def _matches_match(self, other):
return (self.priority == other.priority
and self.match_values == other.match_values
and self.match_masks == other.match_masks)
def fte_matches(self, other, strict=False):
"""returns True if the flow table entry other matches this flowmod.
used for finding existing flow table entries that match with this
flowmod.
args:
other - a flowmod object
strict (bool) - whether to use strict matching (as defined in
of1.3 specification section 6.4)
"""
if not self.out_port_matches(other):
return False
if strict:
return self._matches_match(other)
for key, val in self.match_values.items():
if key not in other.match_values:
return False
if other.match_values[key] & self.match_masks[key] != val:
return False
return True
def overlaps(self, other):
""" returns True if any packet can match both self and other."""
# This is different from the matches method as matches assumes an
# undefined field is a failed match. In this case an undefined field is
# potentially an overlap and therefore is considered success
if other.priority != self.priority:
return False
for key, val in self.match_values.items():
if key in other.match_values:
if val & other.match_masks[key] != other.match_values[key]:
return False
if other.match_values[key] & self.match_masks[key] != val:
return False
return True
def match_to_bits(self, key, val):
"""convert match fields and masks to bits objects.
this allows for masked matching. Converting all match fields to the
same object simplifies things (eg __str__).
"""
if isinstance(val, Bits):
return val
def _val_to_bits(conv, val, length):
if val == -1:
return Bits(int=-1, length=length)
return Bits(bytes=conv(val), length=length)
if key in self.MAC_MATCH_FIELDS:
return _val_to_bits(addrconv.mac.text_to_bin, val, 48)
if key in self.IPV4_MATCH_FIELDS:
return _val_to_bits(addrconv.ipv4.text_to_bin, val, 32)
if key in self.IPV6_MATCH_FIELDS:
return _val_to_bits(addrconv.ipv6.text_to_bin, val, 128)
return Bits(int=int(val), length=64)
def bits_to_str(self, key, val):
if key in self.MAC_MATCH_FIELDS:
result = addrconv.mac.bin_to_text(val.tobytes())
elif key in self.IPV4_MATCH_FIELDS:
result = addrconv.ipv4.bin_to_text(val.tobytes())
elif key in self.IPV6_MATCH_FIELDS:
result = addrconv.ipv6.bin_to_text(val.tobytes())
elif key in self.HEX_FIELDS:
result = str(val.hex.lstrip('0'))
else:
result = str(val.int)
return result
def __lt__(self, other):
return self.priority < other.priority
def __eq__(self, other):
return (self._matches_match(other)
and self.out_port == other.out_port
and self.instructions == other.instructions)
def __hash__(self):
return hash((
self.priority,
self.match_values,
self.match_masks,
self.out_port,
self.instructions,
))
def _pretty_field_str(self, key, value, mask=None):
mask_str = ""
value_int = value
mask_int = mask
if isinstance(value, Bits):
value_int = value.int
            if isinstance(mask, Bits):
                mask_int = mask.int  # pytype: disable=attribute-error
        if mask is None:
            mask_int = -1
if key == 'vlan_vid':
if value_int & ofp.OFPVID_PRESENT == 0:
result = 'vlan untagged'
            elif mask_int == ofp.OFPVID_PRESENT:
result = 'vlan tagged'
else:
result = str(value_int ^ ofp.OFPVID_PRESENT)
if mask_int != -1:
mask_str = str(mask_int ^ ofp.OFPVID_PRESENT)
elif isinstance(value, Bits):
result = self.bits_to_str(key, value)
if mask is not None and mask_int != -1:
mask_str = self.bits_to_str(key, mask)
elif isinstance(value, str):
result = value
if mask is not None:
mask_str = mask
elif isinstance(value, int):
if key in self.HEX_FIELDS:
result = hex(value)
if mask is not None and mask != -1:
mask_str = hex(mask)
else:
result = str(value)
if mask is not None and mask != -1:
mask_str = str(mask)
if mask_str:
result += "/{}".format(mask_str)
return result
def _pretty_action_str(self, action):
actions_names_attrs = {
parser.OFPActionPushVlan.__name__: ('push_vlan', 'ethertype'),
parser.OFPActionPopVlan.__name__: ('pop_vlan', None),
parser.OFPActionGroup.__name__: ('group', 'group_id'),
parser.OFPActionDecNwTtl.__name__: ('dec_nw_ttl', None)}
value = None
if isinstance(action, parser.OFPActionOutput):
name = 'output'
if action.port == CONTROLLER_PORT:
value = 'CONTROLLER'
elif action.port == IN_PORT:
value = 'IN_PORT'
else:
value = str(action.port)
elif isinstance(action, parser.OFPActionSetField):
name = 'set_{}'.format(action.key)
value = self._pretty_field_str(action.key, action.value)
else:
name, attr = actions_names_attrs[type(action).__name__]
if attr:
value = getattr(action, attr)
result = name
if value:
result += " {}".format(value)
return result
def __str__(self):
result = 'Priority: {0} | Match: '.format(self.priority)
for key in sorted(self.match_values.keys()):
val = self.match_values[key]
mask = self.match_masks[key]
result += " {} {},".format(
key, self._pretty_field_str(key, val, mask))
result = result.rstrip(',')
result += " | Instructions :"
if not self.instructions:
result += ' drop'
for instruction in self.instructions:
if isinstance(instruction, parser.OFPInstructionGotoTable):
result += ' goto {}'.format(instruction.table_id)
elif isinstance(instruction, parser.OFPInstructionActions):
for action in instruction.actions:
result += " {},".format(self._pretty_action_str(action))
else:
result += str(instruction)
result = result.rstrip(',')
return result
def __repr__(self):
string = 'priority: {0} cookie: {1}'.format(self.priority, self.cookie)
for key in sorted(self.match_values.keys()):
mask = self.match_masks[key]
string += ' {0}: {1}'.format(key, self.match_values[key])
if mask.int != -1: # pytype: disable=attribute-error
string += '/{0}'.format(mask)
string += ' Instructions: {0}'.format(str(self.instructions))
return string
class FakeRyuDp: # pylint: disable=too-few-public-methods
"""Fake ryu Datapath object.
Just needed to provide a parser to allow us to extract ryu objects from
JSON
"""
def __init__(self):
"""Create fake ryu DP"""
self.ofproto_parser = parser
def parse_print_args():
"""Parse arguments for the print command"""
arg_parser = argparse.ArgumentParser(
prog='fakeoftable',
description='Prints a JSON flow table in a human readable format',
usage="""
Print a flow table in a human readable format
{argv0} print -f FILE
""".format(argv0=sys.argv[0])
)
arg_parser.add_argument(
'-f',
'--file',
help='file containing an OFPFlowStatsReply message in JSON format'
)
args = arg_parser.parse_args(sys.argv[2:])
return {'filename': args.file}
def parse_probe_args():
"""Parse arguments for the probe command"""
arg_parser = argparse.ArgumentParser(
prog='fakeoftable',
description='Performs a packet lookup on a JSON openflow table',
usage="""
Find the flow table entries in a given flow table that match a given packet
{argv0} probe -f FILE -p PACKET_STRING
""".format(argv0=sys.argv[0])
)
arg_parser.add_argument(
'-p',
'--packet',
metavar='PACKET_STRING',
help=(
'''string representation of a packet dictionary eg. '''
'''"{'in_port': 1, 'eth_dst': '01:80:c2:00:00:02', 'eth_type': '''
'''34825}"''')
)
arg_parser.add_argument(
'-f',
'--file',
metavar='FILE',
help='file containing an OFPFlowStatsReply message in JSON format'
)
args = arg_parser.parse_args(sys.argv[2:])
    packet = ast.literal_eval(args.packet)
# fix vlan vid
if 'vlan_vid' in packet:
packet['vlan_vid'] |= ofp.OFPVID_PRESENT
return {'packet': packet, 'filename': args.file}
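# Example invocations (a sketch; assumes this module is saved as fakeoftable.py
# and FILE contains an OFPFlowStatsReply dumped as ryu JSON):
#   python fakeoftable.py print -f flows.json
#   python fakeoftable.py probe -f flows.json \
#       -p "{'in_port': 1, 'eth_dst': '01:80:c2:00:00:02', 'eth_type': 34825}"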
def parse_args():
"""parse arguments"""
arg_parser = argparse.ArgumentParser(
prog='fakeoftable',
description='Performs operations on JSON openflow tables',
usage="""
{argv0} <command> <args>
""".format(argv0=sys.argv[0])
)
arg_parser.add_argument(
'command',
help='Subcommand, either "print" or "probe"'
)
args = arg_parser.parse_args(sys.argv[1:2])
    try:
        if args.command == 'probe':
            command_args = parse_probe_args()
        elif args.command == 'print':
            command_args = parse_print_args()
        else:
            raise ValueError('unknown command: %s' % args.command)
except (KeyError, IndexError, ValueError, AttributeError) as err:
print(err)
arg_parser.print_help()
sys.exit(-1)
return (args.command, command_args)
def _print(filename, **_kwargs):
"""Prints the JSON flow table from a file in a human readable format"""
with open(filename, 'r') as file_handle:
msg = json.load(file_handle)
datapath = FakeRyuDp()
ofmsg = ofp_parser.ofp_msg_from_jsondict(datapath, msg)
table = FakeOFTable(1)
table.apply_ofmsgs([ofmsg])
print(table)
def probe(filename, packet):
"""Prints the actions applied to packet by the table from the file"""
with open(filename, 'r') as file_handle:
msg = json.load(file_handle)
datapath = FakeRyuDp()
ofmsg = ofp_parser.ofp_msg_from_jsondict(datapath, msg)
table = FakeOFTable(1)
table.apply_ofmsgs([ofmsg])
instructions, out_packet = table.lookup(packet)
print(packet)
for instruction in instructions:
print(instruction)
print(out_packet)
def main():
command, kwargs = parse_args()
if command == 'probe':
probe(**kwargs)
elif command == 'print':
_print(**kwargs)
if __name__ == '__main__':
main()
|
|
# Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The instance interfaces extension."""
import netaddr
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova import compute
from nova import exception
from nova.i18n import _
from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'attach_interfaces')
def _translate_interface_attachment_view(port_info):
"""Maps keys for interface attachment details view."""
return {
'net_id': port_info['network_id'],
'port_id': port_info['id'],
'mac_addr': port_info['mac_address'],
'port_state': port_info['status'],
'fixed_ips': port_info.get('fixed_ips', None),
}
class InterfaceAttachmentController(object):
"""The interface attachment API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(InterfaceAttachmentController, self).__init__()
def index(self, req, server_id):
"""Returns the list of interface attachments for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_interface_attachment_view)
def show(self, req, server_id, id):
"""Return data about the given interface attachment."""
context = req.environ['nova.context']
authorize(context)
port_id = id
# NOTE(mriedem): We need to verify the instance actually exists from
# the server_id even though we're not using the instance for anything,
# just the port id.
common.get_instance(self.compute_api, context, server_id)
try:
port_info = self.network_api.show_port(context, port_id)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.Forbidden as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if port_info['port']['device_id'] != server_id:
msg = _("Instance %(instance)s does not have a port with id"
"%(port)s") % {'instance': server_id, 'port': port_id}
raise exc.HTTPNotFound(explanation=msg)
return {'interfaceAttachment': _translate_interface_attachment_view(
port_info['port'])}
def create(self, req, server_id, body):
"""Attach an interface to an instance."""
context = req.environ['nova.context']
authorize(context)
network_id = None
port_id = None
req_ip = None
if body:
attachment = body['interfaceAttachment']
network_id = attachment.get('net_id', None)
port_id = attachment.get('port_id', None)
try:
req_ip = attachment['fixed_ips'][0]['ip_address']
except Exception:
pass
if network_id and port_id:
msg = _("Must not input both network_id and port_id")
raise exc.HTTPBadRequest(explanation=msg)
if req_ip and not network_id:
msg = _("Must input network_id when request IP address")
raise exc.HTTPBadRequest(explanation=msg)
if req_ip:
try:
netaddr.IPAddress(req_ip)
except netaddr.AddrFormatError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
try:
instance = common.get_instance(self.compute_api,
context, server_id,
want_objects=True)
LOG.audit(_("Attach interface"), instance=instance)
vif = self.compute_api.attach_interface(context,
instance, network_id, port_id, req_ip)
except (exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.PortInUse,
exception.NetworkDuplicated,
exception.NetworkAmbiguous,
exception.NetworkNotFound) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Network driver does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.InterfaceAttachFailed as e:
LOG.exception(e)
msg = _("Failed to attach interface")
raise webob.exc.HTTPInternalServerError(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_interface', server_id)
return self.show(req, server_id, vif['id'])
def delete(self, req, server_id, id):
"""Detach an interface from an instance."""
context = req.environ['nova.context']
authorize(context)
port_id = id
instance = common.get_instance(self.compute_api,
context, server_id,
want_objects=True)
LOG.audit(_("Detach interface %s"), port_id, instance=instance)
try:
self.compute_api.detach_interface(context,
instance, port_id=port_id)
except exception.PortNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Network driver does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_interface', server_id)
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
results = []
search_opts = {'device_id': instance.uuid}
try:
data = self.network_api.list_ports(context, **search_opts)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Network driver does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
ports = data.get('ports', [])
results = [entity_maker(port) for port in ports]
return {'interfaceAttachments': results}
class Attach_interfaces(extensions.ExtensionDescriptor):
"""Attach interface support."""
name = "AttachInterfaces"
alias = "os-attach-interfaces"
namespace = "http://docs.openstack.org/compute/ext/interfaces/api/v1.1"
updated = "2012-07-22T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-interface',
InterfaceAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
return resources
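# Request sketch for this extension (illustrative only; assumes the standard v2
# compute endpoint and a made-up network UUID):
#   POST /v2/{tenant_id}/servers/{server_id}/os-interface
#   {"interfaceAttachment": {"net_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7"}}
#   GET  /v2/{tenant_id}/servers/{server_id}/os-interface   # list attachments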
|
|
#!/usr/bin/env python
# * coding: utf8 *
'''
datatable.py
A module that reads the stewardship spreadsheet and builds the sgid index page
'''
from collections import OrderedDict
from os import rename
from os.path import dirname, join
import re
import pygsheets
#: from pydash.strings import start_case
def munge_data(item, i, indices):
names = item[indices['layer']].split('.')
if len(names) == 1:
category = None
name = names[0]
elif len(names) == 2:
category = names[0].lower()
name = names[1]
elif len(names) == 3:
category = names[0].lower()
name = '.'.join([names[1], names[2]])
def utf8_encode(value):
if value is None:
return ''
return value
def should_link(value):
if value is None or len(value) == 0:
return ''
url = item[indices['url']]
if url is None or len(url) == 0 or url == 'N/A':
return utf8_encode(value)
if url.lower().startswith('http') or url.lower().startswith('ftp'):
return f'<a href="{url}">{utf8_encode(value)}</a>'
anchor_tag = item[indices['anchor']]
if anchor_tag is None or len(anchor_tag) == 0:
return f'<a href="{{% link {url} %}}">{utf8_encode(value)}</a>'
return f'<a href="{{% link {url} %}}#{anchor_tag}">{utf8_encode(value)}</a>'
def endpoint_link(value):
if value is None or len(value) == 0:
return ''
if ',' in value:
value = value.split(',')
return ''.join([f'<a href="{v}" class="pull-right">{{% include fa_icon.html api=true class="svg-inline--fa fa-w-20 fa-fw" %}}</i></a>' for v in value])
return f'<a href="{value}" class="pull-right">{{% include fa_icon.html api=true class="svg-inline--fa fa-w-20 fa-fw" %}}</i></a>'
def webapp_link(value):
if value is None or len(value) == 0:
return ''
if ',' in value:
value = value.split(',')
return ''.join([f'<a href="{v.strip()}" class="pull-right">{{% include fa_icon.html globe=true class="svg-inline--fa fa-w-16 fa-fw" %}}</a>' for v in value])
return f'<a href="{value.strip()}" class="pull-right">{{% include fa_icon.html globe=true class="svg-inline--fa fa-w-16 fa-fw" %}}</a>'
def booleanize(value):
if value is None or len(value) == 0:
return False
return True
return OrderedDict([
('category', utf8_encode(category)),
('name', should_link(start_case(name.replace('_', ' ')))),
('deprecated', booleanize(item[indices['deprecated']])),
('agency', utf8_encode(item[indices['data_source']])),
('description', utf8_encode(item[indices['description']])),
('service', ''.join([endpoint_link(item[indices['endpoint']]), webapp_link(item[indices['web_app']])
]))
])
def get_sheet_data(gc, sheet_id, worksheet_id):
worksheet = gc.open_by_key(sheet_id).worksheet_by_title(worksheet_id)
data = worksheet.get_all_values(returnas='matrix')
header = data.pop(0)
indices = {
'layer': header.index('SGID Data Layer'),
'description': header.index('Description'),
'data_source': header.index('Data Source'),
'url': header.index('Website URL'),
'anchor': header.index('Anchor'),
'data_type': header.index('Data Type'),
'endpoint': header.index('Endpoint'),
'web_app': header.index('Webapp'),
'deprecated': header.index('Deprecated')
}
return [munge_data(item, i, indices) for i, item in enumerate(data)]
def create(data):
categories = list(set([x['category'] for x in data]))
categories.sort()
html = '''---
title: SGID Index
---
<h6 id='show_filters' class='pointer'><i class="fas fa-search"></i> Show Filters</h6>
<div id='filters' class='hidden'>{}</div>
<div class="muted text-center">Simple Filter (matches text in individual columns with <a href='https://github.com/javve/list.js'>list.js</a>)</div>
<div id='table' class='datatable'>
<input class="search" placeholder="Search SGID Index" />
<table>
'''.format(' | '.join(['<a id="filter_{0}" class="filter-item">{0}</a>'.format(x) for x in categories if len(x) > 0]))
once = True
for item in data:
if item['deprecated']:
continue
if once:
html += ''' <thead>
<tr>
{}
</tr>
</thead>
<tbody class='list'>'''.format('\n'.join(
[' <th scope="col"><span class="sort" data-sort="{0}">{0}</span></th>'.format(key) for key in item.keys() if key != 'deprecated']))
once = False
html += '''
<tr>
{}
</tr>'''.format('\n'.join([' <td data-th="{0}" class="{0}">{1}</td>'.format(key, value) for key, value in item.items() if key != 'deprecated']))
html += '''
</tbody>
</table>
</div>
<script src="{% link js/dist/list.min.js %}"></script>
'''
return html
UPPER = "[A-Z\\xC0-\\xD6\\xD8-\\xDE]"
LOWER = "[a-z\\xDf-\\xF6\\xF8-\\xFF]+"
RE_WORDS = "/{upper}+(?={upper}{lower})|{upper}?{lower}|{upper}+|[0-9]+/g".format(
upper=UPPER, lower=LOWER
)
def capitalize(text, strict=True):
return text.capitalize() if strict else text[:1].upper() + text[1:]
def start_case(text):
if text is None:
text = ""
return " ".join(capitalize(word, strict=False) for word in words(text))
def words(text, pattern=None):
return reg_exp_js_match(text, pattern or RE_WORDS)
def reg_exp_js_match(text, reg_exp):
return js_to_py_re_find(reg_exp)(text)
def js_to_py_re_find(reg_exp):
"""Return Python regular expression matching function based on Javascript style regexp."""
pattern, options = reg_exp[1:].rsplit("/", 1)
flags = re.I if "i" in options else 0
def find(text):
if "g" in options:
results = re.findall(pattern, text, flags=flags)
else:
results = re.search(pattern, text, flags=flags)
if results:
results = [results.group()]
else:
results = []
return results
return find
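# Behaviour sketch for the pydash-style helpers above (outputs worked out from
# the RE_WORDS pattern, not taken from pydash itself):
#   start_case('address_points')  # -> 'Address Points'
#   start_case('parcelsLIR')      # -> 'Parcels LIR'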
if __name__ == '__main__':
gc = pygsheets.authorize(service_account_env_var='GIS_DRIVE_API_CREDENTIALS')
data = get_sheet_data(gc, '11ASS7LnxgpnD0jN4utzklREgMf1pcvYjcXcIcESHweQ', 'SGID Stewardship Info')
data = [x for x in data if len(x['name'].strip()) > 0]
html = create(data)
file_path = join(dirname(__file__), '..', 'data', 'sgid-index', 'index.html')
    with open(file_path + '.bak', 'w', newline='\r\n') as output_file:
        output_file.write(html)
rename(file_path + '.bak', file_path)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used to unify various data providers under a single namespace."""
import logging
import gin
import gin.tf
import tensorflow as tf
from tf3d.datasets import rio # pylint: disable=g-bad-import-order
from tf3d.datasets import scannet_scene # pylint: disable=g-bad-import-order
from tf3d.datasets import waymo_object_per_frame # pylint: disable=g-bad-import-order
_DATASET_MAP = {
'rio': rio,
'scannet_scene': scannet_scene,
'waymo_object_per_frame': waymo_object_per_frame,
}
def get_file_pattern(dataset_name,
split_name,
dataset_format=None,
dataset_dir=None):
"""Returns the file pattern given the dataset name and split.
Args:
dataset_name: Dataset name.
split_name: A train/test split name.
dataset_format: A str of the dataset format.
dataset_dir: The base directory of the dataset sources.
Returns:
A string containing the file pattern.
"""
if dataset_dir is not None:
return _DATASET_MAP[dataset_name].get_file_pattern(
split_name=split_name,
dataset_dir=dataset_dir,
dataset_format=dataset_format)
else:
return _DATASET_MAP[dataset_name].get_file_pattern(split_name=split_name)
def get_decode_fn(dataset_name,
include_saved_predictions=False):
decoder_params = {}
if include_saved_predictions:
decoder_params['include_saved_predictions'] = include_saved_predictions
return _DATASET_MAP[dataset_name].get_decode_fn(**decoder_params)
def get_items_to_descriptions(dataset_name):
return _DATASET_MAP[dataset_name].ITEMS_TO_DESCRIPTIONS
def get_num_samples(dataset_name, split_name):
return _DATASET_MAP[dataset_name].SPLITS_TO_SIZES[split_name]
def _get_params(dataset_name):
params = {}
if _DATASET_MAP[dataset_name].IGNORE_LABEL is not None:
params['ignore_label'] = _DATASET_MAP[dataset_name].IGNORE_LABEL
if _DATASET_MAP[dataset_name].NUM_CLASSES is not None:
params['num_classes'] = _DATASET_MAP[dataset_name].NUM_CLASSES
return params
def _read_data(file_read_func, file_pattern, shuffle, num_readers,
filenames_shuffle_buffer_size, num_epochs, read_block_length,
shuffle_buffer_size):
"""Gets a dataset tuple.
Args:
file_read_func: Function to use in tf.contrib.data.parallel_interleave, to
read every individual file into a tf.data.Dataset.
file_pattern: A string containing a file pattern that corresponds to a set
of files containing the data.
shuffle: Whether data should be processed in the order they are read in, or
shuffled randomly.
num_readers: Number of file shards to read in parallel.
filenames_shuffle_buffer_size: Buffer size to be used when shuffling file
names.
num_epochs: The number of times a data source is read. If set to zero, the
data source will be reused indefinitely.
read_block_length: Number of records to read from each reader at once.
shuffle_buffer_size: Buffer size to be used when shuffling.
Returns:
A tf.data.Dataset.
"""
# Shard, shuffle, and read files.
dataset = tf.data.Dataset.list_files(
file_pattern=file_pattern, shuffle=shuffle)
if shuffle:
dataset = dataset.shuffle(filenames_shuffle_buffer_size)
elif num_readers > 1:
logging.warning('`shuffle` is false, but the input data stream is '
'still slightly shuffled since `num_readers` > 1.')
dataset = dataset.repeat(num_epochs or None)
records_dataset = dataset.interleave(
map_func=file_read_func,
cycle_length=num_readers,
block_length=read_block_length,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=shuffle)
if shuffle:
records_dataset = records_dataset.shuffle(shuffle_buffer_size)
return records_dataset
def tfrecord_read_fn(filename):
return tf.data.TFRecordDataset(filename).prefetch(1)
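# Usage sketch (a minimal sketch; assumes sharded TFRecord files exist at the
# given pattern):
#   records = _read_data(
#       file_read_func=tfrecord_read_fn,
#       file_pattern='/tmp/train-*.tfrecord',
#       shuffle=True,
#       num_readers=4,
#       filenames_shuffle_buffer_size=100,
#       num_epochs=1,
#       read_block_length=32,
#       shuffle_buffer_size=256)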
@gin.configurable(
'get_tf_data_decoder', denylist=['batch_size', 'is_training'])
def get_tf_data_decoder(dataset_format,
decode_fn,
file_pattern,
batch_size,
is_training,
preprocess_fn=None,
feature_keys=None,
label_keys=None,
num_readers=64,
filenames_shuffle_buffer_size=100,
num_epochs=0,
read_block_length=32,
shuffle_buffer_size=256,
num_parallel_batches=8,
num_prefetch_batches=2,
):
"""Reads a tf.data.Dataset given a decoder and outputs tensor dictionaries.
Args:
dataset_format: Currently 'tfexample' and 'recordio' are supported.
decode_fn: Decoder function.
file_pattern: A string containing the file pattern that represents the
sstable.
batch_size: Batch size.
is_training: Whether reading data in training mode or not. If in training,
data will be shuffled and if not it won't be shuffled. Also in training
preprocessing can act different than in eval time.
preprocess_fn: A function that preprocesses data.
feature_keys: Either None or a list[str] with keys in features.
label_keys: Either None or a list[str] with keys in labels.
num_readers: Number of file shards to read in parallel.
filenames_shuffle_buffer_size: Buffer size to be used when shuffling file
names.
num_epochs: The number of times a data source is read. If set to zero, the
data source will be reused indefinitely.
read_block_length: Number of records to read from each reader at once.
shuffle_buffer_size: Buffer size to be used when shuffling.
num_parallel_batches: Number of batches to produce in parallel. If this is
run on a 2x2 TPU set this to 8.
num_prefetch_batches: Number of batches to prefetch. Prefetch decouples
input pipeline and model so they can be pipelined resulting in higher
throughput. Set this to a small constant and increment linearly until the
improvements become marginal or you exceed your cpu memory budget. Setting
      this to -1 automatically tunes this value for you.
Returns:
Return a tf.data.dataset where each element is a dictionary with features
and labels; if not executing eagerly i.e. under tf1 environment, returns a
dictionary with features and labels instead.
"""
def _process_fn(key, value):
"""Sets up tf graph that decodes and preprocesses input."""
tensors_dict = decode_fn(value)
if preprocess_fn is None:
return tensors_dict
else:
output_keys = feature_keys + label_keys
return preprocess_fn(
inputs=tensors_dict, output_keys=output_keys, is_training=is_training)
if dataset_format == 'tfrecord':
read_fn = tfrecord_read_fn
else:
raise ValueError('Unknown dataset type')
# Read data
dataset = _read_data(
file_read_func=read_fn,
file_pattern=file_pattern,
num_readers=num_readers,
shuffle=is_training,
filenames_shuffle_buffer_size=filenames_shuffle_buffer_size,
num_epochs=num_epochs,
read_block_length=read_block_length,
shuffle_buffer_size=shuffle_buffer_size)
if dataset_format == 'tfrecord':
# insert dummy key to form (key, value pair)
dataset = dataset.map(lambda x: (None, x))
# Preprocess data
dataset_dict = tf.data.Dataset.batch(
dataset.map(
_process_fn, num_parallel_calls=num_parallel_batches),
batch_size=batch_size,
drop_remainder=True)
dataset_dict = dataset_dict.prefetch(num_prefetch_batches)
return dataset_dict
@gin.configurable(denylist=['batch_size', 'is_training'])
def get_tf_data_dataset(dataset_name,
split_name,
batch_size,
is_training,
preprocess_fn=None,
feature_keys=None,
label_keys=None,
num_readers=64,
filenames_shuffle_buffer_size=100,
num_epochs=0,
read_block_length=32,
shuffle_buffer_size=256,
num_parallel_batches=8,
num_prefetch_batches=2,
dataset_dir=None,
dataset_format=None):
"""Reads a tf.data.Dataset given a dataset name and split and outputs tensors.
Args:
dataset_name: Dataset name.
split_name: A train/test split name.
batch_size: Batch size.
is_training: Whether reading data in training mode or not. If in training,
data will be shuffled and if not it won't be shuffled. Also in training
preprocessing can act different than in eval time.
preprocess_fn: A function that preprocesses data.
feature_keys: Either None or a list[str] with keys in features.
label_keys: Either None or a list[str] with keys in labels.
num_readers: Number of file shards to read in parallel.
filenames_shuffle_buffer_size: Buffer size to be used when shuffling file
names.
num_epochs: The number of times a data source is read. If set to zero, the
data source will be reused indefinitely.
read_block_length: Number of records to read from each reader at once.
shuffle_buffer_size: Buffer size to be used when shuffling.
num_parallel_batches: Number of batches to produce in parallel. If this is
run on a 2x2 TPU set this to 8.
num_prefetch_batches: Number of batches to prefetch. Prefetch decouples
input pipeline and model so they can be pipelined resulting in higher
throughput. Set this to a small constant and increment linearly until the
improvements become marginal or you exceed your cpu memory budget. Setting
      this to -1 automatically tunes this value for you.
dataset_dir: The base directory of the dataset sources.
dataset_format: If not None, a str of dataset format, can be 'tfrecord',
'sstable' or 'recordio'.
Returns:
Return a tf.data.dataset where each element is a dictionary with features
and labels; if not executing eagerly i.e. under tf1 environment, returns a
dictionary with features and labels instead.
"""
if dataset_format is None:
dataset_format = _DATASET_MAP[dataset_name].DATASET_FORMAT
file_pattern = get_file_pattern(
dataset_name=dataset_name,
split_name=split_name,
dataset_dir=dataset_dir,
dataset_format=dataset_format)
decode_fn = get_decode_fn(dataset_name=dataset_name)
if feature_keys is None:
feature_keys = list(_DATASET_MAP[dataset_name].get_feature_keys())
if label_keys is None:
label_keys = list(_DATASET_MAP[dataset_name].get_label_keys())
return get_tf_data_decoder(
dataset_format=dataset_format,
decode_fn=decode_fn,
file_pattern=file_pattern,
batch_size=batch_size,
is_training=is_training,
preprocess_fn=preprocess_fn,
feature_keys=feature_keys,
label_keys=label_keys,
num_readers=num_readers,
filenames_shuffle_buffer_size=filenames_shuffle_buffer_size,
num_epochs=num_epochs,
read_block_length=read_block_length,
shuffle_buffer_size=shuffle_buffer_size,
num_parallel_batches=num_parallel_batches,
num_prefetch_batches=num_prefetch_batches)
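# Usage sketch (assumes the 'waymo_object_per_frame' TFRecords are staged under
# dataset_dir and any remaining gin bindings have been parsed):
#   dataset = get_tf_data_dataset(
#       dataset_name='waymo_object_per_frame',
#       split_name='train',
#       batch_size=2,
#       is_training=True,
#       dataset_dir='/data/waymo')
#   for example in dataset.take(1):
#       ...  # example is a dict of batched feature and label tensors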
|
|
# http://www.danvk.org/2015/01/09/extracting-text-from-an-image-using-ocropus.html
# for character level confidence
# https://github.com/tmbdev/ocropy/pull/25
import io
import json
import os
import subprocess
from wand.image import Image
import logging
from PIL import Image as PillowImage
from bs4 import BeautifulSoup
from pdfminer_transform import PdfminerTransform
from os.path import expanduser
import urllib
def tr_get_pdf_text(pdf_filename_absolute, pdfminer_folder):
LOGGER = logging.getLogger(__name__)
# processing_folder = os.path.dirname(pdf_filename_absolute)
pdf_filename = os.path.basename(pdf_filename_absolute)
transformer = PdfminerTransform()
text_filename = pdfminer_folder + "/" + pdf_filename + ".txt"
html_filename = pdfminer_folder + "/" + pdf_filename + ".html"
pdf_dict = transformer.transform_file(pdf_filename_absolute)
# LOGGER.debug(pdf_dict)
if "markup" in pdf_dict:
with io.open(html_filename, "w", encoding="utf8") as f:
f.write(pdf_dict["markup"]["innerHTML"])
with io.open(text_filename, "w", encoding="utf8") as f:
f.write(unicode(pdf_dict["markup"]["innerText"]))
else:
LOGGER.warn(("Pdfminer extraction failure.", pdf_dict["error"]))
def maketr_get_field_zones(zones, page_number):
def get_field_zones(image_filename, output_folder):
LOGGER = logging.getLogger(__name__)
LOGGER.info(image_filename)
image = PillowImage.open(image_filename)
for index, field_zone in enumerate(zones):
field_zone_folder = "{0}_{1}_{2}".format(output_folder, page_number, index)
if not os.path.exists(field_zone_folder):
os.makedirs(field_zone_folder)
zone_percent = field_zone["region"]
width, height = image.size
bbox = [
zone_percent[0] * width / 100,
zone_percent[1] * height / 100,
zone_percent[2] * width / 100,
zone_percent[3] * height / 100
]
zone_image = image.crop(bbox)
image_path = "{0}/get_field_zones.png".format(field_zone_folder)
zone_image.save(image_path)
field_zone["image"] = image_path
field_zone["text"] = ""
with io.open("{0}/get_field_zones.json".format(field_zone_folder), "w", encoding="utf8") as fh:
data = json.dumps(field_zone, ensure_ascii=False)
fh.write(unicode(data))
yield image_path
return get_field_zones
def tr_zxing(png_filename, output_folder):
libs = "extern/zxing/core-3.2.1.jar"
libs += ":extern/zxing/javase-3.2.1.jar"
libs += ":extern/zxing/jcommander-1.48.jar"
png_filename = os.path.abspath(png_filename)
png_filename = urllib.pathname2url(png_filename)
command_array = [
"java",
"-cp",
libs,
"com.google.zxing.client.j2se.CommandLineRunner",
png_filename,
"--try_harder"
# "--crop", "ileft", "top", "width", "height"
]
result = subprocess.check_output(command_array)
logging.debug(" ".join(command_array))
logging.info(result)
zxing_filename = "{}/tr_zxing.txt".format(output_folder)
with open(zxing_filename, "w") as fh:
fh.write(result)
return zxing_filename
def tr_cuneiform_txt(png_filename, output_folder):
LOGGER = logging.getLogger(__name__)
output_filename_absolute = output_folder + "/tr_cuneiform_txt.txt"
out = subprocess.check_output([
"cuneiform",
"-o", output_filename_absolute,
png_filename
])
LOGGER.debug(out)
return output_filename_absolute
def tr_cuneiform_hocr(png_filename, output_folder):
LOGGER = logging.getLogger(__name__)
output_filename_absolute = output_folder + "/tr_cuneiform_hocr.html"
out = subprocess.check_output([
"cuneiform",
"-f", "hocr",
"-o", output_filename_absolute,
png_filename
])
LOGGER.debug(out)
return output_filename_absolute
def tr_threshold(png_filename, output_folder):
LOGGER = logging.getLogger(__name__)
out = subprocess.check_output([
"ocropus-nlbin",
png_filename,
"-o", output_folder
])
LOGGER.debug(out)
return os.path.join(output_folder, "0001.bin.png")
def tr_png(local_document_path, output_folder):
with Image(
filename=local_document_path,
resolution=200) as img:
img.compression_quality = 100
basename = os.path.basename(local_document_path)
out_filename = os.path.join(output_folder, basename + ".png")
img.save(filename=out_filename)
if len(img.sequence) > 1:
logging.info("multipage {}".format(local_document_path))
return [os.path.join(output_folder, basename + "-" + str(index) + ".png") for index, x in enumerate(img.sequence)]
return out_filename
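# Pipeline sketch (assumes a single-page PDF and that the external tools used
# above -- ImageMagick/wand, ocropus, cuneiform, tesseract -- are installed):
#   png = tr_png("/tmp/doc.pdf", "/tmp/out")           # rasterise at 200 dpi
#   binarized = tr_threshold(png, "/tmp/out")          # ocropus-nlbin binarisation
#   hocr = tr_tesseract_hocr(binarized, "/tmp/out")    # word-level hOCR boxes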
def tr_tesseract_txt(png_absolutepath, output_folder):
LOGGER = logging.getLogger(__name__)
hocr_filename = os.path.join(output_folder, "tr_tesseract_txt")
out = subprocess.check_output([
"tesseract",
png_absolutepath,
hocr_filename
])
LOGGER.debug(out)
return hocr_filename + ".txt"
def tr_tesseract_hocr(png_absolutepath, output_folder):
LOGGER = logging.getLogger(__name__)
hocr_filename = os.path.join(output_folder, "recognition_tesseract")
out = subprocess.check_output([
"tesseract",
png_absolutepath,
hocr_filename,
"hocr"
])
LOGGER.debug(out)
return hocr_filename + ".hocr"
def tr_ocropus_words(png_filename, output_folder):
LOGGER = logging.getLogger(__name__)
processing_folder = os.path.dirname(png_filename)
# layout analysis
out = subprocess.check_output([
"ocropus-gpageseg",
png_filename
])
# predict
LOGGER.debug(out)
out = subprocess.check_output([
"ocropus-rpred",
"-m", "en-default.pyrnn.gz",
os.path.join(processing_folder, "0001/*.png")
])
LOGGER.debug(out)
def tr_ocropus_hocr(png_filename, output_folder):
LOGGER = logging.getLogger(__name__)
# layout analysis
out = subprocess.check_output([
"ocropus-gpageseg",
png_filename
])
LOGGER.debug(out)
hocr_filename_absolute = os.path.join(output_folder, "tr_ocropus_hocr.html")
out = subprocess.check_output([
"ocropus-hocr",
png_filename,
"-o", hocr_filename_absolute
])
LOGGER.debug(out)
return hocr_filename_absolute
def read_hocr_tesseract(soup, image):
# hocr actually differs wildly between engines in features used and format
# TODO with ocropus the attribute is ocr_word, with tesseract it's ocrx_word
# a few other things seemed to change
# so much for standards
for index, word in enumerate(soup.find_all("span", "ocrx_word")):
logging.info(word)
# bbox = [int(x) for x in word["title"].split()[1:]]
bbox = [int(x.replace(";", "")) for x in word["title"].split()[1:5]]
zone = image.crop(bbox)
# text = word.span.contents[0]
if len(word.contents) > 0:
# text = word.contents[0]
text = word.text
else:
text = ""
try:
text = text.replace("/", "")
region = {
"id": word["id"],
"image": word["id"] + ".bin.png",
"text": unicode(text),
"width": unicode(zone.size[0]),
"height": unicode(zone.size[1])
}
except Exception as e:
logging.error(e, exc_info=True)
yield zone, region
def get_hocr_zones(processing_folder, png_filename, engine="tesseract"):
image_filename = processing_folder + "/" + png_filename
logging.info(image_filename)
image = PillowImage.open(image_filename)
if engine == "tesseract":
engine_filename = engine + ".hocr"
else:
engine_filename = engine + ".hocr.html"
hocr_filename = "{0}/{1}/{2}".format(processing_folder, engine, engine_filename)
soup = BeautifulSoup(open(hocr_filename))
logging.info("opened " + hocr_filename)
logging.info(soup.getText())
regions = []
for zone, region in read_hocr_tesseract(soup, image):
regions.append(region)
# TODO page number folder
zone.save("{0}/{1}/{2}.bin.png".format(processing_folder, engine, region["id"]))
with io.open(
"{0}/{1}/{2}.txt".format(processing_folder, engine, region["id"]),
"w", encoding="utf8") as fh:
fh.write(region["text"])
with io.open(
"{0}/{1}/master.json".format(processing_folder, engine),
"w", encoding="utf8") as fh:
fh.write(u"var regions = \n")
fh.write(json.dumps(regions, ensure_ascii=False))
logging.info("Done")
def main():
pdf_folder = "/shared/projects/seeker/data/oldpdfjs/pdf"
processing_folder = "/shared/projects/seeker/data/oldpdfjs/ocropus"
# pdf_to_png(pdf_folder, processing_folder)
pngs = [f for f in os.listdir(processing_folder) if f.endswith(".png")]
for png_filename in pngs[:3]:
ocropus_png(processing_folder, png_filename, make_snippets=True)
# tesseract_png(processing_folder, png_filename)
|
|
import unittest
from hecuba import config
from hecuba.hdict import StorageDict
from hecuba.storageobj import StorageObj
class SObj_Basic(StorageObj):
'''
@ClassField attr1 int
@ClassField attr2 double
@ClassField attr3 str
'''
class SDict_SimpleTypeSpec(StorageDict):
'''
@TypeSpec dict<<id:int>, info:str>
'''
class SDict_ComplexTypeSpec(StorageDict):
'''
@TypeSpec dict<<id:int>, state:tests.withcassandra.storagedict_split_tests.SObj_Basic>
'''
class SObj_SimpleClassField(StorageObj):
'''
@ClassField attr1 int
@ClassField mydict dict<<key:str>, value:double>
@ClassField attr3 double
'''
class SObj_ComplexClassField(StorageObj):
'''
@ClassField attr1 int
@ClassField mydict dict<<key:str>, val:tests.withcassandra.storagedict_split_tests.SObj_Basic>
@ClassField attr3 double
'''
class StorageDictSplitTestbase(unittest.TestCase):
@classmethod
def setUpClass(cls):
config.session.execute("DROP KEYSPACE IF EXISTS my_app", timeout=60)
config.session.execute(
"CREATE KEYSPACE IF NOT EXISTS my_app WITH "
"replication = {'class': 'SimpleStrategy', 'replication_factor': 1};",
timeout=60)
@classmethod
def tearDownClass(cls):
config.session.execute("DROP KEYSPACE IF EXISTS my_app", timeout=60)
def test_simple_iterkeys_split(self):
config.session.execute(
"CREATE TABLE IF NOT EXISTS my_app.tab30(position int, value text, PRIMARY KEY(position))")
tablename = "tab30"
pd = StorageDict(tablename,
[('position', 'int')],
[('value', 'text')])
num_inserts = 10000
what_should_be = set()
for i in range(num_inserts):
pd[i] = 'ciao' + str(i)
what_should_be.add(i)
count = 0
res = set()
for partition in pd.split():
for val in partition.keys():
res.add(val)
count += 1
self.assertEqual(count, num_inserts)
self.assertEqual(what_should_be, res)
count, = config.session.execute('SELECT count(*) FROM my_app.tab30')[0]
self.assertEqual(count, num_inserts)
pd.delete_persistent()
def test_remote_build_iterkeys_split(self):
config.session.execute(
"CREATE TABLE IF NOT EXISTS my_app.tab_b0(position int, value text, PRIMARY KEY(position))")
tablename = "tab_b0"
pd = StorageDict(tablename,
[('position', 'int')],
[('value', 'text')])
num_inserts = 10000
what_should_be = set()
for i in range(num_inserts):
pd[i] = 'ciao' + str(i)
what_should_be.add(i)
pd = StorageDict(tablename,
[('position', 'int')],
[('value', 'text')])
count = 0
res = set()
for partition in pd.split():
id = partition.storage_id
from storage.api import getByID
rebuild = getByID(id)
for val in rebuild.keys():
res.add(val)
count += 1
self.assertEqual(count, num_inserts)
self.assertEqual(what_should_be, res)
count, = config.session.execute('SELECT count(*) FROM my_app.tab_b0')[0]
self.assertEqual(count, num_inserts)
pd.delete_persistent()
def test_composed_iteritems(self):
config.session.execute(
"CREATE TABLE IF NOT EXISTS my_app.tab_b1(pid int,time int, value text,x float,y float,z float, PRIMARY KEY(pid,time))")
tablename = "tab_b1"
pd = StorageDict(tablename,
[('pid', 'int'), ('time', 'int')],
[('value', 'text'), ('x', 'double'), ('y', 'double'), ('z', 'double')])
num_inserts = 10000
what_should_be = {}
for i in range(num_inserts):
pd[i, i + 100] = ['ciao' + str(i), i * 0.1, i * 0.2, i * 0.3]
what_should_be[i, i + 100] = ('ciao' + str(i), i * 0.1, i * 0.2, i * 0.3)
count = 0
res = {}
for partition in pd.split():
for key, val in partition.items():
res[key] = val
count += 1
self.assertEqual(count, num_inserts)
count, = config.session.execute('SELECT count(*) FROM my_app.tab_b1')[0]
self.assertEqual(count, num_inserts)
delta = 0.0001
for i in range(num_inserts):
a = what_should_be[i, i + 100]
b = res[i, i + 100]
self.assertEqual(a[0], b.value)
            self.assertAlmostEqual(a[1], b.x, delta=delta)
            self.assertAlmostEqual(a[2], b.y, delta=delta)
            self.assertAlmostEqual(a[3], b.z, delta=delta)
pd.delete_persistent()
def computeItems(self, SDict):
counter = 0
for item in SDict.keys():
counter = counter + 1
# self.assertEqual(counter, expected)
return counter
def test_split_type_spec_basic(self):
nitems = 1000
mybook = SDict_SimpleTypeSpec("test_records")
for id in range(0, nitems):
mybook[id] = 'someRandomText' + str(id)
del mybook
import gc
gc.collect()
# verify all data has been written
myotherbook = SDict_SimpleTypeSpec("test_records")
self.assertEqual(nitems, self.computeItems(myotherbook))
# we don't want anything in memory
del myotherbook
myfinalbook = SDict_SimpleTypeSpec("test_records")
# split the dict and assert all the dicts generated contain the expected data
acc = 0
nsplits = 0
for b in myfinalbook.split(): # this split fails
acc = acc + self.computeItems(b)
nsplits = nsplits + 1
self.assertEqual(acc, nitems)
myfinalbook.delete_persistent()
def test_split_type_spec_complex(self):
config.session.execute("DROP TABLE IF EXISTS my_app.SObj_ComplexClassField")
nitems = 10
mybook = SDict_ComplexTypeSpec("experimentx")
for id in range(0, nitems):
mybook[id] = SObj_Basic()
mybook[id].attr1 = id
mybook[id].attr2 = id / nitems
mybook[id].attr3 = "basicobj" + str(id)
del mybook
# verify all data has been written
myotherbook = SDict_ComplexTypeSpec("experimentx")
self.assertEqual(nitems, self.computeItems(myotherbook))
# we don't want anything in memory
del myotherbook
myfinalbook = SDict_ComplexTypeSpec("experimentx")
# split the dict and assert all the dicts generated contain the expected data
acc = 0
nsplits = 0
for b in myfinalbook.split(): # this split fails
acc = acc + self.computeItems(b)
nsplits = nsplits + 1
self.assertEqual(acc, nitems)
myfinalbook.delete_persistent()
def test_split_class_field_simple(self):
config.session.execute("DROP TABLE IF EXISTS my_app.SObj_SimpleClassField")
nitems = 80
mybook = SObj_SimpleClassField("so_split_dict_simple")
mybook.attr1 = nitems
mybook.attr3 = nitems / 100
for id in range(0, nitems):
key_text = 'so_split_dict_simple' + str(id)
mybook.mydict[key_text] = id / nitems
del mybook
# verify all data has been written
myotherbook = SObj_SimpleClassField("so_split_dict_simple")
self.assertEqual(nitems, self.computeItems(myotherbook.mydict))
# we don't want anything in memory
del myotherbook
myfinalbook = SObj_SimpleClassField("so_split_dict_simple")
# split the dict and assert all the dicts generated contain the expected data
acc = 0
nsplits = 0
for b in myfinalbook.mydict.split(): # this split fails
acc = acc + self.computeItems(b)
nsplits = nsplits + 1
self.assertEqual(acc, nitems)
myfinalbook.delete_persistent()
def test_split_class_field_complex(self):
nitems = 50
mybook = SObj_ComplexClassField("so_split_dict_complex")
mybook.attr1 = nitems
mybook.attr3 = nitems / 100
for id in range(0, nitems):
key_text = 'so_split_dict_simple' + str(id)
so = SObj_Basic()
so.attr1 = id
so.attr2 = id / nitems
so.attr3 = 'someInnerRandomText' + str(id)
mybook.mydict[key_text] = so
del mybook
# verify all data has been written
myotherbook = SObj_ComplexClassField("so_split_dict_complex")
self.assertEqual(nitems, self.computeItems(myotherbook.mydict))
# we don't want anything in memory
del myotherbook
myfinalbook = SObj_ComplexClassField("so_split_dict_complex")
# split the dict and assert all the dicts generated contain the expected data
acc = 0
nsplits = 0
for b in myfinalbook.mydict.split(): # this split fails
acc = acc + self.computeItems(b)
nsplits = nsplits + 1
self.assertEqual(acc, nitems)
myfinalbook.delete_persistent()
def test_len_on_split(self):
ninserts = 100
obj = SDict_SimpleTypeSpec("test_split_len")
for i in range(ninserts):
obj[i] = str(f"test_split_len{i}")
nin = len(obj)
count = 0
for chunk in obj.split():
count = count + len(chunk)
self.assertEqual(count, ninserts)
obj.delete_persistent()
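    # Note on the disabled test kept below: it exercises the same split()
    # machinery, but additionally rebuilds each partition remotely through
    # storage.api.getByID(partition.storage_id) and re-checks every stored
    # value, which is why it is preserved here as reference material.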
'''
def test_remote_build_composed_iteritems(self):
config.session.execute(
"CREATE TABLE IF NOT EXISTS my_app.tab_b2(pid int,time int, value text,x float,y float,z float, PRIMARY KEY(pid,time))")
tablename = "tab_b2"
pd = StorageDict(tablename,
[('pid', 'int'), ('time', 'int')],
[('value', 'text'), ('x', 'float'), ('y', 'float'), ('z', 'float')])
what_should_be = {}
for i in range(10000):
pd[i, i + 100] = ('ciao' + str(i), i * 0.1, i * 0.2, i * 0.3)
what_should_be[i, i + 100] = ('ciao' + str(i), i * 0.1, i * 0.2, i * 0.3)
del pd
count, = config.session.execute('SELECT count(*) FROM my_app.tab_b2')[0]
self.assertEqual(count, 10000)
pd = StorageDict(tablename,
[('pid', 'int'), ('time', 'int')],
[('value', 'text'), ('x', 'float'), ('y', 'float'), ('z', 'float')])
count = 0
res = {}
for partition in pd.split():
id = partition.storage_id
from storage.api import getByID
rebuild = getByID(id)
for key, val in rebuild.items():
res[key] = val
count += 1
self.assertEqual(count, 10000)
delta = 0.0001
for i in range(10000):
a = what_should_be[i, i + 100]
b = res[i, i + 100]
self.assertEqual(a[0], b.value)
self.assertAlmostEquals(a[1], b.x, delta=delta)
self.assertAlmostEquals(a[2], b.y, delta=delta)
self.assertAlmostEquals(a[3], b.z, delta=delta)
'''
class StorageDictSplitTestVnodes(StorageDictSplitTestbase):
@classmethod
def setUpClass(cls):
from hfetch import disconnectCassandra
disconnectCassandra()
from .. import test_config, set_ccm_cluster
test_config.ccm_cluster.clear()
set_ccm_cluster()
from .. import TEST_DEBUG
try:
test_config.ccm_cluster.populate(3, use_vnodes=True).start()
except Exception as ex:
if not TEST_DEBUG:
raise ex
import hfetch
import hecuba
import importlib
importlib.reload(hfetch)
import importlib
importlib.reload(hecuba)
config.session.execute("DROP KEYSPACE IF EXISTS my_app")
config.session.execute(
"CREATE KEYSPACE IF NOT EXISTS my_app WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
super(StorageDictSplitTestbase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
config.session.execute("DROP KEYSPACE IF EXISTS my_app")
from .. import test_config
from hfetch import disconnectCassandra
disconnectCassandra()
test_config.ccm_cluster.clear()
from .. import set_up_default_cassandra
set_up_default_cassandra()
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2013 Christian Schwede <christian.schwede@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nested
import json
import mock
import os
import random
import re
import string
import tempfile
import time
import unittest
import urlparse
from eventlet.green import urllib2
from six import StringIO
from swift.cli import recon
from swift.common import utils
from swift.common.ring import builder
class TestHelpers(unittest.TestCase):
def test_seconds2timeunit(self):
self.assertEqual(recon.seconds2timeunit(10), (10, 'seconds'))
self.assertEqual(recon.seconds2timeunit(600), (10, 'minutes'))
self.assertEqual(recon.seconds2timeunit(36000), (10, 'hours'))
self.assertEqual(recon.seconds2timeunit(60 * 60 * 24 * 10),
(10, 'days'))
def test_size_suffix(self):
self.assertEqual(recon.size_suffix(5 * 10 ** 2), '500 bytes')
self.assertEqual(recon.size_suffix(5 * 10 ** 3), '5 kB')
self.assertEqual(recon.size_suffix(5 * 10 ** 6), '5 MB')
self.assertEqual(recon.size_suffix(5 * 10 ** 9), '5 GB')
self.assertEqual(recon.size_suffix(5 * 10 ** 12), '5 TB')
self.assertEqual(recon.size_suffix(5 * 10 ** 15), '5 PB')
self.assertEqual(recon.size_suffix(5 * 10 ** 18), '5 EB')
self.assertEqual(recon.size_suffix(5 * 10 ** 21), '5 ZB')
class TestScout(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.scout_instance = recon.Scout("type", suppress_errors=True)
self.url = 'http://127.0.0.1:8080/recon/type'
self.server_type_url = 'http://127.0.0.1:8080/'
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_ok(self, mock_urlopen):
mock_urlopen.return_value.read = lambda: json.dumps([])
url, content, status = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertEqual(url, self.url)
self.assertEqual(content, [])
self.assertEqual(status, 200)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("")
url, content, status = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertTrue(isinstance(content, urllib2.URLError))
self.assertEqual(url, self.url)
self.assertEqual(status, -1)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError(
self.url, 404, "Internal error", None, None)
url, content, status = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertEqual(url, self.url)
self.assertTrue(isinstance(content, urllib2.HTTPError))
self.assertEqual(status, 404)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_ok(self, mock_urlopen):
def getheader(name):
d = {'Server': 'server-type'}
return d.get(name)
mock_urlopen.return_value.info.return_value.getheader = getheader
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertEqual(content, 'server-type')
self.assertEqual(status, 200)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("")
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertTrue(isinstance(content, urllib2.URLError))
self.assertEqual(url, self.server_type_url)
self.assertEqual(status, -1)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_server_type_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError(
self.server_type_url, 404, "Internal error", None, None)
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertTrue(isinstance(content, urllib2.HTTPError))
self.assertEqual(status, 404)
class TestRecon(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.recon_instance = recon.SwiftRecon()
self.swift_dir = tempfile.gettempdir()
self.ring_name = "test_object_%s" % (
''.join(random.choice(string.digits) for x in range(6)))
self.tmpfile_name = "%s/%s.ring.gz" % (self.swift_dir, self.ring_name)
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def tearDown(self, *_args, **_kwargs):
try:
os.remove(self.tmpfile_name)
except OSError:
pass
def test_gen_stats(self):
stats = self.recon_instance._gen_stats((1, 4, 10, None), 'Sample')
self.assertEqual(stats.get('name'), 'Sample')
self.assertEqual(stats.get('average'), 5.0)
self.assertEqual(stats.get('high'), 10)
self.assertEqual(stats.get('reported'), 3)
self.assertEqual(stats.get('low'), 1)
self.assertEqual(stats.get('total'), 15)
self.assertEqual(stats.get('number_none'), 1)
self.assertEqual(stats.get('perc_none'), 25.0)
def test_ptime(self):
with mock.patch('time.localtime') as mock_localtime:
mock_localtime.return_value = time.struct_time(
(2013, 12, 17, 10, 0, 0, 1, 351, 0))
timestamp = self.recon_instance._ptime(1387274400)
self.assertEqual(timestamp, "2013-12-17 10:00:00")
mock_localtime.assert_called_with(1387274400)
timestamp2 = self.recon_instance._ptime()
self.assertEqual(timestamp2, "2013-12-17 10:00:00")
mock_localtime.assert_called_with()
def test_get_devices(self):
ringbuilder = builder.RingBuilder(2, 3, 1)
ringbuilder.add_dev({'id': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000,
'device': 'sda1', 'region': 0})
ringbuilder.add_dev({'id': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001,
'device': 'sda1', 'region': 0})
ringbuilder.add_dev({'id': 2, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002,
'device': 'sda1', 'region': 1})
ringbuilder.add_dev({'id': 3, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003,
'device': 'sda1', 'region': 1})
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name)
ips = self.recon_instance.get_devices(
None, None, self.swift_dir, self.ring_name)
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_devices(
0, None, self.swift_dir, self.ring_name)
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001)]), ips)
ips = self.recon_instance.get_devices(
1, None, self.swift_dir, self.ring_name)
self.assertEqual(
set([('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_devices(
0, 0, self.swift_dir, self.ring_name)
self.assertEqual(set([('127.0.0.1', 10000)]), ips)
ips = self.recon_instance.get_devices(
1, 1, self.swift_dir, self.ring_name)
self.assertEqual(set([('127.0.0.1', 10003)]), ips)
def test_get_ringmd5(self):
for server_type in ('account', 'container', 'object', 'object-1'):
ring_name = '%s.ring.gz' % server_type
ring_file = os.path.join(self.swift_dir, ring_name)
open(ring_file, 'w')
empty_file_hash = 'd41d8cd98f00b204e9800998ecf8427e'
hosts = [("127.0.0.1", "8080")]
with mock.patch('swift.cli.recon.Scout') as mock_scout:
scout_instance = mock.MagicMock()
url = 'http://%s:%s/recon/ringmd5' % hosts[0]
response = {
'/etc/swift/account.ring.gz': empty_file_hash,
'/etc/swift/container.ring.gz': empty_file_hash,
'/etc/swift/object.ring.gz': empty_file_hash,
'/etc/swift/object-1.ring.gz': empty_file_hash,
}
status = 200
scout_instance.scout.return_value = (url, response, status)
mock_scout.return_value = scout_instance
stdout = StringIO()
mock_hash = mock.MagicMock()
patches = [
mock.patch('sys.stdout', new=stdout),
mock.patch('swift.cli.recon.md5', new=mock_hash),
]
with nested(*patches):
mock_hash.return_value.hexdigest.return_value = \
empty_file_hash
self.recon_instance.get_ringmd5(hosts, self.swift_dir)
output = stdout.getvalue()
expected = '1/1 hosts matched'
for line in output.splitlines():
if '!!' in line:
self.fail('Unexpected Error in output: %r' % line)
if expected in line:
break
else:
self.fail('Did not find expected substring %r '
'in output:\n%s' % (expected, output))
for ring in ('account', 'container', 'object', 'object-1'):
os.remove(os.path.join(self.swift_dir, "%s.ring.gz" % ring))
def test_quarantine_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
('127.0.0.1', 6030), ('127.0.0.1', 6040),
('127.0.0.1', 6050)]
# sample json response from http://<host>:<port>/recon/quarantined
responses = {6010: {'accounts': 0, 'containers': 0, 'objects': 1,
'policies': {'0': {'objects': 0},
'1': {'objects': 1}}},
6020: {'accounts': 1, 'containers': 1, 'objects': 3,
'policies': {'0': {'objects': 1},
'1': {'objects': 2}}},
6030: {'accounts': 2, 'containers': 2, 'objects': 5,
'policies': {'0': {'objects': 2},
'1': {'objects': 3}}},
6040: {'accounts': 3, 'containers': 3, 'objects': 7,
'policies': {'0': {'objects': 3},
'1': {'objects': 4}}},
# A server without storage policies enabled
6050: {'accounts': 0, 'containers': 0, 'objects': 4}}
# <low> <high> <avg> <total> <Failed> <no_result> <reported>
expected = {'objects_0': (0, 3, 1.5, 6, 0.0, 0, 4),
'objects_1': (1, 4, 2.5, 10, 0.0, 0, 4),
'objects': (1, 7, 4.0, 20, 0.0, 0, 5),
'accounts': (0, 3, 1.2, 6, 0.0, 0, 5),
'containers': (0, 3, 1.2, 6, 0.0, 0, 5)}
def mock_scout_quarantine(app, host):
url = 'http://%s:%s/recon/quarantined' % host
response = responses[host[1]]
status = 200
return url, response, status
stdout = StringIO()
patches = [
mock.patch('swift.cli.recon.Scout.scout', mock_scout_quarantine),
mock.patch('sys.stdout', new=stdout),
]
with nested(*patches):
self.recon_instance.quarantine_check(hosts)
output = stdout.getvalue()
        r = re.compile(r"\[quarantined_(.*)\](.*)")
for line in output.splitlines():
m = r.match(line)
if m:
ex = expected.pop(m.group(1))
self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s"
% ex)
self.assertFalse(expected)
def test_drive_audit_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
('127.0.0.1', 6030), ('127.0.0.1', 6040)]
# sample json response from http://<host>:<port>/recon/driveaudit
responses = {6010: {'drive_audit_errors': 15},
6020: {'drive_audit_errors': 0},
6030: {'drive_audit_errors': 257},
6040: {'drive_audit_errors': 56}}
# <low> <high> <avg> <total> <Failed> <no_result> <reported>
expected = (0, 257, 82.0, 328, 0.0, 0, 4)
def mock_scout_driveaudit(app, host):
url = 'http://%s:%s/recon/driveaudit' % host
response = responses[host[1]]
status = 200
return url, response, status
stdout = StringIO()
patches = [
mock.patch('swift.cli.recon.Scout.scout', mock_scout_driveaudit),
mock.patch('sys.stdout', new=stdout),
]
with nested(*patches):
self.recon_instance.driveaudit_check(hosts)
output = stdout.getvalue()
        r = re.compile(r"\[drive_audit_errors(.*)\](.*)")
lines = output.splitlines()
self.assertTrue(lines)
for line in lines:
m = r.match(line)
if m:
self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s"
% expected)
class TestReconCommands(unittest.TestCase):
def setUp(self):
self.recon = recon.SwiftRecon()
self.hosts = set([('127.0.0.1', 10000)])
def mock_responses(self, resps):
def fake_urlopen(url, timeout):
scheme, netloc, path, _, _, _ = urlparse.urlparse(url)
self.assertEqual(scheme, 'http') # can't handle anything else
self.assertTrue(path.startswith('/recon/'))
if ':' in netloc:
host, port = netloc.split(':', 1)
port = int(port)
else:
host = netloc
port = 80
response_body = resps[(host, port, path[7:])]
resp = mock.MagicMock()
resp.read = mock.MagicMock(side_effect=[response_body])
return resp
return mock.patch('eventlet.green.urllib2.urlopen', fake_urlopen)
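    # `resps` is keyed by (host, port, <path after '/recon/'>) tuples, e.g.
    # ('10.1.1.1', 10000, 'swiftconfmd5'); fake_urlopen strips the '/recon/'
    # prefix before the lookup, as exercised by test_get_swiftconfmd5 below.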
def test_server_type_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6011),
('127.0.0.1', 6012)]
# sample json response from http://<host>:<port>/
responses = {6010: 'object-server', 6011: 'container-server',
6012: 'account-server'}
def mock_scout_server_type(app, host):
url = 'http://%s:%s/' % (host[0], host[1])
response = responses[host[1]]
status = 200
return url, response, status
stdout = StringIO()
patches = [
mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type),
mock.patch('sys.stdout', new=stdout),
]
res_object = 'Invalid: http://127.0.0.1:6010/ is object-server'
res_container = 'Invalid: http://127.0.0.1:6011/ is container-server'
res_account = 'Invalid: http://127.0.0.1:6012/ is account-server'
valid = "1/1 hosts ok, 0 error[s] while checking hosts."
# Test for object server type - default
with nested(*patches):
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_container in output.splitlines())
self.assertTrue(res_account in output.splitlines())
stdout.truncate(0)
# Test ok for object server type - default
with nested(*patches):
self.recon.server_type_check([hosts[0]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
stdout.truncate(0)
# Test for account server type
with nested(*patches):
self.recon.server_type = 'account'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_container in output.splitlines())
self.assertTrue(res_object in output.splitlines())
stdout.truncate(0)
# Test ok for account server type
with nested(*patches):
self.recon.server_type = 'account'
self.recon.server_type_check([hosts[2]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
stdout.truncate(0)
# Test for container server type
with nested(*patches):
self.recon.server_type = 'container'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertTrue(res_account in output.splitlines())
self.assertTrue(res_object in output.splitlines())
stdout.truncate(0)
# Test ok for container server type
with nested(*patches):
self.recon.server_type = 'container'
self.recon.server_type_check([hosts[1]])
output = stdout.getvalue()
self.assertTrue(valid in output.splitlines())
def test_get_swiftconfmd5(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '729cf900f2876dead617d088ece7fe8c'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum})}
printed = []
with self.mock_responses(responses):
with mock.patch.object(self.recon, '_md5_file', lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertTrue("2/2 hosts matched" in output)
def test_get_swiftconfmd5_mismatch(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '29d5912b1fcfcc1066a7f51412769c1d'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': 'bogus'})}
printed = []
with self.mock_responses(responses):
with mock.patch.object(self.recon, '_md5_file', lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertTrue("1/2 hosts matched" in output)
self.assertTrue("http://10.2.2.2:10000/recon/swiftconfmd5 (bogus) "
"doesn't match on disk md5sum" in output)
def test_object_auditor_check(self):
# Recon middleware response from an object server
def dummy_request(*args, **kwargs):
values = {
'passes': 0, 'errors': 0, 'audit_time': 0,
'start_time': 0, 'quarantined': 0, 'bytes_processed': 0}
return [('http://127.0.0.1:6010/recon/auditor/object', {
'object_auditor_stats_ALL': values,
'object_auditor_stats_ZBF': values,
}, 200)]
response = {}
def catch_print(computed):
response[computed.get('name')] = computed
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
cli._print_stats = catch_print
cli.object_auditor_check([('127.0.0.1', 6010)])
# Now check that output contains all keys and names
keys = ['average', 'number_none', 'high',
'reported', 'low', 'total', 'perc_none']
names = [
'ALL_audit_time_last_path',
'ALL_quarantined_last_path',
'ALL_errors_last_path',
'ALL_passes_last_path',
'ALL_bytes_processed_last_path',
'ZBF_audit_time_last_path',
'ZBF_quarantined_last_path',
'ZBF_errors_last_path',
'ZBF_bytes_processed_last_path'
]
for name in names:
computed = response.get(name)
self.assertTrue(computed)
for key in keys:
self.assertTrue(key in computed)
def test_disk_usage(self):
def dummy_request(*args, **kwargs):
return [('http://127.0.0.1:6010/recon/diskusage', [
{"device": "sdb1", "mounted": True,
"avail": 10, "used": 90, "size": 100},
{"device": "sdc1", "mounted": True,
"avail": 15, "used": 85, "size": 100},
{"device": "sdd1", "mounted": True,
"avail": 15, "used": 85, "size": 100}],
200)]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('Distribution Graph:'),
mock.call(' 85% 2 **********************************' +
'***********************************'),
mock.call(' 90% 1 **********************************'),
mock.call('Disk usage: space used: 260 of 300'),
mock.call('Disk usage: space free: 40 of 300'),
mock.call('Disk usage: lowest: 85.0%, ' +
'highest: 90.0%, avg: 86.6666666667%'),
mock.call('=' * 79),
]
with mock.patch('__builtin__.print') as mock_print:
cli.disk_usage([('127.0.0.1', 6010)])
mock_print.assert_has_calls(default_calls)
with mock.patch('__builtin__.print') as mock_print:
expected_calls = default_calls + [
mock.call('LOWEST 5'),
mock.call('85.00% 127.0.0.1 sdc1'),
mock.call('85.00% 127.0.0.1 sdd1'),
mock.call('90.00% 127.0.0.1 sdb1')
]
cli.disk_usage([('127.0.0.1', 6010)], 0, 5)
mock_print.assert_has_calls(expected_calls)
with mock.patch('__builtin__.print') as mock_print:
expected_calls = default_calls + [
mock.call('TOP 5'),
mock.call('90.00% 127.0.0.1 sdb1'),
mock.call('85.00% 127.0.0.1 sdc1'),
mock.call('85.00% 127.0.0.1 sdd1')
]
cli.disk_usage([('127.0.0.1', 6010)], 5, 0)
mock_print.assert_has_calls(expected_calls)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_object_replication_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/replication/object',
{"object_replication_time": 61,
"object_replication_last": now},
200),
('http://127.0.0.1:6020/recon/replication/object',
{"object_replication_time": 23,
"object_replication_last": now},
200),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[replication_time] low: 23, high: 61, avg: 42.0, ' +
'total: 84, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6010.'),
mock.call('Most recent completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6010.'),
]
mock_now.return_value = now + 42
cli.object_replication_check([('127.0.0.1', 6010),
('127.0.0.1', 6020)])
mock_print.assert_has_calls(default_calls)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_replication_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6011/recon/replication/container',
{"replication_last": now,
"replication_stats": {
"no_change": 2, "rsync": 0, "success": 3, "failure": 1,
"attempted": 0, "ts_repl": 0, "remove": 0,
"remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 42},
200),
('http://127.0.0.1:6021/recon/replication/container',
{"replication_last": now,
"replication_stats": {
"no_change": 0, "rsync": 0, "success": 1, "failure": 0,
"attempted": 0, "ts_repl": 0, "remove": 0,
"remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 23},
200),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[replication_failure] low: 0, high: 1, avg: 0.5, ' +
'total: 1, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_success] low: 1, high: 3, avg: 2.0, ' +
'total: 4, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_time] low: 23, high: 42, avg: 32.5, ' +
'total: 65, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_attempted] low: 0, high: 0, avg: 0.0, ' +
'total: 0, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
mock.call('Most recent completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
]
mock_now.return_value = now + 42
cli.replication_check([('127.0.0.1', 6011), ('127.0.0.1', 6021)])
# We need any_order=True because the order of calls depends on the dict
        # that is returned from the recon middleware, so we can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_load_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/load',
{"1m": 0.2, "5m": 0.4, "15m": 0.25,
"processes": 10000, "tasks": "1/128"},
200),
('http://127.0.0.1:6020/recon/load',
{"1m": 0.4, "5m": 0.8, "15m": 0.75,
"processes": 9000, "tasks": "1/200"},
200),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[5m_load_avg] low: 0, high: 0, avg: 0.6, total: 1, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[15m_load_avg] low: 0, high: 0, avg: 0.5, total: 1, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[1m_load_avg] low: 0, high: 0, avg: 0.3, total: 0, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
]
mock_now.return_value = now + 42
cli.load_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
        # that is returned from the recon middleware, so we can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
"""Save the list of files matching pattern, so it is only computed once.
Args:
pattern: A file pattern (glob).
name: A name for the operations (optional).
Returns:
A variable that is initialized to the list of files matching pattern.
"""
with ops.op_scope([pattern], name, "matching_filenames") as name:
return variables.Variable(io_ops.matching_files(pattern), trainable=False,
name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Args:
tensor: Any `Tensor`.
num_epochs: An integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = variables.Variable(zero64, name="epochs")
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
name, summary_name):
if shuffle:
input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
input_tensor = limit_epochs(input_tensor, num_epochs)
q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
name=name)
enq = q.enqueue_many([input_tensor])
queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
summary_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
math_ops.cast(q.size(), dtypes.float32) *
(1. / capacity))
return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
seed=None, capacity=32, name=None):
"""Output strings (e.g. filenames) to a queue for an input pipeline.
Args:
string_tensor: A 1-D string tensor with the strings to produce.
num_epochs: An integer (optional). If specified, `string_input_producer`
produces each string from `string_tensor` `num_epochs` times before
generating an OutOfRange error. If not specified, `string_input_producer`
can cycle through the strings in `string_tensor` an unlimited number of
times.
shuffle: Boolean. If true, the strings are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A queue with the output strings. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([string_tensor], name, "input_producer") as name:
return _input_producer(
string_tensor, dtypes.string, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([limit], name, "input_producer") as name:
range_tensor = math_ops.range(limit)
return _input_producer(
range_tensor, dtypes.int32, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Args:
tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
      through the slices an unlimited number of times.
    shuffle: Boolean. If true, the slices are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A list of tensors, one for each element of `tensor_list`. If the tensor
    in `tensor_list` has shape `[N, a, b, ..., z]`, then the corresponding output
tensor will have shape `[a, b, ..., z]`.
"""
with ops.op_scope(tensor_list, name, "input_producer"):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = array_ops.shape(tensor_list[0])[0]
# TODO(josh11b): Add an assertion that the first dimension of
# everything in TensorList matches. Maybe just check the inferred shapes?
queue = range_input_producer(range_size, num_epochs=num_epochs,
shuffle=shuffle, seed=seed, capacity=capacity)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
def _validate(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in batch().")
return tensor_list
def _validate_join(tensor_list_list):
tensor_list_list = [ops.convert_n_to_tensor_or_indexed_slices(tl)
for tl in tensor_list_list]
if not tensor_list_list:
raise ValueError("Expected at least one input in batch_join().")
return tensor_list_list
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
", ".join(x.name for x in types),
", ".join(x.name for x in other_types))
return types
def _merge_shapes(shape_list, enqueue_many):
shape_list = [tensor_shape.as_shape(s) for s in shape_list]
if enqueue_many:
# We want the shapes without the leading batch dimension.
shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
merged_shape = shape_list[0]
for s in shape_list[1:]:
merged_shape.merge_with(s)
return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
if shapes is None:
l = len(tensor_list_list[0])
shapes = [_merge_shapes(
[tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
for i in xrange(l)]
return shapes
def _enqueue_join(queue, tensor_list_list, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tl) for tl in tensor_list_list]
else:
enqueue_ops = [queue.enqueue(tl) for tl in tensor_list_list]
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tensor_list)] * threads
else:
enqueue_ops = [queue.enqueue(tensor_list)] * threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
# Batching functions ----------------------------------------------------------
def batch(tensor_list, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, name=None):
"""Creates batches of tensors in `tensor_list`.
This function is implemented using a queue. A `QueueRunner` for the
queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`. The `capacity` argument
  controls how long the prefetching is allowed to grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensor_list` have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensor_list`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
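# Hedged usage sketch for batch(): `parsed_image` and `parsed_label` stand for
# hypothetical single-example tensors produced by some decoder; they are not
# part of this module and the helper is illustrative only.
def _example_batch_pipeline(parsed_image, parsed_label):
  # Group single examples into batches of 32 using four enqueue threads; the
  # registered QueueRunner keeps the queue filled up to `capacity`.
  return batch([parsed_image, parsed_label], batch_size=32,
               num_threads=4, capacity=1000)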
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, name=None):
"""Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started,
with thread `i` enqueuing the tensors from
`tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first
dimension if `enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. The slices of any input tensor
`x` are treated as examples, and the output tensors will have shape
`[batch_size] + x.shape[1:]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensor_list_list` have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as
`tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
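# Hedged usage sketch for batch_join(): `reader_outputs` stands for a list of
# per-reader tensor lists (one entry per input shard); the name is an
# assumption for illustration and nothing in this module calls the helper.
def _example_batch_join_pipeline(reader_outputs):
  # One enqueue thread is started per element of `reader_outputs`, so e.g.
  # four shards give four parallel readers feeding a single output batch.
  return batch_join(reader_outputs, batch_size=32, capacity=1000)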
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list`.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensor_list` have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
def shuffle_batch_join(tensor_list_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, name=None):
"""Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list_list`.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y,
z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(
_flatten(tensor_list_list), name, "shuffle_batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
|
|
"""Provides device automations for homekit devices."""
from typing import List
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.characteristics.const import InputEventValues
from aiohomekit.model.services import ServicesTypes
from aiohomekit.utils import clamp_enum_to_char
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, KNOWN_DEVICES, TRIGGERS
TRIGGER_TYPES = {
"button1",
"button2",
"button3",
"button4",
"button5",
"button6",
"button7",
"button8",
"button9",
"button10",
}
TRIGGER_SUBTYPES = {"single_press", "double_press", "long_press"}
CONF_IID = "iid"
CONF_SUBTYPE = "subtype"
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
vol.Required(CONF_SUBTYPE): vol.In(TRIGGER_SUBTYPES),
}
)
HK_TO_HA_INPUT_EVENT_VALUES = {
InputEventValues.SINGLE_PRESS: "single_press",
InputEventValues.DOUBLE_PRESS: "double_press",
InputEventValues.LONG_PRESS: "long_press",
}
class TriggerSource:
"""Represents a stateless source of event data from HomeKit."""
def __init__(self, connection, aid, triggers):
"""Initialize a set of triggers for a device."""
self._hass = connection.hass
self._connection = connection
self._aid = aid
self._triggers = {}
for trigger in triggers:
self._triggers[(trigger["type"], trigger["subtype"])] = trigger
self._callbacks = {}
def fire(self, iid, value):
"""Process events that have been received from a HomeKit accessory."""
for event_handler in self._callbacks.get(iid, []):
event_handler(value)
def async_get_triggers(self):
"""List device triggers for homekit devices."""
yield from self._triggers
async def async_attach_trigger(
self,
config: TRIGGER_SCHEMA,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
def event_handler(char):
if config[CONF_SUBTYPE] != HK_TO_HA_INPUT_EVENT_VALUES[char["value"]]:
return
self._hass.async_create_task(action({"trigger": config}))
trigger = self._triggers[config[CONF_TYPE], config[CONF_SUBTYPE]]
iid = trigger["characteristic"]
self._connection.add_watchable_characteristics([(self._aid, iid)])
self._callbacks.setdefault(iid, []).append(event_handler)
def async_remove_handler():
if iid in self._callbacks:
self._callbacks[iid].remove(event_handler)
return async_remove_handler
def enumerate_stateless_switch(service):
"""Enumerate a stateless switch, like a single button."""
    # A stateless switch that has a SERVICE_LABEL_INDEX is part of a group
    # and is handled separately.
if service.has(CharacteristicsTypes.SERVICE_LABEL_INDEX):
if len(service.linked) > 0:
return []
char = service[CharacteristicsTypes.INPUT_EVENT]
# HomeKit itself supports single, double and long presses. But the
# manufacturer might not - clamp options to what they say.
all_values = clamp_enum_to_char(InputEventValues, char)
results = []
for event_type in all_values:
results.append(
{
"characteristic": char.iid,
"value": event_type,
"type": "button1",
"subtype": HK_TO_HA_INPUT_EVENT_VALUES[event_type],
}
)
return results
def enumerate_stateless_switch_group(service):
"""Enumerate a group of stateless switches, like a remote control."""
switches = list(
service.accessory.services.filter(
service_type=ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH,
child_service=service,
order_by=[CharacteristicsTypes.SERVICE_LABEL_INDEX],
)
)
results = []
for idx, switch in enumerate(switches):
char = switch[CharacteristicsTypes.INPUT_EVENT]
# HomeKit itself supports single, double and long presses. But the
# manufacturer might not - clamp options to what they say.
all_values = clamp_enum_to_char(InputEventValues, char)
for event_type in all_values:
results.append(
{
"characteristic": char.iid,
"value": event_type,
"type": f"button{idx + 1}",
"subtype": HK_TO_HA_INPUT_EVENT_VALUES[event_type],
}
)
return results
def enumerate_doorbell(service):
"""Enumerate doorbell buttons."""
input_event = service[CharacteristicsTypes.INPUT_EVENT]
# HomeKit itself supports single, double and long presses. But the
# manufacturer might not - clamp options to what they say.
all_values = clamp_enum_to_char(InputEventValues, input_event)
results = []
for event_type in all_values:
results.append(
{
"characteristic": input_event.iid,
"value": event_type,
"type": "doorbell",
"subtype": HK_TO_HA_INPUT_EVENT_VALUES[event_type],
}
)
return results
TRIGGER_FINDERS = {
"service-label": enumerate_stateless_switch_group,
"stateless-programmable-switch": enumerate_stateless_switch,
"doorbell": enumerate_doorbell,
}
async def async_setup_triggers_for_entry(hass: HomeAssistant, config_entry):
"""Triggers aren't entities as they have no state, but we still need to set them up for a config entry."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service_dict):
service_type = service_dict["stype"]
# If not a known service type then we can't handle any stateless events for it
if service_type not in TRIGGER_FINDERS:
return False
# We can't have multiple trigger sources for the same device id
# Can't have a doorbell and a remote control in the same accessory
# They have to be different accessories (they can be on the same bridge)
        # In practice, this is in line with what iOS actually supports AFAWCT.
device_id = conn.devices[aid]
if device_id in hass.data[TRIGGERS]:
return False
# At the moment add_listener calls us with the raw service dict, rather than
# a service model. So turn it into a service ourselves.
accessory = conn.entity_map.aid(aid)
service = accessory.services.iid(service_dict["iid"])
# Just because we recognise the service type doesn't mean we can actually
# extract any triggers - so only proceed if we can
triggers = TRIGGER_FINDERS[service_type](service)
if len(triggers) == 0:
return False
trigger = TriggerSource(conn, aid, triggers)
hass.data[TRIGGERS][device_id] = trigger
return True
conn.add_listener(async_add_service)
def async_fire_triggers(conn, events):
"""Process events generated by a HomeKit accessory into automation triggers."""
for (aid, iid), ev in events.items():
if aid in conn.devices:
device_id = conn.devices[aid]
if device_id in conn.hass.data[TRIGGERS]:
source = conn.hass.data[TRIGGERS][device_id]
source.fire(iid, ev)
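# Routing summary for the two callbacks above: HomeKit events arrive keyed by
# (accessory id, characteristic iid); async_fire_triggers resolves aid to a
# Home Assistant device_id and its TriggerSource, and TriggerSource.fire then
# dispatches by iid to the handlers registered in async_attach_trigger.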
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for homekit devices."""
if device_id not in hass.data.get(TRIGGERS, {}):
return []
device = hass.data[TRIGGERS][device_id]
triggers = []
for trigger, subtype in device.async_get_triggers():
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
device_id = config[CONF_DEVICE_ID]
device = hass.data[TRIGGERS][device_id]
return await device.async_attach_trigger(config, action, automation_info)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class _AutoCorrelationTest(object):
@property
def use_static_shape(self):
raise NotImplementedError("Subclass failed to implement `use_static_shape`")
@property
def dtype(self):
raise NotImplementedError("Subclass failed to implement `dtype`.")
def test_constant_sequence_axis_0_max_lags_none_center_false(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, center=False, normalize=False)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[1., 1., 1.]], auto_corr_)
def test_constant_sequence_axis_0_max_lags_none_center_true(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, normalize=False, center=True)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[0., 0., 0.]], auto_corr_)
def check_results_versus_brute_force(
self, x, axis, max_lags, center, normalize):
"""Compute auto-correlation by brute force, then compare to tf result."""
    # Brute-force auto-corr -- avoiding fft and transpositions.
axis_len = x.shape[axis]
if max_lags is None:
max_lags = axis_len - 1
else:
max_lags = min(axis_len - 1, max_lags)
auto_corr_at_lag = []
if center:
x -= x.mean(axis=axis, keepdims=True)
for m in range(max_lags + 1):
auto_corr_at_lag.append((
np.take(x, indices=range(0, axis_len - m), axis=axis) *
np.conj(np.take(x, indices=range(m, axis_len), axis=axis))
).mean(axis=axis, keepdims=True))
rxx = np.concatenate(auto_corr_at_lag, axis=axis)
if normalize:
rxx /= np.take(rxx, [0], axis=axis)
x_ph = array_ops.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session():
auto_corr = sample_stats.auto_correlation(
x_ph, axis=axis, max_lags=max_lags, center=center,
normalize=normalize)
if self.use_static_shape:
output_shape = list(x.shape)
output_shape[axis] = max_lags + 1
self.assertAllEqual(output_shape, auto_corr.shape)
self.assertAllClose(rxx, auto_corr.eval(), rtol=1e-5, atol=1e-5)
def test_axis_n1_center_false_max_lags_none(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=False)
def test_axis_n2_center_false_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=False)
def test_axis_n1_center_false_max_lags_none_normalize_true(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=True)
def test_axis_n2_center_false_max_lags_none_normalize_true(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=True)
def test_axis_0_center_true_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=0, max_lags=None, center=True, normalize=False)
def test_axis_2_center_true_max_lags_1(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=1, center=True, normalize=False)
def test_axis_2_center_true_max_lags_100(self):
    # There are fewer than 100 elements in axis 2, so expect to get back an
    # array the same size as x, despite having asked for 100 lags.
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=100, center=True, normalize=False)
def test_long_orthonormal_sequence_has_corr_length_0(self):
l = 10000
x = rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
        # OSS CPU FFT has some accuracy issues and is not the most accurate,
        # so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
def test_step_function_sequence(self):
# x jumps to new random value every 10 steps. So correlation length = 10.
x = (rng.randint(-10, 10, size=(1000, 1))
* np.ones((1, 10))).ravel().astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(1000 * 10,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
rxx_ /= rxx_[0]
# Expect positive correlation for the first 10 lags, then significantly
# smaller negative.
self.assertGreater(rxx_[:10].min(), 0)
self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
# RXX should be decreasing for the first 10 lags.
diff = np.diff(rxx_)
self.assertLess(diff[:10].max(), 0)
def test_normalization(self):
l = 10000
x = 3 * rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=True)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
# Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
# due to normalize=True.
        # OSS CPU FFT has some accuracy issues and is not the most accurate,
        # so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
class AutoCorrelationTestStaticShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return True
class AutoCorrelationTestStaticShapeComplex64(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.complex64
@property
def use_static_shape(self):
return True
class AutoCorrelationTestDynamicShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return False
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
# Get dim 1 with negative and positive indices.
pct_neg_index = sample_stats.percentile(
            x, q=q, interpolation=self._interpolation, axis=[-2])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "interpolation"):
sample_stats.percentile(x, q=0.5, interpolation="bad")
def test_vector_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "Expected.*ndims"):
sample_stats.percentile(x, q=[0.5])
def test_vector_q_raises_dynamic(self):
x = [1., 5., 3., 2., 4.]
q_ph = array_ops.placeholder(dtypes.float32)
pct = sample_stats.percentile(x, q=q_ph, validate_args=True)
with self.test_session():
with self.assertRaisesOpError("rank"):
pct.eval(feed_dict={q_ph: [0.5]})
if __name__ == "__main__":
test.main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sgf_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
import coords
import go
from sgf_wrapper import replay_sgf, translate_sgf_move, make_sgf
import utils_test
JAPANESE_HANDICAP_SGF = '''(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]RU[Japanese]
SZ[9]HA[2]RE[Void]KM[5.50]PW[test_white]PB[test_black]AB[gc][cg];W[ee];B[dg])'''
CHINESE_HANDICAP_SGF = '''(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]RU[Chinese]SZ[9]
HA[2]RE[Void]KM[5.50]PW[test_white]PB[test_black]RE[B+39.50];B[gc];B[cg];W[ee];
B[gg];W[eg];B[ge];W[ce];B[ec];W[cc];B[dd];W[de];B[cd];W[bd];B[bc];W[bb];B[be];
W[ac];B[bf];W[dh];B[ch];W[ci];B[bi];W[di];B[ah];W[gh];B[hh];W[fh];B[hg];W[gi];
B[fg];W[dg];B[ei];W[cf];B[ef];W[ff];B[fe];W[bg];B[bh];W[af];B[ag];W[ae];B[ad];
W[ae];B[ed];W[db];B[df];W[eb];B[fb];W[ea];B[fa])'''
NO_HANDICAP_SGF = '''(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]
HA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];
B[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];
W[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];
B[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];
W[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])
'''
tf.logging.set_verbosity(tf.logging.ERROR)
class TestSgfGeneration(utils_test.MiniGoUnitTest):
def test_translate_sgf_move(self):
self.assertEqual(
';B[db]',
translate_sgf_move(go.PlayerMove(go.BLACK, (1, 3)), None))
self.assertEqual(
';W[aa]',
translate_sgf_move(go.PlayerMove(go.WHITE, (0, 0)), None))
self.assertEqual(
';W[]',
translate_sgf_move(go.PlayerMove(go.WHITE, None), None))
self.assertEqual(
';B[db]C[comment]',
translate_sgf_move(go.PlayerMove(go.BLACK, (1, 3)), 'comment'))
def test_make_sgf(self):
all_pwcs = list(replay_sgf(utils_test.BOARD_SIZE, NO_HANDICAP_SGF))
second_last_position, last_move, _ = all_pwcs[-1]
last_position = second_last_position.play_move(last_move)
back_to_sgf = make_sgf(
utils_test.BOARD_SIZE,
last_position.recent,
last_position.score(),
komi=last_position.komi,
)
reconstructed_positions = list(replay_sgf(
utils_test.BOARD_SIZE, back_to_sgf))
second_last_position2, last_move2, _ = reconstructed_positions[-1]
last_position2 = second_last_position2.play_move(last_move2)
self.assertEqualPositions(last_position, last_position2)
class TestSgfWrapper(utils_test.MiniGoUnitTest):
def test_sgf_props(self):
sgf_replayer = replay_sgf(utils_test.BOARD_SIZE, CHINESE_HANDICAP_SGF)
initial = next(sgf_replayer)
self.assertEqual(initial.result, go.BLACK)
self.assertEqual(initial.position.komi, 5.5)
def test_japanese_handicap_handling(self):
intermediate_board = utils_test.load_board('''
.........
.........
......X..
.........
....O....
.........
..X......
.........
.........
''')
intermediate_position = go.Position(
utils_test.BOARD_SIZE,
intermediate_board,
n=1,
komi=5.5,
caps=(0, 0),
recent=(go.PlayerMove(go.WHITE, coords.from_kgs(
utils_test.BOARD_SIZE, 'E5')),),
to_play=go.BLACK,
)
final_board = utils_test.load_board('''
.........
.........
......X..
.........
....O....
.........
..XX.....
.........
.........
''')
final_position = go.Position(
utils_test.BOARD_SIZE,
final_board,
n=2,
komi=5.5,
caps=(0, 0),
recent=(
go.PlayerMove(go.WHITE, coords.from_kgs(
utils_test.BOARD_SIZE, 'E5')),
go.PlayerMove(go.BLACK, coords.from_kgs(
utils_test.BOARD_SIZE, 'D3')),),
to_play=go.WHITE,
)
positions_w_context = list(replay_sgf(
utils_test.BOARD_SIZE, JAPANESE_HANDICAP_SGF))
self.assertEqualPositions(
intermediate_position, positions_w_context[1].position)
final_replayed_position = positions_w_context[-1].position.play_move(
positions_w_context[-1].next_move)
self.assertEqualPositions(final_position, final_replayed_position)
def test_chinese_handicap_handling(self):
intermediate_board = utils_test.load_board('''
.........
.........
......X..
.........
.........
.........
.........
.........
.........
''')
intermediate_position = go.Position(
utils_test.BOARD_SIZE,
intermediate_board,
n=1,
komi=5.5,
caps=(0, 0),
recent=(go.PlayerMove(go.BLACK, coords.from_kgs(
utils_test.BOARD_SIZE, 'G7')),),
to_play=go.BLACK,
)
final_board = utils_test.load_board('''
....OX...
.O.OOX...
O.O.X.X..
.OXXX....
OX...XX..
.X.XXO...
X.XOOXXX.
XXXO.OOX.
.XOOX.O..
''')
final_position = go.Position(
utils_test.BOARD_SIZE,
final_board,
n=50,
komi=5.5,
caps=(7, 2),
ko=None,
recent=(
go.PlayerMove(
go.WHITE, coords.from_kgs(utils_test.BOARD_SIZE, 'E9')),
go.PlayerMove(
go.BLACK, coords.from_kgs(utils_test.BOARD_SIZE, 'F9')),),
to_play=go.WHITE
)
positions_w_context = list(replay_sgf(
utils_test.BOARD_SIZE, CHINESE_HANDICAP_SGF))
self.assertEqualPositions(
intermediate_position, positions_w_context[1].position)
self.assertEqual(
positions_w_context[1].next_move, coords.from_kgs(
utils_test.BOARD_SIZE, 'C3'))
final_replayed_position = positions_w_context[-1].position.play_move(
positions_w_context[-1].next_move)
self.assertEqualPositions(final_position, final_replayed_position)
if __name__ == '__main__':
tf.test.main()
|
|
"""Simple database to track task execution.
"""
import collections
import doctest
import logging
import os
import datetime
import json
import sqlite3
import redis
from ox_herd import settings as ox_settings
def create(run_db=None):
"Create and return RunDB reference based on run_db input."
run_db = run_db if run_db else ox_settings.RUN_DB
if run_db[0] == 'redis':
return RedisRunDB()
if run_db[0] == 'sqlite':
return SqliteRunDB(run_db[1])
raise ValueError('Could not understand run_db %s' % str(run_db))
class RunDB(object):
"""Abstract specification for database to track running of tasks.
"""
def record_task_start(self, task_name, template=None):
"""Record that we are starting task with given name in database.
:arg task_name: String name for task.
:arg template: String indicating template to use in displaying
task result or None to use default.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
        :returns: Task id to use in referring to the task later (e.g., in
                  the record_task_finish method). This is backend dependent
                  and may be an integer or string or something else
                  depending on what is easiest for the backend.
PURPOSE: Record that we started something.
"""
raise NotImplementedError
def record_task_finish(self, task_id, return_value, status='finished',
json_blob=None, pickle_blob=None):
"""Record we finished a task.
:arg task_id: ID for task as returned by record_task_start.
:arg return_value: String return value of task.
:arg status='finished': String status of task.
:arg json_blob=None: Optional string representing json encoding of
task output. Using JSON to store the result
for later inspection is more portable.
:arg pickle_blob=None: Optional string representing python pickle
encoding of task output. Using JSON to store
the result for later inspection is more
portable, but you can use pickle if necessary.
"""
raise NotImplementedError
def delete_task(self, task_id):
"""Delete the task from the database.
:arg task_id: ID for task as returned by record_task_start.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Delete the task.
"""
raise NotImplementedError
def get_tasks(self, status='finished', start_utc=None, end_utc=None,
max_count=None):
"""Return list of TaskInfo objects.
:arg status='finished': Status of tasks to search. Should be one
                                  of the entries from get_allowed_status().
:arg start_utc=None: String specifying minimum task_start_utc.
:arg end_utc=None: String specifying maximum task_end_utc
:arg max_count=None: Optional integer for max items to return
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: List of TaskInfo objects.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Main way to get information about the tasks run.
"""
raw_tasks = self._help_get_tasks(status, start_utc, end_utc)
return self.limit_task_count(raw_tasks, max_count)
def _help_get_tasks(self, status='finished', start_utc=None, end_utc=None):
"""Return list of TaskInfo objects.
:arg status='finished': Status of tasks to search. Should be one
                                  of the entries from get_allowed_status().
:arg start_utc=None: String specifying minimum task_start_utc.
:arg end_utc=None: String specifying maximum task_end_utc
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: List of TaskInfo objects.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Main way to get information about the tasks run.
Intended to be called by get_tasks.
"""
raise NotImplementedError
@staticmethod
def get_allowed_status():
"""Return list of allowed status strings for tasks.
It is important to only use values from the allowed list so
we can store effectively on things like redis.
"""
return ['started', 'finished']
@staticmethod
def limit_task_count(task_list, max_count=-1):
"""Take list of TaskInfo items and returnt he last max_count items.
"""
if max_count is None or max_count < 0 or len(task_list) < max_count:
return task_list
sorted_tl = list(sorted(
task_list, key=lambda item: (
item.task_end_utc if item.task_end_utc else
item.task_start_utc)))
return sorted_tl[-max_count:]
def get_latest(self, task_name):
"""Return task_info for most recent finished task with given task_name.
"""
raise NotImplementedError
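# A minimal usage sketch of the RunDB lifecycle described above, assuming the
# backend configured in ox_settings.RUN_DB is reachable; the task name and
# return value below are illustrative only.
def _example_rundb_lifecycle():
    "Illustrative only: start a task, finish it, then query it back."
    run_db = create()  # picks the redis or sqlite backend from settings
    task_id = run_db.record_task_start('example_task')
    run_db.record_task_finish(task_id, 'ok', status='finished',
                              json_blob=json.dumps({'answer': 42}))
    return run_db.get_tasks(status='finished', max_count=1)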
class TaskInfo(object):
"""Python class to represent task info stored in database.
"""
def __init__( # pylint: disable=too-many-arguments
self, task_id, task_name, task_start_utc=None,
task_status=None, task_end_utc=None, return_value=None,
json_data=None, pickle_data=None, template=None):
self.task_id = task_id
self.task_name = task_name
self.task_start_utc = task_start_utc
self.task_status = task_status
self.task_end_utc = task_end_utc
self.template = template
self.return_value = return_value
self.json_data = json_data
self.pickle_data = pickle_data
def __repr__(self):
args = ', '.join(['%s=%s' % (
name, repr(value)) for name, value in self.to_dict().items()])
return '%s(%s)' % (self.__class__.__name__, args)
def to_dict(self):
"""Return self as a dict.
"""
return collections.OrderedDict([
(name, getattr(self, name, '')) for name in [
'task_id', 'task_name', 'task_start_utc', 'task_status',
'task_end_utc', 'return_value', 'json_data', 'pickle_data',
'template']])
def to_json(self):
"""Return json version of self.
"""
return json.dumps(self.to_dict())
def run_time(self, round_to=2):
"""Return total running time if possible (-1 if task not finished)
"""
if not self.task_end_utc:
return -1
result = 'UNKNOWN'
try:
fmt = '%Y-%m-%d %H:%M:%S.%f'
end_utc_dt = datetime.datetime.strptime(
self.task_end_utc, fmt)
start_utc_dt = datetime.datetime.strptime(
self.task_start_utc, fmt)
result = (end_utc_dt - start_utc_dt).total_seconds()
result = round(result, round_to)
except Exception as problem:
logging.exception('Could not parse start/end time of %s: %s',
self, problem)
result = 'E:%s' % str(problem)
return result
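# Illustrative only: TaskInfo start/end times are stored as strings in the
# '%Y-%m-%d %H:%M:%S.%f' format produced by str(datetime.datetime.utcnow()),
# which is what run_time() parses. The values below are made up.
def _example_task_info_run_time():
    "Build a TaskInfo by hand and compute its run time (illustration only)."
    info = TaskInfo(
        task_id='demo_1', task_name='demo', task_status='finished',
        task_start_utc='2020-01-01 00:00:00.000000',
        task_end_utc='2020-01-01 00:00:01.500000', return_value='ok')
    return info.run_time()  # -> 1.5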
class RedisRunDB(RunDB):
"""Implementation of RunDB with redis backend.
"""
def __init__(self):
self.conn = redis.StrictRedis()
self.my_prefix = ox_settings.REDIS_PREFIX + ':__'
self.id_counter = self.my_prefix + 'task_id_counter'
self.task_master = self.my_prefix + 'task_master' + '::'
def delete_all(self, really=False):
"""Delete everything related to this from Redis.
Only works if really=True.
Mainly for testing; be *VERY* careful with this.
"""
if not really:
raise ValueError('Not doing delete_all since really=%s' % str(
really))
my_keys = list(self.conn.scan_iter(match=self.my_prefix + '*'))
if my_keys:
# names = ' '.join([item.decode('utf8') for item in my_keys])
self.conn.delete(*my_keys)
def record_task_start(self, task_name, template=None):
'Implement record_task_start for this backend.'
if not task_name:
raise ValueError('Must have non-empty task_name not "%s"' % str(
task_name))
if task_name[0:2] == ':_':
raise ValueError('Invalid task name %s; cannot start with ":_"' % (
str(task_name)))
task_id = '%s_%s' % (task_name, datetime.datetime.utcnow().timestamp())
task_key = self.task_master + task_id
if self.conn.get(task_key):
raise ValueError(
'Cannot add task %s as %s since already exists' % (
str(task_name), task_id))
info = TaskInfo(
task_id, task_name, str(datetime.datetime.utcnow()),
'started', template=template).to_json()
add_result = self.conn.setex(
task_key, ox_settings.OX_TASK_TTL, info)
assert add_result, 'Got add_result = %s for %s; race condition?' % (
add_result, task_id)
return task_id
def delete_task(self, task_id):
"""Delete desired id.
"""
task_key = self.task_master + task_id
self.conn.delete(task_key)
def get_task_info(self, task_id):
"""Return dict representation of task with given task_id or None.
"""
task_info = None
task_key = self.task_master + task_id
task_info_json = self.conn.get(task_key)
if task_info_json:
task_info = json.loads(task_info_json.decode('utf-8'))
return task_info
def get_task(self, task_id):
"""Get the task with the given task_id and return it as TaskInfo.
"""
task_info = self.get_task_info(task_id)
if task_info:
return TaskInfo(**task_info)
return None
def record_task_finish(self, task_id, return_value, status='finished',
json_blob=None, pickle_blob=None):
'Implement record_task_finish for this backend.'
task_info = self.get_task_info(task_id)
if not task_info:
logging.error('Unable to update existing task with finish stats')
logging.error('Will create finished but unstarted task')
task_info = {'task_name': 'unknown', 'task_status': 'unknown'}
if task_info['task_status'] == 'finished':
raise ValueError('Cannot record_task_finish for %s; already ended.'
% str(task_info))
task_info['task_end_utc'] = str(datetime.datetime.utcnow())
task_info['return_value'] = return_value
task_info['task_status'] = 'finished'
task_info['json_data'] = json_blob
task_info['pickle_data'] = pickle_blob
task_key = self.task_master + task_id
self.conn.setex(task_key,
ox_settings.OX_TASK_TTL, json.dumps(task_info))
def _help_get_tasks(self, status='finished', start_utc=None, end_utc=None):
"""Return list of TaskInfo objects.
:arg status='finished': Status of tasks to search. Should be one
                                  of the entries from get_allowed_status().
:arg start_utc=None: String specifying minimum task_start_utc.
:arg end_utc=None: String specifying maximum task_end_utc
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: List of TaskInfo objects.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Main way to get information about the tasks run.
"""
result = []
for key in self.conn.scan_iter(match=self.task_master + '*'):
item_json = self.conn.get(key)
item_kw = json.loads(item_json.decode('utf8'))
if not (status is None or item_kw['task_status'] == status):
continue
if not (start_utc is None or item_kw.get(
'task_start_utc', start_utc) >= start_utc):
continue
if not (end_utc is None or item_kw.get(
'task_end_utc', end_utc) <= end_utc):
continue
result.append(TaskInfo(**item_kw))
return result
def get_latest(self, task_name):
"""Implementation of required get_latest method.
The redis implementation of this is not very efficient and could be
improved.
"""
result = None
my_tasks = self._help_get_tasks()
for item in my_tasks:
if (item.task_name != task_name or item.task_status != 'finished'):
continue
if result is None or (
item.task_end_utc > result.task_end_utc):
result = item
return result
@staticmethod
def _regr_test():
"""
>>> import os, tempfile, datetime, time, random, imp
>>> random_key = random.randint(0,10000000) # so tests do not collide
>>> print('Using random_key = %s' % str(random_key)) # doctest: +ELLIPSIS
Using random_key = ...
>>> from ox_herd.core import ox_run_db
>>> ox_run_db.ox_settings.REDIS_PREFIX += ('test_%s' % random_key)
>>> ox_run_db.ox_settings.OX_TASK_TTL = 10
>>> ignore = imp.reload(ox_run_db)
>>> db = ox_run_db.RedisRunDB()
>>> task_id = db.record_task_start('test')
>>> time.sleep(1)
>>> db.record_task_finish(task_id, 'test_return')
>>> t = db.get_tasks()
>>> len(t)
1
>>> t[0].task_name
'test'
>>> t[0].task_status
'finished'
>>> task_id = db.record_task_start('test_again')
>>> len(db.get_tasks('finished'))
1
>>> len(db.get_tasks(None))
2
>>> db.record_task_finish(task_id, 'test_again')
>>> t = db.get_tasks()
>>> len(t)
2
>>> max_list = db.get_tasks(max_count=1)
>>> len(max_list)
1
>>> max_list[0].task_name
'test_again'
Now verify that keys auto-expired in redis
>>> for i in range(10):
... keys = db.conn.keys(ox_run_db.ox_settings.REDIS_PREFIX + '*')
... if not keys:
... break
... logging.info('Sleeping a bit waiting for keys to expire: %s', keys)
... time.sleep(2)
...
>>> db.conn.keys(ox_run_db.ox_settings.REDIS_PREFIX + '*')
[]
Now cleanup
>>> db.delete_all(really=True)
>>> db.conn.keys(ox_run_db.ox_settings.REDIS_PREFIX + '*')
[]
"""
class SqliteRunDB(RunDB):
"""Implementation of RunDB with sqlite backend.
Redis is preferred, but SqliteRunDB is also possible with more
configuration.
"""
def __init__(self, db_path, allow_create=True):
if not os.path.exists(db_path) and allow_create:
logging.warning('No db file at %s; creating', str(db_path))
self.create(db_path)
self.conn = sqlite3.connect(db_path)
@staticmethod
def sql_to_create_tables():
"Return SQL to create required database tables."
sql = """CREATE TABLE task_info (
task_id INTEGER PRIMARY KEY ASC,
task_name text,
task_start_utc text,
task_status text,
task_end_utc text,
return_value text,
json_blob text,
pickle_blob text,
template text
);
"""
return sql
def create(self, db_path):
"Create database at given path."
sql = self.sql_to_create_tables()
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
conn.close()
def record_task_start(self, task_name, template=None):
'Implement record_task_start for this backend.'
sql = '''INSERT INTO task_info (
task_name, task_start_utc, task_status, template) VALUES (?, ?, ?, ?)
'''
cursor = self.conn.cursor()
cursor.execute(sql, [task_name, datetime.datetime.utcnow(), 'started',
template])
task_id = cursor.lastrowid
self.conn.commit()
assert task_id is not None, (
'Expected 1 task id for insert but got %s' % str(task_id))
return task_id
def delete_task(self, task_id):
"""Delete desired id.
"""
        sql = '''DELETE FROM task_info WHERE task_id = ?'''
        self.conn.execute(sql, [task_id])
        self.conn.commit()
def record_task_finish(self, task_id, return_value, status='finished',
json_blob=None, pickle_blob=None):
'Implement record_task_finish for this backend.'
sql = '''UPDATE task_info
SET task_end_utc=?, return_value=?, task_status=?,
json_blob=?, pickle_blob=?
WHERE task_id=?'''
cursor = self.conn.cursor()
utcnow = datetime.datetime.utcnow()
cursor.execute(sql, [utcnow, return_value, str(status), json_blob,
pickle_blob, task_id])
rowcount = cursor.rowcount
if rowcount > 1:
raise ValueError(
'Impossible: updated multiple rows with single task_id %s' % (
str(task_id)))
elif not rowcount:
logging.error('Unable to update existing task with finish stats')
logging.error('Will create finished but unstarted task')
            sql = '''INSERT INTO task_info (
                task_name, task_start_utc, task_id, task_end_utc,
                return_value, task_status, json_blob, pickle_blob) VALUES (
                'unknown', 'unknown', ?, ?, ?, ?, ?, ?)'''
            cursor.execute(sql, [task_id, utcnow, return_value, status,
                                 json_blob, pickle_blob])
self.conn.commit()
def _help_get_tasks(self, status='finished', start_utc=None, end_utc=None):
"""Return list of TaskInfo objects.
:arg status='finished': Status of tasks to search. Should be one
                                  of the entries from get_allowed_status().
:arg start_utc=None: String specifying minimum task_start_utc.
:arg end_utc=None: String specifying maximum task_end_utc
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: List of TaskInfo objects.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Main way to get information about the tasks run.
"""
cursor = self.conn.cursor()
sql = ['select * from task_info where task_status like ?']
args = [status]
if start_utc is not None:
sql.append(' AND task_start_utc >= ?')
args.append(str(start_utc))
if end_utc is not None:
            sql.append(' AND (task_end_utc IS NULL OR task_end_utc <= ?)')
args.append(str(end_utc))
cursor.execute('\n'.join(sql), args)
return [TaskInfo(*item) for item in cursor.fetchall()]
@staticmethod
def _regr_test():
"""
>>> import os, tempfile, datetime, time
>>> from ox_herd.core import ox_run_db
>>> db_file = tempfile.mktemp(suffix='.sql')
>>> db = ox_run_db.SqliteRunDB(db_file)
>>> task_id = db.record_task_start('test')
>>> time.sleep(1)
>>> db.record_task_finish(task_id, 'test_return')
>>> db.conn.close()
>>> del db
>>> os.remove(db_file)
>>> assert not os.path.exists(db_file)
"""
if __name__ == '__main__':
doctest.testmod()
print('Finished tests')
|
|
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
import sys
try:
from cStringIO import BytesIO
except ImportError:
from io import BytesIO
from . import MetaData, CONVERSION_OPTIONS
from .qtype import * # @UnusedWildImport
from .qcollection import qlist, QList, QTemporalList, QDictionary, QTable, QKeyedTable, get_list_qtype
from .qtemporal import QTemporal, to_raw_qtemporal, array_to_raw_qtemporal
class QWriterException(Exception):
'''
Indicates an error raised during data serialization.
'''
pass
ENDIANESS = '\1' if sys.byteorder == 'little' else '\0'
class QWriter(object):
'''
Provides serialization to q IPC protocol.
:Parameters:
- `stream` (`socket` or `None`) - stream for data serialization
     - `protocol_version` (`integer`) - version of the IPC protocol
     - `encoding` (`string`) - encoding for characters serialization
    :Attributes:
- `_writer_map` - stores mapping between Python types and functions
responsible for serializing into IPC representation
'''
_writer_map = {}
serialize = Mapper(_writer_map)
def __init__(self, stream, protocol_version, encoding = 'latin-1'):
self._stream = stream
self._protocol_version = protocol_version
self._encoding = encoding
def write(self, data, msg_type, **options):
'''Serializes and pushes single data object to a wrapped stream.
:Parameters:
- `data` - data to be serialized
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the message
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
        :returns: if the wrapped stream is ``None``, the serialized data;
                  otherwise ``None``
'''
self._buffer = BytesIO()
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
# header and placeholder for message size
self._buffer.write(('%s%s\0\0\0\0\0\0' % (ENDIANESS, chr(msg_type))).encode(self._encoding))
self._write(data)
# update message size
data_size = self._buffer.tell()
self._buffer.seek(4)
self._buffer.write(struct.pack('i', data_size))
# write data to socket
if self._stream:
self._stream.sendall(self._buffer.getvalue())
else:
return self._buffer.getvalue()
def _write(self, data):
if data is None:
self._write_null()
else:
if isinstance(data, Exception) or (type(data) == type and issubclass(data, Exception)):
data_type = Exception
else:
data_type = type(data)
writer = self._get_writer(data_type)
if writer:
writer(self, data)
else:
qtype = Q_TYPE.get(type(data), None)
if qtype:
self._write_atom(data, qtype)
else:
                    raise QWriterException('Unable to serialize type: %s' % (data.__class__ if isinstance(data, object) else type(data)))
def _get_writer(self, data_type):
return self._writer_map.get(data_type, None)
def _write_null(self):
self._buffer.write(struct.pack('=bx', QNULL))
@serialize(Exception)
def _write_error(self, data):
self._buffer.write(struct.pack('b', QERROR))
if isinstance(data, Exception):
msg = data.__class__.__name__
if data.args:
msg = data.args[0]
else:
msg = data.__name__
self._buffer.write(msg.encode(self._encoding))
self._buffer.write(b'\0')
def _write_atom(self, data, qtype):
try:
self._buffer.write(struct.pack('b', qtype))
fmt = STRUCT_MAP[qtype]
self._buffer.write(struct.pack(fmt, data))
except KeyError:
            raise QWriterException('Unable to serialize type: %s' % (data.__class__ if isinstance(data, object) else type(data)))
@serialize(tuple, list)
def _write_generic_list(self, data):
self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data)))
for element in data:
self._write(element)
@serialize(str, bytes)
def _write_string(self, data):
if not self._options.single_char_strings and len(data) == 1:
self._write_atom(ord(data), QCHAR)
else:
if isinstance(data, str):
encoded_data = data.encode(self._encoding)
self._buffer.write(struct.pack('=bxi', QSTRING, len(encoded_data)))
self._buffer.write(encoded_data)
else:
self._buffer.write(struct.pack('=bxi', QSTRING, len(data)))
self._buffer.write(data)
@serialize(numpy.string_)
def _write_symbol(self, data):
self._buffer.write(struct.pack('=b', QSYMBOL))
if data:
self._buffer.write(data)
self._buffer.write(b'\0')
@serialize(uuid.UUID)
def _write_guid(self, data):
if self._protocol_version < 3:
raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')
self._buffer.write(struct.pack('=b', QGUID))
self._buffer.write(data.bytes)
@serialize(QTemporal)
def _write_temporal(self, data):
try:
if self._protocol_version < 1 and (data.meta.qtype == QTIMESPAN or data.meta.qtype == QTIMESTAMP):
raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(data.meta.qtype))
self._buffer.write(struct.pack('=b', data.meta.qtype))
fmt = STRUCT_MAP[data.meta.qtype]
self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data.raw, data.meta.qtype)))
except KeyError:
raise QWriterException('Unable to serialize type: %s' % type(data))
@serialize(numpy.datetime64, numpy.timedelta64)
def _write_numpy_temporal(self, data):
try:
qtype = TEMPORAL_PY_TYPE[str(data.dtype)]
if self._protocol_version < 1 and (qtype == QTIMESPAN or qtype == QTIMESTAMP):
raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(qtype))
self._buffer.write(struct.pack('=b', qtype))
fmt = STRUCT_MAP[qtype]
self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data, qtype)))
except KeyError:
raise QWriterException('Unable to serialize type: %s' % data.dtype)
@serialize(QLambda)
def _write_lambda(self, data):
self._buffer.write(struct.pack('=b', QLAMBDA))
self._buffer.write(b'\0')
self._write_string(data.expression)
@serialize(QProjection)
def _write_projection(self, data):
self._buffer.write(struct.pack('=bi', QPROJECTION, len(data.parameters)))
for parameter in data.parameters:
self._write(parameter)
@serialize(QDictionary, QKeyedTable)
def _write_dictionary(self, data):
self._buffer.write(struct.pack('=b', QDICTIONARY))
self._write(data.keys)
self._write(data.values)
@serialize(QTable)
def _write_table(self, data):
self._buffer.write(struct.pack('=bxb', QTABLE, QDICTIONARY))
self._write(qlist(numpy.array(data.dtype.names), qtype = QSYMBOL_LIST))
self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data.dtype)))
for column in data.dtype.names:
self._write_list(data[column], data.meta[column])
@serialize(numpy.ndarray, QList, QTemporalList)
def _write_list(self, data, qtype = None):
if qtype is not None:
qtype = -abs(qtype)
if qtype is None:
qtype = get_list_qtype(data)
if self._protocol_version < 1 and (abs(qtype) == QTIMESPAN_LIST or abs(qtype) == QTIMESTAMP_LIST):
            raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(qtype))
if qtype == QGENERAL_LIST:
self._write_generic_list(data)
elif qtype == QCHAR:
self._write_string(data.tostring())
else:
self._buffer.write(struct.pack('=bxi', -qtype, len(data)))
if data.dtype.type in (numpy.datetime64, numpy.timedelta64):
# convert numpy temporal to raw q temporal
data = array_to_raw_qtemporal(data, qtype = qtype)
if qtype == QSYMBOL:
for symbol in data:
if symbol:
self._buffer.write(symbol)
self._buffer.write(b'\0')
elif qtype == QGUID:
if self._protocol_version < 3:
raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')
for guid in data:
self._buffer.write(guid.bytes)
else:
self._buffer.write(data.tostring())
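# A minimal serialization sketch (not part of the library): when the wrapped
# stream is None, QWriter.write() returns the serialized IPC message as bytes
# instead of sending it over a socket. msg_type=1 is assumed here to denote a
# synchronous message (qPython's MessageType.SYNC); protocol_version=3 assumes
# a kdb+ 3.x server.
def _example_serialize_to_bytes():
    'Illustrative only: serialize a Python list to q IPC bytes.'
    writer = QWriter(stream=None, protocol_version=3)
    return writer.write([1, 2, 3], msg_type=1)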
|
|
# This is Cyder's main settings file. If you need to override a setting
# locally, use cyder/settings/local.py
import glob
import itertools
import logging
import os
import socket
import sys
from django.utils.functional import lazy
from lib.path_utils import ROOT, path
##########################
# copied from funfactory #
##########################
SLAVE_DATABASES = []
DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)
## Logging
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_playdoh" # Change this after you fork.
LOGGING_CONFIG = None
LOGGING = {}
# CEF Logging
CEF_PRODUCT = 'Playdoh'
CEF_VENDOR = 'Mozilla'
CEF_VERSION = '0'
CEF_DEVICE_VERSION = '0'
## Accepted locales
# Tells the product_details module where to find our local JSON files.
# This ultimately controls how LANGUAGES are constructed.
PROD_DETAILS_DIR = path('lib/product_details_json')
# On dev instances, the list of accepted locales defaults to the contents of
# the `locale` directory within a project module or, for older Playdoh apps,
# the root locale directory. A localizer can add their locale in the l10n
# repository (copy of which is checked out into `locale`) in order to start
# testing the localization on the dev server.
try:
DEV_LANGUAGES = [
os.path.basename(loc).replace('_', '-')
for loc in itertools.chain(glob.iglob(ROOT + '/locale/*'), # old style
glob.iglob(ROOT + '/*/locale/*'))
if (os.path.isdir(loc) and os.path.basename(loc) != 'templates')
]
except OSError:
DEV_LANGUAGES = ('en-US',)
def lazy_lang_url_map():
from django.conf import settings
langs = settings.DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return dict([(i.lower(), i) for i in langs])
LANGUAGE_URL_MAP = lazy(lazy_lang_url_map, dict)()
# Override Django's built-in with our native names
def lazy_langs():
from django.conf import settings
from product_details import product_details
langs = DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return dict([(lang.lower(), product_details.languages[lang]['native'])
for lang in langs if lang in product_details.languages])
LANGUAGES = lazy(lazy_langs, dict)()
# Tells the extract script what files to look for L10n in and what function
# handles the extraction. The Tower library expects this.
DOMAIN_METHODS = {
'messages': [
# Searching apps dirs only exists for historic playdoh apps.
# See playdoh's base settings for how message paths are set.
('apps/**.py',
'tower.management.commands.extract.extract_tower_python'),
('apps/**/templates/**.html',
'tower.management.commands.extract.extract_tower_template'),
('templates/**.html',
'tower.management.commands.extract.extract_tower_template'),
],
}
# Paths that don't require a locale code in the URL.
SUPPORTED_NONLOCALES = ['media', 'static', 'admin']
## Media and templates.
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path('static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'jingo.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'lib.context_processors.i18n',
'lib.context_processors.globals',
#'jingo_minify.helpers.build_ids',
)
def get_template_context_processors(exclude=(), append=(),
current={'processors': TEMPLATE_CONTEXT_PROCESSORS}):
"""
Returns TEMPLATE_CONTEXT_PROCESSORS without the processors listed in
exclude and with the processors listed in append.
The use of a mutable dict is intentional, in order to preserve the state of
the TEMPLATE_CONTEXT_PROCESSORS tuple across multiple settings files.
"""
current['processors'] = tuple(
[p for p in current['processors'] if p not in exclude]
) + tuple(append)
return current['processors']
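# For example, a local settings override (cyder/settings/local.py) could trim
# or extend the tuple like this (the appended processor path below is
# hypothetical):
#
# TEMPLATE_CONTEXT_PROCESSORS = get_template_context_processors(
#     exclude=('django.core.context_processors.debug',),
#     append=('myapp.context_processors.extra',),
# )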
TEMPLATE_DIRS = (
path('templates'),
)
# Storage of static files
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter'
)
COMPRESS_PRECOMPILERS = (
#('text/coffeescript', 'coffee --compile --stdio'),
('text/less', 'lessc {infile} {outfile}'),
#('text/x-sass', 'sass {infile} {outfile}'),
#('text/x-scss', 'sass --scss {infile} {outfile}'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
def JINJA_CONFIG():
import jinja2
from django.conf import settings
# from caching.base import cache
config = {'extensions': ['tower.template.i18n', 'jinja2.ext.do',
'jinja2.ext.with_', 'jinja2.ext.loopcontrols'],
'finalize': lambda x: x if x is not None else ''}
# if 'memcached' in cache.scheme and not settings.DEBUG:
# We're passing the _cache object directly to jinja because
# Django can't store binary directly; it enforces unicode on it.
# Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache
# and in the errors you get when you try it the other way.
# bc = jinja2.MemcachedBytecodeCache(cache._cache,
# "%sj2:" % settings.CACHE_PREFIX)
# config['cache_size'] = -1 # Never clear the cache
# config['bytecode_cache'] = bc
return config
# Path to Java. Used for compress_assets.
JAVA_BIN = '/usr/bin/java'
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
## Tests
TEST_RUNNER = 'test_utils.runner.RadicalTestSuiteRunner'
## Celery
# True says to simulate background tasks without actually using celeryd.
# Good for local development in case celeryd is not running.
CELERY_ALWAYS_EAGER = True
BROKER_CONNECTION_TIMEOUT = 0.1
CELERY_RESULT_BACKEND = 'amqp'
CELERY_IGNORE_RESULT = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP.
CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 2
## Arecibo
# when ARECIBO_SERVER_URL is set, it can use celery or the regular wrapper
ARECIBO_USES_CELERY = True
# For absolute urls
try:
DOMAIN = socket.gethostname()
except socket.error:
DOMAIN = 'localhost'
PROTOCOL = "http://"
PORT = 80
## django-mobility
MOBILE_COOKIE = 'mobile'
#########
# Cyder #
#########
TESTING = True if sys.argv[1:] and sys.argv[1] == 'test' else False
MIGRATING = (True if sys.argv[1:] and sys.argv[1] == 'maintain_migrate'
else False)
ROOT_URLCONF = 'cyder.urls'
APPEND_SLASH = True
MEDIA_ROOT = path('media')
MEDIA_URL = '/media/'
_base = os.path.dirname(__file__)
site_root = os.path.realpath(os.path.join(_base, '../'))
sys.path.append(site_root)
sys.path.append(site_root + '/vendor')
EMAIL_SUFFIX = '@onid.oregonstate.edu'
CAS_SERVER_URL = 'https://login.oregonstate.edu/cas/login'
CAS_AUTO_CREATE_USERS = False
BUG_REPORT_EMAIL = 'CyderProject@oregonstate.edu'
EMAIL_HOST = 'mail.oregonstate.edu'
SASS_PREPROCESS = True
JINGO_MINIFY_USE_STATIC = False
SOUTH_TESTS_MIGRATE = False
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
'css': {
'cyder_css': (
'css/lib/jquery-ui-1.8.11.custom.css',
'css/sticky_footer.css',
'css/globals.scss',
'css/base.scss',
'css/forms.scss',
'css/tables.scss',
),
'cyder_dev_css': ('css/dev.scss',),
'cyder_prod_css': ('css/prod.scss',),
'search': ('css/search.scss',),
        'tags_css': ('css/lib/jquery.tagsinput.css',),
},
'js': {
'cyder_js': (
'js/lib/jquery-1.11.1.min.js',
'js/lib/jquery-migrate-1.2.1.min.js',
'js/lib/attribute_adder.js',
'js/lib/jQuery.rightclick.js',
'js/lib/jquery.validate.min.js',
'js/lib/jquery-ui.min.js',
'js/lib/tablesorter.js',
'js/lib/editablegrid/editablegrid.js',
'js/lib/editablegrid/editablegrid_renderers.js',
'js/lib/editablegrid/editablegrid_editors.js',
'js/lib/editablegrid/editablegrid_validators.js',
'js/lib/editablegrid/editablegrid_utils.js',
'js/lib/editablegrid/editablegrid_charts.js',
'js/utils.js',
'js/application.js',
'js/dhcp_raw_include.js',
'js/views.js',
'js/cy_delete.js',
'js/rangewizard.js',
'js/mobile.js',
'js/constants.js',
'js/buttonLogic.js'
),
'rangeform': (
'js/rangeform.js',
),
'admin': (
'js/admin.js',
),
'ctnr': (
'js/ctnr/ctnr.js',
),
'cyuser': (
'js/cyuser/cyuser.js',
),
'systemform': (
'js/systemform.js',
),
'bugreport': (
'js/bugreport.js',
),
'tags_js': (
'js/lib/jquery.tagsinput.js',
),
}
}
INSTALLED_APPS = [
# Local apps
'compressor',
'tower', # for ./manage.py extract (L10n)
'cronjobs', # for ./manage.py cron * cmd line tasks
'django_browserid',
# Django contrib apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
# 'django.contrib.sites',
# 'django.contrib.messages',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# Third-party apps, patches, fixes
'commonware.response.cookies',
'djcelery',
'django_nose',
'session_csrf',
# L10n
'product_details',
# Cyder
'cyder',
# Third party apps
'south',
'django_cas',
'djcelery',
'django_extensions',
'django_nose',
'jingo_minify',
'rest_framework',
# Django contrib apps
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.messages',
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django_cas.middleware.CASMiddleware',
'cyder.middleware.dev_authentication.DevAuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.contrib.messages.context_processors.messages'
)
SESSION_COOKIE_NAME = 'cyder'
SESSION_COOKIE_SECURE = False
AUTH_PROFILE_MODULE = 'cyder.UserProfile'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
#'django_cas.backends.CASBackend',
)
# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
'admin',
'debug_toolbar',
'rest_framework',
'cyder.api.authtoken',
]
DJANGO_TEMPLATE_APPS = ['admin']
LOGGING = dict(loggers=dict(playdoh={'level': logging.INFO}))
# # Use this if you have localizable HTML files:
# DOMAIN_METHODS['lhtml'] = [
# ('**/templates/**.lhtml',
# 'tower.management.commands.extract.extract_tower_template'),
# ]
# # Use this if you have localizable HTML files:
# DOMAIN_METHODS['javascript'] = [
# # Make sure that this won't pull in strings from external libraries you
# # may use.
# ('media/js/**.js', 'javascript'),
# ]
#TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
BUILD_PATH = 'builds'
INTERNAL_IPS = ('127.0.0.1', '10.22.74.139', '10.250.2.54')
# Use sha 256 by default but support any other algorithm:
BASE_PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
)
HMAC_KEYS = { # for bcrypt only
#'2012-06-06': 'cheesecake',
}
from django_sha2 import get_password_hashers
PASSWORD_HASHERS = get_password_hashers(BASE_PASSWORD_HASHERS, HMAC_KEYS)
# Migration settings
POINTERS = [('128.193.76.253', 'cob-dc81.bus.oregonstate.edu', 'forward'),
('128.193.76.254', 'cob-dc82.bus.oregonstate.edu', 'forward'),
('128.193.76.252', 'cob-dc83.bus.oregonstate.edu', 'forward'),
('128.193.76.255', 'cob-dc84.bus.oregonstate.edu', 'forward'),
]
NONDELEGATED_NS = ['dns.merit.net', 'ns1.nero.net', 'ns1.oregonstate.edu',
'ns1.ucsb.edu', 'ns2.oregonstate.edu']
SECONDARY_ZONES = ["oscs.orst.edu", "oscs.oregonstate.edu", "oscs.orst.net",
"100.193.128.in-addr.arpa", "101.193.128.in-addr.arpa",
"4.215.10.in-addr.arpa", "5.215.10.in-addr.arpa",
"bus.oregonstate.edu", "74.193.128.in-addr.arpa",
"75.193.128.in-addr.arpa", "76.193.128.in-addr.arpa",
"77.193.128.in-addr.arpa", "78.193.128.in-addr.arpa",
"ceoas.oregonstate.edu", "coas.oregonstate.edu",
"oce.orst.edu", "64.193.128.in-addr.arpa",
"65.193.128.in-addr.arpa", "66.193.128.in-addr.arpa",
"67.193.128.in-addr.arpa", "68.193.128.in-addr.arpa",
"69.193.128.in-addr.arpa", "70.193.128.in-addr.arpa",
"71.193.128.in-addr.arpa"]
REVERSE_SOAS = [
'139.201.199', '17.211.140', '18.211.140', '19.211.140', '20.211.140',
'21.211.140', '28.211.140', '32.211.140', '33.211.140', '162.211.140',
'163.211.140', '16.211.140', '193.128', '23.211.140', '165.211.140', '10',
'26.211.140', '71.211.140', '224.211.140', '225.211.140', '226.211.140',
'227.211.140', '228.211.140', '229.211.140', '230.211.140', '231.211.140',
'232.211.140', '233.211.140', '234.211.140', '235.211.140', '236.211.140',
'237.211.140', '238.211.140', '239.211.140',
]
# This list contains tuples that have a zone's name as their 0th element and a
# view's name as the 1st element. For example:
#
# ('mozilla.net', 'public'),
# ('mozilla.net', 'private')
#
# This will cause the public and private view of the mozilla.net zone to not
# have a config statement in the produced config/master.private and
# config/master.public files. The files net/mozilla/mozilla.net.public and
# net/mozilla.net.private *will* be generated and written to disk.
ZONES_WITH_NO_CONFIG = [
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
# 'cyder.api.v1.permissions.ReadOnlyIfAuthenticated',
'cyder.api.v1.permissions.ReadOnlyIfAuthenticatedWriteIfSpecialCase',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'cyder.api.v1.authentication.CyderTokenAuthentication',
),
'PAGINATE_BY': 25,
'PAGINATE_BY_PARAM': 'count',
'MAX_PAGINATE_BY': 100,
'DEFAULT_FILTER_BACKENDS': (
'cyder.api.v1.filter.SearchFieldFilter',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
}
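# A hedged illustration of the pagination settings above (the endpoint path is
# hypothetical; only the 'count' parameter name and the 25/100 limits come from
# this configuration): list endpoints return 25 results per page by default,
# and a client may request e.g. /api/v1/.../?count=50 to get up to 50 results,
# capped at MAX_PAGINATE_BY (100).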
# bindbuild settings
# ==================
BINDBUILD = {
# stage_dir: Where test builds should go. This shouldn't be under
# version control.
'stage_dir': '/tmp/dns_stage/',
# prod_dir: This is the directory where Cyder will place its DNS files.
# This should be a Git repo.
'prod_dir': '/tmp/dns_prod/cyzones/',
# bind_prefix: This is the path to where Cyder zone files are built
# relative to the root of the Git repo. This is usually a substring of
# prod_dir.
'bind_prefix': '/tmp/dns_prod/cyzones/',
'lock_file': '/tmp/cyder_dns.lock',
'pid_file': '/tmp/cyder_dns.pid',
'named_checkzone': 'named-checkzone',
'named_checkconf': 'named-checkconf',
'named_checkzone_opts': '',
# None means no limit
'line_decrease_limit': 500,
'line_increase_limit': 500,
'stop_file': '/tmp/cyder_dns.stop',
'stop_file_email_interval': 1800, # 30 minutes
'last_run_file': '/tmp/cyder.last_run',
'log_syslog': False,
}
# dhcp_build settings
# ===================
DHCPBUILD = {
# stage_dir: Where test builds should go. This shouldn't be under
# version control.
'stage_dir': '/tmp/dhcp/stage',
# prod_dir: Where Cyder will place the dhcpd configuration file. This
# should be a Git repo.
'prod_dir': '/tmp/dhcp/prod',
'lock_file': '/tmp/cyder_dhcp.lock',
'pid_file': '/tmp/cyder_dhcp.pid',
'dhcpd': 'dhcpd',
# target_file: The configuration file that will be generated
# check_file: The conf file whose syntax will be checked (None means don't
# check any file)
'files_v4': {
'target_file': 'dhcpd.conf.data',
'check_file': None,
},
'files_v6': {
'target_file': 'dhcpd.conf.data.6',
'check_file': None,
},
# None means no limit
'line_decrease_limit': 500,
'line_increase_limit': 500,
'stop_file': '/tmp/cyder_dhcp.stop',
'stop_file_email_interval': 1800, # 30 minutes
'log_syslog': False,
}
DATETIME_INPUT_FORMATS = (
'%m/%d/%y', # '10/25/06'
'%m/%d/%y %H:%M',
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
)
MYSQL = 'mysql'
MYSQLDUMP = 'mysqldump'
###############################
# more copied from funfactory #
###############################
## Middlewares, apps, URL configs.
def get_middleware(exclude=(), append=(),
current={'middleware': MIDDLEWARE_CLASSES}):
"""
Returns MIDDLEWARE_CLASSES without the middlewares listed in exclude and
with the middlewares listed in append.
The use of a mutable dict is intentional, in order to preserve the state of
the MIDDLEWARE_CLASSES tuple across multiple settings files.
"""
current['middleware'] = tuple(
[m for m in current['middleware'] if m not in exclude]
) + tuple(append)
return current['middleware']
def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):
"""
Returns INSTALLED_APPS without the apps listed in exclude and with the apps
listed in append.
The use of a mutable dict is intentional, in order to preserve the state of
the INSTALLED_APPS tuple across multiple settings files.
"""
current['apps'] = tuple(
[a for a in current['apps'] if a not in exclude]
) + tuple(append)
return current['apps']
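# Hedged usage sketch (illustrative only; the middleware/app names below are
# hypothetical): a later settings file could adjust the tuples without copying
# them wholesale, e.g.
#
# MIDDLEWARE_CLASSES = get_middleware(
#     exclude=('django.middleware.csrf.CsrfViewMiddleware',),
#     append=('debug_toolbar.middleware.DebugToolbarMiddleware',))
# INSTALLED_APPS = get_apps(append=('debug_toolbar',))
#
# The helpers simply drop anything listed in `exclude` and append anything in
# `append`, preserving the rest of the original tuple order.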
|
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce
from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
import sys
import traceback
# Add testrunner to the path.
sys.path.insert(
0,
os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = (
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))))
DEFAULT_OUT_GN = 'out.gn'
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
# This needs to stay in sync with test/bot_default.isolate.
"bot_default": [
"debugger",
"mjsunit",
"cctest",
"wasm-spec-tests",
"inspector",
"webkit",
"mkgrokdump",
"wasm-js",
"fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
# This needs to stay in sync with test/default.isolate.
"default": [
"debugger",
"mjsunit",
"cctest",
"wasm-spec-tests",
"inspector",
"mkgrokdump",
"wasm-js",
"fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
# This needs to stay in sync with test/d8_default.isolate.
"d8_default": [
"debugger",
"mjsunit",
"webkit",
"message",
"preparser",
"intl",
],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"debugger",
"mjsunit",
"cctest",
"inspector",
"webkit",
"intl",
],
"unittests": [
"unittests",
],
}
# Increase the timeout scale factor for these slow architectures:
SLOW_ARCHS = ["arm",
"mips",
"mipsel",
"mips64",
"mips64el",
"s390",
"s390x",
"arm64"]
class ModeConfig(object):
def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
self.flags = flags
self.timeout_scalefactor = timeout_scalefactor
self.status_mode = status_mode
self.execution_mode = execution_mode
DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
"--testing-d8-test-runner"]
RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
MODES = {
"debug": ModeConfig(
flags=DEBUG_FLAGS,
timeout_scalefactor=4,
status_mode="debug",
execution_mode="debug",
),
"optdebug": ModeConfig(
flags=DEBUG_FLAGS,
timeout_scalefactor=4,
status_mode="debug",
execution_mode="debug",
),
"release": ModeConfig(
flags=RELEASE_FLAGS,
timeout_scalefactor=1,
status_mode="release",
execution_mode="release",
),
# Normal trybot release configuration. There, dchecks are always on which
# implies debug is set. Hence, the status file needs to assume debug-like
# behavior/timeouts.
"tryrelease": ModeConfig(
flags=RELEASE_FLAGS,
timeout_scalefactor=1,
status_mode="debug",
execution_mode="release",
),
# This mode requires v8 to be compiled with dchecks and slow dchecks.
"slowrelease": ModeConfig(
flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
timeout_scalefactor=2,
status_mode="debug",
execution_mode="release",
),
}
PROGRESS_INDICATORS = {
'verbose': progress.VerboseProgressIndicator,
'ci': progress.CIProgressIndicator,
'dots': progress.DotsProgressIndicator,
'color': progress.ColorProgressIndicator,
'mono': progress.MonochromeProgressIndicator,
}
class TestRunnerError(Exception):
pass
class BuildConfig(object):
def __init__(self, build_config):
# In V8 land, GN's x86 is called ia32.
if build_config['v8_target_cpu'] == 'x86':
self.arch = 'ia32'
else:
self.arch = build_config['v8_target_cpu']
self.asan = build_config['is_asan']
self.cfi_vptr = build_config['is_cfi']
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
self.is_android = build_config['is_android']
self.is_clang = build_config['is_clang']
self.is_debug = build_config['is_debug']
self.is_full_debug = build_config['is_full_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
self.no_snap = not build_config['v8_use_snapshot']
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
# TODO(machenbach): We only have ubsan not ubsan_vptr.
self.ubsan_vptr = build_config['is_ubsan_vptr']
self.embedded_builtins = build_config['v8_enable_embedded_builtins']
self.verify_csa = build_config['v8_enable_verify_csa']
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
# Export only for MIPS target
if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
self.mips_arch_variant = build_config['mips_arch_variant']
self.mips_use_msa = build_config['mips_use_msa']
@property
def use_sanitizer(self):
return (self.asan or self.cfi_vptr or self.msan or self.tsan or
self.ubsan_vptr)
def __str__(self):
detected_options = []
if self.asan:
detected_options.append('asan')
if self.cfi_vptr:
detected_options.append('cfi_vptr')
if self.dcheck_always_on:
detected_options.append('dcheck_always_on')
if self.gcov_coverage:
detected_options.append('gcov_coverage')
if self.msan:
detected_options.append('msan')
if self.no_i18n:
detected_options.append('no_i18n')
if self.no_snap:
detected_options.append('no_snap')
if self.predictable:
detected_options.append('predictable')
if self.tsan:
detected_options.append('tsan')
if self.ubsan_vptr:
detected_options.append('ubsan_vptr')
if self.embedded_builtins:
detected_options.append('embedded_builtins')
if self.verify_csa:
detected_options.append('verify_csa')
if self.lite_mode:
detected_options.append('lite_mode')
if self.pointer_compression:
detected_options.append('pointer_compression')
return '\n'.join(detected_options)
class BaseTestRunner(object):
def __init__(self, basedir=None):
self.basedir = basedir or BASE_DIR
self.outdir = None
self.build_config = None
self.mode_name = None
self.mode_options = None
self.target_os = None
@property
def framework_name(self):
"""String name of the base-runner subclass, used in test results."""
raise NotImplementedError()
def execute(self, sys_args=None):
if sys_args is None: # pragma: no cover
sys_args = sys.argv[1:]
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
if options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print(' '.join(sys.argv))
self._load_build_config(options)
command.setup(self.target_os, options.device)
try:
self._process_default_options(options)
self._process_options(options)
except TestRunnerError:
parser.print_help()
raise
args = self._parse_test_args(args)
tests = self._load_testsuite_generators(args, options)
self._setup_env()
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
exit_code = self._do_execute(tests, args, options)
if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = utils.EXIT_CODE_PASS
return exit_code
except TestRunnerError:
traceback.print_exc()
return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
return utils.EXIT_CODE_INTERRUPTED
except Exception:
traceback.print_exc()
return utils.EXIT_CODE_INTERNAL_ERROR
finally:
command.tear_down()
def _create_parser(self):
parser = optparse.OptionParser()
parser.usage = '%prog [options] [tests]'
parser.description = """TESTS: %s""" % (TEST_MAP["default"])
self._add_parser_default_options(parser)
self._add_parser_options(parser)
return parser
def _add_parser_default_options(self, parser):
parser.add_option("--gn", help="Scan out.gn for the last built"
" configuration",
default=False, action="store_true")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
parser.add_option("--buildbot", help="DEPRECATED!",
default=False, action="store_true")
parser.add_option("--arch",
help="The architecture to run tests for")
parser.add_option("-m", "--mode",
help="The test mode in which to run (uppercase for ninja"
" and buildbot builds): %s" % MODES.keys())
parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
"directory will be used")
parser.add_option("--test-root", help="Root directory of the test suites",
default=os.path.join(self.basedir, 'test'))
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run")
parser.add_option("--swarming", default=False, action="store_true",
help="Indicates running test driver on swarming.")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
parser.add_option("-d", "--device",
help="The device ID to run Android tests on. If not "
"given it will be autodetected.")
# Shard
parser.add_option("--shard-count", default=1, type=int,
help="Split tests into this number of shards")
parser.add_option("--shard-run", default=1, type=int,
help="Run this shard from the split up tests.")
# Progress
parser.add_option("-p", "--progress",
choices=PROGRESS_INDICATORS.keys(), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite", default="v8tests",
help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
parser.add_option("--ci-test-completion",
help="Path to a file for logging test completion in the "
"context of CI progress indicator. Ignored if "
"progress indicator is other than 'ci'.")
# Rerun
parser.add_option("--rerun-failures-count", default=0, type=int,
help="Number of times to rerun each failing test case. "
"Very slow tests will be rerun only once.")
parser.add_option("--rerun-failures-max", default=100, type=int,
help="Maximum number of failing test cases to rerun")
# Test config
parser.add_option("--command-prefix", default="",
help="Prepended to each shell command used to run a test")
parser.add_option("--extra-flags", action="append", default=[],
help="Additional flags to pass to each test command")
parser.add_option("--isolates", action="store_true", default=False,
help="Whether to test isolates")
parser.add_option("--no-harness", "--noharness",
default=False, action="store_true",
help="Run without test harness of a given suite")
parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator")
parser.add_option("--run-skipped", help="Also run skipped tests.",
default=False, action="store_true")
parser.add_option("-t", "--timeout", default=60, type=int,
help="Timeout for single test in seconds")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="Verbose output")
# TODO(machenbach): Temporary options for rolling out new test runner
# features.
parser.add_option("--mastername", default='',
help="Mastername property from infrastructure. Not "
"setting this option indicates manual usage.")
parser.add_option("--buildername", default='',
help="Buildername property from infrastructure. Not "
"setting this option indicates manual usage.")
def _add_parser_options(self, parser):
pass
def _parse_args(self, parser, sys_args):
options, args = parser.parse_args(sys_args)
if any(map(lambda v: v and ',' in v,
[options.arch, options.mode])): # pragma: no cover
print('Multiple arch/mode are deprecated')
raise TestRunnerError()
return options, args
def _load_build_config(self, options):
for outdir in self._possible_outdirs(options):
try:
self.build_config = self._do_load_build_config(outdir, options.verbose)
except TestRunnerError:
pass
if not self.build_config: # pragma: no cover
print('Failed to load build config')
raise TestRunnerError
print('Build found: %s' % self.outdir)
if str(self.build_config):
print('>>> Autodetected:')
print(self.build_config)
# Represents the OS that tests run on. Same as the host OS except for
# Android, which is determined from the build output.
if self.build_config.is_android:
self.target_os = 'android'
else:
self.target_os = utils.GuessOS()
# Returns possible build paths in order:
# gn
# outdir
# outdir/arch.mode
# Each path is provided in two versions: <path> and <path>/mode for buildbot.
def _possible_outdirs(self, options):
def outdirs():
if options.gn:
yield self._get_gn_outdir()
return
yield options.outdir
if options.arch and options.mode:
yield os.path.join(options.outdir,
'%s.%s' % (options.arch, options.mode))
for outdir in outdirs():
yield os.path.join(self.basedir, outdir)
# buildbot option
if options.mode:
yield os.path.join(self.basedir, outdir, options.mode)
def _get_gn_outdir(self):
gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
latest_timestamp = -1
latest_config = None
for gn_config in os.listdir(gn_out_dir):
gn_config_dir = os.path.join(gn_out_dir, gn_config)
if not os.path.isdir(gn_config_dir):
continue
if os.path.getmtime(gn_config_dir) > latest_timestamp:
latest_timestamp = os.path.getmtime(gn_config_dir)
latest_config = gn_config
if latest_config:
print(">>> Latest GN build found: %s" % latest_config)
return os.path.join(DEFAULT_OUT_GN, latest_config)
def _do_load_build_config(self, outdir, verbose=False):
build_config_path = os.path.join(outdir, "v8_build_config.json")
if not os.path.exists(build_config_path):
if verbose:
print("Didn't find build config: %s" % build_config_path)
raise TestRunnerError()
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
# In auto-detect mode the outdir is always where we found the build config.
# This ensures that we'll also take the build products from there.
self.outdir = os.path.dirname(build_config_path)
return BuildConfig(build_config_json)
def _process_default_options(self, options):
# We don't use the mode for more path-magic.
# Therefore, transform the buildbot mode here to match the build config value.
if options.mode:
options.mode = self._buildbot_to_v8_mode(options.mode)
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
if options.mode not in MODES: # pragma: no cover
print('%s mode is invalid' % options.mode)
raise TestRunnerError()
if MODES[options.mode].execution_mode != build_config_mode:
print ('execution mode (%s) for %s is inconsistent with build config '
'(%s)' % (
MODES[options.mode].execution_mode,
options.mode,
build_config_mode))
raise TestRunnerError()
self.mode_name = options.mode
else:
self.mode_name = build_config_mode
self.mode_options = MODES[self.mode_name]
if options.arch and options.arch != self.build_config.arch:
print('--arch value (%s) inconsistent with build config (%s).' % (
options.arch, self.build_config.arch))
raise TestRunnerError()
if options.shell_dir: # pragma: no cover
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
if options.j == 0:
if self.build_config.is_android:
# Adb isn't happy about multi-processed file pushing.
options.j = 1
else:
options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
def _buildbot_to_v8_mode(self, config):
"""Convert buildbot build configs to configs understood by the v8 runner.
V8 configs are always lower case and without the additional _x64 suffix
for 64 bit builds on windows with ninja.
"""
mode = config[:-4] if config.endswith('_x64') else config
return mode.lower()
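# Hedged examples of the conversion above (values are illustrative):
#   'Release'     -> 'release'
#   'Release_x64' -> 'release'
#   'Debug'       -> 'debug'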
def _process_options(self, options):
pass
def _setup_env(self):
# Use the v8 root as cwd because some test cases use "load" with relative paths.
os.chdir(self.basedir)
# Many tests assume an English interface.
os.environ['LANG'] = 'en_US.UTF-8'
symbolizer_option = self._get_external_symbolizer_option()
if self.build_config.asan:
asan_options = [
symbolizer_option,
'allow_user_segv_handler=1',
'allocator_may_return_null=1',
]
if not utils.GuessOS() in ['macos', 'windows']:
# LSAN is not available on mac and windows.
asan_options.append('detect_leaks=1')
else:
asan_options.append('detect_leaks=0')
if utils.GuessOS() == 'windows':
# https://crbug.com/967663
asan_options.append('detect_stack_use_after_return=0')
os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
if self.build_config.cfi_vptr:
os.environ['UBSAN_OPTIONS'] = ":".join([
'print_stacktrace=1',
'print_summary=1',
'symbolize=1',
symbolizer_option,
])
if self.build_config.ubsan_vptr:
os.environ['UBSAN_OPTIONS'] = ":".join([
'print_stacktrace=1',
symbolizer_option,
])
if self.build_config.msan:
os.environ['MSAN_OPTIONS'] = symbolizer_option
if self.build_config.tsan:
suppressions_file = os.path.join(
self.basedir,
'tools',
'sanitizers',
'tsan_suppressions.txt')
os.environ['TSAN_OPTIONS'] = " ".join([
symbolizer_option,
'suppressions=%s' % suppressions_file,
'exit_code=0',
'report_thread_leaks=0',
'history_size=7',
'report_destroy_locked=0',
])
def _get_external_symbolizer_option(self):
external_symbolizer_path = os.path.join(
self.basedir,
'third_party',
'llvm-build',
'Release+Asserts',
'bin',
'llvm-symbolizer',
)
if utils.IsWindows():
# Quote, because sanitizers might confuse colon as option separator.
external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
return 'external_symbolizer_path=%s' % external_symbolizer_path
def _parse_test_args(self, args):
if not args:
args = self._get_default_suite_names()
# Expand arguments with grouped tests. The args should reflect the list
# of suites as otherwise filters would break.
def expand_test_group(name):
return TEST_MAP.get(name, [name])
return reduce(list.__add__, map(expand_test_group, args), [])
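# Hedged example of the expansion above: running with the single argument
# 'd8_default' yields the suites listed for that key in TEST_MAP ('debugger',
# 'mjsunit', 'webkit', 'message', 'preparser', 'intl'), while an argument that
# is not a TEST_MAP key (e.g. 'cctest') passes through unchanged.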
def _args_to_suite_names(self, args, test_root):
# Use default tests if no test configuration was provided at the cmd line.
all_names = set(utils.GetSuitePaths(test_root))
args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
return [name for name in args_names if name in all_names]
def _get_default_suite_names(self):
return []
def _load_testsuite_generators(self, args, options):
names = self._args_to_suite_names(args, options.test_root)
test_config = self._create_test_config(options)
variables = self._get_statusfile_variables(options)
# Head generator with no elements
test_chain = testsuite.TestGenerator(0, [], [])
for name in names:
if options.verbose:
print('>>> Loading test suite: %s' % name)
suite = testsuite.TestSuite.Load(
os.path.join(options.test_root, name), test_config,
self.framework_name)
if self._is_testsuite_supported(suite, options):
tests = suite.load_tests_from_disk(variables)
test_chain.merge(tests)
return test_chain
def _is_testsuite_supported(self, suite, options):
"""A predicate that can be overridden to filter out unsupported TestSuite
instances (see NumFuzzer for usage)."""
return True
def _get_statusfile_variables(self, options):
simd_mips = (
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant == "r6" and
self.build_config.mips_use_msa)
mips_arch_variant = (
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant)
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"endurance_fuzzer": False,
"gc_fuzzer": False,
"gc_stress": False,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"is_clang": self.build_config.is_clang,
"is_full_debug": self.build_config.is_full_debug,
"mips_arch_variant": mips_arch_variant,
"mode": self.mode_options.status_mode
if not self.build_config.dcheck_always_on
else "debug",
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": False,
"optimize_for_size": "--optimize-for-size" in options.extra_flags,
"predictable": self.build_config.predictable,
"simd_mips": simd_mips,
"simulator_run": False,
"system": self.target_os,
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
"embedded_builtins": self.build_config.embedded_builtins,
"verify_csa": self.build_config.verify_csa,
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
}
def _create_test_config(self, options):
timeout = options.timeout * self._timeout_scalefactor(options)
return TestConfig(
command_prefix=options.command_prefix,
extra_flags=options.extra_flags,
isolates=options.isolates,
mode_flags=self.mode_options.flags,
no_harness=options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=options.random_seed,
run_skipped=options.run_skipped,
shell_dir=self.outdir,
timeout=timeout,
verbose=options.verbose,
)
def _timeout_scalefactor(self, options):
"""Increases timeout for slow build configurations."""
factor = self.mode_options.timeout_scalefactor
if self.build_config.arch in SLOW_ARCHS:
factor *= 4
if self.build_config.lite_mode:
factor *= 2
if self.build_config.predictable:
factor *= 4
if self.build_config.use_sanitizer:
factor *= 1.5
if self.build_config.is_full_debug:
factor *= 4
return factor
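# Hedged example of the scaling above: a 'release' build (mode factor 1) on an
# arm bot with a sanitizer enabled would scale the base timeout by
# 1 * 4 * 1.5 = 6, while a plain x64 'debug' build would only use the mode
# factor of 4.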
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
def _prepare_procs(self, procs):
procs = list(filter(None, procs))  # materialize so len() and indexing work on Python 3
for i in range(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
return None
return ShardProc(myid - 1, count)
def _get_shard_info(self, options):
"""
Returns pair:
(id of the current shard [1; number of shards], number of shards)
"""
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count: # pragma: no cover
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if (options.shard_run > 1 and
options.shard_run != shard_run): # pragma: no cover
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_run < 1 or shard_run > shard_count:
# TODO(machenbach): Turn this into an assert. If that's wrong on the
# bots, printing will be quite useless. Or refactor this code to make
# sure we get a return code != 0 after testing if we got here.
print("shard-run not a valid number, should be in [1:shard-count]")
print("defaulting back to running all tests")
return 1, 1
return shard_run, shard_count
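# Hedged examples of the shard resolution above: GTEST_TOTAL_SHARDS=4 and
# GTEST_SHARD_INDEX=0 in the environment yield (1, 4); with no GTEST_*
# variables, --shard-count=2 --shard-run=2 yields (2, 2); an out-of-range
# shard-run falls back to running everything as (1, 1).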
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
if options.junitout:
procs.append(progress.JUnitTestProgressIndicator(options.junitout,
options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
for proc in procs:
proc.configure(options)
for proc in procs:
try:
proc.set_test_count(test_count)
except AttributeError:
pass
return procs
def _create_result_tracker(self, options):
return progress.ResultsTracker(options.exit_after_n_failures)
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def _create_signal_proc(self):
return SignalProc()
def _create_rerun_proc(self, options):
if not options.rerun_failures_count:
return None
return RerunProc(options.rerun_failures_count,
options.rerun_failures_max)
|
|
import Live
from _Generic.Devices import *
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.EncoderElement import EncoderElement
from _Framework.ButtonElement import ButtonElement
from _Framework.DisplayDataSource import DisplayDataSource
class Live8DeviceComponent(ControlSurfaceComponent):
__doc__ = ' Class representing a device in Live '
def __init__(self, *a, **k):
super(Live8DeviceComponent, self).__init__(*a, **k)
self._device_banks = DEVICE_DICT
self._device_best_banks = DEVICE_BOB_DICT
self._device_bank_names = BANK_NAME_DICT
self._device = None
self._parameter_controls = None
self._bank_up_button = None
self._bank_down_button = None
self._bank_buttons = None
self._on_off_button = None
self._lock_button = None
self._lock_callback = None
self._device_name_data_source = None
self._device_bank_registry = {}
self._bank_index = 0
self._bank_name = '<No Bank>'
self._locked_to_device = False
return None
def disconnect(self):
self._lock_callback = None
self._device_bank_registry = None
if self._parameter_controls != None:
for control in self._parameter_controls:
control.release_parameter()
self._parameter_controls = None
if self._bank_up_button != None:
self._bank_up_button.remove_value_listener(self._bank_up_value)
self._bank_up_button = None
if self._bank_down_button != None:
self._bank_down_button.remove_value_listener(self._bank_down_value)
self._bank_down_button = None
if self._bank_buttons != None:
for button in self._bank_buttons:
button.remove_value_listener(self._bank_value)
self._bank_buttons = None
if self._on_off_button != None:
if self._on_off_button.value_has_listener(self._on_off_value):
self._on_off_button.remove_value_listener(self._on_off_value)
self._on_off_button = None
if self._lock_button != None:
if self._lock_button.value_has_listener(self._lock_value):
self._lock_button.remove_value_listener(self._lock_value)
self._lock_button = None
if self._device != None:
parameter = self._on_off_parameter()
if parameter != None:
if parameter.value_has_listener(self._on_on_off_changed):
parameter.remove_value_listener(self._on_on_off_changed)
if self._device.name_has_listener(self._on_device_name_changed):
self._device.remove_name_listener(self._on_device_name_changed)
if self._device.parameters_has_listener(self._on_parameters_changed):
self._device.remove_parameters_listener(self._on_parameters_changed)
self._device = None
return None
def on_enabled_changed(self):
self.update()
def set_device(self, device):
assert ((device == None) or isinstance(device, Live.Device.Device))
if ((not self._locked_to_device) and (device != self._device)):
if (self._device != None):
self._device.remove_name_listener(self._on_device_name_changed)
self._device.remove_parameters_listener(self._on_parameters_changed)
parameter = self._on_off_parameter()
if (parameter != None):
parameter.remove_value_listener(self._on_on_off_changed)
if (self._parameter_controls != None):
for control in self._parameter_controls:
control.release_parameter()
self._device = device
if (self._device != None):
self._bank_index = 0
self._device.add_name_listener(self._on_device_name_changed)
self._device.add_parameters_listener(self._on_parameters_changed)
parameter = self._on_off_parameter()
if (parameter != None):
parameter.add_value_listener(self._on_on_off_changed)
for key in self._device_bank_registry.keys():
if (key == self._device):
self._bank_index = self._device_bank_registry.get(key, 0)
del self._device_bank_registry[key]
break
self._bank_name = '<No Bank>' #added
self._on_device_name_changed()
self.update()
def set_bank_nav_buttons(self, down_button, up_button):
assert ((down_button != None) or (up_button == None))
assert ((up_button == None) or isinstance(up_button, ButtonElement))
assert ((down_button == None) or isinstance(down_button, ButtonElement))
do_update = False
if up_button != self._bank_up_button:
do_update = True
if self._bank_up_button != None:
self._bank_up_button.remove_value_listener(self._bank_up_value)
self._bank_up_button = up_button
if self._bank_up_button != None:
self._bank_up_button.add_value_listener(self._bank_up_value)
if down_button != self._bank_down_button:
do_update = True
if self._bank_down_button != None:
self._bank_down_button.remove_value_listener(self._bank_down_value)
self._bank_down_button = down_button
if self._bank_down_button != None:
self._bank_down_button.add_value_listener(self._bank_down_value)
if do_update:
self.update()
return None
def set_bank_buttons(self, buttons):
assert ((buttons == None) or isinstance(buttons, tuple))
if self._bank_buttons != None:
for button in self._bank_buttons:
button.remove_value_listener(self._bank_value)
self._bank_buttons = buttons
if self._bank_buttons != None:
identify_sender = True
for button in self._bank_buttons:
button.add_value_listener(self._bank_value, identify_sender)
self.update()
return None
def set_parameter_controls(self, controls):
assert (controls != None)
assert isinstance(controls, tuple)
if self._device != None and self._parameter_controls != None:
for control in self._parameter_controls:
control.release_parameter()
for control in controls:
assert (control != None)
assert isinstance(control, EncoderElement)
self._parameter_controls = controls
self.update()
return None
def set_lock_to_device(self, lock, device):
assert isinstance(lock, type(False))
assert (lock is not self._locked_to_device)
if lock:
self.set_device(device)
else:
assert (device == self._device)
self._locked_to_device = lock
if self.is_enabled():
if (self._lock_button != None):
if self._locked_to_device:
self._lock_button.turn_on()
else:
self._lock_button.turn_off()
def set_lock_button(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if self._lock_button != None:
self._lock_button.remove_value_listener(self._lock_value)
self._lock_button = None
self._lock_button = button
if self._lock_button != None:
self._lock_button.add_value_listener(self._lock_value)
self.update()
return None
def set_on_off_button(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if self._on_off_button != None:
self._on_off_button.remove_value_listener(self._on_off_value)
self._on_off_button = None
self._on_off_button = button
if self._on_off_button != None:
self._on_off_button.add_value_listener(self._on_off_value)
self.update()
return None
def set_lock_callback(self, callback):
assert (self._lock_callback == None)
assert (callback != None)
assert (dir(callback).count('im_func') == 1)
self._lock_callback = callback
return None
def restore_bank(self, bank_index):
if self._device != None and self._is_banking_enabled() and self._locked_to_device and self._number_of_parameter_banks() > bank_index and self._bank_index != bank_index:
self._bank_index = bank_index
self.update()
return None
def device_name_data_source(self):
if self._device_name_data_source == None:
self._device_name_data_source = DisplayDataSource()
self._on_device_name_changed()
return self._device_name_data_source
def update(self):
if (self.is_enabled() and (self._device != None)):
self._device_bank_registry[self._device] = self._bank_index
if (self._parameter_controls != None):
old_bank_name = self._bank_name #added
self._assign_parameters()
if self._bank_name != old_bank_name: #added
try:
self._show_msg_callback(self._device.name + ' Bank: ' + self._bank_name) #added
except Exception:
pass
if ((self._bank_up_button != None) and (self._bank_down_button != None)):
if (self._number_of_parameter_banks()) > (self._bank_index + 1):
self._bank_up_button.turn_on()
else:
self._bank_up_button.turn_off()
if (self._bank_index > 0):
self._bank_down_button.turn_on()
else:
self._bank_down_button.turn_off()
if (self._bank_buttons != None):
for index in range(len(self._bank_buttons)):
if (index == self._bank_index):
self._bank_buttons[index].turn_on()
else:
self._bank_buttons[index].turn_off()
else:
if (self._lock_button != None):
self._lock_button.turn_off()
if (self._bank_up_button != None):
self._bank_up_button.turn_off()
if (self._bank_down_button != None):
self._bank_down_button.turn_off()
if (self._bank_buttons != None):
for button in self._bank_buttons:
button.turn_off()
if (self._parameter_controls != None):
for control in self._parameter_controls:
control.release_parameter()
#self._rebuild_callback()
def _bank_up_value(self, value):
assert (self._bank_up_button != None)
assert (value != None)
assert isinstance(value, int)
if self.is_enabled():
if ((not self._bank_up_button.is_momentary()) or (value != 0)):
if (self._device != None):
num_banks = self._number_of_parameter_banks()
if (self._bank_down_button == None):
self._bank_name = ''
self._bank_index = ((self._bank_index + 1) % num_banks)
self.update()
elif (num_banks > (self._bank_index + 1)):
self._bank_name = ''
self._bank_index += 1
self.update()
def _bank_down_value(self, value):
assert (self._bank_down_button != None)
assert (value != None)
assert isinstance(value, int)
if self.is_enabled():
if ((not self._bank_down_button.is_momentary()) or (value != 0)):
if ((self._device != None) and (self._bank_index > 0)):
self._bank_name = ''
self._bank_index -= 1
self.update()
def _lock_value(self, value):
assert (self._lock_button != None)
assert (self._lock_callback != None)
assert (value != None)
assert isinstance(value, int)
if not self._lock_button.is_momentary() or value != 0:
self._lock_callback()
return None
def _on_off_value(self, value):
assert (self._on_off_button != None)
assert (value in range(128))
if not self._on_off_button.is_momentary() or value != 0:
parameter = self._on_off_parameter()
if parameter != None and parameter.is_enabled:
parameter.value = float(int(parameter.value == 0.0))
return None
def _bank_value(self, value, button):
assert (self._bank_buttons != None)
assert (value != None)
assert (button != None)
assert isinstance(value, int)
assert isinstance(button, ButtonElement)
assert (list(self._bank_buttons).count(button) == 1)
if self.is_enabled() and self._device != None: #added
if ((not button.is_momentary()) or (value != 0)):
bank = list(self._bank_buttons).index(button)
if (bank != self._bank_index):
if (self._number_of_parameter_banks() > bank):
self._bank_name = '' #added
self._bank_index = bank
self.update()
else:
try:
self._show_msg_callback(self._device.name + ' Bank: ' + self._bank_name)
except Exception:
pass
def _is_banking_enabled(self):
direct_banking = (self._bank_buttons != None)
roundtrip_banking = (self._bank_up_button != None)
increment_banking = ((self._bank_up_button != None) and (self._bank_down_button != None))
return (direct_banking or (roundtrip_banking or increment_banking))
def _assign_parameters(self):
assert self.is_enabled()
assert (self._device != None)
assert (self._parameter_controls != None)
self._bank_name = ('Bank ' + str(self._bank_index + 1)) #added
if (self._device.class_name in self._device_banks.keys()): #modified
assert (self._device.class_name in self._device_best_banks.keys())
banks = self._device_banks[self._device.class_name]
bank = None
#if (not self._is_banking_enabled()):
# banks = self._device_best_banks[self._device.class_name]
# self._bank_name = 'Best of Parameters' #added
if (len(banks) > self._bank_index):
bank = banks[self._bank_index]
if self._is_banking_enabled(): #added
if self._device.class_name in self._device_bank_names.keys(): #added
self._bank_name = self._device_bank_names[self._device.class_name] #added *recheck
assert ((bank == None) or (len(bank) >= len(self._parameter_controls)))
for index in range(len(self._parameter_controls)):
parameter = None
if (bank != None):
parameter = get_parameter_by_name(self._device, bank[index])
if (parameter != None):
self._parameter_controls[index].connect_to(parameter)
else:
self._parameter_controls[index].release_parameter()
else:
parameters = self._device_parameters_to_map()
num_controls = len(self._parameter_controls)
index = (self._bank_index * num_controls)
for control in self._parameter_controls:
if (index < len(parameters)):
control.connect_to(parameters[index])
else:
control.release_parameter()
index += 1
def _on_device_name_changed(self):
if (self._device_name_data_source != None):
if (self.is_enabled() and (self._device != None)):
self._device_name_data_source.set_display_string(self._device.name)
else:
self._device_name_data_source.set_display_string('No Device')
def _on_parameters_changed(self):
self.update()
def _on_off_parameter(self):
result = None
if (self._device != None):
for parameter in self._device.parameters:
if str(parameter.name).startswith('Device On'):
result = parameter
break
return result
def _on_on_off_changed(self):
if (self.is_enabled() and (self._on_off_button != None)):
turn_on = False
if (self._device != None):
parameter = self._on_off_parameter()
turn_on = ((parameter != None) and (parameter.value > 0.0))
if turn_on:
self._on_off_button.turn_on()
else:
self._on_off_button.turn_off()
def _device_parameters_to_map(self):
assert self.is_enabled()
assert (self._device != None)
assert (self._parameter_controls != None)
return self._device.parameters[1:] #check this...
def _number_of_parameter_banks(self):
return number_of_parameter_banks(self._device) #added
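# Hedged usage sketch (illustrative only; the variable names are hypothetical
# and the expected types follow from the asserts in the setters above): a
# ControlSurface script would typically wire the component up roughly like
#
# device = Live8DeviceComponent()
# device.set_parameter_controls(tuple(encoders))   # tuple of EncoderElement
# device.set_bank_nav_buttons(bank_down_button, bank_up_button)
# device.set_on_off_button(on_off_button)
# device.set_lock_button(lock_button)
# device.set_lock_callback(self._toggle_lock)      # a bound method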
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.core import exceptions
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings # noqa
from django.utils import html
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.stacks import forms
from openstack_dashboard.dashboards.project.stacks import mappings
from openstack_dashboard.dashboards.project.stacks import tables
INDEX_URL = reverse('horizon:project:stacks:index')
class MockResource(object):
def __init__(self, resource_type, physical_resource_id):
self.resource_type = resource_type
self.physical_resource_id = physical_resource_id
class MappingsTests(test.TestCase):
def test_mappings(self):
def assertMappingUrl(url, resource_type, physical_resource_id):
mock = MockResource(resource_type, physical_resource_id)
mock_url = mappings.resource_to_url(mock)
self.assertEqual(url, mock_url)
assertMappingUrl(
'/project/networks/subnets/aaa/detail',
'OS::Quantum::Subnet',
'aaa')
assertMappingUrl(
None,
'OS::Quantum::Subnet',
None)
assertMappingUrl(
None,
None,
None)
assertMappingUrl(
None,
'AWS::AutoScaling::LaunchConfiguration',
'aaa')
assertMappingUrl(
'/project/instances/aaa/',
'AWS::EC2::Instance',
'aaa')
assertMappingUrl(
'/project/containers/aaa/',
'OS::Swift::Container',
'aaa')
assertMappingUrl(
None,
'Foo::Bar::Baz',
'aaa')
def test_stack_output(self):
self.assertEqual(u'foo', mappings.stack_output('foo'))
self.assertEqual(u'', mappings.stack_output(None))
outputs = ['one', 'two', 'three']
expected_text = """[\n "one", \n "two", \n "three"\n]"""
self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
mappings.stack_output(outputs))
outputs = {'foo': 'bar'}
expected_text = """{\n "foo": "bar"\n}"""
self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
mappings.stack_output(outputs))
self.assertEqual(
u'<a href="http://www.example.com/foo" target="_blank">'
'http://www.example.com/foo</a>',
mappings.stack_output('http://www.example.com/foo'))
class StackTests(test.TestCase):
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.heat: ('stacks_list',)})
def test_index_paginated(self):
stacks = self.stacks.list()[:5]
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks,
True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks[:2],
True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks[2:4],
True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[4].id,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks[4:],
True, True])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['stacks_table'].data),
len(stacks))
self.assertTemplateUsed(res, 'project/stacks/index.html')
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param, stacks[2].id)
res = self.client.get(url)
# get second page (items 2-4)
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param, stacks[4].id)
res = self.client.get(url)
# get third page (item 5)
self.assertEqual(len(res.context['stacks_table'].data),
1)
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.heat: ('stacks_list',)})
def test_index_prev_paginated(self):
stacks = self.stacks.list()[:3]
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks,
True, False])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks[:2],
True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='desc') \
.AndReturn([stacks[2:],
True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='asc') \
.AndReturn([stacks[:2],
True, True])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['stacks_table'].data),
len(stacks))
self.assertTemplateUsed(res, 'project/stacks/index.html')
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param, stacks[2].id)
res = self.client.get(url)
# get second page (item 3)
self.assertEqual(len(res.context['stacks_table'].data), 1)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.prev_pagination_param, stacks[2].id)
res = self.client.get(url)
# prev back to get first page with 2 pages
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
@test.create_stubs({api.heat: ('stack_create', 'template_validate')})
def test_launch_stack(self):
template = self.stack_templates.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
template=template.data) \
.AndReturn(json.loads(template.validate))
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=template.data,
parameters=IsA(dict),
password='password')
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('stack_create', 'template_validate')})
def test_launch_stack_with_environment(self):
template = self.stack_templates.first()
environment = self.stack_environments.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
template=template.data,
environment=environment.data) \
.AndReturn(json.loads(template.validate))
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=template.data,
environment=environment.data,
parameters=IsA(dict),
password='password')
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'environment_source': 'raw',
'environment_data': environment.data,
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'environment_source': 'raw',
'environment_data': environment.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('template_validate',)})
def test_launch_stack_with_hidden_parameters(self):
template = {
'data': ('heat_template_version: 2013-05-23\n'
'parameters:\n'
' public_string:\n'
' type: string\n'
' secret_string:\n'
' type: string\n'
' hidden: true\n'),
'validate': {
'Description': 'No description',
'Parameters': {
'public_string': {
'Label': 'public_string',
'Description': '',
'Type': 'String',
'NoEcho': 'false'
},
'secret_string': {
'Label': 'secret_string',
'Description': '',
'Type': 'String',
'NoEcho': 'true'
}
}
}
}
api.heat.template_validate(IsA(http.HttpRequest),
template=template['data']) \
.AndReturn(template['validate'])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template['data'],
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
# ensure the fields were rendered correctly
self.assertContains(res, '<input class="form-control" '
'id="id___param_public_string" '
'name="__param_public_string" '
'type="text" />', html=True)
self.assertContains(res, '<input class="form-control" '
'id="id___param_secret_string" '
'name="__param_secret_string" '
'type="password" />', html=True)
@test.create_stubs({api.heat: ('stack_update', 'stack_get',
'template_get', 'template_validate')})
def test_edit_stack_template(self):
template = self.stack_templates.first()
stack = self.stacks.first()
# GET to template form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
# POST template form, validation
api.heat.template_validate(IsA(http.HttpRequest),
template=template.data) \
.AndReturn(json.loads(template.validate))
# GET to edit form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest),
stack.id) \
.AndReturn(json.loads(template.validate))
# POST to edit form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
fields = {
'stack_name': stack.stack_name,
'disable_rollback': True,
'timeout_mins': 61,
'password': 'password',
'template': IsA(unicode),
'parameters': IsA(dict)
}
api.heat.stack_update(IsA(http.HttpRequest),
stack_id=stack.id,
**fields)
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:change_template',
args=[stack.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/change_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.ChangeTemplateForm.__name__}
res = self.client.post(url, form_data)
url = reverse('horizon:project:stacks:edit_stack',
args=[stack.id, ])
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
'stack_id': stack.id,
"timeout_mins": 61,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.EditStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_launch_stack_form_invalid_names_fail(self):
self._test_launch_stack_invalid_name('2_StartWithDigit')
self._test_launch_stack_invalid_name('_StartWithUnderscore')
self._test_launch_stack_invalid_name('.StartWithPoint')
def _test_launch_stack_invalid_name(self, name):
template = self.stack_templates.first()
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
error = ('Name must start with a letter and may only contain letters, '
'numbers, underscores, periods and hyphens.')
self.assertFormErrors(res, 1)
self.assertFormError(res, "form", 'stack_name', error)
class TemplateFormTests(test.TestCase):
class SimpleFile(object):
def __init__(self, name, data):
self.name = name
self.data = data
def read(self):
return self.data
def test_create_upload_form_attributes(self):
attrs = forms.create_upload_form_attributes(
'env', 'url', 'Environment')
self.assertEqual(attrs['data-envsource-url'], 'Environment')
def test_clean_file_upload_form_url(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_url': 'http://templateurl.com',
}
t.clean_uploaded_files('template', 'template', precleaned, {})
self.assertEqual(precleaned['template_url'], 'http://templateurl.com')
def test_clean_file_upload_form_multiple(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_url': 'http://templateurl.com',
'template_data': 'http://templateurl.com',
}
self.assertRaises(
exceptions.ValidationError,
t.clean_uploaded_files,
'template',
'template',
precleaned,
{})
def test_clean_file_upload_form_invalid_json(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_data': 'http://templateurl.com',
}
json_str = '{notvalidjson::::::json/////json'
files = {'template_upload':
self.SimpleFile('template_name', json_str)}
self.assertRaises(
exceptions.ValidationError,
t.clean_uploaded_files,
'template',
'template',
precleaned,
files)
def test_clean_file_upload_form_valid_data(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_data': 'http://templateurl.com',
}
json_str = '{"isvalid":"json"}'
files = {'template_upload':
self.SimpleFile('template_name', json_str)}
t.clean_uploaded_files('template', 'template', precleaned, files)
self.assertEqual(
json_str,
precleaned['template_data'])
|
|
"Base Cache class."
import time
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
pass
class CacheKeyWarning(RuntimeWarning):
pass
# Stub class to ensure not passing in a `timeout` argument results in
# the default timeout
DEFAULT_TIMEOUT = object()
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
"""
Default function to generate keys.
Construct the key used by all other methods. By default, prepend
the `key_prefix'. KEY_FUNCTION can be used to specify an alternate
function with custom key making behavior.
"""
return '%s:%s:%s' % (key_prefix, version, key)
def get_key_func(key_func):
"""
Function to decide which key function to use.
Default to ``default_key_func``.
"""
if key_func is not None:
if callable(key_func):
return key_func
else:
return import_string(key_func)
return default_key_func
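# A minimal illustrative sketch (not part of Django): a custom key function with
# the same signature as default_key_func above. KEY_FUNCTION could name it as a
# dotted-path string or pass the callable directly; the function name and the
# key layout below are invented for illustration.
def version_first_key_func(key, key_prefix, version):
    """Put the version before the prefix, e.g. '1:myprefix:user:42'."""
    return '%s:%s:%s' % (version, key_prefix, key)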
class BaseCache:
def __init__(self, params):
timeout = params.get('timeout', params.get('TIMEOUT', 300))
if timeout is not None:
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
options = params.get('OPTIONS', {})
max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
self.key_prefix = params.get('KEY_PREFIX', '')
self.version = params.get('VERSION', 1)
self.key_func = get_key_func(params.get('KEY_FUNCTION'))
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Return the timeout value usable by this backend based upon the provided
timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else time.time() + timeout
def make_key(self, key, version=None):
"""
Construct the key used by all other methods. By default, use the
key_func to generate a key (which, by default, prepends the
`key_prefix' and 'version'). A different key function can be provided
at the time of cache construction; alternatively, you can subclass the
cache backend to provide custom key making behavior.
"""
if version is None:
version = self.version
new_key = self.key_func(key, self.key_prefix, version)
return new_key
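    # Illustrative note (not part of Django): with the defaults wired up in
    # __init__ above (KEY_PREFIX='', VERSION=1) and default_key_func, a call
    # like make_key('user:42') returns ':1:user:42'.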
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a value in the cache if the key does not already exist. If
timeout is given, use that timeout for the key; otherwise use the
default cache timeout.
Return True if the value was stored, False otherwise.
"""
raise NotImplementedError('subclasses of BaseCache must provide an add() method')
def get(self, key, default=None, version=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError('subclasses of BaseCache must provide a get() method')
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a value in the cache. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
"""
raise NotImplementedError('subclasses of BaseCache must provide a set() method')
def delete(self, key, version=None):
"""
Delete a key from the cache, failing silently.
"""
raise NotImplementedError('subclasses of BaseCache must provide a delete() method')
def get_many(self, keys, version=None):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Return a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k, version=version)
if val is not None:
d[k] = val
return d
def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
"""
Fetch a given key from the cache. If the key does not exist,
add the key and set it to the default value. The default value can
also be any callable. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
Return the value of the key stored or retrieved.
"""
val = self.get(key, version=version)
if val is None and default is not None:
if callable(default):
default = default()
self.add(key, default, timeout=timeout, version=version)
# Fetch the value again to avoid a race condition if another caller
# added a value between the first get() and the add() above.
return self.get(key, default, version=version)
return val
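    # Illustrative note (not part of Django): typical use, where build_report is
    # a hypothetical callable that only runs on a cache miss:
    #   report = cache.get_or_set('daily-report', build_report, timeout=300)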
def has_key(self, key, version=None):
"""
Return True if the key is in the cache and has not expired.
"""
return self.get(key, version=version) is not None
def incr(self, key, delta=1, version=None):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
value = self.get(key, version=version)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
self.set(key, new_value, version=version)
return new_value
def decr(self, key, delta=1, version=None):
"""
Subtract delta from value in the cache. If the key does not exist, raise
a ValueError exception.
"""
return self.incr(key, -delta, version=version)
def __contains__(self, key):
"""
Return True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, use that timeout for the key; otherwise use the
default cache timeout.
"""
for key, value in data.items():
self.set(key, value, timeout=timeout, version=version)
def delete_many(self, keys, version=None):
"""
Delete a bunch of values in the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key, version=version)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError('subclasses of BaseCache must provide a clear() method')
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
if len(key) > MEMCACHE_MAX_KEY_LENGTH:
warnings.warn(
'Cache key will cause errors if used with memcached: %r '
'(longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH), CacheKeyWarning
)
for char in key:
if ord(char) < 33 or ord(char) == 127:
warnings.warn(
'Cache key contains characters that will cause errors if '
'used with memcached: %r' % key, CacheKeyWarning
)
break
def incr_version(self, key, delta=1, version=None):
"""
Add delta to the cache version for the supplied key. Return the new
version.
"""
if version is None:
version = self.version
value = self.get(key, version=version)
if value is None:
raise ValueError("Key '%s' not found" % key)
self.set(key, value, version=version + delta)
self.delete(key, version=version)
return version + delta
def decr_version(self, key, delta=1, version=None):
"""
Subtract delta from the cache version for the supplied key. Return the
new version.
"""
return self.incr_version(key, -delta, version)
def close(self, **kwargs):
"""Close the cache connection"""
pass
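# A minimal illustrative sketch (not part of Django) of a concrete backend built
# on BaseCache above. It shows which methods a subclass must supply
# (add/get/set/delete/clear) and how make_key(), validate_key() and
# get_backend_timeout() are meant to be used. The class name LocMemSketchCache
# and its internal attributes are invented for illustration.
class LocMemSketchCache(BaseCache):
    def __init__(self, params=None):
        super().__init__(params or {})
        self._data = {}     # key -> value
        self._expiry = {}   # key -> absolute expiry timestamp (or None)

    def _expired(self, key):
        exp = self._expiry.get(key)
        return exp is not None and exp <= time.time()

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        if key in self._data and not self._expired(key):
            return False
        self._data[key] = value
        self._expiry[key] = self.get_backend_timeout(timeout)
        return True

    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        if key not in self._data or self._expired(key):
            self._data.pop(key, None)
            self._expiry.pop(key, None)
            return default
        return self._data[key]

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._data[key] = value
        self._expiry[key] = self.get_backend_timeout(timeout)

    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self._data.pop(key, None)
        self._expiry.pop(key, None)

    def clear(self):
        self._data.clear()
        self._expiry.clear()

# Usage (still part of the sketch): the inherited helpers work unchanged.
#   cache = LocMemSketchCache({'TIMEOUT': 60})
#   cache.set('greeting', 'hello')
#   cache.get_or_set('answer', lambda: 42)   # -> 42
#   'greeting' in cache                      # -> True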
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import time
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true, assert_false
from desktop.auth.backend import rewrite_user
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group, grant_access
from desktop.models import uuid_default
from useradmin.models import User
from metadata.conf import OPTIMIZER, has_optimizer
from metadata.optimizer.optimizer_client import OptimizerClient
LOG = logging.getLogger(__name__)
class BaseTestOptimizerClient(object):
integration = True
UPLOADED = False
DATABASE = 'db1'
@classmethod
def setup_class(cls):
if not has_optimizer():
raise SkipTest
cls.client = make_logged_in_client(username='test', is_superuser=False)
cls.user = User.objects.get(username='test')
cls.user = rewrite_user(cls.user)
add_to_group('test')
grant_access("test", "test", "metadata")
grant_access("test", "test", "optimizer")
cls.api = OptimizerClient(user=cls.user)
if not BaseTestOptimizerClient.UPLOADED:
cls.upload()
BaseTestOptimizerClient.UPLOADED = True
# Should run first
@classmethod
def upload(cls):
queries = [
(uuid_default(), 0, "select emps.id from emps where emps.name = 'Joe' group by emps.mgr, emps.id;", BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select emps.name from emps where emps.num = 007 group by emps.state, emps.name;", BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select Part.partkey, max(Part.salary), Part.name, Part.type from %s.Part where Part.yyprice > 2095" % BaseTestOptimizerClient.DATABASE, BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select Part.partkey, Part.name, Part.mfgr FROM Part WHERE Part.name LIKE '%red';", BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select count(*) as loans from account a where a.account_state_id in (5,9);", BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select orders.key, orders.id from orders where orders.price < 9999", BaseTestOptimizerClient.DATABASE),
(uuid_default(), 0, "select x from x join y where x.a = y.a;", BaseTestOptimizerClient.DATABASE),
# DDL
(uuid_default(), 0, ' '.join('''CREATE TABLE `web_logs`(
`_version_` bigint,
`app` string COMMENT 'app',
`bytes` smallint COMMENT 'http://demo.gethue.com/ is',
`city` string COMMENT 'city',
`client_ip` string,
`code` tinyint,
`country_code` string,
`country_code3` string,
`country_name` string,
`device_family` string,
`extension` string,
`latitude` float,
`longitude` float,
`method` string,
`os_family` string,
`os_major` string,
`protocol` string,
`record` string,
`referer` string,
`region_code` bigint,
`request` string,
`subapp` string,
`time` string,
`url` string,
`user_agent` string,
`user_agent_family` string,
`user_agent_major` string,
`id` string)
COMMENT 'http://demo.gethue.com/ rocks!'
PARTITIONED BY (
`date` string)
'''.splitlines()), BaseTestOptimizerClient.DATABASE)
]
resp = cls.api.upload(data=queries, data_type='queries', source_platform='hive')
assert_true('status' in resp, resp)
assert_true('count' in resp, resp)
assert_true('state' in resp['status'], resp)
assert_true('workloadId' in resp['status'], resp)
assert_true('failedQueries' in resp['status'], resp)
assert_true('successQueries' in resp['status'], resp)
assert_true(resp['status']['state'] in ('WAITING', 'FINISHED', 'FAILED'), resp['status']['state'])
resp = cls.api.upload_status(workload_id=resp['status']['workloadId'])
assert_true('status' in resp, resp)
assert_true('state' in resp['status'], resp)
assert_true('workloadId' in resp['status'], resp)
i = 0
while i < 60 and resp['status']['state'] not in ('FINISHED', 'FAILED'):
resp = cls.api.upload_status(workload_id=resp['status']['workloadId'])
i += 1
time.sleep(1)
LOG.info('Upload state: %(state)s' % resp['status'])
assert_true(i < 60 and resp['status']['state'] == 'FINISHED', resp)
assert_equal(resp['status']['successQueries'], 8, resp)
@classmethod
def teardown_class(cls):
cls.user.is_superuser = False
cls.user.save()
class TestOptimizerClient(BaseTestOptimizerClient):
def test_tenant(self):
resp = self.api.get_tenant(cluster_id=OPTIMIZER.CLUSTER_ID.get())
assert_true('tenant' in resp, resp)
def test_top_tables(self):
database_name = 'default'
resp = self.api.top_tables(database_name=database_name)
assert_true(isinstance(resp['results'], list), resp)
assert_true('eid' in resp['results'][0], resp)
assert_true('name' in resp['results'][0], resp)
database_name = BaseTestOptimizerClient.DATABASE
resp = self.api.top_tables(database_name=database_name)
assert_true(isinstance(resp['results'], list), resp)
def test_table_details(self): # Requires test_upload to run before
resp = self.api.table_details(database_name='default', table_name='emps')
assert_true('columnCount' in resp, resp)
assert_true('createCount' in resp, resp)
assert_true('table_ddl' in resp, resp)
assert_true('deleteCount' in resp, resp)
assert_true('iview_ddl' in resp, resp)
assert_true('updateCount' in resp, resp)
assert_true('colStats' in resp, resp)
assert_true('joinCount' in resp, resp)
assert_true('view_ddl' in resp, resp)
assert_true('tableStats' in resp, resp)
assert_true('queryCount' in resp, resp)
assert_true('selectCount' in resp, resp)
assert_true('insertCount' in resp, resp)
assert_true('tid' in resp, resp)
assert_true('type' in resp, resp)
assert_true('name' in resp, resp)
resp = self.api.table_details(database_name=BaseTestOptimizerClient.DATABASE, table_name='Part')
assert_true('tid' in resp, resp)
assert_true('columnCount' in resp, resp)
def test_query_risk(self):
query = 'Select * from items'
resp = self.api.query_risk(query=query, source_platform='hive', db_name=BaseTestOptimizerClient.DATABASE)
assert_true(len(resp) > 0, resp)
assert_true('riskAnalysis' in resp['hints'][0], resp)
assert_true('risk' in resp['hints'][0], resp)
assert_true('riskRecommendation' in resp['hints'][0], resp)
def test_query_compatibility(self):
source_platform = 'hive'
target_platform = 'impala'
query = 'Select * from (Select item.id from item)'
resp = self.api.query_compatibility(source_platform=source_platform, target_platform=target_platform, query=query)
assert_true('clauseName' in resp, resp)
assert_true('clauseError' in resp, resp)
assert_true('queryError' in resp, resp)
assert_true('clauseString' in resp, resp)
def test_top_filters(self):
resp = self.api.top_filters(db_tables=['%s.Part' % BaseTestOptimizerClient.DATABASE])
assert_true(len(resp['results']) > 0, resp)
def test_top_joins(self):
resp = self.api.top_joins(db_tables=['%s.x' % BaseTestOptimizerClient.DATABASE])
assert_true(len(resp['results']) > 0, resp)
    assert_true(resp['results'][0]['tables'], [u'%s.x' % BaseTestOptimizerClient.DATABASE, u'%s.y' % BaseTestOptimizerClient.DATABASE])
assert_true('queryIds' in resp['results'][0], resp)
assert_true('totalTableCount' in resp['results'][0], resp)
assert_true('totalQueryCount' in resp['results'][0], resp)
assert_true('joinType' in resp['results'][0], resp)
assert_equal(resp['results'][0]['joinCols'], [{u'columns': [u'%s.x.a' % BaseTestOptimizerClient.DATABASE, u'%s.y.a' % BaseTestOptimizerClient.DATABASE]}])
def test_top_aggs(self):
resp = self.api.top_aggs(db_tables=['%s.Part' % BaseTestOptimizerClient.DATABASE])
assert_true(len(resp['results']) > 0, resp)
assert_true('tables' in resp['results'][0], resp)
assert_true('queryIds' in resp['results'][0], resp)
assert_true('totalTableCount' in resp['results'][0], resp)
assert_true('totalQueryCount' in resp['results'][0], resp)
assert_true('type' in resp['results'][0], resp)
assert_true('columns' in resp['results'][0], resp)
def test_top_columns(self):
resp = self.api.top_columns(db_tables=['%s.Part' % BaseTestOptimizerClient.DATABASE])
assert_true('orderbyColumns' in resp, resp)
assert_true('selectColumns' in resp, resp)
assert_true('filterColumns' in resp, resp)
assert_true('joinColumns' in resp, resp)
assert_true('groupbyColumns' in resp, resp)
assert_true(resp['orderbyColumns'], resp)
    assert_true(resp['selectColumns'], resp)
    assert_true(resp['filterColumns'], resp)
    assert_true(resp['joinColumns'], resp)
    assert_true(resp['groupbyColumns'], resp)
def test_top_databases(self):
resp = self.api.top_databases()
assert_true(len(resp['results']) > 0, resp)
assert_true('instanceCount' in resp['results'][0], resp)
assert_true('totalTableCount' in resp['results'][0], resp)
def test_similar_queries(self):
raise SkipTest # Experimental only
source_platform = 'hive'
query = 'Select * from (Select item.id from item)'
resp = self.api.similar_queries(source_platform=source_platform, query=query)
assert_true('querySignature' in resp, resp)
assert_true('query' in resp, resp)
class TestOptimizerRiskApi(BaseTestOptimizerClient):
def test_risk_10_views(self):
source_platform = 'hive'
query = '''SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM
(SELECT code
FROM sample_01) t1) t2) t3) t4) t5) t6) t7) t8) t9) t10) t11) t12
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['>=10 Inline Views present in query.'], resp['hints'])
def test_risk_cartesian_cross_join(self):
source_platform = 'hive'
query = '''SELECT ID, NAME, AMOUNT, DATE FROM CUSTOMERS, ORDERS
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['Cartesian or CROSS join found.'], resp['hints'])
source_platform = 'hive'
query = '''SELECT s07.description,
s07.total_emp,
s08.total_emp,
s07.salary
FROM sample_07 s07,
sample_08 s08
WHERE s07.salary > 88
ORDER BY s07.salary DESC
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['Cartesian or CROSS join found.'], resp['hints'])
def test_risk_5_joins(self):
source_platform = 'hive'
query = '''SELECT s07.description, s07.total_emp, s08.total_emp, s07.salary
FROM
sample_07 s07
JOIN
sample_08 s08
ON ( s07.code = s08.code )
JOIN
sample_06 s06
ON ( s07.code = s06.code )
JOIN
sample_05 s05
ON ( s07.code = s05.code )
JOIN
sample_04 s04
ON ( s07.code = s04.code )
JOIN
sample_03 s03
ON ( s07.code = s03.code )
JOIN
sample_02 s02
ON ( s07.code = s02.code )
JOIN
sample_01 s01
ON ( s07.code = s01.code )
WHERE
( s07.total_emp > s08.total_emp
AND s07.salary > 100000 )
ORDER BY s07.salary DESC
LIMIT 1000
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['>=5 table joins or >=10 join conditions found.'], resp['hints'])
def test_risk_10_group_by_columns(self):
source_platform = 'impala'
query = '''SELECT *
FROM transactions
GROUP BY account_client,
account_cty_code,
account_num,
allow_code,
ally_811,
anti_detect,
anti_transcode,
cc_fee,
auth_code,
cvv_eval,
cred_extract, denied_code
limit 5
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['>=10 columns present in GROUP BY list.'], resp['hints'])
assert_equal(resp['noDDL'], ['%s.transactions' % BaseTestOptimizerClient.DATABASE])
assert_equal(resp['noStats'], ['%s.transactions' % BaseTestOptimizerClient.DATABASE])
def test_risk_cross_join_false_positive(self):
source_platform = 'hive'
query = '''SELECT s07.description, s07.total_emp, s08.total_emp, s07.salary
FROM
sample_07 s07 JOIN
sample_08 s08
ON ( s07.code = s08.code )
WHERE
( s07.total_emp > s08.total_emp
AND s07.salary > 100000 )
ORDER BY s07.salary DESC
LIMIT 1000
'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['Cartesian or CROSS join found.'], resp['hints'], present=False)
source_platform = 'hive'
query = '''select x from x join y where x.a = y.a'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=BaseTestOptimizerClient.DATABASE)
_assert_risks(['Cartesian or CROSS join found.'], resp['hints'], present=False)
def test_risk_no_filter_on_any_partitioned_column(self):
source_platform = 'hive'
query = '''SELECT * FROM web_logs'''
db_name = BaseTestOptimizerClient.DATABASE
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'])
assert_false(resp['noDDL'], resp) # DDL was uploaded already
assert_equal(resp['noStats'], ['%s.web_logs' % BaseTestOptimizerClient.DATABASE])
source_platform = 'hive'
query = '''SELECT * FROM web_logs LIMIT 1'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'])
source_platform = 'hive'
query = '''SELECT * FROM web_logs WHERE app='oozie' LIMIT 1'''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'])
source_platform = 'hive'
query = '''SELECT * FROM web_logs WHERE date='20180101' '''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'], present=False)
source_platform = 'hive'
query = '''SELECT * FROM web_logs WHERE app='oozie' AND date='20180101' '''
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'], present=False)
def test_risk_listing_all_risk_tables_all_the_time(self):
source_platform = 'hive'
query = '''SELECT * FROM web_logs JOIN a ON web_logs.id = a.id LIMIT 100'''
db_name = BaseTestOptimizerClient.DATABASE
resp = self.api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
_assert_risks(['Query on partitioned table is missing filters on partioning columns.'], resp['hints'])
assert_equal([suggestion for suggestion in resp['hints'] if suggestion['riskId'] == 22][0]['riskTables'], ['%s.web_logs' % BaseTestOptimizerClient.DATABASE])
assert_equal(resp['noDDL'], ['%s.a' % BaseTestOptimizerClient.DATABASE])
assert_equal(resp['noStats'], ['%s.a' % BaseTestOptimizerClient.DATABASE, '%s.web_logs' % BaseTestOptimizerClient.DATABASE])
def _assert_risks(risks, suggestions, present=True):
  """Assert that each risk message is present in (or absent from) the hints' riskAnalysis fields."""
suggestion_names = [suggestion['riskAnalysis'] for suggestion in suggestions]
for risk in risks:
if present:
assert_true(risk in suggestion_names, suggestions)
else:
assert_false(risk in suggestion_names, suggestions)
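# A standalone sketch (not part of the test suite) of the upload-then-poll
# pattern used in BaseTestOptimizerClient.upload() above. It relies only on the
# OptimizerClient.upload_status() call and the response shape already asserted
# in these tests; the function name and the timeout/interval parameters are
# illustrative.
def wait_for_upload(api, workload_id, timeout=60, interval=1):
  """Poll upload_status until the workload is FINISHED/FAILED or timeout (seconds) elapses."""
  waited = 0
  resp = api.upload_status(workload_id=workload_id)
  while waited < timeout and resp['status']['state'] not in ('FINISHED', 'FAILED'):
    time.sleep(interval)
    waited += interval
    resp = api.upload_status(workload_id=workload_id)
  return resp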
|