text (string, lengths 0 to 1.05M) | meta (dict)
---|---
from __future__ import absolute_import, division, print_function
import os.path
import subprocess
EXTENSIONS = ['.gpg', '.asc']
HAS_ARMOR = {'.gpg': False, '.asc': True}
OVERRIDE_HOMEDIR = None # useful for unit tests
def is_encrypted(path):
_, ext = os.path.splitext(path)
return ext in EXTENSIONS
def has_armor(path):
_, ext = os.path.splitext(path)
return HAS_ARMOR[ext]
def unencrypted_ext(path):
root, ext = os.path.splitext(path)
if ext in EXTENSIONS:
_, ext = os.path.splitext(root)
return ext
def _base_args():
args = ['gpg2', '--use-agent', '--quiet', '--batch', '--yes']
if OVERRIDE_HOMEDIR:
args += ['--homedir', OVERRIDE_HOMEDIR]
return args
def decrypt(path):
args = ['--decrypt', path]
return subprocess.check_output(_base_args() + args)
def encrypt(recipient, dest_path, content):
args = ["--encrypt"]
if has_armor(dest_path):
args += ["--armor"]
args += ["--recipient", recipient, "--output", dest_path]
popen = subprocess.Popen(_base_args() + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = popen.communicate(content)
assert popen.returncode == 0, stderr
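# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of a round-trip through the helpers above, assuming a GPG
# key for the hypothetical recipient exists in the active homedir. The file
# name and recipient are made up.
def _example_roundtrip():
    dest = 'passwords.yaml.asc'                      # '.asc' -> armored output
    encrypt('alice@example.com', dest, b'secret\n')
    assert has_armor(dest)
    assert unencrypted_ext(dest) == '.yaml'
    return decrypt(dest)                             # plaintext bytes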
| {
"repo_name": "mbr/pw",
"path": "pw/_gpg.py",
"copies": "1",
"size": "1315",
"license": "mit",
"hash": 6951234829107844000,
"line_mean": 25.8367346939,
"line_max": 65,
"alpha_frac": 0.5961977186,
"autogenerated": false,
"ratio": 3.6126373626373627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47088350812373625,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path, sys
from collections import namedtuple
from io import StringIO
if sys.version_info[0] < 3:
from ushlex import shlex
else:
from shlex import shlex
from . import _gpg
Entry = namedtuple('Entry', ['key', 'user', 'password', 'notes'])
def normalized_key(key):
return key.replace(' ', '_').lower()
class Store:
"""Password store."""
def __init__(self, path, entries):
# normalize keys
self.entries = [e._replace(key=normalized_key(e.key)) for e in entries]
self.path = path
def search(self, key_pattern, user_pattern):
"""Search database for given key and user pattern."""
# normalize key
key_pattern = normalized_key(key_pattern)
# search
results = []
for entry in self.entries:
if key_pattern in entry.key and user_pattern in entry.user:
results.append(entry)
# sort results according to key (stability of sorted() ensures that the order of accounts for any given key remains untouched)
return sorted(results, key=lambda e: e.key)
@staticmethod
def load(path):
"""Load password store from file."""
# load source (decrypting if necessary)
if _gpg.is_encrypted(path):
src = _gpg.decrypt(path)
else:
src = open(path, 'rb').read()
# parse database source
ext = _gpg.unencrypted_ext(path)
if ext in ['.yml', '.yaml']:
from . import _yaml
entries = _yaml.parse_entries(src)
else:
entries = _parse_entries(src)
return Store(path, entries)
class SyntaxError(Exception):
def __init__(self, lineno, line, reason):
super(SyntaxError, self).__init__(
'line %s: %s (%r)' % (lineno + 1, reason, line))
EXPECT_ENTRY = 'expecting entry'
EXPECT_ENTRY_OR_NOTES = 'expecting entry or notes'
def _parse_entries(src):
entries = []
state = EXPECT_ENTRY
for lineno, line in enumerate(src.decode('utf-8').splitlines()):
# empty lines are skipped (but also terminate the notes section)
sline = line.strip()
if not sline or sline.startswith('#'):
state = EXPECT_ENTRY
continue
# non-empty line with leading spaces is interpreted as a notes line
if line[0] in [' ', '\t']:
if state != EXPECT_ENTRY_OR_NOTES:
raise SyntaxError(lineno, line, state)
# add line of notes
notes = entries[-1].notes
if notes:
notes += "\n"
notes += sline
entries[-1] = entries[-1]._replace(notes=notes)
continue
# parse line using shlex
sio = StringIO(line)
lexer = shlex(sio, posix=True)
lexer.whitespace_split = True
# otherwise, parse as an entry
try:
key = lexer.get_token()
except ValueError as e:
raise SyntaxError(lineno, line, str(e))
if not key:
raise SyntaxError(lineno, line, state)
key = key.rstrip(':')
try:
user = lexer.get_token()
except ValueError as e:
raise SyntaxError(lineno, line, str(e))
if not user:
raise SyntaxError(lineno, line, state)
user = user
try:
password = lexer.get_token()
except ValueError as e:
raise SyntaxError(lineno, line, str(e))
if not password:
password = user
user = notes = ''
else:
password = password
notes = sio.read().strip()
entries.append(Entry(key, user, password, notes))
state = EXPECT_ENTRY_OR_NOTES
return entries
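# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of the plain-text format accepted by _parse_entries: one
# entry per line as "key: user password [notes]", with indented continuation
# lines appended to the previous entry's notes. The sample data is made up.
def _example_parse():
    src = (b"mail: alice hunter2 imap only\n"
           b"router: admin s3cret\n"
           b"    serial number 1234\n")
    entries = _parse_entries(src)
    assert entries[0] == Entry('mail', 'alice', 'hunter2', 'imap only')
    assert entries[1].notes == 'serial number 1234'
    return entries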
| {
"repo_name": "mbr/pw",
"path": "pw/store.py",
"copies": "1",
"size": "3822",
"license": "mit",
"hash": -6821358308487970000,
"line_mean": 28.4,
"line_max": 134,
"alpha_frac": 0.5675039246,
"autogenerated": false,
"ratio": 4.241953385127636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004415954415954416,
"num_lines": 130
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
from abc import ABCMeta
import warnings
from sklearn.externals import six
from skutil.base import overrides
from .transform import _flatten_one
from .util import reorder_h2o_frame, _gen_optimized_chunks, h2o_col_to_numpy
from .base import check_frame, BaseH2OFunctionWrapper
from ..preprocessing.balance import (_validate_ratio, _validate_target, _validate_num_classes,
_OversamplingBalancePartitioner, _UndersamplingBalancePartitioner,
BalancerMixin)
__all__ = [
'H2OOversamplingClassBalancer',
'H2OUndersamplingClassBalancer'
]
def _validate_x_y_ratio(X, y, ratio):
"""Validates the following, given that X is
already a validated pandas DataFrame:
1. That y is a string
2. That the number of classes does not exceed _max_classes
as defined by the BalancerMixin class
3. That the number of classes is at least 2
4. That ratio is a float that falls between 0.0 (exclusive) and
1.0 (inclusive)
Parameters
----------
X : ``H2OFrame``, shape=(n_samples, n_features)
The frame from which to sample
y : str
The name of the column that is the response class.
This is the column on which ``value_counts`` will be
executed to determine imbalance.
ratio : float
The ratio at which the balancing operation will
be performed. Used to determine whether balancing is
required.
Returns
-------
    out_tup : tuple, shape=(5,)
        a length-5 tuple with the following args:
            [0] - cts (dict), a mapping of each class label to its count
            [1] - labels, the class labels sorted ascending by count
            [2] - target (np.ndarray), the target column as a numpy array
            [3] - n_classes (int), the number of unique classes
            [4] - needs_balancing (bool), whether the least populated class
                  is represented at a rate lower than the demanded ratio.
"""
# validate ratio, if the current ratio is >= the ratio, it's "balanced enough"
ratio = _validate_ratio(ratio)
y = _validate_target(y) # cast to string type
is_factor = _flatten_one(X[y].isfactor()) # is the target a factor?
# if the target is a factor, we might have an issue here...
"""
if is_factor:
warnings.warn('Balancing with the target as a factor can cause unpredictable '
'sampling behavior (H2O makes it difficult to assess equality '
'between two factors). Balancing works best when the target '
'is an int. If possible, consider using `asnumeric`.', UserWarning)
"""
# generate cts. Have to get kludgier in h2o... then validate is < max classes
# we have to do it this way, because H2O might treat the vals as enum, and we cannot
# slice based on equality (dernit, H2O).
target_col = pd.Series(h2o_col_to_numpy(X[y]))
cts = target_col.value_counts().sort_values(ascending=True)
n_classes = _validate_num_classes(cts)
needs_balancing = (cts.values[0] / cts.values[-1]) < ratio
index = cts.index if not is_factor else cts.index.astype('str')
out_tup = (dict(zip(index, cts.values)), # cts
index, # labels sorted ascending by commonality
target_col.values if not is_factor else target_col.astype('str').values, # the target
n_classes,
needs_balancing)
return out_tup
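# --- Worked example (added; not part of the original module) ---
# A minimal sketch of the "needs balancing" test in isolation, using plain
# pandas instead of an H2OFrame. The class counts (100/30/25) mirror the
# docstring examples further down.
def _example_needs_balancing(ratio=0.5):
    import numpy as np
    target_col = pd.Series(np.concatenate([np.zeros(100), np.ones(30), np.ones(25) * 2]))
    cts = target_col.value_counts().sort_values(ascending=True)  # counts 25, 30, 100
    # least populated class (25) vs. majority class (100): 0.25 < 0.5 -> resample
    return (cts.values[0] / cts.values[-1]) < ratio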
class _BaseH2OBalancer(six.with_metaclass(ABCMeta,
BaseH2OFunctionWrapper,
BalancerMixin)):
"""Base class for all H2O balancers. Provides _min_version
and _max_version for BaseH2OFunctionWrapper constructor.
"""
def __init__(self, target_feature, ratio=BalancerMixin._def_ratio,
min_version='any', max_version=None, shuffle=True):
super(_BaseH2OBalancer, self).__init__(target_feature=target_feature,
min_version=min_version,
max_version=max_version)
self.ratio = ratio
self.shuffle = shuffle
# this is a new warning
if shuffle:
warnings.warn('Setting shuffle=True will eventually be deprecated, as H2O '
'does not allow re-ordering of frames by row. The current work-around '
'(rbinding the rows) is known to cause issues in the H2O ExprNode '
'cache for very large frames.', DeprecationWarning)
class H2OOversamplingClassBalancer(_BaseH2OBalancer):
"""Oversample the minority classes until they are represented
at the target proportion to the majority class.
Parameters
----------
target_feature : str
The name of the response column. The response column must be
more than a single class and less than
``skutil.preprocessing.balance.BalancerMixin._max_classes``
ratio : float, optional (default=0.2)
The target ratio of the minority records to the majority records. If the
existing ratio is >= the provided ratio, the return value will merely be
a copy of the input frame
shuffle : bool, optional (default=True)
Whether or not to shuffle rows on return
Examples
--------
Consider the following example: with a ``ratio`` of 0.5, the
minority classes (1, 2) will be oversampled until they are represented
at a ratio of at least 0.5 * the prevalence of the majority class (0)
>>> def example():
... import h2o
... import pandas as pd
... import numpy as np
... from skutil.h2o.frame import value_counts
... from skutil.h2o import from_pandas
...
... # initialize h2o
... h2o.init()
...
... # read into pandas
... x = pd.DataFrame(np.concatenate([np.zeros(100), np.ones(30), np.ones(25)*2]), columns=['A'])
...
... # load into h2o
... X = from_pandas(x)
...
... # initialize sampler
... sampler = H2OOversamplingClassBalancer(target_feature="A", ratio=0.5)
...
... # do balancing
... X_balanced = sampler.balance(X)
... value_counts(X_balanced)
>>>
>>> example() # doctest: +SKIP
0 100
1 50
2 50
Name A, dtype: int64
.. versionadded:: 0.1.0
"""
def __init__(self, target_feature, ratio=BalancerMixin._def_ratio, shuffle=True):
# as of now, no min/max version; it's simply compatible with all...
super(H2OOversamplingClassBalancer, self).__init__(
target_feature=target_feature, ratio=ratio, shuffle=shuffle)
@overrides(BalancerMixin)
def balance(self, X):
"""Apply the oversampling balance operation. Oversamples
the minority class to the provided ratio of minority
class(es) : majority class.
Parameters
----------
X : ``H2OFrame``, shape=(n_samples, n_features)
The imbalanced dataset.
Returns
-------
Xb : ``H2OFrame``, shape=(n_samples, n_features)
The balanced H2OFrame
"""
# check on state of X
frame = check_frame(X, copy=False)
# get the partitioner
partitioner = _OversamplingBalancePartitioner(
X=frame, y_name=self.target_feature,
ratio=self.ratio, validation_function=_validate_x_y_ratio)
sample_idcs = partitioner.get_indices(self.shuffle)
# since H2O won't allow us to resample (it's considered rearranging)
# we need to rbind at each point of duplication... this can be pretty
# inefficient, so we might need to get clever about this...
Xb = reorder_h2o_frame(frame, _gen_optimized_chunks(sample_idcs), from_chunks=True)
return Xb
class H2OUndersamplingClassBalancer(_BaseH2OBalancer):
"""Undersample the majority class until it is represented
at the target proportion to the most-represented minority class.
Parameters
----------
target_feature : str
The name of the response column. The response column must be
more than a single class and less than
``skutil.preprocessing.balance.BalancerMixin._max_classes``
ratio : float, optional (default=0.2)
The target ratio of the minority records to the majority records. If the
existing ratio is >= the provided ratio, the return value will merely be
a copy of the input frame
shuffle : bool, optional (default=True)
Whether or not to shuffle rows on return
Examples
--------
Consider the following example: with a ``ratio`` of 0.5, the
majority class (0) will be undersampled until the second most-populous
class (1) is represented at a ratio of 0.5.
>>> def example():
... import h2o
... import pandas as pd
... import numpy as np
... from skutil.h2o.frame import value_counts
... from skutil.h2o import from_pandas
...
... # initialize h2o
... h2o.init()
...
... # read into pandas
... x = pd.DataFrame(np.concatenate([np.zeros(100), np.ones(30), np.ones(25)*2]), columns=['A'])
...
... # load into h2o
... X = from_pandas(x) # doctest:+ELLIPSIS
...
... # initialize sampler
... sampler = H2OUndersamplingClassBalancer(target_feature="A", ratio=0.5)
...
... X_balanced = sampler.balance(X)
... value_counts(X_balanced)
...
>>> example() # doctest: +SKIP
0 60
1 30
2 10
Name A, dtype: int64
.. versionadded:: 0.1.0
"""
_min_version = '3.8.2.9'
_max_version = None
def __init__(self, target_feature, ratio=BalancerMixin._def_ratio, shuffle=True):
super(H2OUndersamplingClassBalancer, self).__init__(
target_feature=target_feature, ratio=ratio, min_version=self._min_version,
max_version=self._max_version, shuffle=shuffle)
@overrides(BalancerMixin)
def balance(self, X):
"""Apply the undersampling balance operation. Undersamples
the majority class to the provided ratio of minority
class(es) : majority class
Parameters
----------
X : ``H2OFrame``, shape=(n_samples, n_features)
The imbalanced dataset.
Returns
-------
Xb : ``H2OFrame``, shape=(n_samples, n_features)
The balanced H2OFrame
"""
# check on state of X
frame = check_frame(X, copy=False)
# get the partitioner
partitioner = _UndersamplingBalancePartitioner(
X=frame, y_name=self.target_feature, ratio=self.ratio,
validation_function=_validate_x_y_ratio)
# since there are no feature_names, we can just slice
# the h2o frame as is, given the indices:
idcs = partitioner.get_indices(self.shuffle)
Xb = frame[idcs, :] if not self.shuffle else reorder_h2o_frame(frame,
_gen_optimized_chunks(idcs),
from_chunks=True)
return Xb
| {
"repo_name": "tgsmith61591/skutil",
"path": "skutil/h2o/balance.py",
"copies": "1",
"size": "11679",
"license": "bsd-3-clause",
"hash": 6749210249554094000,
"line_mean": 36.1942675159,
"line_max": 108,
"alpha_frac": 0.5857522048,
"autogenerated": false,
"ratio": 4.142958495920539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004868555379090475,
"num_lines": 314
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from pandas import DataFrame
from pennies.market.curves import Curve, DiscountCurveWithNodes
class Market(object):
""" Market base class.
Attributes
----------
dt_valuation: date or datetime, preferably datetime64
Time at which market is valid
"""
def __init__(self, dt_valuation):
"""
Parameters
----------
dt_valuation: date or datetime, preferably datetime64
Time at which market is valid
"""
self.dt_valuation = dt_valuation
class RatesTermStructure(Market):
"""Provider of required Interest Rates.
This contains a coherent set of curves to provide discount and forward
interest rates, along with associated discount factors, and meta data.
Calibration generally is performed to reprice N market assets,
where N is the total of all nodes in all the curves.
The core of a RatesTermStructure is a dt_valuation, and a dict of curves,
    map_curves. This is keyed by currency, then by curve name, which must include
    'discount'. The others will typically be ibor frequencies: 3, 6, ...
Attributes
----------
map_curves: dict of dict
Contains a dictionary of dictionaries. The top key is currency strings.
Within each currency dictionary, 'discount' must exist.
"""
def __init__(self, dt_valuation, map_curves=None, map_fx=None):
super(RatesTermStructure, self).__init__(dt_valuation)
self.map_curves = map_curves
self.map_discount_curves = {ccy: self.map_curves[ccy]["discount"]
for ccy in map_curves}
self.map_fx = map_fx
self.nodes = DataFrame(columns=['ttm', 'dates', 'rates', 'ccy', 'curve'])
for ccy, curves in self.map_curves.items():
df_ccy = DataFrame(columns=['curve', 'ttm', 'dates', 'rates'])
for key, crv in curves.items():
try:
df_crv = crv.frame.loc[:, ['ttm', 'dates', 'rates']]
df_crv['ccy'] = ccy
df_crv['curve'] = key
if key == 'discount':
df_ccy = pd.concat([df_crv, df_ccy], ignore_index=True)
else:
df_ccy = pd.concat([df_ccy, df_crv], ignore_index=True)
except AttributeError:
pass # Curves without nodes will not be calibrated
self.nodes = pd.concat([self.nodes, df_ccy], ignore_index=True)
#self.nodes.sort_values(by=['ccy', 'ttm'], inplace=True)
# TODO - What's the point of if key == blah, if we sort? Why are we sorting?
@classmethod
def from_frame(cls, dt_valuation, frame, map_fx=None):
map_curves = {}
for ccy in np.unique(frame.ccy):
mp_ccy = {}
df_ccy = frame.loc[frame.ccy == ccy]
for key in np.unique(df_ccy.curve):
mp_ccy[key] = DiscountCurveWithNodes.from_frame(
                    df_ccy[df_ccy.curve == key][['dates', 'rates']],
dt_valuation)
map_curves[ccy] = mp_ccy
return RatesTermStructure(dt_valuation, map_curves, map_fx)
def discount_curve(self, currency):
"""Access to discount curve for given currency"""
return self.map_curves[currency]["discount"]
def discount_factor(self, date, currency=None):
if currency is None:
if len(self.map_discount_curves) == 1:
currency = list(self.map_discount_curves)[0]
else:
raise ValueError("currency argument must be defined if Market "
"contains more than one of them.")
return self.map_discount_curves[currency].discount_factor(date)
def curve(self, currency, key):
""" Provides curve and its market key that provide rates for input
Parameters
----------
currency: str
Currency of the rates required.
key:
Key used to describe rate.
For IBOR, this will be the integer frequency in months
Returns
-------
tuple
(curve, key) curve in market that produces rates for key requested
"""
try:
ccy_map = self.map_curves[currency]
except KeyError:
raise ValueError('Requested currency not present in market: {}'
.format(currency))
try:
return ccy_map[key], key
except KeyError:
try:
return ccy_map['discount'], 'discount'
except KeyError:
raise KeyError('Curve requested with key,{}, and currency ,{}, '
'that has neither that key, nor "discount". '
'Perhaps the currency is wrong.'
.format(key, currency))
def fx(self, this_ccy, per_that_ccy):
raise NotImplementedError('Foreign Exchange not yet implemented')
def rate_sensitivity(self, ttm, currency, curve_key):
"""Sensitivity of interpolated point to node: dy(x)/dy_i
Sensitivity of rate at time, ttm, to a unit move in the rate of each
node in the curve. Hence, for each ttm, a vector is returned.
Parameters
----------
ttm: array-like
Time in years from valuation date to some rate's maturity date.
currency: str
Currency of the curve.
curve_key:
Key to the curve in map_curves.
Returns
-------
array-like
shape = ttm.shape + curve.node_dates.shape
"""
crv, key = self.curve(currency, curve_key)
return crv.rate_sensitivity(ttm)
@classmethod
def of_single_curve(cls, dt_valuation, yield_curve):
"""Create market consisting of a single discount curve, and a valid date
If forward rates are to be computed, the discount curve will be used.
"""
curve_map = {yield_curve.currency: {"discount": yield_curve}}
return cls(dt_valuation, curve_map)
@classmethod
def from_curve_map(cls, dt_valuation, curve_map):
"""Create market consisting of a map of curves.
The map is a dictionary of dictionaries.
The top level keys are currency strings.
The next level are strings describing the curves in that currency.
One curve key in each currency must be named 'discount'.
The others are frequencies defined as an integer number of months.
These 'frequency' curves are used to produce forward ibor rates,
from pseudo-discount factors.
"""
return cls(dt_valuation, curve_map)
def __str__(self):
return str(self.nodes)
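# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of the nested map_curves layout described in the class
# docstring: top-level keys are currency strings, each holding a 'discount'
# curve plus optional ibor curves keyed by frequency in months. The curve
# objects and valuation date are placeholders supplied by the caller.
def _example_market(dt_valuation, usd_discount_curve, usd_3m_curve):
    map_curves = {'USD': {'discount': usd_discount_curve, 3: usd_3m_curve}}
    market = RatesTermStructure(dt_valuation, map_curves)
    return market.discount_curve('USD')  # the same object as usd_discount_curve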
| {
"repo_name": "caseyclements/pennies",
"path": "pennies/market/market.py",
"copies": "1",
"size": "6889",
"license": "apache-2.0",
"hash": 2619547526906832000,
"line_mean": 37.0607734807,
"line_max": 84,
"alpha_frac": 0.581216432,
"autogenerated": false,
"ratio": 4.284203980099503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5365420412099502,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
def readPhoSimInstanceCatalog(fname,
names=['obj', 'SourceID', 'RA', 'DEC', 'MAG_NORM',\
'SED_NAME', 'REDSHIFT', 'GAMMA1',\
'GAMMA2', 'MU', 'DELTA_RA', 'DELTA_DEC',\
'SOURCE_TYPE', 'DUST_REST_NAME',\
'Av', 'Rv', 'Dust_Lab_Name', 'EBV']):
"""
read the phoSimInstanceCatalog and return the contents
Parameters
----------
fname : mandatory, string
filename of the phosim instance catalog
names : a list of column names matching the number of columns
Returns
-------
A `pandas.DataFrame` with the phosim Instance Catalog with metadata
accessed as a dictionary through the meta attribute of the return.
"""
# read the header into a metadata list, and get number of lines to skip
# for catalog
metalines = []
with open(fname) as f:
linenum = 0
for line in f:
if line.startswith('object'):
continue
metalines.append(line)
linenum += 1
# process the headers into a metadata list
meta = metadataFromLines(metalines)
# read the catalog into a dataframe
df = pd.read_csv(fname, skiprows=linenum, names=names, sep='\s+')
df.meta = meta
return df
def metadataFromLines(lines):
"""
process the metadata lines into a dictionary
"""
info = [line.split() for line in lines]
meta = {key: np.float(value) for key, value in info}
return meta
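# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of the header format metadataFromLines expects: one
# "keyword value" pair per line, every value parseable as a float. The
# keyword names below are illustrative, not a guaranteed phoSim header.
def _example_metadata():
    lines = ['rightascension 53.01\n', 'declination -27.44\n', 'obshistid 1234\n']
    meta = metadataFromLines(lines)
    return meta['obshistid']  # 1234.0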
if __name__ == "__main__":
    df = readPhoSimInstanceCatalog('/Users/rbiswas/src/LSST/sims_catUtils/examples/SNOnlyPhoSimCatalog.dat')
print(df.head())
| {
"repo_name": "DarkEnergyScienceCollaboration/Twinkles",
"path": "python/desc/twinkles/analyseICat.py",
"copies": "2",
"size": "1865",
"license": "mit",
"hash": 2123846808205650700,
"line_mean": 29.5737704918,
"line_max": 114,
"alpha_frac": 0.5764075067,
"autogenerated": false,
"ratio": 4.045553145336226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018279184609317004,
"num_lines": 61
} |
from __future__ import absolute_import, division, print_function
import pandas as pd
import sys, os
import tarfile
import gzip
class ValidatePhoSimCatalogs(object):
MegaByte = 1024*1024
def __init__(self,
obsHistIDValues,
prefix='InstanceCatalogs/phosim_input_'):
self.obsHistIDValues = obsHistIDValues
self.prefix=prefix
@classmethod
def fromRunlog(cls, runlog='run.log',
prefix='InstanceCatalogs/phosim_input_',
obsHistIDrange=[0, None]):
runs = pd.read_csv(runlog)
if obsHistIDrange[-1] is None:
obsHistIDrange[-1] = len(runs)
obsHistIDValues = runs.obsHistID.values[obsHistIDrange[0]:obsHistIDrange[1]]
return cls(obsHistIDValues=obsHistIDValues, prefix=prefix)
@staticmethod
def filenames(obsHistID, prefix='InstanceCatalogs/phosim_input_'):
"""
return the filenames for the phosim instance catalog and the
spectra tar ball corresponding to the obsHistID.
"""
basename = prefix + str(obsHistID)
spectra_tar = basename + '.tar.gz'
phosimInstanceCatalog = basename + '.txt.gz'
return phosimInstanceCatalog, spectra_tar
@staticmethod
def listSpectraFromPhosimInstanceCatalog(phosimInstanceCatalog,
filePrefix='spectra_files'):
"""
obtain a list of filenames of SN spectra mentioned in a phosim Instance
catalog.
Parameters
----------
phosimInstanceCatalog: string, mandatory
absolute filename for phosimInstanceCatalog
filePrefix : string, optional, default to 'spectra_files'
This function assumes that all SN spectra have the same
prefix
"""
if phosimInstanceCatalog.endswith('gz'):
with gzip.open(phosimInstanceCatalog, 'r') as f:
contents = f.read()
elif phosimInstanceCatalog.endswith('txt'):
with open(phosimInstanceCatalog, 'r') as f:
contents = f.read()
else:
raise ValueError('Not implemented: handling files with ending string', phosimInstanceCatalog)
fnames = [ee for ee in contents.split() if filePrefix in ee]
return fnames
@staticmethod
def listTarredSpectra(tarredspectra):
"""
"""
t = tarfile.open(tarredspectra, 'r|gz')
fnames = t.getnames()
return fnames
@staticmethod
def compareFilenames(phosimInstanceCatalog, tarredspectra):
phosimspectra = ValidatePhoSimCatalogs.listSpectraFromPhosimInstanceCatalog(phosimInstanceCatalog)
tarredspecs = ValidatePhoSimCatalogs.listTarredSpectra(tarredspectra)
pss = set(phosimspectra)
ts = set(tarredspecs)
return ts == pss, pss - ts, ts - pss
@staticmethod
def validateSizes(phosimInstanceCatalog, spectra_tar, unitSize,
minSizePhosim=15, minSizeSpectra=40):
"""
        Check that the files exist and have sizes above a minimum size (i.e. not
        empty)
"""
minSizeSpectra = minSizeSpectra * unitSize
minSizePhosim = minSizePhosim * unitSize
success = False
try:
spectra_size = os.path.getsize(spectra_tar)
except:
spectra_size = False
try:
phoSimCat_size = os.path.getsize(phosimInstanceCatalog)
except:
phoSimCat_size = False
if phoSimCat_size and spectra_size:
success = (phoSimCat_size > minSizePhosim) and (spectra_size > minSizeSpectra)
if success:
untarredInstanceCatalog = phosimInstanceCatalog.split('.gz')[0]
if os.path.exists(untarredInstanceCatalog):
os.remove(untarredInstanceCatalog)
return success, phoSimCat_size, spectra_size, unitSize
def run(self, filename='validateCompleteness'):
f = open(filename + '_success.dat', 'w')
g = open(filename +'_failures.dat', 'w')
for obsHistID in self.obsHistIDValues:
phosimInstanceCatalog, spectra = self.filenames(obsHistID,
self.prefix)
            success, phosimSize, spectraSize, _ = self.validateSizes(phosimInstanceCatalog=phosimInstanceCatalog, spectra_tar=spectra, unitSize=self.MegaByte)
if success:
f.write("{0:d},{1:2.1f},{2:2.1f}\n".format(obsHistID,
phosimSize, spectraSize))
else:
g.write("{0:d},{1:2.1f},{2:2.1f}\n".format(obsHistID,
phosimSize, spectraSize))
f.close()
g.close()
if __name__=='__main__':
v = ValidatePhoSimCatalogs.fromRunlog(runlog='FirstSet_obsHistIDs.csv',
obsHistIDrange=[500, 600])
v.run()
#
#
#
#
#def filenames(obsHistID, prefix='InstanceCatalogs/phosim_input_'):
# """
# return the filenames for the phosim instance catalog and the spectra tar
# ball corresponding to the obsHistID.
#
# """
# basename = prefix + str(obsHistID)
# spectra_tar = basename + 'tar.gz'
# phosimInstanceCatalog = basename + '.txt.gz'
#
# return phosimInstanceCatalog, specta_tar
#
#
| {
"repo_name": "LSSTDESC/Twinkles",
"path": "python/desc/twinkles/validation/validate.py",
"copies": "2",
"size": "5393",
"license": "mit",
"hash": 3766738838175807000,
"line_mean": 35.1946308725,
"line_max": 155,
"alpha_frac": 0.605043575,
"autogenerated": false,
"ratio": 3.8329779673063253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020692787268814083,
"num_lines": 149
} |
from __future__ import absolute_import, division, print_function
import paramiko
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import select
import threading
import requests
import logging
import getpass
import psycopg2
import socket
__author__ = 'jgarman'
log = logging.getLogger(__name__)
def get_password(server_name):
return getpass.getpass("Enter password for %s: " % server_name)
class ForwardServer (SocketServer.ThreadingTCPServer, threading.Thread):
def __init__(self, *args, **kwargs):
SocketServer.ThreadingTCPServer.__init__(self, *args, **kwargs)
threading.Thread.__init__(self)
daemon_threads = True
allow_reuse_address = True
def run(self):
return self.serve_forever()
def get_request_handler(remote_host, remote_port, transport):
class SubHandler(Handler):
chain_host = remote_host
chain_port = int(remote_port)
ssh_transport = transport
return SubHandler
class Handler(SocketServer.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip',
(self.chain_host, self.chain_port),
self.request.getpeername())
except Exception as e:
log.debug('Incoming request to %s:%d failed: %s' % (self.chain_host, self.chain_port, repr(e)))
return
if chan is None:
log.debug('Incoming request to %s:%d was rejected by the SSH server.' %
(self.chain_host, self.chain_port))
return
log.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
chan.getpeername(), (self.chain_host, self.chain_port)))
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
peername = self.request.getpeername()
chan.close()
self.request.close()
log.debug('Tunnel closed from %r' % (peername,))
class SSHConnection(object):
def __init__(self, username, hostname, port, password_callback=get_password):
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.load_system_host_keys()
self.ssh_connection.set_missing_host_key_policy(paramiko.WarningPolicy())
self.name = "%s@%s:%d" % (username, hostname, port)
self.session = requests.Session()
connected = False
password = ''
while not connected:
try:
self.ssh_connection.connect(hostname=hostname, username=username, port=port, look_for_keys=False,
password=password, timeout=2.0, banner_timeout=2.0, allow_agent=True)
connected = True
except paramiko.AuthenticationException:
password = password_callback(self.name)
except paramiko.SSHException as e:
log.error("Error logging into %s: %s" % (self.name, str(e)))
raise
except socket.error as e:
log.error("Error connecting to %s: %s" % (self.name, str(e)))
raise
self.forwarded_connections = []
solr_forwarded_port = self.forward_tunnel('127.0.0.1', 8080)
self.solr_url_base = 'http://127.0.0.1:%d' % solr_forwarded_port
def http_get(self, path, **kwargs):
return self.session.get('%s%s' % (self.solr_url_base, path), **kwargs)
def http_post(self, path, *args, **kwargs):
return self.session.post('%s%s' % (self.solr_url_base, path), *args, **kwargs)
def forward_tunnel(self, remote_host, remote_port):
# this is a little convoluted, but lets me configure things for the Handler
# object. (socketserver doesn't give Handlers any way to access the outer
# server normally.)
transport = self.ssh_connection.get_transport()
local_port = 12001
conn = None
while not conn and local_port < 65536:
try:
conn = ForwardServer(('127.0.0.1', local_port),
get_request_handler(remote_host, remote_port, transport))
except Exception:
local_port += 1
if conn:
conn.daemon = True
conn.start()
self.forwarded_connections.append(conn)
return local_port
raise Exception("Cannot find open local port")
def close(self):
for conn in self.forwarded_connections:
conn.shutdown()
def open_file(self, filename, mode='r'):
return self.ssh_connection.open_sftp().file(filename, mode=mode)
def open_db(self, user, password, database, host, port):
local_port = self.forward_tunnel(remote_host=host, remote_port=port)
return psycopg2.connect(user=user, password=password, database=database, host=host, port=local_port)
def __str__(self):
return self.name
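# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of opening a tunnelled connection and issuing an HTTP GET
# against the Solr instance forwarded over SSH. The hostname, credentials and
# request path are placeholders.
def _example_usage():
    conn = SSHConnection(username='cb', hostname='cbserver.example.com', port=22)
    try:
        response = conn.http_get('/solr/admin/ping')  # travels through the tunnel
        return response.status_code
    finally:
        conn.close()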
| {
"repo_name": "carbonblack/cb-event-duplicator",
"path": "cbopensource/tools/eventduplicator/ssh_connection.py",
"copies": "1",
"size": "5411",
"license": "mit",
"hash": 6834108745970847000,
"line_mean": 34.8344370861,
"line_max": 118,
"alpha_frac": 0.5788209203,
"autogenerated": false,
"ratio": 4.083773584905661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000988441624571416,
"num_lines": 151
} |
from __future__ import (absolute_import, division, print_function)
import periodictable
from qtpy.QtWidgets import QDialog
from addie.utilities import load_ui
#from addie.ui_isotopes import Ui_Dialog as UiDialog
class IsotopesHandler:
def __init__(self, parent=None, element=''):
if parent.isotope_ui is None:
o_isotope = IsotopeDialog(parent=parent,
element=element)
o_isotope.show()
parent.isotope_ui = o_isotope
else:
parent.isotope_ui.setFocus()
parent.isotope_ui.activateWindow()
class IsotopeDialog(QDialog):
def __init__(self, parent=None, element=''):
self.parent = parent
QDialog.__init__(self, parent=parent)
self.ui = load_ui('isotopes.ui', baseinstance=self)
self.init_widgets(element)
def init_widgets(self, element):
list_isotopes = [element]
for iso in getattr(periodictable, element):
#reformat
list_str_iso = str(iso).split('-')
str_iso = "".join(list_str_iso[::-1])
list_isotopes.append(str_iso)
self.ui.comboBox.addItems(list_isotopes)
def accept(self):
isotope_selected = self.ui.comboBox.currentText()
isotope_number = self.ui.number_of_elements.value()
is_natural_element = False
if self.ui.comboBox.currentIndex() == 0:
is_natural_element = True
self.parent.add_new_entry(isotope=isotope_selected,
number=isotope_number,
is_natural_element=is_natural_element)
self.close()
self.parent.isotope_ui = None
def reject(self):
self.close()
self.parent.isotope_ui = None
def closeEvent(self, c):
pass
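# --- Illustrative note (added; not part of the original module) ---
# A minimal sketch of the string reshuffling done in init_widgets: the
# hyphenated isotope name from periodictable is split on '-' and rejoined in
# reverse order for display. The input string below is illustrative only.
def _example_reformat(iso_str='O-18'):
    return "".join(iso_str.split('-')[::-1])  # -> '18O'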
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/periodic_table/isotopes_handler.py",
"copies": "1",
"size": "1833",
"license": "mit",
"hash": -4760862760227223000,
"line_mean": 30.0677966102,
"line_max": 72,
"alpha_frac": 0.5897435897,
"autogenerated": false,
"ratio": 3.6881287726358147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9772421305557607,
"avg_score": 0.0010902113556414802,
"num_lines": 59
} |
from __future__ import absolute_import, division, print_function
import pycares # type: ignore
import socket
from tornado.concurrent import Future
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip
class CaresResolver(Resolver):
"""Name resolver based on the c-ares library.
This is a non-blocking and non-threaded resolver. It may not produce
the same results as the system resolver, but can be used for non-blocking
resolution when threads cannot be used.
c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def initialize(self):
self.io_loop = IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {}
def _sock_state_cb(self, fd, readable, writable):
state = ((IOLoop.READ if readable else 0) |
(IOLoop.WRITE if writable else 0))
if not state:
self.io_loop.remove_handler(fd)
del self.fds[fd]
elif fd in self.fds:
self.io_loop.update_handler(fd, state)
self.fds[fd] = state
else:
self.io_loop.add_handler(fd, self._handle_events, state)
self.fds[fd] = state
def _handle_events(self, fd, events):
read_fd = pycares.ARES_SOCKET_BAD
write_fd = pycares.ARES_SOCKET_BAD
if events & IOLoop.READ:
read_fd = fd
if events & IOLoop.WRITE:
write_fd = fd
self.channel.process_fd(read_fd, write_fd)
@gen.coroutine
def resolve(self, host, port, family=0):
if is_valid_ip(host):
addresses = [host]
else:
# gethostbyname doesn't take callback as a kwarg
fut = Future()
self.channel.gethostbyname(host, family,
lambda result, error: fut.set_result((result, error)))
result, error = yield fut
if error:
raise IOError('C-Ares returned error %s: %s while resolving %s' %
(error, pycares.errno.strerror(error), host))
addresses = result.addresses
addrinfo = []
for address in addresses:
if '.' in address:
address_family = socket.AF_INET
elif ':' in address:
address_family = socket.AF_INET6
else:
address_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != address_family:
raise IOError('Requested socket family %d but got %d' %
(family, address_family))
addrinfo.append((address_family, (address, port)))
raise gen.Return(addrinfo)
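# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of installing this resolver as the default Resolver
# implementation and resolving a name on AF_INET, as the class docstring
# recommends. The hostname is a placeholder.
def _example_configure():
    Resolver.configure('tornado.platform.caresresolver.CaresResolver')
    resolver = Resolver()
    return resolver.resolve('example.com', 80, family=socket.AF_INET)  # returns a Future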
| {
"repo_name": "hhru/tornado",
"path": "tornado/platform/caresresolver.py",
"copies": "2",
"size": "3068",
"license": "apache-2.0",
"hash": 9186483834327608000,
"line_mean": 37.835443038,
"line_max": 93,
"alpha_frac": 0.5880052151,
"autogenerated": false,
"ratio": 4.068965517241379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.565697073234138,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pycares # type: ignore
import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip
class CaresResolver(Resolver):
"""Name resolver based on the c-ares library.
This is a non-blocking and non-threaded resolver. It may not produce
the same results as the system resolver, but can be used for non-blocking
resolution when threads cannot be used.
c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def initialize(self):
self.io_loop = IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {}
def _sock_state_cb(self, fd, readable, writable):
state = ((IOLoop.READ if readable else 0) |
(IOLoop.WRITE if writable else 0))
if not state:
self.io_loop.remove_handler(fd)
del self.fds[fd]
elif fd in self.fds:
self.io_loop.update_handler(fd, state)
self.fds[fd] = state
else:
self.io_loop.add_handler(fd, self._handle_events, state)
self.fds[fd] = state
def _handle_events(self, fd, events):
read_fd = pycares.ARES_SOCKET_BAD
write_fd = pycares.ARES_SOCKET_BAD
if events & IOLoop.READ:
read_fd = fd
if events & IOLoop.WRITE:
write_fd = fd
self.channel.process_fd(read_fd, write_fd)
@gen.coroutine
def resolve(self, host, port, family=0):
if is_valid_ip(host):
addresses = [host]
else:
# gethostbyname doesn't take callback as a kwarg
self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
callback_args = yield gen.Wait(1)
assert isinstance(callback_args, gen.Arguments)
assert not callback_args.kwargs
result, error = callback_args.args
if error:
raise IOError('C-Ares returned error %s: %s while resolving %s' %
(error, pycares.errno.strerror(error), host))
addresses = result.addresses
addrinfo = []
for address in addresses:
if '.' in address:
address_family = socket.AF_INET
elif ':' in address:
address_family = socket.AF_INET6
else:
address_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != address_family:
raise IOError('Requested socket family %d but got %d' %
(family, address_family))
addrinfo.append((address_family, (address, port)))
raise gen.Return(addrinfo)
| {
"repo_name": "eklitzke/tornado",
"path": "tornado/platform/caresresolver.py",
"copies": "9",
"size": "3093",
"license": "apache-2.0",
"hash": 7346685480752654000,
"line_mean": 38.1518987342,
"line_max": 81,
"alpha_frac": 0.5945683802,
"autogenerated": false,
"ratio": 4.037859007832898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9132427388032898,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import py
import pytest
# test for _argcomplete but not specific for any application
def equal_with_bash(prefix, ffc, fc, out=None):
res = ffc(prefix)
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write('equal_with_bash %s %s\n' % (retval, res))
if not retval:
out.write(' python - bash: %s\n' % (set(res) - res_bash))
out.write(' bash - python: %s\n' % (res_bash - set(res)))
return retval
# copied from argcomplete.completers as import from there
# also pulls in argcomplete.__init__ which opens filedescriptor 9
# this gives an IOError at the end of testrun
def _wrapcall(*args, **kargs):
try:
if py.std.sys.version_info > (2, 7):
return py.std.subprocess.check_output(*args, **kargs).decode().splitlines()
if 'stdout' in kargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = py.std.subprocess.Popen(
stdout=py.std.subprocess.PIPE, *args, **kargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kargs.get("args")
if cmd is None:
cmd = args[0]
raise py.std.subprocess.CalledProcessError(retcode, cmd)
return output.decode().splitlines()
except py.std.subprocess.CalledProcessError:
return []
class FilesCompleter(object):
'File completer class, optionally takes a list of allowed extensions'
def __init__(self, allowednames=(), directories=True):
# Fix if someone passes in a string instead of a list
if type(allowednames) is str:
allowednames = [allowednames]
self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
self.directories = directories
def __call__(self, prefix, **kwargs):
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion += [f + '/' for f in files]
for x in self.allowednames:
completion += _wrapcall(['bash', '-c',
"compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix)])
else:
completion += _wrapcall(['bash', '-c',
"compgen -A file -- '{p}'".format(p=prefix)])
anticomp = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion = list(set(completion) - set(anticomp))
if self.directories:
completion += [f + '/' for f in anticomp]
return completion
class TestArgComplete(object):
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(self):
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/ /d /data qqq'.split():
assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_remove_dir_prefix(self):
"""this is not compatible with compgen but it is with bash itself:
ls /usr/<TAB>
"""
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/usr/'.split():
assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
| {
"repo_name": "MichaelAquilina/pytest",
"path": "testing/test_argcomplete.py",
"copies": "1",
"size": "3708",
"license": "mit",
"hash": -1407717435905832400,
"line_mean": 37.2268041237,
"line_max": 101,
"alpha_frac": 0.5701186624,
"autogenerated": false,
"ratio": 3.9700214132762315,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5040140075676232,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
class SessionTests(object):
def test_basic_testitem_events(self, testdir):
tfile = testdir.makepyfile("""
def test_one():
pass
def test_one_one():
assert 0
def test_other():
raise ValueError(23)
class TestClass(object):
def test_two(self, someargs):
pass
""")
reprec = testdir.inline_run(tfile)
passed, skipped, failed = reprec.listoutcomes()
assert len(skipped) == 0
assert len(passed) == 1
assert len(failed) == 3
end = lambda x: x.nodeid.split("::")[-1]
assert end(failed[0]) == "test_one_one"
assert end(failed[1]) == "test_other"
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 4
# XXX check for failing funcarg setup
#colreports = reprec.getcalls("pytest_collectreport")
#assert len(colreports) == 4
#assert colreports[1].report.failed
def test_nested_import_error(self, testdir):
tfile = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""", import_fails="""
import does_not_work
a = 1
""")
reprec = testdir.inline_run(tfile)
l = reprec.getfailedcollections()
assert len(l) == 1
out = str(l[0].longrepr)
assert out.find('does_not_work') != -1
def test_raises_output(self, testdir):
reprec = testdir.inline_runsource("""
import pytest
def test_raises_doesnt():
pytest.raises(ValueError, int, "3")
""")
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
out = failed[0].longrepr.reprcrash.message
if not out.find("DID NOT RAISE") != -1:
print(out)
pytest.fail("incorrect raises() output")
def test_generator_yields_None(self, testdir):
reprec = testdir.inline_runsource("""
def test_1():
yield None
""")
failures = reprec.getfailedcollections()
out = failures[0].longrepr.reprcrash.message
i = out.find('TypeError')
assert i != -1
def test_syntax_error_module(self, testdir):
reprec = testdir.inline_runsource("this is really not python")
l = reprec.getfailedcollections()
assert len(l) == 1
out = str(l[0].longrepr)
assert out.find(str('not python')) != -1
def test_exit_first_problem(self, testdir):
reprec = testdir.inline_runsource("""
def test_one(): assert 0
def test_two(): assert 0
""", '--exitfirst')
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
assert passed == skipped == 0
def test_maxfail(self, testdir):
reprec = testdir.inline_runsource("""
def test_one(): assert 0
def test_two(): assert 0
def test_three(): assert 0
""", '--maxfail=2')
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert passed == skipped == 0
def test_broken_repr(self, testdir):
p = testdir.makepyfile("""
import pytest
class BrokenRepr1(object):
foo=0
def __repr__(self):
raise Exception("Ha Ha fooled you, I'm a broken repr().")
class TestBrokenClass(object):
def test_explicit_bad_repr(self):
t = BrokenRepr1()
pytest.raises(Exception, 'repr(t)')
def test_implicit_bad_repr1(self):
t = BrokenRepr1()
assert t.foo == 1
""")
reprec = testdir.inline_run(p)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
out = failed[0].longrepr.reprcrash.message
assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #'
def test_skip_file_by_conftest(self, testdir):
testdir.makepyfile(conftest="""
import pytest
def pytest_collect_file():
pytest.skip("intentional")
""", test_file="""
def test_one(): pass
""")
try:
reprec = testdir.inline_run(testdir.tmpdir)
except pytest.skip.Exception:
pytest.fail("wrong skipped caught")
reports = reprec.getreports("pytest_collectreport")
assert len(reports) == 1
assert reports[0].skipped
class TestNewSession(SessionTests):
def test_order_of_execution(self, testdir):
reprec = testdir.inline_runsource("""
l = []
def test_1():
l.append(1)
def test_2():
l.append(2)
def test_3():
assert l == [1,2]
class Testmygroup(object):
reslist = l
def test_1(self):
self.reslist.append(1)
def test_2(self):
self.reslist.append(2)
def test_3(self):
self.reslist.append(3)
def test_4(self):
assert self.reslist == [1,2,1,2,3]
""")
passed, skipped, failed = reprec.countoutcomes()
assert failed == skipped == 0
assert passed == 7
# also test listnames() here ...
def test_collect_only_with_various_situations(self, testdir):
p = testdir.makepyfile(
test_one="""
def test_one():
raise ValueError()
class TestX(object):
def test_method_one(self):
pass
class TestY(TestX):
pass
""",
test_three="xxxdsadsadsadsa",
__init__=""
)
reprec = testdir.inline_run('--collect-only', p.dirpath())
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 3
assert not reprec.getreports("pytest_runtest_logreport")
started = reprec.getcalls("pytest_collectstart")
finished = reprec.getreports("pytest_collectreport")
assert len(started) == len(finished)
assert len(started) == 7 # XXX extra TopCollector
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_import_error(self, testdir):
testdir.makepyfile(__init__="")
testdir.makepyfile(test_one="xxxx", test_two="yyyy")
reprec = testdir.inline_run("-x", testdir.tmpdir)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_overridden_by_maxfail(self, testdir):
testdir.makepyfile(__init__="")
testdir.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
reprec = testdir.inline_run("-x", "--maxfail=2", testdir.tmpdir)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 2
def test_plugin_specify(testdir):
pytest.raises(ImportError, """
testdir.parseconfig("-p", "nqweotexistent")
""")
#pytest.raises(ImportError,
# "config.do_configure(config)"
#)
def test_plugin_already_exists(testdir):
config = testdir.parseconfig("-p", "terminal")
assert config.option.plugins == ['terminal']
config._do_configure()
config._ensure_unconfigure()
def test_exclude(testdir):
hellodir = testdir.mkdir("hello")
hellodir.join("test_hello.py").write("x y syntaxerror")
hello2dir = testdir.mkdir("hello2")
hello2dir.join("test_hello2.py").write("x y syntaxerror")
testdir.makepyfile(test_ok="def test_pass(): pass")
result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_sessionfinish_with_start(testdir):
testdir.makeconftest("""
import os
l = []
def pytest_sessionstart():
l.append(os.getcwd())
os.chdir("..")
def pytest_sessionfinish():
assert l[0] == os.getcwd()
""")
res = testdir.runpytest("--collect-only")
assert res.ret == EXIT_NOTESTSCOLLECTED
| {
"repo_name": "flub/pytest",
"path": "testing/test_session.py",
"copies": "1",
"size": "8701",
"license": "mit",
"hash": -8308709361175897000,
"line_mean": 34.2267206478,
"line_max": 110,
"alpha_frac": 0.5499367889,
"autogenerated": false,
"ratio": 4.1236966824644545,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5173633471364454,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
class SessionTests(object):
def test_basic_testitem_events(self, testdir):
tfile = testdir.makepyfile("""
def test_one():
pass
def test_one_one():
assert 0
def test_other():
raise ValueError(23)
class TestClass(object):
def test_two(self, someargs):
pass
""")
reprec = testdir.inline_run(tfile)
passed, skipped, failed = reprec.listoutcomes()
assert len(skipped) == 0
assert len(passed) == 1
assert len(failed) == 3
def end(x):
return x.nodeid.split("::")[-1]
assert end(failed[0]) == "test_one_one"
assert end(failed[1]) == "test_other"
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 4
# XXX check for failing funcarg setup
# colreports = reprec.getcalls("pytest_collectreport")
# assert len(colreports) == 4
# assert colreports[1].report.failed
def test_nested_import_error(self, testdir):
tfile = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""", import_fails="""
import does_not_work
a = 1
""")
reprec = testdir.inline_run(tfile)
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find('does_not_work') != -1
def test_raises_output(self, testdir):
reprec = testdir.inline_runsource("""
import pytest
def test_raises_doesnt():
pytest.raises(ValueError, int, "3")
""")
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
out = failed[0].longrepr.reprcrash.message
if not out.find("DID NOT RAISE") != -1:
print(out)
pytest.fail("incorrect raises() output")
def test_generator_yields_None(self, testdir):
reprec = testdir.inline_runsource("""
def test_1():
yield None
""")
failures = reprec.getfailedcollections()
out = failures[0].longrepr.reprcrash.message
i = out.find('TypeError')
assert i != -1
def test_syntax_error_module(self, testdir):
reprec = testdir.inline_runsource("this is really not python")
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find(str('not python')) != -1
def test_exit_first_problem(self, testdir):
reprec = testdir.inline_runsource("""
def test_one(): assert 0
def test_two(): assert 0
""", '--exitfirst')
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
assert passed == skipped == 0
def test_maxfail(self, testdir):
reprec = testdir.inline_runsource("""
def test_one(): assert 0
def test_two(): assert 0
def test_three(): assert 0
""", '--maxfail=2')
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert passed == skipped == 0
def test_broken_repr(self, testdir):
p = testdir.makepyfile("""
import pytest
class BrokenRepr1(object):
foo=0
def __repr__(self):
raise Exception("Ha Ha fooled you, I'm a broken repr().")
class TestBrokenClass(object):
def test_explicit_bad_repr(self):
t = BrokenRepr1()
pytest.raises(Exception, 'repr(t)')
def test_implicit_bad_repr1(self):
t = BrokenRepr1()
assert t.foo == 1
""")
reprec = testdir.inline_run(p)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
out = failed[0].longrepr.reprcrash.message
assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 # '
def test_skip_file_by_conftest(self, testdir):
testdir.makepyfile(conftest="""
import pytest
def pytest_collect_file():
pytest.skip("intentional")
""", test_file="""
def test_one(): pass
""")
try:
reprec = testdir.inline_run(testdir.tmpdir)
except pytest.skip.Exception:
pytest.fail("wrong skipped caught")
reports = reprec.getreports("pytest_collectreport")
assert len(reports) == 1
assert reports[0].skipped
class TestNewSession(SessionTests):
def test_order_of_execution(self, testdir):
reprec = testdir.inline_runsource("""
values = []
def test_1():
values.append(1)
def test_2():
values.append(2)
def test_3():
assert values == [1,2]
class Testmygroup(object):
reslist = values
def test_1(self):
self.reslist.append(1)
def test_2(self):
self.reslist.append(2)
def test_3(self):
self.reslist.append(3)
def test_4(self):
assert self.reslist == [1,2,1,2,3]
""")
passed, skipped, failed = reprec.countoutcomes()
assert failed == skipped == 0
assert passed == 7
# also test listnames() here ...
def test_collect_only_with_various_situations(self, testdir):
p = testdir.makepyfile(
test_one="""
def test_one():
raise ValueError()
class TestX(object):
def test_method_one(self):
pass
class TestY(TestX):
pass
""",
test_three="xxxdsadsadsadsa",
__init__=""
)
reprec = testdir.inline_run('--collect-only', p.dirpath())
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 3
assert not reprec.getreports("pytest_runtest_logreport")
started = reprec.getcalls("pytest_collectstart")
finished = reprec.getreports("pytest_collectreport")
assert len(started) == len(finished)
assert len(started) == 7 # XXX extra TopCollector
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_import_error(self, testdir):
testdir.makepyfile(__init__="")
testdir.makepyfile(test_one="xxxx", test_two="yyyy")
reprec = testdir.inline_run("-x", testdir.tmpdir)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_overridden_by_maxfail(self, testdir):
testdir.makepyfile(__init__="")
testdir.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
reprec = testdir.inline_run("-x", "--maxfail=2", testdir.tmpdir)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 2
def test_plugin_specify(testdir):
pytest.raises(ImportError, """
testdir.parseconfig("-p", "nqweotexistent")
""")
# pytest.raises(ImportError,
# "config.do_configure(config)"
# )
def test_plugin_already_exists(testdir):
config = testdir.parseconfig("-p", "terminal")
assert config.option.plugins == ['terminal']
config._do_configure()
config._ensure_unconfigure()
def test_exclude(testdir):
hellodir = testdir.mkdir("hello")
hellodir.join("test_hello.py").write("x y syntaxerror")
hello2dir = testdir.mkdir("hello2")
hello2dir.join("test_hello2.py").write("x y syntaxerror")
testdir.makepyfile(test_ok="def test_pass(): pass")
result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_sessionfinish_with_start(testdir):
testdir.makeconftest("""
import os
values = []
def pytest_sessionstart():
values.append(os.getcwd())
os.chdir("..")
def pytest_sessionfinish():
assert values[0] == os.getcwd()
""")
res = testdir.runpytest("--collect-only")
assert res.ret == EXIT_NOTESTSCOLLECTED
| {
"repo_name": "avadacatavra/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_session.py",
"copies": "14",
"size": "8801",
"license": "mpl-2.0",
"hash": 1973044402612436000,
"line_mean": 33.5137254902,
"line_max": 112,
"alpha_frac": 0.5519827292,
"autogenerated": false,
"ratio": 4.161229314420804,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
def setup_module(mod):
mod.nose = pytest.importorskip("nose")
def test_nose_setup(testdir):
p = testdir.makepyfile(
"""
values = []
from nose.tools import with_setup
@with_setup(lambda: values.append(1), lambda: values.append(2))
def test_hello():
assert values == [1]
def test_world():
assert values == [1,2]
test_hello.setup = lambda: values.append(1)
test_hello.teardown = lambda: values.append(2)
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
def test_setup_func_with_setup_decorator():
from _pytest.nose import call_optional
values = []
class A(object):
@pytest.fixture(autouse=True)
def f(self):
values.append(1)
call_optional(A(), "f")
assert not values
def test_setup_func_not_callable():
from _pytest.nose import call_optional
class A(object):
f = 1
call_optional(A(), "f")
def test_nose_setup_func(testdir):
p = testdir.makepyfile(
"""
from nose.tools import with_setup
values = []
def my_setup():
a = 1
values.append(a)
def my_teardown():
b = 2
values.append(b)
@with_setup(my_setup, my_teardown)
def test_hello():
print (values)
assert values == [1]
def test_world():
print (values)
assert values == [1,2]
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
def test_nose_setup_func_failure(testdir):
p = testdir.makepyfile(
"""
from nose.tools import with_setup
values = []
my_setup = lambda x: 1
my_teardown = lambda x: 2
@with_setup(my_setup, my_teardown)
def test_hello():
print (values)
assert values == [1]
def test_world():
print (values)
assert values == [1,2]
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*TypeError: <lambda>()*"])
def test_nose_setup_func_failure_2(testdir):
testdir.makepyfile(
"""
values = []
my_setup = 1
my_teardown = 2
def test_hello():
assert values == []
test_hello.setup = my_setup
test_hello.teardown = my_teardown
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_nose_setup_partial(testdir):
pytest.importorskip("functools")
p = testdir.makepyfile(
"""
from functools import partial
values = []
def my_setup(x):
a = x
values.append(a)
def my_teardown(x):
b = x
values.append(b)
my_setup_partial = partial(my_setup, 1)
my_teardown_partial = partial(my_teardown, 2)
def test_hello():
print (values)
assert values == [1]
def test_world():
print (values)
assert values == [1,2]
test_hello.setup = my_setup_partial
test_hello.teardown = my_teardown_partial
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_nose_test_generator_fixtures(testdir):
p = testdir.makepyfile(
"""
# taken from nose-0.11.1 unit_tests/test_generator_fixtures.py
from nose.tools import eq_
called = []
def outer_setup():
called.append('outer_setup')
def outer_teardown():
called.append('outer_teardown')
def inner_setup():
called.append('inner_setup')
def inner_teardown():
called.append('inner_teardown')
def test_gen():
called[:] = []
for i in range(0, 5):
yield check, i
def check(i):
expect = ['outer_setup']
for x in range(0, i):
expect.append('inner_setup')
expect.append('inner_teardown')
expect.append('inner_setup')
eq_(called, expect)
test_gen.setup = outer_setup
test_gen.teardown = outer_teardown
check.setup = inner_setup
check.teardown = inner_teardown
class TestClass(object):
def setup(self):
print ("setup called in %s" % self)
self.called = ['setup']
def teardown(self):
print ("teardown called in %s" % self)
eq_(self.called, ['setup'])
self.called.append('teardown')
def test(self):
print ("test called in %s" % self)
for i in range(0, 5):
yield self.check, i
def check(self, i):
print ("check called in %s" % self)
expect = ['setup']
#for x in range(0, i):
# expect.append('setup')
# expect.append('teardown')
#expect.append('setup')
eq_(self.called, expect)
"""
)
result = testdir.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*10 passed*"])
def test_module_level_setup(testdir):
testdir.makepyfile(
"""
from nose.tools import with_setup
items = {}
def setup():
items[1]=1
def teardown():
del items[1]
def setup2():
items[2] = 2
def teardown2():
del items[2]
def test_setup_module_setup():
assert items[1] == 1
@with_setup(setup2, teardown2)
def test_local_setup():
assert items[2] == 2
assert 1 not in items
"""
)
result = testdir.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_nose_style_setup_teardown(testdir):
testdir.makepyfile(
"""
values = []
def setup_module():
values.append(1)
def teardown_module():
del values[0]
def test_hello():
assert values == [1]
def test_world():
assert values == [1]
"""
)
result = testdir.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
def test_nose_setup_ordering(testdir):
testdir.makepyfile(
"""
def setup_module(mod):
mod.visited = True
class TestClass(object):
def setup(self):
assert visited
def test_first(self):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_apiwrapper_problem_issue260(testdir):
    # this would end up trying to call an optional teardown on the class;
    # for plain unittests we don't want nose behaviour
testdir.makepyfile(
"""
import unittest
class TestCase(unittest.TestCase):
def setup(self):
#should not be called in unittest testcases
assert 0, 'setup'
def teardown(self):
#should not be called in unittest testcases
assert 0, 'teardown'
def setUp(self):
print('setup')
def tearDown(self):
print('teardown')
def test_fun(self):
pass
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_setup_teardown_linking_issue265(testdir):
    # we accidentally didn't integrate nose setupstate with normal setupstate
    # this test ensures that won't happen again
testdir.makepyfile(
'''
import pytest
class TestGeneric(object):
def test_nothing(self):
"""Tests the API of the implementation (for generic and specialized)."""
@pytest.mark.skipif("True", reason=
"Skip tests to check if teardown is skipped as well.")
class TestSkipTeardown(TestGeneric):
def setup(self):
"""Sets up my specialized implementation for $COOL_PLATFORM."""
raise Exception("should not call setup for skipped tests")
def teardown(self):
"""Undoes the setup."""
raise Exception("should not call teardown for skipped tests")
'''
)
reprec = testdir.runpytest()
reprec.assert_outcomes(passed=1, skipped=1)
def test_SkipTest_during_collection(testdir):
p = testdir.makepyfile(
"""
import nose
raise nose.SkipTest("during collection")
def test_failing():
assert False
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(skipped=1)
def test_SkipTest_in_test(testdir):
testdir.makepyfile(
"""
import nose
def test_skipping():
raise nose.SkipTest("in test")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
def test_istest_function_decorator(testdir):
p = testdir.makepyfile(
"""
import nose.tools
@nose.tools.istest
def not_test_prefix():
pass
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(passed=1)
def test_nottest_function_decorator(testdir):
testdir.makepyfile(
"""
import nose.tools
@nose.tools.nottest
def test_prefix():
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
def test_istest_class_decorator(testdir):
p = testdir.makepyfile(
"""
import nose.tools
@nose.tools.istest
class NotTestPrefix(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest(p)
result.assert_outcomes(passed=1)
def test_nottest_class_decorator(testdir):
testdir.makepyfile(
"""
import nose.tools
@nose.tools.nottest
class TestPrefix(object):
def test_method(self):
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
| {
"repo_name": "davidszotten/pytest",
"path": "testing/test_nose.py",
"copies": "2",
"size": "10598",
"license": "mit",
"hash": -4086959844285042700,
"line_mean": 23.5324074074,
"line_max": 88,
"alpha_frac": 0.5344404605,
"autogenerated": false,
"ratio": 4.229050279329609,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5763490739829609,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import pytest
def test_yield_tests_deprecation(testdir):
testdir.makepyfile("""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield "m1", func1, 15, 3*5
yield "m2", func1, 42, 6*7
def test_gen2():
for k in range(10):
yield func1, 1, 1
""")
result = testdir.runpytest('-ra')
result.stdout.fnmatch_lines([
'*yield tests are deprecated, and scheduled to be removed in pytest 4.0*',
'*2 passed*',
])
assert result.stdout.str().count('yield tests are deprecated') == 2
def test_funcarg_prefix_deprecation(testdir):
testdir.makepyfile("""
def pytest_funcarg__value():
return 10
def test_funcarg_prefix(value):
assert value == 10
""")
result = testdir.runpytest('-ra')
result.stdout.fnmatch_lines([
('*pytest_funcarg__value: '
'declaring fixtures using "pytest_funcarg__" prefix is deprecated '
'and scheduled to be removed in pytest 4.0. '
'Please remove the prefix and use the @pytest.fixture decorator instead.'),
'*1 passed*',
])
def test_pytest_setup_cfg_deprecated(testdir):
testdir.makefile('.cfg', setup='''
[pytest]
addopts = --verbose
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*'])
def test_str_args_deprecated(tmpdir, testdir):
"""Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0."""
from _pytest.main import EXIT_NOTESTSCOLLECTED
warnings = []
class Collect(object):
def pytest_logwarning(self, message):
warnings.append(message)
ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()])
msg = ('passing a string to pytest.main() is deprecated, '
'pass a list of arguments instead.')
assert msg in warnings
assert ret == EXIT_NOTESTSCOLLECTED
def test_getfuncargvalue_is_deprecated(request):
pytest.deprecated_call(request.getfuncargvalue, 'tmpdir')
def test_resultlog_is_deprecated(testdir):
result = testdir.runpytest('--help')
result.stdout.fnmatch_lines(['*DEPRECATED path for machine-readable result log*'])
testdir.makepyfile('''
def test():
pass
''')
result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log'))
result.stdout.fnmatch_lines([
'*--result-log is deprecated and scheduled for removal in pytest 4.0*',
'*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*',
])
@pytest.mark.filterwarnings('always:Metafunc.addcall is deprecated')
def test_metafunc_addcall_deprecated(testdir):
testdir.makepyfile("""
def pytest_generate_tests(metafunc):
metafunc.addcall({'i': 1})
metafunc.addcall({'i': 2})
def test_func(i):
pass
""")
res = testdir.runpytest('-s')
assert res.ret == 0
res.stdout.fnmatch_lines([
"*Metafunc.addcall is deprecated*",
"*2 passed, 2 warnings*",
])
def test_terminal_reporter_writer_attr(pytestconfig):
"""Check that TerminalReporter._tw is also available as 'writer' (#2984)
This attribute is planned to be deprecated in 3.4.
"""
try:
import xdist # noqa
pytest.skip('xdist workers disable the terminal reporter plugin')
except ImportError:
pass
terminal_reporter = pytestconfig.pluginmanager.get_plugin('terminalreporter')
assert terminal_reporter.writer is terminal_reporter._tw
@pytest.mark.parametrize('plugin', ['catchlog', 'capturelog'])
def test_pytest_catchlog_deprecated(testdir, plugin):
testdir.makepyfile("""
def test_func(pytestconfig):
pytestconfig.pluginmanager.register(None, 'pytest_{0}')
""".format(plugin))
res = testdir.runpytest()
assert res.ret == 0
res.stdout.fnmatch_lines([
"*pytest-*log plugin has been merged into the core*",
"*1 passed, 1 warnings*",
])
| {
"repo_name": "tareqalayan/pytest",
"path": "testing/deprecated_test.py",
"copies": "1",
"size": "4205",
"license": "mit",
"hash": -8326292208509188000,
"line_mean": 32.1102362205,
"line_max": 110,
"alpha_frac": 0.6328180737,
"autogenerated": false,
"ratio": 4.027777777777778,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160595851477778,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from datetime import datetime
from datetime import timedelta
import numpy as np
import pandas
from sunpy.extern import six
import astropy.time
__all__ = ['find_time', 'extract_time', 'parse_time', 'is_time', 'day_of_year', 'break_time', 'get_day', 'is_time_in_given_format']
# Mapping of time format codes to regular expressions.
REGEX = {
'%Y': '(?P<year>\d{4})',
'%j': '(?P<dayofyear>\d{3})',
'%m': '(?P<month>\d{1,2})',
'%d': '(?P<day>\d{1,2})',
'%H': '(?P<hour>\d{1,2})',
'%M': '(?P<minute>\d{1,2})',
'%S': '(?P<second>\d{1,2})',
'%f': '(?P<microsecond>\d+)',
'%b': '(?P<month_str>[a-zA-Z]+)',
}
TIME_FORMAT_LIST = [
"%Y-%m-%dT%H:%M:%S.%f", # Example 2007-05-04T21:08:12.999999
"%Y/%m/%dT%H:%M:%S.%f", # Example 2007/05/04T21:08:12.999999
"%Y-%m-%dT%H:%M:%S.%fZ", # Example 2007-05-04T21:08:12.999Z
"%Y-%m-%dT%H:%M:%S", # Example 2007-05-04T21:08:12
"%Y/%m/%dT%H:%M:%S", # Example 2007/05/04T21:08:12
"%Y%m%dT%H%M%S.%f", # Example 20070504T210812.999999
"%Y%m%dT%H%M%S", # Example 20070504T210812
"%Y/%m/%d %H:%M:%S", # Example 2007/05/04 21:08:12
"%Y/%m/%d %H:%M", # Example 2007/05/04 21:08
"%Y/%m/%d %H:%M:%S.%f", # Example 2007/05/04 21:08:12.999999
"%Y-%m-%d %H:%M:%S.%f", # Example 2007-05-04 21:08:12.999999
"%Y-%m-%d %H:%M:%S", # Example 2007-05-04 21:08:12
"%Y-%m-%d %H:%M", # Example 2007-05-04 21:08
"%Y-%b-%d %H:%M:%S", # Example 2007-May-04 21:08:12
"%Y-%b-%d %H:%M", # Example 2007-May-04 21:08
"%Y-%b-%d", # Example 2007-May-04
"%Y-%m-%d", # Example 2007-05-04
"%Y/%m/%d", # Example 2007/05/04
"%d-%b-%Y", # Example 04-May-2007
"%Y%m%d_%H%M%S", # Example 20070504_210812
"%Y:%j:%H:%M:%S", # Example 2012:124:21:08:12
"%Y:%j:%H:%M:%S.%f", # Example 2012:124:21:08:12.999999
"%Y%m%d%H%M%S", # Example 20140101000001 (JSOC / VSO)
]
def _group_or_none(match, group, fun):
try:
ret = match.group(group)
except IndexError:
return None
else:
return fun(ret)
def _n_or_eq(a, b):
return a is None or a == b
def _regex_parse_time(inp, format):
    # Parser for finding the hour value so we can adjust the string
    # from 24:00:00 to 00:00:00 on the next day, because strptime does not
    # understand the former.
for key, value in six.iteritems(REGEX):
format = format.replace(key, value)
match = re.match(format, inp)
if match is None:
return None, None
try:
hour = match.group("hour")
except IndexError:
return inp, timedelta(days=0)
if match.group("hour") == "24":
if not all(_n_or_eq(_group_or_none(match, g, int), 00)
for g in ["minute", "second", "microsecond"]
):
raise ValueError
from_, to = match.span("hour")
return inp[:from_] + "00" + inp[to:], timedelta(days=1)
return inp, timedelta(days=0)
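# Illustrative note (not part of the original module): _regex_parse_time
# rewrites a "24:00:00" timestamp so strptime can handle it and reports the
# day offset to add back, e.g.
#   _regex_parse_time("2014/05/04 24:00:00", "%Y/%m/%d %H:%M:%S")
# is expected to return ("2014/05/04 00:00:00", timedelta(days=1)), while a
# format without an hour field simply returns the input and timedelta(days=0).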
def find_time(string, format):
""" Return iterator of occurrences of date formatted with format
in string. Currently supported format codes: """
re_format = format
for key, value in six.iteritems(REGEX):
re_format = re_format.replace(key, value)
matches = re.finditer(re_format, string)
for match in matches:
try:
matchstr = string[slice(*match.span())]
dt = datetime.strptime(matchstr, format)
except ValueError:
continue
else:
yield dt
find_time.__doc__ += ', '.join(list(REGEX.keys()))
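# Illustrative usage sketch (not in the original source): find_time scans a
# longer string for substrings parseable with the given format, e.g.
#   list(find_time("scan started 2014/05/04 21:08:12 UTC", "%Y/%m/%d %H:%M:%S"))
# would be expected to yield [datetime(2014, 5, 4, 21, 8, 12)].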
def _iter_empty(iter):
try:
next(iter)
except StopIteration:
return True
return False
def extract_time(string):
""" Find subset of string that corresponds to a datetime and return
    its value as a datetime. If more than one or none is found, raise
ValueError. """
matched = None
bestmatch = None
for time_format in TIME_FORMAT_LIST:
found = find_time(string, time_format)
try:
match = next(found)
except StopIteration:
continue
else:
if matched is not None:
if time_format.startswith(matched):
# Already matched is a substring of the one just matched.
matched = time_format
bestmatch = match
elif not matched.startswith(time_format):
# If just matched is substring of time_format, just ignore
# just matched.
raise ValueError("Ambiguous string")
else:
matched = time_format
bestmatch = match
if not _iter_empty(found):
raise ValueError("Ambiguous string")
if not matched:
raise ValueError("Time not found")
return bestmatch
def parse_time(time_string, time_format=''):
"""Given a time string will parse and return a datetime object.
Similar to the anytim function in IDL.
utime -- Time since epoch 1 Jan 1979
Parameters
----------
time_string : [ int, float, time_string, datetime ]
Date to parse which can be either time_string, int, datetime object.
time_format : [ basestring, utime, datetime ]
Specifies the format user has provided the time_string in.
Returns
-------
out : datetime
DateTime corresponding to input date string
Note:
If time_string is an instance of float, then it is assumed to be in utime format.
Examples
--------
>>> import sunpy.time
>>> sunpy.time.parse_time('2012/08/01')
datetime.datetime(2012, 8, 1, 0, 0)
>>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')
datetime.datetime(2005, 8, 4, 0, 1, 2)
"""
if isinstance(time_string, pandas.tslib.Timestamp):
return time_string.to_datetime()
elif isinstance(time_string, datetime) or time_format == 'datetime':
return time_string
elif isinstance(time_string, tuple):
return datetime(*time_string)
elif time_format == 'utime' or isinstance(time_string, (int, float)) :
return datetime(1979, 1, 1) + timedelta(0, time_string)
elif isinstance(time_string, pandas.tseries.index.DatetimeIndex):
return time_string._mpl_repr()
elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):
ii = [ss.astype(datetime) for ss in time_string]
# Validate (in an agnostic way) that we are getting a datetime rather than a date
return np.array([datetime(*(dt.timetuple()[:6])) for dt in ii])
    elif time_string == 'now':
return datetime.utcnow()
elif isinstance(time_string, astropy.time.Time):
return time_string.datetime
else:
# remove trailing zeros and the final dot to allow any
# number of zeros. This solves issue #289
if '.' in time_string:
time_string = time_string.rstrip("0").rstrip(".")
for time_format in TIME_FORMAT_LIST:
try:
try:
ts, time_delta = _regex_parse_time(time_string,
time_format)
except TypeError:
break
if ts is None:
continue
return datetime.strptime(ts, time_format) + time_delta
except ValueError:
pass
raise ValueError("{tstr!s} is not a valid time string!".format(tstr=time_string))
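# Illustrative sketch (not in the original source): besides the string formats
# above, parse_time accepts utime floats (seconds since 1979-01-01) and tuples:
#   parse_time(0.0)           # -> datetime(1979, 1, 1, 0, 0)
#   parse_time((2012, 8, 1))  # -> datetime(2012, 8, 1, 0, 0)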
def is_time(time_string, time_format=''):
"""
Returns true if the input is a valid date/time representation
Parameters
----------
time_string : [ int, float, time_string, datetime ]
Date to parse which can be either time_string, int, datetime object.
time_format : [ basestring, utime, datetime ]
Specifies the format user has provided the time_string in.
Returns
-------
out : bool
True if can be parsed by parse_time
Notes
-----
If time_string is an instance of float, then it is assumed to be in
unix time format.
Examples
--------
>>> import sunpy.time
>>> sunpy.time.parse_time('2012/08/01')
datetime.datetime(2012, 8, 1, 0, 0)
>>> sunpy.time.parse_time('2005-08-04T00:01:02.000Z')
datetime.datetime(2005, 8, 4, 0, 1, 2)
.. todo::
add ability to parse tai (International Atomic Time seconds
since Jan 1, 1958)
"""
if time_string is None:
return False
elif isinstance(time_string, datetime):
return True
try:
parse_time(time_string,time_format)
except ValueError:
return False
else:
return True
def day_of_year(time_string):
"""Returns the (fractional) day of year.
Parameters
----------
time_string : string
A parse_time compatible string
Returns
-------
out : float
The fractional day of year (where Jan 1st is 1).
Examples
--------
>>> import sunpy.time
>>> sunpy.time.day_of_year('2012/01/01')
1.0
>>> sunpy.time.day_of_year('2012/08/01')
214.0
>>> sunpy.time.day_of_year('2005-08-04T00:18:02.000Z')
216.01252314814815
"""
SECONDS_IN_DAY = 60 * 60 * 24.0
time = parse_time(time_string)
time_diff = time - datetime(time.year, 1, 1, 0, 0, 0)
return time_diff.days + time_diff.seconds / SECONDS_IN_DAY + 1
def break_time(t='now', time_format=''):
"""Given a time returns a string. Useful for naming files."""
#TODO: should be able to handle a time range
return parse_time(t, time_format).strftime("%Y%m%d_%H%M%S")
def get_day(dt):
""" Return datetime for the beginning of the day of given datetime. """
return datetime(dt.year, dt.month, dt.day)
def is_time_in_given_format(time_string, time_format):
"""Tests whether a time string is formatted according to the given time
format."""
try:
datetime.strptime(time_string, time_format)
return True
except ValueError:
return False
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/time/time.py",
"copies": "1",
"size": "10283",
"license": "bsd-2-clause",
"hash": -132090842417742820,
"line_mean": 32.2783171521,
"line_max": 131,
"alpha_frac": 0.5741515122,
"autogenerated": false,
"ratio": 3.5446397793864186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4618791291586419,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from unittest import TestCase
from webob import Request, Response
from webtest import TestApp, TestRequest
from manhattan.middleware import ManhattanMiddleware
from manhattan.record import Record
from manhattan.log.memory import MemoryLog
class SampleApp(object):
def __call__(self, environ, start_response):
req = Request(environ)
if req.path_info.endswith('.txt'):
s = 'Hello %s' % req.path_info
resp = Response(s)
resp.content_type = 'text/plain'
elif req.path_info.endswith('.iter'):
resp = Response()
s = 'Hello %s' % req.path_info.encode('ascii')
def app_iter(sample):
for piece in ('<html><body>', sample, '</body>', '</html>'):
yield piece
self.consumed_iter = True
yield ' '
self.consumed_iter = False
resp.content_type = 'text/html'
resp.app_iter = app_iter(s)
else:
s = '<html><body><h1>Hello %s</h1></body></html>' % req.path_info
resp = Response(s)
resp.content_type = 'text/html'
return resp(environ, start_response)
log = MemoryLog()
host_map = {'localhost': 3,
'example.com': 5}
inner_app = SampleApp()
wrapped_app = ManhattanMiddleware(inner_app, log, 'secret', host_map=host_map)
app = TestApp(wrapped_app)
class TestMiddleware(TestCase):
def setUp(self):
app.reset()
log.purge()
def process(self):
records = list(log.process())
self.assertEqual(len(records), 1)
record = Record.from_list(records[0][0])
return record
def test_request(self):
resp = app.get('/')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertEqual(record.site_id, '3')
first_vid = record.vid
m = re.search('<img (.+)src="(.+)" alt="" />', resp.body)
pixel_path = m.group(2)
resp = app.get(pixel_path)
self.assertEqual(resp.content_type, 'image/gif')
record = self.process()
self.assertEqual(record.key, 'pixel')
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
resp = app.get('/foo')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/foo'))
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
def test_host_map(self):
resp = app.get('/hello', extra_environ={'HTTP_HOST': 'example.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/hello'))
self.assertEqual(record.site_id, '5')
def test_unknown_host(self):
resp = app.get('/somepage',
extra_environ={'HTTP_HOST':
'supercalifragilicious.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/somepage'))
self.assertEqual(record.site_id, '0')
def test_pixel_req(self):
resp = app.get('/vpixel.gif')
self.assertEqual(resp.content_type, 'image/gif',
'An html response should have a pixel tag.')
def test_non_html_pixel(self):
resp = app.get('/non-html-page.txt')
self.assertNotIn('/vpixel.gif', resp.body,
'A non-html response should not have a pixel tag.')
def test_generator_response(self):
req = Request.blank('/quux.iter')
resp = req.get_response(wrapped_app)
self.assertFalse(inner_app.consumed_iter,
'The generator response has been buffered by '
'middleware before instead of being returned as an '
'iterable.')
self.assertIn('/vpixel.gif', resp.body)
self.assertTrue(inner_app.consumed_iter)
def test_latin1_user_agent(self):
# Example user agent is latin1-encoded, so should be preserved.
sample_ua = '\xc0 \xe0 hello'
app.get('/somepage', extra_environ={'HTTP_USER_AGENT': sample_ua})
record = self.process()
self.assertEqual(record.user_agent, sample_ua.decode('latin1'))
def test_nongetpost_methods_not_processed(self):
app.put('/somepage')
app.delete('/somepage')
app.options('/somepage')
records = list(log.process())
self.assertEqual(len(records), 0)
def test_safari_top_sites_not_counted(self):
app.get('/blah', headers={'X-Purpose': 'preview'})
records = list(log.process())
self.assertEqual(len(records), 0)
def test_signature_mangled(self):
app.get('/')
orig_cookie = app.cookies['manhattan']
# truncate the last 4 chars, which will blow the sig
bad_cookie = orig_cookie[:-4]
bad_request = TestRequest.blank('/', cookies={'manhattan': bad_cookie})
app.request(bad_request)
new_cookie = app.cookies['manhattan']
self.assertNotEqual(bad_cookie, new_cookie)
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/tests/test_middleware.py",
"copies": "1",
"size": "5386",
"license": "mit",
"hash": 1058024441326132900,
"line_mean": 32.8742138365,
"line_max": 79,
"alpha_frac": 0.5855922763,
"autogenerated": false,
"ratio": 3.809052333804809,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48946446101048086,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
#import re
import glob
import os
import numpy as np
from qtpy.QtCore import (Qt)
from qtpy.QtGui import (QCursor)
from qtpy.QtWidgets import (QFileDialog, QMenu, QMessageBox, QTableWidgetSelectionRange)
import addie.processing.idl.populate_master_table
from addie.processing.idl.export_table import ExportTable
from addie.processing.idl.import_table import ImportTable
from addie.utilities.file_handler import FileHandler
from addie.processing.idl.populate_background_widgets import PopulateBackgroundWidgets
from addie.processing.idl.sample_environment_handler import SampleEnvironmentHandler
import addie.processing.idl.step2_gui_handler
from addie.widgets.filedialog import get_save_file
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
class TableHandler(object):
list_selected_row = None
def __init__(self, parent=None):
self.parent = parent
def retrieve_list_of_selected_rows(self):
self.list_selected_row = []
for _row_index in range(self.parent.postprocessing_ui.table.rowCount()):
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()[1]
if _selected_widget.checkState() == Qt.Checked:
_entry = self._collect_metadata(row_index=_row_index)
self.list_selected_row.append(_entry)
def _collect_metadata(self, row_index=-1):
if row_index == -1:
return []
_name = self.retrieve_item_text(row_index, 1)
_runs = self.retrieve_item_text(row_index, 2)
_sample_formula = self.retrieve_item_text(row_index, 3)
_mass_density = self.retrieve_item_text(row_index, 4)
_radius = self.retrieve_item_text(row_index, 5)
_packing_fraction = self.retrieve_item_text(row_index, 6)
_sample_shape = self._retrieve_sample_shape(row_index)
_do_abs_correction = self._retrieve_do_abs_correction(row_index)
_metadata = {'name': _name,
'runs': _runs,
'sample_formula': _sample_formula,
'mass_density': _mass_density,
'radius': _radius,
'packing_fraction': _packing_fraction,
'sample_shape': _sample_shape,
'do_abs_correction': _do_abs_correction}
return _metadata
def _retrieve_sample_shape(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
_selected_index = _widget.currentIndex()
_sample_shape = _widget.itemText(_selected_index)
return _sample_shape
def _retrieve_do_abs_correction(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 8).children()[1]
if (_widget.checkState() == Qt.Checked):
return 'go'
else:
return 'nogo'
def current_row(self):
_row = self.parent.postprocessing_ui.table.currentRow()
return _row
def right_click(self, position=None):
_duplicate_row = -1
_plot_sofq = -1
_remove_row = -1
_new_row = -1
_copy = -1
_paste = -1
_cut = -1
_refresh_table = -1
_clear_table = -1
# _import = -1
        # _export = -1
        _check_all = -1
_uncheck_all = -1
_undo = -1
_redo = -1
_plot_sofq_diff_first_run_row = -1
_plot_sofq_diff_average_row = -1
_plot_cryostat = -1
_plot_furnace = -1
_invert_selection = -1
menu = QMenu(self.parent)
if self.parent.table_selection_buffer == {}:
paste_status = False
else:
paste_status = True
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_undo = menu.addAction("Undo")
_undo.setEnabled(self.parent.undo_button_enabled)
_redo = menu.addAction("Redo")
_redo.setEnabled(self.parent.redo_button_enabled)
menu.addSeparator()
_copy = menu.addAction("Copy")
_paste = menu.addAction("Paste")
self._paste_menu = _paste
_paste.setEnabled(paste_status)
_cut = menu.addAction("Clear")
menu.addSeparator()
_check_all = menu.addAction("Check All")
        _uncheck_all = menu.addAction("Uncheck All")
menu.addSeparator()
_invert_selection = menu.addAction("Inverse Selection")
menu.addSeparator()
_new_row = menu.addAction("Insert Blank Row")
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_duplicate_row = menu.addAction("Duplicate Row")
_remove_row = menu.addAction("Remove Row(s)")
menu.addSeparator()
_plot_menu = menu.addMenu('Plot')
_plot_sofq = _plot_menu.addAction("S(Q) ...")
_plot_sofq_diff_first_run_row = _plot_menu.addAction("S(Q) Diff (1st run)...")
_plot_sofq_diff_average_row = _plot_menu.addAction("S(Q) Diff (Avg.)...")
_temp_menu = _plot_menu.addMenu("Temperature")
            _plot_cryostat = _temp_menu.addAction("Cryostat...")
_plot_furnace = _temp_menu.addAction("Furnace...")
menu.addSeparator()
_refresh_table = menu.addAction("Refresh/Reset Table")
_clear_table = menu.addAction("Clear Table")
action = menu.exec_(QCursor.pos())
self.current_row = self.current_row()
if action == _undo:
self.parent.action_undo_clicked()
elif action == _redo:
self.parent.action_redo_clicked()
elif action == _copy:
self._copy()
elif action == _paste:
self._paste()
elif action == _cut:
self._cut()
elif action == _duplicate_row:
self._duplicate_row()
elif action == _plot_sofq:
self._plot_sofq()
elif action == _plot_sofq_diff_first_run_row:
self._plot_sofq_diff_first_run_row()
elif action == _plot_sofq_diff_average_row:
self._plot_sofq_diff_average_row()
elif action == _plot_cryostat:
self._plot_temperature(samp_env_choice='cryostat')
elif action == _plot_furnace:
self._plot_temperature(samp_env_choice='furnace')
elif action == _invert_selection:
self._inverse_selection()
elif action == _new_row:
self._new_row()
elif action == _remove_row:
self._remove_selected_rows()
elif action == _refresh_table:
self._refresh_table()
elif action == _clear_table:
self._clear_table()
elif action == _check_all:
self.check_all()
elif action == _uncheck_all:
self.uncheck_all()
def _import(self):
_current_folder = self.parent.current_folder
[_table_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select File",
directory=_current_folder,
filter=("text (*.txt);; All Files (*.*)"))
if not _table_file:
return
if isinstance(_table_file, tuple):
_table_file = _table_file[0]
new_path = os.path.dirname(_table_file)
self.parent.current_folder = new_path
self._clear_table()
_import_handler = ImportTable(filename=_table_file, parent=self.parent)
_import_handler.run()
_pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
_pop_back_wdg.run()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _export(self):
_current_folder = self.parent.current_folder
_table_file, _ = get_save_file(parent=self.parent,
caption="Select File",
directory=_current_folder,
filter={'text (*.txt)':'txt', 'All Files (*.*)':''})
if not _table_file:
return
if isinstance(_table_file, tuple):
_table_file = _table_file[0]
_file_handler = FileHandler(filename=_table_file)
_file_handler.check_file_extension(ext_requested='txt')
_table_file = _file_handler.filename
_export_handler = ExportTable(parent=self.parent,
filename=_table_file)
_export_handler.run()
def _copy(self):
_selection = self.parent.postprocessing_ui.table.selectedRanges()
_selection = _selection[0]
left_column = _selection.leftColumn()
right_column = _selection.rightColumn()
top_row = _selection.topRow()
bottom_row = _selection.bottomRow()
self.parent.table_selection_buffer = {'left_column': left_column,
'right_column': right_column,
'top_row': top_row,
'bottom_row': bottom_row}
self._paste_menu.setEnabled(True)
def _paste(self, _cut=False):
_copy_selection = self.parent.table_selection_buffer
_copy_left_column = _copy_selection['left_column']
        # make sure the selection starts at the same column
_paste_selection = self.parent.postprocessing_ui.table.selectedRanges()
_paste_left_column = _paste_selection[0].leftColumn()
if not (_copy_left_column == _paste_left_column):
QMessageBox.warning(self.parent,
"Check copy/paste selection!",
"Check your selection! ")
return
_copy_right_column = _copy_selection["right_column"]
_copy_top_row = _copy_selection["top_row"]
_copy_bottom_row = _copy_selection["bottom_row"]
_paste_top_row = _paste_selection[0].topRow()
index = 0
for _row in range(_copy_top_row, _copy_bottom_row+1):
_paste_row = _paste_top_row + index
for _column in range(_copy_left_column, _copy_right_column + 1):
if _column in np.arange(1, 7):
if _cut:
_item_text = ''
else:
_item_text = self.retrieve_item_text(_row, _column)
self.paste_item_text(_paste_row, _column, _item_text)
if _column == 7:
if _cut:
_widget_index = 0
else:
_widget_index = self.retrieve_sample_shape_index(_row)
self.set_widget_index(_widget_index, _paste_row)
if _column == 8:
if _cut:
_widget_state = Qt.Unchecked
else:
_widget_state = self.retrieve_do_abs_correction_state(_row)
self.set_widget_state(_widget_state, _paste_row)
index += 1
def _inverse_selection(self):
selected_range = self.parent.postprocessing_ui.table.selectedRanges()
nbr_column = self.parent.postprocessing_ui.table.columnCount()
self.select_all(status=True)
# inverse selected rows
for _range in selected_range:
_range.leftColumn = 0
_range.rightColun = nbr_column-1
self.parent.postprocessing_ui.table.setRangeSelected(_range, False)
def select_all(self, status=True):
nbr_row = self.parent.postprocessing_ui.table.rowCount()
nbr_column = self.parent.postprocessing_ui.table.columnCount()
_full_range = QTableWidgetSelectionRange(0, 0, nbr_row-1, nbr_column-1)
self.parent.postprocessing_ui.table.setRangeSelected(_full_range, status)
def check_all(self):
self.select_first_column(status=True)
def uncheck_all(self):
self.select_first_column(status=False)
def select_row(self, row=-1, status=True):
nbr_column = self.parent.postprocessing_ui.table.columnCount()
_range = QTableWidgetSelectionRange(row, 0, row, nbr_column-1)
self.parent.postprocessing_ui.table.setRangeSelected(_range, status)
def check_row(self, row=-1, status=True):
_widgets = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()[1]
_selected_widget.setChecked(status)
def select_first_column(self, status=True):
for _row in range(self.parent.postprocessing_ui.table.rowCount()):
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
_selected_widget.setChecked(status)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def check_selection_status(self, state, row):
list_ranges = self.parent.postprocessing_ui.table.selectedRanges()
for _range in list_ranges:
bottom_row = _range.bottomRow()
top_row = _range.topRow()
range_row = list(range(top_row, bottom_row + 1))
for _row in range_row:
_widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
if len(_widgets) > 0:
_selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
_selected_widget.setChecked(state)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _cut(self):
self._copy()
self._paste(_cut=True)
def _duplicate_row(self):
_row = self.current_row
metadata_to_copy = self._collect_metadata(row_index=_row)
o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
o_populate.add_new_row(metadata_to_copy, row=_row)
def _plot_fetch_files(self, file_type='SofQ'):
if file_type == 'SofQ':
search_dir = './SofQ'
prefix = 'NOM_'
suffix = 'SQ.dat'
elif file_type == 'nexus':
cwd = os.getcwd()
search_dir = cwd[:cwd.find('shared')]+'/nexus'
prefix = 'NOM_'
suffix = '.nxs.h5'
#ipts = int(re.search(r"IPTS-(\d*)\/", os.getcwd()).group(1))
_row = self.current_row
_row_runs = self._collect_metadata(row_index=_row)['runs'].split(',')
output_list = list()
file_list = [a_file for a_file in glob.glob(search_dir+'/'+prefix+'*')]
for run in _row_runs:
the_file = search_dir+'/'+prefix+str(run)+suffix
if the_file in file_list:
output_list.append({'file': the_file, 'run': run})
return output_list
def _plot_fetch_data(self):
file_list = self._plot_fetch_files(file_type='SofQ')
for data in file_list:
with open(data['file'], 'r') as handle:
x, y, e = np.loadtxt(handle, unpack=True)
data['x'] = x
data['y'] = y
return file_list
def _plot_datasets(self, datasets, shift_value=1.0, cmap_choice='inferno', title=None):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# configure plot
cmap = plt.get_cmap(cmap_choice)
cNorm = colors.Normalize(vmin=0, vmax=len(datasets))
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
mrks = [0, -1]
# plot data
shifter = 0.0
for idx, data in enumerate(datasets):
data['y'] += shifter
colorVal = scalarMap.to_rgba(idx)
if 'linestyle' in data:
ax.plot(data['x'], data['y'], data['linestyle']+'o', label=data['run'], color=colorVal, markevery=mrks,)
else:
ax.plot(data['x'], data['y'], label=data['run'], color=colorVal, markevery=mrks)
shifter += shift_value
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
if title:
fig.suptitle(title)
plt.show()
def _plot_sofq(self):
sofq_datasets = self._plot_fetch_data()
self._plot_datasets(sorted(sofq_datasets, key=lambda k: int(k['run'])), title='S(Q)')
def _plot_sofq_diff_first_run_row(self):
sofq_datasets = self._plot_fetch_data()
sofq_base = dict(sofq_datasets[0])
for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
sofq['y'] = sofq['y'] - sofq_base['y']
self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - S(Q) for run '+sofq_base['run'])
def _plot_sofq_diff_average_row(self):
sofq_datasets = self._plot_fetch_data()
sofq_data = [sofq['y'] for sofq in sofq_datasets]
sofq_avg = np.average(sofq_data, axis=0)
for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
sofq['y'] = sofq['y'] - sofq_avg
self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - <S(Q)>')
def _plot_temperature(self, samp_env_choice=None):
file_list = self._plot_fetch_files(file_type='nexus')
samp_env = SampleEnvironmentHandler(samp_env_choice)
datasets = list()
for data in file_list:
samp_x, samp_y = samp_env.getDataFromFile(data['file'], 'samp')
envi_x, envi_y = samp_env.getDataFromFile(data['file'], 'envi')
print(data['file'])
datasets.append({'run': data['run'] + '_samp', 'x': samp_x, 'y': samp_y, 'linestyle': '-'})
datasets.append({'run': None, 'x': envi_x, 'y': envi_y, 'linestyle': '--'})
self._plot_datasets(sorted(datasets, key=lambda k: k['run']),
shift_value=0.0, title='Temperature: '+samp_env_choice)
def _new_row(self):
_row = self.current_row
if _row == -1:
_row = 0
o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
_metadata = o_populate.empty_metadata()
o_populate.add_new_row(_metadata, row=_row)
def _remove_selected_rows(self):
selected_range = self.parent.postprocessing_ui.table.selectedRanges()
_nbr_row_removed = 0
_local_nbr_row_removed = 0
for _range in selected_range:
_top_row = _range.topRow()
_bottom_row = _range.bottomRow()
nbr_row = _bottom_row - _top_row + 1
for i in np.arange(nbr_row):
self._remove_row(row=_top_row - _nbr_row_removed)
_local_nbr_row_removed += 1
_nbr_row_removed = _local_nbr_row_removed
_pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
_pop_back_wdg.run()
def _remove_row(self, row=-1):
if row == -1:
row = self.current_row
self.parent.postprocessing_ui.table.removeRow(row)
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _refresh_table(self):
self.parent.populate_table_clicked()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def _clear_table(self):
_number_of_row = self.parent.postprocessing_ui.table.rowCount()
self.parent.postprocessing_ui.table.setSortingEnabled(False)
for _row in np.arange(_number_of_row):
self.parent.postprocessing_ui.table.removeRow(0)
self.parent.postprocessing_ui.background_line_edit.setText("")
self.parent.postprocessing_ui.background_comboBox.clear()
_o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
_o_gui.check_gui()
def set_widget_state(self, _widget_state, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
_widget.setCheckState(_widget_state)
def retrieve_do_abs_correction_state(self, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
return _widget.checkState()
def set_widget_index(self, _widget_index, _row):
_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 7)
_widget.setCurrentIndex(_widget_index)
def paste_item_text(self, _row, _column, _item_text):
_item = self.parent.postprocessing_ui.table.item(_row, _column)
_item.setText(_item_text)
def retrieve_sample_shape_index(self, row_index):
_widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
_selected_index = _widget.currentIndex()
return _selected_index
def retrieve_item_text(self, row, column):
_item = self.parent.postprocessing_ui.table.item(row, column)
if _item is None:
return ''
else:
return str(_item.text())
def name_search(self):
nbr_row = self.parent.postprocessing_ui.table.rowCount()
if nbr_row == 0:
return
_string = str(self.parent.postprocessing_ui.name_search.text()).lower()
if _string == '':
self.select_all(status=False)
else:
for _row in range(nbr_row):
_text_row = str(self.parent.postprocessing_ui.table.item(_row, 1).text()).lower()
if _string in _text_row:
self.select_row(row=_row, status=True)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/table_handler.py",
"copies": "1",
"size": "22403",
"license": "mit",
"hash": 796581622623209200,
"line_mean": 39.1487455197,
"line_max": 120,
"alpha_frac": 0.5731375262,
"autogenerated": false,
"ratio": 3.6648126942581385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9732910817854139,
"avg_score": 0.0010078805207999312,
"num_lines": 558
} |
from __future__ import absolute_import, division, print_function
import re
import os
from simdna.simdnautil import util
import gzip
def assertParameterNecessaryForMode(parameterName, parameter, modeName, mode):
if (parameter is None):
raise RuntimeError(
parameterName + " is necessary when " + modeName + " is " + mode)
def assertParameterIrrelevantForMode(parameterName, parameter, modeName, mode):
if (parameter is not None):
raise RuntimeError(
parameterName + " is irrelevant when " + modeName + " is " + mode)
def unsupportedValueForMode(modeName, mode):
raise RuntimeError("Unsupported value for " + modeName + ": " + str(mode))
def getCoreFileName(fileName):
return getFileNameParts(fileName).coreFileName
def getFileNameParts(fileName):
p = re.compile(r"^(.*/)?([^\./]+)(\.[^/]*)?$")
m = p.search(fileName)
return FileNameParts(m.group(1), m.group(2), m.group(3))
class FileNameParts:
def __init__(self, directory, coreFileName, extension):
self.directory = directory if (directory is not None) else os.getcwd()
self.coreFileName = coreFileName
self.extension = extension
def getFullPath(self):
        return self.directory + "/" + self.getCoreFileNameAndExtension()
def getCoreFileNameAndExtension(self):
return self.coreFileName + self.extension
def getCoreFileNameWithTransformation(self, transformation=lambda x: x):
return transformation(self.coreFileName)
def getFileNameWithTransformation(self, transformation, extension=None):
toReturn = self.getCoreFileNameWithTransformation(transformation)
if (extension is not None):
toReturn = toReturn + extension
else:
if (self.extension is not None):
toReturn = toReturn + self.extension
return toReturn
def getFilePathWithTransformation(self, transformation=lambda x: x, extension=None):
return self.directory + "/" + self.getFileNameWithTransformation(transformation, extension=extension)
def getFileHandle(filename, mode="r"):
if (re.search('.gz$', filename) or re.search('.gzip', filename)):
if (mode == "r"):
mode = "rt"
elif (mode == "w"):
# I think write will actually append if the file already
# exists...so you want to remove it if it exists
import os.path
if os.path.isfile(filename):
os.remove(filename)
return gzip.open(filename, mode)
else:
return open(filename, mode)
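# Illustrative sketch (not part of the original module): getFileHandle lets
# callers stay agnostic about compression, e.g. for a hypothetical gzipped file
#   with getFileHandle("reads.txt.gz") as fh:  # opened via gzip.open in "rt" mode
#       first_line = fh.readline()
# while a plain-text path falls through to the ordinary open().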
# returns an array of all filter variables.
def splitLinesIntoOtherFiles(fileHandle, preprocessingStep, filterVariableFromLine, outputFilePathFromFilterVariable):
filterVariablesToReturn = []
filterVariableToOutputFileHandle = {}
for line in fileHandle:
processedLine = line
if (preprocessingStep is not None):
processedLine = preprocessingStep(processedLine)
filterVariable = filterVariableFromLine(processedLine)
if (filterVariable not in filterVariableToOutputFileHandle):
outputFilePath = outputFilePathFromFilterVariable(filterVariable)
filterVariablesToReturn.append(filterVariable)
outputFileHandle = getFileHandle(outputFilePath, 'w')
filterVariableToOutputFileHandle[filterVariable] = outputFileHandle
outputFileHandle = filterVariableToOutputFileHandle[filterVariable]
outputFileHandle.write(line)
for fileHandle in filterVariableToOutputFileHandle.items():
fileHandle[1].close()
return filterVariablesToReturn
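# Illustrative sketch (not in the original source): splitLinesIntoOtherFiles
# demultiplexes a file into one output file per key, e.g. splitting on the
# first tab-separated column of each line of a hypothetical "all.tsv":
#   keys = splitLinesIntoOtherFiles(getFileHandle("all.tsv"),
#                                   defaultTabSeppd,
#                                   lambda fields: fields[0],
#                                   lambda key: "split_" + key + ".tsv")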
# transformation has a specified default so that this can be used to, for
# instance, unzip a gzipped file.
def transformFile(
    fileHandle,
    outputFile,
    transformation=lambda x: x,
    outputTitleFromInputTitle=None,
    ignoreInputTitle=False,
    # should be some function of the line and the line number
    filterFunction=None,
    # processing to be applied before filterFunction AND transformation
    preprocessing=None,
    progressUpdate=None,
    progressUpdateFileName=None
):
outputFileHandle = getFileHandle(outputFile, 'w')
i = 0
action = lambda inp, i: outputFileHandle.write(inp)
for line in fileHandle:
i += 1
if (i == 1):
if (outputTitleFromInputTitle is not None):
outputFileHandle.write(outputTitleFromInputTitle(line))
processLine(line, i, ignoreInputTitle, preprocessing,
filterFunction, transformation, action)
printProgress(progressUpdate, i, progressUpdateFileName)
outputFileHandle.close()
# reads a line of the file on-demand.
class FileReader:
def __init__(self, fileHandle, preprocessing=None, filterFunction=None, transformation=lambda x: x, ignoreInputTitle=False):
self.fileHandle = fileHandle
self.preprocessing = preprocessing
self.filterFunction = filterFunction
self.transformation = transformation
self.ignoreInputTitle = ignoreInputTitle
self.i = 0
self.eof = False
def getNextLine(self):
line = self.fileHandle.readline()
if (line != ""): # empty string is eof...
self.i += 1
if (self.i == 1):
if (self.ignoreInputTitle == True):
self.title = line
return self.getNextLine()
def action(x, i): # to be passed into processLine
self.toReturn = x
processLine(
line=line, i=self.i, ignoreInputTitle=self.ignoreInputTitle, preprocessing=self.preprocessing, filterFunction=self.filterFunction, transformation=self.transformation, action=action)
return self.toReturn
else:
self.eof = True
return None
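# Illustrative usage sketch (not in the original source): FileReader yields one
# processed line per call, which keeps memory flat on very large files, e.g.
#   reader = FileReader(getFileHandle("big_table.tsv"),
#                       transformation=defaultTabSeppd, ignoreInputTitle=True)
#   fields = reader.getNextLine()  # a list of fields; None once reader.eof is True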
def writeToFile(outputFile, contents):
outputFileHandle = getFileHandle(outputFile, 'w')
writeToFileHandle(outputFileHandle, contents)
def writeToFileHandle(outputFileHandle, contents):
outputFileHandle.write(contents)
outputFileHandle.close()
def transformFileIntoArray(fileHandle):
return readRowsIntoArr(fileHandle)
def performActionOnEachLineOfFile(
    fileHandle,
    # should be a function that accepts the preprocessed/filtered line and the line number
    action=None,
    transformation=lambda x: x,
    ignoreInputTitle=False,
    filterFunction=None,
    # the preprocessing step is performed before
    # both 'filterFunction' and 'transformation'.
    # Originally I just had 'transformation'.
    preprocessing=None,
    actionFromTitle=None,
    progressUpdate=None,
    progressUpdateFileName=None
):
if (actionFromTitle is None and action is None):
raise ValueError("One of actionFromTitle or action should not be None")
if (actionFromTitle is not None and action is not None):
raise ValueError(
"Only one of actionFromTitle or action can be non-None")
if (actionFromTitle is not None and ignoreInputTitle == False):
raise ValueError(
"If actionFromTitle is not None, ignoreInputTitle should probably be True because it implies a title is present")
i = 0
for line in fileHandle:
i += 1
if (i == 1 and actionFromTitle is not None):
action = actionFromTitle(line)
processLine(line, i, ignoreInputTitle, preprocessing,
filterFunction, transformation, action, progressUpdate)
printProgress(progressUpdate, i, progressUpdateFileName)
fileHandle.close()
def performActionInBatchesOnEachLineOfFile(fileHandle, batchSize, actionOnLineInBatch, actionAtEndOfBatch, transformation=lambda x: x, filterFunction=None, preprocessing=None, progressUpdate=None, ignoreInputTitle=False):
def action(inp, lineNumber):
actionOnLineInBatch(inp, lineNumber)
if (lineNumber % batchSize == 0):
actionAtEndOfBatch()
performActionOnEachLineOfFile(
fileHandle=fileHandle, action=action, transformation=transformation, filterFunction=filterFunction, preprocessing=preprocessing, progressUpdate=progressUpdate, ignoreInputTitle=ignoreInputTitle
)
actionAtEndOfBatch()
def processLine(line, i, ignoreInputTitle, preprocessing, filterFunction, transformation, action, progressUpdate=None):
if (i > 1 or (ignoreInputTitle == False)):
if (preprocessing is not None):
line = preprocessing(line)
if (filterFunction is None or filterFunction(line, i)):
action(transformation(line), i)
def printProgress(progressUpdate, i, fileName=None):
if progressUpdate is not None:
if (i % progressUpdate == 0):
print("Processed " + str(i) + " lines" +
str("" if fileName is None else " of " + fileName))
def defaultTabSeppd(s):
s = trimNewline(s)
s = splitByTabs(s)
return s
def defaultWhitespaceSeppd(s):
s = trimNewline(s)
s = s.split()
return s
def trimNewline(s):
return s.rstrip('\r\n')
def appendNewline(s):
return s + "\n" # aargh O(n) aargh FIXME if you can
def splitByDelimiter(s, delimiter):
return s.split(delimiter)
def splitByTabs(s):
return splitByDelimiter(s, "\t")
def stringToFloat(s):
return float(s)
def stringToInt(s):
return int(s)
def lambdaMaker_getAtPosition(index):
return (lambda x: x[index])
def lambdaMaker_insertSuffixIntoFileName(suffix, separator):
return lambda fileName: getFileNameParts(fileName).getFileNameWithTransformation(
lambda coreFileName: coreFileName + separator + suffix
)
def lambdaMaker_insertPrefixIntoFileName(prefix, separator):
return lambda fileName: getFileNameParts(fileName).getFileNameWithTransformation(
lambda coreFileName: prefix + separator + coreFileName
)
def simpleDictionaryFromFile(fileHandle, keyIndex=0, valIndex=1, titlePresent=False, transformation=defaultTabSeppd):
from collections import OrderedDict
toReturn = OrderedDict()
def action(inp, lineNumber):
toReturn[inp[keyIndex]] = inp[valIndex]
performActionOnEachLineOfFile(
fileHandle=fileHandle, action=action, transformation=transformation, ignoreInputTitle=titlePresent
)
return toReturn
'''Accepts a title, uses it to produce a function that generates a dictionary given an array'''
def lambdaMaker_dictionaryFromLine(title, delimiter="\t"):
lineToArr = util.chainFunctions(
trimNewline, lambda x: splitByDelimiter(x, delimiter))
splitTitle = lineToArr(title)
def lineToDictionary(line):
toReturn = {}
for entry in enumerate(lineToArr(line)):
toReturn[splitTitle[entry[0]]] = entry[1]
return toReturn
return lineToDictionary
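# Illustrative sketch (not in the original source): the closure returned above
# maps a data line onto the column names taken from the title line, e.g.
#   line_to_dict = lambdaMaker_dictionaryFromLine("chrom\tstart\tend\n")
#   line_to_dict("chr1\t10\t20\n")  # -> {'chrom': 'chr1', 'start': '10', 'end': '20'}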
# wrapper for the cat command
def concatenateFiles(outputFile, arrOfFilesToConcatenate):
util.executeAsSystemCall(
"cat " + (" ".join(arrOfFilesToConcatenate)) + " > " + outputFile)
def concatenateFiles_preprocess(
    outputFile,
    arrOfFilesToConcatenate,
    # x is the input line, y is the transformed input file name (see
    # inputFileNameTransformation)
    transformation=lambda x, y: x,
    # the function that transforms the path of the input file
    inputFileTransformation=lambda x: getFileNameParts(x).coreFileName,
    # function that takes input title and transforms into output title.
    # Considers title of first file in line.
    outputTitleFromInputTitle=None,
    # really unfortunately named. Should have been named 'titlePresent'
    ignoreInputTitle=False
):
inputTitle = None
outputFileHandle = getFileHandle(outputFile, 'w')
for aFile in arrOfFilesToConcatenate:
transformedInputFilename = inputFileTransformation(aFile)
aFileHandle = getFileHandle(aFile)
i = 0
for line in aFileHandle:
i += 1
if (i == 1):
if (outputTitleFromInputTitle is not None):
if (inputTitle is None):
inputTitle = line
outputFileHandle.write(
outputTitleFromInputTitle(inputTitle))
if (i > 1 or (ignoreInputTitle == False)):
outputFileHandle.write(transformation(
line, transformedInputFilename))
outputFileHandle.close()
def readRowsIntoArr(fileHandle, progressUpdate=None, titlePresent=False):
arr = []
def action(inp, lineNumber):
if progressUpdate is not None:
if (lineNumber % progressUpdate == 0):
print("processed " + str(lineNumber) + " lines")
arr.append(inp)
performActionOnEachLineOfFile(
fileHandle, transformation=trimNewline, action=action, ignoreInputTitle=titlePresent
)
return arr
def writeRowsToFile(rows, theFile):
writeRowsToFileHandle(rows, getFileHandle(theFile, 'w'))
def writeRowsToFileHandle(rows, fileHandle):
for row in rows:
fileHandle.write(str(row) + "\n")
fileHandle.close()
def readColIntoArr(fileHandle, col=0, titlePresent=True):
arr = []
def action(inp, lineNumber):
arr.append(inp[col])
performActionOnEachLineOfFile(
fileHandle, transformation=defaultTabSeppd, action=action, ignoreInputTitle=titlePresent
)
return arr
def read2DMatrix(fileHandle, colNamesPresent=False, rowNamesPresent=False, contentType=float, contentStartIndex=None, contentEndIndex=None, progressUpdate=None, numpify=False):
"""
"numpify" will return a numpy mat for the rows
returns an instance of util.Titled2DMatrix
Has attributes rows, rowNames, colNames
"""
fileHandle = getFileHandle(fileHandle) if isinstance(
fileHandle, str) else fileHandle
if (contentStartIndex is None):
contentStartIndex = 1 if rowNamesPresent else 0
if (contentEndIndex is not None):
assert contentEndIndex > contentStartIndex
titled2DMatrix = util.Titled2DMatrix(
colNamesPresent=colNamesPresent, rowNamesPresent=rowNamesPresent)
contentEndIndexWrapper = util.VariableWrapper(None)
def action(inp, lineNumber):
if (lineNumber == 1 and colNamesPresent):
contentEndIndexWrapper.var = len(
inp) if contentEndIndex is None else contentEndIndex
titled2DMatrix.setColNames(
inp[contentStartIndex:contentEndIndexWrapper.var])
assert contentEndIndexWrapper.var > contentStartIndex
else:
rowName = inp[0] if rowNamesPresent else None
# ignore the column denoting the name of the row
arr = [contentType(x) for x in inp[
contentStartIndex:contentEndIndexWrapper.var]]
titled2DMatrix.addRow(arr, rowName=rowName)
performActionOnEachLineOfFile(
fileHandle=fileHandle, action=action, transformation=defaultTabSeppd, ignoreInputTitle=False, progressUpdate=progressUpdate
)
if (numpify):
import numpy as np
titled2DMatrix.rows = np.array(titled2DMatrix.rows)
return titled2DMatrix
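# Illustrative usage sketch (not in the original source): for a hypothetical
# tab-separated matrix "scores.tsv" with a header row and row names in the
# first column, one might call
#   mat = read2DMatrix("scores.tsv", colNamesPresent=True,
#                      rowNamesPresent=True, numpify=True)
#   mat.colNames, mat.rowNames, mat.rows  # names plus a numpy array of values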
class SubsetOfColumnsToUseOptions(object):
def __init__(self, mode='setOfColumnNames', columnNames=None, N=None):
self.mode = mode
self.columnNames = columnNames
self.N = N
self.integrityChecks()
def integrityChecks(self):
if (self.mode == 'setOfColumnNames'):
assertParameterIrrelevantForMode(
"N", self.N, "subsetOfColumnsToUseMode", self.mode)
assertParameterNecessaryForMode(
"columnNames", self.columnNames, "subsetOfColumnsToUseMode", self.mode)
elif (self.mode == 'topN'):
assertParameterIrrelevantForMode(
"columnNames", self.columnNames, "subsetOfColumnsToUseMode", self.mode)
assertParameterNecessaryForMode(
"N", self.N, "subsetOfColumnsToUseMode", self.mode)
        else:
            raise ValueError(
                "Unsupported subsetOfColumnsToUseMode: " + str(self.mode))
def getCoreTitledMappingAction(subsetOfColumnsToUseOptions, contentType, contentStartIndex, subsetOfRowsToUse=None, keyColumns=[0]):
subsetOfRowsToUseMembershipDict = dict(
(x, 1) for x in subsetOfRowsToUse) if subsetOfRowsToUse is not None else None
indicesToCareAboutWrapper = util.VariableWrapper(None)
def titledMappingAction(inp, lineNumber):
if (lineNumber == 1): # handling of the title
if subsetOfColumnsToUseOptions is None:
columnOrdering = inp[contentStartIndex:]
else:
if (subsetOfColumnsToUseOptions.mode == SubsetOfColumnsToUseMode.setOfColumnNames):
columnOrdering = subsetOfColumnsToUseOptions.columnNames
            elif (subsetOfColumnsToUseOptions.mode == SubsetOfColumnsToUseMode.topN):
                # take the first N columns after contentStartIndex
                columnOrdering = inp[
                    contentStartIndex:contentStartIndex + subsetOfColumnsToUseOptions.N]
else:
raise RuntimeError(
"Unsupported subsetOfColumnsToUseOptions.mode: " + str(subsetOfColumnsToUseOptions.mode))
print("Subset of labels to use is specified")
indicesLookup = dict((x, i) for (i, x) in enumerate(inp))
indicesToCareAboutWrapper.var = []
for labelToUse in columnOrdering:
indicesToCareAboutWrapper.var.append(
indicesLookup[labelToUse])
return columnOrdering
else:
# regular line processing
key = "_".join(inp[x] for x in keyColumns)
if (subsetOfRowsToUseMembershipDict is None or (key in subsetOfRowsToUseMembershipDict)):
if (indicesToCareAboutWrapper.var is None):
arrToAdd = [contentType(x)
for x in inp[contentStartIndex:]]
else:
arrToAdd = [contentType(inp[x])
for x in indicesToCareAboutWrapper.var]
return key, arrToAdd
return None
return titledMappingAction
def readTitledMapping(fileHandle, contentType=float, contentStartIndex=1, subsetOfColumnsToUseOptions=None, subsetOfRowsToUse=None, progressUpdate=None, keyColumns=[0]):
"""
returns an instance of util.TitledMapping.
util.TitledMapping has functions:
- getTitledArrForKey(key): returns an instance of util.TitledArr which has: getCol(colName) and setCol(colName)
- getArrForKey(key): returns the array for the key
- keyPresenceCheck(key): throws an error if the key is absent
Is also iterable! Returns an iterator of util.TitledArr
subsetOfColumnsToUseOptions: instance of SubsetOfColumnsToUseOptions
subsetOfRowsToUse: something that has a subset of row ids to be considered
"""
titledMappingWrapper = util.VariableWrapper(None)
coreTitledMappingAction = getCoreTitledMappingAction(subsetOfColumnsToUseOptions=subsetOfColumnsToUseOptions,
contentType=contentType, contentStartIndex=contentStartIndex, subsetOfRowsToUse=subsetOfRowsToUse, keyColumns=keyColumns)
def action(inp, lineNumber):
if (lineNumber == 1): # handling of the title
columnOrdering = coreTitledMappingAction(inp, lineNumber)
titledMappingWrapper.var = util.TitledMapping(columnOrdering)
else:
key, arrToAdd = coreTitledMappingAction(inp, lineNumber)
if (arrToAdd is not None):
titledMappingWrapper.var.addKey(key, arrToAdd)
performActionOnEachLineOfFile(
fileHandle, transformation=defaultTabSeppd, action=action
)
return titledMappingWrapper.var
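# A minimal usage sketch for readTitledMapping, assuming a hypothetical
# tab-separated file "scores.tsv" with a title row and row keys in column 0;
# "someKey" is a placeholder row key.
def _example_readTitledMapping():
    titledMapping = readTitledMapping(getFileHandle("scores.tsv"),
                                      contentType=float, contentStartIndex=1)
    # look up the array for a particular row key
    arr = titledMapping.getArrForKey("someKey")
    print(arr)
    # TitledMapping is also iterable, yielding util.TitledArr instances
    for titledArr in titledMapping:
        print(titledArr)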
def writeMatrixToFile(fileHandle, rows, colNames=None, rowNames=None):
if (colNames is not None):
fileHandle.write(
("rowName\t" if rowNames is not None else "") + "\t".join(colNames) + "\n")
for i, row in enumerate(rows):
if (rowNames is not None):
fileHandle.write(rowNames[i] + "\t")
stringifiedRow = [str(x) for x in row]
toWrite = "\t".join(stringifiedRow) + "\n"
fileHandle.write(toWrite)
fileHandle.close()
# will trim the newline for you
def titleColumnToIndex(title, sep="\t"):
title = trimNewline(title)
title = title.split(sep)
return util.valToIndexMap(title)
def peekAtFirstLineOfFile(fileName):
fh = getFileHandle(fileName)
line = fh.readline()
fh.close()
return line
def getTitleOfFile(fileName):
title = defaultTabSeppd(peekAtFirstLineOfFile(fileName))
return title
class FastaIterator(object):
"""
Returns an iterator over lines of a fasta file - assumes each sequence
spans only one line!
"""
def __init__(self, fileHandle, progressUpdate=None, progressUpdateFileName=None):
self.fileHandle = fileHandle
self.progressUpdate = progressUpdate
self.progressUpdateFileName = progressUpdateFileName
self.lineCount = 0
def __iter__(self):
return self
    def next(self):
        self.lineCount += 1
        printProgress(self.progressUpdate, self.lineCount,
                      self.progressUpdateFileName)
        # next() raises StopIteration when the file handle runs out of lines
        keyLine = trimNewline(next(self.fileHandle))
        sequence = trimNewline(next(self.fileHandle))
        if (not keyLine.startswith(">")):
            raise RuntimeError(
                "Expecting a record name line that begins with > but got " + str(keyLine))
        key = keyLine.lstrip(">")
        return key, sequence
    # alias for the Python 3 iterator protocol
    __next__ = next
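# A minimal usage sketch for FastaIterator, assuming a hypothetical fasta file
# "seqs.fa" in which every sequence spans exactly one line.
def _example_FastaIterator():
    fastaIterator = FastaIterator(getFileHandle("seqs.fa"))
    for key, sequence in fastaIterator:
        print(key, len(sequence))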
class BackupForWriteFileHandle(object):
"""
Wrapper around a filehandle that
backs up the file while writing,
then deletes the backup when close
is called
"""
def __init__(self, fileName):
self.fileName = fileName
self.backupFileName = fileName + ".backup"
os.system("cp " + self.fileName + " " + self.backupFileName)
self.outputFileHandle = getFileHandle(self.fileName, 'w')
def write(self, *args, **kwargs):
self.outputFileHandle.write(*args, **kwargs)
def close(self):
self.outputFileHandle.close()
os.system("rm " + self.backupFileName)
    def restore(self):
        # close the partially written output first so buffered data cannot
        # clobber the restored file, then copy the backup back
        self.outputFileHandle.close()
        os.system("cp " + self.backupFileName + " " + self.fileName)
        os.system("rm " + self.backupFileName)
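# A minimal usage sketch for BackupForWriteFileHandle, assuming a hypothetical
# existing file "results.txt" that should be restored if the rewrite fails.
def _example_BackupForWriteFileHandle():
    handle = BackupForWriteFileHandle("results.txt")
    try:
        handle.write("new contents\n")
        handle.close()  # success: drop the backup
    except Exception:
        handle.restore()  # failure: put the original file back
        raise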
| {
"repo_name": "kundajelab/simdna",
"path": "simdna/simdnautil/fileProcessing.py",
"copies": "1",
"size": "22718",
"license": "mit",
"hash": 1321763878186418000,
"line_mean": 37.375,
"line_max": 221,
"alpha_frac": 0.6686768201,
"autogenerated": false,
"ratio": 4.239223735771599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5407900555871599,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import os
try:
from . import fits
except ImportError:
fits = None
try:
from . import jp2
except ImportError:
jp2 = None
try:
from . import ana
except ImportError:
ana = None
__all__ = ['read_file', 'read_file_header', 'write_file']
# File formats supported by SunPy
_known_extensions = {
('fts', 'fits'): 'fits',
('jp2', 'j2k', 'jpc', 'jpt'): 'jp2',
('fz', 'f0'): 'ana'
}
# Define a dict which raises a custom error message if the value is None
class Readers(dict):
def __init__(self, *args):
dict.__init__(self, *args)
def __getitem__(self, key):
val = dict.__getitem__(self, key)
if val is None:
raise ReaderError("The Reader sunpy.io.{key!s} is not available, ".format(key=key) +
"please check that you have the required dependencies installed.")
return val
# Map the readers
_readers = Readers({
    'fits': fits,
    'jp2': jp2,
    'ana': ana
})
def read_file(filepath, filetype=None, **kwargs):
"""
Automatically determine the filetype and read the file.
Parameters
----------
filepath : `str`
The file to be read
filetype : `str`
Supported reader or extension to manually specify the filetype.
Supported readers are ('jp2', 'fits', 'ana')
memmap : bool
Should memory mapping be used, i.e. keep data on disk rather than in RAM.
This is currently only supported by the FITS reader.
Returns
-------
pairs : `list`
A list of (data, header) tuples.
Notes
-----
Other keyword arguments are passed to the reader used.
"""
if filetype:
return _readers[filetype].read(filepath, **kwargs)
for extension, readername in _known_extensions.items():
if filepath.endswith(extension) or filetype in extension:
return _readers[readername].read(filepath, **kwargs)
# If filetype is not apparent from extension, attempt to detect
readername = _detect_filetype(filepath)
return _readers[readername].read(filepath, **kwargs)
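# A minimal usage sketch (the file names are hypothetical): read_file returns a
# list of (data, header) pairs, whether the input is FITS or JPEG 2000, and the
# filetype can also be forced explicitly.
def _example_read_file():
    pairs = read_file('aia_171.fits')
    data, header = pairs[0]
    pairs = read_file('image.bin', filetype='fits')
    return data, header, pairs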
def read_file_header(filepath, filetype=None, **kwargs):
"""
Reads the header from a given file.
    This should always return an instance of io.header.FileHeader
Parameters
----------
filepath : `str`
The file from which the header is to be read.
filetype : `str`
Supported reader or extension to manually specify the filetype.
Supported readers are ('jp2', 'fits')
Returns
-------
headers : `list`
A list of headers
"""
if filetype:
return _readers[filetype].get_header(filepath, **kwargs)
for extension, readername in _known_extensions.items():
if filepath.endswith(extension) or filetype in extension:
return _readers[readername].get_header(filepath, **kwargs)
readername = _detect_filetype(filepath)
return _readers[readername].get_header(filepath, **kwargs)
def write_file(fname, data, header, filetype='auto', **kwargs):
"""
Write a file from a data & header pair using one of the defined file types.
Parameters
----------
fname : `str`
Filename of file to save.
data : `numpy.ndarray`
Data to save to a fits file.
header : `collections.OrderedDict`
Meta data to save with the data.
    filetype : `str`
        {'auto', 'fits', 'jp2'} Filetype to save. If 'auto', the filetype is
        detected from the fname extension; otherwise specify a supported
        file extension.
Notes
-----
    * Other keyword arguments will be passed to the writer function used.
* This routine currently only supports saving a single HDU.
"""
if filetype == 'auto':
for extension, readername in _known_extensions.items():
if fname.endswith(extension):
return _readers[readername].write(fname, data, header, **kwargs)
else:
for extension, readername in _known_extensions.items():
if filetype in extension:
return _readers[readername].write(fname, data, header, **kwargs)
# Nothing has matched, panic
raise ValueError("This filetype is not supported")
def _detect_filetype(filepath):
"""
Attempts to determine the type of data contained in a file. This is only
used for reading because it opens the file to check the data.
Parameters
----------
filepath : `str`
Where the file is.
Returns
-------
filetype : `str`
The type of file.
"""
# Open file and read in first two lines
with open(filepath, 'rb') as fp:
line1 = fp.readline()
line2 = fp.readline()
# Some FITS files do not have line breaks at the end of header cards.
fp.seek(0)
first80 = fp.read(80)
# FITS
#
# Check the extensions to see if it is a gzipped FITS file
filepath_rest_ext1, ext1 = os.path.splitext(filepath)
_, ext2 = os.path.splitext(filepath_rest_ext1)
gzip_extensions = [".gz"]
fits_extensions = [".fts", ".fit", ".fits"]
if (ext1 in gzip_extensions and ext2 in fits_extensions):
return 'fits'
# Check for "KEY_WORD =" at beginning of file
match = re.match(r"[A-Z0-9_]{0,8} *=".encode('ascii'), first80)
if match is not None:
return 'fits'
# JPEG 2000
#
# Checks for one of two signatures found at beginning of all JP2 files.
# Adapted from ExifTool
# [1] http://www.sno.phy.queensu.ca/~phil/exiftool/
# [2] http://www.jpeg.org/public/fcd15444-2.pdf
# [3] ftp://ftp.remotesensing.org/jpeg2000/fcd15444-1.pdf
jp2_signatures = [b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a",
b"\x00\x00\x00\x0cjP\x1a\x1a\x0d\x0a\x87\x0a"]
for sig in jp2_signatures:
if line1 + line2 == sig:
return 'jp2'
# Raise an error if an unsupported filetype is encountered
raise UnrecognizedFileTypeError("The requested filetype is not currently "
"supported by SunPy.")
class UnrecognizedFileTypeError(IOError):
"""Exception to raise when an unknown file type is encountered"""
pass
class ReaderError(ImportError):
"""Exception to raise when an unknown file type is encountered"""
pass
class InvalidJPEG2000FileExtension(IOError):
pass
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/io/file_tools.py",
"copies": "1",
"size": "6434",
"license": "bsd-2-clause",
"hash": 6027619039845871000,
"line_mean": 27.8520179372,
"line_max": 96,
"alpha_frac": 0.6207646876,
"autogenerated": false,
"ratio": 3.8993939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5020158626993939,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import psycopg2
import psycopg2.extras
import requests
import json
from cbopensource.tools.eventduplicator.utils import get_process_id, update_sensor_id_refs, update_feed_id_refs
from copy import deepcopy
from collections import defaultdict
import logging
import datetime
__author__ = 'jgarman'
log = logging.getLogger(__name__)
class SolrBase(object):
def __init__(self, connection):
self.connection = connection
self.have_cb_conf = False
self.dbhandle = None
self.cb_conf = None
def __del__(self):
self.close()
def close(self):
self.connection.close()
def get_cb_conf(self):
fp = self.connection.open_file('/etc/cb/cb.conf')
self.cb_conf = fp.read()
self.have_cb_conf = True
def get_cb_conf_item(self, item, default=None):
retval = default
if not self.have_cb_conf:
self.get_cb_conf()
re_match = re.compile("%s=([^\\n]+)" % item)
if type(self.cb_conf) != str:
self.cb_conf = self.cb_conf.decode('utf8')
matches = re_match.search(self.cb_conf)
if matches:
retval = matches.group(1)
return retval
def get_db_parameters(self):
url = self.get_cb_conf_item('DatabaseURL', None)
if not url:
raise Exception("Could not get DatabaseURL from remote server")
db_pattern = re.compile('postgresql\\+psycopg2:\\/\\/([^:]+):([^@]+)@([^:]+):(\d+)/(.*)')
db_match = db_pattern.match(url)
if db_match:
username = db_match.group(1)
password = db_match.group(2)
hostname = db_match.group(3)
remote_port = db_match.group(4)
database_name = db_match.group(5)
return username, password, hostname, remote_port, database_name
raise Exception("Could not connect to database")
def dbconn(self):
"""
:return: database_connection
:rtype: psycopg2.connection
"""
if self.dbhandle:
return self.dbhandle
username, password, hostname, remote_port, database_name = self.get_db_parameters()
conn = self.connection.open_db(user=username, password=password, database=database_name, host='127.0.0.1',
port=remote_port)
self.dbhandle = conn
return conn
def solr_get(self, path, *args, **kwargs):
return self.connection.http_get(path, *args, **kwargs)
def solr_post(self, path, *args, **kwargs):
return self.connection.http_post(path, *args, **kwargs)
def find_db_row_matching(self, table_name, obj):
obj.pop('id', None)
cursor = self.dbconn().cursor()
predicate = ' AND '.join(["%s = %%(%s)s" % (key, key) for key in obj.keys()])
# FIXME: is there a better way to do this?
query = 'SELECT id from %s WHERE %s' % (table_name, predicate)
cursor.execute(query, obj)
row_id = cursor.fetchone()
if row_id:
return row_id[0]
else:
return None
def insert_db_row(self, table_name, obj):
obj.pop('id', None)
cursor = self.dbconn().cursor()
fields = ', '.join(obj.keys())
values = ', '.join(['%%(%s)s' % x for x in obj])
query = 'INSERT INTO %s (%s) VALUES (%s) RETURNING id' % (table_name, fields, values)
try:
cursor.execute(query, obj)
self.dbconn().commit()
row_id = cursor.fetchone()[0]
return row_id
except psycopg2.Error as e:
log.error("Error inserting row into table %s, id %s: %s" % (table_name, obj.get("id", None), e.message))
return None
class LocalConnection(object):
def __init__(self):
# TODO: if for some reason someone has changed SolrPort on their cb server... this is incorrect
self.solr_url_base = 'http://127.0.0.1:8080'
self.session = requests.Session()
@staticmethod
def open_file(filename, mode='r'):
return open(filename, mode)
@staticmethod
def open_db(user, password, database, host, port):
return psycopg2.connect(user=user, password=password, database=database, host=host, port=port)
def http_get(self, path, **kwargs):
return self.session.get('%s%s' % (self.solr_url_base, path), **kwargs)
def http_post(self, path, *args, **kwargs):
return self.session.post('%s%s' % (self.solr_url_base, path), *args, **kwargs)
def close(self):
pass
def __str__(self):
return "Local Cb datastore"
class SolrInputSource(SolrBase):
def __init__(self, connection, **kwargs):
self.query = kwargs.pop('query')
self.pagination_length = 20
super(SolrInputSource, self).__init__(connection)
def doc_count_hint(self):
query = "/solr/0/select"
params = {
'q': self.query,
'sort': 'start asc',
'wt': 'json',
'rows': 0
}
resp = self.solr_get(query, params=params)
rj = resp.json()
return rj.get('response', {}).get('numFound', 0)
def paginated_get(self, query, params, start=0):
params['rows'] = self.pagination_length
params['start'] = start
while True:
resp = self.solr_get(query, params=params)
rj = resp.json()
docs = rj.get('response', {}).get('docs', [])
if not len(docs):
break
for doc in docs:
yield doc
params['start'] += len(docs)
params['rows'] = self.pagination_length
def get_process_docs(self, query_filter=None):
query = "/solr/0/select"
if not query_filter:
query_filter = self.query
params = {
'q': query_filter,
'sort': 'start asc',
'wt': 'json'
}
for doc in self.paginated_get(query, params):
yield doc
def get_feed_doc(self, feed_key):
query = "/solr/cbfeeds/select"
feed_name, feed_id = feed_key.split(':')
params = {
'q': 'id:"%s" AND feed_name:%s' % (feed_id, feed_name),
'wt': 'json'
}
result = self.solr_get(query, params=params)
if not result.ok:
return None
rj = result.json()
docs = rj.get('response', {}).get('docs', [{}])
if len(docs) == 0:
return None
return docs[0]
def get_feed_metadata(self, feed_id):
try:
conn = self.dbconn()
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute('SELECT id,name,display_name,feed_url,summary,icon,provider_url,tech_data,category,icon_small ' +
                        'FROM alliance_feeds WHERE id=%s', (feed_id,))
feed_info = cur.fetchone()
if not feed_info:
return None
conn.commit()
except Exception as e:
log.error("Error getting feed metadata for id %s: %s" % (feed_id, str(e)))
return None
return feed_info
def get_binary_doc(self, md5sum):
query = "/solr/cbmodules/select"
params = {
'q': 'md5:%s' % md5sum.upper(),
'wt': 'json'
}
result = self.solr_get(query, params=params)
if result.status_code != 200:
return None
rj = result.json()
docs = rj.get('response', {}).get('docs', [{}])
if len(docs) == 0:
return None
return docs[0]
def get_version(self):
return self.connection.open_file('/usr/share/cb/VERSION').read()
def get_sensor_doc(self, sensor_id):
try:
conn = self.dbconn()
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute('SELECT * FROM sensor_registrations WHERE id=%s',(sensor_id,))
sensor_info = cur.fetchone()
cur.execute('SELECT * FROM sensor_builds WHERE id=%s', (sensor_info['build_id'],))
build_info = cur.fetchone()
cur.execute('SELECT * FROM sensor_os_environments WHERE id=%s', (sensor_info['os_environment_id'],))
environment_info = cur.fetchone()
conn.commit()
except Exception as e:
log.error("Error getting sensor data for sensor id %s: %s" % (sensor_id, str(e)))
return None
if not sensor_info or not build_info or not environment_info:
log.error("Could not get full sensor data for sensor id %d" % sensor_id)
return None
return {
'sensor_info': sensor_info,
'build_info': build_info,
'os_info': environment_info
}
def connection_name(self):
return str(self.connection)
def cleanup(self):
pass
class SolrOutputSink(SolrBase):
def __init__(self, connection):
super(SolrOutputSink, self).__init__(connection)
self.feed_id_map = {}
self.existing_md5s = set()
self.sensor_id_map = {}
self.sensor_os_map = {}
self.sensor_build_map = {}
self.written_docs = defaultdict(int)
self.new_metadata = defaultdict(list)
self.doc_endpoints = {
'binary': '/solr/cbmodules/update/json',
'proc': '/solr/0/update',
'feed': '/solr/cbfeeds/update/json'
}
self.now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z")
def set_data_version(self, version):
target_version = self.connection.open_file('/usr/share/cb/VERSION').read()
if type(target_version) != str:
target_version = target_version.decode('utf8')
target_version = target_version.strip()
source_major_version = '.'.join(version.split('.')[:2])
target_major_version = '.'.join(target_version.split('.')[:2])
if source_major_version != target_major_version:
log.warning(("Source data was generated from Cb version %s; target is %s. This may not work. "
"Continuing anyway." % (version, target_version)))
return True
# TODO: cut-and-paste violation
def get_binary_doc(self, md5sum):
query = "/solr/cbmodules/select"
params = {
'q': 'md5:%s' % md5sum.upper(),
'wt': 'json'
}
result = self.solr_get(query, params=params)
if result.status_code != 200:
return None
rj = result.json()
docs = rj.get('response', {}).get('docs', [{}])
if len(docs) == 0:
return None
return docs[0]
def output_doc(self, doc_type, doc_content):
args = {"add": {"commitWithin": 5000, "doc": doc_content}}
headers = {'content-type': 'application/json; charset=utf8'}
r = self.solr_post(self.doc_endpoints[doc_type],
data=json.dumps(args), headers=headers, timeout=60)
self.written_docs[doc_type] += 1
if not r.ok:
log.error("Error sending document to destination Solr: %s" % r.content)
return r
def output_feed_doc(self, doc_content):
if doc_content['feed_id'] not in self.feed_id_map:
log.warning("got feed document %s:%s without associated feed metadata" % (doc_content['feed_name'],
doc_content['id']))
else:
feed_id = self.feed_id_map[doc_content['feed_id']]
doc_content = deepcopy(doc_content)
update_feed_id_refs(doc_content, feed_id)
self.output_doc("feed", doc_content)
def output_binary_doc(self, doc_content):
md5sum = doc_content.get('md5').upper()
if md5sum in self.existing_md5s:
return
if self.get_binary_doc(md5sum):
self.existing_md5s.add(md5sum)
return
self.output_doc("binary", doc_content)
def output_process_doc(self, doc_content):
# first, update the sensor_id in the process document to match the target settings
if doc_content['sensor_id'] not in self.sensor_id_map:
log.warning("Got process document %s without associated sensor data" % get_process_id(doc_content))
else:
sensor_id = self.sensor_id_map[doc_content['sensor_id']]
doc_content = deepcopy(doc_content)
update_sensor_id_refs(doc_content, sensor_id)
# fix up the last_update field
last_update = doc_content.get("last_update", None) or self.now
doc_content["last_update"] = {"set": last_update}
doc_content.pop("last_server_update", None)
self.output_doc("proc", doc_content)
def output_feed_metadata(self, doc_content):
original_id = doc_content['id']
feed_id = self.find_db_row_matching('alliance_feeds', {'name': doc_content['name']})
if feed_id:
self.feed_id_map[original_id] = feed_id
return
doc_content.pop('id', None)
doc_content['manually_added'] = True
doc_content['enabled'] = False
doc_content['display_name'] += ' (added via cb-event-duplicator)'
feed_id = self.insert_db_row('alliance_feeds', doc_content)
self.new_metadata['feed'].append(doc_content['name'])
self.feed_id_map[original_id] = feed_id
def output_sensor_info(self, doc_content):
original_id = doc_content['sensor_info']['id']
sensor_id = self.find_db_row_matching('sensor_registrations',
{'computer_dns_name': doc_content['sensor_info']['computer_dns_name'],
'computer_name': doc_content['sensor_info']['computer_name']})
if sensor_id:
# there's already a sensor that matches what we're looking for
self.sensor_id_map[original_id] = sensor_id
return
# we need to first ensure that the sensor build and os_environment are available in the target server
os_id = self.find_db_row_matching('sensor_os_environments', doc_content['os_info'])
if not os_id:
os_id = self.insert_db_row('sensor_os_environments', doc_content['os_info'])
build_id = self.find_db_row_matching('sensor_builds', doc_content['build_info'])
if not build_id:
build_id = self.insert_db_row('sensor_builds', doc_content['build_info'])
doc_content['sensor_info']['group_id'] = 1 # TODO: mirror groups?
doc_content['sensor_info']['build_id'] = build_id
doc_content['sensor_info']['os_environment_id'] = os_id
sensor_id = self.insert_db_row('sensor_registrations', doc_content['sensor_info'])
self.new_metadata['sensor'].append(doc_content['sensor_info']['computer_name'])
self.sensor_id_map[original_id] = sensor_id
def cleanup(self):
headers = {'content-type': 'application/json; charset=utf8'}
args = {}
for doc_type in self.doc_endpoints.keys():
self.solr_post(self.doc_endpoints[doc_type] + '?commit=true',
data=json.dumps(args), headers=headers, timeout=60)
def connection_name(self):
return str(self.connection)
def report(self):
report_data = "Documents inserted into %s by type:\n" % (self.connection,)
for key in self.written_docs.keys():
report_data += " %8s: %d\n" % (key, self.written_docs[key])
for key in self.new_metadata.keys():
report_data += "New %ss created in %s:\n" % (key, self.connection)
for value in self.new_metadata[key]:
report_data += " %s\n" % value
return report_data
| {
"repo_name": "carbonblack/cb-event-duplicator",
"path": "cbopensource/tools/eventduplicator/solr_endpoint.py",
"copies": "1",
"size": "15924",
"license": "mit",
"hash": -5540732508703287000,
"line_mean": 34.9458239278,
"line_max": 120,
"alpha_frac": 0.5636146697,
"autogenerated": false,
"ratio": 3.734521575984991,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4798136245684991,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from . import steps
# Cache for _make_pp, maps pp_string -> pp_fn
_PP_MEMO = {}
# Lookup table of pp-string name -> pipeline step
PP_STEPS = dict((cls.name, cls) for cls in
(getattr(steps, cls_name) for cls_name in steps.__all__))
def preprocess(spectra, pp_string, wavelengths=None, copy=True):
pp_fn = _make_pp(pp_string)
if hasattr(spectra, 'shape'):
if copy:
spectra = spectra.copy()
S, w = pp_fn(spectra, wavelengths)
return S # TODO: return w as well
if not copy:
for t in spectra:
y, w = pp_fn(t[:,1:2].T, t[:,0])
t[:,0] = w
t[:,1] = y.ravel()
return spectra
pp = []
for t in spectra:
tt = t.copy()
y, w = pp_fn(tt[:,1:2].T, tt[:,0])
tt[:,0] = w
tt[:,1] = y.ravel()
pp.append(tt)
return pp
def _make_pp(pp_string):
'''Convert a preprocess string into its corresponding function.
pp_string: str, looks like "foo:1:2,bar:4,baz:quux"
In this example, there are 3 preprocessing steps: foo, bar, and baz:quux.
Step 'foo' takes two arguments, 'bar' takes one, and 'baz:quux' none.
Returns: pp_fn(spectra, wavelengths), callable
'''
# try to use the cache
if pp_string in _PP_MEMO:
return _PP_MEMO[pp_string]
# populate the preprocessing function pipeline
pipeline = []
if pp_string:
for step in pp_string.split(','):
# Hack: some names include a colon
parts = step.split(':')
if len(parts) > 1 and re.match(r'[a-z]+', parts[1]):
idx = 2
else:
idx = 1
name = ':'.join(parts[:idx])
args = ':'.join(parts[idx:])
pipeline.append(PP_STEPS[name].from_string(args))
# return a function that runs the pipeline
def _fn(S, w):
for p in pipeline:
S, w = p.apply(S, w)
return S, w
_PP_MEMO[pp_string] = _fn
return _fn
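# A minimal usage sketch: preprocess() takes either a 2-D array of spectra with
# a shared wavelengths vector, or a list of (n, 2) trajectories. The step names
# 'foo' and 'bar' below are the placeholders used in the _make_pp docstring and
# stand in for real entries registered in PP_STEPS.
def _example_preprocess():
  import numpy as np
  spectra = np.random.rand(5, 100)
  wavelengths = np.linspace(400, 900, 100)
  return preprocess(spectra, 'foo:1:2,bar:4', wavelengths=wavelengths)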
| {
"repo_name": "all-umass/superman",
"path": "superman/preprocess/pipeline.py",
"copies": "1",
"size": "1908",
"license": "mit",
"hash": 3657690625026275300,
"line_mean": 24.7837837838,
"line_max": 79,
"alpha_frac": 0.5938155136,
"autogenerated": false,
"ratio": 3.102439024390244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4196254537990244,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import requests
from lxml import etree
from symantecssl.request_models import RequestEnvelope as ReqEnv
class FailedRequest(Exception):
def __init__(self, response):
super(FailedRequest, self).__init__()
self.response = response
def post_request(endpoint, request_model, credentials):
"""Create a post request against Symantec's SOAPXML API.
Currently supported Request Models are:
GetModifiedOrders
QuickOrderRequest
note:: the request can take a considerable amount of time if the
date range covers a large amount of changes.
note:: credentials should be a dictionary with the following values:
partner_code
username
password
Access all data from response via models
:param endpoint: Symantec endpoint to hit directly
:param request_model: request model instance to initiate call type
:param credentials: Symantec specific credentials for orders.
:return response: deserialized response from API
"""
request_model.set_credentials(**credentials)
model = ReqEnv(request_model=request_model)
serialized_xml = etree.tostring(model.serialize(), pretty_print=True)
headers = {'Content-Type': 'application/soap+xml'}
response = requests.post(endpoint, serialized_xml, headers=headers)
setattr(response, "model", None)
# Symantec not expected to return 2xx range; only 200
if response.status_code != 200:
raise FailedRequest(response)
xml_root = etree.fromstring(response.content)
deserialized = request_model.response_model.deserialize(xml_root)
setattr(response, "model", deserialized)
return response
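# A minimal usage sketch, with a placeholder endpoint and credentials; the
# request model stands in for one of the supported models such as
# GetModifiedOrders.
def _example_post_request(request_model):
    credentials = {
        'partner_code': 'PARTNER',  # placeholder values
        'username': 'user',
        'password': 'secret',
    }
    try:
        response = post_request('https://api.example.com/soap',
                                request_model, credentials)
        return response.model  # deserialized response
    except FailedRequest as e:
        return e.response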
| {
"repo_name": "glyph/symantecssl",
"path": "symantecssl/order.py",
"copies": "1",
"size": "1721",
"license": "apache-2.0",
"hash": -8859789861087711000,
"line_mean": 29.7321428571,
"line_max": 73,
"alpha_frac": 0.7292271935,
"autogenerated": false,
"ratio": 4.379134860050891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
from __future__ import (absolute_import, division, print_function)
import setuptools
from distutils.core import setup, Extension
import numpy as np
import versioneer
fastccd = Extension('fastccd',
sources=['src/fastccdmodule.c',
'src/fastccd.c'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-lgomp'])
image = Extension('image',
sources=['src/imagemodule.c',
'src/image.c'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-lgomp'])
phocount = Extension('phocount',
sources=['src/phocountmodule.c',
'src/phocount.c'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-lgomp'])
setup(
name='csxtools',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Brookhaven National Laboratory',
packages=setuptools.find_packages(exclude=['src', 'tests']),
ext_package='csxtools.ext',
include_dirs=[np.get_include()],
ext_modules=[fastccd, image, phocount],
tests_require=['pytest'],
install_requires=['numpy'], # essential deps only
url='http://github.com/NSLS-II_CSX/csxtools',
keywords='Xray Analysis',
license='BSD'
)
| {
"repo_name": "stuwilkins/csxtools",
"path": "setup.py",
"copies": "1",
"size": "1368",
"license": "bsd-3-clause",
"hash": 2119189011420941600,
"line_mean": 35,
"line_max": 66,
"alpha_frac": 0.5643274854,
"autogenerated": false,
"ratio": 3.965217391304348,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5029544876704348,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import simplejson
from qtpy.QtWidgets import QDialog, QFileDialog, QTableWidgetItem
from addie.utilities import load_ui
from qtpy import QtGui, QtCore
import numpy as np
from addie.processing.mantid.master_table.utilities import LoadGroupingFile
from addie.initialization.widgets.main_tab import set_default_folder_path
from addie.utilities.general import get_list_algo
COLUMNS_WIDTH = [150, 150]
class ReductionConfigurationHandler:
def __init__(self, parent=None):
if parent.reduction_configuration_ui is None:
parent.reduction_configuration_ui = ReductionConfiguration(parent=parent)
parent.reduction_configuration_ui.show()
if parent.reduction_configuration_ui_position:
parent.reduction_configuration_ui.move(parent.reduction_configuration_ui_position)
else:
parent.reduction_configuration_ui.activateWindow()
parent.reduction_configuration_ui.setFocus()
class ReductionConfiguration(QDialog):
list_grouping_intermediate_browse_widgets = []
list_grouping_output_browse_widgets = []
list_grouping_intermediate_widgets = []
list_grouping_output_widgets = []
global_key_value = {}
def __init__(self, parent=None):
self.parent = parent
QDialog.__init__(self, parent=parent)
self.ui = load_ui('reduction_configuration_dialog.ui', baseinstance=self)
self.init_widgets()
set_default_folder_path(self.parent)
def init_widgets(self):
'''init all widgets with values in case we already opened that window, or populated with
default values'''
self.ui.reset_pdf_q_range_button.setIcon(QtGui.QIcon(":/MPL Toolbar/reset_logo.png"))
self.ui.reset_pdf_r_range_button.setIcon(QtGui.QIcon(":/MPL Toolbar/reset_logo.png"))
# init all widgets with previous or default values
LoadReductionConfiguration(parent=self, grand_parent=self.parent)
self.list_grouping_intermediate_browse_widgets = [self.ui.intermediate_browse_button,
self.ui.intermediate_browse_value,
self.ui.intermediate_browse_groups_value,
self.ui.intermediate_browse_groups_label]
self.list_grouping_intermediate_widgets = [self.ui.intermediate_from_calibration_label,
self.ui.intermediate_from_calibration_groups_label,
self.ui.intermediate_from_calibration_groups_value]
self.list_grouping_output_browse_widgets = [self.ui.output_browse_button,
self.ui.output_browse_value,
self.ui.output_browse_groups_value,
self.ui.output_browse_groups_label]
self.list_grouping_output_widgets = [self.ui.output_from_calibration_label,
self.ui.output_from_calibration_groups_label,
self.ui.output_from_calibration_groups_value]
intermediate_grouping = self.parent.intermediate_grouping
status_intermediate = intermediate_grouping['enabled']
self.change_status_intermediate_buttons(status=status_intermediate)
self.ui.intermediate_browse_radio_button.setChecked(status_intermediate)
self.ui.intermediate_browse_value.setText(intermediate_grouping['filename'])
self.ui.intermediate_browse_groups_value.setText(str(intermediate_grouping['nbr_groups']))
output_grouping = self.parent.output_grouping
status_output = output_grouping['enabled']
self.change_status_output_buttons(status=status_output)
self.ui.output_browse_radio_button.setChecked(status_output)
self.ui.output_browse_value.setText(output_grouping['filename'])
self.ui.output_browse_groups_value.setText(str(output_grouping['nbr_groups']))
self.init_global_key_value_widgets()
self.update_key_value_widgets()
def init_global_key_value_widgets(self):
self.populate_list_algo()
self._set_column_widths()
self.init_table()
def _remove_blacklist_algo(self, list_algo):
list_algo_without_blacklist = []
for _algo in list_algo:
if not (_algo in self.parent.align_and_focus_powder_from_files_blacklist):
list_algo_without_blacklist.append(_algo)
return list_algo_without_blacklist
def _set_column_widths(self):
for _col, _width in enumerate(COLUMNS_WIDTH):
self.ui.key_value_table.setColumnWidth(_col, _width)
def init_table(self):
global_key_value = self.parent.global_key_value
for _row, _key in enumerate(global_key_value.keys()):
_value = global_key_value[_key]
self._add_row(row=_row, key=_key, value=_value)
def show_global_key_value_widgets(self, visible=False):
self.ui.global_key_value_groupBox.setVisible(visible)
def remove_from_list(self,
original_list=[],
to_remove=[]):
if to_remove:
clean_list_algo = []
for _algo in original_list:
if not(_algo in to_remove):
clean_list_algo.append(_algo)
return clean_list_algo
else:
return original_list
def populate_list_algo(self):
self.ui.list_key_comboBox.clear()
raw_list_algo = get_list_algo('AlignAndFocusPowderFromFiles')
list_algo_without_blacklist = self._remove_blacklist_algo(raw_list_algo)
global_list_keys = self.parent.global_key_value.keys()
global_unused_list_algo = self.remove_from_list(original_list=list_algo_without_blacklist,
to_remove=global_list_keys)
self.ui.list_key_comboBox.addItems(global_unused_list_algo)
def add_key_value(self):
self._add_new_row_at_bottom()
self.update_key_value_widgets()
self.populate_list_algo()
self.ui.list_key_comboBox.setFocus()
def _add_row(self, row=-1, key='', value=""):
self.ui.key_value_table.insertRow(row)
self._set_item(key, row, 0)
self._set_item(value, row, 1, is_editable=True)
def _set_item(self, text, row, column, is_editable=False):
key_item = QTableWidgetItem(text)
if not is_editable:
key_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.ui.key_value_table.setItem(row, column, key_item)
def _add_new_row_at_bottom(self):
value = str(self.ui.new_value_widget.text())
# do not allow to add row with empty value
if value.strip() == "":
return
nbr_row = self.get_nbr_row()
key = self.get_current_selected_key()
self.parent.global_key_value[key] = value
self.global_key_value[key] = value
self._add_row(row=nbr_row, key=key, value=value)
self.ui.new_value_widget.setText("")
def get_current_selected_key(self):
return str(self.ui.list_key_comboBox.currentText())
def get_nbr_row(self):
return self.ui.key_value_table.rowCount()
def _get_selected_row_range(self):
selection = self.ui.key_value_table.selectedRanges()
if not selection:
return None
from_row = selection[0].topRow()
to_row = selection[0].bottomRow()
return np.arange(from_row, to_row+1)
def _remove_rows(self, row_range):
first_row_selected = row_range[0]
for _ in row_range:
self.ui.key_value_table.removeRow(first_row_selected)
def remove_key_value_selected(self):
selected_row_range = self._get_selected_row_range()
if selected_row_range is None:
return
self._remove_rows(selected_row_range)
self.update_key_value_widgets()
self.update_global_key_value()
self.populate_list_algo()
def update_global_key_value(self):
nbr_row = self.get_nbr_row()
global_key_value = {}
for _row in np.arange(nbr_row):
_key = self._get_cell_value(_row, 0)
_value = self._get_cell_value(_row, 1)
global_key_value[_key] = _value
self.parent.global_key_value = global_key_value
self.global_key_value = global_key_value
def _get_cell_value(self, row, column):
item = self.ui.key_value_table.item(row, column)
return str(item.text())
def _what_state_remove_button_should_be(self):
nbr_row = self.get_nbr_row()
if nbr_row > 0:
enable = True
else:
enable = False
return enable
def update_key_value_widgets(self):
enable = self._what_state_remove_button_should_be()
self.ui.remove_selection_button.setEnabled(enable)
def _check_status_intermediate_buttons(self):
'''this method will enabled or not all the widgets of the intermediate groups browse section'''
status_browse_widgets = self.ui.intermediate_browse_radio_button.isChecked()
self.parent.intermediate_grouping['enabled'] = status_browse_widgets
self.change_status_intermediate_buttons(status=status_browse_widgets)
def change_status_intermediate_buttons(self, status=False):
for _widget in self.list_grouping_intermediate_browse_widgets:
_widget.setEnabled(status)
for _widget in self.list_grouping_intermediate_widgets:
_widget.setEnabled(not status)
def _check_status_output_buttons(self):
'''this method will enabled or not all the widgets of the output groups browse section'''
status_browse_widgets = self.ui.output_browse_radio_button.isChecked()
self.parent.output_grouping['enabled'] = status_browse_widgets
self.change_status_output_buttons(status=status_browse_widgets)
def change_status_output_buttons(self, status=False):
for _widget in self.list_grouping_output_browse_widgets:
_widget.setEnabled(status)
for _widget in self.list_grouping_output_widgets:
_widget.setEnabled(not status)
def intermediate_radio_button_clicked(self):
self._check_status_intermediate_buttons()
def intermediate_browse_radio_button_clicked(self):
self._check_status_intermediate_buttons()
def output_radio_button_clicked(self):
self._check_status_output_buttons()
def output_browse_radio_button_clicked(self):
self._check_status_output_buttons()
def intermediate_browse_button_clicked(self):
_characterization_folder = self.parent.characterization_folder
[_intermediate_group_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select Grouping File",
directory=_characterization_folder,
filter="XML (*.xml)")
if _intermediate_group_file:
self.ui.intermediate_browse_value.setText(_intermediate_group_file)
o_grouping = LoadGroupingFile(filename=_intermediate_group_file)
nbr_groups = o_grouping.get_number_of_groups()
self.ui.intermediate_browse_groups_value.setText(str(nbr_groups))
self.parent.intermediate_grouping['filename'] = _intermediate_group_file
self.parent.intermediate_grouping['nbr_groups'] = nbr_groups
def output_browse_button_clicked(self):
_characterization_folder = self.parent.characterization_folder
[_output_group_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select Grouping File",
directory=_characterization_folder,
filter="XML (*.xml)")
if _output_group_file:
self.ui.output_browse_value.setText(_output_group_file)
o_grouping = LoadGroupingFile(filename=_output_group_file)
nbr_groups = o_grouping.get_number_of_groups()
self.ui.output_browse_groups_value.setText(str(nbr_groups))
self.parent.output_grouping['filename'] = _output_group_file
self.parent.output_grouping['nbr_groups'] = nbr_groups
def pdf_reset_q_range_button(self):
pass
def pdf_reset_r_range_button(self):
pass
def bragg_browse_characterization_clicked(self):
_characterization_folder = self.parent.characterization_folder
[_characterization_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select Characterization File",
directory=_characterization_folder,
filter=self.parent.characterization_extension)
if _characterization_file:
self.ui.bragg_characterization_file.setText(_characterization_file)
def pdf_browse_characterization_clicked(self):
_characterization_folder = self.parent.characterization_folder
[_characterization_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
caption="Select Characterization File",
directory=_characterization_folder,
filter=self.parent.characterization_extension)
if _characterization_file:
self.ui.pdf_characterization_file.setText(_characterization_file)
def close_button(self):
# save state of buttons
SaveReductionConfiguration(parent=self, grand_parent=self.parent)
self._retrieve_global_key_value()
self.parent.global_key_value = self.global_key_value
# close
self.close()
def _retrieve_global_key_value(self):
global_key_value = {}
nbr_row = self.ui.key_value_table.rowCount()
for _row in np.arange(nbr_row):
_key = self._get_cell_value(_row, 0)
_value = self._get_cell_value(_row, 1)
global_key_value[_key] = _value
self.global_key_value = global_key_value
def add_global_key_value_to_all_rows(self):
global_list_key_value = self.parent.global_key_value
list_table_ui = self.parent.master_table_list_ui
if not list_table_ui:
return
for _random_key in list_table_ui.keys():
_entry = list_table_ui[_random_key]
current_local_key_value = _entry['align_and_focus_args_infos']
if current_local_key_value == {}:
_entry['align_and_focus_args_infos'] = global_list_key_value
else:
list_local_keys = current_local_key_value.keys()
list_global_keys = global_list_key_value.keys()
new_local_key_value = {}
for _key in list_local_keys:
if _key in list_global_keys:
new_local_key_value[_key] = global_list_key_value[_key]
else:
new_local_key_value[_key] = current_local_key_value[_key]
_entry['align_and_focus_args_infos'] = new_local_key_value
list_table_ui[_random_key] = _entry
self.parent.master_table_list_ui = list_table_ui
def closeEvent(self, event=None):
self.parent.reduction_configuration_ui = None
self.parent.reduction_configuration_ui_position = self.pos()
self.add_global_key_value_to_all_rows()
def cancel_clicked(self):
self.close()
class LoadReductionConfiguration:
def __init__(self, parent=None, grand_parent=None):
# list of sample environment
if grand_parent.reduction_configuration == {}:
config_file = grand_parent.addie_config_file
with open(config_file) as f:
data = simplejson.load(f)
pdf_q_range = data['pdf']['q_range']
pdf_r_range = data['pdf']['r_range']
pdf_reduction_configuration_file = data["pdf"]["reduction_configuration_file"]
pdf_characterization_file = data["pdf"]["characterization_file"]
bragg_characterization_file = data["bragg"]["characterization_file"]
bragg_number_of_bins = data["bragg"]["number_of_bins"]
bragg_wavelength = data["bragg"]["wavelength"]
#calibration_file = data["pdf_bragg"]["calibration_file"]
push_data_positive = data["advanced"]["push_data_positive"]
else:
pdf_q_range = grand_parent.reduction_configuration['pdf']['q_range']
pdf_r_range = grand_parent.reduction_configuration['pdf']['r_range']
pdf_reduction_configuration_file = grand_parent.reduction_configuration['pdf']['reduction_configuration_file']
pdf_characterization_file = grand_parent.reduction_configuration['pdf']['characterization_file']
bragg_characterization_file = grand_parent.reduction_configuration["bragg"]["characterization_file"]
bragg_number_of_bins = grand_parent.reduction_configuration["bragg"]["number_of_bins"]
bragg_wavelength = grand_parent.reduction_configuration["bragg"]["wavelength"]
#calibration_file = grand_parent.reduction_configuration["pdf_bragg"]["calibration_file"]
push_data_positive = grand_parent.reduction_configuration["advanced"]["push_data_positive"]
# PDF and Bragg
#self._set_text_value(ui=parent.ui.calibration_file, value=calibration_file)
# PDF
self._set_text_value(ui=parent.ui.pdf_q_range_min, value=pdf_q_range["min"])
self._set_text_value(ui=parent.ui.pdf_q_range_max, value=pdf_q_range["max"])
self._set_text_value(ui=parent.ui.pdf_q_range_delta, value=pdf_q_range["delta"])
self._set_text_value(ui=parent.ui.pdf_r_range_min, value=pdf_r_range["min"])
self._set_text_value(ui=parent.ui.pdf_r_range_max, value=pdf_r_range["max"])
self._set_text_value(ui=parent.ui.pdf_r_range_delta, value=pdf_r_range["delta"])
self._set_text_value(ui=parent.ui.pdf_reduction_configuration_file, value=pdf_reduction_configuration_file)
self._set_text_value(ui=parent.ui.pdf_characterization_file, value=pdf_characterization_file)
# Bragg
self._set_text_value(ui=parent.ui.bragg_characterization_file, value=bragg_characterization_file)
self._set_text_value(ui=parent.ui.bragg_number_of_bins, value=bragg_number_of_bins)
self._set_text_value(ui=parent.ui.bragg_wavelength_min, value=bragg_wavelength["min"])
self._set_text_value(ui=parent.ui.bragg_wavelength_max, value=bragg_wavelength["max"])
# advanced
self._set_checkbox_value(ui=parent.ui.push_data_positive, value=push_data_positive)
def _set_text_value(self, ui=None, value=""):
if ui is None:
return
ui.setText(str(value))
def _set_checkbox_value(self, ui=None, value=False):
if ui is None:
return
ui.setChecked(value)
class SaveReductionConfiguration:
def __init__(self, parent=None, grand_parent=None):
reduction_configuration = {}
# PDF and Bragg
reduction_configuration['pdf_bragg'] = {}
#calibration_file = self._get_text_value(parent.ui.calibration_file)
#reduction_configuration['pdf_bragg']["calibration_file"] = calibration_file
# PDF
pdf_reduction_configuration = {}
pdf_reduction_configuration['characterization_file'] = self._get_text_value(parent.ui.pdf_characterization_file)
pdf_q_range_min = self._get_text_value(parent.ui.pdf_q_range_min)
pdf_q_range_max = self._get_text_value(parent.ui.pdf_q_range_max)
pdf_q_range_delta = self._get_text_value(parent.ui.pdf_q_range_delta)
pdf_reduction_configuration['q_range'] = {'min': pdf_q_range_min,
'max': pdf_q_range_max,
'delta': pdf_q_range_delta}
pdf_r_range_min = self._get_text_value(parent.ui.pdf_r_range_min)
pdf_r_range_max = self._get_text_value(parent.ui.pdf_r_range_max)
pdf_r_range_delta = self._get_text_value(parent.ui.pdf_r_range_delta)
pdf_reduction_configuration['r_range'] = {'min': pdf_r_range_min,
'max': pdf_r_range_max,
'delta': pdf_r_range_delta}
pdf_reduction_configuration_file = self._get_text_value(parent.ui.pdf_reduction_configuration_file)
pdf_reduction_configuration['reduction_configuration_file'] = pdf_reduction_configuration_file
reduction_configuration['pdf'] = pdf_reduction_configuration
# Bragg
bragg_reduction_configuration = {}
bragg_characterization_file = self._get_text_value(parent.ui.bragg_characterization_file)
bragg_reduction_configuration["characterization_file"] = bragg_characterization_file
bragg_number_of_bins = self._get_text_value(parent.ui.bragg_number_of_bins)
bragg_reduction_configuration["number_of_bins"] = bragg_number_of_bins
bragg_wavelength_min = self._get_text_value(parent.ui.bragg_wavelength_min)
bragg_wavelength_max = self._get_text_value(parent.ui.bragg_wavelength_max)
bragg_reduction_configuration["wavelength"] = {'min': bragg_wavelength_min,
'max': bragg_wavelength_max}
reduction_configuration['bragg'] = bragg_reduction_configuration
# advanced
advanced_reduction_configuration = {}
advanced_reduction_configuration["push_data_positive"] = self._set_checkbox_value(ui=parent.ui.push_data_positive)
reduction_configuration["advanced"] = advanced_reduction_configuration
# final save
grand_parent.reduction_configuration = reduction_configuration
def _get_text_value(self, ui=None):
if ui is None:
return ""
return str(ui.text())
def _set_checkbox_value(self, ui=None):
if ui is None:
return False
_state = ui.checkState()
if _state == QtCore.Qt.Checked:
return True
else:
return False
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/reduction_configuration_handler.py",
"copies": "1",
"size": "22993",
"license": "mit",
"hash": 7505251698780740000,
"line_mean": 44.8027888446,
"line_max": 122,
"alpha_frac": 0.6125342496,
"autogenerated": false,
"ratio": 3.9264002732240435,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001988442416349582,
"num_lines": 502
} |
from __future__ import absolute_import, division, print_function
import six
from six.moves import range
import sys
import utool as ut
import numpy as np
try:
import guitool_ibeis as gt
from guitool_ibeis.__PYQT__ import QtWidgets
from guitool_ibeis.__PYQT__ import QtCore
except ImportError:
try:
from PyQt4 import QtGui as QtWidgets
from PyQt4 import QtCore
except ImportError:
pass
try:
from PyQt5 import QtWidgets # NOQA
from PyQt5 import QtCore # NOQA
except ImportError:
pass
print('Warning: guitool_ibeis did not import correctly')
#(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[screeninfo]', DEBUG=True)
ut.noinject(__name__, '[screeninfo]')
DEFAULT_MAX_ROWS = 3
# Win7 Areo
WIN7_SIZES = {
'os_border_x': 20,
'os_border_y': 35,
'os_border_h': 30,
'win_border_x': 17,
'win_border_y': 10,
'mpl_toolbar_y': 10,
}
# Ubuntu (Medeterrainian Dark)
GNOME3_SIZES = {
'os_border_x': 0,
'os_border_y': 35, # for gnome3 title bar
'os_border_h': 0,
'win_border_x': 5,
'win_border_y': 30,
'mpl_toolbar_y': 0,
}
for key in GNOME3_SIZES:
GNOME3_SIZES[key] += 5
def infer_monitor_specs(res_w, res_h, inches_diag):
"""
monitors = [
dict(name='work1', inches_diag=23, res_w=1920, res_h=1080),
dict(name='work2', inches_diag=24, res_w=1920, res_h=1200),
dict(name='hp-129', inches_diag=25, res_w=1920, res_h=1080),
dict(name='?-26', inches_diag=26, res_w=1920, res_h=1080),
dict(name='?-27', inches_diag=27, res_w=1920, res_h=1080),
]
for info in monitors:
name = info['name']
inches_diag = info['inches_diag']
res_h = info['res_h']
res_w = info['res_w']
print('---')
print(name)
inches_w = inches_diag * res_w / np.sqrt(res_h**2 + res_w**2)
inches_h = inches_diag * res_h / np.sqrt(res_h**2 + res_w**2)
print('inches diag = %.2f' % (inches_diag))
print('inches WxH = %.2f x %.2f' % (inches_w, inches_h))
#inches_w = inches_diag * res_w/sqrt(res_h**2 + res_w**2)
"""
import sympy
# Build a system of equations and solve it
inches_w, inches_h = sympy.symbols('inches_w inches_h'.split(), real=True, positive=True)
res_w, res_h = sympy.symbols('res_w res_h'.split(), real=True, positive=True)
inches_diag, = sympy.symbols('inches_diag'.split(), real=True, positive=True)
equations = [
sympy.Eq(inches_diag, (inches_w ** 2 + inches_h ** 2) ** .5),
sympy.Eq(res_w / res_h, inches_w / inches_h),
]
print('Possible solutions:')
query_vars = [inches_w, inches_h]
for solution in sympy.solve(equations, query_vars):
print('Solution:')
reprstr = ut.repr3(ut.odict(zip(query_vars, solution)), explicit=True, nobr=1, with_comma=False)
print(ut.indent(ut.autopep8_format(reprstr)))
#(inches_diag*res_w/sqrt(res_h**2 + res_w**2), inches_diag*res_h/sqrt(res_h**2 + res_w**2))
def get_resolution_info(monitor_num=0):
r"""
Args:
monitor_num (int): (default = 0)
Returns:
dict: info
CommandLine:
python -m plottool_ibeis.screeninfo get_resolution_info --show
xrandr | grep ' connected'
grep "NVIDIA" /var/log/Xorg.0.log
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.screeninfo import * # NOQA
>>> monitor_num = 1
>>> for monitor_num in range(get_number_of_monitors()):
>>> info = get_resolution_info(monitor_num)
>>> print('monitor(%d).info = %s' % (monitor_num, ut.repr3(info, precision=3)))
"""
import guitool_ibeis as gt
app = gt.ensure_qtapp()[0] # NOQA
# screen_resolution = app.desktop().screenGeometry()
# width, height = screen_resolution.width(), screen_resolution.height()
# print('height = %r' % (height,))
# print('width = %r' % (width,))
desktop = QtWidgets.QDesktopWidget()
screen = desktop.screen(monitor_num)
ppi_x = screen.logicalDpiX()
ppi_y = screen.logicalDpiY()
dpi_x = screen.physicalDpiX()
dpi_y = screen.physicalDpiY()
# This call is not rotated correctly
# rect = screen.screenGeometry()
# This call has bad offsets
rect = desktop.screenGeometry(screen=monitor_num)
# This call subtracts offsets weirdly
# desktop.availableGeometry(screen=monitor_num)
pixels_w = rect.width()
# for num in range(desktop.screenCount()):
# pass
pixels_h = rect.height()
# + rect.y()
"""
I have two monitors (screens), after rotation effects they have
the geometry: (for example)
S1 = {x: 0, y=300, w: 1920, h:1080}
S2 = {x=1920, y=0, w: 1080, h:1920}
Here is a pictoral example
G--------------------------------------C-------------------
| | |
A--------------------------------------| |
| | |
| | |
| | |
| S1 | |
| | S2 |
| | |
| | |
| | |
|--------------------------------------B |
| | |
| | |
----------------------------------------------------------D
Desired Info
G = (0, 0)
A = (S1.x, S1.y)
B = (S1.x + S1.w, S1.y + S1.h)
C = (S2.x, S2.y)
D = (S2.x + S1.w, S2.y + S2.h)
from PyQt4 import QtGui, QtCore
app = QtCore.QCoreApplication.instance()
if app is None:
import sys
app = QtGui.QApplication(sys.argv)
desktop = QtGui.QDesktopWidget()
rect1 = desktop.screenGeometry(screen=0)
rect2 = desktop.screenGeometry(screen=1)
"""
# I want to get the relative positions of my monitors
# pt = screen.pos()
# pt = screen.mapToGlobal(pt)
# pt = screen.mapToGlobal(screen.pos())
# Screen offsets seem bugged
# off_x = pt.x()
# off_y = pt.y()
# print(pt.x())
# print(pt.y())
# pt = screen.mapToGlobal(QtCore.QPoint(0, 0))
# print(pt.x())
# print(pt.y())
off_x = rect.x()
off_y = rect.y()
# pt.x(), pt.y()
inches_w = (pixels_w / dpi_x)
inches_h = (pixels_h / dpi_y)
inches_diag = (inches_w ** 2 + inches_h ** 2) ** .5
mm_w = inches_w * ut.MM_PER_INCH
mm_h = inches_h * ut.MM_PER_INCH
mm_diag = inches_diag * ut.MM_PER_INCH
ratio = min(mm_w, mm_h) / max(mm_w, mm_h)
#pixel_density = dpi_x / ppi_x
info = ut.odict([
('monitor_num', monitor_num),
('off_x', off_x),
('off_y', off_y),
('ratio', ratio),
('ppi_x', ppi_x),
('ppi_y', ppi_y),
('dpi_x', dpi_x),
('dpi_y', dpi_y),
#'pixel_density', pixel_density),
('inches_w', inches_w),
('inches_h', inches_h),
('inches_diag', inches_diag),
('mm_w', mm_w),
('mm_h', mm_h),
('mm_diag', mm_diag),
('pixels_w', pixels_w),
('pixels_h', pixels_h),
])
return info
def get_number_of_monitors():
gt.ensure_qtapp()
desktop = QtWidgets.QDesktopWidget()
if hasattr(desktop, 'numScreens'):
n = desktop.numScreens()
else:
n = desktop.screenCount()
return n
def get_monitor_geom(monitor_num=0):
r"""
Args:
monitor_num (int): (default = 0)
Returns:
tuple: geom
CommandLine:
python -m plottool_ibeis.screeninfo get_monitor_geom --show
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.screeninfo import * # NOQA
>>> monitor_num = 0
>>> geom = get_monitor_geom(monitor_num)
>>> result = ('geom = %s' % (ut.repr2(geom),))
>>> print(result)
"""
gt.ensure_qtapp()
desktop = QtWidgets.QDesktopWidget()
rect = desktop.availableGeometry(screen=monitor_num)
geom = (rect.x(), rect.y(), rect.width(), rect.height())
return geom
def get_monitor_geometries():
gt.ensure_qtapp()
monitor_geometries = {}
desktop = QtWidgets.QDesktopWidget()
if hasattr(desktop, 'numScreens'):
n = desktop.numScreens()
else:
n = desktop.screenCount()
for screenx in range(n):
rect = desktop.availableGeometry(screen=screenx)
geom = (rect.x(), rect.y(), rect.width(), rect.height())
monitor_geometries[screenx] = geom
return monitor_geometries
def get_stdpxls():
if sys.platform.startswith('win32'):
stdpxls = WIN7_SIZES
elif sys.platform.startswith('linux'):
stdpxls = GNOME3_SIZES
else:
stdpxls = GNOME3_SIZES
return stdpxls
def get_xywh_pads():
    stdpxls = get_stdpxls()
    # per-window pads (window chrome plus the matplotlib toolbar)
    w_pad = stdpxls['win_border_x']
    h_pad = stdpxls['win_border_y'] + stdpxls['mpl_toolbar_y']
    # Pads are applied to all windows
    x_pad = stdpxls['os_border_x']
    y_pad = stdpxls['os_border_y']
    return (x_pad, y_pad, w_pad, h_pad)
def get_avail_geom(monitor_num=None, percent_w=1.0, percent_h=1.0):
stdpxls = get_stdpxls()
if monitor_num is None:
monitor_num = 0
monitor_geometries = get_monitor_geometries()
try:
(startx, starty, availw, availh) = monitor_geometries[monitor_num]
except KeyError:
        (startx, starty, availw, availh) = next(six.itervalues(monitor_geometries))
available_geom = (startx,
starty,
availw * percent_w,
(availh - stdpxls['os_border_h']) * percent_h)
return available_geom
def get_valid_fig_positions(num_wins, max_rows=None, row_first=True,
monitor_num=None, percent_w=1.0,
percent_h=1.0):
"""
Returns a list of bounding boxes where figures can be placed on the screen
"""
if percent_h is None:
percent_h = 1.0
if percent_w is None:
percent_w = 1.0
if max_rows is None:
max_rows = DEFAULT_MAX_ROWS
available_geom = get_avail_geom(monitor_num, percent_w=percent_w, percent_h=percent_h)
# print('available_geom = %r' % (available_geom,))
startx, starty, avail_width, avail_height = available_geom
nRows = num_wins if num_wins < max_rows else max_rows
nCols = int(np.ceil(num_wins / nRows))
win_height = avail_height / nRows
win_width = avail_width / nCols
(x_pad, y_pad, w_pad, h_pad) = get_xywh_pads()
# print('startx, startx = %r, %r' % (startx, starty))
# print('avail_width, avail_height = %r, %r' % (avail_width, avail_height))
# print('win_width, win_height = %r, %r' % (win_width, win_height))
# print('nRows, nCols = %r, %r' % (nRows, nCols))
def get_position_ix(ix):
if row_first:
rowx = ix % nRows
colx = int(ix // nRows)
else:
colx = (ix % nCols)
rowx = int(ix // nCols)
w = win_width - w_pad
h = win_height - h_pad
x = startx + colx * (win_width) + x_pad
y = starty + rowx * (win_height) + y_pad
return (x, y, w, h)
valid_positions = [get_position_ix(ix) for ix in range(num_wins)]
return valid_positions
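# Illustrative sketch (not part of the original module): shows how
# get_valid_fig_positions tiles figure windows into a row-first grid.
# The helper name is hypothetical and the exact numbers depend on the
# local monitor geometry.
def _demo_valid_fig_positions():
    positions = get_valid_fig_positions(num_wins=4, max_rows=2)
    for ix, (x, y, w, h) in enumerate(positions):
        print('fig %d -> x=%r, y=%r, w=%r, h=%r' % (ix, x, y, w, h))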
if __name__ == '__main__':
r"""
CommandLine:
python -m plottool_ibeis.screeninfo
python -m plottool_ibeis.screeninfo --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| {
"repo_name": "Erotemic/plottool",
"path": "plottool_ibeis/screeninfo.py",
"copies": "1",
"size": "11949",
"license": "apache-2.0",
"hash": 1710663284665587700,
"line_mean": 30.6949602122,
"line_max": 104,
"alpha_frac": 0.5318436689,
"autogenerated": false,
"ratio": 3.2009107956067506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42327544645067505,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import six
import collections
from functools import reduce
_HTML_TEMPLATE = """
<table>
{% for key, value in document | dictsort recursive %}
<tr>
<th> {{ key }} </th>
<td>
{% if value.items %}
<table>
{{ loop(value | dictsort) }}
</table>
{% else %}
{% if key == 'time' %}
{{ value | human_time }}
{% else %}
{{ value }}
{% endif %}
{% endif %}
</td>
</tr>
{% endfor %}
</table>
"""
class DocumentIsReadOnly(Exception):
pass
class Document(dict):
def __init__(self, name, *args, **kwargs):
super(Document, self).__init__(*args, **kwargs)
super(Document, self).__setitem__('_name', name)
super(Document, self).__setattr__('__dict__', self)
def __setattr__(self, key, value):
raise DocumentIsReadOnly()
def __setitem__(self, key, value):
raise DocumentIsReadOnly()
def __delattr__(self, key):
raise DocumentIsReadOnly()
def __delitem__(self, key):
raise DocumentIsReadOnly()
def update(self, *args, **kwargs):
raise DocumentIsReadOnly()
def pop(self, key):
raise DocumentIsReadOnly()
def __iter__(self):
return (k for k in super(Document, self).__iter__()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def items(self):
return ((k, v) for k, v in super(Document, self).items()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def values(self):
return (v for k, v in super(Document, self).items()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def keys(self):
return (k for k in super(Document, self).keys()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def __len__(self):
return len(list(self.keys()))
def _repr_html_(self):
import jinja2
env = jinja2.Environment()
env.filters['human_time'] = pretty_print_time
template = env.from_string(_HTML_TEMPLATE)
return template.render(document=self)
def __str__(self):
try:
return vstr(self)
except ImportError:
# import error will be raised if prettytable is not available
return super(Document, self).__str__()
def to_name_dict_pair(self):
"""Convert to (name, dict) pair
This can be used to safely mutate a Document::
name, dd = doc.to_name_dict_pair()
dd['new_key'] = 'aardvark'
dd['run_start'] = dd['run_start']['uid']
new_doc = Document(name, dd)
Returns
-------
name : str
Name of Document
ret : dict
Data payload of Document
"""
ret = dict(self)
name = ret.pop('_name')
return name, ret
def pretty_print_time(timestamp):
import humanize
import time
import datetime
dt = datetime.datetime.fromtimestamp(timestamp).isoformat()
ago = humanize.naturaltime(time.time() - timestamp)
return '{ago} ({date})'.format(ago=ago, date=dt)
def _format_dict(value, name_width, value_width, name, tabs=0):
ret = ''
for k, v in six.iteritems(value):
if isinstance(v, collections.Mapping):
ret += _format_dict(v, name_width, value_width, k, tabs=tabs+1)
else:
ret += ("\n%s%-{}s: %-{}s".format(
name_width, value_width) % (' '*tabs, k[:16], v))
return ret
def _format_data_keys_dict(data_keys_dict):
from prettytable import PrettyTable
fields = reduce(set.union,
(set(v) for v in six.itervalues(data_keys_dict)))
fields = sorted(list(fields))
table = PrettyTable(["data keys"] + list(fields))
table.align["data keys"] = 'l'
table.padding_width = 1
for data_key, key_dict in sorted(data_keys_dict.items()):
row = [data_key]
for fld in fields:
row.append(key_dict.get(fld, ''))
table.add_row(row)
return table
def vstr(doc, indent=0):
"""Recursive document walker and formatter
Parameters
----------
doc : Document
Dict-like thing to format, must have `_name` key
indent : int, optional
The indentation level. Defaults to starting at 0 and adding one tab
per recursion level
"""
headings = [
# characters recommended as headers by ReST docs
'=', '-', '`', ':', '.', "'", '"', '~', '^', '_', '*', '+', '#',
# all other valid header characters according to ReST docs
'!', '$', '%', '&', '(', ')', ',', '/', ';', '<', '>', '?', '@',
'[', '\\', ']', '{', '|', '}'
]
name = doc['_name']
ret = "\n%s\n%s" % (name, headings[indent]*len(name))
documents = []
name_width = 16
value_width = 40
for name, value in sorted(doc.items()):
if name == 'descriptors':
# this case is to deal with Headers from databroker
for val in value:
documents.append((name, val))
elif name == 'data_keys':
ret += "\n%s" % str(_format_data_keys_dict(value))
elif isinstance(value, collections.Mapping):
if '_name' in value:
documents.append((name, value))
else:
# format dicts reasonably
ret += "\n%-{}s:".format(name_width) % (name)
ret += _format_dict(value, name_width, value_width,
name, tabs=1)
else:
ret += ("\n%-{}s: %-{}s".format(name_width, value_width) %
(name[:16], value))
for name, value in documents:
ret += "\n%s" % (vstr(value, indent+1))
# ret += "\n"
ret = ret.split('\n')
ret = ["%s%s" % (' '*indent, line) for line in ret]
ret = "\n".join(ret)
return ret
def ref_doc_to_uid(doc, field):
"""Convert a reference doc to a uid
Given a Document, replace the given field (which must contain a
Document) with the uid of that Document.
Returns a new instance with the updated values
Parameters
----------
doc : Document
The document to replace an entry in
field : str
        The field to replace with the uid of its contents
"""
name, doc = doc.to_name_dict_pair()
doc[field] = doc[field]['uid']
return Document(name, doc)
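# Illustrative usage sketch (not part of the original module): Documents are
# read-only, so mutation goes through to_name_dict_pair() / ref_doc_to_uid().
# The field values below are made-up examples.
def _example_ref_doc_to_uid():
    doc = Document('event', {'uid': 'abc', 'run_start': {'uid': 'xyz'}})
    new_doc = ref_doc_to_uid(doc, 'run_start')
    assert new_doc['run_start'] == 'xyz'
    return new_doc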
| {
"repo_name": "cowanml/synch_service_utils",
"path": "synch_service_utils/doc.py",
"copies": "1",
"size": "6545",
"license": "bsd-3-clause",
"hash": -5065366327581751000,
"line_mean": 28.481981982,
"line_max": 79,
"alpha_frac": 0.5361344538,
"autogenerated": false,
"ratio": 3.8773696682464456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9913504122046446,
"avg_score": 0,
"num_lines": 222
} |
from __future__ import absolute_import, division, print_function
import six
import numpy as np
def fit_quad_to_peak(x, y):
"""
    Fits a quadratic of the form y = b[0]*(x - b[1])**2 + b[2] to the data
    points handed in and also returns R2, a measure of goodness of fit.
Parameters
----------
x : ndarray
locations
y : ndarray
values
Returns
-------
b : tuple
coefficients of form y = b[0](x-b[1])**2 + b[2]
R2 : float
R2 value
"""
lenx = len(x)
# some sanity checks
if lenx < 3:
        raise ValueError('insufficient points handed in: need at least 3, got %d' % lenx)
# set up fitting array
X = np.vstack((x ** 2, x, np.ones(lenx))).T
# use linear least squares fitting
beta, _, _, _ = np.linalg.lstsq(X, y)
SSerr = np.sum((np.polyval(beta, x) - y)**2)
SStot = np.sum((y - np.mean(y))**2)
# re-map the returned value to match the form we want
ret_beta = (beta[0],
-beta[1] / (2 * beta[0]),
beta[2] - beta[0] * (beta[1] / (2 * beta[0])) ** 2)
return ret_beta, 1 - SSerr / SStot
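# Illustrative usage sketch (not part of the original module): fit noiseless
# synthetic data of the form y = a*(x - x0)**2 + c and recover the vertex.
# All numbers below are arbitrary example values.
def _example_fit_quad_to_peak():
    x = np.linspace(-1.0, 1.0, 25)
    y = -2.0 * (x - 0.1) ** 2 + 5.0
    (a, x0, c), r2 = fit_quad_to_peak(x, y)
    assert np.allclose([a, x0, c], [-2.0, 0.1, 5.0])
    assert r2 > 0.999
    return (a, x0, c), r2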
| {
"repo_name": "giltis/scikit-xray",
"path": "skxray/core/fitting/funcs.py",
"copies": "3",
"size": "1095",
"license": "bsd-3-clause",
"hash": 120331540945149120,
"line_mean": 22.8043478261,
"line_max": 67,
"alpha_frac": 0.5324200913,
"autogenerated": false,
"ratio": 3.1107954545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5143215545845454,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import six
class Namespaces(object):
"""
    Class for holding and manipulating a dictionary containing the various namespaces for
each standard.
"""
namespace_dict = {
'atom' : 'http://www.w3.org/2005/Atom',
'csw' : 'http://www.opengis.net/cat/csw/2.0.2',
'dc' : 'http://purl.org/dc/elements/1.1/',
'dct' : 'http://purl.org/dc/terms/',
'dif' : 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',
'draw' : 'gov.usgs.cida.gdp.draw',
'fes' : 'http://www.opengis.net/fes/2.0',
'fgdc' : 'http://www.opengis.net/cat/csw/csdgm',
'gco' : 'http://www.isotc211.org/2005/gco',
'gfc' : 'http://www.isotc211.org/2005/gfc',
'gm03' : 'http://www.interlis.ch/INTERLIS2.3',
'gmd' : 'http://www.isotc211.org/2005/gmd',
'gmi' : 'http://www.isotc211.org/2005/gmi',
'gml' : 'http://www.opengis.net/gml',
'gml311': 'http://www.opengis.net/gml',
'gml32' : 'http://www.opengis.net/gml/3.2',
'gmx' : 'http://www.isotc211.org/2005/gmx',
'gts' : 'http://www.isotc211.org/2005/gts',
'ogc' : 'http://www.opengis.net/ogc',
'om' : 'http://www.opengis.net/om/1.0',
'om10' : 'http://www.opengis.net/om/1.0',
'om100' : 'http://www.opengis.net/om/1.0',
'om20' : 'http://www.opengis.net/om/2.0',
'ows' : 'http://www.opengis.net/ows',
'ows100': 'http://www.opengis.net/ows',
'ows110': 'http://www.opengis.net/ows/1.1',
'ows200': 'http://www.opengis.net/ows/2.0',
'rim' : 'urn:oasis:names:tc:ebxml-regrep:xsd:rim:3.0',
'rdf' : 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'sa' : 'http://www.opengis.net/sampling/1.0',
'sml' : 'http://www.opengis.net/sensorML/1.0.1',
'sml101': 'http://www.opengis.net/sensorML/1.0.1',
'sos' : 'http://www.opengis.net/sos/1.0',
'sos20' : 'http://www.opengis.net/sos/2.0',
'srv' : 'http://www.isotc211.org/2005/srv',
'swe' : 'http://www.opengis.net/swe/1.0.1',
'swe10' : 'http://www.opengis.net/swe/1.0',
'swe101': 'http://www.opengis.net/swe/1.0.1',
'swe20' : 'http://www.opengis.net/swe/2.0',
'swes' : 'http://www.opengis.net/swes/2.0',
        'tml'    : 'http://www.opengis.net/tml',
'wfs' : 'http://www.opengis.net/wfs',
'wfs20' : 'http://www.opengis.net/wfs/2.0',
'wcs' : 'http://www.opengis.net/wcs',
'wms' : 'http://www.opengis.net/wms',
'wps' : 'http://www.opengis.net/wps/1.0.0',
'wps100': 'http://www.opengis.net/wps/1.0.0',
'xlink' : 'http://www.w3.org/1999/xlink',
'xs' : 'http://www.w3.org/2001/XMLSchema',
'xs2' : 'http://www.w3.org/XML/Schema',
'xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
'wml2' : 'http://www.opengis.net/waterml/2.0'
}
def get_namespace(self, key):
"""
Retrieves a namespace from the dictionary
Example:
--------
>>> from owslib.namespaces import Namespaces
>>> ns = Namespaces()
>>> ns.get_namespace('csw')
'http://www.opengis.net/cat/csw/2.0.2'
>>> ns.get_namespace('wfs20')
'http://www.opengis.net/wfs/2.0'
"""
retval = None
if key in self.namespace_dict:
retval = self.namespace_dict[key]
return retval
def get_versioned_namespace(self, key, ver=None):
"""
Retrieves a namespace from the dictionary with a specific version number
Example:
--------
>>> from owslib.namespaces import Namespaces
>>> ns = Namespaces()
>>> ns.get_versioned_namespace('ows')
'http://www.opengis.net/ows'
>>> ns.get_versioned_namespace('ows','1.1.0')
'http://www.opengis.net/ows/1.1'
"""
if ver is None:
return self.get_namespace(key)
version = ''
# Strip the decimals out of the passed in version
for s in ver.split('.'):
version += s
key += version
retval = None
if key in self.namespace_dict:
retval = self.namespace_dict[key]
return retval
def get_namespaces(self, keys=None):
"""
Retrieves a dict of namespaces from the namespace mapping
Parameters
----------
        - keys: List of keys to query and return
Example:
--------
>>> ns = Namespaces()
>>> x = ns.get_namespaces(['csw','gmd'])
>>> x == {'csw': 'http://www.opengis.net/cat/csw/2.0.2', 'gmd': 'http://www.isotc211.org/2005/gmd'}
True
>>> x = ns.get_namespaces('csw')
>>> x == {'csw': 'http://www.opengis.net/cat/csw/2.0.2'}
True
>>> ns.get_namespaces()
{...}
"""
# If we aren't looking for any namespaces in particular return the whole dict
if keys is None or len(keys) == 0:
return self.namespace_dict
if isinstance(keys, six.string_types):
return { keys: self.get_namespace(keys) }
retval = {}
for key in keys:
retval[key] = self.get_namespace(key)
return retval
def get_namespace_from_url(self, url):
for k, v in self.namespace_dict.items():
if v == url:
return k
return None
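# Illustrative usage sketch (not part of the original module): reverse lookup
# from a namespace URL back to its prefix, plus a filtered prefix->URL mapping.
def _example_namespace_lookup():
    ns = Namespaces()
    assert ns.get_namespace_from_url('http://www.opengis.net/wfs/2.0') == 'wfs20'
    assert ns.get_namespace_from_url('http://not-a-registered-namespace/') is None
    return ns.get_namespaces(['gmd', 'gco'])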
| {
"repo_name": "QuLogic/OWSLib",
"path": "owslib/namespaces.py",
"copies": "2",
"size": "5809",
"license": "bsd-3-clause",
"hash": 3748785552714494500,
"line_mean": 36.9673202614,
"line_max": 111,
"alpha_frac": 0.4978481666,
"autogenerated": false,
"ratio": 3.0541535226077814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4552001689207782,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six # NOQA
import functools
from ibeis import constants as const
from ibeis.control import accessor_decors
from ibeis.control.accessor_decors import (adder, ider, default_decorator,
getter_1to1, getter_1toM, deleter)
import utool as ut
from ibeis.control.controller_inject import make_ibs_register_decorator
print, print_, printDBG, rrr, profile = ut.inject(__name__, '[manual_dependant]')
CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
ANNOT_ROWID = 'annot_rowid'
CHIP_ROWID = 'chip_rowid'
FEAT_VECS = 'feature_vecs'
FEAT_KPTS = 'feature_keypoints'
FEAT_NUM_FEAT = 'feature_num_feats'
@register_ibs_method
@ider
def _get_all_cids(ibs):
""" alias """
return _get_all_chip_rowids(ibs)
@register_ibs_method
@ider
def _get_all_fids(ibs):
""" alias """
return _get_all_feat_rowids(ibs)
@register_ibs_method
def _get_all_feat_rowids(ibs):
"""
Returns:
list_ (list): unfiltered fids (computed feature rowids) for every
configuration (YOU PROBABLY SHOULD NOT USE THIS)"""
all_fids = ibs.dbcache.get_all_rowids(const.FEATURE_TABLE)
return all_fids
@register_ibs_method
def _get_all_chip_rowids(ibs):
"""
Returns:
list_ (list): unfiltered cids (computed chip rowids) for every
configuration (YOU PROBABLY SHOULD NOT USE THIS) """
all_cids = ibs.dbcache.get_all_rowids(const.CHIP_TABLE)
return all_cids
@register_ibs_method
@adder
def add_annot_chips(ibs, aid_list, qreq_=None):
"""
FIXME: This is a dirty dirty function
Adds chip data to the ANNOTATION. (does not create ANNOTATIONs. first use add_annots
and then pass them here to ensure chips are computed) """
# Ensure must be false, otherwise an infinite loop occurs
from ibeis.model.preproc import preproc_chip
cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False)
dirty_aids = ut.get_dirty_items(aid_list, cid_list)
if len(dirty_aids) > 0:
if ut.VERBOSE:
print('[ibs] adding chips')
try:
# FIXME: Cant be lazy until chip config / delete issue is fixed
preproc_chip.compute_and_write_chips(ibs, aid_list)
#preproc_chip.compute_and_write_chips_lazy(ibs, aid_list)
params_iter = preproc_chip.add_annot_chips_params_gen(ibs, dirty_aids)
except AssertionError as ex:
ut.printex(ex, '[!ibs.add_annot_chips]')
print('[!ibs.add_annot_chips] ' + ut.list_dbgstr('aid_list'))
raise
colnames = (ANNOT_ROWID, 'config_rowid', 'chip_uri', 'chip_width', 'chip_height',)
get_rowid_from_superkey = functools.partial(ibs.get_annot_chip_rowids, ensure=False, qreq_=qreq_)
cid_list = ibs.dbcache.add_cleanly(const.CHIP_TABLE, colnames, params_iter, get_rowid_from_superkey)
return cid_list
@register_ibs_method
@adder
def add_chip_feats(ibs, cid_list, force=False, qreq_=None):
""" Computes the features for every chip without them """
from ibeis.model.preproc import preproc_feat
fid_list = ibs.get_chip_fids(cid_list, ensure=False, qreq_=qreq_)
dirty_cids = ut.get_dirty_items(cid_list, fid_list)
if len(dirty_cids) > 0:
if ut.VERBOSE:
print('[ibs] adding %d / %d features' % (len(dirty_cids), len(cid_list)))
params_iter = preproc_feat.add_feat_params_gen(ibs, dirty_cids, qreq_=qreq_)
colnames = (CHIP_ROWID, 'config_rowid', FEAT_NUM_FEAT, FEAT_KPTS, FEAT_VECS)
get_rowid_from_superkey = functools.partial(ibs.get_chip_fids, ensure=False, qreq_=qreq_)
fid_list = ibs.dbcache.add_cleanly(const.FEATURE_TABLE, colnames, params_iter, get_rowid_from_superkey)
return fid_list
@register_ibs_method
@deleter
def delete_annot_chip_thumbs(ibs, aid_list, quiet=False):
""" Removes chip thumbnails from disk """
thumbpath_list = ibs.get_annot_chip_thumbpath(aid_list)
#ut.remove_fpaths(thumbpath_list, quiet=quiet, lbl='chip_thumbs')
ut.remove_existing_fpaths(thumbpath_list, quiet=quiet, lbl='chip_thumbs')
@register_ibs_method
@deleter
def delete_annot_chips(ibs, aid_list, qreq_=None):
""" Clears annotation data but does not remove the annotation """
_cid_list = ibs.get_annot_chip_rowids(aid_list, ensure=False, qreq_=qreq_)
cid_list = ut.filter_Nones(_cid_list)
ibs.delete_chips(cid_list)
# HACK FIX: if annot chips are None then the image thumbnail
# will not be invalidated
if len(_cid_list) != len(cid_list):
aid_list_ = [aid for aid, _cid in zip(aid_list, _cid_list) if _cid is None]
gid_list_ = ibs.get_annot_gids(aid_list_)
ibs.delete_image_thumbs(gid_list_)
@register_ibs_method
@deleter
#@cache_invalidator(const.CHIP_TABLE)
def delete_chips(ibs, cid_list, verbose=ut.VERBOSE, qreq_=None):
""" deletes images from the database that belong to gids"""
from ibeis.model.preproc import preproc_chip
if verbose:
print('[ibs] deleting %d annotation-chips' % len(cid_list))
# Delete chip-images from disk
#preproc_chip.delete_chips(ibs, cid_list, verbose=verbose)
preproc_chip.on_delete(ibs, cid_list, verbose=verbose)
# Delete chip features from sql
_fid_list = ibs.get_chip_fids(cid_list, ensure=False, qreq_=qreq_)
fid_list = ut.filter_Nones(_fid_list)
ibs.delete_features(fid_list)
# Delete chips from sql
ibs.dbcache.delete_rowids(const.CHIP_TABLE, cid_list)
@register_ibs_method
@deleter
@accessor_decors.cache_invalidator(const.FEATURE_TABLE)
def delete_features(ibs, fid_list):
""" deletes images from the database that belong to fids"""
if ut.VERBOSE:
print('[ibs] deleting %d features' % len(fid_list))
ibs.dbcache.delete_rowids(const.FEATURE_TABLE, fid_list)
@register_ibs_method
@getter_1to1
def get_chip_aids(ibs, cid_list):
aid_list = ibs.dbcache.get(const.CHIP_TABLE, (ANNOT_ROWID,), cid_list)
return aid_list
@register_ibs_method
@default_decorator
def get_chip_config_rowid(ibs, qreq_=None):
""" # FIXME: Configs are still handled poorly
This method deviates from the rest of the controller methods because it
always returns a scalar instead of a list. I'm still not sure how to
make it more ibeisy
"""
if qreq_ is not None:
# TODO store config_rowid in qparams
# Or find better way to do this in general
chip_cfg_suffix = qreq_.qparams.chip_cfgstr
else:
chip_cfg_suffix = ibs.cfg.chip_cfg.get_cfgstr()
chip_cfg_rowid = ibs.add_config(chip_cfg_suffix)
return chip_cfg_rowid
@register_ibs_method
@getter_1to1
def get_chip_configids(ibs, cid_list):
config_rowid_list = ibs.dbcache.get(const.CHIP_TABLE, ('config_rowid',), cid_list)
return config_rowid_list
@register_ibs_method
@getter_1to1
def get_chip_detectpaths(ibs, cid_list):
"""
Returns:
new_gfpath_list (list): a list of image paths resized to a constant area for detection
"""
from ibeis.model.preproc import preproc_detectimg
new_gfpath_list = preproc_detectimg.compute_and_write_detectchip_lazy(ibs, cid_list)
return new_gfpath_list
@register_ibs_method
def get_chip_feat_rowids(ibs, cid_list, ensure=True, eager=True, nInput=None, qreq_=None):
# alias for get_chip_fids
return get_chip_fids(ibs, cid_list, ensure=ensure, eager=eager, nInput=nInput, qreq_=qreq_)
@register_ibs_method
@getter_1to1
@accessor_decors.dev_cache_getter(const.CHIP_TABLE, 'feature_rowid')
def get_chip_fids(ibs, cid_list, ensure=True, eager=True, nInput=None, qreq_=None):
if ensure:
ibs.add_chip_feats(cid_list, qreq_=qreq_)
feat_config_rowid = ibs.get_feat_config_rowid(qreq_=qreq_)
colnames = ('feature_rowid',)
where_clause = CHIP_ROWID + '=? AND config_rowid=?'
params_iter = ((cid, feat_config_rowid) for cid in cid_list)
fid_list = ibs.dbcache.get_where(const.FEATURE_TABLE, colnames, params_iter,
where_clause, eager=eager,
nInput=nInput)
return fid_list
@register_ibs_method
@getter_1to1
def get_chip_uris(ibs, cid_list):
"""
Returns:
        chip_fpath_list (list): a list of chip paths by their cid
"""
chip_fpath_list = ibs.dbcache.get(const.CHIP_TABLE, ('chip_uri',), cid_list)
return chip_fpath_list
@register_ibs_method
@getter_1to1
#@cache_getter('const.CHIP_TABLE', 'chip_size')
def get_chip_sizes(ibs, cid_list):
chipsz_list = ibs.dbcache.get(const.CHIP_TABLE, ('chip_width', 'chip_height',), cid_list)
return chipsz_list
@register_ibs_method
@getter_1to1
def get_chips(ibs, cid_list, ensure=True):
"""
Returns:
        chip_list (list): a list of cropped images in numpy array form by their cid
"""
from ibeis.model.preproc import preproc_chip
if ensure:
try:
ut.assert_all_not_None(cid_list, 'cid_list')
except AssertionError as ex:
ut.printex(ex, 'Invalid cid_list', key_list=[
'ensure', 'cid_list'])
raise
aid_list = ibs.get_chip_aids(cid_list)
chip_list = preproc_chip.compute_or_read_annotation_chips(ibs, aid_list, ensure=ensure)
return chip_list
@register_ibs_method
@default_decorator
def get_feat_config_rowid(ibs, qreq_=None):
"""
Returns the feature configuration id based on the cfgstr
defined by ibs.cfg.feat_cfg.get_cfgstr()
# FIXME: Configs are still handled poorly
used in ibeis.model.preproc.preproc_feats in the param
generator. (that should probably be moved into the controller)
"""
if qreq_ is not None:
# TODO store config_rowid in qparams
# Or find better way to do this in general
feat_cfg_suffix = qreq_.qparams.feat_cfgstr
else:
feat_cfg_suffix = ibs.cfg.feat_cfg.get_cfgstr()
feat_cfg_rowid = ibs.add_config(feat_cfg_suffix)
return feat_cfg_rowid
@register_ibs_method
@getter_1toM
@accessor_decors.cache_getter(const.FEATURE_TABLE, FEAT_KPTS)
def get_feat_kpts(ibs, fid_list, eager=True, nInput=None):
"""
Returns:
kpts_list (list): chip keypoints in [x, y, iv11, iv21, iv22, ori] format
"""
kpts_list = ibs.dbcache.get(const.FEATURE_TABLE, (FEAT_KPTS,), fid_list, eager=eager, nInput=nInput)
return kpts_list
@register_ibs_method
@getter_1toM
@accessor_decors.cache_getter(const.FEATURE_TABLE, FEAT_VECS)
def get_feat_vecs(ibs, fid_list, eager=True, nInput=None):
"""
Returns:
vecs_list (list): chip SIFT descriptors
"""
vecs_list = ibs.dbcache.get(const.FEATURE_TABLE, (FEAT_VECS,), fid_list, eager=eager, nInput=nInput)
return vecs_list
@register_ibs_method
@getter_1to1
@accessor_decors.cache_getter(const.FEATURE_TABLE, FEAT_NUM_FEAT)
def get_num_feats(ibs, fid_list, eager=True, nInput=None):
"""
Returns:
nFeats_list (list): the number of keypoint / descriptor pairs
"""
nFeats_list = ibs.dbcache.get(const.FEATURE_TABLE, (FEAT_NUM_FEAT,), fid_list, eager=True, nInput=None)
nFeats_list = [(-1 if nFeats is None else nFeats) for nFeats in nFeats_list]
return nFeats_list
@register_ibs_method
@ider
def get_valid_cids(ibs, qreq_=None):
""" Valid chip rowids of the current configuration """
# FIXME: configids need reworking
chip_config_rowid = ibs.get_chip_config_rowid(qreq_=qreq_)
cid_list = ibs.dbcache.get_all_rowids_where(const.FEATURE_TABLE, 'config_rowid=?', (chip_config_rowid,))
return cid_list
@register_ibs_method
@ider
def get_valid_fids(ibs, qreq_=None):
""" Valid feature rowids of the current configuration """
# FIXME: configids need reworking
feat_config_rowid = ibs.get_feat_config_rowid(qreq_=qreq_)
fid_list = ibs.dbcache.get_all_rowids_where(const.FEATURE_TABLE, 'config_rowid=?', (feat_config_rowid,))
return fid_list
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.control.manual_dependant_funcs
python -m ibeis.control.manual_dependant_funcs --allexamples
python -m ibeis.control.manual_dependant_funcs --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
@register_ibs_method
@adder
def add_annot_chips(ibs, aid_list, qreq_=None):
""" annot.chip.add(aid_list)
CRITICAL FUNCTION MUST EXIST FOR ALL DEPENDANTS
Adds / ensures / computes a dependant property
Args:
aid_list
Returns:
returns chip_rowid_list of added (or already existing chips)
TemplateInfo:
Tadder_pl_dependant
parent = annot
leaf = chip
CommandLine:
python -m ibeis.control.manual_chip_funcs --test-add_annot_chips
Example1:
>>> # ENABLE_DOCTEST
>>> from ibeis.control.manual_chip_funcs import * # NOQA
>>> ibs, qreq_ = testdata_ibs()
>>> aid_list = ibs._get_all_aids()[::3]
>>> chip_rowid_list = ibs.add_annot_chips(aid_list, qreq_=qreq_)
>>> assert len(chip_rowid_list) == len(aid_list)
>>> ut.assert_all_not_None(chip_rowid_list)
Example2:
>>> # ENABLE_DOCTEST
>>> from ibeis.control.manual_chip_funcs import * # NOQA
>>> ibs, qreq_ = testdata_ibs()
>>> aid_list = ibs._get_all_aids()[0:10]
>>> sub_aid_list1 = aid_list[0:6]
>>> sub_aid_list2 = aid_list[5:7]
>>> sub_aid_list3 = aid_list[0:7]
>>> sub_cid_list1 = ibs.get_annot_chip_rowids(sub_aid_list1, qreq_=qreq_, ensure=True)
>>> ut.assert_all_not_None(sub_cid_list1)
>>> ibs.delete_annot_chips(sub_aid_list2)
>>> sub_cid_list3 = ibs.get_annot_chip_rowids(sub_aid_list3, qreq_=qreq_, ensure=False)
>>> # Only the last two should be None
>>> ut.assert_all_not_None(sub_cid_list3)
>>> assert sub_cid_list3[5:7] == [None, None]
>>> sub_cid_list3_ensured = ibs.get_annot_chip_rowids(sub_aid_list3, qreq_=qreq_, ensure=True)
>>> # Only two params should have been computed here
>>> ut.assert_all_not_None(sub_cid_list3_ensured)
"""
from ibeis.model.preproc import preproc_chip
ut.assert_all_not_None(aid_list, 'aid_list')
# Get requested configuration id
config_rowid = ibs.get_chip_config_rowid(qreq_=qreq_)
# Find leaf rowids that need to be computed
initial_chip_rowid_list = get_annot_chip_rowids_(ibs, aid_list, qreq_=qreq_)
# Get corresponding "dirty" parent rowids
isdirty_list = ut.flag_None_items(initial_chip_rowid_list)
dirty_aid_list = ut.filter_items(aid_list, isdirty_list)
num_dirty = len(dirty_aid_list)
if num_dirty > 0:
#if ut.VERBOSE:
print('[add_annot_chips] adding %d / %d new chips' % (len(dirty_aid_list), len(aid_list)))
# Dependant columns do not need true from_superkey getters.
# We can use the Tgetter_pl_dependant_rowids_ instead
get_rowid_from_superkey = functools.partial(
ibs.get_annot_chip_rowids_, qreq_=qreq_)
proptup_gen = preproc_chip.generate_chip_properties(ibs, dirty_aid_list)
dirty_params_iter = (
(aid, config_rowid, chip_uri, chip_width, chip_height)
for aid, (chip_uri, chip_width, chip_height,) in
zip(dirty_aid_list, proptup_gen)
)
dirty_params_iter = list(dirty_params_iter)
colnames = ['annot_rowid', 'config_rowid',
'chip_uri', 'chip_width', 'chip_height']
#chip_rowid_list = ibs.dbcache.add_cleanly(
# const.CHIP_TABLE, colnames, params_iter, get_rowid_from_superkey)
ibs.dbcache._add(const.CHIP_TABLE, colnames, dirty_params_iter)
# Now that the dirty params are added get the correct order of rowids
chip_rowid_list = get_rowid_from_superkey(aid_list)
else:
chip_rowid_list = initial_chip_rowid_list
return chip_rowid_list
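# Illustrative sketch (not part of the original module): the adders above follow
# a "compute only the dirty items" pattern -- look up existing rowids, compute
# just the missing (None) entries, then re-query so ordering matches the input.
# This toy version replaces the SQL cache with a plain dict.
def _toy_add_cleanly(cache, keys, compute):
    existing = [cache.get(key) for key in keys]
    dirty_keys = [key for key, val in zip(keys, existing) if val is None]
    for key in dirty_keys:
        cache[key] = compute(key)
    return [cache[key] for key in keys]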
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/manual_dependant_funcs.py",
"copies": "1",
"size": "16023",
"license": "apache-2.0",
"hash": 4854856790968308000,
"line_mean": 35.4159090909,
"line_max": 111,
"alpha_frac": 0.6604256381,
"autogenerated": false,
"ratio": 3.026633925198338,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9179503795588533,
"avg_score": 0.001511153541960849,
"num_lines": 440
} |
from __future__ import absolute_import, division, print_function
import subprocess
import logging
import multiprocessing
import os
from importlib import import_module
from datetime import datetime
import traceback
logger = logging.getLogger(__name__)
def run_task(job_name, shell=None, cmd=None, python=None, env=None, **kwargs):
    # use None instead of a shared mutable default; env gets a DATETIME entry below
    env = {} if env is None else env
    job_reference = job_name
    if "FILENAME" in env:
        job_reference += " for " + env["FILENAME"]
    def run_parallel_task(what, run_shell=False):
        # dict.update() returns None, so build the merged environment explicitly
        environment = dict(os.environ)
        environment.update(env)
try:
if run_shell is True:
process = subprocess.Popen(what, shell=True, executable='/bin/sh', env=environment)
else:
what = what.split()
program = what[0]
process = subprocess.Popen(what, shell=False, executable=program, env=environment)
process.wait()
except (AssertionError, subprocess.CalledProcessError) as e:
logger.error("failure running %s: %s" % (job_reference, str(e)))
clock = datetime.now()
env["DATETIME"] = clock.strftime('%Y-%m-%dT%H:%M:%SZ')
logger.info("{0} - executing {1}".format(clock.strftime('%Y-%m-%d %H:%M:%SZ'), job_reference))
if shell is not None:
run_parallel_task(shell, True)
if cmd is not None:
run_parallel_task(cmd, False)
if python is not None:
elements = python.split('.')
module_name = ''
if len(elements) > 1:
function_name = elements[-1]
module_name = '.'.join(elements[:-1])
else:
function_name = python
try:
module_import = import_module(module_name)
python_function = getattr(module_import, function_name)
os.environ.update(env)
arguments = kwargs.pop('arguments', {})
def wrapper_function():
try:
python_function(**arguments)
except Exception:
logger.error(traceback.format_exc())
process = multiprocessing.Process(target=wrapper_function)
process.start()
process.join()
except Exception:
logger.error(traceback.format_exc())
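# Illustrative usage sketch (not part of the original module): run_task can
# dispatch a shell command or a Python callable given by dotted path.  The
# dotted path and file name below are hypothetical examples.
def _example_run_task():
    run_task('say-hello', shell='echo hello')
    run_task('process-file', python='mypackage.tasks.process',
             arguments={'value': 42}, env={'FILENAME': 'data.nc'})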
| {
"repo_name": "stcorp/legato",
"path": "legato/run.py",
"copies": "1",
"size": "2241",
"license": "bsd-3-clause",
"hash": -6918189304984023000,
"line_mean": 35.1451612903,
"line_max": 99,
"alpha_frac": 0.5858991522,
"autogenerated": false,
"ratio": 4.2523719165085385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009351580319322255,
"num_lines": 62
} |
from __future__ import absolute_import, division, print_function
import sys
from functools import wraps
import six
import pytest
from _pytest.compat import is_generator, get_real_func, safe_getattr, _PytestWrapper
from _pytest.outcomes import OutcomeException
def test_is_generator():
def zap():
yield
def foo():
pass
assert is_generator(zap)
assert not is_generator(foo)
def test_real_func_loop_limit():
class Evil(object):
def __init__(self):
self.left = 1000
def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
def __getattr__(self, attr):
if not self.left:
raise RuntimeError("its over")
self.left -= 1
return self
evil = Evil()
with pytest.raises(ValueError):
res = get_real_func(evil)
print(res)
def test_get_real_func():
"""Check that get_real_func correctly unwraps decorators until reaching the real function"""
def decorator(f):
@wraps(f)
def inner():
pass
if six.PY2:
inner.__wrapped__ = f
return inner
def func():
pass
wrapped_func = decorator(decorator(func))
assert get_real_func(wrapped_func) is func
wrapped_func2 = decorator(decorator(wrapped_func))
assert get_real_func(wrapped_func2) is func
# special case for __pytest_wrapped__ attribute: used to obtain the function up until the point
# a function was wrapped by pytest itself
wrapped_func2.__pytest_wrapped__ = _PytestWrapper(wrapped_func)
assert get_real_func(wrapped_func2) is wrapped_func
@pytest.mark.skipif(
sys.version_info < (3, 4), reason="asyncio available in Python 3.4+"
)
def test_is_generator_asyncio(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
import asyncio
@asyncio.coroutine
def baz():
yield from [1,2,3]
def test_is_generator_asyncio():
assert not is_generator(baz)
"""
)
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.skipif(
sys.version_info < (3, 5), reason="async syntax available in Python 3.5+"
)
def test_is_generator_async_syntax(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
def test_is_generator_py35():
async def foo():
await foo()
async def bar():
pass
assert not is_generator(foo)
assert not is_generator(bar)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
class ErrorsHelper(object):
@property
def raise_exception(self):
raise Exception("exception should be catched")
@property
def raise_fail(self):
pytest.fail("fail should be catched")
def test_helper_failures():
helper = ErrorsHelper()
with pytest.raises(Exception):
helper.raise_exception
with pytest.raises(OutcomeException):
helper.raise_fail
def test_safe_getattr():
helper = ErrorsHelper()
assert safe_getattr(helper, "raise_exception", "default") == "default"
assert safe_getattr(helper, "raise_fail", "default") == "default"
| {
"repo_name": "davidszotten/pytest",
"path": "testing/test_compat.py",
"copies": "2",
"size": "3446",
"license": "mit",
"hash": -6396151884707085000,
"line_mean": 24.3382352941,
"line_max": 99,
"alpha_frac": 0.6189785258,
"autogenerated": false,
"ratio": 4.016317016317016,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002358381193593265,
"num_lines": 136
} |
from __future__ import absolute_import, division, print_function
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
import re
from weakref import ref
from _pytest.compat import _PY2, _PY3, PY35, safe_str
import py
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
if _PY3:
from traceback import format_exception_only
else:
from ._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" % (rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
__hash__ = None
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
try:
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
raise OSError("py.path check failed.")
except OSError:
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a _pytest._code.Source object for the full source file of the code
"""
from _pytest._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a _pytest._code.Source object for the code object's source only
"""
# return source only for that part of code
import _pytest._code
return _pytest._code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
        # handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
import _pytest._code
if self.code.fullsource is None:
return _pytest._code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
            'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals)
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry, excinfo=None):
self._excinfo = excinfo
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
import _pytest._code
return _pytest._code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
@property
def statement(self):
""" _pytest._code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlying frame")
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from _pytest._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
If __tracebackhide__ is a callable, it gets called with the
ExceptionInfo instance and can decide whether to hide the traceback.
mostly for internal use
"""
try:
tbh = self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
tbh = self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
if py.builtin.callable(tbh):
return tbh(None if self._excinfo is None else self._excinfo())
else:
return tbh
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except:
line = "???"
return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
def name(self):
return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb, excinfo=None):
""" initialize from given python traceback object and ExceptionInfo """
self._excinfo = excinfo
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur, excinfo=excinfo)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
by provding any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry, self._excinfo)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackEntry
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackEntries which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self), self._excinfo)
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self) - 1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
# XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
# print "checking for recursion at", key
l = cache.setdefault(key, [])
if l:
f = entry.frame
loc = f.f_locals
for otherloc in l:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
l.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
_assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "
def __init__(self, tup=None, exprinfo=None):
import _pytest._code
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = py.io.saferepr(tup[1])
if exprinfo and exprinfo.startswith(self._assert_start_repr):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (_pytest._code.Traceback instance)
self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
_pytest._code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals is ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
py.std.traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return unicode(loc)
def match(self, regexp):
"""
Match the regular expression 'regexp' on the string representation of
the exception. If it matches then True is returned (so that it is
        possible to write 'assert excinfo.match()'). If it doesn't match, an
AssertionError is raised.
"""
__tracebackhide__ = True
if not re.search(regexp, str(self.value)):
assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
regexp, self.value)
return True
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
raise
except:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
import _pytest._code
lines = []
if source is None or line_index >= len(source.lines):
source = _pytest._code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index + 1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
# if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" % (name, str_repr))
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
# py.std.pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
import _pytest._code
source = self._getentrysource(entry)
if source is None:
source = _pytest._code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" % (entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
if is_recursion_error(excinfo):
traceback, extraline = self._truncate_recursive_traceback(traceback)
else:
extraline = None
last = traceback[-1]
entries = []
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
return ReprTraceback(entries, extraline, style=self.style)
def _truncate_recursive_traceback(self, traceback):
"""
Truncate the given recursive traceback trying to find the starting point
of the recursion.
The detection is done by going through each traceback entry and finding the
point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.
Handle the situation where the recursion process might raise an exception (for example
comparing numpy arrays using equality raises a TypeError), in which case we do our best to
warn the user of the error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
extraline = (
'!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
' The following exception happened when comparing locals in the stack frame:\n'
' {exc_type}: {exc_msg}\n'
' Displaying first and last {max_frames} stack frames out of {total}.'
).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
traceback = traceback[:max_frames] + traceback[-max_frames:]
else:
if recursionindex is not None:
extraline = "!!! Recursion detected (same locals & position)"
traceback = traceback[:recursionindex + 1]
else:
extraline = None
return traceback, extraline
def repr_excinfo(self, excinfo):
if _PY2:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
else:
repr_chain = []
e = excinfo.value
descr = None
while e is not None:
if excinfo:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
else:
# fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work
reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None))
reprcrash = None
repr_chain += [(reprtraceback, reprcrash, descr)]
if e.__cause__ is not None:
e = e.__cause__
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
descr = 'The above exception was the direct cause of the following exception:'
elif (e.__context__ is not None and not e.__suppress_context__):
e = e.__context__
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
descr = 'During handling of the above exception, another exception occurred:'
else:
e = None
repr_chain.reverse()
return ExceptionChainRepr(repr_chain)
class TerminalRepr(object):
def __str__(self):
s = self.__unicode__()
if _PY2:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" % (self.__class__, id(self))
class ExceptionRepr(TerminalRepr):
def __init__(self):
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ExceptionChainRepr(ExceptionRepr):
def __init__(self, chain):
super(ExceptionChainRepr, self).__init__()
self.chain = chain
# reprcrash and reprtraceback of the outermost (the newest) exception
# in the chain
self.reprtraceback = chain[-1][0]
self.reprcrash = chain[-1][1]
def toterminal(self, tw):
for element in self.chain:
element[0].toterminal(tw)
if element[2] is not None:
tw.line("")
tw.line(element[2], yellow=True)
super(ExceptionChainRepr, self).toterminal(tw)
class ReprExceptionInfo(ExceptionRepr):
def __init__(self, reprtraceback, reprcrash):
super(ReprExceptionInfo, self).__init__()
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
super(ReprExceptionInfo, self).toterminal(tw)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i + 1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
# tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
# tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
        # using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.write(self.path, bold=True, red=True)
tw.line(":%s: %s" % (self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" % (name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj
if PY35: # RecursionError introduced in 3.5
def is_recursion_error(excinfo):
return excinfo.errisinstance(RecursionError) # noqa
else:
def is_recursion_error(excinfo):
if not excinfo.errisinstance(RuntimeError):
return False
try:
return "maximum recursion depth exceeded" in str(excinfo.value)
except UnicodeError:
return False
| {
"repo_name": "MichaelAquilina/pytest",
"path": "_pytest/_code/code.py",
"copies": "1",
"size": "31859",
"license": "mit",
"hash": -3245967323877453000,
"line_mean": 34.0870044053,
"line_max": 117,
"alpha_frac": 0.5618820428,
"autogenerated": false,
"ratio": 4.253538050734313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005428649112477274,
"num_lines": 908
} |
from __future__ import absolute_import, division, print_function
import sys
from os.path import expanduser
sys.path.append(expanduser('~/code/ibeis'))
import utool
utool.inject_colored_exceptions()
from PyQt4 import QtCore, QtGui
import utool # NOQA
from ibeis.gui.frontend_helpers import * # NOQA
QTRANSLATE = QtGui.QApplication.translate
QUTF8 = QtGui.QApplication.UnicodeUTF8
class Ui_mainSkel(object):
def setupUi(ui, front):
setup_ui(ui, front, front.back)
ui.postsetupUI()
ui.tablesTabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(front)
ui.retranslateUi(front)
ui.connectUi()
def postsetupUI(ui):
#print('[skel] Calling Postsetup')
for func in ui.postsetup_fns:
func()
def retranslateUi(ui, front):
#print('[skel] Calling Retranslate')
for key, text in ui.retranslate_dict.iteritems():
obj, setter = key
frontname = front.objectName()
#print('TRANSLATE %s.%s to %r' % (objname, setter,
# text))
qtext = QTRANSLATE(frontname, text, None, QUTF8)
getattr(obj, setter)(qtext)
def connectUi(ui):
# Connect all signals from GUI
for key, slot_fn in ui.connection_dict.iteritems():
obj, attr = key
key_sig = (obj.objectName(), attr, slot_fn.func_name)
# Do not connect signals more than once
if key_sig in ui.connected_signals:
ui.connected_signals
continue
ui.connected_signals.add(key_sig)
#print('CONNECT %s.%s to %r' % (obj.objectName(), attr,
# slot_fn.func_name))
getattr(obj, attr).connect(slot_fn)
def ensureImageSetTab(ui, front, imagesettext):
""" Ensure imageset tab for specific imagesettext """
parent = ui.imagesetsTabWidget
# ImageSetText Sanitization
if imagesettext == '' or imagesettext == 'None':
imagesettext = None
if imagesettext not in ui.imagesettext_dict:
# Create the imageset tab
tabWidget = newImageSetTabs(front, parent, imagesettext=imagesettext)
ui.imagesettext_dict[imagesettext] = tabWidget
ui.retranslateUi(front)
def deleteImageSetTab(ui, front, imagesettext):
""" Delete imageset tab for specific imagesettext """
# ImageSetText Sanitization
if imagesettext == '' or imagesettext == 'None':
imagesettext = None
        try:  # Remove the imageset tab from the bookkeeping dict
            tabWidget = ui.imagesettext_dict.pop(imagesettext)
            del tabWidget
except KeyError:
pass
ui.retranslateUi(front)
def setup_ui(ui, front, back):
ui.imagesettext_dict = {}
ui.connected_signals = set()
ui.connection_dict = {} # dict of signal / slots to connect
ui.retranslate_dict = {}
ui.retranslatable_fns = [] # A list of retranslatable functions
ui.postsetup_fns = []
back = front.back
setup_main_layout(ui, front, back)
# IMAGESET SUPERTABS
ui.imagesetsTabWidget = newTabWidget(front, ui.splitter,
'imagesetsTabWidget', vstretch=10)
ui.ensureImageSetTab(front, imagesettext=None)
# Split Panes
ui.progressBar = newProgressBar(ui.splitter, visible=False)
ui.outputEdit = newOutputEdit(ui.splitter, visible=False)
# Menus
setup_file_menu(ui, front, back)
setup_actions_menu(ui, front, back)
setup_batch_menu(ui, front, back)
setup_option_menu(ui, front, back)
setup_help_menu(ui, front, back)
setup_developer_menu(ui, front, back)
def newImageSetTabs(front, parent, imagesettext=None):
if imagesettext is None or imagesettext == 'None' or imagesettext == '':
tab_text = 'database'
imagesettext = ''
else:
tab_text = str(imagesettext)
tabWidget = newTabbedTabWidget(front, parent,
'tablesView' + imagesettext,
'tablesTabWidget' + imagesettext,
tab_text,
vstretch=10)
tabWidget.newTabbedTable('gids', imagesettext, 'Image Table',
clicked_slot_fn=front.gids_tbl_clicked,
pressed_slot_fn=front.rowid_tbl_pressed,
changed_slot_fn=front.gids_tbl_changed)
tabWidget.newTabbedTable('rids', imagesettext, 'ROI Table',
clicked_slot_fn=front.rids_tbl_clicked,
pressed_slot_fn=front.rowid_tbl_pressed,
changed_slot_fn=front.rids_tbl_changed)
tabWidget.newTabbedTable('nids', imagesettext, 'Name Table',
clicked_slot_fn=front.nids_tbl_clicked,
pressed_slot_fn=front.rowid_tbl_pressed,
changed_slot_fn=front.nids_tbl_clicked)
tabWidget.newTabbedTable('qres', imagesettext, 'Query Result Table',
clicked_slot_fn=front.qres_tbl_clicked,
pressed_slot_fn=front.rowid_tbl_pressed,
changed_slot_fn=front.qres_tbl_changed)
def setup_file_menu(ui, front, back):
""" FILE MENU """
ui.menuFile = newMenu(front, ui.menubar, 'menuFile', 'File')
ui.menuFile.newAction(
name='actionNew_Database',
text='New Database',
tooltip='Create a new folder to use as a database.',
shortcut='Ctrl+N',
slot_fn=back.new_database)
ui.menuFile.newAction(
name='actionOpen_Database',
text='Open Database',
tooltip='Opens a different database directory.',
shortcut='Ctrl+O',
slot_fn=back.open_database)
ui.menuFile.addSeparator()
ui.menuFile.newAction(
name='actionSave_Database',
tooltip='Saves csv tables',
text='Save Database',
shortcut='Ctrl+S',
slot_fn=back.save_database)
ui.menuFile.addSeparator()
ui.menuFile.newAction(
name='actionImport_Img_file',
text='Import Images (select file(s))',
shortcut=None,
slot_fn=back.import_images_from_file)
ui.menuFile.newAction(
name='actionImport_Img_dir',
text='Import Images (select directory)',
shortcut='Ctrl+I',
slot_fn=back.import_images_from_dir)
ui.menuFile.addSeparator()
ui.menuFile.newAction(
name='actionQuit',
text='Quit',
shortcut='',
slot_fn=back.quit)
def setup_actions_menu(ui, front, back):
""" ACTIONS MENU """
ui.menuActions = newMenu(front, ui.menubar, 'menuActions', 'Actions')
ui.menuActions.newAction(
name='actionAdd_ROI',
text='Add ROI',
shortcut='A',
slot_fn=back.add_roi)
ui.menuActions.newAction(
name='actionQuery',
text='Query',
shortcut='Q',
slot_fn=back.query)
ui.menuActions.addSeparator()
ui.menuActions.newAction(
name='actionReselect_ROI',
text='Reselect ROI Bbox',
shortcut='R',
slot_fn=back.reselect_roi)
ui.menuActions.newAction(
name='actionReselect_Ori',
text='Reselect ROI Orientation',
shortcut='O',
slot_fn=back.reselect_ori)
ui.menuActions.addSeparator()
ui.menuActions.newAction(
name='actionNext',
text='Select Next',
shortcut='N',
slot_fn=back.select_next)
ui.menuActions.newAction(
name='actionPrev',
text='Select Previous',
shortcut='P',
slot_fn=back.select_prev)
ui.menuActions.addSeparator()
ui.menuActions.newAction(
name='actionDelete_ROI',
text='Delete ROI',
shortcut='Ctrl+Del',
slot_fn=back.delete_roi)
ui.menuActions.newAction(
name='actionDelete_Image',
text='Trash Image',
shortcut='',
slot_fn=back.delete_image)
def setup_batch_menu(ui, front, back):
""" BATCH MENU """
ui.menuBatch = newMenu(front, ui.menubar, 'menuBatch', 'Batch')
ui.menuBatch.newAction(
name='actionPrecomputeROIFeatures',
text='Precompute Chips/Features',
shortcut='Ctrl+Return',
slot_fn=back.precompute_feats)
ui.menuBatch.newAction(
name='actionPrecompute_Queries',
text='Precompute Queries',
tooltip='''This might take anywhere from a coffee break to an
overnight procedure depending on how many ROIs you\'ve
made. It queries each chip and saves the result which
allows multiple queries to be rapidly inspected later.''',
shortcut='',
slot_fn=back.precompute_queries)
ui.menuBatch.newAction(
name='actionDetect_Grevys_Quick',
text='Detect Grevys Quick',
slot_fn=back.detect_grevys_quick)
ui.menuBatch.newAction(
name='actionDetect_Grevys_Fine',
text='Detect Grevys Fine',
slot_fn=back.detect_grevys_fine)
ui.menuBatch.addSeparator()
ui.menuBatch.newAction(
name='actionCompute_ImageSets',
text='Compute ImageSets',
shortcut='Ctrl+E',
slot_fn=back.compute_occurrences)
ui.menuBatch.addSeparator()
def setup_option_menu(ui, front, back):
""" OPTIONS MENU """
ui.menuOptions = newMenu(front, ui.menubar, 'menuOptions', 'Options')
ui.menuOptions.newAction(
name='actionLayout_Figures',
text='Layout Figures',
tooltip='Organizes windows in a grid',
shortcut='Ctrl+L',
slot_fn=back.layout_figures)
ui.menuOptions.addSeparator()
ui.menuOptions.newAction(
name='actionPreferences',
text='Edit Preferences',
tooltip='Changes algorithm parameters and program behavior.',
shortcut='Ctrl+P',
slot_fn=back.edit_preferences)
def setup_help_menu(ui, front, back):
""" HELP MENU """
ui.menuHelp = newMenu(front, ui.menubar, 'menuHelp', 'Help')
about_msg = 'IBEIS = Image Based Ecological Information System'
ui.menuHelp.newAction(
name='actionAbout',
text='About',
shortcut='',
slot_fn=msg_event('About', about_msg))
ui.menuHelp.newAction(
name='actionView_Docs',
text='View Documentation',
shortcut='',
slot_fn=back.view_docs)
# ---
ui.menuHelp.addSeparator()
# ---
ui.menuHelp.newAction(
name='actionView_DBDir',
text='View Database Directory',
shortcut='',
slot_fn=back.view_database_dir)
# ---
ui.menuHelp.addSeparator()
# ---
ui.menuHelp.newAction(
name='actionDelete_Precomputed_Results',
text='Delete Cached Query Results',
shortcut='',
slot_fn=back.delete_queryresults_dir)
ui.menuHelp.newAction(
name='actionDelete_computed_directory',
text='Delete computed directory',
shortcut='',
slot_fn=back.delete_cache)
ui.menuHelp.newAction(
name='actionDelete_global_preferences',
text='Delete Global Preferences',
shortcut='',
slot_fn=back.delete_global_prefs)
def setup_developer_menu(ui, front, back):
""" DEV MENU """
ui.menuDev = newMenu(front, ui.menubar, 'menuDev', 'Dev')
ui.menuDev.newAction(
name='actionDeveloper_reload',
text='Developer Reload',
shortcut='Ctrl+Shift+R',
slot_fn=back.dev_reload)
ui.menuDev.newAction(
name='actionDeveloper_mode',
text='Developer IPython',
shortcut='Ctrl+Shift+I',
slot_fn=back.dev_mode)
ui.menuDev.newAction(
name='actionDeveloper_CLS',
text='CLS',
shortcut='Ctrl+Shift+C',
slot_fn=back.dev_cls)
ui.menuDev.newAction(
name='actionDeveloper_DumpDB',
text='Dump SQL Database',
slot_fn=back.dev_dumpdb)
def setup_main_layout(ui, front, back):
default_title = 'IBEIS - No Database Opened'
initMainWidget(front, 'mainSkel', size=(1000, 600), title=default_title)
ui.centralwidget, ui.verticalLayout = newCentralLayout(front)
ui.splitter = newVerticalSplitter(ui.centralwidget, ui.verticalLayout)
ui.menubar = newMenubar(front, 'menubar')
if __name__ == "__main__":
import sys
    app = QtGui.QApplication(sys.argv)
    front = QtGui.QMainWindow()
front.ui = Ui_mainSkel()
front.ui.setupUi(front)
front.show()
sys.exit(app.exec_())
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/Skeleton.py",
"copies": "1",
"size": "12706",
"license": "apache-2.0",
"hash": -6032751619593585000,
"line_mean": 34.2944444444,
"line_max": 81,
"alpha_frac": 0.6032583032,
"autogenerated": false,
"ratio": 3.765856550088915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4869114853288915,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import argparse
import re
import requests
import tempfile
import zipfile
import logging
import os.path
from cbopensource.tools.eventduplicator.solr_endpoint import SolrInputSource, SolrOutputSink, LocalConnection
from cbopensource.tools.eventduplicator.transporter import Transporter, DataAnonymizer
from cbopensource.tools.eventduplicator.file_endpoint import FileInputSource, FileOutputSink
from cbopensource.tools.eventduplicator import main_log
from cbopensource.tools.eventduplicator.ssh_connection import SSHConnection
__author__ = 'jgarman'
def initialize_logger(verbose):
if verbose:
main_log.setLevel(logging.DEBUG)
else:
main_log.setLevel(logging.INFO)
# create console handler and set level to info
handler = logging.StreamHandler()
if verbose:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-15s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
main_log.addHandler(handler)
def input_from_zip(fn):
tempdir = tempfile.mkdtemp()
z = zipfile.ZipFile(fn)
z.extractall(tempdir)
return FileInputSource(tempdir)
def main():
ssh_help = ", or a remote Cb server (root@cb5.server:2202)"
parser = argparse.ArgumentParser(description="Transfer data from one Cb server to another")
parser.add_argument("source", help="Data source - can be a pathname (/tmp/blah), " +
"a URL referencing a zip package" +
"(http://my.server.com/package.zip), the local Cb server (local)%s" % ssh_help)
parser.add_argument("destination", help="Data destination - can be a filepath (/tmp/blah), " +
"the local Cb server (local)%s" % ssh_help)
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
parser.add_argument("--anonymize", help="Anonymize data in transport", action="store_true", default=False)
parser.add_argument("-q", "--query", help="Source data query (required for server input)", action="store")
parser.add_argument("--tree", help="Traverse up and down process tree", action="store_true", default=False)
options = parser.parse_args()
host_match = re.compile("([^@]+)@([^:]+)(:([\d]*))?")
source_parts = host_match.match(options.source)
destination_parts = host_match.match(options.destination)
input_source = None
initialize_logger(options.verbose)
if options.source == options.destination:
sys.stderr.write("Talk to yourself often?\n\n")
parser.print_usage()
return 2
if options.source.startswith(('http://', 'https://')):
with tempfile.NamedTemporaryFile() as handle:
response = requests.get(options.source, stream=True)
if not response.ok:
raise Exception("Could not retrieve package at %s" % options.source)
print("Downloading package from %s..." % options.source)
for block in response.iter_content(1024):
handle.write(block)
handle.flush()
print("Done. Unzipping...")
input_source = input_from_zip(handle.name)
elif options.source == 'local':
input_connection = LocalConnection()
input_source = SolrInputSource(input_connection, query=options.query)
elif source_parts:
port_number = 22
if source_parts.group(4):
port_number = int(source_parts.group(4))
input_connection = SSHConnection(username=source_parts.group(1), hostname=source_parts.group(2),
port=port_number)
input_source = SolrInputSource(input_connection, query=options.query)
else:
# source_parts is a file path
if not os.path.exists(options.source):
sys.stderr.write("Cannot find file %s\n\n" % options.source)
return 2
if os.path.isdir(options.source):
input_source = FileInputSource(options.source)
else:
print("Unzipping %s into a temporary directory for processing..." % options.source)
input_source = input_from_zip(options.source)
if type(input_source) == SolrInputSource:
if not options.query:
sys.stderr.write("Query is required when using Solr as a data source\n\n")
parser.print_usage()
return 2
if options.destination == 'local':
output_connection = LocalConnection()
output_sink = SolrOutputSink(output_connection)
elif destination_parts:
port_number = 22
if destination_parts.group(4):
port_number = int(destination_parts.group(4))
output_connection = SSHConnection(username=destination_parts.group(1), hostname=destination_parts.group(2),
port=port_number)
output_sink = SolrOutputSink(output_connection)
else:
output_sink = FileOutputSink(options.destination)
t = Transporter(input_source, output_sink, tree=options.tree)
if options.anonymize:
t.add_anonymizer(DataAnonymizer())
try:
t.transport(debug=options.verbose)
except KeyboardInterrupt:
print("\nMigration interrupted. Processed:")
print(t.get_report())
return 1
print("Migration complete!")
print(t.get_report())
return 0
if __name__ == '__main__':
main()
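# Example invocations (editor's sketch; the "python -m" module path is an
# assumption -- the tool may also be installed under a console-script name,
# and the Solr query string is illustrative only):
#
#   python -m cbopensource.tools.eventduplicator.data_migration \
#       --query 'process_name:chrome.exe' root@cb5.server:2202 /tmp/exported-events
#
#   python -m cbopensource.tools.eventduplicator.data_migration \
#       --anonymize /tmp/exported-events local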
| {
"repo_name": "carbonblack/cb-event-duplicator",
"path": "cbopensource/tools/eventduplicator/data_migration.py",
"copies": "1",
"size": "5577",
"license": "mit",
"hash": -558549309351275600,
"line_mean": 37.7291666667,
"line_max": 118,
"alpha_frac": 0.6464048772,
"autogenerated": false,
"ratio": 4.070802919708029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5217207796908029,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import collections
import errno
import importlib
import os
import os.path
import re
import itertools
from termcolor import colored
PACKAGE_REGEXP = r"^(.*?\/)([a-z0-9_-]+\/[a-z0-9_-]+)([:@][a-z0-9._+-]+|@sha256:[a-z0-9]+)?$"
def get_media_type(mediatype):
if mediatype:
match = re.match(r"application/vnd\.appr\.[a-z_-]+\.(.+?)\.(.+).(.+)", mediatype)
if match:
mediatype = match.group(1)
return mediatype
def package_filename(name, version, media_type):
return "%s_%s_%s" % (name.replace("/", "_"), version, media_type)
def parse_version(version):
if version is None or version == "default":
return {'key': "version", "value": "default"}
elif str.startswith(version, "@sha256:"):
return {'key': 'digest', 'value': version.split("@sha256:")[1]}
elif version[0] == "@":
return {'key': 'version', 'value': version[1:]}
elif version[0] == ":":
return {'key': 'channel', 'value': version[1:]}
else:
return {'key': 'unknown', 'value': version}
def parse_version_req(version):
"""
Converts a version string to a dict with following rules:
if string starts with ':' it is a channel
if string starts with 'sha256' it is a digest
else it is a release
"""
if version is None:
version = "default"
if version[0] == ':' or version.startswith('channel:'):
parts = {'key': 'channel', 'value': version.split(':')[1]}
elif version.startswith('sha256:'):
parts = {'key': 'digest', 'value': version.split('sha256:')[1]}
else:
parts = {'key': 'version', 'value': version}
return parts
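# Usage sketch (editor's addition, not part of the original module): the helper
# maps each requirement spelling onto a {key, value} pair.  The function below
# is illustrative only and is never called.
def _example_parse_version_req():
    assert parse_version_req('1.4.0') == {'key': 'version', 'value': '1.4.0'}
    assert parse_version_req(':stable') == {'key': 'channel', 'value': 'stable'}
    assert parse_version_req('sha256:abcdef') == {'key': 'digest', 'value': 'abcdef'}
    assert parse_version_req(None) == {'key': 'version', 'value': 'default'}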
def getenv(value, envname, default=None):
if not value:
if default:
value = os.getenv(envname, default)
else:
value = os.environ[envname]
return value
def split_package_name(name):
sp = re.sub(r"^https?://", "", name).split("/")
package_parts = {"host": None, "namespace": None, "package": None, "version": None}
if len(sp) >= 1:
if name[-1] != "/":
name = name + "/"
match = re.match(r"(https?://)?(.+?)/.*", name)
host_groups = match.groups()
if host_groups[0] is not None:
host = ''.join(host_groups)
else:
host = host_groups[1]
package_parts["host"] = host
if len(sp) >= 2:
package_parts["namespace"] = sp[1]
if len(sp) >= 3:
match = re.match(r"^([a-z0-9_-]+?)([:@][a-z0-9._+-]+|@sha256:[a-z0-9]+)?$", sp[2], re.I)
package, version = match.groups()
package_parts['package'] = package
package_parts['version'] = version
return package_parts
def parse_package_name(name, regexp=PACKAGE_REGEXP):
package_regexp = regexp
match = re.match(package_regexp, name, re.I)
if match is None:
raise ValueError(
"Package '%s' does not match format 'registry/namespace/name[@version|:channel]'" %
(name))
host, package, version = match.groups()
if not version:
version = 'default'
if not host:
host = None
else:
host = host[:-1]
namespace, package = package.split("/")
return {'host': host, 'namespace': namespace, 'package': package, 'version': version}
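# Usage sketch (editor's addition): parse_package_name() splits a full package
# reference of the form 'registry/namespace/name[@version|:channel]'.  Note that
# an explicit version keeps its '@'/':' prefix here; parse_version() strips it.
def _example_parse_package_name():
    parts = parse_package_name('quay.io/coreos/etcd@3.0.1')
    assert parts == {'host': 'quay.io', 'namespace': 'coreos',
                     'package': 'etcd', 'version': '@3.0.1'}
    assert parse_package_name('quay.io/coreos/etcd')['version'] == 'default'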
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def colorize(status):
msg = {}
if os.getenv("APPR_COLORIZE_OUTPUT", "true") == "true":
msg = {
'ok': 'green',
'created': 'yellow',
'updated': 'cyan',
'replaced': 'yellow',
'absent': 'green',
'deleted': 'red',
'protected': 'magenta'}
color = msg.get(status, None)
if color:
return colored(status, color)
else:
return status
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
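# Usage sketch (editor's addition): classes created through the Singleton
# metaclass hand back the same instance on every call.  The class is built via
# a direct metaclass call so the example stays valid on Python 2 and 3.
def _example_singleton():
    Config = Singleton('Config', (object,), {})
    assert Config() is Config()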
def convert_utf8(data):
try:
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert_utf8, data))
else:
return data
except UnicodeEncodeError:
return data
# from celery/kombu https://github.com/celery/celery (BSD license)
def symbol_by_name(name, aliases={}, imp=None, package=None, sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name('celery.concurrency.processes.TaskPool')
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name('default', {
... 'default': 'celery.concurrency.processes.TaskPool'})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
def _reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
if imp is None:
imp = importlib.import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError as exc:
_reraise(ValueError,
ValueError("Couldn't import {0!r}: {1}".format(name, exc)), sys.exc_info()[2])
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
def flatten(array):
return list(itertools.chain(*array))
def isbundled():
return getattr(sys, 'frozen', False)
def get_current_script_path():
executable = sys.executable
if os.path.basename(executable) == "appr":
path = executable
else:
path = sys.argv[0]
return os.path.realpath(path)
def abspath(relative_path):
""" Get absolute path """
if isbundled():
base_path = sys.executable
else:
base_path = os.path.abspath(".")
return os.path.realpath(os.path.join(base_path, relative_path))
| {
"repo_name": "app-registry/appr",
"path": "appr/utils.py",
"copies": "2",
"size": "7306",
"license": "apache-2.0",
"hash": -7817391575817106000,
"line_mean": 28.224,
"line_max": 99,
"alpha_frac": 0.5791130578,
"autogenerated": false,
"ratio": 3.8052083333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00039068615686429733,
"num_lines": 250
} |
from __future__ import absolute_import, division, print_function
import sys
import os
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path = [os.path.dirname(os.path.dirname(curr_path)), curr_path] + sys.path
curr_path = None
try:
import cPickle as pickle
except:
import pickle
import logging
import csv
import h5py
import numpy as np
import pandas as pd
import re
import auto_deepnet.utils.exceptions as exceptions
logger = logging.getLogger("auto_deepnet")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
'''
function: save_pickle_data
inputs:
- file_path: string pathname to save data to
- data_frame: pandas data_frame to save to disk in any picklable format
- pandas_format (optional): whether to save as a pandas dataframe or as a numpy array
- append (optional): whether to append data to preexisting data. Requires data to be in the same format
- mode (optional): The mode to open file as
description:
helper function to save any data to disk via pickling
'''
def save_pickle_data(file_path, data_frame, **kwargs):
logger.info("Opening pickle file {} to write data...".format(file_path))
pandas_format = kwargs.get('pandas_format', True)
append = kwargs.get('append', False)
mode = kwargs.get('mode', 'wb')
if append and os.path.isfile(file_path):
logger.info("Opening file to append data...")
try:
data_frame = pd.concat((load_pickle_data(file_path), data_frame))
except Exception as e:
logger.exception("Error appending data from {}: {}".format(file_path), e)
try:
if 'pandas_format' not in kwargs or pandas_format:
data_frame.to_pickle(file_path)
else:
with open(file_path, mode) as f:
pickle.dump(data_frame.values, f)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileSaveError
logger.info("Successfully saved pickle data")
'''
function: load_pickle_data
inputs:
- file_path: string pathname to load data from
- mode: the mode to open file as
helper function to load any pickled data from disk
'''
def load_pickle_data(file_path, **kwargs):
mode = kwargs.get('mode', 'rb')
logger.info("Opening pickle file {} to read...".format(file_path))
try:
with open(file_path, mode) as f:
data = pickle.load(f)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileLoadError
logger.info("Successfully read pickle data")
return data
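'''
Example (editor's sketch, not part of the original module): a pickle
round-trip using the two helpers above. The path is illustrative only.
'''
def _example_pickle_roundtrip():
    df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    save_pickle_data('/tmp/example.pkl', df, pandas_format=True)
    return load_pickle_data('/tmp/example.pkl')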
'''
function: save_hdf5_data
inputs:
- file_path: string pathname to save data to
- data_frame: the pandas dataframe to save to disk
- key (optional): The name to call the dataset
- pandas_format (optional): whether to save as a pandas structure or default hdf5
- mode (optional): The mode to open file as
- format (optional): whether to save as a table or fixed dataset
- append (optional): Whether data should be appended or replaced
'''
def save_hdf5_data(file_path, data_frame, **kwargs):
pandas_format = kwargs.get('pandas_format', True)
key = kwargs.get('key', 'data')
mode = kwargs.get('mode', 'a')
format = kwargs.get('format', 'table')
append = kwargs.get('append', False)
logger.info("Opening HDF5 file {} to write data...".format(file_path))
try:
if pandas_format:
with pd.HDFStore(file_path, mode=mode) as f:
if key in f and not append:
f.remove(key)
f.put(key=key, value=data_frame, format=format, append=append)
else:
            if key is None:
logger.error("Need a key when saving as default HDF5 format")
raise exceptions.FileSaveError
with h5py.File(file_path, mode) as f:
if key in f:
if append:
data_frame = pd.concat((pd.DataFrame(f[key]), data_frame))
del f[key]
f.create_dataset(key, data=data_frame.values)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileSaveError
logger.info("Successfully saved hdf5 data")
'''
function: load_hdf5_file
inputs:
- file_path: string pathname to load data from
- key (optional): name of the dataset
- pandas_format (optional): whether the file was saved in pandas format
- mode (optional): The mode to open the file as
description:
helper function to load an hdf5 file from disk
'''
def load_hdf5_data(file_path, **kwargs):
key = kwargs.get('key', None)
pandas_format = kwargs.get('pandas_format', True)
mode = kwargs.get('mode', 'r')
logger.info("Opening HDF5 file {} to read...".format(file_path))
try:
if pandas_format:
data = pd.read_hdf(file_path, key=key, mode=mode)
else:
with h5py.File(file_path, mode) as f:
data = f[key][()]
except KeyError as e:
logger.exception("Dataset {} does not exist".format(dataset))
raise exceptions.FileLoadError("Dataset does not exist")
except Exception as e:
logger.exception("Problem loading dataset: {0}".format(e))
raise exceptions.FileLoadError
logger.info("Successfully loaded HDF5 data")
return data
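'''
Example (editor's sketch): an HDF5 round-trip. With pandas_format=True the
frame goes through pd.HDFStore; with pandas_format=False only the raw
.values array is stored under `key` and a numpy array comes back on load.
'''
def _example_hdf5_roundtrip():
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    save_hdf5_data('/tmp/example.h5', df, key='data', pandas_format=True)
    return load_hdf5_data('/tmp/example.h5', key='data', pandas_format=True)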
'''
function: save_csv_data
inputs:
- file_path: string pathname to load data from
- data_frame: pandas data to save to csv
- append: whether to append to preexisting data
- mode (optional): The mode to open the file as
other inputs:
- any inputs to pd.DataFrame.to_csv() (optional)
'''
def save_csv_data(file_path, data_frame, **kwargs):
logger.info("Opening CSV file {} to write data".format(file_path))
if 'index' not in kwargs:
kwargs['index'] = False
if 'mode' not in kwargs:
kwargs['mode'] = 'w'
append = kwargs.pop('append', False)
kwargs.pop('pandas_format', None)
kwargs.pop('format', None)
try:
if append:
data_frame.to_csv(file_path, index=False, mode='a', header=False)
else:
data_frame.to_csv(file_path, **kwargs)
except Exception as e:
logger.exception("Problem saving dataset: {0}".format(e))
        raise exceptions.FileSaveError
logger.info("Successfully saved CSV data")
'''
function: load_csv_data
inputs:
- file_path: string pathname to load data from
other inputs:
- any inputs used by pd.read_csv() (optional)
'''
def load_csv_data(file_path, **kwargs):
kwargs.pop('pandas_format', None)
kwargs.pop('mode', None)
logger.info("Opening CSV file {} to read...".format(file_path))
try:
data = pd.read_csv(file_path, **kwargs)
except Exception as e:
logger.exception("Problem reading CSV: {0}".format(e))
        raise exceptions.FileLoadError
logger.info("Successfully loaded CSV data")
return data
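'''
Example (editor's sketch): a CSV round-trip. Extra keyword arguments are
passed straight through to pandas (here usecols on read).
'''
def _example_csv_roundtrip():
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    save_csv_data('/tmp/example.csv', df)
    return load_csv_data('/tmp/example.csv', usecols=['a'])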
'''
function: save_data
inputs:
- file_path: string pathname to save data to
- data_frame: data to save to disk
- save_format (optional): format to save to disk
- overwrite (optional): whether to overwrite preexisting data
- mode (optional): mode to open file in
- key: The name to save the data as (required if hdf5 format, deprecated otherwise)
- pandas_format (optional): whether to save as a pandas dataframe or as a numpy array
- append (optional): whether to append data
additional inputs:
- Any inputs that can be used by other saver functions
'''
def save_data(file_path, data_frame, save_format='hdf5', overwrite=False, mode='a', **kwargs):
if 'key' not in kwargs and save_format == 'hdf5':
logger.warning("No key specified, defaulting to 'data'")
kwargs['key'] = 'data'
if save_format != 'csv':
if 'pandas_format' not in kwargs:
kwargs['pandas_format'] = True
if 'format' not in kwargs:
kwargs['format'] = 'table'
if 'append' not in kwargs:
kwargs['append'] = False
if 'index' not in kwargs:
kwargs['index'] = False
logger.info("Attempting to save data to {}...".format(file_path))
try:
dir_name, file_name = os.path.split(file_path)
except Exception as e:
logger.exception("Error with file path {}: {}".format(file_path, e))
raise exceptions.FileSaveError("Invalid file path")
if len(dir_name) > 0 and not os.path.isdir(dir_name):
logger.info("Directory {} does not exist. Creating...".format(dir_name))
os.makedirs(dir_name)
if os.path.isfile(file_path):
if not overwrite:
logger.error("File {} already exists.".format(file_path))
raise exceptions.FileSaveError
if (mode == 'w' or save_format == 'pickle'):
logger.warning("File {} will be overwritten".format(file_path))
os.remove(file_path)
if (mode == 'a' and save_format == 'pickle'):
logger.warning("Can't use mode='a' for writing to pickle files. using mode='wb' instead...")
mode = 'wb'
saver = {
'hdf5': save_hdf5_data,
'csv': save_csv_data,
'pickle': save_pickle_data
}
try:
saver.get(save_format, save_hdf5_data)(file_path, data_frame, mode=mode, **kwargs)
except Exception as e:
logger.exception("Error saving file {}".format(file_path))
raise exceptions.FileSaveError
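'''
Example (editor's sketch): save_data dispatches on save_format, so one call
site can target HDF5, CSV or pickle. Paths are illustrative only.
'''
def _example_save_data():
    df = pd.DataFrame({'a': [1, 2]})
    save_data('/tmp/example.h5', df, save_format='hdf5', key='data', overwrite=True)
    save_data('/tmp/example.csv', df, save_format='csv', mode='w', overwrite=True)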
'''
function: load_data
inputs:
- file_path: string pathname to load data from
- load_format: format to load data as
additional inputs:
- any inputs used by other loader functions
'''
def load_data(file_path, load_format='hdf5', **kwargs):
if 'key' not in kwargs and load_format == 'hdf5':
kwargs['key'] = None
if load_format != 'csv' and 'pandas_format' not in kwargs:
kwargs['pandas_format'] = True
if 'mode' not in kwargs:
if load_format == 'pickle':
kwargs['mode'] = 'rb'
elif load_format == 'hdf5':
kwargs['mode'] = 'r'
logger.info("Attempting to load data from {}...".format(file_path))
    if not os.path.isfile(file_path):
        logger.error("File {} does not exist".format(file_path))
        raise exceptions.FileLoadError("File {} does not exist".format(file_path))
loader = {
'hdf5': load_hdf5_data,
'csv': load_csv_data,
'pickle': load_pickle_data
}
try:
return loader.get(load_format, load_hdf5_data)(file_path, **kwargs)
except Exception as e:
logger.exception("Error loading file {}".format(file_path))
raise exceptions.FileLoadError
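'''
Example (editor's sketch): load_data mirrors save_data's format dispatch.
'''
def _example_load_data():
    return load_data('/tmp/example.h5', load_format='hdf5', key='data')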
| {
"repo_name": "autodeepnet/autodeepnet",
"path": "auto_deepnet/utils/data_utils.py",
"copies": "1",
"size": "10645",
"license": "mit",
"hash": -3178479304475838000,
"line_mean": 35.9618055556,
"line_max": 107,
"alpha_frac": 0.6342883983,
"autogenerated": false,
"ratio": 3.7721474131821404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49064358114821405,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import logging
from multiprocessing import Process, Queue
import time
import traceback
import signal
from watchdog.observers import Observer
from watchdog.events import *
from . import registry
from .config import read_configuration_file
from .run import run_task
# plugins
from . import timed
from . import filesystem
class MonitorConfigFiles(PatternMatchingEventHandler):
def on_any_event(self, event):
restart()
def run(args, task_queue):
# Read the configuration
configuration, list_of_paths = read_configuration_file(args.config_file)
# Monitor configuration files
observer = Observer()
for path_to_monitor in list_of_paths:
if os.path.isdir(path_to_monitor):
# create monitor for any change in the directory
handler = MonitorConfigFiles()
path = os.path.realpath(path_to_monitor)
else:
# create monitor on directory with specific pattern for config file
handler = MonitorConfigFiles([os.path.realpath(path_to_monitor)])
path = os.path.dirname(os.path.realpath(path_to_monitor))
observer.schedule(handler, path, recursive=False)
observer.start()
# parse the config
if configuration is not None:
for name, config in configuration.items():
if 'type' in config:
type_ = config.pop('type')
trigger = registry.lookup(type_)
trigger(name, task_queue, **config)
# run the config
registry.start()
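# Editor's sketch of the structure run() expects back from
# read_configuration_file(): a mapping of task name -> settings, where 'type'
# selects a registered trigger (e.g. the timed/filesystem plugins imported
# above) and the remaining keys are passed to that trigger as keyword
# arguments. The concrete keys below are illustrative assumptions, not the
# documented schema.
#
#     configuration = {
#         'sync-products': {'type': 'timed', 'schedule': '...', 'task': '...'},
#     }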
def worker_main(task_queue):
while True:
job_name, env, kwargs = task_queue.get()
run_task(job_name, env=env, **kwargs)
def main(args):
# setup logging
level = logging.INFO
if args.verbose:
level = logging.DEBUG
# logging
handler = logging.StreamHandler()
handler.setLevel(level)
root = logging.getLogger()
root.setLevel(level)
root.addHandler(handler)
# setup worker pool, task queue
task_queue = Queue()
for i in range(args.workers):
process = Process(target=worker_main, args=(task_queue,))
process.start()
# run the command
run(args, task_queue)
# Prevent main thread to finish
while True:
time.sleep(10)
def restart():
print('Restarting due to change in configuration files')
registry.stop()
registry.join()
executable = os.environ.get('LEGATO', sys.argv[0])
try:
os.execvp(executable, sys.argv)
except Exception:
traceback.print_exc()
sys.exit(1)
def shutdown(*args, **kwargs):
sys.exit()
signal.signal(signal.SIGINT, shutdown)
| {
"repo_name": "stcorp/legato",
"path": "legato/daemon.py",
"copies": "1",
"size": "2729",
"license": "bsd-3-clause",
"hash": -4004419391066124000,
"line_mean": 24.9904761905,
"line_max": 79,
"alpha_frac": 0.6544521803,
"autogenerated": false,
"ratio": 4.048961424332345,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 105
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import py
import pytest
from _pytest import config as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
# need a short or long option
argument = parseopt.Argument()
argument = parseopt.Argument('-t')
assert argument._short_opts == ['-t']
assert argument._long_opts == []
assert argument.dest == 't'
argument = parseopt.Argument('-t', '--test')
assert argument._short_opts == ['-t']
assert argument._long_opts == ['--test']
assert argument.dest == 'test'
argument = parseopt.Argument('-t', '--test', dest='abc')
assert argument.dest == 'abc'
assert str(argument) == (
"Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')"
)
def test_argument_type(self):
argument = parseopt.Argument('-t', dest='abc', type=int)
assert argument.type is int
argument = parseopt.Argument('-t', dest='abc', type=str)
assert argument.type is str
argument = parseopt.Argument('-t', dest='abc', type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument('-t', dest='abc', type='choice')
argument = parseopt.Argument('-t', dest='abc', type=str,
choices=['red', 'blue'])
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument('-t', type=int)
argument.default = 42
argument.dest = 'abc'
res = argument.attrs()
assert res['default'] == 42
assert res['dest'] == 'abc'
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str(set(["--option1"])) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(ValueError, """
group.addoption("-x", action="store_true")
""")
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(['--hello', 'world'])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ['x']
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(["x", "--y",
"--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ['x']
assert unknown == ['--y', 'this']
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(['--hello', 'world'], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(['--ultimate-answer', '42'])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action='store_true')
parser.addoption("-S", action='store_false')
args = parser.parse(['-R', '4', '2', '-S'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
args = parser.parse(['-R', '-S', '4', '2', '-R'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
assert args.R is True
assert args.S is False
args = parser.parse(['-R', '4', '-S', '2'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
assert args.R is True
assert args.S is False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, 'type'):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter)
parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two',
help='foo').map_long_option = {'two': 'two-word'}
# throws error on --deux only!
parser.add_argument('-d', '--deuxmots', '--deux-mots',
action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'}
parser.add_argument('-s', action='store_true', help='single short')
parser.add_argument('--abc', '-a',
action='store_true', help='bar')
parser.add_argument('--klm', '-k', '--kl-m',
action='store_true', help='bar')
parser.add_argument('-P', '--pq-r', '-p', '--pqr',
action='store_true', help='bar')
parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort',
action='store_true', help='bar')
parser.add_argument('-x', '--exit-on-first', '--exitfirst',
action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'}
parser.add_argument('files_and_dirs', nargs='*')
args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst'])
assert args.twoword == 'hallo'
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(['--deux-mots'])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(['file', 'dir'])
assert '|'.join(args.files_and_dirs) == 'file|dir'
def test_drop_short_0(self, parser):
parser.addoption('--funcarg', '--func-arg', action='store_true')
parser.addoption('--abc-def', '--abc-def', action='store_true')
parser.addoption('--klm-hij', action='store_true')
args = parser.parse(['--funcarg', '--k'])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption('--func-arg', '--doit', action='store_true')
args = parser.parse(['--doit'])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true')
args = parser.parse(['abcd'])
assert args.func_arg is False
assert args.file_or_dir == ['abcd']
def test_drop_short_help0(self, parser, capsys):
parser.addoption('--func-args', '--doit', help='foo',
action='store_true')
parser.parse([])
help = parser.optparser.format_help()
assert '--func-args, --doit foo' in help
# testing would be more helpful with all help generated
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption('--doit', '--func-args', action='store_true', help='foo')
group._addoption("-h", "--help", action="store_true", dest="help",
help="show help message and configuration info")
parser.parse(['-h'])
help = parser.optparser.format_help()
assert '-doit, --func-args foo' in help
def test_multiple_metavar_help(self, parser):
"""
Help text for options with a metavar tuple should display help
in the form "--preferences=value1 value2 value3" (#2004).
"""
group = parser.getgroup("general")
group.addoption('--preferences', metavar=('value1', 'value2', 'value3'), nargs=3)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(['-h'])
help = parser.optparser.format_help()
assert '--preferences=value1 value2 value3' in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind('bash'):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,))
with open(str(script), 'w') as fp:
# redirect output from argcomplete to stdin and stderr is not trivial
# http://stackoverflow.com/q/12589419/1307905
# so we use bash
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
# alternative would be exteneded Testdir.{run(),_run(),popen()} to be able
# to handle a keyword argument env that replaces os.environ in popen or
# extends the copy, advantage: could not forget to restore
monkeypatch.setenv('_ARGCOMPLETE', "1")
monkeypatch.setenv('_ARGCOMPLETE_IFS', "\x0b")
monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:')
arg = '--fu'
monkeypatch.setenv('COMP_LINE', "pytest " + arg)
monkeypatch.setenv('COMP_POINT', str(len("pytest " + arg)))
result = testdir.run('bash', str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
os.mkdir('test_argcomplete.d')
arg = 'test_argc'
monkeypatch.setenv('COMP_LINE', "pytest " + arg)
monkeypatch.setenv('COMP_POINT', str(len('pytest ' + arg)))
result = testdir.run('bash', str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| {
"repo_name": "avadacatavra/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_parseopt.py",
"copies": "13",
"size": "13167",
"license": "mpl-2.0",
"hash": 2340592460295924000,
"line_mean": 41.6116504854,
"line_max": 110,
"alpha_frac": 0.5827447406,
"autogenerated": false,
"ratio": 3.823170731707317,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import py, pytest
from _pytest import config as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
# need a short or long option
argument = parseopt.Argument()
argument = parseopt.Argument('-t')
assert argument._short_opts == ['-t']
assert argument._long_opts == []
assert argument.dest == 't'
argument = parseopt.Argument('-t', '--test')
assert argument._short_opts == ['-t']
assert argument._long_opts == ['--test']
assert argument.dest == 'test'
argument = parseopt.Argument('-t', '--test', dest='abc')
assert argument.dest == 'abc'
assert str(argument) == (
"Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')"
)
def test_argument_type(self):
argument = parseopt.Argument('-t', dest='abc', type=int)
assert argument.type is int
argument = parseopt.Argument('-t', dest='abc', type=str)
assert argument.type is str
argument = parseopt.Argument('-t', dest='abc', type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument('-t', dest='abc', type='choice')
argument = parseopt.Argument('-t', dest='abc', type=str,
choices=['red', 'blue'])
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument('-t', type=int)
argument.default = 42
argument.dest = 'abc'
res = argument.attrs()
assert res['default'] == 42
assert res['dest'] == 'abc'
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str(set(["--option1"])) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(ValueError, """
group.addoption("-x", action="store_true")
""")
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(['--hello', 'world'])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ['x']
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(["x", "--y",
"--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ['x']
assert unknown == ['--y', 'this']
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(['--hello', 'world'], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(['--ultimate-answer', '42'])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action='store_true')
parser.addoption("-S", action='store_false')
args = parser.parse(['-R', '4', '2', '-S'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
args = parser.parse(['-R', '-S', '4', '2', '-R'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
assert args.R == True
assert args.S == False
args = parser.parse(['-R', '4', '-S', '2'])
assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
assert args.R == True
assert args.S == False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, 'type'):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter)
parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two',
help='foo').map_long_option = {'two': 'two-word'}
# throws error on --deux only!
parser.add_argument('-d', '--deuxmots', '--deux-mots',
action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'}
parser.add_argument('-s', action='store_true', help='single short')
parser.add_argument('--abc', '-a',
action='store_true', help='bar')
parser.add_argument('--klm', '-k', '--kl-m',
action='store_true', help='bar')
parser.add_argument('-P', '--pq-r', '-p', '--pqr',
action='store_true', help='bar')
parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort',
action='store_true', help='bar')
parser.add_argument('-x', '--exit-on-first', '--exitfirst',
action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'}
parser.add_argument('files_and_dirs', nargs='*')
args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst'])
assert args.twoword == 'hallo'
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(['--deux-mots'])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(['file', 'dir'])
assert '|'.join(args.files_and_dirs) == 'file|dir'
def test_drop_short_0(self, parser):
parser.addoption('--funcarg', '--func-arg', action='store_true')
parser.addoption('--abc-def', '--abc-def', action='store_true')
parser.addoption('--klm-hij', action='store_true')
args = parser.parse(['--funcarg', '--k'])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption('--func-arg', '--doit', action='store_true')
args = parser.parse(['--doit'])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true')
args = parser.parse(['abcd'])
assert args.func_arg is False
assert args.file_or_dir == ['abcd']
def test_drop_short_help0(self, parser, capsys):
parser.addoption('--func-args', '--doit', help = 'foo',
action='store_true')
parser.parse([])
help = parser.optparser.format_help()
assert '--func-args, --doit foo' in help
# testing would be more helpful with all help generated
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption('--doit', '--func-args', action='store_true', help='foo')
group._addoption("-h", "--help", action="store_true", dest="help",
help="show help message and configuration info")
parser.parse(['-h'])
help = parser.optparser.format_help()
assert '-doit, --func-args foo' in help
def test_multiple_metavar_help(self, parser):
"""
Help text for options with a metavar tuple should display help
in the form "--preferences=value1 value2 value3" (#2004).
"""
group = parser.getgroup("general")
group.addoption('--preferences', metavar=('value1', 'value2', 'value3'), nargs=3)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(['-h'])
help = parser.optparser.format_help()
assert '--preferences=value1 value2 value3' in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind('bash'):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" %(pytest_bin,))
with open(str(script), 'w') as fp:
        # redirecting argcomplete's output to stdout and stderr is not trivial
# http://stackoverflow.com/q/12589419/1307905
# so we use bash
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
    # an alternative would be to extend Testdir.{run(),_run(),popen()} to accept
    # a keyword argument env that replaces os.environ in popen or extends the
    # copy; advantage: the restore step could not be forgotten
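    # these environment variables emulate bash's programmable-completion
    # protocol as consumed by argcomplete: _ARGCOMPLETE activates completion
    # mode, COMP_LINE/COMP_POINT describe the command line being completed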
monkeypatch.setenv('_ARGCOMPLETE', "1")
monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b")
monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:')
arg = '--fu'
monkeypatch.setenv('COMP_LINE', "pytest " + arg)
monkeypatch.setenv('COMP_POINT', str(len("pytest " + arg)))
result = testdir.run('bash', str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
if py.std.sys.version_info < (2,7):
result.stdout.lines = result.stdout.lines[0].split('\x0b')
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
if py.std.sys.version_info < (2,7):
return
os.mkdir('test_argcomplete.d')
arg = 'test_argc'
monkeypatch.setenv('COMP_LINE', "pytest " + arg)
monkeypatch.setenv('COMP_POINT', str(len('pytest ' + arg)))
result = testdir.run('bash', str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| {
"repo_name": "flub/pytest",
"path": "testing/test_parseopt.py",
"copies": "1",
"size": "13411",
"license": "mit",
"hash": -4827687024680913000,
"line_mean": 41.983974359,
"line_max": 110,
"alpha_frac": 0.5815375438,
"autogenerated": false,
"ratio": 3.808861119000284,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881635811943665,
"avg_score": 0.0017525701713236848,
"num_lines": 312
} |
from __future__ import absolute_import, division, print_function
import sys
import platform
import os
import _pytest._code
from _pytest.debugging import SUPPORTS_BREAKPOINT_BUILTIN
import pytest
_ENVIRON_PYTHONBREAKPOINT = os.environ.get("PYTHONBREAKPOINT", "")
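# captured at import time so tests can be skipped when the user has already
# overridden the default breakpoint() hook via PYTHONBREAKPOINT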
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
def custom_pdb_calls():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
return called
@pytest.fixture
def custom_debugger_hook():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomDebugger(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
def set_trace(self, frame):
print("**CustomDebugger**")
called.append("set_trace")
_pytest._CustomDebugger = _CustomDebugger
yield called
del _pytest._CustomDebugger
class TestPDB(object):
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin("debugging")
monkeypatch.setattr(plugin, "post_mortem", mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
def test_func():
assert 0
""",
)
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""",
)
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import pytest
def test_func():
pytest.skip("hello")
""",
)
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
import bdb
def test_func():
raise bdb.BdbQuit
""",
)
assert rep.failed
assert len(pdblist) == 0
def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist):
rep = runpdb_and_get_report(
testdir,
"""
def test_func():
raise KeyboardInterrupt
""",
)
assert rep.failed
assert len(pdblist) == 1
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
i = 0
assert i == 1
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" not in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == "Darwin":
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile(
"""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("(Pdb)")
child.sendline("p self.filename")
child.sendeof()
rest = child.read().decode("utf8")
assert "debug.me" in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile(
"""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
"""
)
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect("Skipping also with pdb active")
child.expect("1 skipped in")
child.sendeof()
self.flush(child)
def test_pdb_print_captured_stdout(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
print("get\\x20rekt")
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stdout")
child.expect("get rekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_print_captured_stderr(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
import sys
sys.stderr.write("get\\x20rekt")
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("captured stderr")
child.expect("get rekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "get rekt" not in rest
self.flush(child)
def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
assert False
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("(Pdb)")
output = child.before.decode("utf8")
child.sendeof()
assert "captured stdout" not in output
assert "captured stderr" not in output
self.flush(child)
@pytest.mark.parametrize("showcapture", ["all", "no", "log"])
def test_pdb_print_captured_logs(self, testdir, showcapture):
p1 = testdir.makepyfile(
"""
def test_1():
import logging
logging.warn("get " + "rekt")
assert False
"""
)
child = testdir.spawn_pytest("--show-capture=%s --pdb %s" % (showcapture, p1))
if showcapture in ("all", "log"):
child.expect("captured log")
child.expect("get rekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_print_captured_logs_nologging(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
import logging
logging.warn("get " + "rekt")
assert False
"""
)
child = testdir.spawn_pytest(
"--show-capture=all --pdb " "-p no:logging %s" % p1
)
child.expect("get rekt")
output = child.before.decode("utf8")
assert "captured log" not in output
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("(Pdb)")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
xxx
"""
)
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest(
"""
def pytest_runtest_protocol():
0/0
"""
)
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1(capsys):
print ("hello1")
pytest.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print ("hello")
assert 0
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
"""
)
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("(Pdb)")
child.sendline("i")
child.expect("0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
print ("hello18")
pytest.set_trace()
x = 4
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendline("c")
child.expect("x = 4")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
pytest.set_trace()
x = 5
"""
)
child = testdir.spawn("%s %s" % (sys.executable, p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile(
"""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("xxx")
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest(
"""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
print 'enter_pdb_hook'
def pytest_configure(config):
config.testing_verification = 'configured'
"""
)
p1 = testdir.makepyfile(
"""
import pytest
def test_foo():
pytest.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send("c\n")
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
testdir.makepyfile(
custom_pdb="""
class CustomPdb(object):
def set_trace(*args, **kwargs):
print 'custom set_trace>'
"""
)
p1 = testdir.makepyfile(
"""
import pytest
def test_foo():
pytest.set_trace()
"""
)
monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir))
child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect("custom set_trace>")
self.flush(child)
class TestDebuggingBreakpoints(object):
def test_supports_breakpoint_module_global(self):
"""
        Test that SUPPORTS_BREAKPOINT_BUILTIN is True on Python 3.7+ and False
        on CPython 3.5 and 2.7
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
assert SUPPORTS_BREAKPOINT_BUILTIN is True
if sys.version_info.major == 3 and sys.version_info.minor == 5:
assert SUPPORTS_BREAKPOINT_BUILTIN is False
if sys.version_info.major == 2 and sys.version_info.minor == 7:
assert SUPPORTS_BREAKPOINT_BUILTIN is False
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
@pytest.mark.parametrize("arg", ["--pdb", ""])
def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg):
"""
        Test that sys.breakpointhook is set to the custom Pdb class once pytest
        is configured, and that the hook is reset to the system default once
        pytest has been unconfigured
"""
testdir.makeconftest(
"""
import sys
from pytest import hookimpl
from _pytest.debugging import pytestPDB
def pytest_configure(config):
config._cleanup.append(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
assert sys.breakpointhook == pytestPDB.set_trace
"""
)
testdir.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_pdb_custom_cls(self, testdir, custom_debugger_hook):
p1 = testdir.makepyfile(
"""
def test_nothing():
breakpoint()
"""
)
result = testdir.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomDebugger", p1
)
result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"])
assert custom_debugger_hook == ["init", "set_trace"]
@pytest.mark.parametrize("arg", ["--pdb", ""])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_environ_custom_class(self, testdir, custom_debugger_hook, arg):
testdir.makeconftest(
"""
import os
import sys
os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'
def pytest_configure(config):
config._cleanup.append(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
import _pytest
assert sys.breakpointhook is _pytest._CustomDebugger.set_trace
"""
)
testdir.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
@pytest.mark.skipif(
not _ENVIRON_PYTHONBREAKPOINT == "",
reason="Requires breakpoint() default value",
)
def test_sys_breakpoint_interception(self, testdir):
p1 = testdir.makepyfile(
"""
def test_1():
breakpoint()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
def test_pdb_not_altered(self, testdir):
p1 = testdir.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
| {
"repo_name": "notriddle/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_pdb.py",
"copies": "30",
"size": "21116",
"license": "mpl-2.0",
"hash": 7073402896163590000,
"line_mean": 29.0797720798,
"line_max": 94,
"alpha_frac": 0.5211214245,
"autogenerated": false,
"ratio": 4.173947420438822,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import platform
import _pytest._code
import pytest
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
def custom_pdb_calls():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
return called
class TestPDB(object):
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin('debugging')
monkeypatch.setattr(plugin, 'post_mortem', mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
def test_func():
assert 0
""")
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
def test_func():
pytest.skip("hello")
""")
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import bdb
def test_func():
raise bdb.BdbQuit
""")
assert rep.failed
assert len(pdblist) == 0
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" not in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == 'Darwin':
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile("""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect('(Pdb)')
child.sendline('p self.filename')
child.sendeof()
rest = child.read().decode("utf8")
assert 'debug.me' in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile("""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
""")
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect('Skipping also with pdb active')
child.expect('1 skipped in')
child.sendeof()
self.flush(child)
def test_pdb_interaction_capture(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
print("getrekt")
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("getrekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "getrekt" not in rest
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("(Pdb)")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile("""
import pytest
xxx
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest("""
def pytest_runtest_protocol():
0/0
""")
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1(capsys):
print ("hello1")
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print ("hello")
assert 0
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
""")
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("(Pdb)")
child.sendline('i')
child.expect("0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
print ("hello18")
pytest.set_trace()
x = 4
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendline('c')
child.expect("x = 4")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile("""
import pytest
pytest.set_trace()
x = 5
""")
child = testdir.spawn("%s %s" % (sys.executable, p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
""")
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest("""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
print 'enter_pdb_hook'
def pytest_configure(config):
config.testing_verification = 'configured'
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send('c\n')
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
testdir.makepyfile(custom_pdb="""
class CustomPdb(object):
def set_trace(*args, **kwargs):
print 'custom set_trace>'
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir))
child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect('custom set_trace>')
if child.isalive():
child.wait()
| {
"repo_name": "mattnenterprise/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_pdb.py",
"copies": "13",
"size": "12539",
"license": "mpl-2.0",
"hash": 3606061365693867000,
"line_mean": 29.8842364532,
"line_max": 82,
"alpha_frac": 0.5194991626,
"autogenerated": false,
"ratio": 4.1151952740400395,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00002967535165291709,
"num_lines": 406
} |
from __future__ import absolute_import, division, print_function
import sys
import re
PY3 = sys.version_info >= (3, 0, 0)
class EditorConfigToolObject(object):
"""Base class for EditorConfig tools"""
byte_order_marks = {
'\xef\xbb\xbf': 'utf-8-bom',
'\xfe\xff': 'utf-16be',
'\xff\xfe': 'utf-16le',
'\x00\x00\xfe\xff': 'utf-32be',
'\xff\xfe\x00\x00': 'utf-32le',
}
line_endings = {
'crlf': '\r\n',
'lf': '\n',
'cr': '\r',
}
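    # reverse mapping: newline characters -> their end_of_line names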
newlines = dict(zip(line_endings.values(), line_endings.keys()))
class EditorConfigChecker(EditorConfigToolObject):
"""Allows checking file validity based on given EditorConfig"""
def __init__(self, fix=False):
self.auto_fix = fix
self.errors = set()
def check_indentation(self, line, indent_style):
"""Return error string iff incorrect characters found in indentation"""
if indent_style == 'space' and '\t' in line:
self.errors.add("Tab indentation found")
        elif indent_style == 'tab' and re.search(r'^(\s* \t+|^ +)', line):
self.errors.add("Space indentation found")
return line
def check_charset(self, line, charset):
"""Return error string iff incorrect BOM found for expected charset"""
found_charset = None
if charset in ('utf-8', 'latin1'):
charset = None
for bom, given_charset in self.byte_order_marks.items():
if line.startswith(bom):
found_charset = given_charset
if found_charset != charset:
if not found_charset:
found_charset = "utf-8 or latin1"
self.errors.add("Charset %s found" % found_charset)
return line
def check_final_newline(self, line, insert_final_newline):
"""Return given final line with newline added/removed if necessary"""
if not line:
return line
has_final_newline = line[-1:] in ('\r', '\n')
if (insert_final_newline in ('true', 'false') and
insert_final_newline != str(has_final_newline).lower()):
if has_final_newline:
self.errors.add("Final newline found")
else:
self.errors.add("No final newline found")
if self.auto_fix:
if insert_final_newline == 'true':
return re.sub(r'\r?\n?$', r'\n', line)
elif insert_final_newline == 'false':
return re.sub(r'\r?\n?$', '', line)
return line
def check_trailing_whitespace(self, line, trim_trailing_whitespace):
"""Return line with whitespace trimmed if necessary"""
if trim_trailing_whitespace == 'true':
new_line = re.sub(r'[ \t]*(\r?\n?)$', r'\1', line)
if new_line != line:
self.errors.add("Trailing whitespace found")
if self.auto_fix:
line = new_line
return line
def check(self, filename, properties):
"""Return error string list if file format doesn't match properties"""
# Error list, current line, correctly indented line count, line number
lines = []
line = None
correctly_indented = 0
lineno = 0
def handle_line(function, property_name):
"""Add to error list if current line error for given function"""
if property_name in properties:
return function(line, properties[property_name])
else:
return line
open_opts = {'encoding': 'latin1'} if PY3 else {}
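        # latin1 maps every byte value to a code point, so reading never fails
        # with a decode error on Python 3 regardless of the file's real charset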
with open(filename, 'Ur+' if self.auto_fix else 'Ur', **open_opts) as f:
# Loop over file lines and append each error found to error list
if properties.get('end_of_line') in self.line_endings:
end_of_line = properties['end_of_line']
newline = self.line_endings[end_of_line]
else:
end_of_line = None
newline = None
for lineno, line in enumerate(f):
if end_of_line is None and f.newlines:
newline = f.newlines[0]
if lineno == 0:
handle_line(self.check_charset, 'charset')
line = handle_line(self.check_trailing_whitespace,
'trim_trailing_whitespace')
if (properties.get('indent_style') == 'tab' or
'indent_style' in properties and
'tab_width' in properties and
properties['indent_size'] == properties['tab_width']):
line = handle_line(self.check_indentation, 'indent_style')
if properties.get('indent_style') == 'space':
spaces = len(re.search('^ *', line).group(0))
if (spaces <= 1 or
spaces % int(properties['indent_size']) == 0):
correctly_indented += 1
else:
correctly_indented += 1
if self.auto_fix and newline:
line = line.replace('\n', newline)
lines.append(line)
if type(f.newlines) is tuple and end_of_line:
self.errors.add("Mixed line endings found: %s" %
','.join(self.newlines[n] for n in f.newlines))
elif (end_of_line is not None and f.newlines is not None and
newline != f.newlines):
self.errors.add("Incorrect line ending found: %s" %
self.newlines.get(f.newlines))
if lineno and float(correctly_indented) / (lineno + 1) < 0.70:
self.errors.add("Over 30% of lines appear to be incorrectly indented")
line = handle_line(self.check_final_newline,
'insert_final_newline')
if self.auto_fix and line is not None:
if newline:
line = line.replace('\n', newline)
lines[-1] = line
if self.auto_fix:
f.seek(0)
f.writelines(lines)
f.truncate()
errors = list(self.errors)
self.errors.clear()
return errors
| {
"repo_name": "treyhunner/editorconfig-tools",
"path": "editorconfig_tools/editorconfig_tools.py",
"copies": "1",
"size": "6295",
"license": "bsd-2-clause",
"hash": -5261074171448664000,
"line_mean": 39.3525641026,
"line_max": 86,
"alpha_frac": 0.5301032566,
"autogenerated": false,
"ratio": 4.25050641458474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00062465293320523,
"num_lines": 156
} |
from __future__ import absolute_import, division, print_function
import sys
import textwrap
import pytest
import _pytest._code
from _pytest.config.findpaths import getcfg, get_common_ancestor, determine_setup
from _pytest.config import _iter_rewritable_modules
from _pytest.main import EXIT_NOTESTSCOLLECTED
class TestParseIni(object):
@pytest.mark.parametrize(
"section, filename", [("pytest", "pytest.ini"), ("tool:pytest", "setup.cfg")]
)
def test_getcfg_and_config(self, testdir, tmpdir, section, filename):
sub = tmpdir.mkdir("sub")
sub.chdir()
tmpdir.join(filename).write(
_pytest._code.Source(
"""
[{section}]
name = value
""".format(
section=section
)
)
)
rootdir, inifile, cfg = getcfg([sub])
assert cfg["name"] == "value"
config = testdir.parseconfigure(sub)
assert config.inicfg["name"] == "value"
def test_getcfg_empty_path(self):
"""correctly handle zero length arguments (a la pytest '')"""
getcfg([""])
def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
monkeypatch.setenv("PYTEST_ADDOPTS", '--color no -rs --tb="short"')
tmpdir.join("pytest.ini").write(
_pytest._code.Source(
"""
[pytest]
addopts = --verbose
"""
)
)
config = testdir.parseconfig(tmpdir)
assert config.option.color == "no"
assert config.option.reportchars == "s"
assert config.option.tbstyle == "short"
assert config.option.verbose
def test_tox_ini_wrong_version(self, testdir):
testdir.makefile(
".ini",
tox="""
[pytest]
minversion=9.0
""",
)
result = testdir.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines(["*tox.ini:2*requires*9.0*actual*"])
@pytest.mark.parametrize(
"section, name",
[("tool:pytest", "setup.cfg"), ("pytest", "tox.ini"), ("pytest", "pytest.ini")],
)
def test_ini_names(self, testdir, name, section):
testdir.tmpdir.join(name).write(
textwrap.dedent(
"""
[{section}]
minversion = 1.0
""".format(
section=section
)
)
)
config = testdir.parseconfig()
assert config.getini("minversion") == "1.0"
def test_toxini_before_lower_pytestini(self, testdir):
sub = testdir.tmpdir.mkdir("sub")
sub.join("tox.ini").write(
textwrap.dedent(
"""
[pytest]
minversion = 2.0
"""
)
)
testdir.tmpdir.join("pytest.ini").write(
textwrap.dedent(
"""
[pytest]
minversion = 1.5
"""
)
)
config = testdir.parseconfigure(sub)
assert config.getini("minversion") == "2.0"
@pytest.mark.xfail(reason="probably not needed")
def test_confcutdir(self, testdir):
sub = testdir.mkdir("sub")
sub.chdir()
testdir.makeini(
"""
[pytest]
addopts = --qwe
"""
)
result = testdir.inline_run("--confcutdir=.")
assert result.ret == 0
class TestConfigCmdlineParsing(object):
def test_parsing_again_fails(self, testdir):
config = testdir.parseconfig()
pytest.raises(AssertionError, lambda: config.parse([]))
def test_explicitly_specified_config_file_is_loaded(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("custom", "")
"""
)
testdir.makeini(
"""
[pytest]
custom = 0
"""
)
testdir.makefile(
".cfg",
custom="""
[pytest]
custom = 1
""",
)
config = testdir.parseconfig("-c", "custom.cfg")
assert config.getini("custom") == "1"
testdir.makefile(
".cfg",
custom_tool_pytest_section="""
[tool:pytest]
custom = 1
""",
)
config = testdir.parseconfig("-c", "custom_tool_pytest_section.cfg")
assert config.getini("custom") == "1"
def test_absolute_win32_path(self, testdir):
temp_cfg_file = testdir.makefile(
".cfg",
custom="""
[pytest]
addopts = --version
""",
)
from os.path import normpath
temp_cfg_file = normpath(str(temp_cfg_file))
ret = pytest.main("-c " + temp_cfg_file)
assert ret == _pytest.main.EXIT_OK
class TestConfigAPI(object):
def test_config_trace(self, testdir):
config = testdir.parseconfig()
values = []
config.trace.root.setwriter(values.append)
config.trace("hello")
assert len(values) == 1
assert values[0] == "hello [config]\n"
def test_config_getoption(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addoption("--hello", "-X", dest="hello")
"""
)
config = testdir.parseconfig("--hello=this")
for x in ("hello", "--hello", "-X"):
assert config.getoption(x) == "this"
pytest.raises(ValueError, "config.getoption('qweqwe')")
@pytest.mark.skipif("sys.version_info[0] < 3")
def test_config_getoption_unicode(self, testdir):
testdir.makeconftest(
"""
from __future__ import unicode_literals
def pytest_addoption(parser):
parser.addoption('--hello', type=str)
"""
)
config = testdir.parseconfig("--hello=this")
assert config.getoption("hello") == "this"
def test_config_getvalueorskip(self, testdir):
config = testdir.parseconfig()
pytest.raises(pytest.skip.Exception, "config.getvalueorskip('hello')")
verbose = config.getvalueorskip("verbose")
assert verbose == config.option.verbose
def test_config_getvalueorskip_None(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addoption("--hello")
"""
)
config = testdir.parseconfig()
with pytest.raises(pytest.skip.Exception):
config.getvalueorskip("hello")
def test_getoption(self, testdir):
config = testdir.parseconfig()
with pytest.raises(ValueError):
config.getvalue("x")
assert config.getoption("x", 1) == 1
def test_getconftest_pathlist(self, testdir, tmpdir):
somepath = tmpdir.join("x", "y", "z")
p = tmpdir.join("conftest.py")
p.write("pathlist = ['.', %r]" % str(somepath))
config = testdir.parseconfigure(p)
assert config._getconftest_pathlist("notexist", path=tmpdir) is None
pl = config._getconftest_pathlist("pathlist", path=tmpdir)
print(pl)
assert len(pl) == 2
assert pl[0] == tmpdir
assert pl[1] == somepath
def test_addini(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("myname", "my new ini value")
"""
)
testdir.makeini(
"""
[pytest]
myname=hello
"""
)
config = testdir.parseconfig()
val = config.getini("myname")
assert val == "hello"
pytest.raises(ValueError, config.getini, "other")
def test_addini_pathlist(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")
parser.addini("abc", "abc value")
"""
)
p = testdir.makeini(
"""
[pytest]
paths=hello world/sub.py
"""
)
config = testdir.parseconfig()
values = config.getini("paths")
assert len(values) == 2
assert values[0] == p.dirpath("hello")
assert values[1] == p.dirpath("world/sub.py")
pytest.raises(ValueError, config.getini, "other")
def test_addini_args(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("args", "new args", type="args")
parser.addini("a2", "", "args", default="1 2 3".split())
"""
)
testdir.makeini(
"""
[pytest]
args=123 "123 hello" "this"
"""
)
config = testdir.parseconfig()
values = config.getini("args")
assert len(values) == 3
assert values == ["123", "123 hello", "this"]
values = config.getini("a2")
assert values == list("123")
def test_addini_linelist(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
parser.addini("a2", "", "linelist")
"""
)
testdir.makeini(
"""
[pytest]
xy= 123 345
second line
"""
)
config = testdir.parseconfig()
values = config.getini("xy")
assert len(values) == 2
assert values == ["123 345", "second line"]
values = config.getini("a2")
assert values == []
@pytest.mark.parametrize(
"str_val, bool_val", [("True", True), ("no", False), ("no-ini", True)]
)
def test_addini_bool(self, testdir, str_val, bool_val):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("strip", "", type="bool", default=True)
"""
)
if str_val != "no-ini":
testdir.makeini(
"""
[pytest]
strip=%s
"""
% str_val
)
config = testdir.parseconfig()
assert config.getini("strip") is bool_val
def test_addinivalue_line_existing(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
"""
)
testdir.makeini(
"""
[pytest]
xy= 123
"""
)
config = testdir.parseconfig()
values = config.getini("xy")
assert len(values) == 1
assert values == ["123"]
config.addinivalue_line("xy", "456")
values = config.getini("xy")
assert len(values) == 2
assert values == ["123", "456"]
def test_addinivalue_line_new(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
"""
)
config = testdir.parseconfig()
assert not config.getini("xy")
config.addinivalue_line("xy", "456")
values = config.getini("xy")
assert len(values) == 1
assert values == ["456"]
config.addinivalue_line("xy", "123")
values = config.getini("xy")
assert len(values) == 2
assert values == ["456", "123"]
def test_confcutdir_check_isdir(self, testdir):
"""Give an error if --confcutdir is not a valid directory (#2078)"""
with pytest.raises(pytest.UsageError):
testdir.parseconfig(
"--confcutdir", testdir.tmpdir.join("file").ensure(file=1)
)
with pytest.raises(pytest.UsageError):
testdir.parseconfig("--confcutdir", testdir.tmpdir.join("inexistant"))
config = testdir.parseconfig(
"--confcutdir", testdir.tmpdir.join("dir").ensure(dir=1)
)
assert config.getoption("confcutdir") == str(testdir.tmpdir.join("dir"))
@pytest.mark.parametrize(
"names, expected",
[
(["bar.py"], ["bar"]),
(["foo", "bar.py"], []),
(["foo", "bar.pyc"], []),
(["foo", "__init__.py"], ["foo"]),
(["foo", "bar", "__init__.py"], []),
],
)
def test_iter_rewritable_modules(self, names, expected):
assert list(_iter_rewritable_modules(["/".join(names)])) == expected
class TestConfigFromdictargs(object):
def test_basic_behavior(self):
from _pytest.config import Config
option_dict = {"verbose": 444, "foo": "bar", "capture": "no"}
args = ["a", "b"]
config = Config.fromdictargs(option_dict, args)
with pytest.raises(AssertionError):
config.parse(["should refuse to parse again"])
assert config.option.verbose == 444
assert config.option.foo == "bar"
assert config.option.capture == "no"
assert config.args == args
def test_origargs(self):
"""Show that fromdictargs can handle args in their "orig" format"""
from _pytest.config import Config
option_dict = {}
args = ["-vvvv", "-s", "a", "b"]
config = Config.fromdictargs(option_dict, args)
assert config.args == ["a", "b"]
assert config._origargs == args
assert config.option.verbose == 4
assert config.option.capture == "no"
def test_inifilename(self, tmpdir):
tmpdir.join("foo/bar.ini").ensure().write(
_pytest._code.Source(
"""
[pytest]
name = value
"""
)
)
from _pytest.config import Config
inifile = "../../foo/bar.ini"
option_dict = {"inifilename": inifile, "capture": "no"}
cwd = tmpdir.join("a/b")
cwd.join("pytest.ini").ensure().write(
_pytest._code.Source(
"""
[pytest]
name = wrong-value
should_not_be_set = true
"""
)
)
with cwd.ensure(dir=True).as_cwd():
config = Config.fromdictargs(option_dict, ())
assert config.args == [str(cwd)]
assert config.option.inifilename == inifile
assert config.option.capture == "no"
# this indicates this is the file used for getting configuration values
assert config.inifile == inifile
assert config.inicfg.get("name") == "value"
assert config.inicfg.get("should_not_be_set") is None
def test_options_on_small_file_do_not_blow_up(testdir):
def runfiletest(opts):
reprec = testdir.inline_run(*opts)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert skipped == passed == 0
path = testdir.makepyfile(
"""
def test_f1(): assert 0
def test_f2(): assert 0
"""
)
for opts in (
[],
["-l"],
["-s"],
["--tb=no"],
["--tb=short"],
["--tb=long"],
["--fulltrace"],
["--traceconfig"],
["-v"],
["-v", "-v"],
):
runfiletest(opts + [path])
def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = "spam"
version = "1.0"
def _get_metadata(self, name):
return ["foo.txt,sha256=abc,123"]
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
class PseudoPlugin(object):
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
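    # replace setuptools entry-point discovery so the fake plugin above is the
    # only "pytest11" plugin pytest can find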
monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
testdir.makeconftest(
"""
pytest_plugins = "mytestplugin",
"""
)
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
config = testdir.parseconfig()
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_setuptools_importerror_issue1479(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = "spam"
version = "1.0"
def _get_metadata(self, name):
return ["foo.txt,sha256=abc,123"]
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
raise ImportError("Don't hide me!")
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
with pytest.raises(ImportError):
testdir.parseconfig()
@pytest.mark.parametrize("block_it", [True, False])
def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it):
pkg_resources = pytest.importorskip("pkg_resources")
plugin_module_placeholder = object()
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = "spam"
version = "1.0"
def _get_metadata(self, name):
return ["foo.txt,sha256=abc,123"]
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
return plugin_module_placeholder
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
args = ("-p", "no:mytestplugin") if block_it else ()
config = testdir.parseconfig(*args)
config.pluginmanager.import_plugin("mytestplugin")
if block_it:
assert "mytestplugin" not in sys.modules
assert config.pluginmanager.get_plugin("mytestplugin") is None
else:
assert config.pluginmanager.get_plugin(
"mytestplugin"
) is plugin_module_placeholder
def test_cmdline_processargs_simple(testdir):
testdir.makeconftest(
"""
def pytest_cmdline_preparse(args):
args.append("-h")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*pytest*", "*-h*"])
def test_invalid_options_show_extra_information(testdir):
"""display extra information when pytest exits due to unrecognized
options in the command-line"""
testdir.makeini(
"""
[pytest]
addopts = --invalid-option
"""
)
result = testdir.runpytest()
result.stderr.fnmatch_lines(
[
"*error: unrecognized arguments: --invalid-option*",
"* inifile: %s*" % testdir.tmpdir.join("tox.ini"),
"* rootdir: %s*" % testdir.tmpdir,
]
)
@pytest.mark.parametrize(
"args",
[
["dir1", "dir2", "-v"],
["dir1", "-v", "dir2"],
["dir2", "-v", "dir1"],
["-v", "dir2", "dir1"],
],
)
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
"""
Consider all arguments in the command-line for rootdir and inifile
discovery, even if they happen to occur after an option. #949
"""
# replace "dir1" and "dir2" from "args" into their real directory
root = testdir.tmpdir.mkdir("myroot")
d1 = root.mkdir("dir1")
d2 = root.mkdir("dir2")
for i, arg in enumerate(args):
if arg == "dir1":
args[i] = d1
elif arg == "dir2":
args[i] = d2
with root.as_cwd():
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(["*rootdir: *myroot, inifile:"])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_toolongargs_issue224(testdir):
result = testdir.runpytest("-m", "hello" * 500)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_config_in_subdirectory_colon_command_line_issue2148(testdir):
conftest_source = """
def pytest_addoption(parser):
parser.addini('foo', 'foo')
"""
testdir.makefile(
".ini",
**{"pytest": "[pytest]\nfoo = root", "subdir/pytest": "[pytest]\nfoo = subdir"}
)
testdir.makepyfile(
**{
"conftest": conftest_source,
"subdir/conftest": conftest_source,
"subdir/test_foo": """
def test_foo(pytestconfig):
assert pytestconfig.getini('foo') == 'subdir'
""",
}
)
result = testdir.runpytest("subdir/test_foo.py::test_foo")
assert result.ret == 0
def test_notify_exception(testdir, capfd):
config = testdir.parseconfig()
excinfo = pytest.raises(ValueError, "raise ValueError(1)")
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert "ValueError" in err
class A(object):
def pytest_internalerror(self, excrepr):
return True
config.pluginmanager.register(A())
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert not err
def test_load_initial_conftest_last_ordering(testdir):
from _pytest.config import get_config
pm = get_config().pluginmanager
class My(object):
def pytest_load_initial_conftests(self):
pass
m = My()
pm.register(m)
hc = pm.hook.pytest_load_initial_conftests
values = hc._nonwrappers + hc._wrappers
expected = ["_pytest.config", "test_config", "_pytest.capture"]
assert [x.function.__module__ for x in values] == expected
def test_get_plugin_specs_as_list():
from _pytest.config import _get_plugin_specs_as_list
with pytest.raises(pytest.UsageError):
_get_plugin_specs_as_list({"foo"})
with pytest.raises(pytest.UsageError):
_get_plugin_specs_as_list(dict())
assert _get_plugin_specs_as_list(None) == []
assert _get_plugin_specs_as_list("") == []
assert _get_plugin_specs_as_list("foo") == ["foo"]
assert _get_plugin_specs_as_list("foo,bar") == ["foo", "bar"]
assert _get_plugin_specs_as_list(["foo", "bar"]) == ["foo", "bar"]
assert _get_plugin_specs_as_list(("foo", "bar")) == ["foo", "bar"]
class TestWarning(object):
def test_warn_config(self, testdir):
testdir.makeconftest(
"""
values = []
def pytest_configure(config):
config.warn("C1", "hello")
def pytest_logwarning(code, message):
if message == "hello" and code == "C1":
values.append(1)
"""
)
testdir.makepyfile(
"""
def test_proper(pytestconfig):
import conftest
assert conftest.values == [1]
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_warn_on_test_item_from_request(self, testdir, request):
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def fix(request):
request.node.warn("T1", "hello")
def test_hello(fix):
pass
"""
)
result = testdir.runpytest("--disable-pytest-warnings")
assert result.parseoutcomes()["warnings"] > 0
assert "hello" not in result.stdout.str()
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
===*warnings summary*===
*test_warn_on_test_item_from_request.py::test_hello*
*hello*
"""
)
class TestRootdir(object):
def test_simple_noini(self, tmpdir):
assert get_common_ancestor([tmpdir]) == tmpdir
a = tmpdir.mkdir("a")
assert get_common_ancestor([a, tmpdir]) == tmpdir
assert get_common_ancestor([tmpdir, a]) == tmpdir
with tmpdir.as_cwd():
assert get_common_ancestor([]) == tmpdir
no_path = tmpdir.join("does-not-exist")
assert get_common_ancestor([no_path]) == tmpdir
assert get_common_ancestor([no_path.join("a")]) == tmpdir
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_with_ini(self, tmpdir, name):
inifile = tmpdir.join(name)
inifile.write("[pytest]\n")
a = tmpdir.mkdir("a")
b = a.mkdir("b")
for args in ([tmpdir], [a], [b]):
rootdir, inifile, inicfg = determine_setup(None, args)
assert rootdir == tmpdir
assert inifile == inifile
rootdir, inifile, inicfg = determine_setup(None, [b, a])
assert rootdir == tmpdir
assert inifile == inifile
@pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
def test_pytestini_overides_empty_other(self, tmpdir, name):
inifile = tmpdir.ensure("pytest.ini")
a = tmpdir.mkdir("a")
a.ensure(name)
rootdir, inifile, inicfg = determine_setup(None, [a])
assert rootdir == tmpdir
assert inifile == inifile
def test_setuppy_fallback(self, tmpdir):
a = tmpdir.mkdir("a")
a.ensure("setup.cfg")
tmpdir.ensure("setup.py")
rootdir, inifile, inicfg = determine_setup(None, [a])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_nothing(self, tmpdir, monkeypatch):
monkeypatch.chdir(str(tmpdir))
rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_with_specific_inifile(self, tmpdir):
inifile = tmpdir.ensure("pytest.ini")
rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
assert rootdir == tmpdir
class TestOverrideIniArgs(object):
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_override_ini_names(self, testdir, name):
testdir.tmpdir.join(name).write(
textwrap.dedent(
"""
[pytest]
custom = 1.0"""
)
)
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("custom", "")"""
)
testdir.makepyfile(
"""
def test_pass(pytestconfig):
ini_val = pytestconfig.getini("custom")
print('\\ncustom_option:%s\\n' % ini_val)"""
)
result = testdir.runpytest("--override-ini", "custom=2.0", "-s")
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:2.0"])
result = testdir.runpytest(
"--override-ini", "custom=2.0", "--override-ini=custom=3.0", "-s"
)
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:3.0"])
def test_override_ini_pathlist(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")"""
)
testdir.makeini(
"""
[pytest]
paths=blah.py"""
)
testdir.makepyfile(
"""
import py.path
def test_pathlist(pytestconfig):
config_paths = pytestconfig.getini("paths")
print(config_paths)
for cpf in config_paths:
print('\\nuser_path:%s' % cpf.basename)"""
)
result = testdir.runpytest(
"--override-ini", "paths=foo/bar1.py foo/bar2.py", "-s"
)
result.stdout.fnmatch_lines(["user_path:bar1.py", "user_path:bar2.py"])
def test_override_multiple_and_default(self, testdir):
testdir.makeconftest(
"""
def pytest_addoption(parser):
addini = parser.addini
addini("custom_option_1", "", default="o1")
addini("custom_option_2", "", default="o2")
addini("custom_option_3", "", default=False, type="bool")
addini("custom_option_4", "", default=True, type="bool")"""
)
testdir.makeini(
"""
[pytest]
custom_option_1=custom_option_1
custom_option_2=custom_option_2
"""
)
testdir.makepyfile(
"""
def test_multiple_options(pytestconfig):
prefix = "custom_option"
for x in range(1, 5):
ini_value=pytestconfig.getini("%s_%d" % (prefix, x))
print('\\nini%d:%s' % (x, ini_value))
"""
)
result = testdir.runpytest(
"--override-ini",
"custom_option_1=fulldir=/tmp/user1",
"-o",
"custom_option_2=url=/tmp/user2?a=b&d=e",
"-o",
"custom_option_3=True",
"-o",
"custom_option_4=no",
"-s",
)
result.stdout.fnmatch_lines(
[
"ini1:fulldir=/tmp/user1",
"ini2:url=/tmp/user2?a=b&d=e",
"ini3:True",
"ini4:False",
]
)
def test_override_ini_usage_error_bad_style(self, testdir):
testdir.makeini(
"""
[pytest]
xdist_strict=False
"""
)
result = testdir.runpytest("--override-ini", "xdist_strict True", "-s")
result.stderr.fnmatch_lines(["*ERROR* *expects option=value*"])
@pytest.mark.parametrize("with_ini", [True, False])
def test_override_ini_handled_asap(self, testdir, with_ini):
"""-o should be handled as soon as possible and always override what's in ini files (#2238)"""
if with_ini:
testdir.makeini(
"""
[pytest]
python_files=test_*.py
"""
)
testdir.makepyfile(
unittest_ini_handle="""
def test():
pass
"""
)
result = testdir.runpytest("--override-ini", "python_files=unittest_*.py")
result.stdout.fnmatch_lines(["*1 passed in*"])
def test_with_arg_outside_cwd_without_inifile(self, tmpdir, monkeypatch):
monkeypatch.chdir(str(tmpdir))
a = tmpdir.mkdir("a")
b = tmpdir.mkdir("b")
rootdir, inifile, inicfg = determine_setup(None, [a, b])
assert rootdir == tmpdir
assert inifile is None
def test_with_arg_outside_cwd_with_inifile(self, tmpdir):
a = tmpdir.mkdir("a")
b = tmpdir.mkdir("b")
inifile = a.ensure("pytest.ini")
rootdir, parsed_inifile, inicfg = determine_setup(None, [a, b])
assert rootdir == a
assert inifile == parsed_inifile
@pytest.mark.parametrize("dirs", ([], ["does-not-exist"], ["a/does-not-exist"]))
def test_with_non_dir_arg(self, dirs, tmpdir):
with tmpdir.ensure(dir=True).as_cwd():
rootdir, inifile, inicfg = determine_setup(None, dirs)
assert rootdir == tmpdir
assert inifile is None
def test_with_existing_file_in_subdir(self, tmpdir):
a = tmpdir.mkdir("a")
a.ensure("exist")
with tmpdir.as_cwd():
rootdir, inifile, inicfg = determine_setup(None, ["a/exist"])
assert rootdir == tmpdir
assert inifile is None
def test_addopts_before_initini(self, monkeypatch):
cache_dir = ".custom_cache"
monkeypatch.setenv("PYTEST_ADDOPTS", "-o cache_dir=%s" % cache_dir)
from _pytest.config import get_config
config = get_config()
config._preparse([], addopts=True)
assert config._override_ini == ["cache_dir=%s" % cache_dir]
def test_override_ini_does_not_contain_paths(self):
"""Check that -o no longer swallows all options after it (#3103)"""
from _pytest.config import get_config
config = get_config()
config._preparse(["-o", "cache_dir=/cache", "/some/test/path"])
assert config._override_ini == ["cache_dir=/cache"]
def test_multiple_override_ini_options(self, testdir, request):
"""Ensure a file path following a '-o' option does not generate an error (#3103)"""
testdir.makepyfile(
**{
"conftest.py": """
def pytest_addoption(parser):
parser.addini('foo', default=None, help='some option')
parser.addini('bar', default=None, help='some option')
""",
"test_foo.py": """
def test(pytestconfig):
assert pytestconfig.getini('foo') == '1'
assert pytestconfig.getini('bar') == '0'
""",
"test_bar.py": """
def test():
assert False
""",
}
)
result = testdir.runpytest("-o", "foo=1", "-o", "bar=0", "test_foo.py")
assert "ERROR:" not in result.stderr.str()
result.stdout.fnmatch_lines(["collected 1 item", "*= 1 passed in *="])
| {
"repo_name": "emilio/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_config.py",
"copies": "30",
"size": "33360",
"license": "mpl-2.0",
"hash": 8472920065930399000,
"line_mean": 30.2359550562,
"line_max": 102,
"alpha_frac": 0.5326738609,
"autogenerated": false,
"ratio": 3.993774691727523,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import textwrap
import pytest
import _pytest._code
from _pytest.config import getcfg, get_common_ancestor, determine_setup, _iter_rewritable_modules
from _pytest.main import EXIT_NOTESTSCOLLECTED
class TestParseIni(object):
@pytest.mark.parametrize('section, filename',
[('pytest', 'pytest.ini'), ('tool:pytest', 'setup.cfg')])
def test_getcfg_and_config(self, testdir, tmpdir, section, filename):
sub = tmpdir.mkdir("sub")
sub.chdir()
tmpdir.join(filename).write(_pytest._code.Source("""
[{section}]
name = value
""".format(section=section)))
rootdir, inifile, cfg = getcfg([sub])
assert cfg['name'] == "value"
config = testdir.parseconfigure(sub)
assert config.inicfg['name'] == 'value'
def test_getcfg_empty_path(self):
"""correctly handle zero length arguments (a la pytest '')"""
getcfg([''])
def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"')
tmpdir.join("pytest.ini").write(_pytest._code.Source("""
[pytest]
addopts = --verbose
"""))
config = testdir.parseconfig(tmpdir)
assert config.option.color == 'no'
assert config.option.reportchars == 's'
assert config.option.tbstyle == 'short'
assert config.option.verbose
def test_tox_ini_wrong_version(self, testdir):
testdir.makefile('.ini', tox="""
[pytest]
minversion=9.0
""")
result = testdir.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines([
"*tox.ini:2*requires*9.0*actual*"
])
@pytest.mark.parametrize("section, name", [
('tool:pytest', 'setup.cfg'),
('pytest', 'tox.ini'),
('pytest', 'pytest.ini')],
)
def test_ini_names(self, testdir, name, section):
testdir.tmpdir.join(name).write(textwrap.dedent("""
[{section}]
minversion = 1.0
""".format(section=section)))
config = testdir.parseconfig()
assert config.getini("minversion") == "1.0"
def test_toxini_before_lower_pytestini(self, testdir):
sub = testdir.tmpdir.mkdir("sub")
sub.join("tox.ini").write(textwrap.dedent("""
[pytest]
minversion = 2.0
"""))
testdir.tmpdir.join("pytest.ini").write(textwrap.dedent("""
[pytest]
minversion = 1.5
"""))
config = testdir.parseconfigure(sub)
assert config.getini("minversion") == "2.0"
@pytest.mark.xfail(reason="probably not needed")
def test_confcutdir(self, testdir):
sub = testdir.mkdir("sub")
sub.chdir()
testdir.makeini("""
[pytest]
addopts = --qwe
""")
result = testdir.inline_run("--confcutdir=.")
assert result.ret == 0
class TestConfigCmdlineParsing(object):
def test_parsing_again_fails(self, testdir):
config = testdir.parseconfig()
pytest.raises(AssertionError, lambda: config.parse([]))
def test_explicitly_specified_config_file_is_loaded(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("custom", "")
""")
testdir.makeini("""
[pytest]
custom = 0
""")
testdir.makefile(".cfg", custom="""
[pytest]
custom = 1
""")
config = testdir.parseconfig("-c", "custom.cfg")
assert config.getini("custom") == "1"
testdir.makefile(".cfg", custom_tool_pytest_section="""
[tool:pytest]
custom = 1
""")
config = testdir.parseconfig("-c", "custom_tool_pytest_section.cfg")
assert config.getini("custom") == "1"
def test_absolute_win32_path(self, testdir):
temp_cfg_file = testdir.makefile(".cfg", custom="""
[pytest]
addopts = --version
""")
from os.path import normpath
temp_cfg_file = normpath(str(temp_cfg_file))
ret = pytest.main("-c " + temp_cfg_file)
assert ret == _pytest.main.EXIT_OK
class TestConfigAPI(object):
def test_config_trace(self, testdir):
config = testdir.parseconfig()
values = []
config.trace.root.setwriter(values.append)
config.trace("hello")
assert len(values) == 1
assert values[0] == "hello [config]\n"
def test_config_getoption(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addoption("--hello", "-X", dest="hello")
""")
config = testdir.parseconfig("--hello=this")
for x in ("hello", "--hello", "-X"):
assert config.getoption(x) == "this"
pytest.raises(ValueError, "config.getoption('qweqwe')")
@pytest.mark.skipif('sys.version_info[0] < 3')
def test_config_getoption_unicode(self, testdir):
testdir.makeconftest("""
from __future__ import unicode_literals
def pytest_addoption(parser):
parser.addoption('--hello', type=str)
""")
config = testdir.parseconfig('--hello=this')
assert config.getoption('hello') == 'this'
def test_config_getvalueorskip(self, testdir):
config = testdir.parseconfig()
pytest.raises(pytest.skip.Exception,
"config.getvalueorskip('hello')")
verbose = config.getvalueorskip("verbose")
assert verbose == config.option.verbose
def test_config_getvalueorskip_None(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addoption("--hello")
""")
config = testdir.parseconfig()
with pytest.raises(pytest.skip.Exception):
config.getvalueorskip('hello')
def test_getoption(self, testdir):
config = testdir.parseconfig()
with pytest.raises(ValueError):
config.getvalue('x')
assert config.getoption("x", 1) == 1
def test_getconftest_pathlist(self, testdir, tmpdir):
somepath = tmpdir.join("x", "y", "z")
p = tmpdir.join("conftest.py")
p.write("pathlist = ['.', %r]" % str(somepath))
config = testdir.parseconfigure(p)
assert config._getconftest_pathlist('notexist', path=tmpdir) is None
pl = config._getconftest_pathlist('pathlist', path=tmpdir)
print(pl)
assert len(pl) == 2
assert pl[0] == tmpdir
assert pl[1] == somepath
def test_addini(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("myname", "my new ini value")
""")
testdir.makeini("""
[pytest]
myname=hello
""")
config = testdir.parseconfig()
val = config.getini("myname")
assert val == "hello"
pytest.raises(ValueError, config.getini, 'other')
def test_addini_pathlist(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")
parser.addini("abc", "abc value")
""")
p = testdir.makeini("""
[pytest]
paths=hello world/sub.py
""")
config = testdir.parseconfig()
values = config.getini("paths")
assert len(values) == 2
assert values[0] == p.dirpath('hello')
assert values[1] == p.dirpath('world/sub.py')
pytest.raises(ValueError, config.getini, 'other')
def test_addini_args(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("args", "new args", type="args")
parser.addini("a2", "", "args", default="1 2 3".split())
""")
testdir.makeini("""
[pytest]
args=123 "123 hello" "this"
""")
config = testdir.parseconfig()
values = config.getini("args")
assert len(values) == 3
assert values == ["123", "123 hello", "this"]
values = config.getini("a2")
assert values == list("123")
def test_addini_linelist(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
parser.addini("a2", "", "linelist")
""")
testdir.makeini("""
[pytest]
xy= 123 345
second line
""")
config = testdir.parseconfig()
values = config.getini("xy")
assert len(values) == 2
assert values == ["123 345", "second line"]
values = config.getini("a2")
assert values == []
@pytest.mark.parametrize('str_val, bool_val',
[('True', True), ('no', False), ('no-ini', True)])
def test_addini_bool(self, testdir, str_val, bool_val):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("strip", "", type="bool", default=True)
""")
if str_val != 'no-ini':
testdir.makeini("""
[pytest]
strip=%s
""" % str_val)
config = testdir.parseconfig()
assert config.getini("strip") is bool_val
def test_addinivalue_line_existing(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
""")
testdir.makeini("""
[pytest]
xy= 123
""")
config = testdir.parseconfig()
values = config.getini("xy")
assert len(values) == 1
assert values == ["123"]
config.addinivalue_line("xy", "456")
values = config.getini("xy")
assert len(values) == 2
assert values == ["123", "456"]
def test_addinivalue_line_new(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
""")
config = testdir.parseconfig()
assert not config.getini("xy")
config.addinivalue_line("xy", "456")
values = config.getini("xy")
assert len(values) == 1
assert values == ["456"]
config.addinivalue_line("xy", "123")
values = config.getini("xy")
assert len(values) == 2
assert values == ["456", "123"]
def test_confcutdir_check_isdir(self, testdir):
"""Give an error if --confcutdir is not a valid directory (#2078)"""
with pytest.raises(pytest.UsageError):
testdir.parseconfig('--confcutdir', testdir.tmpdir.join('file').ensure(file=1))
with pytest.raises(pytest.UsageError):
testdir.parseconfig('--confcutdir', testdir.tmpdir.join('inexistant'))
config = testdir.parseconfig('--confcutdir', testdir.tmpdir.join('dir').ensure(dir=1))
assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir'))
@pytest.mark.parametrize('names, expected', [
(['bar.py'], ['bar']),
(['foo', 'bar.py'], []),
(['foo', 'bar.pyc'], []),
(['foo', '__init__.py'], ['foo']),
(['foo', 'bar', '__init__.py'], []),
])
def test_iter_rewritable_modules(self, names, expected):
assert list(_iter_rewritable_modules(['/'.join(names)])) == expected
class TestConfigFromdictargs(object):
def test_basic_behavior(self):
from _pytest.config import Config
option_dict = {
'verbose': 444,
'foo': 'bar',
'capture': 'no',
}
args = ['a', 'b']
config = Config.fromdictargs(option_dict, args)
with pytest.raises(AssertionError):
config.parse(['should refuse to parse again'])
assert config.option.verbose == 444
assert config.option.foo == 'bar'
assert config.option.capture == 'no'
assert config.args == args
def test_origargs(self):
"""Show that fromdictargs can handle args in their "orig" format"""
from _pytest.config import Config
option_dict = {}
args = ['-vvvv', '-s', 'a', 'b']
config = Config.fromdictargs(option_dict, args)
assert config.args == ['a', 'b']
assert config._origargs == args
assert config.option.verbose == 4
assert config.option.capture == 'no'
def test_inifilename(self, tmpdir):
tmpdir.join("foo/bar.ini").ensure().write(_pytest._code.Source("""
[pytest]
name = value
"""))
from _pytest.config import Config
inifile = '../../foo/bar.ini'
option_dict = {
'inifilename': inifile,
'capture': 'no',
}
cwd = tmpdir.join('a/b')
cwd.join('pytest.ini').ensure().write(_pytest._code.Source("""
[pytest]
name = wrong-value
should_not_be_set = true
"""))
with cwd.ensure(dir=True).as_cwd():
config = Config.fromdictargs(option_dict, ())
assert config.args == [str(cwd)]
assert config.option.inifilename == inifile
assert config.option.capture == 'no'
# this indicates this is the file used for getting configuration values
assert config.inifile == inifile
assert config.inicfg.get('name') == 'value'
assert config.inicfg.get('should_not_be_set') is None
def test_options_on_small_file_do_not_blow_up(testdir):
def runfiletest(opts):
reprec = testdir.inline_run(*opts)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert skipped == passed == 0
path = testdir.makepyfile("""
def test_f1(): assert 0
def test_f2(): assert 0
""")
for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'],
['--tb=long'], ['--fulltrace'],
['--traceconfig'], ['-v'], ['-v', '-v']):
runfiletest(opts + [path])
def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = 'spam'
version = '1.0'
def _get_metadata(self, name):
return ['foo.txt,sha256=abc,123']
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
class PseudoPlugin(object):
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
testdir.makeconftest("""
pytest_plugins = "mytestplugin",
""")
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
config = testdir.parseconfig()
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_setuptools_importerror_issue1479(testdir, monkeypatch):
pkg_resources = pytest.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = 'spam'
version = '1.0'
def _get_metadata(self, name):
return ['foo.txt,sha256=abc,123']
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
raise ImportError("Don't hide me!")
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
with pytest.raises(ImportError):
testdir.parseconfig()
@pytest.mark.parametrize('block_it', [True, False])
def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it):
pkg_resources = pytest.importorskip("pkg_resources")
plugin_module_placeholder = object()
def my_iter(name):
assert name == "pytest11"
class Dist(object):
project_name = 'spam'
version = '1.0'
def _get_metadata(self, name):
return ['foo.txt,sha256=abc,123']
class EntryPoint(object):
name = "mytestplugin"
dist = Dist()
def load(self):
return plugin_module_placeholder
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
args = ("-p", "no:mytestplugin") if block_it else ()
config = testdir.parseconfig(*args)
config.pluginmanager.import_plugin("mytestplugin")
if block_it:
assert "mytestplugin" not in sys.modules
assert config.pluginmanager.get_plugin('mytestplugin') is None
else:
assert config.pluginmanager.get_plugin('mytestplugin') is plugin_module_placeholder
def test_cmdline_processargs_simple(testdir):
testdir.makeconftest("""
def pytest_cmdline_preparse(args):
args.append("-h")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*pytest*",
"*-h*",
])
def test_invalid_options_show_extra_information(testdir):
"""display extra information when pytest exits due to unrecognized
options in the command-line"""
testdir.makeini("""
[pytest]
addopts = --invalid-option
""")
result = testdir.runpytest()
result.stderr.fnmatch_lines([
"*error: unrecognized arguments: --invalid-option*",
"* inifile: %s*" % testdir.tmpdir.join('tox.ini'),
"* rootdir: %s*" % testdir.tmpdir,
])
@pytest.mark.parametrize('args', [
['dir1', 'dir2', '-v'],
['dir1', '-v', 'dir2'],
['dir2', '-v', 'dir1'],
['-v', 'dir2', 'dir1'],
])
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
"""
Consider all arguments in the command-line for rootdir and inifile
discovery, even if they happen to occur after an option. #949
"""
# replace "dir1" and "dir2" from "args" into their real directory
root = testdir.tmpdir.mkdir('myroot')
d1 = root.mkdir('dir1')
d2 = root.mkdir('dir2')
for i, arg in enumerate(args):
if arg == 'dir1':
args[i] = d1
elif arg == 'dir2':
args[i] = d2
with root.as_cwd():
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile:'])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_toolongargs_issue224(testdir):
result = testdir.runpytest("-m", "hello" * 500)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_config_in_subdirectory_colon_command_line_issue2148(testdir):
conftest_source = '''
def pytest_addoption(parser):
parser.addini('foo', 'foo')
'''
testdir.makefile('.ini', **{
'pytest': '[pytest]\nfoo = root',
'subdir/pytest': '[pytest]\nfoo = subdir',
})
testdir.makepyfile(**{
'conftest': conftest_source,
'subdir/conftest': conftest_source,
'subdir/test_foo': '''
def test_foo(pytestconfig):
assert pytestconfig.getini('foo') == 'subdir'
'''})
result = testdir.runpytest('subdir/test_foo.py::test_foo')
assert result.ret == 0
def test_notify_exception(testdir, capfd):
config = testdir.parseconfig()
excinfo = pytest.raises(ValueError, "raise ValueError(1)")
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert "ValueError" in err
class A(object):
def pytest_internalerror(self, excrepr):
return True
config.pluginmanager.register(A())
config.notify_exception(excinfo)
out, err = capfd.readouterr()
assert not err
def test_load_initial_conftest_last_ordering(testdir):
from _pytest.config import get_config
pm = get_config().pluginmanager
class My(object):
def pytest_load_initial_conftests(self):
pass
m = My()
pm.register(m)
hc = pm.hook.pytest_load_initial_conftests
values = hc._nonwrappers + hc._wrappers
expected = [
"_pytest.config",
'test_config',
'_pytest.capture',
]
assert [x.function.__module__ for x in values] == expected
def test_get_plugin_specs_as_list():
from _pytest.config import _get_plugin_specs_as_list
with pytest.raises(pytest.UsageError):
_get_plugin_specs_as_list(set(['foo']))
with pytest.raises(pytest.UsageError):
_get_plugin_specs_as_list(dict())
assert _get_plugin_specs_as_list(None) == []
assert _get_plugin_specs_as_list('') == []
assert _get_plugin_specs_as_list('foo') == ['foo']
assert _get_plugin_specs_as_list('foo,bar') == ['foo', 'bar']
assert _get_plugin_specs_as_list(['foo', 'bar']) == ['foo', 'bar']
assert _get_plugin_specs_as_list(('foo', 'bar')) == ['foo', 'bar']
class TestWarning(object):
def test_warn_config(self, testdir):
testdir.makeconftest("""
values = []
def pytest_configure(config):
config.warn("C1", "hello")
def pytest_logwarning(code, message):
if message == "hello" and code == "C1":
values.append(1)
""")
testdir.makepyfile("""
def test_proper(pytestconfig):
import conftest
assert conftest.values == [1]
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_warn_on_test_item_from_request(self, testdir, request):
testdir.makepyfile("""
import pytest
@pytest.fixture
def fix(request):
request.node.warn("T1", "hello")
def test_hello(fix):
pass
""")
result = testdir.runpytest("--disable-pytest-warnings")
assert result.parseoutcomes()["warnings"] > 0
assert "hello" not in result.stdout.str()
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
===*warnings summary*===
*test_warn_on_test_item_from_request.py::test_hello*
*hello*
""")
class TestRootdir(object):
def test_simple_noini(self, tmpdir):
assert get_common_ancestor([tmpdir]) == tmpdir
a = tmpdir.mkdir("a")
assert get_common_ancestor([a, tmpdir]) == tmpdir
assert get_common_ancestor([tmpdir, a]) == tmpdir
with tmpdir.as_cwd():
assert get_common_ancestor([]) == tmpdir
no_path = tmpdir.join('does-not-exist')
assert get_common_ancestor([no_path]) == tmpdir
assert get_common_ancestor([no_path.join('a')]) == tmpdir
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_with_ini(self, tmpdir, name):
inifile = tmpdir.join(name)
inifile.write("[pytest]\n")
a = tmpdir.mkdir("a")
b = a.mkdir("b")
for args in ([tmpdir], [a], [b]):
rootdir, inifile, inicfg = determine_setup(None, args)
assert rootdir == tmpdir
assert inifile == inifile
rootdir, inifile, inicfg = determine_setup(None, [b, a])
assert rootdir == tmpdir
assert inifile == inifile
@pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
def test_pytestini_overides_empty_other(self, tmpdir, name):
inifile = tmpdir.ensure("pytest.ini")
a = tmpdir.mkdir("a")
a.ensure(name)
rootdir, inifile, inicfg = determine_setup(None, [a])
assert rootdir == tmpdir
assert inifile == inifile
def test_setuppy_fallback(self, tmpdir):
a = tmpdir.mkdir("a")
a.ensure("setup.cfg")
tmpdir.ensure("setup.py")
rootdir, inifile, inicfg = determine_setup(None, [a])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_nothing(self, tmpdir, monkeypatch):
monkeypatch.chdir(str(tmpdir))
rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
assert rootdir == tmpdir
assert inifile is None
assert inicfg == {}
def test_with_specific_inifile(self, tmpdir):
inifile = tmpdir.ensure("pytest.ini")
rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
assert rootdir == tmpdir
class TestOverrideIniArgs(object):
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_override_ini_names(self, testdir, name):
testdir.tmpdir.join(name).write(textwrap.dedent("""
[pytest]
custom = 1.0"""))
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("custom", "")""")
testdir.makepyfile("""
def test_pass(pytestconfig):
ini_val = pytestconfig.getini("custom")
print('\\ncustom_option:%s\\n' % ini_val)""")
result = testdir.runpytest("--override-ini", "custom=2.0", "-s")
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:2.0"])
result = testdir.runpytest("--override-ini", "custom=2.0",
"--override-ini=custom=3.0", "-s")
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:3.0"])
def test_override_ini_pathlist(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")""")
testdir.makeini("""
[pytest]
paths=blah.py""")
testdir.makepyfile("""
import py.path
def test_pathlist(pytestconfig):
config_paths = pytestconfig.getini("paths")
print(config_paths)
for cpf in config_paths:
print('\\nuser_path:%s' % cpf.basename)""")
result = testdir.runpytest("--override-ini",
'paths=foo/bar1.py foo/bar2.py', "-s")
result.stdout.fnmatch_lines(["user_path:bar1.py",
"user_path:bar2.py"])
def test_override_multiple_and_default(self, testdir):
testdir.makeconftest("""
def pytest_addoption(parser):
addini = parser.addini
addini("custom_option_1", "", default="o1")
addini("custom_option_2", "", default="o2")
addini("custom_option_3", "", default=False, type="bool")
addini("custom_option_4", "", default=True, type="bool")""")
testdir.makeini("""
[pytest]
custom_option_1=custom_option_1
custom_option_2=custom_option_2
""")
testdir.makepyfile("""
def test_multiple_options(pytestconfig):
prefix = "custom_option"
for x in range(1, 5):
ini_value=pytestconfig.getini("%s_%d" % (prefix, x))
print('\\nini%d:%s' % (x, ini_value))
""")
result = testdir.runpytest(
"--override-ini", 'custom_option_1=fulldir=/tmp/user1',
'-o', 'custom_option_2=url=/tmp/user2?a=b&d=e',
"-o", 'custom_option_3=True',
"-o", 'custom_option_4=no', "-s")
result.stdout.fnmatch_lines(["ini1:fulldir=/tmp/user1",
"ini2:url=/tmp/user2?a=b&d=e",
"ini3:True",
"ini4:False"])
def test_override_ini_usage_error_bad_style(self, testdir):
testdir.makeini("""
[pytest]
xdist_strict=False
""")
result = testdir.runpytest("--override-ini", 'xdist_strict True', "-s")
result.stderr.fnmatch_lines(["*ERROR* *expects option=value*"])
@pytest.mark.parametrize('with_ini', [True, False])
def test_override_ini_handled_asap(self, testdir, with_ini):
"""-o should be handled as soon as possible and always override what's in ini files (#2238)"""
if with_ini:
testdir.makeini("""
[pytest]
python_files=test_*.py
""")
testdir.makepyfile(unittest_ini_handle="""
def test():
pass
""")
result = testdir.runpytest("--override-ini", 'python_files=unittest_*.py')
result.stdout.fnmatch_lines(["*1 passed in*"])
def test_with_arg_outside_cwd_without_inifile(self, tmpdir, monkeypatch):
monkeypatch.chdir(str(tmpdir))
a = tmpdir.mkdir("a")
b = tmpdir.mkdir("b")
rootdir, inifile, inicfg = determine_setup(None, [a, b])
assert rootdir == tmpdir
assert inifile is None
def test_with_arg_outside_cwd_with_inifile(self, tmpdir):
a = tmpdir.mkdir("a")
b = tmpdir.mkdir("b")
inifile = a.ensure("pytest.ini")
rootdir, parsed_inifile, inicfg = determine_setup(None, [a, b])
assert rootdir == a
assert inifile == parsed_inifile
@pytest.mark.parametrize('dirs', ([], ['does-not-exist'],
['a/does-not-exist']))
def test_with_non_dir_arg(self, dirs, tmpdir):
with tmpdir.ensure(dir=True).as_cwd():
rootdir, inifile, inicfg = determine_setup(None, dirs)
assert rootdir == tmpdir
assert inifile is None
def test_with_existing_file_in_subdir(self, tmpdir):
a = tmpdir.mkdir("a")
a.ensure("exist")
with tmpdir.as_cwd():
rootdir, inifile, inicfg = determine_setup(None, ['a/exist'])
assert rootdir == tmpdir
assert inifile is None
def test_addopts_before_initini(self, monkeypatch):
cache_dir = '.custom_cache'
monkeypatch.setenv('PYTEST_ADDOPTS', '-o cache_dir=%s' % cache_dir)
from _pytest.config import get_config
config = get_config()
config._preparse([], addopts=True)
assert config._override_ini == ['cache_dir=%s' % cache_dir]
def test_override_ini_does_not_contain_paths(self):
"""Check that -o no longer swallows all options after it (#3103)"""
from _pytest.config import get_config
config = get_config()
config._preparse(['-o', 'cache_dir=/cache', '/some/test/path'])
assert config._override_ini == ['cache_dir=/cache']
def test_multiple_override_ini_options(self, testdir, request):
"""Ensure a file path following a '-o' option does not generate an error (#3103)"""
testdir.makepyfile(**{
"conftest.py": """
def pytest_addoption(parser):
parser.addini('foo', default=None, help='some option')
parser.addini('bar', default=None, help='some option')
""",
"test_foo.py": """
def test(pytestconfig):
assert pytestconfig.getini('foo') == '1'
assert pytestconfig.getini('bar') == '0'
""",
"test_bar.py": """
def test():
assert False
""",
})
result = testdir.runpytest('-o', 'foo=1', '-o', 'bar=0', 'test_foo.py')
assert 'ERROR:' not in result.stderr.str()
result.stdout.fnmatch_lines([
'collected 1 item',
'*= 1 passed in *=',
])
| {
"repo_name": "tareqalayan/pytest",
"path": "testing/test_config.py",
"copies": "1",
"size": "31827",
"license": "mit",
"hash": 4068192186713091000,
"line_mean": 34.2458471761,
"line_max": 102,
"alpha_frac": 0.5574512207,
"autogenerated": false,
"ratio": 3.94631122132672,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500376244202672,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import time
import utool as ut
import matplotlib as mpl
from plottool_ibeis import custom_figure
#from .custom_constants import golden_wh
SLEEP_TIME = .01
__QT4_WINDOW_LIST__ = []
ut.noinject(__name__, '[fig_presenter]')
VERBOSE = ut.get_argflag(('--verbose-fig', '--verbfig', '--verb-pt'))
#(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[fig_presenter]', DEBUG=True)
def unregister_qt4_win(win):
global __QT4_WINDOW_LIST__
if win == 'all':
__QT4_WINDOW_LIST__ = []
else:
try:
#index = __QT4_WINDOW_LIST__.index(win)
__QT4_WINDOW_LIST__.remove(win)
except ValueError:
pass
def register_qt4_win(win):
global __QT4_WINDOW_LIST__
__QT4_WINDOW_LIST__.append(win)
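# Descriptive note (not from the original source): __QT4_WINDOW_LIST__ lets
# non-matplotlib Qt windows take part in tiling/presenting alongside figure
# windows; get_all_windows() below merges both sources.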
# ---- GENERAL FIGURE COMMANDS ----
def set_geometry(fnum, x, y, w, h):
fig = custom_figure.ensure_fig(fnum)
qtwin = get_figure_window(fig)
qtwin.setGeometry(x, y, w, h)
def get_geometry(fnum):
fig = custom_figure.ensure_fig(fnum)
qtwin = get_figure_window(fig)
(x1, y1, x2, y2) = qtwin.geometry().getCoords()
(x, y, w, h) = (x1, y1, x2 - x1, y2 - y1)
return (x, y, w, h)
#def get_screen_info():
# # TODO Move dependency to guitool_ibeis
# desktop = QtWidgets.QDesktopWidget()
# mask = desktop.mask() # NOQA
# layout_direction = desktop.layoutDirection() # NOQA
# screen_number = desktop.screenNumber() # NOQA
# normal_geometry = desktop.normalGeometry() # NOQA
# num_screens = desktop.screenCount() # NOQA
# avail_rect = desktop.availableGeometry() # NOQA
# screen_rect = desktop.screenGeometry() # NOQA
# QtWidgets.QDesktopWidget().availableGeometry().center() # NOQA
# normal_geometry = desktop.normalGeometry() # NOQA
#@profile
def get_all_figures():
manager_list = mpl._pylab_helpers.Gcf.get_all_fig_managers()
all_figures = []
    # Make sure you don't show figures that this module closed
for manager in manager_list:
try:
fig = manager.canvas.figure
except AttributeError:
continue
if not fig.__dict__.get('df2_closed', False):
all_figures.append(fig)
# Return all the figures sorted by their number
all_figures = sorted(all_figures, key=lambda fig: fig.number)
return all_figures
def get_all_qt4_wins():
return __QT4_WINDOW_LIST__
def all_figures_show():
if VERBOSE:
print('all_figures_show')
if not ut.get_argflag('--noshow'):
for fig in get_all_figures():
time.sleep(SLEEP_TIME)
show_figure(fig)
#fig.show()
#fig.canvas.draw()
def show_figure(fig):
try:
fig.show()
fig.canvas.draw()
except AttributeError as ex:
if not hasattr(fig, '_no_raise_plottool_ibeis'):
ut.printex(ex, '[pt] probably registered made figure with Qt.', iswarning=True)
def all_figures_tight_layout():
if '--noshow' not in sys.argv:
for fig in iter(get_all_figures()):
fig.tight_layout()
time.sleep(SLEEP_TIME)
def get_main_win_base():
if hasattr(mpl.backends, 'backend_qt4'):
backend = mpl.backends.backend_qt4
else:
backend = mpl.backends.backend_qt5
try:
QMainWin = backend.MainWindow
except Exception as ex:
try:
ut.printex(ex, 'warning', '[fig_presenter]')
#from guitool_ibeis.__PYQT__ import QtGui
QMainWin = backend.QtWidgets.QMainWindow
except Exception as ex1:
ut.printex(ex1, 'warning', '[fig_presenter]')
QMainWin = object
return QMainWin
def get_all_windows():
""" Returns all mpl figures and registered qt windows """
try:
all_figures = get_all_figures()
all_qt4wins = get_all_qt4_wins()
all_wins = all_qt4wins + [get_figure_window(fig) for fig in all_figures]
return all_wins
except AttributeError as ex:
ut.printex(ex, 'probably using a windowless backend',
iswarning=True)
return []
#@profile
def all_figures_tile(max_rows=None, row_first=True, no_tile=False,
monitor_num=None, percent_w=None, percent_h=None,
hide_toolbar=True):
"""
    Lays out all figures in a grid. If wh is a scalar, a golden ratio is used.
"""
#print('[plottool_ibeis] all_figures_tile()')
if no_tile:
return
current_backend = mpl.get_backend()
if not current_backend.startswith('Qt'):
#print('current_backend=%r is not a Qt backend. cannot tile.' % current_backend)
return
all_wins = get_all_windows()
num_wins = len(all_wins)
if num_wins == 0:
return
from plottool_ibeis import screeninfo
valid_positions = screeninfo.get_valid_fig_positions(num_wins, max_rows,
row_first, monitor_num,
percent_w=percent_w,
percent_h=percent_h)
QMainWin = get_main_win_base()
for ix, win in enumerate(all_wins):
isqt4_mpl = isinstance(win, QMainWin)
from guitool_ibeis.__PYQT__ import QtGui # NOQA
from guitool_ibeis.__PYQT__ import QtWidgets # NOQA
isqt4_back = isinstance(win, QtWidgets.QMainWindow)
isqt4_widget = isinstance(win, QtWidgets.QWidget)
(x, y, w, h) = valid_positions[ix]
#printDBG('tile %d-th win: xywh=%r' % (ix, (x, y, w, h)))
if not isqt4_mpl and not isqt4_back and not isqt4_widget:
raise NotImplementedError('%r-th Backend %r is not a Qt Window' %
(ix, win))
try:
if hide_toolbar:
toolbar = win.findChild(QtWidgets.QToolBar)
toolbar.setVisible(False)
win.setGeometry(x, y, w, h)
except Exception as ex:
ut.printex(ex)
def all_figures_bring_to_front():
try:
all_figures = get_all_figures()
for fig in iter(all_figures):
bring_to_front(fig)
except Exception as ex:
if not hasattr(fig, '_no_raise_plottool_ibeis'):
ut.printex(ex, iswarning=True)
def close_all_figures():
print('[pt] close_all_figures')
all_figures = get_all_figures()
for fig in iter(all_figures):
close_figure(fig)
def close_figure(fig):
print('[pt] close_figure')
fig.clf()
fig.df2_closed = True
qtwin = get_figure_window(fig)
qtwin.close()
def get_figure_window(fig):
try:
qwin = fig.canvas.manager.window
except AttributeError:
qwin = fig.canvas.window()
return qwin
def bring_to_front(fig):
if VERBOSE:
print('[pt] bring_to_front')
#what is difference between show and show normal?
qtwin = get_figure_window(fig)
qtwin.raise_()
#if not ut.WIN32:
# NOT sure on the correct order of these
# can cause the figure geometry to be unset
from guitool_ibeis.__PYQT__.QtCore import Qt
qtwin.activateWindow()
qtwin.setWindowFlags(Qt.WindowStaysOnTopHint)
qtwin.setWindowFlags(Qt.WindowFlags(0))
qtwin.show()
def show():
if VERBOSE:
print('[pt] show')
all_figures_show()
all_figures_bring_to_front()
#plt.show()
def reset():
if VERBOSE:
print('[pt] reset')
close_all_figures()
def draw():
if VERBOSE:
print('[pt] draw')
all_figures_show()
def update():
if VERBOSE:
print('[pt] update')
draw()
all_figures_bring_to_front()
def iupdate():
if VERBOSE:
print('[pt] iupdate')
if ut.inIPython():
update()
iup = iupdate
def present(*args, **kwargs):
"""
    Basically calls show if not embedded.
Kwargs:
max_rows, row_first, no_tile, monitor_num, percent_w, percent_h,
hide_toolbar
CommandLine:
python -m plottool_ibeis.fig_presenter present
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.fig_presenter import * # NOQA
>>> result = present()
>>> print(result)
>>> import plottool_ibeis as pt
>>> pt.show_if_requested()
"""
if VERBOSE:
print('[pt] present')
if not ut.get_argflag('--noshow'):
#print('[fig_presenter] Presenting figures...')
#with warnings.catch_warnings():
# warnings.simplefilter("ignore")
all_figures_tile(*args, **kwargs)
# Both of these lines cause the weird non-refresh black border behavior
all_figures_show()
all_figures_bring_to_front()
| {
"repo_name": "Erotemic/plottool",
"path": "plottool_ibeis/fig_presenter.py",
"copies": "1",
"size": "8747",
"license": "apache-2.0",
"hash": 2005018749097487400,
"line_mean": 27.3993506494,
"line_max": 93,
"alpha_frac": 0.5912884418,
"autogenerated": false,
"ratio": 3.416796875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.949619666589232,
"avg_score": 0.002377730181535859,
"num_lines": 308
} |
from __future__ import absolute_import, division, print_function
import sys
import traceback
from PyQt4.QtCore import (QAbstractItemModel, QModelIndex, QVariant, QString,
Qt, QObject)
# Decorator to help catch errors that QT won't report
def report_thread_error(fn):
def report_thread_error_wrapper(*args, **kwargs):
try:
ret = fn(*args, **kwargs)
return ret
except Exception as ex:
print('\n\n *!!* Thread Raised Exception: ' + str(ex))
print('\n\n *!!* Thread Exception Traceback: \n\n' + traceback.format_exc())
sys.stdout.flush()
et, ei, tb = sys.exc_info()
raise
return report_thread_error_wrapper
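# Usage sketch for report_thread_error (assumption, not from the original
# source): wrap any Qt-invoked callable so the exception and traceback are
# printed even when Qt would not report them, e.g.
#   @report_thread_error
#   def on_button_clicked():
#       raise RuntimeError('visible on stdout with a full traceback')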
class IBEIS_QTable(QAbstractItemModel):
""" Convention states only items with column index 0 can have children """
@report_thread_error
def __init__(self, ibs,
tblname='gids',
tblcols=['gid', 'gname'],
fancycols_dict={},
tbleditable=[],
parent=None):
super(IBEIS_QTable, self).__init__(parent)
self.ibs = ibs
self.tblname = tblname
self.fancycols_dict = fancycols_dict
@report_thread_error
def index2_tableitem(self, index=QModelIndex()):
""" Internal helper method """
if index.isValid():
item = index.internalPointer()
if item:
return item
return None
#-----------
# Overloaded ItemModel Read Functions
@report_thread_error
def rowCount(self, parent=QModelIndex()):
parentPref = self.index2_tableitem(parent)
return parentPref.qt_row_count()
@report_thread_error
def columnCount(self, parent=QModelIndex()):
parentPref = self.index2_tableitem(parent)
return parentPref.qt_col_count()
@report_thread_error
def data(self, index, role=Qt.DisplayRole):
""" Returns the data stored under the given role
for the item referred to by the index. """
if not index.isValid():
return QVariant()
if role != Qt.DisplayRole and role != Qt.EditRole:
return QVariant()
nodePref = self.index2_tableitem(index)
data = nodePref.qt_get_data(index.column())
var = QVariant(data)
#print('--- data() ---')
#print('role = %r' % role)
#print('data = %r' % data)
#print('type(data) = %r' % type(data))
if isinstance(data, float):
var = QVariant(QString.number(data, format='g', precision=6))
if isinstance(data, bool):
var = QVariant(data).toString()
if isinstance(data, int):
var = QVariant(data).toString()
#print('var= %r' % var)
#print('type(var)= %r' % type(var))
return var
@report_thread_error
def index(self, row, col, parent=QModelIndex()):
""" Returns the index of the item in the model specified
by the given row, column and parent index. """
if parent.isValid() and parent.column() != 0:
return QModelIndex()
parentPref = self.index2_tableitem(parent)
childPref = parentPref.qt_get_child(row)
if childPref:
return self.createIndex(row, col, childPref)
else:
return QModelIndex()
@report_thread_error
def parent(self, index=None):
""" Returns the parent of the model item with the given index.
If the item has no parent, an invalid QModelIndex is returned. """
if index is None: # Overload with QObject.parent()
return QObject.parent(self)
if not index.isValid():
return QModelIndex()
nodePref = self.index2_tableitem(index)
parentPref = nodePref.qt_get_parent()
if parentPref == self.rootPref:
return QModelIndex()
return self.createIndex(parentPref.qt_parents_index_of_me(), 0, parentPref)
#-----------
# Overloaded ItemModel Write Functions
@report_thread_error
def flags(self, index):
""" Returns the item flags for the given index. """
if index.column() == 0:
            # The first column is just a label and unchangeable
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
if not index.isValid():
return Qt.ItemFlag(0)
item_col, item_rowid = self.index2_itemdata(index)
if item_rowid:
if item_col in self.col_editable:
return Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
return Qt.ItemFlag(0)
@report_thread_error
def setData(self, index, data, role=Qt.EditRole):
""" Sets the role data for the item at index to value. """
if role != Qt.EditRole:
return False
#print('--- setData() ---')
#print('role = %r' % role)
#print('data = %r' % data)
#print('type(data) = %r' % type(data))
leafPref = self.index2_tableitem(index)
result = leafPref.qt_set_leaf_data(data)
if result is True:
self.dataChanged.emit(index, index)
return result
@report_thread_error
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
column_key = self.table_headers[section]
column_name = self.fancycols_dict.get(column_key, column_key)
return QVariant(column_name)
return QVariant()
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/abstract_tables.py",
"copies": "1",
"size": "5537",
"license": "apache-2.0",
"hash": -2786170446205484000,
"line_mean": 36.6666666667,
"line_max": 88,
"alpha_frac": 0.5867798447,
"autogenerated": false,
"ratio": 3.963493199713672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5050273044413671,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from py._code.code import FormattedExcinfo
import py
import warnings
import inspect
import _pytest
from _pytest._code.code import TerminalRepr
from _pytest.compat import (
NOTSET, exc_clear, _format_args,
getfslineno, get_real_func,
is_generator, isclass, getimfunc,
getlocation, getfuncargnames,
safe_getattr,
)
from _pytest.outcomes import fail, TEST_OUTCOME
from _pytest.compat import FuncargnamesCompatAttr
if sys.version_info[:2] == (2, 6):
from ordereddict import OrderedDict
else:
from collections import OrderedDict
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
'class': _pytest.python.Class,
'module': _pytest.python.Module,
'function': _pytest.main.Item,
})
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
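# Descriptive note (not from the original source): scopeproperty() exposes a
# method as a property only when the current request scope allows it (see
# scope2props above); e.g. accessing request.module from a session-scoped
# request raises AttributeError with a helpful message.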
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
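    # Illustrative sketch (assumption, not from the original source): direct
    # parametrization that ends up in callspec.funcargs looks like
    #   def pytest_generate_tests(metafunc):
    #       metafunc.parametrize("x", [0, 1])   # "x" is not a declared fixture
    # and is rewritten below into callspec.params plus an artificial FixtureDef.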
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
# Algorithm for sorting on a per-parametrized resource setup basis.
# It is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes so as to minimize the number of "high scope"
# setups and teardowns.
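# Illustrative sketch (assumption, not from the original source): for items
# collected as test_x[a], test_x[b], test_y[a], test_y[b], all parametrized on
# one session-scoped fixture, the reordering below keeps the two [a] items
# adjacent and the two [b] items adjacent, so the fixture is set up and torn
# down once per parameter value rather than once per test.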
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
if newargkeys: # found a slicing key
slicing_argkey, _ = newargkeys.popitem()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
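    # Descriptive note (not from the original source): argnames are the direct
    # fixture arguments of the test function, names_closure the transitive
    # closure of all required fixture names, and name2fixturedefs maps each
    # name in that closure to its list of FixtureDef objects.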
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_values = {} # argname -> fixture value
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
# backward incompatible note: now a readonly property
return list(self._pyfuncitem._fixtureinfo.names_closure)
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(
deprecated.GETFUNCARGVALUE,
DeprecationWarning,
stacklevel=2)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfixturevalue(fixturedef)
self._fixture_values[argname] = result
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfixturevalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
if fixturedef.params is not None:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for the "
"current test.\n\nRequested fixture '{0}' defined in:\n{1}"
"\n\nRequested here:\n{2}:{3}".format(
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" % (
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._fixture_values = request._fixture_values
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
raise ValueError(
"{0} {1}has an unsupported scope value '{2}'".format(
descr, 'from {0} '.format(where) if where else '',
scope)
)
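# --- Editor's illustrative sketch; not part of the original pytest source ---
# ``scopes`` is ordered broadest first, so a larger index means a narrower scope,
# and ``scopemismatch(current, new)`` flags a broad fixture pulling in a narrow one:
#
#     scopes.index("session")                # -> 0
#     scopes.index("function")               # -> 3
#     scopemismatch("session", "function")   # -> True  (disallowed)
#     scopemismatch("function", "session")   # -> False (narrow may use broad)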
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file %s, line %s" % (fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist and name not in available:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" % (", ".join(sorted(available)),)
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
lines[0].strip()), red=True)
for line in lines[1:]:
tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
line.strip()), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
def teardown():
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
res = fixturefunc(**kwargs)
return res
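# --- Editor's illustrative sketch; not part of the original pytest source ---
# call_fixture_func() splits a generator fixture at its single ``yield``: the code
# before it is setup, the code after it becomes the registered teardown.  A
# hypothetical fixture exercising that path:
#
#     @pytest.fixture
#     def scratch_file(tmpdir):
#         handle = tmpdir.join("scratch.txt").open("w")   # setup
#         yield handle                                    # value given to the test
#         handle.close()                                  # teardown via addfinalizer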
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr='fixture {0}'.format(func.__name__),
where=baseid
)
self.params = params
        startindex = 1 if unittest else None
self.argnames = getfuncargnames(func, startindex=startindex)
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
exceptions = []
try:
while self._finalizer:
try:
func = self._finalizer.pop()
func()
except:
exceptions.append(sys.exc_info())
if exceptions:
e = exceptions[0]
del exceptions # ensure we don't keep all frames alive because of the traceback
py.builtin._reraise(*e)
finally:
ihook = self._fixturemanager.session.ihook
ihook.pytest_fixture_post_finalizer(fixturedef=self)
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
ihook = self._fixturemanager.session.ihook
return ihook.pytest_fixture_setup(fixturedef=self, request=request)
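    # --- Editor's illustrative sketch; not part of the original pytest source ---
    # The cache key used by execute() is the parametrization index, so a fixture
    # with ``params=[1, 2]`` is reused while the index stays the same and torn
    # down / re-created when it changes, e.g. for the execution order
    # test_a[1], test_b[1], test_a[2]:
    #
    #     test_a[1] -> no cached_result            -> setup with param 1
    #     test_b[1] -> cache key matches (index 0) -> cached value reused
    #     test_a[2] -> cache key differs (index 1) -> finish(), setup with param 2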
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
class FixtureFunctionMarker:
def __init__(self, scope, params, autouse=False, ids=None, name=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.ids = ids
self.name = name
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define a
fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module" or "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
Fixtures can optionally provide their values to test functions using a ``yield`` statement,
instead of ``return``. In this case, the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome. A fixture function must yield exactly once.
"""
if callable(scope) and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
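# --- Editor's illustrative sketch; not part of the original pytest source ---
# Typical uses of the decorator defined above (``connect`` is a placeholder):
#
#     @pytest.fixture
#     def simple():                       # function-scoped, requested by name
#         return 42
#
#     @pytest.fixture(scope="module", params=["sqlite", "postgres"], ids=["lite", "pg"])
#     def db(request):                    # one invocation per parameter
#         return connect(request.param)
#
#     @pytest.fixture(name="config")
#     def fixture_config():               # avoids shadowing the name "config"
#         return {"debug": True}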
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
if callable(scope) and params is None and not autouse:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, ids=ids, name=name)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
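# --- Editor's illustrative sketch; not part of the original pytest source ---
# Any test (or fixture) can request the session-scoped fixture above to inspect
# command-line options; "--runslow" is a hypothetical option registered in a
# conftest.py via parser.addoption():
#
#     def test_slow_path(pytestconfig):
#         if not pytestconfig.getoption("--runslow", default=False):
#             pytest.skip("needs --runslow")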
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
argnames = getfuncargnames(func, cls=cls)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return a arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
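    # --- Editor's illustrative sketch; not part of the original pytest source ---
    # The closure above is transitive: a test requesting only "db", whose fixture
    # itself requires "tmpdir", collected under a node with an autouse fixture
    # "cleandir", would end up with (hypothetical names):
    #
    #     fixturenames_closure == ["cleandir", "db", "tmpdir"]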
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
from _pytest import deprecated
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
'and be decorated with @pytest.fixture:\n%s' % name
assert not name.startswith(self._argprefix), msg
fixture_def = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
| {
"repo_name": "ryanmockabee/golfr",
"path": "flask/lib/python3.6/site-packages/_pytest/fixtures.py",
"copies": "2",
"size": "45173",
"license": "mit",
"hash": -5533879496215085000,
"line_mean": 38.940760389,
"line_max": 108,
"alpha_frac": 0.6071104421,
"autogenerated": false,
"ratio": 4.392551536367172,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5999661978467171,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import _pytest._code
import py
import pytest
def test_ne():
code1 = _pytest._code.Code(compile('foo = "bar"', '', 'exec'))
assert code1 == code1
code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file():
name = 'abc-123'
co_code = compile("pass\n", name, 'exec')
assert co_code.co_filename == name
code = _pytest._code.Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_with_class():
class A(object):
pass
pytest.raises(TypeError, "_pytest._code.Code(A)")
if True:
def x():
pass
def test_code_fullsource():
code = _pytest._code.Code(x)
full = code.fullsource
assert 'test_code_fullsource()' in str(full)
def test_code_source():
code = _pytest._code.Code(x)
src = code.source()
expected = """def x():
pass"""
assert str(src) == expected
def test_frame_getsourcelineno_myself():
def func():
return sys._getframe(0)
f = func()
f = _pytest._code.Frame(f)
source, lineno = f.code.fullsource, f.lineno
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource():
def func():
return sys._getframe(0)
f = func()
f = _pytest._code.Frame(f)
prop = f.code.__class__.fullsource
try:
f.code.__class__.fullsource = None
assert f.statement == _pytest._code.Source("")
finally:
f.code.__class__.fullsource = prop
def test_code_from_func():
co = _pytest._code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = pytest.raises(Exception, f)
str(excinfo)
if sys.version_info[0] < 3:
unicode(excinfo)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason='python 2 only issue')
def test_unicode_handling_syntax_error():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise SyntaxError('invalid syntax', (None, 1, 3, value))
excinfo = pytest.raises(Exception, f)
str(excinfo)
if sys.version_info[0] < 3:
unicode(excinfo)
def test_code_getargs():
def f1(x):
pass
c1 = _pytest._code.Code(f1)
assert c1.getargs(var=True) == ('x',)
def f2(x, *y):
pass
c2 = _pytest._code.Code(f2)
assert c2.getargs(var=True) == ('x', 'y')
def f3(x, **z):
pass
c3 = _pytest._code.Code(f3)
assert c3.getargs(var=True) == ('x', 'z')
def f4(x, *y, **z):
pass
c4 = _pytest._code.Code(f4)
assert c4.getargs(var=True) == ('x', 'y', 'z')
def test_frame_getargs():
def f1(x):
return sys._getframe(0)
fr1 = _pytest._code.Frame(f1('a'))
assert fr1.getargs(var=True) == [('x', 'a')]
def f2(x, *y):
return sys._getframe(0)
fr2 = _pytest._code.Frame(f2('a', 'b', 'c'))
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
def f3(x, **z):
return sys._getframe(0)
fr3 = _pytest._code.Frame(f3('a', b='c'))
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
def f4(x, *y, **z):
return sys._getframe(0)
fr4 = _pytest._code.Frame(f4('a', 'b', c='d'))
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
('z', {'c': 'd'})]
class TestExceptionInfo(object):
def test_bad_getsource(self):
try:
if False:
pass
else:
assert False
except AssertionError:
exci = _pytest._code.ExceptionInfo()
assert exci.getrepr()
class TestTracebackEntry(object):
def test_getsource(self):
try:
if False:
pass
else:
assert False
except AssertionError:
exci = _pytest._code.ExceptionInfo()
entry = exci.traceback[0]
source = entry.getsource()
assert len(source) == 6
assert 'assert False' in source[5]
| {
"repo_name": "MichaelAquilina/pytest",
"path": "testing/code/test_code.py",
"copies": "1",
"size": "4319",
"license": "mit",
"hash": 4406300711697776600,
"line_mean": 23.8218390805,
"line_max": 76,
"alpha_frac": 0.5579995369,
"autogenerated": false,
"ratio": 3.1874538745387455,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42454534114387454,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import pytest
from _pytest.compat import is_generator, get_real_func
def test_is_generator():
def zap():
yield
def foo():
pass
assert is_generator(zap)
assert not is_generator(foo)
def test_real_func_loop_limit():
class Evil(object):
def __init__(self):
self.left = 1000
def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
def __getattr__(self, attr):
if not self.left:
raise RuntimeError('its over')
self.left -= 1
return self
evil = Evil()
with pytest.raises(ValueError):
res = get_real_func(evil)
print(res)
@pytest.mark.skipif(sys.version_info < (3, 4),
reason='asyncio available in Python 3.4+')
def test_is_generator_asyncio(testdir):
testdir.makepyfile("""
from _pytest.compat import is_generator
import asyncio
@asyncio.coroutine
def baz():
yield from [1,2,3]
def test_is_generator_asyncio():
assert not is_generator(baz)
""")
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(['*1 passed*'])
@pytest.mark.skipif(sys.version_info < (3, 5),
reason='async syntax available in Python 3.5+')
def test_is_generator_async_syntax(testdir):
testdir.makepyfile("""
from _pytest.compat import is_generator
def test_is_generator_py35():
async def foo():
await foo()
async def bar():
pass
assert not is_generator(foo)
assert not is_generator(bar)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
| {
"repo_name": "MichaelAquilina/pytest",
"path": "testing/test_compat.py",
"copies": "2",
"size": "1943",
"license": "mit",
"hash": -8615409309966627000,
"line_mean": 24.5657894737,
"line_max": 67,
"alpha_frac": 0.5774575399,
"autogenerated": false,
"ratio": 4.107822410147992,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5685279950047992,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import pytest
from _pytest.compat import is_generator, get_real_func, safe_getattr
from _pytest.outcomes import OutcomeException
def test_is_generator():
def zap():
yield
def foo():
pass
assert is_generator(zap)
assert not is_generator(foo)
def test_real_func_loop_limit():
class Evil(object):
def __init__(self):
self.left = 1000
def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
def __getattr__(self, attr):
if not self.left:
raise RuntimeError('its over')
self.left -= 1
return self
evil = Evil()
with pytest.raises(ValueError):
res = get_real_func(evil)
print(res)
@pytest.mark.skipif(sys.version_info < (3, 4),
reason='asyncio available in Python 3.4+')
def test_is_generator_asyncio(testdir):
testdir.makepyfile("""
from _pytest.compat import is_generator
import asyncio
@asyncio.coroutine
def baz():
yield from [1,2,3]
def test_is_generator_asyncio():
assert not is_generator(baz)
""")
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(['*1 passed*'])
@pytest.mark.skipif(sys.version_info < (3, 5),
reason='async syntax available in Python 3.5+')
def test_is_generator_async_syntax(testdir):
testdir.makepyfile("""
from _pytest.compat import is_generator
def test_is_generator_py35():
async def foo():
await foo()
async def bar():
pass
assert not is_generator(foo)
assert not is_generator(bar)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
class ErrorsHelper(object):
@property
def raise_exception(self):
        raise Exception('exception should be caught')
@property
def raise_fail(self):
        pytest.fail('fail should be caught')
def test_helper_failures():
helper = ErrorsHelper()
with pytest.raises(Exception):
helper.raise_exception
with pytest.raises(OutcomeException):
helper.raise_fail
def test_safe_getattr():
helper = ErrorsHelper()
assert safe_getattr(helper, 'raise_exception', 'default') == 'default'
assert safe_getattr(helper, 'raise_fail', 'default') == 'default'
| {
"repo_name": "Jayflux/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_compat.py",
"copies": "14",
"size": "2612",
"license": "mpl-2.0",
"hash": 7726085993476138000,
"line_mean": 24.8613861386,
"line_max": 74,
"alpha_frac": 0.6049004594,
"autogenerated": false,
"ratio": 4.087636932707356,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import datetime
import time
import warnings
import numpy as np
import astropy.table
# See pixsim.py
import astropy.time
from astropy.io import fits
import astropy.units as u
import desimodel.io
import desisim.simexp
import desisim.io
import desispec.io
import desiutil.depend
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
if 'DESI_ROOT' in os.environ:
_default_arcfile = os.path.join(os.getenv('DESI_ROOT'),
'spectro', 'templates', 'calib', 'v0.4', 'arc-lines-average-in-vacuum-from-winlight-20170118.fits')
else:
_default_arcfile = None
#- Required
parser.add_argument('--expid', type=int, help="exposure ID")
parser.add_argument('--night', type=str, help="YEARMMDD")
#- Optional
parser.add_argument('--arcfile', type=str, help="input arc calib spec file",
default=_default_arcfile)
parser.add_argument('--simspec', type=str, help="output simspec file")
parser.add_argument('--fibermap', type=str, help="output fibermap file")
parser.add_argument('--outdir', type=str, help="output directory")
parser.add_argument('--nspec', type=int, default=5000, help="number of spectra to include")
parser.add_argument('--nonuniform', action='store_true', help="Include calibration screen non-uniformity")
parser.add_argument('--clobber', action='store_true', help="overwrite any pre-existing output files")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
if args.simspec is None:
args.simspec = desisim.io.findfile('simspec', args.night, args.expid,
outdir=args.outdir)
if args.fibermap is None:
#- put in same directory as simspec by default
filedir = os.path.dirname(os.path.abspath(args.simspec))
filename = os.path.basename(desispec.io.findfile('fibermap', args.night, args.expid))
args.fibermap = os.path.join(filedir, filename)
return args
def main(args=None):
'''
TODO: document
Note: this bypasses specsim since we don't have an arclamp model in
surface brightness units; we only have electrons on the CCD
'''
import desiutil.log
log = desiutil.log.get_logger()
from desiutil.iers import freeze_iers
freeze_iers()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
log.info('reading arc data from {}'.format(args.arcfile))
arcdata = astropy.table.Table.read(args.arcfile)
wave, phot, fibermap = \
desisim.simexp.simarc(arcdata, nspec=args.nspec, nonuniform=args.nonuniform)
log.info('Writing {}'.format(args.fibermap))
fibermap.meta['NIGHT'] = args.night
fibermap.meta['EXPID'] = args.expid
fibermap.meta['EXTNAME'] = 'FIBERMAP'
fibermap.write(args.fibermap, overwrite=args.clobber)
#- TODO: explain bypassing desisim.io.write_simspec
header = fits.Header()
desiutil.depend.add_dependencies(header)
header['EXPID'] = args.expid
header['NIGHT'] = args.night
header['FLAVOR'] = 'arc'
header['DOSVER'] = 'SIM'
header['EXPTIME'] = 5 #- TODO: add exptime support
#- TODO: DATE-OBS on night instead of now
tx = astropy.time.Time(datetime.datetime(*time.gmtime()[0:6]))
header['DATE-OBS'] = tx.utc.isot
desisim.io.write_simspec_arc(args.simspec, wave, phot, header, fibermap, overwrite=args.clobber)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/newarc.py",
"copies": "1",
"size": "3656",
"license": "bsd-3-clause",
"hash": 2453513553131931600,
"line_mean": 33.4905660377,
"line_max": 111,
"alpha_frac": 0.6742341357,
"autogenerated": false,
"ratio": 3.3664825046040514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4540716640304051,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import time
import numpy as np
import astropy.table
import astropy.time
import astropy.units as u
import astropy.io.fits as pyfits
import desisim.specsim
import desisim.simexp
import desisim.obs
import desisim.io
import desisim.util
from desiutil.log import get_logger
import desispec.io
import desispec.io.util
import desimodel.io
import desitarget
from desispec.spectra import Spectra
from desispec.resolution import Resolution
def sim_spectra(wave, flux, program, spectra_filename, obsconditions=None,
sourcetype=None, targetid=None, redshift=None, expid=0, seed=0, skyerr=0.0, ra=None,
dec=None, meta=None, fibermap_columns=None, fullsim=False, use_poisson=True, specsim_config_file="desi", dwave_out=None, save_resolution=True):
"""
    Simulate spectra from an input set of wavelengths and fluxes and write a FITS file in the Spectra format that can
    be used as input to the redshift fitter.
Args:
wave : 1D np.array of wavelength in Angstrom (in vacuum) in observer frame (i.e. redshifted)
flux : 1D or 2D np.array. 1D array must have same size as wave, 2D array must have shape[1]=wave.size
flux has to be in units of 10^-17 ergs/s/cm2/A
spectra_filename : path to output FITS file in the Spectra format
program : dark, lrg, qso, gray, grey, elg, bright, mws, bgs
ignored if obsconditions is not None
Optional:
        obsconditions : dictionary of observation conditions with SEEING EXPTIME AIRMASS MOONFRAC MOONALT MOONSEP
        sourcetype : list of strings, allowed values are (sky,elg,lrg,qso,bgs,star), type of sources, used for fiber aperture loss, default is star
targetid : list of targetids for each target. default of None has them generated as str(range(nspec))
        redshift : list/array with each index being the redshift for that target
expid : this expid number will be saved in the Spectra fibermap
seed : random seed
skyerr : fractional sky subtraction error
ra : numpy array with targets RA (deg)
dec : numpy array with targets Dec (deg)
        meta : dictionary, saved in primary fits header of the spectra file
fibermap_columns : add these columns to the fibermap
fullsim : if True, write full simulation data in extra file per camera
use_poisson : if False, do not use numpy.random.poisson to simulate the Poisson noise. This is useful to get reproducible random
realizations.
        save_resolution : if True it will save the Resolution matrix for each spectrum.
            If False, the resolution is not written to the output file and a dictionary of per-camera resolution matrices is returned instead (useful for mocks to save disk space).
"""
log = get_logger()
if len(flux.shape)==1 :
flux=flux.reshape((1,flux.size))
nspec=flux.shape[0]
log.info("Starting simulation of {} spectra".format(nspec))
if sourcetype is None :
sourcetype = np.array(["star" for i in range(nspec)])
log.debug("sourcetype = {}".format(sourcetype))
tileid = 0
telera = 0
teledec = 0
dateobs = time.gmtime()
night = desisim.obs.get_night(utc=dateobs)
program = program.lower()
frame_fibermap = desispec.io.fibermap.empty_fibermap(nspec)
frame_fibermap.meta["FLAVOR"]="custom"
frame_fibermap.meta["NIGHT"]=night
frame_fibermap.meta["EXPID"]=expid
# add DESI_TARGET
tm = desitarget.targetmask.desi_mask
frame_fibermap['DESI_TARGET'][sourcetype=="star"]=tm.STD_FAINT
frame_fibermap['DESI_TARGET'][sourcetype=="lrg"]=tm.LRG
frame_fibermap['DESI_TARGET'][sourcetype=="elg"]=tm.ELG
frame_fibermap['DESI_TARGET'][sourcetype=="qso"]=tm.QSO
frame_fibermap['DESI_TARGET'][sourcetype=="sky"]=tm.SKY
frame_fibermap['DESI_TARGET'][sourcetype=="bgs"]=tm.BGS_ANY
if fibermap_columns is not None :
for k in fibermap_columns.keys() :
frame_fibermap[k] = fibermap_columns[k]
if targetid is None:
targetid = np.arange(nspec).astype(int)
# add TARGETID
frame_fibermap['TARGETID'] = targetid
# spectra fibermap has two extra fields : night and expid
# This would be cleaner if desispec would provide the spectra equivalent
# of desispec.io.empty_fibermap()
spectra_fibermap = desispec.io.empty_fibermap(nspec)
spectra_fibermap = desispec.io.util.add_columns(spectra_fibermap,
['NIGHT', 'EXPID', 'TILEID'],
[np.int32(night), np.int32(expid), np.int32(tileid)],
)
for s in range(nspec):
for tp in frame_fibermap.dtype.fields:
spectra_fibermap[s][tp] = frame_fibermap[s][tp]
if ra is not None :
spectra_fibermap["TARGET_RA"] = ra
spectra_fibermap["FIBER_RA"] = ra
if dec is not None :
spectra_fibermap["TARGET_DEC"] = dec
spectra_fibermap["FIBER_DEC"] = dec
if obsconditions is None:
if program in ['dark', 'lrg', 'qso']:
obsconditions = desisim.simexp.reference_conditions['DARK']
elif program in ['elg', 'gray', 'grey']:
obsconditions = desisim.simexp.reference_conditions['GRAY']
elif program in ['mws', 'bgs', 'bright']:
obsconditions = desisim.simexp.reference_conditions['BRIGHT']
else:
raise ValueError('unknown program {}'.format(program))
elif isinstance(obsconditions, str):
try:
obsconditions = desisim.simexp.reference_conditions[obsconditions.upper()]
except KeyError:
raise ValueError('obsconditions {} not in {}'.format(
obsconditions.upper(),
list(desisim.simexp.reference_conditions.keys())))
try:
params = desimodel.io.load_desiparams()
wavemin = params['ccd']['b']['wavemin']
wavemax = params['ccd']['z']['wavemax']
except KeyError:
wavemin = desimodel.io.load_throughput('b').wavemin
wavemax = desimodel.io.load_throughput('z').wavemax
if specsim_config_file == "eboss":
wavemin = 3500
wavemax = 10000
if wave[0] > wavemin:
log.warning('Minimum input wavelength {}>{}; padding with zeros'.format(
wave[0], wavemin))
dwave = wave[1] - wave[0]
npad = int((wave[0] - wavemin)/dwave + 1)
wavepad = np.arange(npad) * dwave
wavepad += wave[0] - dwave - wavepad[-1]
fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
wave = np.concatenate([wavepad, wave])
flux = np.hstack([fluxpad, flux])
assert flux.shape[1] == len(wave)
assert np.allclose(dwave, np.diff(wave))
assert wave[0] <= wavemin
if wave[-1] < wavemax:
log.warning('Maximum input wavelength {}<{}; padding with zeros'.format(
wave[-1], wavemax))
dwave = wave[-1] - wave[-2]
npad = int( (wavemax - wave[-1])/dwave + 1 )
wavepad = wave[-1] + dwave + np.arange(npad)*dwave
fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
wave = np.concatenate([wave, wavepad])
flux = np.hstack([flux, fluxpad])
assert flux.shape[1] == len(wave)
assert np.allclose(dwave, np.diff(wave))
assert wavemax <= wave[-1]
ii = (wavemin <= wave) & (wave <= wavemax)
flux_unit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 )
wave = wave[ii]*u.Angstrom
flux = flux[:,ii]*flux_unit
sim = desisim.simexp.simulate_spectra(wave, flux, fibermap=frame_fibermap,
obsconditions=obsconditions, redshift=redshift, seed=seed,
psfconvolve=True, specsim_config_file=specsim_config_file, dwave_out=dwave_out)
random_state = np.random.RandomState(seed)
sim.generate_random_noise(random_state,use_poisson=use_poisson)
scale=1e17
specdata = None
resolution={}
for camera in sim.instrument.cameras:
R = Resolution(camera.get_output_resolution_matrix())
resolution[camera.name] = np.tile(R.to_fits_array(), [nspec, 1, 1])
if not save_resolution :
resolution[camera.name] = R.to_fits_array()
skyscale = skyerr * random_state.normal(size=sim.num_fibers)
if fullsim :
for table in sim.camera_output :
band = table.meta['name'].strip()[0]
table_filename=spectra_filename.replace(".fits","-fullsim-{}.fits".format(band))
table.write(table_filename,format="fits",overwrite=True)
print("wrote",table_filename)
if specsim_config_file == "eboss":
for table in sim._eboss_camera_output:
wave = table['wavelength'].astype(float)
flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
if np.any(skyscale):
flux += ((table['num_sky_electrons']*skyscale)*table['flux_calibration']).T.astype(float)
ivar = table['flux_inverse_variance'].T.astype(float)
band = table.meta['name'].strip()[0]
flux = flux * scale
ivar = ivar / scale**2
mask = np.zeros(flux.shape).astype(int)
spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
resolution_data=None,
mask={band : mask},
fibermap=spectra_fibermap,
meta=meta,
single=True)
if specdata is None :
specdata = spec
else :
specdata.update(spec)
else:
for table in sim.camera_output :
wave = table['wavelength'].astype(float)
flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
if np.any(skyscale):
flux += ((table['num_sky_electrons']*skyscale)*table['flux_calibration']).T.astype(float)
ivar = table['flux_inverse_variance'].T.astype(float)
band = table.meta['name'].strip()[0]
flux = flux * scale
ivar = ivar / scale**2
mask = np.zeros(flux.shape).astype(int)
if not save_resolution :
spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
resolution_data=None,
mask={band : mask},
fibermap=spectra_fibermap,
meta=meta,
single=True)
else :
spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
resolution_data={band : resolution[band]},
mask={band : mask},
fibermap=spectra_fibermap,
meta=meta,
single=True)
if specdata is None :
specdata = spec
else :
specdata.update(spec)
desispec.io.write_spectra(spectra_filename, specdata)
log.info('Wrote '+spectra_filename)
# need to clear the simulation buffers that keeps growing otherwise
# because of a different number of fibers each time ...
desisim.specsim._simulators.clear()
desisim.specsim._simdefaults.clear()
if not save_resolution :
return resolution
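# --- Editor's illustrative sketch; not part of the original desisim source ---
# A minimal call of sim_spectra() on one synthetic flat spectrum; the output
# path is a placeholder and the flux level is arbitrary:
#
#     import numpy as np
#     wave = np.arange(3600.0, 9800.0, 0.8)        # Angstrom, observer frame
#     flux = np.full((1, wave.size), 1.0)          # units of 1e-17 erg/s/cm2/A
#     sim_spectra(wave, flux, "dark", "spectra-example.fits",
#                 sourcetype=np.array(["star"]), seed=1)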
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Fast simulation of spectra into the final DESI format (Spectra class) that can be directly used as
an input to the redshift fitter (redrock). The input file is an ASCII file with first column the wavelength in A (in vacuum, redshifted), the other columns are treated as spectral flux densities in units of 10^-17 ergs/s/cm2/A.""")
#- Required
parser.add_argument('-i','--input', type=str, required=True, help="Input spectra, ASCII or fits")
parser.add_argument('-o','--out-spectra', type=str, required=True, help="Output spectra")
#- Optional
parser.add_argument('--repeat', type=int, default=1, help="Duplicate the input spectra to have several random realizations")
#- Optional observing conditions to override program defaults
parser.add_argument('--program', type=str, default="DARK", help="Program (DARK, GRAY or BRIGHT)")
parser.add_argument('--seeing', type=float, default=None, help="Seeing FWHM [arcsec]")
parser.add_argument('--airmass', type=float, default=None, help="Airmass")
parser.add_argument('--exptime', type=float, default=None, help="Exposure time [sec]")
parser.add_argument('--moonfrac', type=float, default=None, help="Moon illumination fraction; 1=full")
parser.add_argument('--moonalt', type=float, default=None, help="Moon altitude [degrees]")
parser.add_argument('--moonsep', type=float, default=None, help="Moon separation to tile [degrees]")
parser.add_argument('--seed', type=int, default=0, help="Random seed")
parser.add_argument('--source-type', type=str, default=None, help="Source type (for fiber loss), among sky,elg,lrg,qso,bgs,star")
parser.add_argument('--skyerr', type=float, default=0.0, help="Fractional sky subtraction error")
parser.add_argument('--fullsim',action='store_true',help="write full simulation data in extra file per camera, for debugging")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
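# --- Editor's illustrative sketch; not part of the original desisim source ---
# Example invocation, assuming the entry point is installed as ``quickspectra``
# and ``input_spectra.txt`` is an ASCII file as described above (paths are
# placeholders):
#
#     quickspectra -i input_spectra.txt -o spectra-example.fits \
#         --program DARK --exptime 1000 --seed 1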
def main(args=None):
log = get_logger()
    if isinstance(args, (list, tuple, type(None))):
        args = parse(args)
if args.source_type is not None :
allowed=["sky","elg","lrg","qso","bgs","star"]
if not args.source_type in allowed :
log.error("source type has to be among {}".format(allowed))
sys.exit(12)
exptime = args.exptime
if exptime is None :
exptime = 1000. # sec
#- Generate obsconditions with args.program, then override as needed
obsconditions = desisim.simexp.reference_conditions[args.program.upper()]
if args.airmass is not None:
obsconditions['AIRMASS'] = args.airmass
if args.seeing is not None:
obsconditions['SEEING'] = args.seeing
if exptime is not None:
obsconditions['EXPTIME'] = exptime
if args.moonfrac is not None:
obsconditions['MOONFRAC'] = args.moonfrac
if args.moonalt is not None:
obsconditions['MOONALT'] = args.moonalt
if args.moonsep is not None:
obsconditions['MOONSEP'] = args.moonsep
# ascii version
isfits=False
hdulist=None
try :
hdulist=pyfits.open(args.input)
isfits=True
except (IOError,OSError) :
pass
if isfits :
log.info("Reading an input FITS file")
if 'WAVELENGTH' in hdulist:
input_wave = hdulist["WAVELENGTH"].data
elif "WAVE" in hdulist:
input_wave = hdulist["WAVE"].data
else:
log.error("need an HDU with EXTNAME='WAVELENGTH' with a 1D array/image of wavelength in A in vacuum")
sys.exit(12)
if not "FLUX" in hdulist :
log.error("need an HDU with EXTNAME='FLUX' with a 1D or 2D array/image of flux in units of 10^-17 ergs/s/cm2/A")
sys.exit(12)
input_flux = hdulist["FLUX"].data
if input_wave.size != input_flux.shape[1] :
log.error("WAVELENGTH size {} != FLUX shape[1] = {} (NAXIS1 in fits)")
hdulist.close()
else :
# read is ASCII
try :
tmp = np.loadtxt(args.input).T
except (ValueError,TypeError) :
log.error("could not read ASCII file, need at least two columns, separated by ' ', the first one for wavelength in A in vacuum, the other ones for flux in units of 10^-17 ergs/s/cm2/A, one column per spectrum.")
log.error("error message : {}".format(sys.exc_info()))
sys.exit(12)
if tmp.shape[0]<2 :
log.error("need at least two columns in ASCII file (one for wavelength in A in vacuum, one for flux in units of 10^-17 ergs/s/cm2/A")
sys.exit(12)
input_wave = tmp[0]
input_flux = tmp[1:]
if args.repeat>1 :
input_flux = np.tile(input_flux, (args.repeat,1 ))
log.info("input flux shape (after repeat) = {}".format(input_flux.shape))
else :
log.info("input flux shape = {}".format(input_flux.shape))
sourcetype=args.source_type
if sourcetype is not None and len(input_flux.shape)>1 :
nspec=input_flux.shape[0]
sourcetype=np.array([sourcetype for i in range(nspec)])
sim_spectra(input_wave, input_flux, args.program, obsconditions=obsconditions,
spectra_filename=args.out_spectra,seed=args.seed,sourcetype=sourcetype,
skyerr=args.skyerr,fullsim=args.fullsim)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/quickspectra.py",
"copies": "1",
"size": "17349",
"license": "bsd-3-clause",
"hash": 2186157357112628000,
"line_mean": 41.837037037,
"line_max": 266,
"alpha_frac": 0.6111591446,
"autogenerated": false,
"ratio": 3.660126582278481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9633721755932199,
"avg_score": 0.02751279418925655,
"num_lines": 405
} |
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import numpy as np
import astropy.table
import astropy.time
import astropy.units as u
from desisim.simexp import simscience, get_mock_spectra
import desisim.io
import desisim.util
from desiutil.log import get_logger
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#- Required
parser.add_argument('--fiberassign', type=str, required=True,
help="input fiberassign directory or tile file")
parser.add_argument('--mockdir', type=str, required=True,
help="directory with mock targets and truth")
parser.add_argument('--obslist', type=str, required=True,
help="input surveysim obslist file")
parser.add_argument('--expid', type=int, required=True, help="exposure ID")
#- Optional
parser.add_argument('--nside', help='healpixel organization scheme of the mock spectra', type=int, default=64)
parser.add_argument('--outdir', type=str, help="output directory")
parser.add_argument('--nspec', type=int, default=None, help="number of spectra to include")
parser.add_argument('--clobber', action='store_true', help="overwrite any pre-existing output files")
log = get_logger()
if options is None:
args = parser.parse_args()
log.info(' '.join(sys.argv))
else:
args = parser.parse_args(options)
log.info('newexp-mock '+' '.join(options))
return args
def main(args=None):
log = get_logger()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
if args.obslist.endswith('.ecsv'):
obslist = astropy.table.Table.read(args.obslist, format='ascii.ecsv')
else:
obslist = astropy.table.Table.read(args.obslist)
obs = obslist[obslist['EXPID'] == args.expid][0]
tileid = obs['TILEID']
night = obs['NIGHT']
program = obs['PROGRAM']
if os.path.isdir(args.fiberassign):
#- TODO: move file location logic to desispec / desitarget / fiberassign
args.fiberassign = os.path.join(args.fiberassign, 'fiberassign-{:06d}.fits'.format(tileid))
if not os.path.exists(args.fiberassign):
#- try previous name
args.fiberassign = os.path.join(os.path.dirname(args.fiberassign), 'tile-{:06d}.fits'.format(tileid))
fiberassign = astropy.table.Table.read(args.fiberassign, 'FIBERASSIGN')
if args.outdir is None:
args.outdir = desisim.io.simdir(night=night, expid=args.expid, mkdir=True)
if args.nspec is None:
args.nspec = len(fiberassign)
elif args.nspec <= len(fiberassign):
fiberassign = fiberassign[0:args.nspec]
else:
log.error('args.nspec {} > len(fiberassign) {}'.format(
args.nspec, len(fiberassign)))
sys.exit(1)
log.info('Simulating night {} expid {} tile {} {}'.format(
night, args.expid, tileid, program))
if program.lower() in ('bright', 'sv_bgs', 'sv_mws'):
mock_obscon = 'bright'
else:
mock_obscon = 'dark' #- includes gray for mocks
try:
flux, wave, meta, objmeta = get_mock_spectra(
fiberassign, mockdir=args.mockdir,
nside=args.nside, obscon=mock_obscon)
except Exception as err:
log.fatal('Failed expid {} fiberassign {} tile {}'.format(
args.expid, args.fiberassign, tileid))
raise err
sim, fibermap = simscience((flux, wave, meta), fiberassign,
obsconditions=obs, psfconvolve=False)
#- TODO: header keyword code is replicated from obs.new_exposure()
telera, teledec = desisim.io.get_tile_radec(tileid)
header = dict(
NIGHT = (night, 'Night of observation YEARMMDD'),
EXPID = (args.expid, 'DESI exposure ID'),
TILEID = (tileid, 'DESI tile ID'),
PROGRAM = (program, 'program [dark, bright, ...]'),
FLAVOR = ('science', 'Flavor [arc, flat, science, zero, ...]'),
TELRA = (telera, 'Telescope pointing RA [degrees]'),
TELDEC = (teledec, 'Telescope pointing dec [degrees]'),
AIRMASS = (obs['AIRMASS'], 'Airmass at middle of exposure'),
EXPTIME = (obs['EXPTIME'], 'Exposure time [sec]'),
SEEING = (obs['SEEING'], 'Seeing FWHM [arcsec]'),
MOONFRAC = (obs['MOONFRAC'], 'Moon illumination fraction 0-1; 1=full'),
MOONALT = (obs['MOONALT'], 'Moon altitude [degrees]'),
MOONSEP = (obs['MOONSEP'], 'Moon:tile separation angle [degrees]'),
)
header['DATE-OBS'] = (sim.observation.exposure_start.isot, 'Start of exposure')
#- Write fibermap to $DESI_SPECTRO_SIM/$PIXPROD not $DESI_SPECTRO_DATA
fibermap.meta.update(header)
fibermap.meta['EXTNAME'] = 'FIBERMAP'
fibermap.write(desisim.io.findfile('simfibermap', night, args.expid,
outdir=args.outdir), overwrite=args.clobber)
desisim.io.write_simspec(sim, meta, fibermap, obs, args.expid, night, header=header,
objmeta=objmeta, outdir=args.outdir, overwrite=args.clobber)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/newexp_mock.py",
"copies": "1",
"size": "5135",
"license": "bsd-3-clause",
"hash": -7743873888002739000,
"line_mean": 39.753968254,
"line_max": 114,
"alpha_frac": 0.6399221032,
"autogenerated": false,
"ratio": 3.4051724137931036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4545094516993104,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import numpy as np
import astropy.table
import astropy.time
import astropy.units as u
import desisim.simexp
import desisim.obs
import desisim.io
import desisim.util
from desiutil.log import get_logger
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#- Required
parser.add_argument('--program', type=str, required=True, help="Program name, e.g. dark, bright, gray")
#- Optional observing conditions to override program defaults
parser.add_argument('--seeing', type=float, default=None, help="Seeing FWHM [arcsec]")
parser.add_argument('--airmass', type=float, default=None, help="Airmass")
parser.add_argument('--exptime', type=float, default=None, help="Exposure time [sec]")
parser.add_argument('--moonfrac', type=float, default=None, help="Moon illumination fraction; 1=full")
parser.add_argument('--moonalt', type=float, default=None, help="Moon altitude [degrees]")
parser.add_argument('--moonsep', type=float, default=None, help="Moon separation to tile [degrees]")
#- Optional
parser.add_argument('--expid', type=int, default=None, help="exposure ID")
parser.add_argument('--night', type=int, default=None, help="YEARMMDD of observation")
parser.add_argument('--tileid', type=int, default=None, help="Tile ID")
parser.add_argument('--outdir', type=str, help="output directory")
parser.add_argument('--nspec', type=int, default=5000, help="number of spectra to include")
parser.add_argument('--clobber', action='store_true', help="overwrite any pre-existing output files")
parser.add_argument('--seed', type=int, default=None, help="Random number seed")
parser.add_argument('--nproc', type=int, default=None, help="Number of multiprocessing processes")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args):
log = get_logger()
#- Generate obsconditions with args.program, then override as needed
args.program = args.program.upper()
if args.program in ['ARC', 'FLAT']:
obsconditions = None
else:
obsconditions = desisim.simexp.reference_conditions[args.program]
if args.airmass is not None:
obsconditions['AIRMASS'] = args.airmass
if args.seeing is not None:
obsconditions['SEEING'] = args.seeing
if args.exptime is not None:
obsconditions['EXPTIME'] = args.exptime
if args.moonfrac is not None:
obsconditions['MOONFRAC'] = args.moonfrac
if args.moonalt is not None:
obsconditions['MOONALT'] = args.moonalt
if args.moonsep is not None:
obsconditions['MOONSEP'] = args.moonsep
sim, fibermap, meta, obs, objmeta = desisim.obs.new_exposure(args.program,
nspec=args.nspec, night=args.night, expid=args.expid,
tileid=args.tileid, nproc=args.nproc, seed=args.seed,
obsconditions=obsconditions, outdir=args.outdir)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/newexp_random.py",
"copies": "1",
"size": "3134",
"license": "bsd-3-clause",
"hash": -1692166498314321700,
"line_mean": 41.9315068493,
"line_max": 107,
"alpha_frac": 0.6904913848,
"autogenerated": false,
"ratio": 3.6022988505747127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975587399964295,
"avg_score": 0.007383247146352375,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import sys, os
import numpy as np
from astropy.table import Table
import astropy.units as u
import specsim.simulator
from desispec.frame import Frame
import desispec.io
from desispec.resolution import Resolution
import desisim.io
import desisim.simexp
from desisim.util import dateobs2night
import desisim.specsim
#-------------------------------------------------------------------------
def parse(options=None):
import argparse
parser = argparse.ArgumentParser(usage = "{prog} [options]")
parser.add_argument("--simspec", type=str, help="input simspec file")
parser.add_argument("--outdir", type=str, help="output directory")
parser.add_argument("--firstspec", type=int, default=0,
help="first spectrum to simulate")
parser.add_argument("--nspec", type=int, default=5000,
help="number of spectra to simulate")
parser.add_argument("--cframe", action="store_true",
help="directly write cframe")
parser.add_argument("--dwave", type=float, default=0.8, help="output wavelength step, in Angstrom")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args=None):
'''
Converts simspec -> frame files; see fastframe --help for usage options
'''
#- TODO: use desiutil.log
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
print('Reading files')
simspec = desisim.io.read_simspec(args.simspec, readphot=False)
if simspec.flavor == 'arc':
print('arc exposure; no frames to output')
return
fibermap = simspec.fibermap
obs = simspec.obsconditions
night = simspec.header['NIGHT']
expid = simspec.header['EXPID']
firstspec = args.firstspec
nspec = min(args.nspec, len(fibermap)-firstspec)
print('Simulating spectra {}-{}'.format(firstspec, firstspec+nspec))
wave = simspec.wave
flux = simspec.flux
ii = slice(firstspec, firstspec+nspec)
if simspec.flavor == 'science':
sim = desisim.simexp.simulate_spectra(wave, flux[ii],
fibermap=fibermap[ii], obsconditions=obs, dwave_out=args.dwave,
psfconvolve=True)
elif simspec.flavor in ['arc', 'flat', 'calib']:
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
fiber_area = desisim.simexp.fiber_area_arcsec2(x, y)
surface_brightness = (flux.T / fiber_area).T
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=args.dwave)
# sim = specsim.simulator.Simulator(config, num_fibers=nspec)
sim = desisim.specsim.get_simulator(config, num_fibers=nspec,
camera_output=True)
sim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
sbunit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 * u.arcsec ** 2)
xy = np.vstack([x, y]).T * u.mm
sim.simulate(calibration_surface_brightness=surface_brightness[ii]*sbunit,
focal_positions=xy[ii])
else:
raise ValueError('Unknown simspec flavor {}'.format(simspec.flavor))
sim.generate_random_noise()
    for i, results in enumerate(sim.camera_output):
wave = results['wavelength']
scale=1e17
if args.cframe :
phot = scale*(results['observed_flux'] + results['random_noise_electrons']*results['flux_calibration']).T
ivar = 1./scale**2*results['flux_inverse_variance'].T
else :
phot = (results['num_source_electrons'] + \
results['num_sky_electrons'] + \
results['num_dark_electrons'] + \
results['random_noise_electrons']).T
ivar = 1.0 / results['variance_electrons'].T
R = Resolution(sim.instrument.cameras[i].get_output_resolution_matrix())
Rdata = np.tile(R.data.T, nspec).T.reshape(
nspec, R.data.shape[0], R.data.shape[1])
assert np.all(Rdata[0] == R.data)
assert phot.shape == (nspec, len(wave))
for spectro in range(10):
imin = max(firstspec, spectro*500) - firstspec
imax = min(firstspec+nspec, (spectro+1)*500) - firstspec
if imax <= imin:
continue
xphot = phot[imin:imax]
xivar = ivar[imin:imax]
xfibermap = fibermap[ii][imin:imax]
camera = '{}{}'.format(sim.camera_names[i], spectro)
meta = simspec.header.copy()
meta['CAMERA'] = camera
if args.cframe :
units = '1e-17 erg/(s cm2 Angstrom)'
else :
#
# We want to save electrons per angstrom and not electrons per bin
# to be consistent with the extraction code (specter.extract.ex2d).
# And to be FITS-compliant, we call electrons "counts".
#
units = 'count/Angstrom'
dwave=np.gradient(wave)
xphot /= dwave
xivar *= dwave**2
meta['BUNIT']=units
meta['DETECTOR'] = 'SIM'
if camera[0] == 'b':
meta['CCDSIZE'] = '4162,4232'
else:
meta['CCDSIZE'] = '4194,4256'
readnoise = sim.instrument.cameras[i].read_noise.value
meta['OBSRDNA'] = readnoise
meta['OBSRDNB'] = readnoise
meta['OBSRDNC'] = readnoise
meta['OBSRDND'] = readnoise
frame = Frame(wave, xphot, xivar, resolution_data=Rdata[0:imax-imin],
spectrograph=spectro, fibermap=xfibermap, meta=meta)
if args.cframe :
outfile = desispec.io.findfile('cframe', night, expid, camera,
outdir=args.outdir)
else :
outfile = desispec.io.findfile('frame', night, expid, camera,
outdir=args.outdir)
print('writing {}'.format(outfile))
desispec.io.write_frame(outfile, frame, units=units)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/fastframe.py",
"copies": "1",
"size": "6283",
"license": "bsd-3-clause",
"hash": 5966347090914149000,
"line_mean": 39.2756410256,
"line_max": 117,
"alpha_frac": 0.571064778,
"autogenerated": false,
"ratio": 3.6275981524249423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673674509477662,
"avg_score": 0.004997684189456063,
"num_lines": 156
} |
from __future__ import absolute_import, division, print_function
import tarfile
import pandas as pd
import gzip
import shutil
import os
import time
def namelist(fname):
"""
    return the tar file name and the tar.gz file name
"""
basename = fname.split('.')[0]
tarfilename = basename + '.tar'
targzname = tarfilename + '.gz'
return tarfilename, targzname
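def _example_namelist():
    """
    Editor's illustrative sketch (not part of the original module): namelist
    only manipulates the file name string, so it can be demonstrated on a
    hypothetical catalog name without touching the file system.
    """
    tarfilename, targzname = namelist('phosim_input_230.txt')
    assert tarfilename == 'phosim_input_230.tar'
    assert targzname == 'phosim_input_230.tar.gz'
    return tarfilename, targzname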
def tarfilelist(lst, fname):
"""
tar up all the filenames in lst into base.tar where base = basename for
fname
"""
outfname, _ = namelist(fname)
f = tarfile.open(outfname, 'w')
for s in lst:
f.add(s)
f.close()
return outfname
def snspectralist(fname, logffname=None):
"""
List all the spectra files associated with a phosim instance catalog
"""
x = []
with open(fname, 'r') as f:
for line in f:
if 'spectra_file' in line:
x.append(line.split()[5])
return x
def listFiles(logfile, prefix='InstanceCatalogs/phosim_input_'):
"""
Read the log file to get a list of phosim instance catalogs done
"""
df = pd.read_csv(logfile)
fileList = [prefix + str(x) + '.txt' for x in df.obsHistID.values]
return fileList
def gziptarfile(fname, prefix=''):
"""
gzip a tarred up file
"""
tarfilename, targzname = namelist(fname)
targzname = prefix + targzname
with open(tarfilename, 'rb') as f_in, gzip.open(targzname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def cleanup(fname):
l = snspectralist(fname)
tarfilename, _ = namelist(fname)
for file in l:
os.remove(file)
os.remove(tarfilename)
if __name__=='__main__':
import pandas as pd
import sys
import gzip
logfilename = 'run.log'
filenames = listFiles(logfilename, prefix='InstanceCatalogs/phosim_input_')
for fname in filenames:
starttime = time.time()
print(fname)
tgzfile = fname.split('.')[0] + '.tar.gz'
if os.path.exists(tgzfile):
continue
with open(fname, 'rb') as fin, gzip.open(fname + '.gz', 'wb') as fout:
shutil.copyfileobj(fin, fout)
x = snspectralist(fname)
listtime = time.time()
print(len(x))
tarfiles = tarfilelist(x, fname)
tartime = time.time()
gziptarfile(fname)
ziptime = time.time()
totaltime = ziptime - starttime
        zippingtime = ziptime - tartime
tarringtime = tartime - starttime
print(totaltime, zippingtime, tarringtime)
cleanup(fname)
        print(fname, tarfiles, x)
| {
"repo_name": "LSSTDESC/Twinkles",
"path": "python/desc/twinkles/cleanupspectra.py",
"copies": "2",
"size": "2612",
"license": "mit",
"hash": 6686208785191495000,
"line_mean": 24.8613861386,
"line_max": 79,
"alpha_frac": 0.6106431853,
"autogenerated": false,
"ratio": 3.5489130434782608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5159556228778261,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import threading
import time
import os
from datetime import datetime
import logging
import traceback
import schedule
from .registry import register
from .run import run_task
_schedule = schedule.Scheduler()
logging.getLogger('schedule').propagate = False
logging.getLogger('schedule').addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
class Timer(threading.Thread):
def __init__(self, schedule):
super(Timer, self).__init__()
self._schedule = schedule
self.finished = threading.Event()
def stop(self):
self.finished.set()
def run(self):
while not self.finished.is_set():
try:
self._schedule.run_pending()
if self._schedule.next_run is not None:
self.finished.wait(self._schedule.idle_seconds)
else:
self.finished.wait(60)
except Exception:
logger.warning(traceback.format_exc())
def start():
thread = Timer(_schedule)
thread.daemon = True
thread.start()
return thread
def stop(thread):
thread.stop()
def join(thread):
thread.join()
_intervals = ["second", "minute", "hour", "day", "week", "monday",
"tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
_units = ["seconds", "minutes", "hours", "days", "weeks"]
def parse_interval(parts, scheduler):
part = parts.pop(0)
if part in _intervals:
interval = part
# call scheduler.every().<interval>()
return getattr(scheduler.every(), interval), parts
else:
try:
n = int(part, 10)
unit = parts.pop(0)
assert unit in _units
return getattr(scheduler.every(n), unit), parts
except (AssertionError, ValueError) as e:
raise AssertionError("expected a number followed by a unit or one of [%s] (%s)" % ("|".join(_intervals), e))
def parse_optional_at(parts, interval):
if len(parts) == 0:
return interval, parts
else:
        assert len(parts) == 2, "Expected 'at HH:MM', got '%s'" % " ".join(parts)
        at, time = parts
        assert at == "at", "Expected 'at HH:MM', got '%s'" % " ".join(parts)
return interval.at(time), parts
def parse_when(when, scheduler):
parts = when.split(" ")
assert len(parts) > 0, "Invalid time spec %s" % when
every = parts.pop(0)
assert every == "every", "Invalid time spec: %s" % when
interval, parts = parse_interval(parts, scheduler)
interval, parts = parse_optional_at(parts, interval)
return interval
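def _example_parse_when():
    """
    Editor's illustrative sketch (not part of the original module). The "when"
    grammar accepted above is "every <interval>" with an optional "at HH:MM"
    suffix, for example "every 5 minutes", "every monday", or
    "every day at 10:30". A throwaway Scheduler is used so the module-level
    _schedule is left untouched.
    """
    scratch = schedule.Scheduler()
    job = parse_when("every day at 10:30", scratch)
    return job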
def queue_task(job_name, task_queue, **kwargs):
task_queue.put((job_name, {}, kwargs))
@register('time', start, stop, join)
def timed_trigger(job_name, task_queue, when, **kwargs):
if not isinstance(when, list):
when = [when]
for when_term in when:
parse_when(when_term, _schedule).do(queue_task, job_name, task_queue, **kwargs)
| {
"repo_name": "stcorp/legato",
"path": "legato/timed.py",
"copies": "1",
"size": "3051",
"license": "bsd-3-clause",
"hash": -722763642202157300,
"line_mean": 25.3017241379,
"line_max": 120,
"alpha_frac": 0.6076696165,
"autogenerated": false,
"ratio": 3.7995018679950188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9905267652592556,
"avg_score": 0.0003807663804924044,
"num_lines": 116
} |
from __future__ import absolute_import, division, print_function
import threading
import numpy as np
from toolz import memoize
import datashape
import numba
from datashape import isdatelike, TimeDelta
from .core import optimize
from ..expr import Expr, Arithmetic, Math, Map, UnaryOp
from ..expr.strings import isstring
from ..expr.broadcast import broadcast_collect, Broadcast
from .pyfunc import funcstr
Broadcastable = Arithmetic, Math, Map, UnaryOp
lock = threading.Lock()
def optimize_ndarray(expr, *data, **kwargs):
for leaf in expr._leaves():
leaf_measure = leaf.dshape.measure
# TODO: remove datelike skipping when numba/numba#1202 is fixed
if (isstring(leaf_measure) or
isdatelike(leaf_measure) or
isinstance(leaf_measure, TimeDelta) or
isinstance(leaf.dshape.measure, datashape.Record) and
any(isstring(dt) or isdatelike(dt) or isinstance(dt, TimeDelta)
for dt in leaf.dshape.measure.types)):
return expr
else:
return broadcast_collect(expr, Broadcastable=Broadcastable,
WantToBroadcast=Broadcastable)
for i in range(1, 11):
optimize.register(Expr, *([np.ndarray] * i))(optimize_ndarray)
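# Editor's note (descriptive comment, not in the original source): the loop
# above registers ``optimize_ndarray`` as the optimizer for blaze expressions
# backed by one through ten numpy arrays, so broadcast fusion is attempted for
# any of those arities.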
def get_numba_type(dshape):
"""Get the numba type corresponding to the ``datashape.Mono`` instance.
`dshape`
Parameters
----------
dshape : datashape.Mono
Returns
-------
restype : numba.types.Type
Numba type corresponding to `dshape`.
Examples
--------
>>> get_numba_type(datashape.bool_) == numba.bool_
True
See Also
--------
compute_signature
"""
measure = dshape.measure
if measure == datashape.bool_:
restype = numba.bool_ # str(bool_) == 'bool' so we can't use getattr
elif measure == datashape.date_:
restype = numba.types.NPDatetime('D')
elif measure == datashape.datetime_:
restype = numba.types.NPDatetime('us')
elif isinstance(measure, datashape.TimeDelta): # isinstance for diff freqs
restype = numba.types.NPTimedelta(measure.unit)
elif isinstance(measure, datashape.String):
encoding = measure.encoding
fixlen = measure.fixlen
if fixlen is None:
if encoding == 'A':
return numba.types.string
raise TypeError("Numba cannot handle variable length strings")
typ = (numba.types.CharSeq
if encoding == 'A' else numba.types.UnicodeCharSeq)
return typ(fixlen or 0)
elif measure == datashape.object_:
raise TypeError("Numba cannot handle object datashape")
else:
try:
restype = getattr(numba, str(measure))
except AttributeError:
raise TypeError('Invalid datashape to numba type: %r' % measure)
return restype
def compute_signature(expr):
"""Get the ``numba`` *function signature* corresponding to ``DataShape``
Examples
--------
>>> from blaze import symbol
>>> s = symbol('s', 'int64')
>>> t = symbol('t', 'float32')
>>> from numba import float64, int64, float32
>>> expr = s + t
>>> compute_signature(expr) == float64(int64, float32)
True
Notes
-----
* This could potentially be adapted/refactored to deal with
``datashape.Function`` types.
* Cannot handle ``datashape.Record`` types.
"""
assert datashape.isscalar(expr.schema)
restype = get_numba_type(expr.schema)
argtypes = [get_numba_type(e.schema) for e in expr._leaves()]
return restype(*argtypes)
def _get_numba_ufunc(expr):
"""Construct a numba ufunc from a blaze expression
Parameters
----------
expr : blaze.expr.Expr
Returns
-------
f : function
A numba vectorized function
Examples
--------
>>> from blaze import symbol
>>> import numpy as np
>>> s = symbol('s', 'float64')
>>> t = symbol('t', 'float64')
>>> x = np.array([1.0, 2.0, 3.0])
>>> y = np.array([2.0, 3.0, 4.0])
>>> f = get_numba_ufunc(s + t)
>>> f(x, y)
array([ 3., 5., 7.])
See Also
--------
get_numba_type
compute_signature
"""
if isinstance(expr, Broadcast):
leaves = expr._scalars
expr = expr._scalar_expr
else:
leaves = expr._leaves()
s, scope = funcstr(leaves, expr)
scope = dict((k, numba.jit(nopython=True)(v) if callable(v) else v)
for k, v in scope.items())
# get the func
func = eval(s, scope)
# get the signature
sig = compute_signature(expr)
# vectorize is currently not thread safe. So lock the thread.
# TODO FIXME remove this when numba has made vectorize thread safe.
with lock:
ufunc = numba.vectorize([sig], nopython=True)(func)
return ufunc
# do this here so we can run our doctest
get_numba_ufunc = memoize(_get_numba_ufunc)
def broadcast_numba(t, *data, **kwargs):
return get_numba_ufunc(t)(*data)
| {
"repo_name": "maxalbert/blaze",
"path": "blaze/compute/numba.py",
"copies": "9",
"size": "5010",
"license": "bsd-3-clause",
"hash": 7726477799779975000,
"line_mean": 27.1460674157,
"line_max": 79,
"alpha_frac": 0.6167664671,
"autogenerated": false,
"ratio": 3.691967575534267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8808734042634268,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import timeit
def run_benchmark_get_invVR_mats_sqrd_scale(iterations):
    test_tuples = [('get_invVR_mats_sqrd_scale(invVRs)',
                    '_get_invVR_mats_sqrd_scale_cyth(invVRs)')]
setup_script = '''from vtool.keypoint import *
np.random.seed(0)
invVRs = np.random.rand(4, 3, 3).astype(np.float64)
'''
time_line = lambda line: timeit.timeit(stmt=line, setup=setup_script, number=iterations)
time_pair = lambda x, y: (time_line(x), time_line(y))
    def print_timing_info(tup):
        (x, y) = time_pair(*tup)
        print("Time for %d iterations of the python version: %f" % (iterations, x))
        print("Time for %d iterations of the cython version: %f" % (iterations, y))
        return (x, y)
return list(map(print_timing_info, test_tuples))
def run_benchmark_rectify_invV_mats_are_up(iterations):
    test_tuples = []  # no (python, cython) statement pairs defined for this benchmark yet
setup_script = ''''''
time_line = lambda line: timeit.timeit(stmt=line, setup=setup_script, number=iterations)
time_pair = lambda x, y: (time_line(x), time_line(y))
    def print_timing_info(tup):
        (x, y) = time_pair(*tup)
        print("Time for %d iterations of the python version: %f" % (iterations, x))
        print("Time for %d iterations of the cython version: %f" % (iterations, y))
        return (x, y)
return list(map(print_timing_info, test_tuples))
def run_all_benchmarks(iterations):
    run_benchmark_get_invVR_mats_sqrd_scale(iterations)
    run_benchmark_rectify_invV_mats_are_up(iterations)
| {
"repo_name": "aweinstock314/cyth",
"path": "cyth/bench.py",
"copies": "1",
"size": "1547",
"license": "apache-2.0",
"hash": 818518678351759900,
"line_mean": 41.9722222222,
"line_max": 104,
"alpha_frac": 0.6612798966,
"autogenerated": false,
"ratio": 3.033333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9162316372855835,
"avg_score": 0.00645937141549957,
"num_lines": 36
} |
from __future__ import (absolute_import, division, print_function)
import time
from qtpy.QtGui import (QCursor)
from qtpy.QtWidgets import (QAction, QMenu)
from addie.plot import MplGraphicsView
from addie.addiedriver import AddieDriver
import addie.utilities.workspaces
class BraggView(MplGraphicsView):
""" Graphics view for Bragg diffraction
"""
def __init__(self, parent):
"""
Initialization
Parameters
----------
parent
"""
MplGraphicsView.__init__(self, parent)
self._driver = AddieDriver()
# control class
# key: bank ID, value: list of workspace names
self._bankPlotDict = dict()
for bank_id in range(1, 7):
self._bankPlotDict[bank_id] = list()
# key: workspace name. value: line ID
self._gssDict = dict()
# dictionary for on-canvas plot Y size
self._plotScaleDict = dict()
self._singleGSSMode = True
self._bankColorDict = {1: 'black',
2: 'red',
3: 'blue',
4: 'green',
5: 'brown',
6: 'orange'}
# color sequence for multiple GSAS mode
self._gssColorList = ["black", "red", "blue", "green",
"cyan", "magenta", "yellow"]
self._gssLineStyleList = ['-', '--', '-.']
self._gssLineMarkers = ['.', 'D', 'o', 's', 'x']
# a dictionary to manage the GSAS plot's color and marker
self._gssLineDict = dict() # key: GSAS workspace name. value:
self._gssLineColorMarkerDict = dict()
self._currColorStyleMarkerIndex = 0
# define the dynamic menu
self._myCanvas.mpl_connect(
'button_press_event',
self.on_mouse_press_event)
# records of the plots on canvas
# workspaces' names (not bank, but original workspace) on canvas
self._workspaceSet = set()
# unit
self._unitX = None
return
def check_banks(self, bank_to_plot_list):
""" Check the to-plot bank list against the current being-plot bank list,
to find out the banks which are to plot and to be removed from plot.
Args:
bank_to_plot_list:
Returns:
2-tuple. (1) list of banks' IDs to be plot and (2) list of
banks' IDs to be removed from current canvas.
"""
# check
assert isinstance(bank_to_plot_list, list)
new_plot_banks = bank_to_plot_list[:]
to_remove_banks = list()
for bank_id in list(self._bankPlotDict.keys()):
if len(self._bankPlotDict[bank_id]) == 0:
# previously-not-being plot. either in new_plot_banks already
# or no-op
continue
elif bank_id in bank_to_plot_list:
# previously-being plot, then to be removed from new-plot-list
new_plot_banks.remove(bank_id)
else:
# previously-being plot, then to be removed from canvas
to_remove_banks.append(bank_id)
return new_plot_banks, to_remove_banks
def set_unit(self, x_unit):
"""
set the unit of the powder diffraction pattern
Parameters
----------
x_unit
Returns
-------
"""
assert isinstance(x_unit, str), 'Unit of X-axis {0} must be a string but not a {1}.' \
''.format(x_unit, type(x_unit))
if x_unit not in ['TOF', 'MomentumTransfer', 'dSpacing']:
raise RuntimeError(
'Unit {0} of X-axis is not recognized.'.format(x_unit))
self._unitX = x_unit
return
def evt_toolbar_home(self):
"""
override the behavior if a tool bar's HOME button is pressed
Returns
-------
"""
time.sleep(0.1)
# call the super
super(BraggView, self).evt_toolbar_home()
# if it is first time in this region
if self._homeXYLimit is None:
if self._unitX == 'TOF':
self.setXYLimit(xmin=0, xmax=20000, ymin=None, ymax=None)
elif self._unitX == 'MomentumTransfer':
self.setXYLimit(xmin=0, xmax=20, ymin=None, ymax=None)
elif self._unitX == 'dSpacing':
self.setXYLimit(xmin=0, xmax=7, ymin=None, ymax=None)
else:
raise RuntimeError('Unit %s unknown' % self._unitX)
return
def get_ws_name_on_canvas(self, bank_id):
"""
Get workspace' names on canvas according to its bank ID
Args:
bank_id: bank ID, integer between 1 and 6
Returns: a list of workspace' names
"""
# check input requirements
assert isinstance(bank_id, int), 'Bank ID %s must be an integer but not %s.' \
'' % (str(bank_id), str(type(bank_id)))
assert 1 <= bank_id <= 6, 'Bank ID %d must be in [1, 6].' % bank_id
# return
return self._bankPlotDict[bank_id]
def get_multi_gss_color(self):
"""
Get the present color and line style in multiple-GSS mode
Returns:
"""
# get basic statistic
num_marker = len(self._gssLineMarkers)
num_style = len(self._gssLineStyleList)
num_color = len(self._gssColorList)
print('[DB] Index = ', self._currColorStyleMarkerIndex)
# get color with current color index
value = num_style * num_color
marker_value = self._currColorStyleMarkerIndex / value
marker_index = int(marker_value)
style_value = self._currColorStyleMarkerIndex % value / num_color
style_index = int(style_value)
color_value = self._currColorStyleMarkerIndex % value % num_color
color_index = int(color_value)
color = self._gssColorList[color_index]
style = self._gssLineStyleList[style_index]
marker = self._gssLineMarkers[marker_index]
# advance to next index but reset if reaches limit
self._currColorStyleMarkerIndex += 1
# reset
if self._currColorStyleMarkerIndex == num_color * num_style * num_marker:
self._currColorStyleMarkerIndex = 0
return color, style, marker
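    # Editor's note (descriptive comment, not in the original source): with
    # the 7 colors, 3 line styles and 5 markers defined in __init__, the
    # running index above cycles through colors first, then line styles, then
    # markers, yielding 7 * 3 * 5 = 105 distinct combinations before the
    # counter wraps back to zero.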
def get_workspaces(self):
"""
Get the names of workspaces on the canvas
Returns
-------
"""
return list(self._workspaceSet)
@staticmethod
def _generate_plot_key(ws_group_name, bank_id):
"""
Generate a standard key for a plot from GSAS Workspace group name and bank ID
Args:
ws_group_name:
bank_id:
Returns:
"""
# check
assert isinstance(ws_group_name, str), 'Workspace group\'s name must be a string, but not %s.' \
'' % str(type(ws_group_name))
assert isinstance(bank_id, int), 'Bank ID %s must be an integer but not %s.' \
'' % (str(bank_id), str(type(bank_id)))
plot_key = '%s_bank%d' % (ws_group_name, bank_id)
return plot_key
def on_mouse_press_event(self, event):
"""
handle mouse pressing event
Returns:
"""
# get the button and position information.
curr_x = event.xdata
curr_y = event.ydata
if curr_x is None or curr_y is None:
# outside of canvas
return
button = event.button
if button == 1:
# left button: no operation
pass
elif button == 3:
# right button:
# Pop-out menu
self.menu = QMenu(self)
if self.get_canvas().is_legend_on:
# figure has legend: remove legend
action1 = QAction('Hide legend', self)
action1.triggered.connect(self._myCanvas.hide_legend)
action2 = QAction('Legend font larger', self)
action2.triggered.connect(
self._myCanvas.increase_legend_font_size)
action3 = QAction('Legend font smaller', self)
action3.triggered.connect(
self._myCanvas.decrease_legend_font_size)
self.menu.addAction(action2)
self.menu.addAction(action3)
else:
# figure does not have legend: add legend
action1 = QAction('Show legend', self)
action1.triggered.connect(self._myCanvas.show_legend)
self.menu.addAction(action1)
# pop up menu
self.menu.popup(QCursor.pos())
return
def plot_banks(self, plot_bank_dict, unit):
"""
Plot a few banks to canvas. If the bank has been plot on canvas already,
then remove the previous data
Args:
plot_bank_dict: dictionary: key = ws group name, value = banks to show
unit: string for X-range unit. can be TOF, dSpacing or Q (momentum transfer)
"""
# check
assert isinstance(plot_bank_dict, dict)
# plot
for ws_name in list(plot_bank_dict.keys()):
self._driver.convert_bragg_data(ws_name, unit)
# get workspace name
self._workspaceSet.add(ws_name)
for bank_id in plot_bank_dict[ws_name]:
# determine the color/marker/style of the line - shouldn't be
# special
if self._singleGSSMode:
# single bank mode
bank_color = self._bankColorDict[bank_id]
marker = None
style = None
else:
# multiple bank mode
bank_color, style, marker = self.get_multi_gss_color()
print(
'[DB...BAT] Plot Mode (single bank) = {0}, group = {1}, bank = {2}, color = {3}, marker = {4},'
'style = {5}'
''.format(
self._singleGSSMode,
ws_name,
bank_id,
bank_color,
marker,
style))
# plot
plot_id = self.add_plot_1d(
ws_name,
wkspindex=bank_id - 1,
marker=marker,
color=bank_color,
line_style=style,
x_label=unit,
y_label='I({0})'.format(unit),
label='%s Bank %d' % (ws_name, bank_id)
)
# plot key
plot_key = self._generate_plot_key(ws_name, bank_id)
self._bankPlotDict[bank_id].append(plot_key)
self._gssDict[plot_key] = plot_id
self._plotScaleDict[plot_id] = addie.utilities.workspaces.get_y_range(
ws_name, bank_id - 1) # is this needed?
# self.scale_auto()
def plot_general_ws(self, ws_name):
"""
Plot a workspace that does not belong to any workspace group
Parameters
"""
# register
self._workspaceSet.add(ws_name)
# plot
plot_id = self.add_plot_1d(
ws_name,
wkspindex=0,
marker=None,
color='black',
label=ws_name)
self._plotScaleDict[plot_id] = addie.utilities.workspaces.get_y_range(
ws_name, 0)
# scale the plot automatically
self.scale_auto()
def remove_gss_banks(self, ws_group_name, bank_id_list):
"""
Remove a few bank ID from Bragg plot
Args:
ws_group_name: workspace group name as bank ID
bank_id_list:
Returns: error message (empty string for non-error)
"""
# check
assert isinstance(bank_id_list, list)
# remove line from canvas
error_message = ''
for bank_id in bank_id_list:
bank_id = int(bank_id)
# from bank ID key
plot_key = self._generate_plot_key(ws_group_name, bank_id)
# line is not plot
if plot_key not in self._gssDict:
error_message += 'Workspace %s Bank %d is not on canvas to delete.\n' % (
ws_group_name, bank_id)
continue
bank_line_id = self._gssDict[plot_key]
# remove from canvas
try:
self.remove_line(bank_line_id)
except ValueError as val_error:
error_message = 'Unable to remove bank %d plot (ID = %d) due to %s.' % (
bank_id, bank_line_id, str(val_error))
raise ValueError(error_message)
# remove from data structure
del self._gssDict[plot_key]
del self._plotScaleDict[bank_line_id]
self._bankPlotDict[bank_id].remove(plot_key)
# scale automatically
# self.scale_auto()
# debug output
db_buf = ''
for bank_id in self._bankPlotDict:
db_buf += '%d: %s \t' % (bank_id, str(self._bankPlotDict[bank_id]))
print('After removing %s, Buffer: %s.' % (str(bank_id_list), db_buf))
return error_message
def reset(self):
"""
Reset the canvas for new Bragg data
Returns:
None
"""
# clean the dictionaries
for bank_id in list(self._bankPlotDict.keys()):
self._bankPlotDict[bank_id] = list()
self._gssDict.clear()
self._plotScaleDict.clear()
# clear the workspace record
self._workspaceSet.clear()
# clear all lines and reset color/marker counter
self.clear_all_lines()
self.reset_line_color_marker_index()
self._currColorIndex = 0
def reset_color(self):
"""
Reset color, line style and marker index
Returns
-------
"""
self._currColorStyleMarkerIndex = 0
def scale_auto(self):
"""Scale automatically for the plots on the canvas
"""
# get Y min and Y max
y_min = min(0., self._plotScaleDict.values()
[0][0]) # always include zero
y_max = self._plotScaleDict.values()[0][1]
for temp_min, temp_max in self._plotScaleDict.values():
y_min = min(y_min, temp_min)
y_max = max(y_max, temp_max)
# determine the canvas Y list
upper_y = y_max * 1.05
# set limit
self.setXYLimit(ymin=y_min, ymax=upper_y)
def set_to_single_gss(self, mode_on):
"""
Set to single-GSAS/multiple-bank model
Args:
mode_on:
Returns:
"""
assert isinstance(mode_on, bool), 'Single GSAS mode {0} must be a boolean but not a {1}.' \
''.format(mode_on, type(mode_on))
self._singleGSSMode = mode_on
if mode_on is False:
# set to multiple GSAS mode
self._currColorStyleMarkerIndex = 0
| {
"repo_name": "neutrons/FastGR",
"path": "addie/rietveld/braggview.py",
"copies": "1",
"size": "15352",
"license": "mit",
"hash": 8765056677313830000,
"line_mean": 31.5254237288,
"line_max": 115,
"alpha_frac": 0.5208441897,
"autogenerated": false,
"ratio": 4.087326943556976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5108171133256976,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
from .util import choose_population, decode_http_header, decode_url
from .record import PageRecord, PixelRecord, GoalRecord, SplitRecord
# Goal Value Aggregation Types
RATE = 'R'
AVERAGE = 'A'
SUM = 'S'
PER = 'I'
# Goal Value Measurement Formats
NUMERIC = 'N'
CURRENCY = 'C'
PERCENTAGE = 'P'
class Visitor(object):
"""
A handle to perform operations on the given visitor session.
"""
RATE = RATE
AVERAGE = AVERAGE
SUM = SUM
PER = PER
NUMERIC = NUMERIC
CURRENCY = CURRENCY
PERCENTAGE = PERCENTAGE
def __init__(self, id, log, site_id='', buffer_writes=True):
"""
Initialize the Visitor handle.
:param id:
id to reference this Visitor.
:type id:
str
:param log:
A log instance that implements the manhattan log interface methods.
:param buffer_writes:
If True, log entries are buffered and flushed at the end of
a request or when `flush()` is called manually; if False,
they're written immediately.
"""
self.id = id
self.log = log
self.site_id = str(site_id)
self.buffer_writes = buffer_writes
self.buffer = []
def timestamp(self):
"""
        Override this to generate event timestamps in a different way. Defaults
        to the current POSIX timestamp (seconds since the epoch), formatted
        with four decimal places.
"""
return '%0.4f' % time.time()
def write(self, *records):
self.buffer += records
if not self.buffer_writes:
self.flush()
def flush(self):
"""Write buffered records to log."""
if self.buffer:
records = [r.to_list() for r in self.buffer]
self.buffer = []
self.log.write(*records)
def page(self, request):
"""
Log a page view for this visitor.
:param request:
A request object corresponding to the page to log.
:type request:
webob.Request instance
"""
rec = PageRecord(timestamp=self.timestamp(),
vid=self.id,
site_id=self.site_id,
ip=request.remote_addr or '0.0.0.0',
method=request.method,
url=decode_url(request.url),
user_agent=decode_http_header(request.user_agent),
referer=decode_http_header(request.referer))
self.write(rec)
def pixel(self):
"""
Log a pixel view for this visitor.
"""
rec = PixelRecord(timestamp=self.timestamp(),
vid=self.id,
site_id=self.site_id)
self.write(rec)
def goal(self, name, value=None, value_type=None, value_format=None):
"""
Log a goal hit for this visitor.
:param name:
Name of the goal.
:type name:
str
:param value:
Value of this goal.
:type value:
int or float
:param value_type:
Type of goal value aggregation to perform.
:type value_type:
RATE, AVERAGE or SUM
:param value_format:
Display format for this goal value.
:type value_format:
NUMERIC, CURRENCY, or PERCENTAGE
"""
value = value and str(value)
rec = GoalRecord(timestamp=self.timestamp(),
vid=self.id,
site_id=self.site_id,
name=name.encode('ascii', 'replace').decode('ascii'),
value=value or '',
value_type=value_type or '',
value_format=value_format or '')
self.write(rec)
def split(self, test_name, populations=None):
"""
Perform a split test for this visitor. The resulting population is
calculated deterministically based on the test name and the visitor id,
so the same visitor id and the same test name will always be assigned
to the same population.
:param test_name:
Name of the test.
:type test_name:
str
:param populations:
Population specified. Can be any of the following:
None -- 50/50 split performed between True or False.
list -- Select evenly between entries in the list.
dict -- A weighted split between keys in the dict. The weight
of each population is specified by the value, as a float.
:returns:
The population selected for the visitor.
"""
selected = choose_population(self.id + test_name, populations)
rec = SplitRecord(timestamp=self.timestamp(),
vid=self.id,
site_id=self.site_id,
test_name=test_name,
selected=str(selected))
self.write(rec)
return selected
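def _example_visitor_usage(log):
    """
    Editor's illustrative sketch (not part of the original module). ``log`` is
    assumed to be any object implementing the manhattan log interface, i.e.
    providing a ``write(*records)`` method; the visitor id and goal names are
    made up for demonstration.
    """
    v = Visitor('0123456789abcdef', log, site_id='1')
    v.goal('add to cart')
    v.goal('completed checkout', value=31.78,
           value_type=Visitor.SUM, value_format=Visitor.CURRENCY)
    population = v.split('red checkout form')  # deterministic per visitor id
    v.flush()
    return population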
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/visitor.py",
"copies": "1",
"size": "5106",
"license": "mit",
"hash": -9062803135054246000,
"line_mean": 30.7142857143,
"line_max": 79,
"alpha_frac": 0.5385820603,
"autogenerated": false,
"ratio": 4.471103327495622,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 161
} |
from __future__ import absolute_import, division, print_function
import types
from random import randint
from webob import Request
from manhattan import visitor
from manhattan.visitor import Visitor
from .base import work_path
test_complex_goals = [
(u'abandoned cart', set([u'add to cart']), set([u'began checkout'])),
(u'abandoned checkout',
set([u'began checkout']), set([u'completed checkout'])),
(u'abandoned after validation failure',
set([u'began checkout', u'checkout validation failed']),
set([u'completed checkout'])),
(u'abandoned after payment failure',
set([u'began checkout', u'payment failed']),
set([u'completed checkout'])),
]
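# Editor's note (descriptive comment, not in the original source): each entry
# above appears to be a (complex goal name, goals that must have occurred,
# goals that must not have occurred) triple, e.g. 'abandoned cart' counts
# visitors who hit 'add to cart' but never 'began checkout'.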
sampleconfig = {
'input_log_path': work_path('clientserver-python-config'),
'sqlalchemy_url': 'sqlite:///' + work_path('sampleconfig.db'),
'complex_goals': test_complex_goals,
'bind': 'tcp://127.0.0.1:5556',
'verbose': False,
'error_log_path': work_path('python-config-debug.log'),
}
# fields are:
# site_id
# timestamp
# command (event type)
# visitor ID (vid)
# args to command
test_clickstream = [
(1, 10, 'page', 'a', '/'),
(1, 10, 'page', 'b', '/cheese'),
(1, 132, 'pixel', 'a'),
(1, 240, 'page', 'a', '/potatoes'),
(1, 290, 'page', 'a', '/potatoes/sweet'),
(1, 295, 'page', 'bot', '/potatoes/russet'),
(1, 300, 'pixel', 'b'),
(1, 382, 'page', 'a', '/potatoes/russet'),
(1, 385, 'goal', 'a', 'add to cart', ''),
(1, 394, 'page', 'b', '/cheese/parmesan'),
(2, 401, 'page', 'q', '/'),
(1, 448, 'page', 'c', '/fruit/apples'),
(1, 462, 'page', 'b', '/cart'),
(2, 544, 'page', 'q', '/candy'),
(2, 545, 'pixel', 'q'),
(2, 680, 'page', 'q', '/candy'),
(1, 749, 'goal', 'f', 'fake goal', ''),
(1, 1120, 'goal', 'b', 'add to cart', ''),
(1, 1180, 'page', 'bot', '/potatoes/russet'),
(1, 1200, 'page', 'f', '/'),
(1, 1202, 'pixel', 'f'),
(2, 1311, 'page', 'q', '/candy/molds'),
(1, 1596, 'page', 'c', '/fruit/bananas'),
(1, 1602, 'split', 'b', 'red checkout form'),
(1, 1602, 'page', 'b', '/checkout'),
(1, 1602, 'goal', 'b', 'began checkout', ''),
(1, 1706, 'page', 'f', '/fruit'),
(2, 1807, 'page', 'q', '/cart'),
(2, 1807, 'goal', 'q', 'add to cart', ''),
(1, 1821, 'page', 'bot', 'fruit'),
(1, 1920, 'page', 'bot', '/cart'),
(1, 1950, 'goal', 'bot', 'add to cart', ''),
(1, 1996, 'page', 'a', '/cart'),
(1, 1996, 'goal', 'a', 'add to cart', ''),
(1, 2043, 'goal', 'b', 'checkout validation failed', ''),
(1, 2112, 'pixel', 'c'),
(1, 2196, 'page', 'a', '/cheese/gouda'),
(1, 2356, 'page', 'a', '/cheese'),
(2, 2477, 'page', 'q', '/checkout'),
(2, 2477, 'goal', 'q', 'began checkout', ''),
(1, 2680, 'page', 'b', '/cheese'),
(1, 2840, 'page', 'd', '/'),
(1, 2846, 'pixel', 'd'),
(1, 3110, 'split', 'b', 'red checkout form'),
(1, 3340, 'page', 'd', '/cheese'),
(1, 3514, 'page', 'd', '/cheese/gruyere'),
(1, 3514, 'page', 'b', '/checkout/complete'),
(1, 3514, 'goal', 'b', 'completed checkout', 31.78),
(1, 3514, 'goal', 'b', 'order margin', 22.5),
(1, 3514, 'goal', 'b', 'margin per session', 7.15),
(1, 3600, 'page', 'c', '/'),
(1, 3620, 'page', 'd', '/cart'),
(1, 4114, 'goal', 'd', 'add to cart', ''),
(1, 4278, 'split', 'd', 'red checkout form'),
(1, 4278, 'page', 'd', '/checkout'),
(1, 4278, 'goal', 'd', 'began checkout', ''),
(1, 4534, 'page', 'a', '/account'),
(1, 4600, 'page', 'e', '/fruit'),
(1, 4616, 'pixel', 'e'),
(1, 4700, 'page', 'bot', '/fruit/cherries'),
(1, 4990, 'split', 'd', 'red checkout form'),
(1, 4990, 'page', 'd', '/checkout/complete'),
(1, 4990, 'goal', 'd', 'completed checkout', 64.99),
(1, 4990, 'goal', 'd', 'order margin', 20.1),
(1, 4990, 'goal', 'd', 'margin per session', 13.06),
(1, 5002, 'page', 'e', '/fruit/pineapples'),
(1, 5174, 'page', 'e', '/fruit/cherries'),
(1, 5226, 'page', 'e', '/fruit/pears'),
(1, 5244, 'page', 'e', '/cart'),
(1, 5244, 'goal', 'e', 'add to cart', ''),
(1, 5950, 'split', 'e', 'red checkout form'),
(1, 5950, 'page', 'e', '/checkout'),
(1, 5950, 'goal', 'e', 'began checkout', ''),
(1, 6278, 'page', 'd', '/account'),
(1, 6396, 'page', 'd', '/'),
(1, 6620, 'split', 'e', 'red checkout form'),
(1, 6620, 'page', 'e', '/checkout/complete'),
(1, 6620, 'goal', 'e', 'completed checkout', 11.42),
(1, 6620, 'goal', 'e', 'order margin', 27.8),
(1, 6620, 'goal', 'e', 'margin per session', 3.17),
(1, 6988, 'page', 'b', '/fruit'),
(1, 7020, 'page', 'f', '/cheese'),
(1, 7042, 'page', 'f', '/cheese/cheddar'),
(1, 7068, 'page', 'f', '/cart'),
(1, 7068, 'goal', 'f', 'add to cart', ''),
(1, 7198, 'page', 'f', '/cheese'),
(1, 7246, 'split', 'f', 'red checkout form'),
(1, 7246, 'page', 'f', '/checkout'),
(1, 7246, 'goal', 'f', 'began checkout', ''),
(1, 7350, 'goal', 'f', 'payment failed', ''),
]
def run_clickstream(log, first=None, last=None):
value_types = {
'completed checkout': visitor.SUM,
'order margin': visitor.AVERAGE,
'margin per session': visitor.PER,
}
value_formats = {
'completed checkout': visitor.CURRENCY,
'order margin': visitor.PERCENTAGE,
'margin per session': visitor.CURRENCY,
}
browsers = {
'a': u'Chrome/666.0',
'b': u'Safari/12345',
'c': u'Firefox/infinity',
'd': u'Chrome/17',
'e': u'Opera/sucks',
'f': u'MSIE/9',
'bot': u'ScroogleBot',
'q': u'Chrome/641.1',
}
visitors = {}
def get_visitor(vid, site_id):
if vid not in visitors:
visitors[vid] = Visitor(
vid, log, site_id=site_id, buffer_writes=False)
return visitors[vid]
def set_fake_timestamp(v, ts):
def fake_timestamp(self):
return '%d.%04d' % (ts, randint(0, 9999))
v.timestamp = types.MethodType(fake_timestamp, v)
stream = test_clickstream
if first and last:
stream = stream[first:last]
elif first:
stream = stream[first:]
elif last:
stream = stream[:last]
for action in stream:
site_id = action[0]
ts = action[1]
cmd = action[2]
vid = action[3]
v = get_visitor(vid, site_id)
args = action[4:]
set_fake_timestamp(v, ts)
if cmd == 'page':
req = Request.blank(args[0])
req.user_agent = browsers[vid]
v.page(req)
elif cmd == 'pixel':
v.pixel()
elif cmd == 'goal':
goal_name = unicode(args[0])
value = args[1]
value_type = value_types.get(goal_name)
value_format = value_formats.get(goal_name)
v.goal(goal_name, value=value,
value_type=value_type,
value_format=value_format)
elif cmd == 'split':
v.split(unicode(args[0]))
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/tests/data.py",
"copies": "1",
"size": "7072",
"license": "mit",
"hash": -931529757496996900,
"line_mean": 33.4975609756,
"line_max": 73,
"alpha_frac": 0.5024038462,
"autogenerated": false,
"ratio": 2.8186528497409324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38210566959409326,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import types
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str): # noqa: E129
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except UnicodeError:
try:
value = unicode(value)
return value.encode('utf-8', 'replace')
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
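def _example_format_exception_only():
    """
    Editor's illustrative sketch (not part of the original module; like the
    rest of this file it assumes Python 2, where ``types.InstanceType`` and
    ``unicode`` exist). Returns a list with a single newline-terminated line
    such as "ValueError: invalid literal for int() ...".
    """
    try:
        int('not a number')
    except ValueError as exc:
        return format_exception_only(type(exc), exc)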
| {
"repo_name": "igogorek/allure-python",
"path": "allure-python-commons/src/_compat.py",
"copies": "1",
"size": "2789",
"license": "apache-2.0",
"hash": -1317742598575751000,
"line_mean": 33.8625,
"line_max": 81,
"alpha_frac": 0.6303334529,
"autogenerated": false,
"ratio": 4.150297619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001524390243902439,
"num_lines": 80
} |
from __future__ import absolute_import, division, print_function
import ubelt as ub
import warnings
def have_gpu(min_memory=8000):
""" Determine if we are on a machine with a good GPU """
# FIXME: HACK
gpus = gpu_info()
if not gpus:
return False
    return any(gpu['mem_total'] >= min_memory for gpu in gpus.values())
# import platform
# GPU_MACHINES = {'arisia', 'aretha'}
# # Maybe we look at nvidia-smi instead?
# hostname = platform.node()
# return hostname in GPU_MACHINES
def find_unused_gpu(min_memory=0):
"""
Finds GPU with the lowest memory usage by parsing output of nvidia-smi
python -c "from pysseg.util import gpu_util; print(gpu_util.find_unused_gpu())"
"""
gpus = gpu_info()
if gpus is None:
return None
gpu_avail_mem = {n: gpu['mem_avail'] for n, gpu in gpus.items()}
usage_order = ub.argsort(gpu_avail_mem)
gpu_num = usage_order[-1]
if gpu_avail_mem[gpu_num] < min_memory:
return None
else:
return gpu_num
def gpu_info():
"""
Parses nvidia-smi
"""
result = ub.cmd('nvidia-smi')
if result['ret'] != 0:
warnings.warn('Could not run nvidia-smi.')
return None
lines = result['out'].splitlines()
gpu_lines = []
current = None
for line in lines:
if current is None:
# Signals the start of GPU info
if line.startswith('|====='):
current = []
else:
if len(line.strip()) == 0:
# End of GPU info
break
elif line.startswith('+----'):
# Move to the next GPU
gpu_lines.append(current)
current = []
else:
current.append(line)
def parse_gpu_lines(lines):
line1 = lines[0]
line2 = lines[1]
gpu = {}
gpu['name'] = ' '.join(line1.split('|')[1].split()[1:-1])
        gpu['num'] = int(line1.split('|')[1].split()[0])
mempart = line2.split('|')[2].strip()
part1, part2 = mempart.split('/')
gpu['mem_used'] = float(part1.strip().replace('MiB', ''))
gpu['mem_total'] = float(part2.strip().replace('MiB', ''))
gpu['mem_avail'] = gpu['mem_total'] - gpu['mem_used']
return gpu
gpus = {}
for num, lines in enumerate(gpu_lines):
gpu = parse_gpu_lines(lines)
assert num == gpu['num'], (
'nums ({}, {}) do not agree. probably a parsing error'.format(num, gpu['num']))
assert num not in gpus, (
'Multiple GPUs labeled as num {}. Probably a parsing error'.format(num))
gpus[num] = gpu
return gpus
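def _example_pick_device():
    """
    Editor's illustrative sketch (not part of the original module): pick a
    device string for a hypothetical training run, falling back to the CPU
    when no GPU with at least 4 GB of free memory is reported by nvidia-smi.
    """
    gpu_num = find_unused_gpu(min_memory=4000)
    if gpu_num is None:
        return 'cpu'
    return 'gpu:{}'.format(gpu_num)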
| {
"repo_name": "Erotemic/ibeis",
"path": "ibeis/algo/verif/torch/gpu_util.py",
"copies": "1",
"size": "2701",
"license": "apache-2.0",
"hash": -2806548042339511300,
"line_mean": 29.0111111111,
"line_max": 91,
"alpha_frac": 0.5420214735,
"autogenerated": false,
"ratio": 3.5822281167108754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46242495902108754,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import urwid
from urwid.util import _target_encoding
# generic urwid helpers -------------------------------------------------------
def make_canvas(txt, attr, maxcol, fill_attr=None):
processed_txt = []
processed_attr = []
processed_cs = []
for line, line_attr in zip(txt, attr):
# filter out zero-length attrs
line_attr = [(aname, l) for aname, l in line_attr if l > 0]
diff = maxcol - len(line)
if diff > 0:
line += " "*diff
line_attr.append((fill_attr, diff))
else:
from urwid.util import rle_subseg
line = line[:maxcol]
line_attr = rle_subseg(line_attr, 0, maxcol)
from urwid.util import apply_target_encoding
encoded_line, line_cs = apply_target_encoding(line)
# line_cs contains byte counts as requested by TextCanvas, but
# line_attr still contains column counts at this point: let's fix this.
def get_byte_line_attr(line, line_attr):
i = 0
for label, column_count in line_attr:
byte_count = len(line[i:i+column_count].encode(_target_encoding))
i += column_count
yield label, byte_count
line_attr = list(get_byte_line_attr(line, line_attr))
processed_txt.append(encoded_line)
processed_attr.append(line_attr)
processed_cs.append(line_cs)
return urwid.TextCanvas(
processed_txt,
processed_attr,
processed_cs,
maxcol=maxcol)
def make_hotkey_markup(s):
import re
match = re.match(r"^([^_]*)_(.)(.*)$", s)
assert match is not None
return [
(None, match.group(1)),
("hotkey", match.group(2)),
(None, match.group(3)),
]
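# Editor's note (descriptive comment, not in the original source): for
# example, make_hotkey_markup("_Quit") returns
# [(None, ""), ("hotkey", "Q"), (None, "uit")], i.e. the character after the
# underscore is rendered with the "hotkey" attribute.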
def labelled_value(label, value):
return urwid.AttrMap(urwid.Text([
("label", label), str(value)]),
"fixed value", "fixed value")
class SelectableText(urwid.Text):
def selectable(self):
return True
def keypress(self, size, key):
return key
class SignalWrap(urwid.WidgetWrap):
def __init__(self, w, is_preemptive=False):
urwid.WidgetWrap.__init__(self, w)
self.event_listeners = []
self.is_preemptive = is_preemptive
def listen(self, mask, handler):
self.event_listeners.append((mask, handler))
def keypress(self, size, key):
result = key
if self.is_preemptive:
for mask, handler in self.event_listeners:
if mask is None or mask == key:
result = handler(self, size, key)
break
if result is not None:
result = self._w.keypress(size, key)
if result is not None and not self.is_preemptive:
for mask, handler in self.event_listeners:
if mask is None or mask == key:
return handler(self, size, key)
return result
# {{{ debugger-specific stuff
class StackFrame(urwid.FlowWidget):
def __init__(self, is_current, name, class_name, filename, line):
self.is_current = is_current
self.name = name
self.class_name = class_name
self.filename = filename
self.line = line
def selectable(self):
return True
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
maxcol = size[0]
if focus:
apfx = "focused "
else:
apfx = ""
if self.is_current:
apfx += "current "
crnt_pfx = ">> "
else:
crnt_pfx = " "
text = crnt_pfx+self.name
attr = [(apfx+"frame name", 3+len(self.name))]
if self.class_name is not None:
text += " [%s]" % self.class_name
attr.append((apfx+"frame class", len(self.class_name)+3))
loc = " %s:%d" % (self.filename, self.line)
text += loc
attr.append((apfx+"frame location", len(loc)))
return make_canvas([text], [attr], maxcol, apfx+"frame location")
def keypress(self, size, key):
return key
class BreakpointFrame(urwid.FlowWidget):
def __init__(self, is_current, filename, breakpoint):
self.is_current = is_current
self.filename = filename
self.breakpoint = breakpoint
self.line = breakpoint.line # Starts at 1
self.enabled = breakpoint.enabled
self.hits = breakpoint.hits
def selectable(self):
return True
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
maxcol = size[0]
if focus:
apfx = "focused "
else:
apfx = ""
bp_pfx = ''
if not self.enabled:
apfx += "disabled "
bp_pfx += "X"
if self.is_current:
apfx += "current "
bp_pfx += ">>"
bp_pfx = bp_pfx.ljust(3)
hits_label = 'hits' if self.hits != 1 else 'hit'
loc = " %s:%d (%s %s)" % (self.filename, self.line, self.hits, hits_label)
text = bp_pfx+loc
attr = [(apfx+"breakpoint", len(loc))]
return make_canvas([text], [attr], maxcol, apfx+"breakpoint")
def keypress(self, size, key):
return key
class SearchController(object):
def __init__(self, ui):
self.ui = ui
self.highlight_line = None
self.search_box = None
self.last_search_string = None
def cancel_highlight(self):
if self.highlight_line is not None:
self.highlight_line.set_highlight(False)
self.highlight_line = None
def cancel_search(self):
self.cancel_highlight()
self.hide_search_ui()
def hide_search_ui(self):
self.search_box = None
del self.ui.lhs_col.contents[0]
self.ui.lhs_col.set_focus(self.ui.lhs_col.widget_list[0])
def open_search_ui(self):
lhs_col = self.ui.lhs_col
if self.search_box is None:
_, self.search_start = self.ui.source.get_focus()
self.search_box = SearchBox(self)
self.search_AttrMap = urwid.AttrMap(
self.search_box, "search box")
lhs_col.item_types.insert(
0, ("flow", None))
lhs_col.widget_list.insert(0, self.search_AttrMap)
self.ui.columns.set_focus(lhs_col)
lhs_col.set_focus(self.search_AttrMap)
else:
self.ui.columns.set_focus(lhs_col)
lhs_col.set_focus(self.search_AttrMap)
#self.search_box.restart_search()
def perform_search(self, dir, s=None, start=None, update_search_start=False):
self.cancel_highlight()
# self.ui.lhs_col.set_focus(self.ui.lhs_col.widget_list[1])
if s is None:
s = self.last_search_string
if s is None:
self.ui.message("No previous search term.")
return False
else:
self.last_search_string = s
if start is None:
start = self.search_start
case_insensitive = s.lower() == s
if start > len(self.ui.source):
start = 0
i = (start+dir) % len(self.ui.source)
if i >= len(self.ui.source):
i = 0
while i != start:
sline = self.ui.source[i].text
if case_insensitive:
sline = sline.lower()
if s in sline:
sl = self.ui.source[i]
sl.set_highlight(True)
self.highlight_line = sl
self.ui.source.set_focus(i)
if update_search_start:
self.search_start = i
return True
i = (i+dir) % len(self.ui.source)
return False
class SearchBox(urwid.Edit):
def __init__(self, controller):
urwid.Edit.__init__(self, [("label", "Search: ")], "")
self.controller = controller
def restart_search(self):
from time import time
now = time()
        # Clear a stale search term: if more than 5 seconds have passed since
        # the last search activity, start from an empty box.
        if now - getattr(self, 'search_time', 0) > 5:
            self.set_edit_text("")
        self.search_time = now
def keypress(self, size, key):
result = urwid.Edit.keypress(self, size, key)
txt = self.get_edit_text()
if result is not None:
if key == "esc":
self.controller.cancel_search()
return None
elif key == "enter":
if txt:
self.controller.hide_search_ui()
self.controller.perform_search(dir=1, s=txt,
update_search_start=True)
else:
self.controller.cancel_search()
return None
else:
if self.controller.perform_search(dir=1, s=txt):
self.controller.search_AttrMap.set_attr_map({None: "search box"})
else:
self.controller.search_AttrMap.set_attr_map(
{None: "search not found"})
return result
# }}}
| {
"repo_name": "albfan/pudb",
"path": "pudb/ui_tools.py",
"copies": "1",
"size": "9197",
"license": "mit",
"hash": 8891552879544881000,
"line_mean": 27.3858024691,
"line_max": 82,
"alpha_frac": 0.534739589,
"autogenerated": false,
"ratio": 3.8130182421227197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.484775783112272,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import utool as ut
import numpy as np
import plottool_ibeis.draw_func2 as df2
from plottool_ibeis import custom_constants
#from vtool_ibeis import keypoint as ktool
#(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[viz_sv]', DEBUG=False)
ut.noinject(__name__, '[viz_sv]')
def get_blended_chip(chip1, chip2, M):
"""
warps chip1 into chip2 space
"""
import vtool_ibeis as vt
wh2 = vt.get_size(chip2)
chip1_Mt = vt.warpHomog(chip1, M, wh2)
chip2_blendM = vt.blend_images(chip1_Mt, chip2)
return chip2_blendM
#@ut.indent_func
def show_sv(chip1, chip2, kpts1, kpts2, fm, homog_tup=None, aff_tup=None,
mx=None, show_assign=True, show_lines=True, show_kpts=True,
show_aff=None, fnum=1, refine_method=None, **kwargs):
""" Visualizes spatial verification
CommandLine:
python -m vtool_ibeis.spatial_verification --test-spatially_verify_kpts --show
"""
import vtool_ibeis as vt
#import plottool_ibeis as pt
# GEt Matching chips
kpts1_m = kpts1[fm.T[0]]
kpts2_m = kpts2[fm.T[1]]
wh2 = vt.get_size(chip2)
#
# Get Affine Chips, Keypoints, Inliers
if show_aff is None:
show_aff_ = aff_tup is not None
else:
show_aff_ = show_aff
if show_aff_:
(aff_inliers, Aff) = aff_tup
chip1_At = vt.warpAffine(chip1, Aff, wh2)
#kpts1_mAt = ktool.transform_kpts(kpts1_m, Aff)
chip2_blendA = vt.blend_images(chip1_At, chip2)
#
# Get Homog Chips, Keypoints, Inliers
show_homog = homog_tup is not None
if show_homog:
(hom_inliers, H) = homog_tup
#kpts1_mHt = ktool.transform_kpts(kpts1_m, H)
chip1_Ht = vt.warpHomog(chip1, H, wh2)
chip2_blendH = vt.blend_images(chip1_Ht, chip2)
#
# Drawing settings
nRows = (show_assign) + (show_aff_) + (show_homog)
nCols1 = (show_assign) + (show_aff_) + (show_homog)
nCols2 = 2
pnum1_ = df2.get_pnum_func(nRows, nCols1)
pnum2_ = df2.get_pnum_func(nRows, nCols2)
#in_kwargs = dict(rect=True, ell_alpha=.7, eig=False, ori=True, pts=True)
#out_kwargs = dict(rect=False, ell_alpha=.3, eig=False)
in_kwargs = dict(rect=False, ell_alpha=.7, eig=False, ori=False, pts=False)
out_kwargs = dict(rect=False, ell_alpha=.7, eig=False, ori=False)
def _draw_kpts(*args, **kwargs):
if not show_kpts:
return
df2.draw_kpts2(*args, **kwargs)
def draw_inlier_kpts(kpts_m, inliers, color, H=None):
_draw_kpts(kpts_m[inliers], color=color, H=H, **in_kwargs)
if mx is not None:
_draw_kpts(kpts_m[mx:(mx + 1)], color=color, ell_linewidth=3, H=H, **in_kwargs)
def _draw_matches(px, title, inliers):
dmkwargs = dict(fs=None, title=title, all_kpts=False, draw_lines=True,
docla=True, draw_border=True, fnum=fnum, pnum=pnum1_(px), colors=df2.ORANGE)
__fm = np.vstack((inliers, inliers)).T
df2.show_chipmatch2(chip1, chip2, kpts1_m, kpts2_m, __fm, **dmkwargs)
return px + 1
from plottool_ibeis import color_funcs
colors = df2.distinct_colors(2, brightness=.95)
color1, color2 = colors[0], colors[1]
color1_dark = color_funcs.darken_rgb(color1, .2)
color2_dark = color_funcs.darken_rgb(color2, .2)
def _draw_chip(px, title, chip, inliers, kpts1_m, kpts2_m, H1=None):
if isinstance(px, tuple):
pnum = px
df2.imshow(chip, title=title, fnum=fnum, pnum=pnum)
px = pnum[2]
else:
df2.imshow(chip, title=title, fnum=fnum, pnum=pnum2_(px))
if kpts1_m is not None:
_draw_kpts(kpts1_m, color=color1_dark, H=H1, **out_kwargs)
draw_inlier_kpts(kpts1_m, inliers, color1, H=H1)
if kpts2_m is not None:
_draw_kpts(kpts2_m, color=color2_dark, **out_kwargs)
draw_inlier_kpts(kpts2_m, inliers, color2)
if kpts2_m is not None and kpts1_m is not None and show_lines:
__fm = np.vstack((inliers, inliers)).T
df2.draw_lines2(kpts1_m, kpts2_m, __fm,
color_list=[custom_constants.ORANGE], lw=2,
line_alpha=1,
H1=H1)
return px + 1
#
# Begin the drawing
df2.figure(fnum=fnum, pnum=(nRows, nCols1, 1), docla=True, doclf=True)
px = 0
if show_assign:
# Draw the Assigned -> Affine -> Homography matches
px = _draw_matches(px, '%d Assigned matches ' % len(fm), np.arange(len(fm)))
if show_aff_:
px = _draw_matches(px, '%d Initial inliers ' % len(aff_inliers), aff_inliers)
if show_homog:
if refine_method is None:
refine_method = ''
if len(refine_method) > 0:
refine_method_ = '(%s) ' % (refine_method,)
else:
refine_method_ = ''
px = _draw_matches(
px,
'%d Refined %sinliers' % (len(hom_inliers), refine_method_),
hom_inliers)
#
# Draw the Affine Transformations
px = nCols2 * show_assign
#if show_aff_ or show_homog:
if show_aff_:
#px = _draw_chip(px, 'Source', chip1, aff_inliers, kpts1_m, None)
#px = _draw_chip(px, 'Dest', chip2, aff_inliers, None, kpts2_m)
px = _draw_chip(px, 'Initial Warped', chip1_At, aff_inliers, kpts1_m, None, H1=Aff)
px = _draw_chip(px, 'Initial Blend', chip2_blendA, aff_inliers, kpts1_m, kpts2_m, H1=Aff)
#px = _draw_chip(px, 'Affine', chip1_At, aff_inliers, kpts1_mAt, None)
#px = _draw_chip(px, 'Aff Blend', chip2_blendA, aff_inliers, kpts1_mAt, kpts2_m)
pass
#
# Draw the Homography Transformation
if show_homog:
#px = _draw_chip(px, 'Source', chip1, hom_inliers, kpts1_m, None)
#px = _draw_chip(px, 'Dest', chip2, hom_inliers, None, kpts2_m)
#px = _draw_chip(px, 'Homog', chip1_Ht, hom_inliers, kpts1_mHt, None)
#px = _draw_chip(px, 'Homog Blend', chip2_blendH, hom_inliers, kpts1_mHt, kpts2_m)
px = _draw_chip(px, 'Refined Warped', chip1_Ht, hom_inliers, kpts1_m, None, H1=H)
px = _draw_chip(px, 'Refined Blend', chip2_blendH, hom_inliers, kpts1_m, kpts2_m, H1=H)
#
# Adjust subplots
def show_sv_simple(chip1, chip2, kpts1, kpts2, fm, inliers, mx=None, fnum=1, vert=None, **kwargs):
"""
CommandLine:
python -m plottool_ibeis.draw_sv --test-show_sv_simple --show
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.draw_sv import * # NOQA
>>> import vtool_ibeis as vt
>>> kpts1, kpts2, fm, aff_inliers, chip1, chip2, xy_thresh_sqrd = vt.testdata_matching_affine_inliers()
>>> inliers = aff_inliers
>>> mx = None
>>> fnum = 1
>>> vert = None # ut.get_argval('--vert', type_=bool, default=None)
>>> result = show_sv_simple(chip1, chip2, kpts1, kpts2, fm, inliers, mx, fnum, vert=vert)
>>> print(result)
>>> import plottool_ibeis as pt
>>> pt.show_if_requested()
"""
import plottool_ibeis as pt
import vtool_ibeis as vt
colors = pt.distinct_colors(2, brightness=.95)
color1, color2 = colors[0:2]
# Begin the drawing
fnum = pt.ensure_fnum(fnum)
pt.figure(fnum=fnum, pnum=(1, 1, 1), docla=True, doclf=True)
#dmkwargs = dict(fs=None, title='Inconsistent Matches', all_kpts=False, draw_lines=True,
# docla=True, draw_border=True, fnum=fnum, pnum=(1, 1, 1), colors=pt.ORANGE)
inlier_mask = vt.index_to_boolmask(inliers, maxval=len(fm))
fm_inliers = fm.compress(inlier_mask, axis=0)
fm_outliers = fm.compress(np.logical_not(inlier_mask), axis=0)
xywh1, xywh2, sf_tup = pt.show_chipmatch2(chip1, chip2, vert=vert,
modifysize=True, new_return=True)
sf1, sf2 = sf_tup
fmatch_kw = dict(ell_linewidth=2, ell_alpha=.7, line_alpha=.7)
pt.plot_fmatch(xywh1, xywh2, kpts1, kpts2, fm_inliers, colors=color1,
scale_factor1=sf1, scale_factor2=sf2, **fmatch_kw)
pt.plot_fmatch(xywh1, xywh2, kpts1, kpts2, fm_outliers, colors=color2,
scale_factor1=sf1, scale_factor2=sf2, **fmatch_kw)
if __name__ == '__main__':
"""
CommandLine:
python -m plottool_ibeis.draw_sv
python -m plottool_ibeis.draw_sv --allexamples
python -m plottool_ibeis.draw_sv --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
ut.doctest_funcs()
| {
"repo_name": "Erotemic/plottool",
"path": "plottool_ibeis/draw_sv.py",
"copies": "1",
"size": "8764",
"license": "apache-2.0",
"hash": 6867854366311585000,
"line_mean": 40.7333333333,
"line_max": 111,
"alpha_frac": 0.5870607029,
"autogenerated": false,
"ratio": 2.760314960629921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8833239321088897,
"avg_score": 0.0028272684882046876,
"num_lines": 210
} |
from __future__ import absolute_import, division, print_function
import utool as ut
import numpy as np
(print, rrr, profile) = ut.inject2(__name__)
def nx_makenode(graph, name, **attrkw):
if 'size' in attrkw:
attrkw['width'], attrkw['height'] = attrkw.pop('size')
graph.add_node(name, **attrkw)
return name
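# Minimal usage sketch for nx_makenode (illustrative; assumes networkx 2.x node access):
#   >>> import networkx as nx
#   >>> g = nx.DiGraph()
#   >>> nx_makenode(g, 'node_a', size=(100, 50), color='#41629A')
#   'node_a'
#   >>> (g.nodes['node_a']['width'], g.nodes['node_a']['height'])
#   (100, 50)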
def multidb_montage():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> multidb_montage()
"""
import ibeis
import plottool_ibeis as pt
import vtool_ibeis as vt
import numpy as np
pt.ensureqt()
dbnames = [
'PZ_Master1',
'GZ_Master1',
'humpbacks_fb',
'GIRM_Master1',
]
ibs_list = [ibeis.opendb(dbname) for dbname in dbnames]
target_num = 1000
sample_size = target_num // len(ibs_list)
aids_list = []
for ibs in ibs_list:
aids = ibs.sample_annots_general(
minqual='good', sample_size=sample_size)
aids_list.append(aids)
print(ut.depth_profile(aids_list))
chip_lists = []
for ibs, aids in zip(ibs_list, aids_list):
annots = ibs.annots(aids)
chip_lists.append(annots.chips)
chips = ut.flatten(chip_lists)
np.random.shuffle(chips)
widescreen_ratio = 16 / 9
ratio = ut.PHI
ratio = widescreen_ratio
fpath = ut.get_argval('--save', type_=str, default='montage.jpg')
#height = 6000
width = 6000
#width = int(height * ratio)
height = int(width / ratio)
dsize = (width, height)
dst = vt.montage(chips, dsize)
vt.imwrite(fpath, dst)
if ut.get_argflag('--show'):
pt.imshow(dst)
def featweight_fig():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw featweight_fig --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> featweight_fig()
>>> ut.show_if_requested()
"""
# ENABLE_DOCTEST
import ibeis
# import plottool_ibeis as pt
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
from ibeis.core_annots import gen_featweight_worker
#test_featweight_worker()
# ibs = ibeis.opendb(defaultdb='GZ_Master1')
# aid = ut.get_argval('--aid', type_=list, default=2810)
ibs = ibeis.opendb(defaultdb='PZ_MTEST')
aid = ut.get_argval('--aid', type_=int, default=1)
depc = ibs.depc
aids = [aid]
assert all(ibs.db.rows_exist('annotations', aids))
config = {'dim_size': 450, 'resize_dim': 'area', 'smooth_thresh': 30,
'smooth_ksize': 30}
probchip = depc.get('probchip', aids, 'img', config=config, recompute=True)[0]
chipsize = depc.get('chips', aids, ('width', 'height'), config=config)[0]
kpts = depc.get('feat', aids, 'kpts', config=config)[0]
tup = (kpts, probchip, chipsize)
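    # gen_featweight_worker presumably weights each keypoint by the foreground
    # probability sampled from probchip beneath it, so the weights land in
    # [0, 1] (checked by the assert below).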
weights = gen_featweight_worker(tup)
assert np.all(weights <= 1.0), 'weights cannot be greater than 1'
chip = depc.get('chips', aids, 'img', config=config)[0]
ut.quit_if_noshow()
import plottool_ibeis as pt
fnum = 1
pnum_ = pt.make_pnum_nextgen(1, 3)
pt.figure(fnum=fnum, doclf=True)
pt.imshow(chip, pnum=pnum_(0), fnum=fnum)
pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
pt.imshow(chip, pnum=pnum_(1), fnum=fnum)
color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
color_list
# cb = pt.colorbar(weights, color_list)
# cb.set_label('featweights')
def simple_vsone_matches():
"""
CommandLine:
python -m ibeis.scripts.specialdraw simple_vsone_matches --show \
--db GZ_Master1 --aids=2811,2810
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> simple_vsone_matches()
>>> ut.show_if_requested()
"""
import ibeis
# import plottool_ibeis as pt
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
ibs = ibeis.opendb(defaultdb='GZ_Master1')
aids = ut.get_argval('--aids', type_=list, default=[2811, 2810])
assert len(aids) == 2
assert all(ibs.db.rows_exist('annotations', aids))
aid1, aid2 = aids
infr = ibeis.AnnotInference(ibs=ibs, aids=aids)
edges = [(aid1, aid2)]
match = infr._exec_pairwise_match(edges)[0]
ut.quit_if_noshow()
import plottool_ibeis as pt
pt.figure(fnum=1, doclf=True)
match.show(heatmask=True, vert=False, modifysize=True, show_ell=False,
show_lines=False, show_ori=False)
def double_depcache_graph():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw double_depcache_graph --show --testmode
python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/ --diskshow --figsize=8,20 --dpi=220 --testmode --show --clipwhite
python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/ --diskshow --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=.5
python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/ --diskshow --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=5
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> result = double_depcache_graph()
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import networkx as nx
import plottool_ibeis as pt
pt.ensureqt()
# pt.plt.xkcd()
ibs = ibeis.opendb('testdb1')
reduced = True
implicit = True
annot_graph = ibs.depc_annot.make_graph(reduced=reduced, implicit=implicit)
image_graph = ibs.depc_image.make_graph(reduced=reduced, implicit=implicit)
to_rename = ut.isect(image_graph.nodes(), annot_graph.nodes())
nx.relabel_nodes(annot_graph, {x: 'annot_' + x for x in to_rename}, copy=False)
nx.relabel_nodes(image_graph, {x: 'image_' + x for x in to_rename}, copy=False)
graph = nx.compose_all([image_graph, annot_graph])
#graph = nx.union_all([image_graph, annot_graph], rename=('image', 'annot'))
# userdecision = nx_makenode(graph, 'user decision', shape='rect', color=pt.DARK_YELLOW, style='diagonals')
# userdecision = nx_makenode(graph, 'user decision', shape='circle', color=pt.DARK_YELLOW)
userdecision = nx_makenode(graph, 'User decision', shape='rect',
#width=100, height=100,
color=pt.YELLOW, style='diagonals')
#longcat = True
longcat = False
#edge = ('feat', 'neighbor_index')
#data = graph.get_edge_data(*edge)[0]
#print('data = %r' % (data,))
#graph.remove_edge(*edge)
## hack
#graph.add_edge('featweight', 'neighbor_index', **data)
graph.add_edge('detections', userdecision, constraint=longcat, color=pt.PINK)
graph.add_edge(userdecision, 'annotations', constraint=longcat, color=pt.PINK)
# graph.add_edge(userdecision, 'annotations', implicit=True, color=[0, 0, 0])
if not longcat:
pass
#graph.add_edge('images', 'annotations', style='invis')
#graph.add_edge('thumbnails', 'annotations', style='invis')
#graph.add_edge('thumbnails', userdecision, style='invis')
graph.remove_node('Has_Notch')
graph.remove_node('annotmask')
layoutkw = {
'ranksep': 5,
'nodesep': 5,
'dpi': 96,
# 'nodesep': 1,
}
ns = 1000
ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
ut.nx_set_default_node_attributes(graph, 'fontname', 'Ubuntu')
ut.nx_set_default_node_attributes(graph, 'style', 'filled')
ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
ut.nx_set_default_node_attributes(graph, 'height', ns * (1 / ut.PHI))
#for u, v, d in graph.edge(data=True):
    for u, vkd in graph.adj.items():  # .adj aliases the old .edge and also works on networkx 2.x
for v, dk in vkd.items():
for k, d in dk.items():
localid = d.get('local_input_id')
if localid:
# d['headlabel'] = localid
if localid not in ['1']:
d['taillabel'] = localid
#d['label'] = localid
if d.get('taillabel') in {'1'}:
del d['taillabel']
node_alias = {
'chips': 'Chip',
'images': 'Image',
'feat': 'Feat',
'featweight': 'Feat Weights',
'thumbnails': 'Thumbnail',
'detections': 'Detections',
'annotations': 'Annotation',
'Notch_Tips': 'Notch Tips',
'probchip': 'Prob Chip',
        'Cropped_Chips': 'Cropped Chip',
'Trailing_Edge': 'Trailing\nEdge',
'Block_Curvature': 'Block\nCurvature',
# 'BC_DTW': 'block curvature /\n dynamic time warp',
'BC_DTW': 'DTW Distance',
'vsone': 'Hots vsone',
'feat_neighbs': 'Nearest\nNeighbors',
'neighbor_index': 'Neighbor\nIndex',
'vsmany': 'Hots vsmany',
'annot_labeler': 'Annot Labeler',
'labeler': 'Labeler',
'localizations': 'Localizations',
'classifier': 'Classifier',
'sver': 'Spatial\nVerification',
'Classifier': 'Existence',
'image_labeler': 'Image Labeler',
}
node_alias = {
'Classifier': 'existence',
'feat_neighbs': 'neighbors',
'sver': 'spatial_verification',
'Cropped_Chips': 'cropped_chip',
'BC_DTW': 'dtw_distance',
'Block_Curvature': 'curvature',
'Trailing_Edge': 'trailing_edge',
'Notch_Tips': 'notch_tips',
'thumbnails': 'thumbnail',
'images': 'image',
'annotations': 'annotation',
'chips': 'chip',
#userdecision: 'User de'
}
node_alias = ut.delete_dict_keys(node_alias, ut.setdiff(node_alias.keys(),
graph.nodes()))
nx.relabel_nodes(graph, node_alias, copy=False)
fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
#pt.gca().set_aspect('equal')
#pt.figure()
pt.show_nx(graph, layoutkw=layoutkw, fontkw=fontkw)
pt.zoom_factory()
def lighten_hex(hexcolor, amount):
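    """Lighten an '#rrggbb' color by `amount` via plottool's lighten_rgb (returns an RGB tuple)."""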
import plottool_ibeis as pt
import matplotlib.colors as colors
return pt.color_funcs.lighten_rgb(colors.hex2color(hexcolor), amount)
def general_identify_flow():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw general_identify_flow --show --save pairsim.png --dpi=100 --diskshow --clipwhite
python -m ibeis.scripts.specialdraw general_identify_flow --dpi=200 --diskshow --clipwhite --dpath ~/latex/cand/ --figsize=20,10 --save figures4/pairprob.png --arrow-width=2.0
Example:
>>> # SCRIPT
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> general_identify_flow()
>>> ut.quit_if_noshow()
>>> ut.show_if_requested()
"""
import networkx as nx
import plottool_ibeis as pt
pt.ensureqt()
# pt.plt.xkcd()
graph = nx.DiGraph()
def makecluster(name, num, **attrkw):
        return [nx_makenode(graph, name + str(n), **attrkw) for n in range(num)]
def add_edge2(u, v, *args, **kwargs):
v = ut.ensure_iterable(v)
u = ut.ensure_iterable(u)
for _u, _v in ut.product(u, v):
graph.add_edge(_u, _v, *args, **kwargs)
# *** Primary color:
p_shade2 = '#41629A'
# *** Secondary color
s1_shade2 = '#E88B53'
# *** Secondary color
s2_shade2 = '#36977F'
# *** Complement color
c_shade2 = '#E8B353'
ns = 512
ut.inject_func_as_method(graph, nx_makenode)
annot1_color = p_shade2
annot2_color = s1_shade2
#annot1_color2 = pt.color_funcs.lighten_rgb(colors.hex2color(annot1_color), .01)
annot1 = graph.nx_makenode('Annotation X', width=ns, height=ns, groupid='annot', color=annot1_color)
annot2 = graph.nx_makenode('Annotation Y', width=ns, height=ns, groupid='annot', color=annot2_color)
featX = graph.nx_makenode('Features X', size=(ns / 1.2, ns / 2), groupid='feats', color=lighten_hex(annot1_color, .1))
featY = graph.nx_makenode('Features Y', size=(ns / 1.2, ns / 2), groupid='feats', color=lighten_hex(annot2_color, .1))
#'#4771B3')
global_pairvec = graph.nx_makenode('Global similarity\n(viewpoint, quality, ...)', width=ns * ut.PHI * 1.2, color=s2_shade2)
findnn = graph.nx_makenode('Find correspondences\n(nearest neighbors)', shape='ellipse', color=c_shade2)
local_pairvec = graph.nx_makenode('Local similarities\n(LNBNN, spatial error, ...)',
size=(ns * 2.2, ns), color=lighten_hex(c_shade2, .1))
agglocal = graph.nx_makenode('Aggregate', size=(ns / 1.1, ns / 2), shape='ellipse', color=lighten_hex(c_shade2, .2))
catvecs = graph.nx_makenode('Concatenate', size=(ns / 1.1, ns / 2), shape='ellipse', color=lighten_hex(s2_shade2, .1))
pairvec = graph.nx_makenode('Vector of\npairwise similarities', color=lighten_hex(s2_shade2, .2))
classifier = graph.nx_makenode('Classifier\n(SVM/RF/DNN)', color=lighten_hex(s2_shade2, .3))
prob = graph.nx_makenode('Matching Probability\n(same individual given\nsimilar viewpoint)', color=lighten_hex(s2_shade2, .4))
graph.add_edge(annot1, global_pairvec)
graph.add_edge(annot2, global_pairvec)
add_edge2(annot1, featX)
add_edge2(annot2, featY)
add_edge2(featX, findnn)
add_edge2(featY, findnn)
add_edge2(findnn, local_pairvec)
graph.add_edge(local_pairvec, agglocal, constraint=True)
graph.add_edge(agglocal, catvecs, constraint=False)
graph.add_edge(global_pairvec, catvecs)
graph.add_edge(catvecs, pairvec)
# graph.add_edge(annot1, classifier, style='invis')
# graph.add_edge(pairvec, classifier , constraint=False)
graph.add_edge(pairvec, classifier)
graph.add_edge(classifier, prob)
ut.nx_set_default_node_attributes(graph, 'shape', 'rect')
#ut.nx_set_default_node_attributes(graph, 'fillcolor', nx.get_node_attributes(graph, 'color'))
#ut.nx_set_default_node_attributes(graph, 'style', 'rounded')
ut.nx_set_default_node_attributes(graph, 'style', 'filled,rounded')
ut.nx_set_default_node_attributes(graph, 'fixedsize', 'true')
ut.nx_set_default_node_attributes(graph, 'xlabel', nx.get_node_attributes(graph, 'label'))
ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
ut.nx_set_default_node_attributes(graph, 'height', ns)
ut.nx_set_default_node_attributes(graph, 'regular', False)
#font = 'MonoDyslexic'
#font = 'Mono_Dyslexic'
font = 'Ubuntu'
ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
ut.nx_set_default_node_attributes(graph, 'fontname', font)
#ut.nx_delete_node_attr(graph, 'width')
#ut.nx_delete_node_attr(graph, 'height')
#ut.nx_delete_node_attr(graph, 'fixedsize')
#ut.nx_delete_node_attr(graph, 'style')
#ut.nx_delete_node_attr(graph, 'regular')
#ut.nx_delete_node_attr(graph, 'shape')
# node_dict = ut.nx_node_dict(graph)
#node_dict[annot1]['label'] = "<f0> left|<f1> mid\ dle|<f2> right"
#node_dict[annot2]['label'] = ut.codeblock(
# '''
# <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
# <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
# </TABLE>>
# ''')
#node_dict[annot1]['label'] = ut.codeblock(
# '''
# <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
# <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
# </TABLE>>
# ''')
#node_dict[annot1]['shape'] = 'none'
#node_dict[annot1]['margin'] = '0'
layoutkw = {
'forcelabels': True,
'prog': 'dot',
'rankdir': 'LR',
# 'splines': 'curved',
'splines': 'line',
'samplepoints': 20,
'showboxes': 1,
# 'splines': 'polyline',
#'splines': 'spline',
'sep': 100 / 72,
'nodesep': 300 / 72,
'ranksep': 300 / 72,
#'inputscale': 72,
# 'inputscale': 1,
# 'dpi': 72,
# 'concentrate': 'true', # merges edge lines
# 'splines': 'ortho',
# 'aspect': 1,
# 'ratio': 'compress',
# 'size': '5,4000',
# 'rank': 'max',
}
#fontkw = dict(fontfamilty='sans-serif', fontweight='normal', fontsize=12)
#fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
#fontkw = dict(fontname='Ubuntu', fontweight='light', fontsize=20)
fontkw = dict(fontname=font, fontweight='light', fontsize=12)
#prop = fm.FontProperties(fname='/usr/share/fonts/truetype/groovygh.ttf')
pt.show_nx(graph, layout='agraph', layoutkw=layoutkw, **fontkw)
pt.zoom_factory()
def graphcut_flow():
r"""
Returns:
?: name
CommandLine:
python -m ibeis.scripts.specialdraw graphcut_flow --show
python -m ibeis.scripts.specialdraw graphcut_flow --show --save cutflow.png --diskshow --clipwhite
python -m ibeis.scripts.specialdraw graphcut_flow --save figures4/cutiden.png --diskshow --clipwhite --dpath ~/latex/crall-candidacy-2015/ --figsize=24,10 --arrow-width=2.0
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> graphcut_flow()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import plottool_ibeis as pt
pt.ensureqt()
import networkx as nx
# pt.plt.xkcd()
graph = nx.DiGraph()
def makecluster(name, num, **attrkw):
return [nx_makenode(graph, name + str(n), **attrkw) for n in range(num)]
def add_edge2(u, v, *args, **kwargs):
v = ut.ensure_iterable(v)
u = ut.ensure_iterable(u)
for _u, _v in ut.product(u, v):
graph.add_edge(_u, _v, *args, **kwargs)
ns = 512
# *** Primary color:
p_shade2 = '#41629A'
# *** Secondary color
s1_shade2 = '#E88B53'
# *** Secondary color
s2_shade2 = '#36977F'
# *** Complement color
c_shade2 = '#E8B353'
annot1 = nx_makenode(graph, 'Unlabeled\nannotations\n(query)', width=ns, height=ns,
groupid='annot', color=p_shade2)
annot2 = nx_makenode(graph, 'Labeled\nannotations\n(database)', width=ns, height=ns,
groupid='annot', color=s1_shade2)
occurprob = nx_makenode(graph, 'Dense \nprobabilities', color=lighten_hex(p_shade2, .1))
cacheprob = nx_makenode(graph, 'Cached \nprobabilities', color=lighten_hex(s1_shade2, .1))
sparseprob = nx_makenode(graph, 'Sparse\nprobabilities', color=lighten_hex(c_shade2, .1))
graph.add_edge(annot1, occurprob)
graph.add_edge(annot1, sparseprob)
graph.add_edge(annot2, sparseprob)
graph.add_edge(annot2, cacheprob)
matchgraph = nx_makenode(graph, 'Graph of\npotential matches', color=lighten_hex(s2_shade2, .1))
cutalgo = nx_makenode(graph, 'Graph cut algorithm', color=lighten_hex(s2_shade2, .2), shape='ellipse')
cc_names = nx_makenode(graph, 'Identifications,\n splits, and merges are\nconnected components', color=lighten_hex(s2_shade2, .3))
graph.add_edge(occurprob, matchgraph)
graph.add_edge(sparseprob, matchgraph)
graph.add_edge(cacheprob, matchgraph)
graph.add_edge(matchgraph, cutalgo)
graph.add_edge(cutalgo, cc_names)
ut.nx_set_default_node_attributes(graph, 'shape', 'rect')
ut.nx_set_default_node_attributes(graph, 'style', 'filled,rounded')
ut.nx_set_default_node_attributes(graph, 'fixedsize', 'true')
ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
ut.nx_set_default_node_attributes(graph, 'height', ns * (1 / ut.PHI))
ut.nx_set_default_node_attributes(graph, 'regular', False)
layoutkw = {
'prog': 'dot',
'rankdir': 'LR',
'splines': 'line',
'sep': 100 / 72,
'nodesep': 300 / 72,
'ranksep': 300 / 72,
}
fontkw = dict(fontname='Ubuntu', fontweight='light', fontsize=14)
pt.show_nx(graph, layout='agraph', layoutkw=layoutkw, **fontkw)
pt.zoom_factory()
def merge_viewpoint_graph():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw merge_viewpoint_graph --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> result = merge_viewpoint_graph()
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import plottool_ibeis as pt
import ibeis
import networkx as nx
defaultdb = 'PZ_Master1'
ibs = ibeis.opendb(defaultdb=defaultdb)
#nids = None
aids = ibs.get_name_aids(4875)
ibs.print_annot_stats(aids)
left_aids = ibs.filter_annots_general(aids, view='left')[0:3]
right_aids = ibs.filter_annots_general(aids, view='right')
right_aids = list(set(right_aids) - {14517})[0:3]
back = ibs.filter_annots_general(aids, view='back')[0:4]
backleft = ibs.filter_annots_general(aids, view='backleft')[0:4]
backright = ibs.filter_annots_general(aids, view='backright')[0:4]
right_graph = nx.DiGraph(ut.upper_diag_self_prodx(right_aids))
left_graph = nx.DiGraph(ut.upper_diag_self_prodx(left_aids))
back_edges = [
tuple([back[0], backright[0]][::1]),
tuple([back[0], backleft[0]][::1]),
]
back_graph = nx.DiGraph(back_edges)
# Let the graph be a bit smaller
    right_graph.adj[right_aids[1]][right_aids[2]]['constraint'] = ut.get_argflag('--constraint')
    left_graph.adj[left_aids[1]][left_aids[2]]['constraint'] = ut.get_argflag('--constraint')
#right_graph = right_graph.to_undirected().to_directed()
#left_graph = left_graph.to_undirected().to_directed()
nx.set_node_attributes(right_graph, name='groupid', values='right')
nx.set_node_attributes(left_graph, name='groupid', values='left')
#nx.set_node_attributes(right_graph, name='scale', values=.2)
#nx.set_node_attributes(left_graph, name='scale', values=.2)
# node_dict[back[0]]['scale'] = 2.3
nx.set_node_attributes(back_graph, name='groupid', values='back')
view_graph = nx.compose_all([left_graph, back_graph, right_graph])
view_graph.add_edges_from([
[backright[0], right_aids[0]][::-1],
[backleft[0], left_aids[0]][::-1],
])
pt.ensureqt()
    graph = view_graph  # NOQA
#graph = graph.to_undirected()
nx.set_edge_attributes(graph, name='color', values=pt.DARK_ORANGE[0:3])
#nx.set_edge_attributes(graph, name='color', values=pt.BLACK)
nx.set_edge_attributes(graph, name='color', values={edge: pt.LIGHT_BLUE[0:3] for edge in back_edges})
#pt.close_all_figures();
from ibeis.viz import viz_graph
layoutkw = {
'nodesep': 1,
}
viz_graph.viz_netx_chipgraph(ibs, graph, with_images=1, prog='dot',
augment_graph=False, layoutkw=layoutkw)
if False:
"""
#view_graph = left_graph
pt.close_all_figures(); viz_netx_chipgraph(ibs, view_graph, with_images=0, prog='neato')
#viz_netx_chipgraph(ibs, view_graph, layout='pydot', with_images=False)
#back_graph = make_name_graph_interaction(ibs, aids=back, with_all=False)
aids = left_aids + back + backleft + backright + right_aids
for aid, chip in zip(aids, ibs.get_annot_chips(aids)):
fpath = ut.truepath('~/slides/merge/aid_%d.jpg' % (aid,))
vt.imwrite(fpath, vt.resize_to_maxdims(chip, (400, 400)))
ut.copy_files_to(, )
aids = ibs.filterannots_by_tags(ibs.get_valid_aids(),
dict(has_any_annotmatch='splitcase'))
aid1 = ibs.group_annots_by_name_dict(aids)[252]
aid2 = ibs.group_annots_by_name_dict(aids)[6791]
aids1 = ibs.get_annot_groundtruth(aid1)[0][0:4]
aids2 = ibs.get_annot_groundtruth(aid2)[0]
make_name_graph_interaction(ibs, aids=aids1 + aids2, with_all=False)
        ut.ensuredir(ut.truepath('~/slides/split/'))
for aid, chip in zip(aids, ibs.get_annot_chips(aids)):
fpath = ut.truepath('~/slides/merge/aidA_%d.jpg' % (aid,))
vt.imwrite(fpath, vt.resize_to_maxdims(chip, (400, 400)))
"""
pass
def setcover_example():
"""
CommandLine:
python -m ibeis.scripts.specialdraw setcover_example --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> result = setcover_example()
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import plottool_ibeis as pt
from ibeis.viz import viz_graph
import networkx as nx
pt.ensureqt()
ibs = ibeis.opendb(defaultdb='testdb2')
if False:
# Select a good set
aids = ibs.get_name_aids(ibs.get_valid_nids())
# ibeis.testdata_aids('testdb2', a='default:mingt=2')
aids = [a for a in aids if len(a) > 1]
for a in aids:
print(ut.repr3(ibs.get_annot_stats_dict(a)))
print(aids[-2])
#aids = [78, 79, 80, 81, 88, 91]
aids = [78, 79, 81, 88, 91]
qreq_ = ibs.depc.new_request('vsone', aids, aids)
cm_list = qreq_.execute()
from ibeis.algo.hots import orig_graph_iden
infr = orig_graph_iden.OrigAnnotInference(cm_list)
unique_aids, prob_annots = infr.make_prob_annots()
import numpy as np
print(ut.hz_str('prob_annots = ', ut.repr2(prob_annots, precision=2, max_line_width=140, suppress_small=True)))
# ut.setcover_greedy(candidate_sets_dict)
max_weight = 3
prob_annots[np.diag_indices(len(prob_annots))] = np.inf
prob_annots = prob_annots
thresh_points = np.sort(prob_annots[np.isfinite(prob_annots)])
# probably not the best way to go about searching for these thresholds
# but when you have a hammer...
if False:
quant = sorted(np.diff(thresh_points))[(len(thresh_points) - 1) // 2 ]
candset = {point: thresh_points[np.abs(thresh_points - point) < quant] for point in thresh_points}
check_thresholds = len(aids) * 2
        thresh_points2 = np.array(list(ut.setcover_greedy(candset, max_weight=check_thresholds).keys()))
thresh_points = thresh_points2
# pt.plot(sorted(thresh_points), 'rx')
# pt.plot(sorted(thresh_points2), 'o')
# prob_annots = prob_annots.T
# thresh_start = np.mean(thresh_points)
current_idxs = []
current_covers = []
current_val = np.inf
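    # Sweep candidate thresholds: at each threshold an annotation "covers" the
    # annotations whose match probability to it meets the threshold, then an ILP
    # set cover selects at most max_weight exemplars; keep the best cover found.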
for thresh in thresh_points:
covering_sets = [np.where(row >= thresh)[0] for row in (prob_annots)]
candidate_sets_dict = {ax: others for ax, others in enumerate(covering_sets)}
soln_cover = ut.setcover_ilp(candidate_sets_dict, max_weight=max_weight)
exemplar_idxs = list(soln_cover.keys())
soln_weight = len(exemplar_idxs)
val = max_weight - soln_weight
# print('val = %r' % (val,))
# print('soln_weight = %r' % (soln_weight,))
if val < current_val:
current_val = val
current_covers = covering_sets
current_idxs = exemplar_idxs
exemplars = ut.take(aids, current_idxs)
ensure_edges = [(aids[ax], aids[ax2]) for ax, other_xs in enumerate(current_covers) for ax2 in other_xs]
graph = viz_graph.make_netx_graph_from_aid_groups(
ibs, [aids], allow_directed=True, ensure_edges=ensure_edges,
temp_nids=[1] * len(aids))
viz_graph.ensure_node_images(ibs, graph)
nx.set_node_attributes(graph, name='framewidth', values=False)
nx.set_node_attributes(graph, name='framewidth', values={aid: 4.0 for aid in exemplars})
nx.set_edge_attributes(graph, name='color', values=pt.ORANGE)
nx.set_node_attributes(graph, name='color', values=pt.LIGHT_BLUE)
nx.set_node_attributes(graph, name='shape', values='rect')
layoutkw = {
'sep' : 1 / 10,
'prog': 'neato',
'overlap': 'false',
#'splines': 'ortho',
'splines': 'spline',
}
pt.show_nx(graph, layout='agraph', layoutkw=layoutkw)
pt.zoom_factory()
def k_redun_demo():
r"""
python -m ibeis.scripts.specialdraw k_redun_demo --save=kredun.png
python -m ibeis.scripts.specialdraw k_redun_demo --show
Example:
>>> # SCRIPT
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> k_redun_demo()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import plottool_ibeis as pt
from ibeis.viz import viz_graph
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP
# import networkx as nx
pt.ensureqt()
ibs = ibeis.opendb(defaultdb='PZ_Master1')
nid2_aid = {
6612: [7664, 7462, 7522],
6625: [7746, 7383, 7390, 7477, 7376, 7579],
6630: [7586, 7377, 7464, 7478],
}
aids = ut.flatten(nid2_aid.values())
infr = ibeis.AnnotInference(ibs=ibs, aids=aids, autoinit=True)
for name_aids in nid2_aid.values():
for edge in ut.itertwo(name_aids):
infr.add_feedback(edge, POSTV)
infr.add_feedback((7664, 7522), POSTV)
infr.add_feedback((7746, 7477), POSTV)
infr.add_feedback((7383, 7376), POSTV)
# infr.add_feedback((7664, 7383), NEGTV)
# infr.add_feedback((7462, 7746), NEGTV)
# infr.add_feedback((7464, 7376), NEGTV)
# Adjust between new and old variable names
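    # For visualization, CUT_WEIGHT encodes each review decision as an edge
    # weight: positive -> 1.0, negative -> 0.0, incomparable -> 0.5.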
infr.set_edge_attrs('evidence_decision', infr.get_edge_attrs('evidence_decision'))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', POSTV), [1.0]))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', NEGTV), [0.0]))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', INCMP), [0.5]))
infr.initialize_visual_node_attrs()
infr.update_node_image_attribute(use_image=True)
infr.update_visual_attrs(use_image=True, show_unreviewed_edges=True,
groupby='name_label',
splines='spline',
show_cand=False)
infr.set_edge_attrs('linewidth', 2)
# infr.set_edge_attrs('linewidth', ut.dzip(infr.get_edges_where_eq('evidence_decision', POSTV), [4]))
# infr.set_edge_attrs('color', pt.BLACK)
infr.set_edge_attrs('alpha', .7)
viz_graph.ensure_node_images(ibs, infr.graph)
infr.show(use_image=True, update_attrs=False)
def graph_iden_cut_demo():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw graph_iden_cut_demo --show --precut
python -m ibeis.scripts.specialdraw graph_iden_cut_demo --show --postcut
python -m ibeis.scripts.specialdraw graph_iden_cut_demo --precut --save=precut.png --clipwhite
python -m ibeis.scripts.specialdraw graph_iden_cut_demo --postcut --save=postcut.png --clipwhite
Example:
>>> # SCRIPT
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> graph_iden_cut_demo()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import plottool_ibeis as pt
from ibeis.viz import viz_graph
# import networkx as nx
pt.ensureqt()
ibs = ibeis.opendb(defaultdb='PZ_Master1')
nid2_aid = {
#4880: [3690, 3696, 3703, 3706, 3712, 3721],
4880: [3690, 3696, 3703],
6537: [3739],
# 6653: [7671],
6610: [7566, 7408],
#6612: [7664, 7462, 7522],
#6624: [7465, 7360],
#6625: [7746, 7383, 7390, 7477, 7376, 7579],
6630: [7586, 7377, 7464, 7478],
#6677: [7500]
}
if False:
# Find extra example
annots = ibs.annots(ibs.filter_annots_general(view='right', require_timestamp=True, min_pername=2))
unique_nids = ut.unique(annots.nids)
nid_to_annots = ut.dzip(unique_nids, map(ibs.annots, ibs.get_name_aids(unique_nids)))
# nid_to_annots = annots.group_items(annots.nids)
right_nids = ut.argsort(ut.map_dict_vals(len, nid_to_annots))[::-1]
right_annots = nid_to_annots[right_nids[1]]
inter = pt.interact_multi_image.MultiImageInteraction(right_annots.chips)
inter.start()
inter = pt.interact_multi_image.MultiImageInteraction(ibs.annots([16228, 16257, 16273]).chips)
inter.start()
ut.take(right_annots.aids, [2, 6, 10])
nid2_aid.update({4429: [16228, 16257, 16273]})
aids = ut.flatten(nid2_aid.values())
postcut = ut.get_argflag('--postcut')
aids_list = ibs.group_annots_by_name(aids)[0]
infr = ibeis.AnnotInference(ibs=ibs, aids=ut.flatten(aids_list),
autoinit=True)
if postcut:
infr.init_test_mode2(enable_autoreview=False)
node_to_label = infr.get_node_attrs('orig_name_label')
label_to_nodes = ut.group_items(node_to_label.keys(),
node_to_label.values())
# cliques
new_edges = []
for label, nodes in label_to_nodes.items():
for edge in ut.combinations(nodes, 2):
if not infr.has_edge(edge):
new_edges.append(infr.e_(*edge))
# negative edges
import random
rng = random.Random(0)
for aids1, aids2 in ut.combinations(nid2_aid.values(), 2):
aid1 = rng.choice(aids1)
aid2 = rng.choice(aids2)
new_edges.append(infr.e_(aid1, aid2))
infr.graph.add_edges_from(new_edges)
infr.apply_edge_truth(new_edges)
for edge in new_edges:
infr.queue.push(edge, -1)
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP
try:
while True:
edge, priority = infr.pop()
feedback = infr.request_user_review(edge)
infr.add_feedback(edge=edge, **feedback)
except StopIteration:
pass
else:
infr.ensure_full()
# Adjust between new and old variable names
infr.set_edge_attrs('evidence_decision', infr.get_edge_attrs('evidence_decision'))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', POSTV), [1.0]))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', NEGTV), [0.0]))
infr.set_edge_attrs(infr.CUT_WEIGHT_KEY, ut.dzip(infr.get_edges_where_eq('evidence_decision', INCMP), [0.5]))
infr.initialize_visual_node_attrs()
infr.update_node_image_attribute(use_image=True)
infr.update_visual_attrs(use_image=True, show_unreviewed_edges=True,
groupby='name_label', splines='spline',
show_cand=not postcut)
infr.set_edge_attrs('linewidth', 2)
infr.set_edge_attrs('linewidth', ut.dzip(infr.get_edges_where_eq('evidence_decision', POSTV), [4]))
if not postcut:
infr.set_edge_attrs('color', pt.BLACK)
infr.set_edge_attrs('alpha', .7)
if not postcut:
infr.set_node_attrs('framewidth', 0)
viz_graph.ensure_node_images(ibs, infr.graph)
infr.show(use_image=True, update_attrs=False)
def show_id_graph():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw show_id_graph --show
python -m ibeis.scripts.specialdraw show_id_graph --show
Example:
>>> # SCRIPT
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> show_id_graph()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import plottool_ibeis as pt
# import networkx as nx
pt.ensureqt()
# ibs = ibeis.opendb(defaultdb='PZ_PB_RF_TRAIN')
ibs = ibeis.opendb(defaultdb='PZ_Master1')
parent_infr = ibeis.AnnotInference(ibs=ibs, aids='all')
parent_infr.reset_feedback('staging', apply=True)
edgecat = parent_infr.categorize_edges()
MAX_SIZE = 6
MAX_NUM = 6
pccs = []
infr = parent_infr
if ibs.dbname == 'PZ_Master1':
incomp_pcc = {5652, 5197, 4244}
force_incomp_edge = [(5652, 5197)]
pccs.append(incomp_pcc)
else:
pccs = []
force_incomp_edge = []
if len(pccs) == 0:
for (n1, n2), es in edgecat['notcomp'].items():
if n1 == n2:
cc = parent_infr.pos_graph._ccs[n1]
pccs.append(cc)
break
if len(pccs) == 0:
for cc in parent_infr.positive_components():
a = ibs.annots(cc)
if any(t is not None and 'left' not in t for t in a.yaw_texts):
# print(a.yaw_texts)
if any(t is not None and 'left' in t for t in a.yaw_texts):
if any(t is not None and 'right' in t for t in a.yaw_texts):
print(a.yaw_texts)
if len(cc) <= MAX_SIZE:
pccs.append(cc)
# break
if len(pccs) == 0:
for (n1, n2), es in edgecat['notcomp'].items():
cc1 = parent_infr.pos_graph._ccs[n1]
cc2 = parent_infr.pos_graph._ccs[n2]
# s1 = len(parent_infr.pos_graph._ccs[n1])
# s2 = len(parent_infr.pos_graph._ccs[n2])
# if s1 in {3} and s2 in {3}:
# print(annots1.yaw_texts)
# print(annots2.yaw_texts)
pccs.append(frozenset(cc1))
pccs.append(frozenset(cc2))
break
MAX_SIZE += len(pccs) - 1
for cc in parent_infr.positive_components():
cc = frozenset(cc)
if len(cc) < MAX_SIZE:
if cc not in pccs:
if len(cc) not in set(map(len, pccs)):
pccs.append(cc)
if len(pccs) >= MAX_NUM:
break
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV # NOQA
subinfr = parent_infr.subgraph(ut.flatten(pccs))
subinfr._viz_image_config['thumbsize'] = 700
subinfr._viz_image_config['grow'] = True
infr = subinfr
infr.apply_nondynamic_update()
# infr.ensure_mst()
infr.ensure_mst(label='orig_name_label')
# infr.ensure_cliques(evidence_decision=POSTV)
# infr.show(pickable=True, use_image=True, groupby='name_label',
# splines='spline')
infr.apply_nondynamic_update()
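    # Add negative reviews until every pair of PCCs is at least
    # 1-negative-redundant.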
for edge in infr.find_neg_redun_candidate_edges(k=1):
infr.add_feedback(edge, evidence_decision=NEGTV)
import itertools as it
edges = list(it.combinations(infr.aids, 2))
n = 0
incomp_edges = ut.compress(edges, [not f for f in infr.is_comparable(edges)])
for e in ut.shuffle(incomp_edges, rng=3545115929):
infr.add_feedback(e, evidence_decision=INCMP)
n += 1
if n > 3:
break
for e in force_incomp_edge:
infr.add_feedback(e, evidence_decision=INCMP)
for edge in infr.find_neg_redun_candidate_edges(k=1):
infr.add_feedback(edge, evidence_decision=NEGTV)
savekw = dict(dpi=300, transparent=True, edgecolor='none')
showkw = dict(pickable=True, use_image=True, groupby='name_label',
splines='spline', fnum=1)
infr.show(show_positive_edges=False, show_negative_edges=False,
show_incomparable_edges=False, **showkw)
fig = pt.gcf()
fig.savefig('id_graph1.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr.show(show_positive_edges=True, show_negative_edges=False,
show_incomparable_edges=False, **showkw)
fig = pt.gcf()
fig.savefig('id_graph2.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr.show(show_positive_edges=False, show_negative_edges=True,
show_incomparable_edges=False, **showkw)
fig = pt.gcf()
fig.savefig('id_graph3.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr.show(show_positive_edges=False, show_negative_edges=False,
show_incomparable_edges=True, **showkw)
fig = pt.gcf()
fig.savefig('id_graph4.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
import networkx as nx
infr.show(pin=True, **showkw)
nx.set_node_attributes(infr.graph, name='pin', values='true')
fig = pt.gcf()
fig.savefig('id_graph5.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr2 = infr.copy()
for edge in infr2.find_pos_redun_candidate_edges(k=2):
infr2.add_feedback(edge, evidence_decision=POSTV)
infr2.show(pickable=True, use_image=True,
groupby='name_label', fnum=1, splines='spline')
fig = pt.gcf()
fig.savefig('id_graph6.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
for edge in infr2.find_neg_redun_candidate_edges(k=2):
infr2.add_feedback(edge, evidence_decision=NEGTV)
infr2.show(pickable=True, use_image=True,
groupby='name_label', fnum=1, splines='spline')
fig = pt.gcf()
fig.savefig('id_graph7.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr3 = infr.copy()
for edge in infr3.find_pos_redun_candidate_edges(k=2):
infr3.add_feedback(edge, evidence_decision=POSTV)
for cc in infr3.non_pos_redundant_pccs(k=3):
for edge in infr3.find_pos_augment_edges(cc, k=3):
infr3.add_feedback(edge, evidence_decision=NEGTV)
break
infr3.show(pickable=True, use_image=True, show_between=False,
show_inconsistency=True,
groupby='name_label', fnum=1, splines='spline')
fig = pt.gcf()
fig.savefig('id_graph8.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
infr4 = infr.copy()
for edge in infr4.edges():
infr4.add_feedback(edge, evidence_decision=UNREV)
infr4.refresh_candidate_edges()
infr4.show(show_cand=True, **showkw)
fig = pt.gcf()
fig.savefig('id_graph9.png',
bbox_inches=pt.extract_axes_extents(fig, combine=True), **savekw)
def intraoccurrence_connected():
r"""
CommandLine:
python -m ibeis.scripts.specialdraw intraoccurrence_connected --show
python -m ibeis.scripts.specialdraw intraoccurrence_connected --show --smaller
python -m ibeis.scripts.specialdraw intraoccurrence_connected --precut --save=precut.jpg
python -m ibeis.scripts.specialdraw intraoccurrence_connected --postcut --save=postcut.jpg
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> result = intraoccurrence_connected()
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import ibeis
import plottool_ibeis as pt
from ibeis.viz import viz_graph
import networkx as nx
pt.ensureqt()
ibs = ibeis.opendb(defaultdb='PZ_Master1')
nid2_aid = {
#4880: [3690, 3696, 3703, 3706, 3712, 3721],
4880: [3690, 3696, 3703],
6537: [3739],
6653: [7671],
6610: [7566, 7408],
#6612: [7664, 7462, 7522],
#6624: [7465, 7360],
#6625: [7746, 7383, 7390, 7477, 7376, 7579],
6630: [7586, 7377, 7464, 7478],
#6677: [7500]
}
nid2_dbaids = {
4880: [33, 6120, 7164],
6537: [7017, 7206],
6653: [7660]
}
if ut.get_argflag('--small') or ut.get_argflag('--smaller'):
del nid2_aid[6630]
del nid2_aid[6537]
del nid2_dbaids[6537]
if ut.get_argflag('--smaller'):
nid2_dbaids[4880].remove(33)
nid2_aid[4880].remove(3690)
nid2_aid[6610].remove(7408)
#del nid2_aid[4880]
#del nid2_dbaids[4880]
aids = ut.flatten(nid2_aid.values())
temp_nids = [1] * len(aids)
postcut = ut.get_argflag('--postcut')
aids_list = ibs.group_annots_by_name(aids)[0]
ensure_edges = 'all' if True or not postcut else None
    # unlabeled_graph = infr.graph  # NOTE: infr is not defined here; the graph is built below
unlabeled_graph = viz_graph.make_netx_graph_from_aid_groups(
ibs, aids_list,
#invis_edges=invis_edges,
ensure_edges=ensure_edges, temp_nids=temp_nids)
viz_graph.color_by_nids(unlabeled_graph, unique_nids=[1] *
len(list(unlabeled_graph.nodes())))
viz_graph.ensure_node_images(ibs, unlabeled_graph)
nx.set_node_attributes(unlabeled_graph, name='shape', values='rect')
#unlabeled_graph = unlabeled_graph.to_undirected()
# Find the "database exemplars for these annots"
if False:
gt_aids = ibs.get_annot_groundtruth(aids)
gt_aids = [ut.setdiff(s, aids) for s in gt_aids]
dbaids = ut.unique(ut.flatten(gt_aids))
dbaids = ibs.filter_annots_general(dbaids, minqual='good')
ibs.get_annot_quality_texts(dbaids)
else:
dbaids = ut.flatten(nid2_dbaids.values())
exemplars = nx.DiGraph()
#graph = exemplars # NOQA
exemplars.add_nodes_from(dbaids)
def add_clique(graph, nodes, edgeattrs={}, nodeattrs={}):
edge_list = ut.upper_diag_self_prodx(nodes)
graph.add_edges_from(edge_list, **edgeattrs)
return edge_list
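    # Each database name contributes a clique of exemplar nodes: every pair of
    # its annotations gets an edge.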
for aids_, nid in zip(*ibs.group_annots_by_name(dbaids)):
add_clique(exemplars, aids_)
viz_graph.ensure_node_images(ibs, exemplars)
viz_graph.color_by_nids(exemplars, ibs=ibs)
nx.set_node_attributes(unlabeled_graph, name='framewidth', values=False)
nx.set_node_attributes(exemplars, name='framewidth', values=4.0)
nx.set_node_attributes(unlabeled_graph, name='group', values='unlab')
nx.set_node_attributes(exemplars, name='group', values='exemp')
#big_graph = nx.compose_all([unlabeled_graph])
big_graph = nx.compose_all([exemplars, unlabeled_graph])
# add sparse connections from unlabeled to exemplars
import numpy as np
rng = np.random.RandomState(0)
if True or not postcut:
for aid_ in unlabeled_graph.nodes():
flags = rng.rand(len(exemplars)) > .5
nid_ = ibs.get_annot_nids(aid_)
exnids = np.array(ibs.get_annot_nids(list(exemplars.nodes())))
flags = np.logical_or(exnids == nid_, flags)
exmatches = ut.compress(list(exemplars.nodes()), flags)
big_graph.add_edges_from(list(ut.product([aid_], exmatches)),
color=pt.ORANGE, implicit=True)
else:
for aid_ in unlabeled_graph.nodes():
flags = rng.rand(len(exemplars)) > .5
exmatches = ut.compress(list(exemplars.nodes()), flags)
nid_ = ibs.get_annot_nids(aid_)
exnids = np.array(ibs.get_annot_nids(exmatches))
exmatches = ut.compress(exmatches, exnids == nid_)
big_graph.add_edges_from(list(ut.product([aid_], exmatches)))
pass
nx.set_node_attributes(big_graph, name='shape', values='rect')
#if False and postcut:
# ut.nx_delete_node_attr(big_graph, 'nid')
# ut.nx_delete_edge_attr(big_graph, 'color')
# viz_graph.ensure_graph_nid_labels(big_graph, ibs=ibs)
# viz_graph.color_by_nids(big_graph, ibs=ibs)
# big_graph = big_graph.to_undirected()
layoutkw = {
'sep' : 1 / 5,
'prog': 'neato',
'overlap': 'false',
#'splines': 'ortho',
'splines': 'spline',
}
as_directed = False
#as_directed = True
#hacknode = True
hacknode = 0
graph = big_graph
ut.nx_ensure_agraph_color(graph)
if hacknode:
nx.set_edge_attributes(graph, name='taillabel', values={e: str(e[0]) for e in graph.edges()})
nx.set_edge_attributes(graph, name='headlabel', values={e: str(e[1]) for e in graph.edges()})
_, layout_info = pt.nx_agraph_layout(graph, inplace=True, **layoutkw)
node_dict = ut.nx_node_dict(graph)
if ut.get_argflag('--smaller'):
node_dict[7660]['pos'] = np.array([550, 350])
node_dict[6120]['pos'] = np.array([200, 600]) + np.array([350, -400])
node_dict[7164]['pos'] = np.array([200, 480]) + np.array([350, -400])
nx.set_node_attributes(graph, name='pin', values='true')
_, layout_info = pt.nx_agraph_layout(graph,
inplace=True, **layoutkw)
elif ut.get_argflag('--small'):
node_dict[7660]['pos'] = np.array([750, 350])
node_dict[33]['pos'] = np.array([300, 600]) + np.array([350, -400])
node_dict[6120]['pos'] = np.array([500, 600]) + np.array([350, -400])
node_dict[7164]['pos'] = np.array([410, 480]) + np.array([350, -400])
nx.set_node_attributes(graph, name='pin', values='true')
_, layout_info = pt.nx_agraph_layout(graph,
inplace=True, **layoutkw)
if not postcut:
#pt.show_nx(graph.to_undirected(), layout='agraph', layoutkw=layoutkw,
# as_directed=False)
#pt.show_nx(graph, layout='agraph', layoutkw=layoutkw,
# as_directed=as_directed, hacknode=hacknode)
pt.show_nx(graph, layout='custom', layoutkw=layoutkw,
as_directed=as_directed, hacknode=hacknode)
else:
#explicit_graph = pt.get_explicit_graph(graph)
#_, layout_info = pt.nx_agraph_layout(explicit_graph, orig_graph=graph,
# **layoutkw)
#layout_info['edge']['alpha'] = .8
#pt.apply_graph_layout_attrs(graph, layout_info)
#graph_layout_attrs = layout_info['graph']
##edge_layout_attrs = layout_info['edge']
##node_layout_attrs = layout_info['node']
#for key, vals in layout_info['node'].items():
# #print('[special] key = %r' % (key,))
# nx.set_node_attributes(graph, name=key, values=vals)
#for key, vals in layout_info['edge'].items():
# #print('[special] key = %r' % (key,))
# nx.set_edge_attributes(graph, name=key, values=vals)
#nx.set_edge_attributes(graph, name='alpha', values=.8)
#graph.graph['splines'] = graph_layout_attrs.get('splines', 'line')
#graph.graph['splines'] = 'polyline' # graph_layout_attrs.get('splines', 'line')
#graph.graph['splines'] = 'line'
cut_graph = graph.copy()
edge_list = list(cut_graph.edges())
edge_nids = np.array(ibs.unflat_map(ibs.get_annot_nids, edge_list))
cut_flags = edge_nids.T[0] != edge_nids.T[1]
cut_edges = ut.compress(edge_list, cut_flags)
cut_graph.remove_edges_from(cut_edges)
ut.nx_delete_node_attr(cut_graph, 'nid')
viz_graph.ensure_graph_nid_labels(cut_graph, ibs=ibs)
#ut.nx_get_default_node_attributes(exemplars, 'color', None)
ut.nx_delete_node_attr(cut_graph, 'color', nodes=unlabeled_graph.nodes())
aid2_color = ut.nx_get_default_node_attributes(cut_graph, 'color', None)
nid2_colors = ut.group_items(aid2_color.values(), ibs.get_annot_nids(aid2_color.keys()))
nid2_colors = ut.map_dict_vals(ut.filter_Nones, nid2_colors)
nid2_colors = ut.map_dict_vals(ut.unique, nid2_colors)
#for val in nid2_colors.values():
# assert len(val) <= 1
# Get initial colors
nid2_color_ = {nid: colors_[0] for nid, colors_ in nid2_colors.items()
if len(colors_) == 1}
graph = cut_graph
viz_graph.color_by_nids(cut_graph, ibs=ibs, nid2_color_=nid2_color_)
nx.set_node_attributes(cut_graph, name='framewidth', values=4)
pt.show_nx(cut_graph, layout='custom', layoutkw=layoutkw,
as_directed=as_directed, hacknode=hacknode)
pt.zoom_factory()
# The database exemplars
# TODO: match these along with the intra encounter set
#interact = viz_graph.make_name_graph_interaction(
# ibs, aids=dbaids, with_all=False, prog='neato', framewidth=True)
#print(interact)
# Groupid only works for dot
#nx.set_node_attributes(unlabeled_graph, name='groupid', values='unlabeled')
#nx.set_node_attributes(exemplars, name='groupid', values='exemplars')
#exemplars = exemplars.to_undirected()
#add_clique(exemplars, aids_, edgeattrs=dict(constraint=False))
#layoutkw = {}
#pt.show_nx(exemplars, layout='agraph', layoutkw=layoutkw,
# as_directed=False, framewidth=True,)
def scalespace():
r"""
THIS DOES NOT SHOW A REAL SCALE SPACE PYRAMID YET. FIXME.
Returns:
?: imgBGRA_warped
CommandLine:
python -m ibeis.scripts.specialdraw scalespace --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.scripts.specialdraw import * # NOQA
>>> imgBGRA_warped = scalespace()
>>> result = ('imgBGRA_warped = %s' % (ut.repr2(imgBGRA_warped),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import numpy as np
# import matplotlib.pyplot as plt
import cv2
import vtool_ibeis as vt
import plottool_ibeis as pt
pt.qt4ensure()
#imgBGR = vt.imread(ut.grab_test_imgpath('lena.png'))
imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
# imgBGR = vt.imread(ut.grab_test_imgpath('carl.jpg'))
# Convert to colored intensity image
imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
imgBGR = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
imgRaw = imgBGR
    # TODO: stack images in a pyramid; border?
initial_sigma = 1.6
num_intervals = 4
def makepyramid_octave(imgRaw, level, num_intervals):
# Downsample image to take sigma to a power of level
step = (2 ** (level))
img_level = imgRaw[::step, ::step]
# Compute interval relative scales
interval = np.array(list(range(num_intervals)))
relative_scales = (2 ** ((interval / num_intervals)))
sigma_intervals = initial_sigma * relative_scales
octave_intervals = []
for sigma in sigma_intervals:
sizex = int(6. * sigma + 1.) + int(1 - (int(6. * sigma + 1.) % 2))
ksize = (sizex, sizex)
img_blur = cv2.GaussianBlur(img_level, ksize, sigmaX=sigma,
sigmaY=sigma,
borderType=cv2.BORDER_REPLICATE)
octave_intervals.append(img_blur)
return octave_intervals
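    # Scale-space bookkeeping: octave `level` downsamples by 2**level, interval
    # sigmas follow sigma_i = initial_sigma * 2**(i / num_intervals), and the
    # Gaussian kernel size is about 6*sigma rounded up to the nearest odd value.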
pyramid = []
num_octaves = 4
for level in range(num_octaves):
octave = makepyramid_octave(imgRaw, level, num_intervals)
pyramid.append(octave)
def makewarp(imgBGR):
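        """Fake a 3D tilt: map the image corners onto a trapezoid and warp with the resulting homography."""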
        # hack a projection matrix using a dummy homography
imgBGRA = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2BGRA)
imgBGRA[:, :, 3] = .87 * 255 # hack alpha
imgBGRA = vt.pad_image(imgBGRA, 2, value=[0, 0, 255, 255])
size = np.array(vt.get_size(imgBGRA))
pts1 = np.array([(0, 0), (0, 1), (1, 1), (1, 0)]) * size
x_adjust = .15
y_adjust = .5
pts2 = np.array([(x_adjust, 0), (0, 1 - y_adjust), (1, 1 - y_adjust), (1 - x_adjust, 0)]) * size
H = cv2.findHomography(pts1, pts2)[0]
        dsize = np.array(vt.bbox_from_verts(pts2)[2:4]).astype(int)
warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
imgBGRA_warped = cv2.warpPerspective(imgBGRA, H, tuple(dsize), **warpkw)
return imgBGRA_warped
framesize = (700, 500)
steps = np.array([.04, .03, .02, .01]) * 1.3
numintervals = 4
octave_ty_starts = [1.0]
for i in range(1, 4):
prev_ty = octave_ty_starts[-1]
prev_base = pyramid[i - 1][0]
next_ty = prev_ty - ((prev_base.shape[0] / framesize[1]) / 2 + (numintervals - 1) * (steps[i - 1]))
octave_ty_starts.append(next_ty)
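    # Stack octaves vertically: each octave starts below the previous one by
    # half the previous base height (relative to the frame) plus the interval
    # spacing used inside that octave.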
def temprange(stop, step, num):
return [stop - (x * step) for x in range(num)]
layers = []
for i in range(0, 4):
ty_start = octave_ty_starts[i]
step = steps[i]
intervals = pyramid[i]
ty_range = temprange(ty_start, step, numintervals)
nextpart = [
vt.embed_in_square_image(makewarp(interval), framesize, img_origin=(.5, .5),
target_origin=(.5, ty / 2))
for ty, interval in zip(ty_range, intervals)
]
layers += nextpart
for layer in layers:
pt.imshow(layer)
pt.plt.grid(False)
def event_space():
"""
pip install matplotlib-venn
"""
from matplotlib import pyplot as plt
# import numpy as np
from matplotlib_venn import venn3, venn2, venn3_circles
plt.figure(figsize=(4, 4))
v = venn3(subsets=(1, 1, 1, 1, 1, 1, 1), set_labels=('A', 'B', 'C'))
v.get_patch_by_id('100').set_alpha(1.0)
v.get_patch_by_id('100').set_color('white')
v.get_label_by_id('100').set_text('Unknown')
v.get_label_by_id('A').set_text('Set "A"')
c = venn3_circles(subsets=(1, 1, 1, 1, 1, 1, 1), linestyle='dashed')
c[0].set_lw(1.0)
c[0].set_ls('dotted')
plt.show()
same = set(['comparable', 'incomparable', 'same'])
diff = set(['comparable', 'incomparable', 'diff'])
# comparable = set(['comparable', 'same', 'diff'])
# incomparable = set(['incomparable', 'same', 'diff'])
subsets = [same, diff] # , comparable, incomparable]
set_labels = ('same', 'diff') # , 'comparable', 'incomparable')
venn3(subsets=subsets, set_labels=set_labels)
plt.show()
import plottool_ibeis as pt
pt.ensureqt()
from matplotlib_subsets import treesets_rectangles
    tree = (
        (120, 'Same', None), [
            ((50, 'comparable', None), []),
            ((50, 'incomparable', None), [])
        ],
        (120, 'Diff', None), [
            ((50, 'comparable', None), []),
            ((50, 'incomparable', None), [])
        ]
    )
treesets_rectangles(tree)
plt.show()
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn2_circles # NOQA
# Subset sizes
s = (
2, # Ab
3, # aB
1, # AB
)
v = venn2(subsets=s, set_labels=('A', 'B'))
# Subset labels
v.get_label_by_id('10').set_text('A but not B')
v.get_label_by_id('01').set_text('B but not A')
v.get_label_by_id('11').set_text('A and B')
# Subset colors
v.get_patch_by_id('10').set_color('c')
v.get_patch_by_id('01').set_color('#993333')
v.get_patch_by_id('11').set_color('blue')
# Subset alphas
v.get_patch_by_id('10').set_alpha(0.4)
v.get_patch_by_id('01').set_alpha(1.0)
v.get_patch_by_id('11').set_alpha(0.7)
# Border styles
c = venn2_circles(subsets=s, linestyle='solid')
c[0].set_ls('dashed') # Line style
c[0].set_lw(2.0) # Line width
plt.show()
# plt.savefig('example_tree.pdf', bbox_inches='tight')
# plt.close()
# venn2(subsets=(25, 231+65, 8+15))
# # Find out the location of the two circles
# # (you can look up how its done in the first lines
# # of the venn2 function)
# from matplotlib_venn._venn2 import compute_venn2_areas, solve_venn2_circles
# subsets = (25, 231+65, 8+15)
# areas = compute_venn2_areas(subsets, normalize_to=1.0)
# centers, radii = solve_venn2_circles(areas)
# # Now draw the third circle.
# # Its area is (15+65)/(25+8+15) times
# # that of the first circle,
# # hence its radius must be
# r3 = radii[0]*sqrt((15+65.0)/(25+8+15))
# # Its position must be such that the intersection
# # area with C1 is 15/(15+8+25) of C1's area.
# # The way to compute the distance between
# # the circles by area can be looked up in
# # solve_venn2_circles
# from matplotlib_venn._math import find_distance_by_area
# distance = find_distance_by_area(radii[0], r3,
# 15.0/(15+8+25)*np.pi*radii[0]*radii[0])
# ax = gca()
# ax.add_patch(Circle(centers[0] + np.array([distance, 0]),
# r3, alpha=0.5, edgecolor=None,
# facecolor='red', linestyle=None,
# linewidth=0))
def draw_inconsistent_pcc():
"""
CommandLine:
python -m ibeis.scripts.specialdraw draw_inconsistent_pcc --show
"""
from ibeis.algo.graph import demo
import plottool_ibeis as pt
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
kwargs = dict(num_pccs=1, n_incon=1, p_incon=1, size=4)
infr = demo.demodata_infr(**kwargs)
infr.set_node_attrs('pos', {
1: (30, 40),
3: (70, 40),
4: ( 0, 0),
2: (100, 0),
})
fnum = 1
infr.set_node_attrs('pin', True)
# infr.set_node_attrs('fixed_size', False)
# infr.set_node_attrs('scale', .1)
# infr.set_node_attrs('width', 16)
infr.show(show_inconsistency=False, simple_labels=True, pickable=True,
pnum=(1, 2, 1), fnum=fnum)
ax = pt.gca()
truth_colors = infr._get_truth_colors()
from ibeis.algo.graph.state import POSTV, NEGTV
pt.append_phantom_legend_label('positive', truth_colors[POSTV], ax=ax)
pt.append_phantom_legend_label('negative', truth_colors[NEGTV], ax=ax)
# pt.append_phantom_legend_label('incomparble', truth_colors[INCMP], ax=ax)
pt.show_phantom_legend_labels(size=infr.graph.graph['fontsize'])
ax.set_aspect('equal')
infr.show(show_inconsistency=True, simple_labels=True, pickable=True,
pnum=(1, 2, 2), fnum=fnum)
ax = pt.gca()
truth_colors = infr._get_truth_colors()
from ibeis.algo.graph.state import POSTV, NEGTV
pt.append_phantom_legend_label('positive', truth_colors[POSTV], ax=ax)
pt.append_phantom_legend_label('negative', truth_colors[NEGTV], ax=ax)
pt.append_phantom_legend_label('hypothesis', infr._error_color, ax=ax)
# pt.append_phantom_legend_label('incomparble', truth_colors[INCMP], ax=ax)
pt.show_phantom_legend_labels(size=infr.graph.graph['fontsize'])
# ax.set_aspect('equal')
ax.set_aspect('equal')
ut.show_if_requested()
def draw_graph_id():
"""
CommandLine:
python -m ibeis.scripts.specialdraw draw_graph_id --show
"""
from ibeis.algo.graph import demo
import plottool_ibeis as pt
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
kwargs = dict(num_pccs=5, p_incon=0, size=4, size_std=1,
p_incomp=.2,
p_pair_neg=.5, p_pair_incmp=.4)
infr = demo.demodata_infr(**kwargs)
infr.graph.graph['hpad'] = 50
infr.graph.graph['vpad'] = 10
infr.graph.graph['group_grid'] = True
infr.show(show_inconsistency=False,
simple_labels=True,
wavy=False, groupby='name_label', pickable=True)
ax = pt.gca()
truth_colors = infr._get_truth_colors()
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP
pt.append_phantom_legend_label('positive', truth_colors[POSTV], ax=ax)
pt.append_phantom_legend_label('negative', truth_colors[NEGTV], ax=ax)
    pt.append_phantom_legend_label('incomparable', truth_colors[INCMP], ax=ax)
pt.show_phantom_legend_labels(size=infr.graph.graph['fontsize'])
ax.set_aspect('equal')
ut.show_if_requested()
def redun_demo2():
r"""
python -m ibeis.scripts.specialdraw redun_demo2 --show
"""
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP # NOQA
from ibeis.algo.graph import demo
# from ibeis.algo.graph import nx_utils
import plottool_ibeis as pt
# import networkx as nx
pt.ensureqt()
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
fnum = 1
showkw = dict(show_inconsistency=False, show_labels=True,
simple_labels=True,
show_recent_review=False, wavy=False,
groupby='name_label',
splines='spline',
pickable=True, fnum=fnum)
graphkw = dict(hpad=50, vpad=50, group_grid=True)
pnum_ = pt.make_pnum_nextgen(2, 3)
def show_redun(infr):
infr.graph.graph.update(graphkw)
infr.show(pnum=pnum_(), **showkw)
ax = pt.gca()
ax.set_aspect('equal')
ccs = list(infr.positive_components())
if len(ccs) == 1:
cc = ccs[0]
ax.set_xlabel(str(infr.pos_redundancy(cc)) + '-positive-redundant')
else:
cc1, cc2 = ccs
ax.set_xlabel(str(infr.neg_redundancy(cc1, cc2)) + '-negative-redundant')
infr = demo.make_demo_infr(ccs=[(1, 2, 3, 5, 4), (6,)])
infr.add_feedback((5, 6), evidence_decision=POSTV)
# infr.add_feedback((3, 4), evidence_decision='unreviewed')
show_redun(infr)
infr = infr.copy()
for u, v in infr.find_pos_augment_edges(set(infr.graph.nodes()), k=2):
infr.add_feedback((u, v), evidence_decision=POSTV)
show_redun(infr)
infr = infr.copy()
for u, v in infr.find_pos_augment_edges(set(infr.graph.nodes()), k=3):
infr.add_feedback((u, v), evidence_decision=POSTV)
show_redun(infr)
infr = demo.make_demo_infr(ccs=[(1, 2, 3, 4), (11, 12, 13, 14, 15)])
infr.add_feedback((2, 11), evidence_decision=NEGTV)
show_redun(infr)
infr = demo.make_demo_infr(ccs=[(1, 2, 3, 4), (11, 12, 13, 14, 15)])
infr.add_feedback((2, 11), evidence_decision=NEGTV)
infr.add_feedback((4, 14), evidence_decision=NEGTV)
show_redun(infr)
infr = demo.make_demo_infr(ccs=[(1, 2, 3, 4), (11, 12, 13, 14, 15)])
infr.add_feedback((2, 11), evidence_decision=NEGTV)
infr.add_feedback((4, 14), evidence_decision=NEGTV)
infr.add_feedback((2, 14), evidence_decision=NEGTV)
show_redun(infr)
fig = pt.gcf()
fig.set_size_inches(10, 5)
ut.show_if_requested()
def redun_demo3():
r"""
python -m ibeis.scripts.specialdraw redun_demo3 --show
python -m ibeis.scripts.specialdraw redun_demo3 --saveparts=~/slides/incon_redun.jpg --dpi=300
"""
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP # NOQA
from ibeis.algo.graph import demo
from ibeis.algo.graph import nx_utils as nxu
import plottool_ibeis as pt
# import networkx as nx
pt.ensureqt()
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
fnum = 1
showkw = dict(show_inconsistency=False, show_labels=True,
simple_labels=True,
show_recent_review=False, wavy=False,
groupby='name_label',
splines='spline',
show_all=True,
pickable=True, fnum=fnum)
graphkw = dict(hpad=50, vpad=50, group_grid=True)
pnum_ = pt.make_pnum_nextgen(2, 1)
infr = demo.make_demo_infr(ccs=[(1, 2, 3, 5, 4), (6,)])
infr.add_feedback((5, 6), evidence_decision=POSTV)
for e in nxu.complement_edges(infr.graph):
infr.add_feedback(e, evidence_decision=INCMP)
infr.graph.graph.update(graphkw)
infr.show(pnum=pnum_(), **showkw)
ax = pt.gca()
ax.set_aspect('equal')
ccs = [(1, 2, 3, 4), (11, 12, 13, 14, 15)]
infr = demo.make_demo_infr(ccs=ccs)
infr.add_feedback((4, 14), evidence_decision=NEGTV)
import networkx as nx
for e in nxu.edges_between(nx.complement(infr.graph), ccs[0], ccs[1]):
print('e = %r' % (e,))
infr.add_feedback(e, evidence_decision=INCMP)
infr.graph.graph.update(graphkw)
infr.show(pnum=pnum_(), **showkw)
ax = pt.gca()
ax.set_aspect('equal')
fig = pt.gcf()
fig.set_size_inches(10 / 3, 5)
ut.show_if_requested()
def system_diagram():
"""
CommandLine:
python -m ibeis.scripts.specialdraw system_diagram --show
"""
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV # NOQA
from ibeis.algo.graph import demo
from ibeis.algo.graph import nx_utils as nxu # NOQA
import plottool_ibeis as pt
# import networkx as nx
pt.ensureqt()
import matplotlib as mpl
from ibeis.scripts.thesis import TMP_RC
mpl.rcParams.update(TMP_RC)
# fnum = 1
# showkw = dict(show_inconsistency=False, show_labels=True,
# simple_labels=True,
# show_recent_review=False, wavy=False,
# groupby='name_label',
# splines='spline',
# show_all=True,
# pickable=True, fnum=fnum)
# graphkw = dict(hpad=50, vpad=50, group_grid=True)
# pnum_ = pt.make_pnum_nextgen(2, 1)
infr = demo.demodata_infr(ccs=[(1, 2, 3, 4), (5, 6, 7), (8, 9,), (10,)])
showkw = dict(
show_unreviewed_edges=True, show_inferred_same=False,
show_inferred_diff=False, show_labels=True, simple_labels=True,
show_recent_review=False, reposition=False, pickable=True,
outof=(len(infr.aids)), # hack for colors
)
infr.clear_edges()
# ----------------------
    # Step 1: Find candidates
# ----------------------
infr.params['ranking.ntop'] = 4
infr.refresh_candidate_edges()
infr.update_visual_attrs(groupby='name_label')
infr.set_node_attrs('pin', 'true')
infr.set_node_attrs('shape', 'circle')
infr.clear_feedback()
infr.clear_name_labels()
# infr.ensure_edges_from([(10, 5), (10, 6)])
infr.ensure_prioritized(list(infr.edges()))
edge_overrides = {}
# edge_overrides = {
# # 'linestyle': {e: 'dashed' for e in infr.edges()},
# 'linestyle': {e: 'dashed' for e in infr.get_edges_where_eq('decision', UNREV)},
# }
infr.show(edge_overrides=edge_overrides, fnum=1, pnum=(1, 4, 1), **showkw)
pt.gca().set_aspect('equal')
# ---------------------------
# Step 2: Automatic decisions
# ---------------------------
infr.task_probs.pop('photobomb_state', None)
infr.params['autoreview.enabled'] = True
infr.params['autoreview.prioritize_nonpos'] = True
infr.task_thresh['match_state'][POSTV] = .8
infr.task_thresh['match_state'][NEGTV] = .54
infr.task_thresh['match_state'][INCMP] = .5
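    # match_state probabilities above these per-class thresholds are eligible
    # for an automatic (non-manual) decision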
# infr.add_feedback((1, 2), POSTV) # hack
infr.ensure_prioritized(infr.get_edges_where_eq('decision', UNREV))
gen = infr._inner_priority_gen()
next(gen)
# edge_overrides = {
# # 'linestyle': {e: 'dashed' for e in infr.edges()},
# 'linestyle': {e: 'dashed' for e in infr.get_edges_where_eq('decision', UNREV)},
# }
infr.apply_nondynamic_update()
infr.update_visual_attrs(groupby='name_label')
infr.show(edge_overrides=edge_overrides, fnum=1, pnum=(1, 4, 2), **showkw)
pt.gca().set_aspect('equal')
# --------------
# Error recovery
# --------------
possible = list(infr.find_pos_redun_candidate_edges())
edge = possible[min(1, len(possible) - 1)]
infr.add_feedback(edge, NEGTV)
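    # Append '!' to the labels of nodes that ended up in an inconsistent component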
node_overrides = {
'label': {n: '{}!'.format(n) for n in ut.flatten(infr.inconsistent_components())}
}
# edge_overrides = {
# 'linestyle': {e: 'dashed' for e in infr.get_edges_where_eq('decision', UNREV)},
# }
infr.update_visual_attrs(groupby='name_label')
infr.show(edge_overrides=edge_overrides, node_overrides=node_overrides,
fnum=1, pnum=(1, 4, 3), **showkw)
pt.gca().set_aspect('equal')
# Manual Decisions
infr.init_simulation(oracle_accuracy=1.0)
infr.params['redun.neg.only_auto'] = False
infr.main_loop()
# ISSUE:
    # For some reason an incomparable edge (3, 10) is being manually reviewed
# again in the main loop even though it was already reviewed.
# Quick Fix: add feedback specifically for this example.
infr.add_feedback((3, 10), INCMP)
# ISSUE:
# When candidate edges are added within pos-redun CCs, the inferred state
# should be set (but currently it is not).
    # E.g.: edge (1, 2) is added while the CC is already pos-redun, but the
# inferred state on the edge is never set.
# Quick Fix: inference between newly added edges that were already
# pos-redun
infr.apply_nondynamic_update()
# edge_overrides = {
# # 'linestyle': {e: 'dashed' for e in infr.edges()},
# 'linestyle': {e: 'dashed' for e in infr.get_edges_where_eq('decision', UNREV)},
# }
infr.update_visual_attrs(groupby='name_label')
infr.show(edge_overrides=edge_overrides, fnum=1, pnum=(1, 4, 4), **showkw)
pt.gca().set_aspect('equal')
ut.show_if_requested()
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.scripts.specialdraw
python -m ibeis.scripts.specialdraw --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| {
"repo_name": "Erotemic/ibeis",
"path": "ibeis/scripts/specialdraw.py",
"copies": "1",
"size": "73472",
"license": "apache-2.0",
"hash": -3049319626379602000,
"line_mean": 36.3143727781,
"line_max": 203,
"alpha_frac": 0.6013583406,
"autogenerated": false,
"ratio": 3.035531317137663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.912727414313681,
"avg_score": 0.0019231029201704972,
"num_lines": 1969
} |
from __future__ import absolute_import, division, print_function
import utool
import numpy as np
from ibeis.model.hots.smk import smk_core
from ibeis.model.hots.hstypes import FLOAT_TYPE, VEC_DIM
from vtool import clustering2 as clustertool
from six.moves import zip
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[smk_speed]')
@profile
def compute_agg_rvecs(rvecs_list, idxs_list, aids_list, maws_list):
"""
Sums and normalizes all rvecs that belong to the same word and the same
annotation id
Example:
>>> from ibeis.model.hots.smk.smk_speed import * # NOQA
>>> from ibeis.model.hots.smk import smk_debug
>>> words, wx_sublist, aids_list, idxs_list, idx2_vec, maws_list = smk_debug.testdata_nonagg_rvec()
>>> rvecs_list = compute_nonagg_rvec_listcomp(words, wx_sublist, idxs_list, idx2_vec)
"""
#assert len(idxs_list) == len(rvecs_list)
# group members of each word by aid, we will collapse these groups
grouptup_list = [clustertool.group_indicies(aids) for aids in aids_list]
# Agg aids
aggaids_list = [tup[0] for tup in grouptup_list]
groupxs_list = [tup[1] for tup in grouptup_list]
# Aggregate vecs that belong to the same aid, for each word
# (weighted aggregation with multi-assign-weights)
aggvecs_list = [
np.vstack([smk_core.aggregate_rvecs(rvecs.take(xs, axis=0), maws.take(xs)) for xs in groupxs])
if len(groupxs) > 0 else
np.empty((0, VEC_DIM), dtype=FLOAT_TYPE)
for rvecs, maws, groupxs in zip(rvecs_list, maws_list, groupxs_list)]
# Agg idxs
aggidxs_list = [[idxs.take(xs) for xs in groupxs]
for idxs, groupxs in zip(idxs_list, groupxs_list)]
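    # The combined multi-assign weight of each aggregated group is the product
    # of its members' weights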
aggmaws_list = [np.array([maws.take(xs).prod() for xs in groupxs])
for maws, groupxs in zip(maws_list, groupxs_list)]
return aggvecs_list, aggaids_list, aggidxs_list, aggmaws_list
#def group_and_aggregate(rvecs, aids):
# """
# assumes rvecs are all from the same word
#    Returns aggregated vecs, the aids they came from, and the invertible feature
# map
# >>> from ibeis.model.hots.smk.smk_speed import *
# >>> rvecs = np.random.rand(5, 128) * 255
# >>> aids = np.array([1, 1, 2, 3, 2])
# """
# assert len(aids) == len(rvecs)
# if len(aids) == 0:
# group_aids = np.empty((0), dtype=INTEGER_TYPE)
# groupxs = np.empty((0), dtype=INTEGER_TYPE)
# group_aggvecs = np.empty((0, 128), dtype=FLOAT_TYPE)
# return group_aids, group_aggvecs
# else:
# group_aids, groupxs = group_indicies(aids) # 35us
# group_vecs = [rvecs.take(xs, axis=0) for xs in groupxs]
# aggvec_list = [smk_core.aggregate_rvecs(vecs) for vecs in group_vecs] # 25us
# group_aggvecs = np.vstack(aggvec_list) # 6.53us
# return group_aids, group_aggvecs, groupxs
# #with utool.Timer('tew'):
# # agg_list = [group_and_aggregate(rvecs, aids)
# # for rvecs, aids in zip(rvecs_list, aids_list)] # 233 ms
@profile
def compute_nonagg_rvec_listcomp(words, wx_sublist, idxs_list, idx2_vec):
"""
    PREFERRED METHOD - 110ms
Example:
>>> from ibeis.model.hots.smk import smk_debug
>>> words, wx_sublist, aids_list, idxs_list, idx2_vec, maws_list = smk_debug.testdata_nonagg_rvec()
Timeit:
%timeit words_list = [words[np.newaxis, wx] for wx in wx_sublist] # 5 ms
%timeit words_list = [words[wx:wx + 1] for wx in wx_sublist] # 1.6 ms
"""
#with utool.Timer('compute_nonagg_rvec_listcomp'):
#vecs_list = [idx2_vec[idxs] for idxs in idxs_list] # 23 ms
words_list = [words[wx:wx + 1] for wx in wx_sublist] # 1 ms
vecs_list = [idx2_vec.take(idxs, axis=0) for idxs in idxs_list] # 5.3 ms
rvecs_list = [smk_core.get_norm_rvecs(vecs, word)
for vecs, word in zip(vecs_list, words_list)] # 103 ms # 90%
return rvecs_list
def compute_nonagg_residuals_forloop(words, wx_sublist, idxs_list, idx2_vec):
"""
OK, but slower than listcomp method - 140ms
Timeit:
idxs = idxs.astype(np.int32)
%timeit idx2_vec.take(idxs, axis=0) # 1.27
%timeit idx2_vec.take(idxs.astype(np.int32), axis=0) # 1.94
%timeit idx2_vec[idxs] # 7.8
"""
#with utool.Timer('compute_nonagg_residuals_forloop'):
num = wx_sublist.size
rvecs_list = np.empty(num, dtype=np.ndarray)
for count, wx in enumerate(wx_sublist):
idxs = idxs_list[count]
vecs = idx2_vec[idxs]
word = words[wx:wx + 1]
rvecs_n = smk_core.get_norm_rvecs(vecs, word)
rvecs_list[count] = rvecs_n
return rvecs_list
def compute_nonagg_residuals_pandas(words, wx_sublist, wx2_idxs, idx2_vec):
"""
VERY SLOW. DEBUG USE ONLY
Ignore:
words = words.values
wxlist = [wx]
### index test
%timeit words[wx:wx + 1] # 0.334 us
%timeit words[wx, np.newaxis] # 1.05 us
%timeit words[np.newaxis, wx] # 1.05 us
%timeit words.take(wxlist, axis=0) # 1.6 us
### pandas test
%timeit words.values[wx:wx + 1] # 7.6 us
%timeit words[wx:wx + 1].values # 84.9 us
"""
#with utool.Timer('compute_nonagg_residuals_pandas'):
#mark, end_ = utool.log_progress('compute residual: ', len(wx_sublist), flushfreq=500, writefreq=50)
num = wx_sublist.size
rvecs_arr = np.empty(num, dtype=np.ndarray)
# Compute Residuals
for count, wx in enumerate(wx_sublist):
#mark(count)
idxs = wx2_idxs[wx].values
vecs = idx2_vec.take(idxs).values
word = words.values[wx:wx + 1]
rvecs_n = smk_core.get_norm_rvecs(vecs, word)
rvecs_arr[count] = rvecs_n
return rvecs_arr
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/smk_speed.py",
"copies": "1",
"size": "5781",
"license": "apache-2.0",
"hash": 8832965927042741000,
"line_mean": 39.1458333333,
"line_max": 107,
"alpha_frac": 0.6223836706,
"autogenerated": false,
"ratio": 2.854814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3977198485414815,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import utool
import pandas as pd
import numpy as np
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[pdh]')
from ibeis.model.hots.hstypes import VEC_DIM, INTEGER_TYPE
class LazyGetter(object):
def __init__(self, getter_func):
self.getter_func = getter_func
def __getitem__(self, index):
return self.getter_func(index)
def __call__(self, index):
return self.getter_func(index)
#def lazy_getter(getter_func):
# def lazy_closure(*args):
# return getter_func(*args)
# return lazy_closure
class DataFrameProxy(object):
"""
pandas is actually really slow. This class emulates it so
I don't have to change my function calls, but without all the slowness.
"""
def __init__(self, ibs):
self.ibs = ibs
def __getitem__(self, key):
if key == 'kpts':
return LazyGetter(self.ibs.get_annot_kpts)
elif key == 'vecs':
return LazyGetter(self.ibs.get_annot_desc)
elif key == 'labels':
return LazyGetter(self.ibs.get_annot_class_labels)
@profile
def Int32Index(data, dtype=np.int32, copy=True, name=None):
return pd.Index(data, dtype=dtype, copy=copy, name=name)
if INTEGER_TYPE is np.int32:
IntIndex = Int32Index
else:
IntIndex = pd.Int64Index
@profile
def RangeIndex(size, name=None):
arr = np.arange(size, dtype=INTEGER_TYPE)
index = IntIndex(arr, copy=False, name=name)
return index
VEC_COLUMNS = RangeIndex(VEC_DIM, name='vec')
KPT_COLUMNS = pd.Index(['xpos', 'ypos', 'a', 'c', 'd', 'theta'], name='kpt')
PANDAS_TYPES = (pd.Series, pd.DataFrame, pd.Index)
@profile
def IntSeries(data, *args, **kwargs):
if 'index' not in kwargs:
index = IntIndex(np.arange(len(data), dtype=INTEGER_TYPE))
return pd.Series(data, *args, index=index, **kwargs)
else:
return pd.Series(data, *args, **kwargs)
@profile
def pandasify_dict1d(dict_, keys, val_name, series_name, dense=True):
""" Turns dict into heirarchy of series """
if dense:
key2_series = pd.Series(
{key: pd.Series(dict_.get(key, []), name=val_name,)
for key in keys},
index=keys, name=series_name)
else:
key2_series = pd.Series(
{key: pd.Series(dict_.get(key), name=val_name,)
for key in keys},
index=IntIndex(dict_.keys(), name=keys.name), name=series_name)
return key2_series
@profile
def pandasify_dict2d(dict_, keys, key2_index, columns, series_name):
""" Turns dict into heirarchy of dataframes """
item_list = [dict_[key] for key in keys]
index_list = [key2_index[key] for key in keys]
_data = {
key: pd.DataFrame(item, index=index, columns=columns,)
for key, item, index in zip(keys, item_list, index_list)
}
key2_df = pd.Series(_data, index=keys, name=series_name)
return key2_df
@profile
def pandasify_list2d(item_list, keys, columns, val_name, series_name):
""" Turns dict into heirarchy of dataframes """
index_list = [RangeIndex(len(item), name=val_name) for item in item_list]
_data = [pd.DataFrame(item, index=index, columns=columns,)
for item, index in zip(item_list, index_list)]
key2_df = pd.Series(_data, index=keys, name=series_name)
return key2_df
@profile
def ensure_values(data):
if isinstance(data, (np.ndarray, list)):
return data
elif isinstance(data, PANDAS_TYPES):
return data.values
elif isinstance(data, dict):
return np.array(list(data.values()))
else:
raise AssertionError(type(data))
@profile
def ensure_index(data):
if isinstance(data, PANDAS_TYPES):
return data.index
elif isinstance(data, dict):
return np.array(list(data.keys()))
else:
return np.arange(len(data))
#raise AssertionError(type(data))
def ensure_values_subset(data, keys):
if isinstance(data, dict):
return [data[key] for key in keys]
elif isinstance(data, PANDAS_TYPES):
return [ensure_values(item) for item in data[keys].values]
else:
raise AssertionError(type(data))
def ensure_values_scalar_subset(data, keys):
if isinstance(data, dict):
return [data[key] for key in keys]
elif isinstance(data, PANDAS_TYPES):
return [item for item in data[keys].values]
else:
raise AssertionError(type(data))
def ensure_2d_values(data):
#if not isinstance(data, PANDAS_TYPES):
# return data
data_ = ensure_values(data)
if len(data_) == 0:
return data_
else:
if isinstance(data_[0], PANDAS_TYPES):
return [item.values for item in data]
else:
raise AssertionError(type(data))
@profile
def pandasify_rvecs_list(wx_sublist, wx2_idxs_values, rvecs_list, aids_list,
fxs_list):
assert len(rvecs_list) == len(wx2_idxs_values)
assert len(rvecs_list) == len(wx_sublist)
rvecsdf_list = [
pd.DataFrame(rvecs, index=idxs, columns=VEC_COLUMNS)
for rvecs, idxs in zip(rvecs_list, wx2_idxs_values)] # 413 ms
_aids_list = [pd.Series(aids) for aids in aids_list]
wx2_rvecs = IntSeries(rvecsdf_list, index=wx_sublist, name='rvec')
wx2_aids = IntSeries(_aids_list, index=wx_sublist, name='wx2_aids')
wx2_fxs = IntSeries(fxs_list, index=wx_sublist, name='wx2_aids')
return wx2_rvecs, wx2_aids, wx2_fxs
@profile
def pandasify_agg_list(wx_sublist, aggvecs_list, aggaids_list, aggfxs_list):
"""
Example:
>>> from ibeis.model.hots.smk.pandas_helpers import *
"""
_aids_list = [IntSeries(aids, name='aids') for aids in aggaids_list]
_aggvecs_list = [pd.DataFrame(vecs, index=aids, columns=VEC_COLUMNS)
for vecs, aids in zip(aggvecs_list, _aids_list)]
wx2_aggaids = IntSeries(_aids_list, index=wx_sublist, name='wx2_aggaids')
wx2_aggvecs = pd.Series(_aggvecs_list, index=wx_sublist, name='wx2_aggvecs')
wx2_aggfxs = pd.Series(aggfxs_list, index=wx_sublist, name='wx2_aggfxs')
return wx2_aggvecs, wx2_aggaids, wx2_aggfxs
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/pandas_helpers.py",
"copies": "1",
"size": "6197",
"license": "apache-2.0",
"hash": -5567994988022571000,
"line_mean": 30.6173469388,
"line_max": 80,
"alpha_frac": 0.6374051961,
"autogenerated": false,
"ratio": 3.163348647269015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9294975037008711,
"avg_score": 0.0011557612720606081,
"num_lines": 196
} |
from __future__ import absolute_import, division, print_function
import utool
import six
from six.moves import zip, range, map
import numpy as np
import numpy.linalg as npl
import utool as ut
import vtool
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[vr2]', DEBUG=False)
def get_chipmatch_testdata(**kwargs):
from ibeis.model.hots import pipeline
cfgdict = {'dupvote_weight': 1.0}
ibs, qreq_ = pipeline.get_pipeline_testdata('testdb1', cfgdict)
# Run first four pipeline steps
locals_ = pipeline.testrun_pipeline_upto(qreq_, 'spatial_verification')
qaid2_chipmatch = locals_['qaid2_chipmatch_FILT']
# Get a single chipmatch
qaid = six.next(six.iterkeys(qaid2_chipmatch))
chipmatch = qaid2_chipmatch[qaid]
return ibs, qreq_, qaid, chipmatch
def score_chipmatch_csum(qaid, chipmatch, qreq_):
"""
score_chipmatch_csum
Args:
chipmatch (tuple):
Returns:
tuple: aid_list, score_list
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.model.hots.voting_rules2 import * # NOQA
>>> ibs, qreq_, qaid, chipmatch = get_chipmatch_testdata()
>>> (aid_list, score_list) = score_chipmatch_csum(qaid, chipmatch, qreq_)
>>> print(aid_list, score_list)
"""
#(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
aid2_fsv = chipmatch.aid2_fsv
if False:
aid2_fs = {aid: fsv.prod(axis=1) for aid, fsv in six.iteritems(aid2_fsv)}
aid_list = list(six.iterkeys(aid2_fs))
fs_list = ut.dict_take(aid2_fs, aid_list)
#fs_list = list(six.itervalues(aid2_fs))
score_list = [np.sum(fs) for fs in fs_list]
else:
aid_list = list(six.iterkeys(aid2_fsv))
fsv_list = ut.dict_take(aid2_fsv, aid_list)
fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
score_list = [np.sum(fs) for fs in fs_list]
return (aid_list, score_list)
#aid2_score = {aid: np.sum(fs) for (aid, fs) in six.iteritems(aid2_fs)}
#return aid2_score
def score_chipmatch_nsum(qaid, chipmatch, qreq_):
"""
score_chipmatch_nsum
Args:
chipmatch (tuple):
Returns:
dict: nid2_score
CommandLine:
python dev.py -t custom:score_method=csum,prescore_method=csum --db GZ_ALL --show --va -w --qaid 1032 --noqcache
python dev.py -t nsum_nosv --db GZ_ALL --allgt --noqcache
python dev.py -t nsum --db GZ_ALL --show --va -w --qaid 1032 --noqcache
python dev.py -t nsum_nosv --db GZ_ALL --show --va -w --qaid 1032 --noqcache
qaid=1032_res_gooc+f4msr4ouy9t_quuid=c4f78a6d.npz
qaid=1032_res_5ujbs8h&%vw1olnx_quuid=c4f78a6d.npz
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.model.hots.voting_rules2 import * # NOQA
>>> ibs, qreq_, qaid, chipmatch = get_chipmatch_testdata()
>>> (aid_list, score_list) = score_chipmatch_nsum(qaid, chipmatch, qreq_)
>>> print(aid_list, score_list)
"""
# FIXME:
# for now apply a hack to return aid scores
# TODO: rectify this code with code in name scoring
# TODO: should be another version of nsum where each feature gets a single vote
NEW_WAY = True
if NEW_WAY:
aid_list, nscore_list = score_chipmatch_true_nsum(qaid, chipmatch, qreq_, True)
return aid_list, nscore_list
else:
(nid_list, nsum_list) = score_chipmatch_true_nsum(qaid, chipmatch, qreq_, False)
aid2_csum = dict(zip(*score_chipmatch_csum(qaid, chipmatch, qreq_)))
aids_list = qreq_.ibs.get_name_aids(nid_list, enable_unknown_fix=True)
aid2_nscore = {}
daids = np.intersect1d(list(six.iterkeys(aid2_csum)),
qreq_.daids)
for nid, nsum, aids in zip(nid_list, nsum_list, aids_list):
aids_ = np.intersect1d(aids, daids)
if len(aids_) == 1:
aid2_nscore[aids_[0]] = nsum
elif len(aids_) > 1:
csum_arr = np.array([aid2_csum[aid] for aid in aids_])
                # No, something else in the old way is wrong.
# just use new way it seems better.
#BAD?: sortx = csum_arr.argsort()[::-1]
#sortx = csum_arr.argsort()
sortx = csum_arr.argsort()[::-1]
# Give the best scoring annotation the score
aid2_nscore[aids_[sortx[0]]] = nsum
# All other annotations receive 0 score
for aid in aids_[sortx[1:]]:
aid2_nscore[aid] = 0
else:
print('warning in voting rules nsum')
aid_list = list(six.iterkeys(aid2_nscore))
score_list = ut.dict_take(aid2_nscore, aid_list)
#score_list = list(six.itervalues(aid2_nscore))
return (aid_list, score_list)
#raise NotImplementedError('nsum')
def score_chipmatch_true_nsum(qaid, chipmatch, qreq_, return_wrt_aids=False):
"""
Sums scores over all annots with those names.
Dupvote weighting should be on to combat double counting
"""
# Nonhacky version of name scoring
#(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
aid2_fsv = chipmatch.aid2_fsv
NEW_WAY = True
if NEW_WAY:
# New version
aid_list = list(six.iterkeys(aid2_fsv))
fsv_list = ut.dict_take(aid2_fsv, aid_list)
#fs_list = [fsv.prod(axis=1) if fsv.shape[1] > 1 else fsv.T[0] for fsv in fsv_list]
fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
annot_score_list = np.array([fs.sum() for fs in fs_list])
annot_nid_list = np.array(qreq_.ibs.get_annot_name_rowids(aid_list))
nid_list, groupxs = vtool.group_indicies(annot_nid_list)
grouped_scores = vtool.apply_grouping(annot_score_list, groupxs)
else:
aid2_fs = {aid: fsv.prod(axis=1) for aid, fsv in six.iteritems(aid2_fsv)}
aid_list = list(six.iterkeys(aid2_fs))
annot_score_list = np.array([fs.sum() for fs in six.itervalues(aid2_fs)])
annot_nid_list = np.array(qreq_.ibs.get_annot_name_rowids(aid_list))
nid_list, groupxs = vtool.group_indicies(annot_nid_list)
grouped_scores = vtool.apply_grouping(annot_score_list, groupxs)
if return_wrt_aids:
def indicator_array(size, pos, value):
""" creates zero array and places value at pos """
arr = np.zeros(size)
arr[pos] = value
return arr
grouped_nscores = [indicator_array(scores.size, scores.argmax(), scores.sum()) for scores in grouped_scores]
nscore_list = vtool.clustering2.invert_apply_grouping(grouped_nscores, groupxs)
#nscore_list = ut.flatten(grouped_nscores)
return aid_list, nscore_list
else:
score_list = [scores.sum() for scores in grouped_scores]
return nid_list, score_list
#score_list = [scores.sum() for scores in grouped_scores]
#return nid_list, score_list
def score_chipmatch_nunique(ibs, qaid, chipmatch, qreq):
raise NotImplementedError('nunique')
#def enforce_one_name(ibs, aid2_score, chipmatch=None, aid2_chipscore=None):
# """
# this is a hack to make the same name only show up once in the top ranked
# list
# """
# if chipmatch is not None:
# (_, aid2_fs, _, _) = chipmatch
# aid2_chipscore = np.array([np.sum(fs) for fs in aid2_fs])
# # FIXME
# nid_list = ibs.get_name_aids()
# nid2_aids = {nid: aids for nid, aids in zip(ibs.get_name_aids(nid_list))}
# aid2_score = np.array(aid2_score)
# for nid, aids in enumerate(nid2_aids):
# if len(aids) < 2 or nid <= 1:
# continue
# #print(aids)
# # zero the aids with the lowest csum score
# sortx = aid2_chipscore[aids].argsort()
# aids_to_zero = np.array(aids)[sortx[0:-1]]
# aid2_score[aids_to_zero] = 0
# return aid2_score
def score_chipmatch_PL(ibs, qcx, chipmatch, qreq):
"""
chipmatch = qcx2_chipmatch[qcx]
"""
K = qreq.cfg.nn_cfg.K
max_alts = qreq.cfg.agg_cfg.max_alts
isWeighted = qreq.cfg.agg_cfg.isWeighted
# Create voting vectors of top K utilities
qfx2_utilities = _chipmatch2_utilities(ibs, qcx, chipmatch, K)
qfx2_utilities = _filter_utilities(qfx2_utilities, max_alts)
    # Run Plackett-Luce Model
    # 1) create the Plackett-Luce pairwise comparison matrix
if isWeighted:
PL_matrix, altx2_tnx = _utilities2_weighted_pairwise_breaking(qfx2_utilities)
else:
PL_matrix, altx2_tnx = _utilities2_pairwise_breaking(qfx2_utilities)
# 2) find the gamma vector which minimizes || Pl * gamma || s.t. gamma > 0
gamma = _optimize(PL_matrix)
# Find the probability each alternative is #1
altx2_prob = _PL_score(gamma)
#print('[vote] gamma = %r' % gamma)
#print('[vote] altx2_prob = %r' % altx2_prob)
# Use probabilities as scores
aid2_score, nid2_score = get_scores_from_altx2_score(ibs, qcx, altx2_prob, altx2_tnx)
# HACK HACK HACK!!!
#aid2_score = enforce_one_name_per_cscore(ibs, aid2_score, chipmatch)
return aid2_score, nid2_score
def _optimize(M):
#print('[vote] optimize')
if M.size == 0:
return np.array([])
(u, s, v) = npl.svd(M)
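    # v[-1] (the right singular vector of the smallest singular value) spans the
    # near-null space of M; its absolute value is used as the non-negative
    # Plackett-Luce strength vector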
x = np.abs(v[-1])
check = np.abs(M.dot(x)) < 1E-9
if not all(check):
raise Exception('SVD method failed miserably')
return x
def _PL_score(gamma):
#print('[vote] computing probabilities')
nAlts = len(gamma)
altx2_prob = np.zeros(nAlts)
for altx in range(nAlts):
altx2_prob[altx] = gamma[altx] / np.sum(gamma)
#print('[vote] altx2_prob: '+str(altx2_prob))
#print('[vote] sum(prob): '+str(sum(altx2_prob)))
return altx2_prob
def get_scores_from_altx2_score(ibs, qcx, altx2_prob, altx2_tnx):
nid2_score = np.zeros(len(ibs.tables.nid2_name))
aid2_score = np.zeros(len(ibs.tables.aid2_aid))
nid2_cxs = ibs.get_nx2_cxs()
for altx, prob in enumerate(altx2_prob):
tnx = altx2_tnx[altx]
if tnx < 0: # account for temporary names
aid2_score[-tnx] = prob
nid2_score[1] += prob
else:
nid2_score[tnx] = prob
for aid in nid2_cxs[tnx]:
if aid == qcx:
continue
aid2_score[aid] = prob
return aid2_score, nid2_score
def _chipmatch2_utilities(ibs, qcx, chipmatch, K):
"""
Output: qfx2_utilities - map where qfx is the key and utilities are values
utilities are lists of tuples
utilities ~ [(aid, temp_name_index, feature_score, feature_rank), ...]
fx1 : [(aid_0, tnx_0, fs_0, fk_0), ..., (aid_m, tnx_m, fs_m, fk_m)]
fx2 : [(aid_0, tnx_0, fs_0, fk_0), ..., (aid_m, tnx_m, fs_m, fk_m)]
...
fxN : [(aid_0, tnx_0, fs_0, fk_0), ..., (aid_m, tnx_m, fs_m, fk_m)]
"""
#print('[vote] computing utilities')
aid2_nx = ibs.tables.aid2_nx
nQFeats = len(ibs.feats.aid2_kpts[qcx])
# Stack the feature matches
(aid2_fm, aid2_fs, aid2_fk, aid2_H) = chipmatch
aids = np.hstack([[aid] * len(aid2_fm[aid]) for aid in range(len(aid2_fm))])
aids = np.array(aids, np.int)
fms = np.vstack(aid2_fm)
# Get the individual feature match lists
qfxs = fms[:, 0]
fss = np.hstack(aid2_fs)
fks = np.hstack(aid2_fk)
qfx2_utilities = [[] for _ in range(nQFeats)]
for aid, qfx, fk, fs in zip(aids, qfxs, fks, fss):
nid = aid2_nx[aid]
# Apply temporary uniquish name
tnx = nid if nid >= 2 else -aid
utility = (aid, tnx, fs, fk)
qfx2_utilities[qfx].append(utility)
for qfx in range(len(qfx2_utilities)):
utilities = qfx2_utilities[qfx]
utilities = sorted(utilities, key=lambda tup: tup[3])
qfx2_utilities[qfx] = utilities
return qfx2_utilities
def _filter_utilities(qfx2_utilities, max_alts=200):
print('[vote] filtering utilities')
tnxs = [utool[1] for utils in qfx2_utilities for utool in utils]
if len(tnxs) == 0:
return qfx2_utilities
tnxs = np.array(tnxs)
tnxs_min = tnxs.min()
tnx2_freq = np.bincount(tnxs - tnxs_min)
nAlts = (tnx2_freq > 0).sum()
nRemove = max(0, nAlts - max_alts)
print(' * removing %r/%r alternatives' % (nRemove, nAlts))
if nRemove > 0: # remove least frequent names
most_freq_tnxs = tnx2_freq.argsort()[::-1] + tnxs_min
keep_tnxs = set(most_freq_tnxs[0:max_alts].tolist())
for qfx in range(len(qfx2_utilities)):
utils = qfx2_utilities[qfx]
qfx2_utilities[qfx] = [utool for utool in utils if utool[1] in keep_tnxs]
return qfx2_utilities
def _utilities2_pairwise_breaking(qfx2_utilities):
print('[vote] building pairwise matrix')
hstack = np.hstack
cartesian = utool.cartesian
tnxs = [util[1] for utils in qfx2_utilities for util in utils]
altx2_tnx = utool.unique_ordered2(tnxs)
tnx2_altx = {nid: altx for altx, nid in enumerate(altx2_tnx)}
nUtilities = len(qfx2_utilities)
nAlts = len(altx2_tnx)
altxs = np.arange(nAlts)
pairwise_mat = np.zeros((nAlts, nAlts))
qfx2_porder = [np.array([tnx2_altx[util[1]] for util in utils])
for utils in qfx2_utilities]
def sum_win(ij):
""" pairiwse wins on off-diagonal """
pairwise_mat[ij[0], ij[1]] += 1
def sum_loss(ij):
""" pairiwse wins on off-diagonal """
pairwise_mat[ij[1], ij[1]] -= 1
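    # Each vote "i beats j" adds +1 at (i, j) and -1 at (j, j), so every column
    # of the pairwise matrix sums to zero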
nVoters = 0
for qfx in range(nUtilities):
        # partial and complement order over alternatives
porder = utool.unique_ordered2(qfx2_porder[qfx])
nReport = len(porder)
if nReport == 0:
continue
#sys.stdout.write('.')
corder = np.setdiff1d(altxs, porder)
# pairwise winners and losers
pw_winners = [porder[r:r + 1] for r in range(nReport)]
pw_losers = [hstack((corder, porder[r + 1:])) for r in range(nReport)]
pw_iter = zip(pw_winners, pw_losers)
pw_votes_ = [cartesian((winner, losers)) for winner, losers in pw_iter]
pw_votes = np.vstack(pw_votes_)
#pw_votes = [(w,l) for votes in pw_votes_ for w,l in votes if w != l]
list(map(sum_win, iter(pw_votes)))
list(map(sum_loss, iter(pw_votes)))
nVoters += 1
#print('')
PLmatrix = pairwise_mat / nVoters
    # PLmatrix.sum(0) sums down each column; by construction every column of
    # the pairwise matrix sums to zero
#print('CheckMat = %r ' % all(np.abs(PLmatrix.sum(0)) < 1E-9))
return PLmatrix, altx2_tnx
def _get_alts_from_utilities(qfx2_utilities):
""" get temp name indexes """
tnxs = [utool[1] for utils in qfx2_utilities for utool in utils]
altx2_tnx = utool.unique_ordered2(tnxs)
tnx2_altx = {nid: altx for altx, nid in enumerate(altx2_tnx)}
nUtilities = len(qfx2_utilities)
nAlts = len(altx2_tnx)
altxs = np.arange(nAlts)
return tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs
def _utilities2_weighted_pairwise_breaking(qfx2_utilities):
print('[vote] building pairwise matrix')
tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs = _get_alts_from_utilities(qfx2_utilities)
pairwise_mat = np.zeros((nAlts, nAlts))
# agent to alternative vote vectors
qfx2_porder = [np.array([tnx2_altx[utool[1]] for utool in utils]) for utils in qfx2_utilities]
# agent to alternative weight/utility vectors
qfx2_worder = [np.array([utool[2] for utool in utils]) for utils in qfx2_utilities]
nVoters = 0
for qfx in range(nUtilities):
        # partial and complement order over alternatives
porder = qfx2_porder[qfx]
worder = qfx2_worder[qfx]
_, idx = np.unique(porder, return_inverse=True)
idx = np.sort(idx)
porder = porder[idx]
worder = worder[idx]
nReport = len(porder)
if nReport == 0:
continue
#sys.stdout.write('.')
corder = np.setdiff1d(altxs, porder)
nUnreport = len(corder)
# pairwise winners and losers
for r_win in range(0, nReport):
            # for each preferred alternative
i = porder[r_win]
wi = worder[r_win]
# count the reported victories: i > j
for r_lose in range(r_win + 1, nReport):
j = porder[r_lose]
#wj = worder[r_lose]
#w = wi - wj
w = wi
pairwise_mat[i, j] += w
pairwise_mat[j, j] -= w
# count the un-reported victories: i > j
for r_lose in range(nUnreport):
j = corder[r_lose]
#wj = 0
#w = wi - wj
w = wi
pairwise_mat[i, j] += w
pairwise_mat[j, j] -= w
nVoters += wi
#print('')
PLmatrix = pairwise_mat / nVoters
    # PLmatrix.sum(0) sums down each column; by construction every column of
    # the pairwise matrix sums to zero
#print('CheckMat = %r ' % all(np.abs(PLmatrix.sum(0)) < 1E-9))
return PLmatrix, altx2_tnx
# Positional Scoring Rules
def positional_scoring_rule(qfx2_utilities, rule, isWeighted):
tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs = _get_alts_from_utilities(qfx2_utilities)
# agent to alternative vote vectors
qfx2_porder = [np.array([tnx2_altx[util[1]] for util in utils]) for utils in qfx2_utilities]
# agent to alternative weight/utility vectors
if isWeighted:
qfx2_worder = [np.array([util[2] for util in utils]) for utils in qfx2_utilities]
else:
qfx2_worder = [np.array([ 1.0 for util in utils]) for utils in qfx2_utilities]
K = max(map(len, qfx2_utilities))
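    # borda scores K, K-1, ..., 1; plurality scores only the top choice;
    # topk scores every reported choice equally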
if rule == 'borda':
score_vec = np.arange(0, K)[::-1] + 1
if rule == 'plurality':
score_vec = np.zeros(K)
score_vec[0] = 1
if rule == 'topk':
score_vec = np.ones(K)
score_vec = np.array(score_vec, dtype=np.int)
#print('----')
#title = 'Rule=%s Weighted=%r ' % (rule, not qfx2_weight is None)
#print('[vote] ' + title)
#print('[vote] score_vec = %r' % (score_vec,))
altx2_score = _positional_score(altxs, score_vec, qfx2_porder, qfx2_worder)
#ranked_candiates = alt_score.argsort()[::-1]
#ranked_scores = alt_score[ranked_candiates]
#viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum)
return altx2_score, altx2_tnx
def _positional_score(altxs, score_vec, qfx2_porder, qfx2_worder):
nAlts = len(altxs)
altx2_score = np.zeros(nAlts)
# For each voter
for qfx in range(len(qfx2_porder)):
partial_order = qfx2_porder[qfx]
weights = qfx2_worder[qfx]
# Loop over the ranked alternatives applying positional/meta weight
for ix, altx in enumerate(partial_order):
#if altx == -1: continue
altx2_score[altx] += weights[ix] * score_vec[ix]
return altx2_score
def score_chipmatch_pos(ibs, qcx, chipmatch, qreq, rule='borda'):
"""
Positional Scoring Rule
"""
(aid2_fm, aid2_fs, aid2_fk, _) = chipmatch
K = qreq.cfg.nn_cfg.K
isWeighted = qreq.cfg.agg_cfg.isWeighted
# Create voting vectors of top K utilities
qfx2_utilities = _chipmatch2_utilities(ibs, qcx, chipmatch, K)
# Run Positional Scoring Rule
altx2_score, altx2_tnx = positional_scoring_rule(qfx2_utilities, rule, isWeighted)
# Map alternatives back to chips/names
aid2_score, nid2_score = get_scores_from_altx2_score(ibs, qcx, altx2_score, altx2_tnx)
# HACK HACK HACK!!!
#aid2_score = enforce_one_name_per_cscore(ibs, aid2_score, chipmatch)
return aid2_score, nid2_score
if __name__ == '__main__':
"""
python -m ibeis.model.hots.voting_rules2
python -m ibeis.model.hots.voting_rules2 --allexamples
"""
import multiprocessing
multiprocessing.freeze_support()
ut.doctest_funcs()
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/voting_rules2.py",
"copies": "1",
"size": "19954",
"license": "apache-2.0",
"hash": 259997907953496500,
"line_mean": 38.2795275591,
"line_max": 120,
"alpha_frac": 0.6084494337,
"autogenerated": false,
"ratio": 2.9189584552369805,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9012168837930044,
"avg_score": 0.003047810201387348,
"num_lines": 508
} |
from __future__ import absolute_import, division, print_function
import utool
def test_akmeans(full_test=False, plot_test=False, num_pca_dims=2, data_dim=2,
nump=1000):
import numpy as np
from vtool_ibeis import clustering
nump = nump
dims = data_dim # 128
dtype = np.uint8
print('Make %d random %d-dimensional %s points.' % (nump, dims, dtype))
    # Seed for a deterministic test
np.random.seed(42)
data = np.array(np.random.randint(0, 255, (nump, dims)), dtype=dtype)
num_clusters = 10
max_iters = 2
ave_unchanged_thresh = 0
ave_unchanged_iterwin = 10
flann_params = {}
cache_dir = utool.get_app_resource_dir('vtool_ibeis', 'test_cache')
utool.ensuredir(cache_dir)
# Test precomputing
dx2_label, centers = clustering.precompute_akmeans(data, num_clusters,
max_iters=max_iters,
cache_dir=cache_dir)
# internal names
datax2_clusterx, centroids = dx2_label, centers
if plot_test:
clustering.plot_clusters(data, datax2_clusterx, centroids, num_pca_dims=num_pca_dims)
assert centers.shape == (num_clusters, dims), 'sanity check'
assert dx2_label.shape == (nump,), 'sanity check'
# Test regular computing
if full_test:
dx2_label, centers = clustering.akmeans(data, num_clusters, max_iters=max_iters)
assert centers.shape == (num_clusters, dims), 'sanity check'
assert dx2_label.shape == (nump,), 'sanity check'
if False:
# other test (development)
from vtool_ibeis._pyflann_backend import pyflann
flann_lib_inst = pyflann.flann
flann_class_inst = pyflann.FLANN()
flann_class_inst.build_index(data)
return locals()
if __name__ == '__main__':
testkw = {
'plot_test': utool.get_argflag('--plot-test'),
'full_test': utool.get_argflag('--full-test'),
'num_pca_dims': utool.get_argval('--num-pca-dims', type_=int, default=2),
'data_dim': utool.get_argval('--data-dim', type_=int, default=2),
'nump': utool.get_argval('--nump', type_=int, default=2000),
}
test_locals = utool.run_test(test_akmeans, **testkw)
exec(utool.execstr_dict(test_locals, 'test_locals'))
if testkw['plot_test']:
from plottool_ibeis import draw_func2 as df2
exec(df2.present())
else:
exec(utool.ipython_execstr())
| {
"repo_name": "Erotemic/vtool",
"path": "tests/test_akmeans.py",
"copies": "1",
"size": "2480",
"license": "apache-2.0",
"hash": -3845769695492962300,
"line_mean": 35.4705882353,
"line_max": 93,
"alpha_frac": 0.6096774194,
"autogenerated": false,
"ratio": 3.2588699080157686,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9366042226321669,
"avg_score": 0.0005010202188197016,
"num_lines": 68
} |
from __future__ import absolute_import, division, print_function
import warnings
import re
import py
import sys
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
import warnings
def test_method(recwarn):
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
class TestWarningsRecorderChecker(object):
def test_recording(self):
rec = WarningsRecorder()
with rec:
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
pytest.raises(AssertionError, "rec.pop()")
def test_typechecking(self):
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
WarningsChecker(5)
with pytest.raises(TypeError):
WarningsChecker(('hi', RuntimeWarning))
with pytest.raises(TypeError):
WarningsChecker([DeprecationWarning, RuntimeWarning])
def test_invalid_enter_exit(self):
# wrap this test in WarningsRecorder to ensure warning state gets reset
with WarningsRecorder():
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
with rec:
with rec:
pass # can't enter twice
class TestDeprecatedCall(object):
"""test pytest.deprecated_call()"""
def dep(self, i, j=None):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning,
stacklevel=1)
return 42
def dep_explicit(self, i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
def test_deprecated_call_raises(self):
with pytest.raises(AssertionError) as excinfo:
pytest.deprecated_call(self.dep, 3, 5)
assert 'Did not produce' in str(excinfo)
def test_deprecated_call(self):
pytest.deprecated_call(self.dep, 0, 5)
def test_deprecated_call_ret(self):
ret = pytest.deprecated_call(self.dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self):
onceregistry = py.std.warnings.onceregistry.copy()
filters = py.std.warnings.filters[:]
warn = py.std.warnings.warn
warn_explicit = py.std.warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == py.std.warnings.onceregistry
assert filters == py.std.warnings.filters
assert warn is py.std.warnings.warn
assert warn_explicit is py.std.warnings.warn_explicit
def test_deprecated_explicit_call_raises(self):
with pytest.raises(AssertionError):
pytest.deprecated_call(self.dep_explicit, 3)
def test_deprecated_explicit_call(self):
pytest.deprecated_call(self.dep_explicit, 0)
pytest.deprecated_call(self.dep_explicit, 0)
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
def test_deprecated_call_no_warning(self, mode):
"""Ensure deprecated_call() raises the expected failure when its block/function does
not raise a deprecation warning.
"""
def f():
pass
msg = 'Did not produce DeprecationWarning or PendingDeprecationWarning'
        with pytest.raises(AssertionError, match=msg):
if mode == 'call':
pytest.deprecated_call(f)
else:
with pytest.deprecated_call():
f()
@pytest.mark.parametrize('warning_type', [PendingDeprecationWarning, DeprecationWarning])
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
@pytest.mark.parametrize('call_f_first', [True, False])
def test_deprecated_call_modes(self, warning_type, mode, call_f_first):
"""Ensure deprecated_call() captures a deprecation warning as expected inside its
block/function.
"""
def f():
warnings.warn(warning_type("hi"))
return 10
# ensure deprecated_call() can capture the warning even if it has already been triggered
if call_f_first:
assert f() == 10
if mode == 'call':
assert pytest.deprecated_call(f) == 10
else:
with pytest.deprecated_call():
assert f() == 10
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
def test_deprecated_call_exception_is_raised(self, mode):
"""If the block of the code being tested by deprecated_call() raises an exception,
it must raise the exception undisturbed.
"""
def f():
raise ValueError('some exception')
with pytest.raises(ValueError, match='some exception'):
if mode == 'call':
pytest.deprecated_call(f)
else:
with pytest.deprecated_call():
f()
def test_deprecated_call_specificity(self):
other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning,
FutureWarning, ImportWarning, UnicodeWarning]
for warning in other_warnings:
def f():
warnings.warn(warning("hi"))
with pytest.raises(AssertionError):
pytest.deprecated_call(f)
with pytest.raises(AssertionError):
with pytest.deprecated_call():
f()
def test_deprecated_function_already_called(self, testdir):
"""deprecated_call should be able to catch a call to a deprecated
function even if that function has already been called in the same
module. See #1190.
"""
testdir.makepyfile("""
import warnings
import pytest
def deprecated_function():
warnings.warn("deprecated", DeprecationWarning)
def test_one():
deprecated_function()
def test_two():
pytest.deprecated_call(deprecated_function)
""")
result = testdir.runpytest()
# for some reason in py26 catch_warnings manages to catch the deprecation warning
# from deprecated_function(), even with default filters active (which ignore deprecation
# warnings)
py26 = sys.version_info[:2] == (2, 6)
expected = '*=== 2 passed in *===' if not py26 else '*=== 2 passed, 1 warnings in *==='
result.stdout.fnmatch_lines(expected)
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(SyntaxWarning,
lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
def test_warning_tuple(self):
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w1', RuntimeWarning))
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w2', SyntaxWarning))
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w3', UserWarning)))
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[UserWarning\('user',\)\].")
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[RuntimeWarning\('runtime',\)\].")
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
pass
excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[\].")
warning_classes = (UserWarning, FutureWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(warning_classes) as warninfo:
warnings.warn("runtime", RuntimeWarning)
warnings.warn("import", ImportWarning)
message_template = ("DID NOT WARN. No warnings of type {0} was emitted. "
"The list of emitted warnings is: {1}.")
excinfo.match(re.escape(message_template.format(warning_classes,
[each.message for each in warninfo])))
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_record_by_subclass(self):
with pytest.warns(Warning) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
class MyUserWarning(UserWarning): pass
class MyRuntimeWarning(RuntimeWarning): pass
with pytest.warns((UserWarning, RuntimeWarning)) as record:
warnings.warn("user", MyUserWarning)
warnings.warn("runtime", MyRuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
"""If a test is run again, the warning should still be raised"""
testdir.makepyfile('''
import pytest
import warnings
@pytest.mark.parametrize('run', [1, 2])
def test(run):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*2 passed in*'])
| {
"repo_name": "flub/pytest",
"path": "testing/test_recwarn.py",
"copies": "1",
"size": "11967",
"license": "mit",
"hash": -6839746933968195000,
"line_mean": 37.4790996785,
"line_max": 96,
"alpha_frac": 0.5932982368,
"autogenerated": false,
"ratio": 4.522675736961451,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5615973973761452,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from odo.core import NetworkDispatcher, path, FailedConversionWarning
from datashape import discover
d = NetworkDispatcher('foo')
@d.register(float, int, cost=1.0)
def f(x, **kwargs):
return float(x)
@d.register(str, float, cost=1.0)
def g(x, **kwargs):
return str(x)
def test_basic():
assert [func for a, b, func, cost in d.path(int, str)] == [f, g]
assert list(d.path(int, str)) == list(d.path(1, ''))
def test_convert_is_robust_to_failures():
foo = NetworkDispatcher('foo')
def badfunc(*args, **kwargs):
raise NotImplementedError()
class A(object): pass
class B(object): pass
class C(object): pass
discover.register((A, B, C))(lambda x: 'int')
foo.register(B, A, cost=1.0)(lambda x, **kwargs: 1)
foo.register(C, B, cost=1.0)(badfunc)
foo.register(C, A, cost=10.0)(lambda x, **kwargs: 2)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
assert foo(C, A()) == 2
assert len(ws) == 1
w = ws[0].message
assert isinstance(w, FailedConversionWarning)
assert 'B -> C' in str(w)
def test_convert_failure_takes_greedy_path():
foo = NetworkDispatcher('foo')
class A(object):
pass
class B(object):
pass
class C(object):
pass
class D(object):
pass
discover.register((A, B, C, D))(lambda _: 'int')
foo.register(B, A, cost=1.0)(lambda _, **__: 1)
@foo.register(D, A, cost=10.0)
def expensive_edge(*args, **kwargs):
raise AssertionError(
'convert should not take this route because it is more expensive'
' than the initial route and greedy route',
)
@foo.register(C, B, cost=1.0)
def badedge(*args, **kwargs):
raise NotImplementedError()
@foo.register(D, C, cost=1.0)
def impossible_edge(*args, **kwargs):
raise AssertionError(
'to get to this edge B->C would need to pass which is impossible'
)
greedy_retry_route_selected = [False]
# this edge is more expensive than the cost of B->C->D so it shouldn't
# be picked until B->C has been removed
@foo.register(D, B, cost=3.0)
def greedy_retry_route(data, **kwargs):
greedy_retry_route_selected[0] = True
assert data == 1
return 2
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
assert foo(D, A()) == 2
assert greedy_retry_route_selected[0], 'we did not call the expected edge'
assert len(ws) == 1
w = ws[0].message
assert isinstance(w, FailedConversionWarning)
assert 'B -> C' in str(w)
def test_ooc_behavior():
foo = NetworkDispatcher('foo')
class A(object): pass
class B(object): pass
class C(object): pass
discover.register((A, B, C))(lambda x: 'int')
foo.register(B, A, cost=1.0)(lambda x, **kwargs: 1)
foo.register(C, B, cost=1.0)(lambda x, **kwargs: x / 0) # note that this errs
foo.register(C, A, cost=10.0)(lambda x, **kwargs: 2)
assert ([(a, b, cost) for a, b, _, cost in path(foo.graph, A, C)] ==
[(A, B, 1.0), (B, C, 1.0)])
ooc = set([A, C])
assert ([(a, b, cost)
for a, b, _, cost, in path(foo.graph, A, C, ooc_types=ooc)] ==
[(A, C, 10.0)])
| {
"repo_name": "quantopian/odo",
"path": "odo/tests/test_core.py",
"copies": "4",
"size": "3391",
"license": "bsd-3-clause",
"hash": -8494419520315370000,
"line_mean": 26.5691056911,
"line_max": 81,
"alpha_frac": 0.5989383663,
"autogenerated": false,
"ratio": 3.2418738049713194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002552707350271825,
"num_lines": 123
} |
from __future__ import absolute_import, division, print_function
log_version = 1
class Record(object):
base_fields = ('timestamp', 'vid', 'site_id')
fields = ()
def __init__(self, **kwargs):
for field in self.base_fields + self.fields:
setattr(self, field, kwargs.get(field, ''))
def to_list(self):
return ([str(log_version), self.key] +
[getattr(self, field) for field in
self.base_fields + self.fields])
@staticmethod
def from_list(vals):
version = vals[0]
record_type = vals[1]
rest = vals[2:]
assert int(version) == log_version
cls = _record_types[record_type]
kwargs = {field: val for field, val
in zip(cls.base_fields + cls.fields, rest)}
return cls(**kwargs)
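    # Illustrative round-trip (hypothetical values):
    #   GoalRecord(timestamp='123', vid='v1', site_id='s1', name='signup').to_list()
    #   -> ['1', 'goal', '123', 'v1', 's1', 'signup', '', '', '']
    #   Record.from_list(...) rebuilds the GoalRecord by dispatching on 'goal'.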
class PageRecord(Record):
key = 'page'
fields = ('ip', 'method', 'url', 'user_agent', 'referer')
class PixelRecord(Record):
key = 'pixel'
fields = ()
class GoalRecord(Record):
key = 'goal'
fields = ('name', 'value', 'value_type', 'value_format')
class SplitRecord(Record):
key = 'split'
fields = ('test_name', 'selected')
_record_types = {cls.key: cls for cls in
(PageRecord, PixelRecord, GoalRecord, SplitRecord)}
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/record.py",
"copies": "1",
"size": "1306",
"license": "mit",
"hash": 519893813450032960,
"line_mean": 23.641509434,
"line_max": 68,
"alpha_frac": 0.5735068913,
"autogenerated": false,
"ratio": 3.597796143250689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4671303034550689,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleError
import subprocess
import re
import os
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
if "all" in self._task.args:
all = self._task.args.get("all")
if all == "True" or all == "true" or all == "Yes" or all == "yes": # oh dear
all = True
else:
all = False
if "user" in self._task.args:
user = self._task.args.get("user")
else:
user = False
command = "ps -ef | grep -vn ' grep ' | %s | awk '{print \"sudo -n kill -9\", $2}' | sh"
if user != False:
grep = "grep sshd | grep '%s'" % user
command += " && echo OTHERUSER"
elif all == True:
grep = "grep sshd:"
else:
grep = "grep sshd: | grep `whoami`"
command += " && exit"
sub = subprocess.Popen(["ssh", '-tt', '-n', '-S', 'none', self._connection.host, command % grep],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = sub.communicate()
os.system('stty sane')
result = super(ActionModule, self).run(tmp, task_vars)
if "Write failed: Broken pipe" in err or "Shared connection to" in err or "Connection to %s closed by remote host" % self._connection.host in err or "OTHERUSER" in out:
result['failed'] = False
else:
result['failed'] = True
result['msg'] = err
return result
| {
"repo_name": "udondan/ansible-role-ssh-reconnect",
"path": "action_plugins/ssh-reconnect.py",
"copies": "1",
"size": "1938",
"license": "mit",
"hash": -6420074478502311000,
"line_mean": 31.8474576271,
"line_max": 176,
"alpha_frac": 0.4927760578,
"autogenerated": false,
"ratio": 4.194805194805195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187581252605195,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ComposeError(Exception):
''' Base class for exceptions in this module. '''
def __str__(self):
return self.message
def __repr__(self):
return self.message
class ValueGeneratorError(ComposeError):
''' Class for handling errors raised by value generators in NodeGroup compose. '''
def __init__(self, group_name, node_index, var, message):
self.message = 'Error generating value "%s" for Node number %i, NodeGroup "%s" : %s' % (var, node_index, group_name, message)
class ValueGeneratorTypeError(ComposeError):
''' Class for handling errors raised by value generators in NodeGroup compose. '''
def __init__(self, group_name, node_index, var, message):
self.message = 'Invalid value for "%s" in Node number %i, NodeGroup "%s" : %s' % (var, node_index, group_name, message)
class ContextVarGeneratorError(ComposeError):
''' Class for handling errors raised by context var generators in Cluster compose. '''
def __init__(self, ansible_group, var_name, message):
self.message = 'Error generating context var "%s" for ansible_group "%s" : %s' % (var_name, ansible_group, message)
class GroupVarGeneratorError(ComposeError):
''' Class for handling errors raised by group var generators in Cluster compose. '''
def __init__(self, ansible_group, var_name, message):
self.message = 'Error generating group var "%s" for ansible_group "%s" : %s' % (var_name, ansible_group, message)
class HostVarGeneratorError(ComposeError):
''' Class for handling errors raised by host var generators in Cluster compose. '''
def __init__(self, host, var_name, message):
self.message = 'Error generating host var "%s" for host "%s" : %s' % (var_name, host, message)
class PlaybookLoadError(ComposeError):
''' Class for handling errors raised when loading playbook. '''
def __init__(self, filename, message):
self.message = 'Error loading playbook data from "%s": %s' % (filename, message)
class PlaybookParseError(ComposeError):
''' Class for handling errors raised when parsing playbook. '''
def __init__(self, message):
self.message = 'Error parsing playbook: %s' % (message)
class PlaybookCompileError(ComposeError):
''' Class for handling errors raised when parsing playbook. '''
def __init__(self, cluster, message):
self.message = 'Error compiling cluster "%s": %s ' % (cluster, message)
| {
"repo_name": "fabriziopandini/vagrant-playbook",
"path": "vagrantplaybook/errors.py",
"copies": "1",
"size": "2533",
"license": "mit",
"hash": -2661264489811282400,
"line_mean": 41.9322033898,
"line_max": 133,
"alpha_frac": 0.6770627714,
"autogenerated": false,
"ratio": 3.945482866043614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005930763425496375,
"num_lines": 59
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class VarsModule(object):
''' Creates and modifies host variables '''
def __init__(self, inventory):
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
# Wrap salts and keys variables in {% raw %} to prevent jinja templating errors
def wrap_salts_in_raw(self, host, hostvars):
if 'vault_wordpress_sites' in hostvars:
            for name, site in hostvars['vault_wordpress_sites'].items():
                for key, value in site['env'].items():
if key.endswith(('_key', '_salt')) and not value.startswith(('{% raw', '{%raw')):
hostvars['vault_wordpress_sites'][name]['env'][key] = ''.join(['{% raw %}', value, '{% endraw %}'])
host.vars['vault_wordpress_sites'] = hostvars['vault_wordpress_sites']
def get_host_vars(self, host, vault_password=None):
self.wrap_salts_in_raw(host, host.get_group_vars())
return {}
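# Editor's note (illustrative, key and value are hypothetical): wrap_salts_in_raw turns
#   {'env': {'auth_key': 'a$b%c'}}  into  {'env': {'auth_key': '{% raw %}a$b%c{% endraw %}'}}
# so Jinja2 does not try to template special characters inside generated salts and keys.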
| {
"repo_name": "blinkpi/vagrant",
"path": "trellis/lib/trellis/plugins/vars/vars.py",
"copies": "1",
"size": "1049",
"license": "mit",
"hash": 907922710675918500,
"line_mean": 46.6818181818,
"line_max": 123,
"alpha_frac": 0.6053384175,
"autogenerated": false,
"ratio": 4.0038167938931295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003649224128865655,
"num_lines": 22
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: yaml_groups
short_description: Uses a specifically YAML file as inventory source.
description:
- Alternative YAML formatted inventory for Ansible.
        - Allows you to assign groups to hosts as well as hosts to groups
- Easily make new groups that are supersets and subsets of other groups.
notes:
- To function it requires being whitelisted in configuration.
options:
yaml_extensions:
description: list of 'valid' extensions for files containing YAML
type: list
default: ['.yaml', '.yml', '.json']
url:
'''
EXAMPLES = '''
---
groups:
app1-prod:
include:
- app1
require:
- prod
app1-dev:
include:
- app1
require:
      - dev
app2-prod:
hosts:
- app2-web1
app2:
include:
- app2-prod
- app2-dev
all-apps:
include:
- app1
- app2
hosts:
web-app1-prod.location1.com:
groups:
- app1
- location1
- prod
- web
db-app1-prod.location1.com:
groups:
- app1
- location1
- prod
- db
app1-dev.location1.com:
vars:
EXAMPLE: "true"
groups:
- app1
- location2
- dev
- web
- db
'''
import os
from collections import MutableMapping, Sequence
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseFileInventoryPlugin, detect_range, expand_hostname_range
def is_sequence(obj):
    return isinstance(obj, Sequence) and not isinstance(obj, string_types)
def is_dict(obj):
return isinstance(obj, MutableMapping)
def must_be_sequence(obj, name=None):
if not is_sequence(obj):
if name:
raise AnsibleParserError('Invalid "%s" entry, requires a sequence, found "%s" instead.' % (name, type(obj)))
else:
            raise AnsibleParserError('Invalid data, requires a sequence, found "%s" instead.' % type(obj))
return obj
def must_be_dict(obj, name=None):
if not is_dict(obj):
if name:
raise AnsibleParserError('Invalid "%s" entry, requires a dictionary, found "%s" instead.' % (name, type(obj)))
else:
            raise AnsibleParserError('Invalid data, requires a dictionary, found "%s" instead.' % type(obj))
return obj
def must_not_be_plugin(obj):
if 'plugin' in obj:
raise AnsibleParserError('Plugin configuration YAML file, not YAML groups inventory')
if 'all' in obj:
raise AnsibleParserError('Standard configuration YAML file, not YAML groups inventory')
return obj
class InventoryModule(BaseFileInventoryPlugin):
NAME = 'yaml-groups'
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
''' parses the inventory file '''
super(InventoryModule, self).parse(inventory, loader, path)
try:
data = self.loader.load_from_file(path)
except Exception as e:
raise AnsibleParserError(e)
if not data:
raise AnsibleParserError('Parsed empty YAML file')
must_be_dict(data)
must_not_be_plugin(data)
if 'hosts' in data:
self._parse_hosts(data['hosts'])
if 'groups' in data:
self._parse_groups(data['groups'])
def _parse_hosts(self, hosts):
must_be_dict(hosts, name='hosts')
for host_name in hosts:
self._parse_host(host_name, hosts[host_name])
def _parse_host(self, host_pattern, host):
'''
Each host key can be a pattern, try to process it and add variables as needed
'''
must_be_dict(host)
(host_names, port) = self._expand_hostpattern(host_pattern)
all_group = self.inventory.groups['all']
for host_name in host_names:
self.inventory.add_host(host_name, port=port)
all_group.add_host(self.inventory.get_host(host_name))
if 'groups' in host:
self._parse_host_groups(host_names, host['groups'])
if 'vars' in host:
self._parse_host_vars(host_names, host['vars'])
def _populate_host_vars(self, hosts, variables, group=None, port=None):
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _parse_host_vars(self, host_names, host_vars):
must_be_dict(host_vars, name='vars')
self._populate_host_vars(host_names, host_vars)
def _parse_host_groups(self, host_names, host_groups):
must_be_sequence(host_groups, name='groups')
for group_name in host_groups:
self.inventory.add_group(group_name)
for host_name in host_names:
self.inventory.add_child(group_name, host_name)
def _parse_groups(self, groups):
must_be_dict(groups, name='groups')
for group_name in sorted(groups):
self._parse_group(group_name, groups[group_name])
def _parse_group(self, group_name, group_data):
must_be_dict(group_data, name=('groups/%s %s' % (group_name, group_data)))
self.inventory.add_group(group_name)
group = self.inventory.groups[group_name]
all_group = self.inventory.groups['all']
if 'vars' in group_data:
group_vars = must_be_dict(group_data['vars'], name='vars')
for var_name in group_vars:
group.set_variable(var_name, group_vars[var_name])
if 'hosts' in group_data:
host_names = must_be_sequence(group_data['hosts'], name='hosts')
for host_name in host_names:
self.inventory.add_host(host_name)
group.add_host(host_name)
all_group.add_host(self.inventory.get_host(host_name))
if 'include' in group_data:
include_names = must_be_sequence(group_data['include'], name='include')
for include_name in include_names:
self._parse_group_include(group, include_name)
if 'require' in group_data:
require_names = must_be_sequence(group_data['require'], name='require')
for require_name in require_names:
self._parse_group_require(group, require_name)
if 'exclude' in group_data:
exclude_names = must_be_sequence(group_data['exclude'], name='exclude')
for exclude_name in exclude_names:
self._parse_group_exclude(group, exclude_name)
def _parse_group_include(self, group, include_name):
if include_name not in self.inventory.groups:
return
include_group = self.inventory.groups[include_name]
for host in include_group.get_hosts():
group.add_host(host)
def _parse_group_require(self, group, require_name):
if require_name not in self.inventory.groups:
            raise AnsibleParserError('Group "%s" requires non-existent group "%s"' % (group.name, require_name))
require_group = self.inventory.groups[require_name]
for host in group.get_hosts():
if host not in require_group.get_hosts():
group.remove_host(host)
def _parse_group_exclude(self, group, exclude_name):
if exclude_name not in self.inventory.groups:
return
exclude_group = self.inventory.groups[exclude_name]
for host in exclude_group.get_hosts():
if host in group.get_hosts():
group.remove_host(host)
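    # Editor's note: taken together, 'include' behaves like a union over the referenced
    # groups' hosts, 'require' like an intersection, and 'exclude' like a set difference.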
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of host_names and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more host_names, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
host_names = expand_hostname_range(pattern)
else:
host_names = [pattern]
return (host_names, port)
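    # Editor's illustrative example (hypothetical hostnames): a ranged pattern such as
    #   'web[1:2].example.com:2222'
    # is expected to expand to (['web1.example.com', 'web2.example.com'], 2222).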
| {
"repo_name": "theasp/ansible-inventory-yml",
"path": "inventory_plugins/yaml_groups.py",
"copies": "1",
"size": "9046",
"license": "mit",
"hash": -2085740337542393600,
"line_mean": 31.1921708185,
"line_max": 122,
"alpha_frac": 0.6116515587,
"autogenerated": false,
"ratio": 3.9658044717229286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5077456030422929,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
import boto.ec2
import boto.cloudformation
import os
import sys
import time
import pickle
# region/stack/param
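# Editor's illustrative usage (stack and output names are hypothetical):
#   "{{ lookup('cf_output', 'us-east-1/my-stack/VpcId') }}"
#   "{{ lookup('cf_output', 'my-stack/VpcId') }}"   # region taken from AWS_REGION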
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
self.cache_dir = os.path.join(os.environ['HOME'],'.stack_outputs')
self.cache_time = 60
def check_cache(self, file):
now = int(time.time())
data = ''
if os.path.isfile(file):
# check time stamp of file
if ( now - int(os.path.getmtime(file)) ) < self.cache_time:
fh = open(file, 'r')
data = pickle.load(fh)
return data
def get_regions(self):
regions_cache = os.path.join(self.cache_dir, 'regions')
regions = self.check_cache(regions_cache)
if regions:
pass
else:
try:
regions = boto.ec2.regions()
regions = [ r.name for r in regions ]
fh = open(regions_cache, 'w')
pickle.dump(regions, fh)
except:
raise AnsibleError('Couldn\'t retrieve aws regions')
return regions
def get_stack_info(self, region, stack_name):
stack_cache = os.path.join(self.cache_dir, region + '-' + stack_name)
outputs = self.check_cache(stack_cache)
if outputs:
pass
else:
try:
conn = boto.cloudformation.connect_to_region(region)
stack = conn.describe_stacks(stack_name_or_id=stack_name)[0]
fh = open(stack_cache, 'w')
outputs = stack.outputs
pickle.dump(outputs, fh)
except:
outputs = ''
return outputs
def run(self, terms, inject=None, **kwargs):
if not os.path.isdir(self.cache_dir):
os.mkdir(self.cache_dir)
regions = self.get_regions()
if len(terms) == 1:
args = terms[0].split('/')
else:
args = terms
if args[0] in regions:
region = args[0]
stack_name = args[1]
keys = args[2:]
else:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
if not region in regions:
                    raise AnsibleError('%s is not a valid aws region' % region)
stack_name = args[0]
keys = args[1:]
else:
raise AnsibleError('aws region not found in argument or AWS_REGION env var')
stack_outputs = self.get_stack_info(region, stack_name)
outputs = []
if stack_outputs:
for obj in stack_outputs:
if obj.key in keys:
outputs.append(obj.value)
if len(outputs) == 0:
            raise AnsibleError('Nothing was returned by lookup')
return outputs
| {
"repo_name": "pjodouin/ansible-repo",
"path": "plugins/lookup/cf_output.py",
"copies": "1",
"size": "2695",
"license": "apache-2.0",
"hash": 3942088175768661000,
"line_mean": 25.95,
"line_max": 84,
"alpha_frac": 0.6159554731,
"autogenerated": false,
"ratio": 3.6271870794078063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47431425525078064,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import errors as ansible_errors
from ansible.plugins.action import ActionBase
from ansible.release import __version__ as ansible_version
from dciauth.version import __version__ as dciauth_version
from dciclient.v1.api import context as dci_context
from dciclient.version import __version__ as dciclient_version
import os
try:
from dciclient.v1.api import component as dci_component
from dciclient.v1.api import job as dci_job
from dciclient.v1.api import topic as dci_topic
except ImportError:
dciclient_found = False
else:
dciclient_found = True
class ActionModule(ActionBase):
@staticmethod
def _get_details():
"""Method that retrieves the appropriate credentials. """
login = os.getenv('DCI_LOGIN')
password = os.getenv('DCI_PASSWORD')
client_id = os.getenv('DCI_CLIENT_ID')
api_secret = os.getenv('DCI_API_SECRET')
url = os.getenv('DCI_CS_URL', 'https://api.distributed-ci.io')
return login, password, url, client_id, api_secret
def _build_dci_context(self):
login, password, url, client_id, api_secret = self._get_details()
user_agent = ('Ansible/%s (python-dciclient/%s, python-dciauth/%s)'
) % (ansible_version, dciclient_version, dciauth_version)
if login is not None and password is not None:
return dci_context.build_dci_context(url, login, password,
user_agent)
elif client_id is not None and api_secret is not None:
return dci_context.build_signature_context(url, client_id,
api_secret, user_agent)
def run(self, tmp=None, task_vars=None):
super(ActionModule, self).run(tmp, task_vars)
ctx = self._build_dci_context()
job_id = task_vars['job_info']['job']['id']
team_id = task_vars['job_info']['job']['team_id']
topic_id = task_vars['job_info']['job']['topic_id']
git_args = self._task.args.copy()
module_return = self._execute_module(module_name='git',
module_args=git_args,
task_vars=task_vars, tmp=tmp)
if 'after' not in module_return:
return module_return
# format = <repo name>:<commit id>
project_name = git_args['repo'].split('/')[-1]
if project_name.endswith('.git'):
project_name = project_name[:-4]
cmpt_name = module_return['after']
cmpt = dci_component.create(
ctx,
name=cmpt_name,
canonical_project_name='%s %s' % (project_name, cmpt_name[:7]),
team_id=team_id,
topic_id=topic_id,
url="%s/commit/%s" % (git_args['repo'], module_return['after']),
type=project_name)
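        # Editor's illustrative example (hypothetical repo and sha): for repo
        # 'https://github.com/org/proj.git' and commit 'abc1234def...', the component is
        # named 'abc1234def...' with canonical_project_name 'proj abc1234' and type 'proj'.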
cmpt_id = None
if cmpt.status_code == 201:
cmpt_id = cmpt.json()['component']['id']
else:
_where = "name:%s,type:%s" % (cmpt_name, project_name)
res = dci_topic.list_components(ctx, topic_id, where=_where)
cmpts = res.json()['components']
if len(cmpts) > 0:
cmpt_id = cmpts[0]['id']
if cmpt_id is None:
raise ansible_errors.AnsibleError('component %s not found or not created' % cmpt_name) # noqa
cmpt = dci_job.add_component(
ctx,
job_id=job_id,
component_id=cmpt_id)
if cmpt.status_code == 409:
module_return['message_action_plugin_git'] = cmpt.text
elif cmpt.status_code != 201:
raise ansible_errors.AnsibleError('error while attaching component %s to job %s: %s' % (cmpt_id, job_id, cmpt.text)) # noqa
return module_return
| {
"repo_name": "redhat-cip/dci-ansible",
"path": "action_plugins/git.py",
"copies": "1",
"size": "3928",
"license": "apache-2.0",
"hash": 1967395421067684000,
"line_mean": 37.5098039216,
"line_max": 136,
"alpha_frac": 0.5784114053,
"autogenerated": false,
"ratio": 3.6744621141253506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9752873519425351,
"avg_score": 0,
"num_lines": 102
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible import constants as C
from pprint import pprint
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'quietable'
def __init__(self):
self._play = None
self._last_task_banner = None
self._last_task_name = None
self._task_type_cache = {}
self._last_play_printed = False
self._last_task_printed = False
super(CallbackModule, self).__init__()
def v2_playbook_on_play_start(self, play):
self._play = play
self._print_play_banner(play)
def v2_playbook_on_task_start(self, task, is_conditional):
self._task_start(task, prefix='Task')
def v2_playbook_on_cleanup_task_start(self, task):
self._task_start(task, prefix='Cleanup')
def v2_playbook_on_handler_task_start(self, task):
self._task_start(task, prefix='Running Handler')
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if not self._last_play_printed:
self._print_play_banner(self._play, force_print=True)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task, force_print=True)
self._handle_exception(result._result, use_stderr=True)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_ERROR, stderr=True)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_ERROR, stderr=True)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if isinstance(result._task, TaskInclude) or 'silent' in result._task.tags:
return
if result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
else:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
# Assertion passed, extra metadata is just noise
if result._task.action == 'assert':
result._result = {}
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if isinstance(result._task, TaskInclude) or 'silent' in result._task.tags:
return
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if not self._last_play_printed:
self._print_play_banner(self._play, force_print=True)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task, force_print=True)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if isinstance(result._task, TaskInclude) or 'silent' in result._task.tags:
return
if result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'changed'
color = C.COLOR_CHANGED
else:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'ok'
color = C.COLOR_OK
if delegated_vars:
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += ": [%s]" % result._host.get_name()
msg += " => (item=%s)" % (self._get_item_label(result._result),)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_skipped(self, result):
if "silent" in result._task.tags:
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_item_on_failed(self, result):
if not self._last_play_printed:
self._print_play_banner(self._play, force_print=True)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task, force_print=True)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result)
msg = "failed: "
if delegated_vars:
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += "[%s]" % (result._host.get_name())
if not self._last_play_printed:
self._print_play_banner(self._play, force_print=True)
if not self._last_task_printed:
self._print_task_banner(result._task, force_print=True)
self._handle_warnings(result._result)
self._display.display(msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
def v2_playbook_on_stats(self, stats):
self._display.banner("Summary")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s %s" % (
hostcolor(h, t),
colorize(u'Ok', t['ok'], C.COLOR_OK),
colorize(u'Changed', t['changed'], C.COLOR_CHANGED),
colorize(u'Unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'Failed', t['failures'], C.COLOR_ERROR),
colorize(u'Skipped', t['skipped'], C.COLOR_SKIP)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'Ok', t['ok'], None),
colorize(u'Changed', t['changed'], None),
colorize(u'Unreachable', t['unreachable'], None),
colorize(u'Failed', t['failures'], None),
colorize(u'Skip', t['skipped'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats if required
if stats.custom and self.show_custom_stats:
self._display.banner("Custom stats: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRun: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def _task_start(self, task, prefix=None):
# Cache output prefix for task if provided
# This is needed to properly display 'RUNNING HANDLER' and similar
# when hiding skipped/ok task results
if prefix is not None:
self._task_type_cache[task._uuid] = prefix
# Preserve task name, as all vars may not be available for templating
# when we need it later
if self._play.strategy == 'free':
# Explicitly set to None for strategy 'free' to account for any cached
# task title from a previous non-free play
self._last_task_name = None
else:
self._last_task_name = task.get_name().strip()
self._print_task_banner(task)
def _print_play_banner(self, play, force_print=False):
name = play.get_name().strip()
if not name:
msg = u"Play"
else:
msg = u"Play [%s]" % name
self._last_play_printed = False
if 'silent' in play.tags and not force_print:
return
self._last_play_printed = True
self._display.banner(msg, color=C.COLOR_HIGHLIGHT)
def _print_task_banner(self, task, force_print=False):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
        # machine and we haven't run it there yet.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
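            # e.g. a task with args {'path': '/tmp/x', 'state': 'touch'} renders as
            # ' path=/tmp/x, state=touch' appended to the banner (illustrative).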
prefix = self._task_type_cache.get(task._uuid, 'TASK')
# Use cached task name
task_name = self._last_task_name
if task_name is None:
task_name = task.get_name().strip()
self._last_task_printed = False
        if isinstance(task, TaskInclude) or ('silent' in task.tags and not force_print):
return
self._last_task_printed = True
self._display.banner(u"%s [%s%s]" % (prefix, task_name, args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
self._last_task_banner = task._uuid
| {
"repo_name": "bbatsche/Vagrant-Setup",
"path": "ansible/callback_plugins/quietable.py",
"copies": "1",
"size": "13317",
"license": "mit",
"hash": -2615911227120475600,
"line_mean": 41.6826923077,
"line_max": 159,
"alpha_frac": 0.5685214388,
"autogenerated": false,
"ratio": 3.836646499567848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9900435774568306,
"avg_score": 0.0009464327599084296,
"num_lines": 312
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleError, AnsibleActionFail
from ansible.module_utils._text import to_text
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp
resolved_template = self._task.args.get('template', None)
try:
# We use _find_needle to resolve the path where the template file
# is stored in ansible role
resolved_template = self._find_needle('files', resolved_template)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
module_return = dict(changed=False)
# We copy the module and re-run it with the updated template path
module_args = self._task.args.copy()
module_args.update(
dict(
template=resolved_template
)
)
module_return = self._execute_module(
module_name='synthesio.ovh.installation_template',
module_args=module_args,
task_vars=task_vars)
result.update(module_return)
return result
| {
"repo_name": "synthesio/infra-ovh-ansible-module",
"path": "plugins/action/installation_template.py",
"copies": "1",
"size": "1336",
"license": "mit",
"hash": -5615148556057649000,
"line_mean": 32.4,
"line_max": 77,
"alpha_frac": 0.625748503,
"autogenerated": false,
"ratio": 4.227848101265823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353596604265823,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule # noqa
from datetime import datetime # noqa
class CallbackModule(DefaultCallbackModule):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'vanko'
def __init__(self):
super(CallbackModule, self).__init__()
display_orig = self._display.display
def display(msg, *args, **kwargs):
msg = msg.strip()
if msg.endswith('***'):
stamp = str(datetime.now().replace(microsecond=0))
if self._display.verbosity < 1:
stamp = stamp.split()[1] # omit date part
msg = '[%s] %s' % (stamp, msg)
if msg:
display_orig(msg, *args, **kwargs)
self._display.display = display
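        # Editor's note (illustrative): with the wrapper above, a banner such as
        #   'PLAY [web] ***' is displayed as '[12:34:56] PLAY [web] ***'
        # (the date part is kept as well once verbosity is 1 or higher).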
@property
def _is_verbose(self):
return self._display.verbosity > 1
def v2_playbook_on_task_start(self, task, is_conditional):
if self._is_verbose or task.action != 'include':
super(CallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
def v2_runner_on_skipped(self, result):
if self._is_verbose:
super(CallbackModule, self).v2_runner_on_skipped(result)
def v2_runner_item_on_skipped(self, result):
if self._is_verbose:
super(CallbackModule, self).v2_runner_item_on_skipped(result)
def v2_playbook_on_include(self, included_file):
if self._is_verbose:
super(CallbackModule, self).v2_playbook_on_include(included_file)
| {
"repo_name": "ivandeex/dz",
"path": "ansible/lib/plugins/callback/vanko.py",
"copies": "1",
"size": "1667",
"license": "mit",
"hash": 5134882262732226000,
"line_mean": 33.7291666667,
"line_max": 92,
"alpha_frac": 0.6142771446,
"autogenerated": false,
"ratio": 3.797266514806378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49115436594063777,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
import os
import sys
import json
class CallbackModule(CallbackBase):
def __init__(self):
super(CallbackModule, self).__init__()
def emit(self, host, category, data):
if type(data) == dict:
cmd = data['cmd'] if 'cmd' in data else None
stdout = data['stdout'] if 'stdout' in data else None
stderr = data['stderr'] if 'stderr' in data else None
reason = data['reason'] if 'reason' in data else None
if cmd:
print(hilite('[%s]\n> %s' % (category, cmd), category))
if reason:
print(hilite(reason, category))
if stdout:
print(hilite(stdout, category))
if stderr:
print(hilite(stderr, category))
def runner_on_failed(self, host, res, ignore_errors=False):
self.emit(host, 'FAILED', res)
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
self.emit(host, 'SKIPPED', '...')
def runner_on_unreachable(self, host, res):
self.emit(host, 'UNREACHABLE', res)
def runner_on_async_failed(self, host, res, jid):
self.emit(host, 'FAILED', res)
def hilite(msg, status):
def supportsColor():
if (sys.platform != 'win32' or 'ANSICON' in os.environ) and sys.stdout.isatty():
return True
else:
return False
if supportsColor():
attr = []
if status == 'FAILED':
attr.append('31') # red
else:
attr.append('1') # bold
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), msg)
else:
return msg
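# Editor's illustrative example: on a colour-capable TTY,
#   hilite('task failed', 'FAILED')  -> '\x1b[31mtask failed\x1b[0m'   (red)
#   hilite('retrying...', 'SKIPPED') -> '\x1b[1mretrying...\x1b[0m'    (bold)
# while without a TTY the message is returned unchanged.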
| {
"repo_name": "nwspeete-ibm/openwhisk",
"path": "ansible/callbacks/logformatter.py",
"copies": "1",
"size": "1795",
"license": "apache-2.0",
"hash": -776392806133759100,
"line_mean": 28.9166666667,
"line_max": 88,
"alpha_frac": 0.5587743733,
"autogenerated": false,
"ratio": 3.7552301255230125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48140044988230124,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.connection.ssh import Connection as SSHConnection
DOCUMENTATION = '''
connection: packer
short_description: ssh based connections for powershell via packer
description:
- This connection plugin allows ansible to communicate to the target packer machines via ssh based connections for powershell.
author: Packer Community
version_added: na
options:
host:
description: Hostname/ip to connect to.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
host_key_checking:
#constant: HOST_KEY_CHECKING
description: Determines if ssh should check host keys
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
ssh_args:
description: Arguments to pass to all ssh cli tools
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
ssh_common_args:
description: Common extra args for all ssh CLI tools
vars:
- name: ansible_ssh_common_args
ssh_executable:
default: ssh
description:
- This defines the location of the ssh binary. It defaults to `ssh` which will use the first ssh binary available in $PATH.
- This option is usually not required, it might be useful when access to system ssh is restricted,
or when using ssh wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
yaml: {key: ssh_connection.ssh_executable}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
scp_extra_args:
description: Extra exclusive to the 'scp' CLI
vars:
- name: ansible_scp_extra_args
sftp_extra_args:
description: Extra exclusive to the 'sftp' CLI
vars:
- name: ansible_sftp_extra_args
ssh_extra_args:
description: Extra exclusive to the 'ssh' CLI
vars:
- name: ansible_ssh_extra_args
retries:
# constant: ANSIBLE_SSH_RETRIES
description: Number of attempts to connect.
default: 3
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
port:
description: Remote port to connect to.
type: int
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
        - If no user is supplied, Ansible will let the ssh client binary choose the user as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
pipelining:
default: ANSIBLE_PIPELINING
description:
- Pipelining reduces the number of SSH operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- This can result in a very significant performance improvement when enabled.
- However this conflicts with privilege escalation (become).
For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
#- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
#- section: ssh_connection
# key: pipelining
type: boolean
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
control_path:
default: null
description:
- This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the `%(directory)s` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
sftp_batch_mode:
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: boolean
scp_if_ssh:
default: smart
description:
        - "Preferred method to use when transferring files over ssh"
- When set to smart, Ansible will try them until one succeeds or they all fail
- If set to True, it will force 'scp', if False it will use 'sftp'
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
'''
class Connection(SSHConnection):
''' ssh based connections for powershell via packer'''
transport = 'packer'
has_pipelining = True
become_methods = []
allow_executable = False
module_implementation_preferences = ('.ps1', '')
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs) | {
"repo_name": "KohlsTechnology/packer",
"path": "examples/ansible/connection-plugin/2.4.x/packer.py",
"copies": "12",
"size": "7041",
"license": "mpl-2.0",
"hash": 7257020303845075000,
"line_mean": 36.8602150538,
"line_max": 143,
"alpha_frac": 0.5766226388,
"autogenerated": false,
"ratio": 4.592954990215264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from . tower_module import TowerModule
from ansible.module_utils.basic import missing_required_lib
try:
from awxkit.api.client import Connection
from awxkit.api.pages.api import ApiV2
from awxkit.api import get_registered_page
HAS_AWX_KIT = True
except ImportError:
HAS_AWX_KIT = False
class TowerAWXKitModule(TowerModule):
connection = None
apiV2Ref = None
def __init__(self, argument_spec, **kwargs):
kwargs['supports_check_mode'] = False
super(TowerAWXKitModule, self).__init__(argument_spec=argument_spec, **kwargs)
# Die if we don't have AWX_KIT installed
if not HAS_AWX_KIT:
            self.fail_json(msg=missing_required_lib('awxkit'))
        # Establish our connection object
self.connection = Connection(self.host, verify=self.verify_ssl)
def authenticate(self):
try:
if self.oauth_token:
self.connection.login(None, None, token=self.oauth_token)
self.authenticated = True
elif self.username:
self.connection.login(username=self.username, password=self.password)
self.authenticated = True
except Exception:
            self.fail_json(msg="Failed to authenticate")
def get_api_v2_object(self):
if not self.apiV2Ref:
if not self.authenticated:
self.authenticate()
v2_index = get_registered_page('/api/v2/')(self.connection).get()
self.api_ref = ApiV2(connection=self.connection, **{'json': v2_index})
return self.api_ref
def logout(self):
if self.authenticated:
self.connection.logout()
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/plugins/module_utils/tower_awxkit.py",
"copies": "1",
"size": "1765",
"license": "apache-2.0",
"hash": -7916917330926044000,
"line_mean": 32.3018867925,
"line_max": 86,
"alpha_frac": 0.6362606232,
"autogenerated": false,
"ratio": 3.828633405639913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964894028839913,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from . tower_module import TowerModule
from ansible.module_utils.urls import Request, SSLValidationError, ConnectionError
from ansible.module_utils.six import PY2
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.http_cookiejar import CookieJar
import re
from json import loads, dumps
class TowerAPIModule(TowerModule):
# TODO: Move the collection version check into tower_module.py
# This gets set by the make process so whatever is in here is irrelevant
_COLLECTION_VERSION = "0.0.1-devel"
_COLLECTION_TYPE = "awx"
# This maps the collections type (awx/tower) to the values returned by the API
# Those values can be found in awx/api/generics.py line 204
collection_to_version = {
'awx': 'AWX',
'tower': 'Red Hat Ansible Tower',
}
session = None
cookie_jar = CookieJar()
def __init__(self, argument_spec, direct_params=None, error_callback=None, warn_callback=None, **kwargs):
kwargs['supports_check_mode'] = True
super(TowerAPIModule, self).__init__(argument_spec=argument_spec, direct_params=direct_params,
error_callback=error_callback, warn_callback=warn_callback, **kwargs)
self.session = Request(cookies=CookieJar(), validate_certs=self.verify_ssl)
@staticmethod
def param_to_endpoint(name):
exceptions = {
'inventory': 'inventories',
'target_team': 'teams',
'workflow': 'workflow_job_templates'
}
return exceptions.get(name, '{0}s'.format(name))
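    # Editor's note (derived from the mapping above):
    #   param_to_endpoint('job_template') -> 'job_templates'
    #   param_to_endpoint('inventory')    -> 'inventories'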
def head_endpoint(self, endpoint, *args, **kwargs):
return self.make_request('HEAD', endpoint, **kwargs)
def get_endpoint(self, endpoint, *args, **kwargs):
return self.make_request('GET', endpoint, **kwargs)
def patch_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('PATCH', endpoint, **kwargs)
def post_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('POST', endpoint, **kwargs)
def delete_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('DELETE', endpoint, **kwargs)
def get_all_endpoint(self, endpoint, *args, **kwargs):
response = self.get_endpoint(endpoint, *args, **kwargs)
if 'next' not in response['json']:
raise RuntimeError('Expected list from API at {0}, got: {1}'.format(endpoint, response))
next_page = response['json']['next']
if response['json']['count'] > 10000:
self.fail_json(msg='The number of items being queried for is higher than 10,000.')
while next_page is not None:
next_response = self.get_endpoint(next_page)
response['json']['results'] = response['json']['results'] + next_response['json']['results']
next_page = next_response['json']['next']
response['json']['next'] = next_page
return response
def get_one(self, endpoint, *args, **kwargs):
response = self.get_endpoint(endpoint, *args, **kwargs)
if response['status_code'] != 200:
fail_msg = "Got a {0} response when trying to get one from {1}".format(response['status_code'], endpoint)
if 'detail' in response.get('json', {}):
fail_msg += ', detail: {0}'.format(response['json']['detail'])
self.fail_json(msg=fail_msg)
if 'count' not in response['json'] or 'results' not in response['json']:
self.fail_json(msg="The endpoint did not provide count and results")
if response['json']['count'] == 0:
return None
elif response['json']['count'] > 1:
self.fail_json(msg="An unexpected number of items was returned from the API ({0})".format(response['json']['count']))
return response['json']['results'][0]
def resolve_name_to_id(self, endpoint, name_or_id):
# Try to resolve the object by name
name_field = 'name'
if endpoint == 'users':
name_field = 'username'
response = self.get_endpoint(endpoint, **{'data': {name_field: name_or_id}})
if response['status_code'] == 400:
self.fail_json(msg="Unable to try and resolve {0} for {1} : {2}".format(endpoint, name_or_id, response['json']['detail']))
if response['json']['count'] == 1:
return response['json']['results'][0]['id']
elif response['json']['count'] == 0:
try:
int(name_or_id)
# If we got 0 items by name, maybe they gave us an ID, let's try looking it up by ID
response = self.head_endpoint("{0}/{1}".format(endpoint, name_or_id), **{'return_none_on_404': True})
if response is not None:
return name_or_id
except ValueError:
# If we got a value error than we didn't have an integer so we can just pass and fall down to the fail
pass
self.fail_json(msg="The {0} {1} was not found on the Tower server".format(endpoint, name_or_id))
else:
self.fail_json(msg="Found too many names {0} at endpoint {1} try using an ID instead of a name".format(name_or_id, endpoint))
def make_request(self, method, endpoint, *args, **kwargs):
# In case someone is calling us directly; make sure we were given a method, let's not just assume a GET
if not method:
raise Exception("The HTTP method must be defined")
# Make sure we start with /api/vX
if not endpoint.startswith("/"):
endpoint = "/{0}".format(endpoint)
if not endpoint.startswith("/api/"):
endpoint = "/api/v2{0}".format(endpoint)
if not endpoint.endswith('/') and '?' not in endpoint:
endpoint = "{0}/".format(endpoint)
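        # Editor's note (derived from the rules above): e.g. 'job_templates' becomes
        # '/api/v2/job_templates/', while an already qualified '/api/v2/ping/' is left as is.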
# Extract the headers, this will be used in a couple of places
headers = kwargs.get('headers', {})
# Authenticate to Tower (if we don't have a token and if not already done so)
if not self.oauth_token and not self.authenticated:
# This method will set a cookie in the cookie jar for us and also an oauth_token
self.authenticate(**kwargs)
if self.oauth_token:
# If we have a oauth token, we just use a bearer header
headers['Authorization'] = 'Bearer {0}'.format(self.oauth_token)
# Update the URL path with the endpoint
self.url = self.url._replace(path=endpoint)
if method in ['POST', 'PUT', 'PATCH']:
headers.setdefault('Content-Type', 'application/json')
kwargs['headers'] = headers
elif kwargs.get('data'):
self.url = self.url._replace(query=urlencode(kwargs.get('data')))
data = None # Important, if content type is not JSON, this should not be dict type
if headers.get('Content-Type', '') == 'application/json':
data = dumps(kwargs.get('data', {}))
try:
response = self.session.open(method, self.url.geturl(), headers=headers, validate_certs=self.verify_ssl, follow_redirects=True, data=data)
except(SSLValidationError) as ssl_err:
self.fail_json(msg="Could not establish a secure connection to your host ({1}): {0}.".format(self.url.netloc, ssl_err))
except(ConnectionError) as con_err:
self.fail_json(msg="There was a network error of some kind trying to connect to your host ({1}): {0}.".format(self.url.netloc, con_err))
except(HTTPError) as he:
# Sanity check: Did the server send back some kind of internal error?
if he.code >= 500:
self.fail_json(msg='The host sent back a server error ({1}): {0}. Please check the logs and try again later'.format(self.url.path, he))
# Sanity check: Did we fail to authenticate properly? If so, fail out now; this is always a failure.
elif he.code == 401:
self.fail_json(msg='Invalid Tower authentication credentials for {0} (HTTP 401).'.format(self.url.path))
# Sanity check: Did we get a forbidden response, which means that the user isn't allowed to do this? Report that.
elif he.code == 403:
self.fail_json(msg="You don't have permission to {1} to {0} (HTTP 403).".format(self.url.path, method))
# Sanity check: Did we get a 404 response?
# Requests with primary keys will return a 404 if there is no response, and we want to consistently trap these.
elif he.code == 404:
if kwargs.get('return_none_on_404', False):
return None
self.fail_json(msg='The requested object could not be found at {0}.'.format(self.url.path))
# Sanity check: Did we get a 405 response?
# A 405 means we used a method that isn't allowed. Usually this is a bad request, but it requires special treatment because the
# API sends it as a logic error in a few situations (e.g. trying to cancel a job that isn't running).
elif he.code == 405:
                self.fail_json(msg="The Tower server says you can't make a request with the {0} method to this endpoint {1}".format(method, self.url.path))
# Sanity check: Did we get some other kind of error? If so, write an appropriate error message.
elif he.code >= 400:
# We are going to return a 400 so the module can decide what to do with it
page_data = he.read()
try:
return {'status_code': he.code, 'json': loads(page_data)}
# JSONDecodeError only available on Python 3.5+
except ValueError:
return {'status_code': he.code, 'text': page_data}
elif he.code == 204 and method == 'DELETE':
# A 204 is a normal response for a delete function
pass
else:
self.fail_json(msg="Unexpected return code when calling {0}: {1}".format(self.url.geturl(), he))
except(Exception) as e:
self.fail_json(msg="There was an unknown error when trying to connect to {2}: {0} {1}".format(type(e).__name__, e, self.url.geturl()))
finally:
self.url = self.url._replace(query=None)
if not self.version_checked:
# In PY2 we get back an HTTPResponse object but PY2 is returning an addinfourl
# First try to get the headers in PY3 format and then drop down to PY2.
try:
tower_type = response.getheader('X-API-Product-Name', None)
tower_version = response.getheader('X-API-Product-Version', None)
except Exception:
tower_type = response.info().getheader('X-API-Product-Name', None)
tower_version = response.info().getheader('X-API-Product-Version', None)
if self._COLLECTION_TYPE not in self.collection_to_version or self.collection_to_version[self._COLLECTION_TYPE] != tower_type:
self.warn("You are using the {0} version of this collection but connecting to {1}".format(
self._COLLECTION_TYPE, tower_type
))
elif self._COLLECTION_VERSION != tower_version:
self.warn("You are running collection version {0} but connecting to tower version {1}".format(
self._COLLECTION_VERSION, tower_version
))
self.version_checked = True
response_body = ''
try:
response_body = response.read()
except(Exception) as e:
self.fail_json(msg="Failed to read response body: {0}".format(e))
response_json = {}
if response_body and response_body != '':
try:
response_json = loads(response_body)
except(Exception) as e:
self.fail_json(msg="Failed to parse the response json: {0}".format(e))
if PY2:
status_code = response.getcode()
else:
status_code = response.status
return {'status_code': status_code, 'json': response_json}
def authenticate(self, **kwargs):
if self.username and self.password:
# Attempt to get a token from /api/v2/tokens/ by giving it our username/password combo
# If we have a username and password, we need to get a session cookie
login_data = {
"description": "Ansible Tower Module Token",
"application": None,
"scope": "write",
}
            # Post to the tokens endpoint with basic auth to try and get a token
api_token_url = (self.url._replace(path='/api/v2/tokens/')).geturl()
try:
response = self.session.open(
'POST', api_token_url,
validate_certs=self.verify_ssl, follow_redirects=True,
force_basic_auth=True, url_username=self.username, url_password=self.password,
data=dumps(login_data), headers={'Content-Type': 'application/json'}
)
except HTTPError as he:
try:
resp = he.read()
except Exception as e:
resp = 'unknown {0}'.format(e)
self.fail_json(msg='Failed to get token: {0}'.format(he), response=resp)
except(Exception) as e:
# Sanity check: Did the server send back some kind of internal error?
self.fail_json(msg='Failed to get token: {0}'.format(e))
token_response = None
try:
token_response = response.read()
response_json = loads(token_response)
self.oauth_token_id = response_json['id']
self.oauth_token = response_json['token']
except(Exception) as e:
self.fail_json(msg="Failed to extract token information from login response: {0}".format(e), **{'response': token_response})
# If we have neither of these, then we can try un-authenticated access
self.authenticated = True
def delete_if_needed(self, existing_item, on_delete=None):
# This will exit from the module on its own.
# If the method successfully deletes an item and on_delete param is defined,
        # the on_delete parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item is not defined (so no delete needs to happen)
        # 2. The response from Tower from calling the delete on the endpoint. It's up to you to process the response and exit from the module
# Note: common error codes from the Tower API can cause the module to fail
if existing_item:
# If we have an item, we can try to delete it
try:
item_url = existing_item['url']
item_type = existing_item['type']
item_id = existing_item['id']
except KeyError as ke:
self.fail_json(msg="Unable to process delete of item due to missing data {0}".format(ke))
if 'name' in existing_item:
item_name = existing_item['name']
elif 'username' in existing_item:
item_name = existing_item['username']
elif 'identifier' in existing_item:
item_name = existing_item['identifier']
elif item_type == 'o_auth2_access_token':
# An oauth2 token has no name, instead we will use its id for any of the messages
item_name = existing_item['id']
elif item_type == 'credential_input_source':
# An credential_input_source has no name, instead we will use its id for any of the messages
item_name = existing_item['id']
else:
self.fail_json(msg="Unable to process delete of {0} due to missing name".format(item_type))
response = self.delete_endpoint(item_url)
if response['status_code'] in [202, 204]:
if on_delete:
on_delete(self, response['json'])
self.json_output['changed'] = True
self.json_output['id'] = item_id
self.exit_json(**self.json_output)
else:
if 'json' in response and '__all__' in response['json']:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']['__all__'][0]))
elif 'json' in response:
# This is from a project delete (if there is an active job against it)
if 'error' in response['json']:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']['error']))
else:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']))
else:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['status_code']))
else:
self.exit_json(**self.json_output)
def modify_associations(self, association_endpoint, new_association_list):
# if we got None instead of [] we are not modifying the association_list
if new_association_list is None:
return
# First get the existing associations
response = self.get_all_endpoint(association_endpoint)
existing_associated_ids = [association['id'] for association in response['json']['results']]
# Disassociate anything that is in existing_associated_ids but not in new_association_list
ids_to_remove = list(set(existing_associated_ids) - set(new_association_list))
for an_id in ids_to_remove:
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id), 'disassociate': True}})
if response['status_code'] == 204:
self.json_output['changed'] = True
else:
self.fail_json(msg="Failed to disassociate item {0}".format(response['json']['detail']))
# Associate anything that is in new_association_list but not in `association`
for an_id in list(set(new_association_list) - set(existing_associated_ids)):
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id)}})
if response['status_code'] == 204:
self.json_output['changed'] = True
else:
self.fail_json(msg="Failed to associate item {0}".format(response['json']['detail']))
def create_if_needed(self, existing_item, new_item, endpoint, on_create=None, item_type='unknown', associations=None):
# This will exit from the module on its own
# If the method successfully creates an item and on_create param is defined,
        # the on_create parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item is already defined (so no create needs to happen)
        # 2. The response from Tower from calling the patch on the endpoint. It's up to you to process the response and exit from the module
# Note: common error codes from the Tower API can cause the module to fail
if not endpoint:
self.fail_json(msg="Unable to create new {0} due to missing endpoint".format(item_type))
item_url = None
if existing_item:
try:
item_url = existing_item['url']
except KeyError as ke:
self.fail_json(msg="Unable to process create of item due to missing data {0}".format(ke))
else:
            # If we don't have an existing_item, we can try to create it
# We have to rely on item_type being passed in since we don't have an existing item that declares its type
# We will pull the item_name out from the new_item, if it exists
for key in ('name', 'username', 'identifier', 'hostname'):
if key in new_item:
item_name = new_item[key]
break
else:
item_name = 'unknown'
response = self.post_endpoint(endpoint, **{'data': new_item})
if response['status_code'] == 201:
self.json_output['name'] = 'unknown'
for key in ('name', 'username', 'identifier', 'hostname'):
if key in response['json']:
self.json_output['name'] = response['json'][key]
self.json_output['id'] = response['json']['id']
self.json_output['changed'] = True
item_url = response['json']['url']
else:
if 'json' in response and '__all__' in response['json']:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['json']['__all__'][0]))
elif 'json' in response:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['json']))
else:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['status_code']))
# Process any associations with this item
if associations is not None:
for association_type in associations:
sub_endpoint = '{0}{1}/'.format(item_url, association_type)
self.modify_associations(sub_endpoint, associations[association_type])
# If we have an on_create method and we actually changed something we can call on_create
if on_create is not None and self.json_output['changed']:
on_create(self, response['json'])
else:
self.exit_json(**self.json_output)
def _encrypted_changed_warning(self, field, old, warning=False):
if not warning:
return
self.warn(
'The field {0} of {1} {2} has encrypted data and may inaccurately report task is changed.'.format(
field, old.get('type', 'unknown'), old.get('id', 'unknown')
))
@staticmethod
def has_encrypted_values(obj):
"""Returns True if JSON-like python content in obj has $encrypted$
anywhere in the data as a value
"""
if isinstance(obj, dict):
for val in obj.values():
if TowerAPIModule.has_encrypted_values(val):
return True
elif isinstance(obj, list):
for val in obj:
if TowerAPIModule.has_encrypted_values(val):
return True
elif obj == TowerAPIModule.ENCRYPTED_STRING:
return True
return False
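    # Doctest-style sketch of the traversal above (values are illustrative, and
    # this assumes ENCRYPTED_STRING == '$encrypted$'):
    #
    #   >>> TowerAPIModule.has_encrypted_values({'inputs': {'password': '$encrypted$'}})
    #   True
    #   >>> TowerAPIModule.has_encrypted_values({'inputs': {'password': 'plain'}, 'tags': []})
    #   False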
def objects_could_be_different(self, old, new, field_set=None, warning=False):
if field_set is None:
field_set = set(fd for fd in new.keys() if fd not in ('modified', 'related', 'summary_fields'))
for field in field_set:
new_field = new.get(field, None)
old_field = old.get(field, None)
if old_field != new_field:
return True # Something doesn't match
elif self.has_encrypted_values(new_field) or field not in new:
# case of 'field not in new' - user password write-only field that API will not display
self._encrypted_changed_warning(field, old, warning=warning)
return True
return False
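    # Illustrative sketch (field values are made up): comparison is limited to the
    # keys of `new` (minus 'modified'/'related'/'summary_fields') unless field_set
    # is given, and encrypted data forces a conservative "could be different":
    #
    #   old = {'name': 'demo', 'inputs': {'password': '$encrypted$'}, 'modified': '...'}
    #   new = {'name': 'demo', 'inputs': {'password': '$encrypted$'}}
    #   self.objects_could_be_different(old, new)                             # True: encrypted data
    #   self.objects_could_be_different({'name': 'demo'}, {'name': 'demo'})   # False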
def update_if_needed(self, existing_item, new_item, on_update=None, associations=None):
# This will exit from the module on its own
# If the method successfully updates an item and on_update param is defined,
        # the on_update parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item does not need to be updated
# 2. The response from Tower from patching to the endpoint. It's up to you to process the response and exit from the module.
# Note: common error codes from the Tower API can cause the module to fail
response = None
if existing_item:
# If we have an item, we can see if it needs an update
try:
item_url = existing_item['url']
item_type = existing_item['type']
if item_type == 'user':
item_name = existing_item['username']
elif item_type == 'workflow_job_template_node':
item_name = existing_item['identifier']
elif item_type == 'credential_input_source':
item_name = existing_item['id']
else:
item_name = existing_item['name']
item_id = existing_item['id']
except KeyError as ke:
self.fail_json(msg="Unable to process update of item due to missing data {0}".format(ke))
# Check to see if anything within the item requires the item to be updated
needs_patch = self.objects_could_be_different(existing_item, new_item)
# If we decided the item needs to be updated, update it
self.json_output['id'] = item_id
if needs_patch:
response = self.patch_endpoint(item_url, **{'data': new_item})
if response['status_code'] == 200:
# compare apples-to-apples, old API data to new API data
# but do so considering the fields given in parameters
self.json_output['changed'] = self.objects_could_be_different(
existing_item, response['json'], field_set=new_item.keys(), warning=True)
elif 'json' in response and '__all__' in response['json']:
self.fail_json(msg=response['json']['__all__'])
else:
self.fail_json(**{'msg': "Unable to update {0} {1}, see response".format(item_type, item_name), 'response': response})
else:
raise RuntimeError('update_if_needed called incorrectly without existing_item')
# Process any associations with this item
if associations is not None:
for association_type, id_list in associations.items():
endpoint = '{0}{1}/'.format(item_url, association_type)
self.modify_associations(endpoint, id_list)
# If we change something and have an on_change call it
if on_update is not None and self.json_output['changed']:
if response is None:
last_data = existing_item
else:
last_data = response['json']
on_update(self, last_data)
else:
self.exit_json(**self.json_output)
def create_or_update_if_needed(self, existing_item, new_item, endpoint=None, item_type='unknown', on_create=None, on_update=None, associations=None):
if existing_item:
return self.update_if_needed(existing_item, new_item, on_update=on_update, associations=associations)
else:
return self.create_if_needed(existing_item, new_item, endpoint, on_create=on_create, item_type=item_type, associations=associations)
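    # Illustrative call pattern as the tower_* modules tend to use it (the lookup
    # helper and field names here are a sketch, not a guaranteed signature): look
    # the item up first, then let this method decide whether to POST a new object
    # or PATCH the existing one, exiting the module either way.
    #
    #   existing = self.get_one('teams', name_or_id=module.params.get('name'))
    #   self.create_or_update_if_needed(
    #       existing, {'name': 'my-team', 'organization': org_id},
    #       endpoint='teams', item_type='team',
    #       associations={'users': user_ids},
    #   )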
def logout(self):
if self.authenticated:
# Attempt to delete our current token from /api/v2/tokens/
            # Issue a DELETE to the tokens endpoint with basic auth to release the token
api_token_url = (
self.url._replace(
path='/api/v2/tokens/{0}/'.format(self.oauth_token_id),
                    query=None # in error cases, fail_json exits before exception handling
)
).geturl()
try:
self.session.open(
'DELETE',
api_token_url,
validate_certs=self.verify_ssl,
follow_redirects=True,
force_basic_auth=True,
url_username=self.username,
url_password=self.password
)
self.oauth_token_id = None
self.authenticated = False
except HTTPError as he:
try:
resp = he.read()
except Exception as e:
resp = 'unknown {0}'.format(e)
self.warn('Failed to release tower token: {0}, response: {1}'.format(he, resp))
except(Exception) as e:
# Sanity check: Did the server send back some kind of internal error?
self.warn('Failed to release tower token {0}: {1}'.format(self.oauth_token_id, e))
def is_job_done(self, job_status):
if job_status in ['new', 'pending', 'waiting', 'running']:
return False
else:
return True
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/plugins/module_utils/tower_api.py",
"copies": "1",
"size": "29847",
"license": "apache-2.0",
"hash": 2681223581730491400,
"line_mean": 50.1955403087,
"line_max": 155,
"alpha_frac": 0.5819345328,
"autogenerated": false,
"ratio": 4.225226500566252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5307161033366251,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import jinja2
import os
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
from ansible.utils.unicode import to_bytes
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def create_aeriscloud_directory(self, tmp, task_vars):
module_args = {
'path': '/etc/aeriscloud.d',
'state': 'directory',
            'mode': 0o755,
'owner': 'root',
'group': 'root'
}
return self._execute_module(module_name='file',
module_args=module_args,
tmp=tmp,
task_vars=task_vars)
def get_checksum(self, dest, all_vars, try_directory=False, source=None, tmp=None):
try:
dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp)
if dest_stat['exists'] and dest_stat['isdir'] and try_directory and source:
base = os.path.basename(source)
dest = os.path.join(dest, base)
dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp)
except Exception as e:
return dict(failed=True, msg=to_bytes(e))
return dest_stat['checksum']
def run(self, tmp=None, task_vars=None):
''' handler for template operations '''
if task_vars is None:
task_vars = dict()
res = self.create_aeriscloud_directory(tmp, task_vars)
if 'failed' in res:
return res
result = super(ActionModule, self).run(tmp, task_vars)
name = self._task.args.get('name', None)
services = self._task.args.get('services', None)
data = {
'services': []
}
for service in services:
if 'when' in service and not self._task.evaluate_conditional(service['when'], task_vars):
continue
if 'path' in service and 'protocol' not in service:
service['protocol'] = 'http'
if 'path' not in service:
service['path'] = ''
if 'protocol' not in service:
service['protocol'] = 'tcp'
data['services'].append(service)
template = jinja2.Template("""{%- for service in services -%}
{{ service['name'] }},{{ service['port'] }},{{ service['path'] }},{{ service['protocol'] }}
{% endfor %}""")
resultant = template.render(data)
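        # For reference (illustrative data only): a service entry such as
        #   {'name': 'web', 'port': 80, 'path': '/status', 'protocol': 'http'}
        # renders to the line "web,80,/status,http" in the generated file.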
# Expand any user home dir specification
dest = self._remote_expand_user(os.path.join('/etc/aeriscloud.d', name))
directory_prepended = True
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
if not tmp:
tmp = self._make_tmp_path(remote_user)
self._cleanup_remote_tmp = True
local_checksum = checksum_s(resultant)
remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=dest, tmp=tmp)
if isinstance(remote_checksum, dict):
# Error from remote_checksum is a dict. Valid return is a str
result.update(remote_checksum)
return result
diff = {}
new_module_args = {
            'mode': 0o644,
'owner': 'root',
'group': 'root'
}
if (remote_checksum == '1') or (local_checksum != remote_checksum):
result['changed'] = True
# if showing diffs, we need to get the remote value
if self._play_context.diff:
diff = self._get_diff_data(dest, resultant, task_vars, source_file=False)
if not self._play_context.check_mode: # do actual work through copy
xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)
# fix file permissions when the copy is done as a different user
self._fixup_perms2([xfered], remote_user)
# run the copy module
new_module_args.update(
dict(
src=xfered,
dest=dest,
original_basename=os.path.basename(dest),
follow=True,
),
)
result.update(self._execute_module(
module_name='copy',
module_args=new_module_args,
task_vars=task_vars,
tmp=tmp,
delete_remote_tmp=False))
if result.get('changed', False) and self._play_context.diff:
result['diff'] = diff
else:
# when running the file module based on the template data, we do
# not want the source filename (the name of the template) to be used,
# since this would mess up links, so we clear the src param and tell
# the module to follow links. When doing that, we have to set
# original_basename to the template just in case the dest is
# a directory.
new_module_args.update(
dict(
path=dest,
src=None,
original_basename=os.path.basename(dest),
follow=True,
),
)
result.update(self._execute_module(
module_name='file',
module_args=new_module_args,
task_vars=task_vars,
tmp=tmp,
delete_remote_tmp=False))
self._remove_tmp_path(tmp)
return result
| {
"repo_name": "AerisCloud/AerisCloud",
"path": "ansible/plugins/actions/aeriscloud_service.py",
"copies": "1",
"size": "5738",
"license": "mit",
"hash": -3862469200234132000,
"line_mean": 36.0193548387,
"line_max": 107,
"alpha_frac": 0.5332868595,
"autogenerated": false,
"ratio": 4.272524199553239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305811059053238,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
from ansible.module_utils.six.moves import shlex_quote
from ansible.parsing.dataloader import DataLoader
from ansible.plugins.action import ActionBase
def filter_by_channel(channel_list, data):
result = {}
channels = set([e['name'] for e in channel_list])
for k in data.keys():
        if k in channels:
result[k] = data[k]
return result
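# Doctest-style sketch (illustrative data): only keys matching a declared channel
# name survive the filter.
#
#   >>> filter_by_channel([{'name': 'hostname'}], {'hostname': 'web01', 'other': 1})
#   {'hostname': 'web01'}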
def resolve_variable_spec(data, spec):
if data and spec.startswith('@') and spec.endswith('@'):
for element in spec.strip('@').split('.'):
data = data.get(element, None)
if not data:
break
if data is None:
raise ValueError("unresolved reference: {}".format(spec.strip("@")))
return data
return spec
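# Doctest-style sketch (illustrative data): specs wrapped in '@' are resolved as
# dotted paths into the data, anything else is returned verbatim.
#
#   >>> resolve_variable_spec({'answer': {'value': 42}}, '@answer.value@')
#   42
#   >>> resolve_variable_spec({'answer': {'value': 42}}, 'literal')
#   'literal'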
# must be named ActionModule or it won't be seen by Ansible
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
results = super(ActionModule, self).run(tmp, task_vars)
actor_name = self._task.args['name']
actor_repository = self._templar.template('{{ actor_repository }}')
is_local = self._play_context.connection == 'local'
task_vars['actor_remote_repo_path'] = actor_repository
loader = DataLoader()
loader.set_basedir(actor_repository)
results.setdefault('ansible_facts', {}).setdefault('actor_inputs', self._task.args.setdefault('inputs', {}))
results['ansible_facts']['actor_outputs'] = {}
return self._perform(results, actor_name, loader, is_local)
def _perform(self, results, actor_name, loader, is_local):
actor = loader.load_from_file(os.path.join(actor_name, '_actor.yaml'), unsafe=True)
if 'group' in actor:
return self._perform_group(results, actor, loader, is_local)
if not is_local:
sync_result = self._repo_sync(actor_name)
if sync_result['failed']:
return sync_result
results['ansible_facts']['actor_inputs'].update(results['ansible_facts']['actor_outputs'])
inputs = filter_by_channel(actor.get('inputs', ()), results['ansible_facts']['actor_inputs'])
executable = actor['execute']['executable']
params = [str(resolve_variable_spec(inputs, a)) for a in actor['execute'].get('arguments', ())]
if 'script-file' in actor['execute']:
params.insert(0, actor['execute']['script-file'])
command_result = self._low_level_execute_command(
' '.join(shlex_quote(element) for element in [executable] + params),
in_data=json.dumps(inputs),
chdir=self._templar.template("'{{actor_remote_repo_path}}/%s'" % actor_name))
try:
outputs = json.loads(command_result['stdout'])
results['ansible_facts']['actor_outputs'].update(outputs)
except ValueError:
pass
results.setdefault('actor_results', []).append(command_result)
results['failed'] = command_result['rc'] != 0
return results
def _perform_group(self, results, group_data, loader, is_local):
for actor in group_data['group']:
results = self._perform(results, actor, loader, is_local)
if results['failed']:
# Stop on first error - As snactor does it
break
return results
def _repo_sync(self, actor):
args = {'src': self._templar.template("'{{actor_repository}}/'"),
'dest': self._templar.template("'{{actor_remote_repo_path}}'"),
'copy_links': 'no',
'delete': 'yes',
'recursive': 'yes',
'links': 'yes'}
return self._execute_module(module_name='synchronize', module_args=args, task_vars=self._task.args)
| {
"repo_name": "leapp-to/snactor",
"path": "ansible/action_plugins/run-actor.py",
"copies": "1",
"size": "3964",
"license": "apache-2.0",
"hash": 1255737379563213300,
"line_mean": 37.4854368932,
"line_max": 116,
"alpha_frac": 0.6006559031,
"autogenerated": false,
"ratio": 4.028455284552845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5129111187652845,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ..compat import unittest
from ..compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
def set_module_args(args):
if '_ansible_remote_tmp' not in args:
args['_ansible_remote_tmp'] = '/tmp'
if '_ansible_keep_remote_files' not in args:
args['_ansible_keep_remote_files'] = False
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class ModuleTestCase(unittest.TestCase):
def setUp(self):
self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
self.mock_module.start()
self.mock_sleep = patch('time.sleep')
self.mock_sleep.start()
set_module_args({})
self.addCleanup(self.mock_module.stop)
self.addCleanup(self.mock_sleep.stop)
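# Illustrative test sketch (the module under test and its arguments are
# hypothetical): subclass ModuleTestCase, feed arguments through set_module_args,
# and assert on the AnsibleExitJson / AnsibleFailJson raised by the patched
# exit handlers.
#
#   class TestMyModule(ModuleTestCase):
#       def test_fails_without_required_args(self):
#           set_module_args({})
#           with self.assertRaises(AnsibleFailJson):
#               my_module.main()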
| {
"repo_name": "F5Networks/f5-ansible-modules",
"path": "ansible_collections/f5networks/f5_modules/tests/unit/modules/utils.py",
"copies": "2",
"size": "1284",
"license": "mit",
"hash": -5574582937127222000,
"line_mean": 24.68,
"line_max": 104,
"alpha_frac": 0.6760124611,
"autogenerated": false,
"ratio": 3.6271186440677967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5303131105167797,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import qb.logging
# Find a Display if possible
try:
from __main__ import display
except ImportError:
try:
from ansible.utils.display import Display
except ImportError:
display = None
else:
display = Display()
class DisplayHandler(logging.Handler):
'''
A handler class that writes messages to Ansible's
`ansible.utils.display.Display`, which then writes them to the user output.
Includes static methods that let it act as a sort of a singleton, with
a single instance created on-demand.
'''
# Singleton instance
_instance = None
@staticmethod
def getDisplay():
'''
Get the display instance, if we were able to import or create one.
:rtype: None
:return: No display could be found or created.
        :rtype: ansible.utils.display.Display
:return: The display we're using.
'''
return display
# .getDisplay
@staticmethod
def getInstance():
'''
:rtype: DisplayHandler
:return: The singleton instance.
'''
if DisplayHandler._instance is None:
DisplayHandler._instance = DisplayHandler()
return DisplayHandler._instance
# .getInstance
@staticmethod
def enable():
'''
Enable logging to Ansible's display by sending {.getInstance()} to
{qb.logging.addHandler()}.
        :raises RuntimeError: If no display is available.
'''
instance = DisplayHandler.getInstance()
if instance.display is None:
raise RuntimeError("No display available")
return qb.logging.addHandler(instance)
# .enable
    @staticmethod
    def disable():
        '''
        Disable logging to Ansible's display by sending {.getInstance()} to
{qb.logging.removeHandler()}.
'''
return qb.logging.removeHandler(DisplayHandler.getInstance())
# .disable
    @staticmethod
    def is_enabled():
return qb.logging.hasHandler(DisplayHandler.getInstance())
# .is_enabled
def __init__(self, display=None):
logging.Handler.__init__(self)
if display is None:
display = DisplayHandler.getDisplay()
self.display = display
# #__init__
def emit(self, record):
'''
Overridden to send log records to Ansible's display.
'''
if self.display is None:
# Nothing we can do, drop it
return
try:
self.format(record)
if record.levelname == 'DEBUG':
return self.display.verbose(record.message, caplevel=1)
elif record.levelname == 'INFO':
return self.display.verbose(record.message, caplevel=0)
elif record.levelname == 'WARNING':
self.display.warning(record.message)
elif record.levelname == 'ERROR':
self.display.error(record.message)
elif record.levelname == 'CRITICAL':
self.display.error("(CRITICAL) {}".format(record.message))
else:
pass
except (KeyboardInterrupt, SystemExit):
raise
except:
raise
# self.handleError(record)
# #emit | {
"repo_name": "nrser/qb",
"path": "lib/python/qb/ansible/display_handler.py",
"copies": "1",
"size": "3352",
"license": "mit",
"hash": 1445246446580276700,
"line_mean": 23.837037037,
"line_max": 79,
"alpha_frac": 0.5799522673,
"autogenerated": false,
"ratio": 4.802292263610315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006980845773243435,
"num_lines": 135
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import threading
import json
import weakref
import qb.ipc.stdio
# Current handlers
_handlers = []
def hasHandler(handler):
return handler in _handlers
# hasHandler
def addHandler(handler):
'''
Add a handler if it's not already added.
:rtype: Boolean
:return: `True` if it was added (not already there), `False` if already
present.
'''
    if handler not in _handlers:
_handlers.append(handler)
return True
else:
return False
# addHandler
def removeHandler(handler):
'''
Remove a handler.
:rtype: Boolean
:return: `True` if it was removed, `False` if wasn't there to begin
with.
'''
if handler in _handlers:
_handlers.remove(handler)
return True
else:
return False
# removeHandler
def getLogger(name, level=logging.DEBUG):
logger = logging.getLogger(name)
if level is not None:
logger.setLevel(level)
for handler in _handlers:
logger.addHandler(handler)
return Adapter(logger, {})
class Adapter(logging.LoggerAdapter):
'''
Adapter to allow Ruby's Semantic Logger (basis of NRSER::Log) style logging,
which is then easy to translate when sending logs up to the QB master
process via IPC.
Usage:
logger.info(
"Message with payload {value} interpolations",
payload = dict(
value = "interpolated into message",
                more = "values",
# ...
)
)
'''
def process(self, msg, kwds):
payload = None
if 'payload' in kwds:
payload = kwds['payload']
del kwds['payload']
if payload:
try:
msg = msg.format(**payload)
except:
pass
if 'extra' not in kwds:
kwds['extra'] = {}
kwds['extra']['payload'] = payload
return msg, kwds
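    # Illustrative sketch of what process() returns (values are made up): the
    # payload is interpolated into the message and re-attached under
    # extra['payload'] so a handler can forward it structurally.
    #
    #   >>> Adapter(logging.getLogger('qb.example'), {}).process(
    #   ...     "deploying {service}", {'payload': {'service': 'web'}})
    #   ('deploying web', {'extra': {'payload': {'service': 'web'}}})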
| {
"repo_name": "nrser/qb",
"path": "lib/python/qb/logging.py",
"copies": "1",
"size": "2138",
"license": "mit",
"hash": 2261420357799702000,
"line_mean": 21.0412371134,
"line_max": 80,
"alpha_frac": 0.5537885875,
"autogenerated": false,
"ratio": 4.435684647302905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006541536610265133,
"num_lines": 97
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import threading
import json
import qb.logging
import qb.ipc.stdio
# FIXME After adding central logging stuff
def getLogger(name, level=logging.DEBUG, io_client=qb.ipc.stdio.client):
logger = logging.getLogger(name)
if level is not None:
logger.setLevel(level)
logger.addHandler(Handler(io_client=io_client))
return Adapter(logger, {})
class Handler(logging.Handler):
"""
A handler class which writes logging records to the QB master process
    via its `QB::IPC::STDIO` system, if available.
If QB's STDIO system is not available, discards the logs.
Based on the Python stdlib's `SocketHandler`, though it ended up retaining
almost nothing from it since it just proxies to
:class:`qb.ipc.stdio.Client`, which does all the socket dirty-work.
.. note:
This class **does not** connect the :class:`qb.ipc.stdio.Client`
instance (which defaults to the 'global' :attr:`qb.ipc.stdio.client`
instance - and that's what you should use unless you're testing or
doing something weird).
You need to connect the client somewhere else (before or after creating
loggers is fine).
"""
def __init__(self, io_client=qb.ipc.stdio.client):
"""
Initializes the handler with a :class:`qb.ipc.stdio.Client`, which
        defaults to the 'global' one at :attr:`qb.ipc.stdio.client`. This should
be fine for everything except testing.
See note in class doc about connecting the client.
:param io_client: :class:`qb.ipc.stdio.Client`
"""
logging.Handler.__init__(self)
self.io_client = io_client
def send(self, string):
"""
Send a string to the :attr:`io_client`.
"""
if not self.io_client.log.connected:
return
self.io_client.log.println(string)
def get_sem_log_level(self, level):
"""
        Trade Python log level string for a Ruby SemanticLogger one.
"""
if level == 'DEBUG' or level == 'INFO' or level == 'ERROR':
return level.lower()
elif level == 'WARNING':
return 'warn'
elif level == 'CRITICAL':
return 'fatal'
else:
return 'info'
def emit(self, record):
"""
Emit a record.
        Doc copied in (TODO re-write):
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
record: https://docs.python.org/2/library/logging.html#logrecord-attributes
"""
try:
self.format(record)
struct = dict(
level = self.get_sem_log_level(record.levelname),
name = record.name,
pid = record.process,
# thread = threading.current_thread().name,
thread = record.threadName,
message = record.message,
# timestamp = record.asctime,
)
# The `logging` stdlib module allows you to add extra values
            # by providing an `extra` key to the `Logger#debug` call (and
            # friends), which just adds those keys and values to the
# `record` object's `#__dict__` (where they better not conflict
# with anything else or you'll be in trouble I guess).
#
# We look for a `payload` key in there.
#
# Example logging with a payload:
#
            #   logger.debug("My message", extra=dict(payload=dict(x=1)))
#
# Yeah, it sucks... TODO extend Logger or something to make it a
# little easier to use?
#
if 'payload' in record.__dict__:
struct['payload'] = record.__dict__['payload']
string = json.dumps(struct)
self.send(string)
except (KeyboardInterrupt, SystemExit):
raise
except:
raise
# self.handleError(record)
| {
"repo_name": "nrser/qb",
"path": "lib/python/qb/ipc/stdio/logging.py",
"copies": "1",
"size": "4444",
"license": "mit",
"hash": -4451231271454303000,
"line_mean": 31.9185185185,
"line_max": 83,
"alpha_frac": 0.5607560756,
"autogenerated": false,
"ratio": 4.43956043956044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5500316515160439,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
import re
import sys
from ansible.utils.collection_loader import AnsibleCollectionRef
def test_import_from_collection(monkeypatch):
collection_root = os.path.join(os.path.dirname(__file__), 'fixtures', 'collections')
collection_path = os.path.join(collection_root, 'ansible_collections/my_namespace/my_collection/plugins/module_utils/my_util.py')
# the trace we're expecting to be generated when running the code below:
# answer = question()
expected_trace_log = [
(collection_path, 5, 'call'),
(collection_path, 6, 'line'),
(collection_path, 6, 'return'),
]
# define the collection root before any ansible code has been loaded
# otherwise config will have already been loaded and changing the environment will have no effect
monkeypatch.setenv('ANSIBLE_COLLECTIONS_PATHS', collection_root)
from ansible.utils.collection_loader import AnsibleCollectionLoader
# zap the singleton collection loader instance if it exists
AnsibleCollectionLoader._Singleton__instance = None
for index in [idx for idx, obj in enumerate(sys.meta_path) if isinstance(obj, AnsibleCollectionLoader)]:
# replace any existing collection loaders that may exist
# since these were loaded during unit test collection
# they will not have the correct configuration
sys.meta_path[index] = AnsibleCollectionLoader()
# make sure the collection loader is installed
# this will be a no-op if the collection loader is already installed
# which will depend on whether or not any tests being run imported ansible.plugins.loader during unit test collection
from ansible.plugins.loader import _configure_collection_loader
_configure_collection_loader() # currently redundant, the import above already calls this
from ansible_collections.my_namespace.my_collection.plugins.module_utils.my_util import question
original_trace_function = sys.gettrace()
trace_log = []
if original_trace_function:
# enable tracing while preserving the existing trace function (coverage)
def my_trace_function(frame, event, arg):
trace_log.append((frame.f_code.co_filename, frame.f_lineno, event))
# the original trace function expects to have itself set as the trace function
sys.settrace(original_trace_function)
# call the original trace function
original_trace_function(frame, event, arg)
# restore our trace function
sys.settrace(my_trace_function)
return my_trace_function
else:
# no existing trace function, so our trace function is much simpler
def my_trace_function(frame, event, arg):
trace_log.append((frame.f_code.co_filename, frame.f_lineno, event))
return my_trace_function
sys.settrace(my_trace_function)
try:
# run a minimal amount of code while the trace is running
# adding more code here, including use of a context manager, will add more to our trace
answer = question()
finally:
sys.settrace(original_trace_function)
# make sure 'import ... as ...' works on builtin synthetic collections
# the following import is not supported (it tries to find module_utils in ansible.plugins)
# import ansible_collections.ansible.builtin.plugins.module_utils as c1
import ansible_collections.ansible.builtin.plugins.action as c2
import ansible_collections.ansible.builtin.plugins as c3
import ansible_collections.ansible.builtin as c4
import ansible_collections.ansible as c5
import ansible_collections as c6
# make sure 'import ...' works on builtin synthetic collections
import ansible_collections.ansible.builtin.plugins.module_utils
import ansible_collections.ansible.builtin.plugins.action
assert ansible_collections.ansible.builtin.plugins.action == c3.action == c2
import ansible_collections.ansible.builtin.plugins
assert ansible_collections.ansible.builtin.plugins == c4.plugins == c3
import ansible_collections.ansible.builtin
assert ansible_collections.ansible.builtin == c5.builtin == c4
import ansible_collections.ansible
assert ansible_collections.ansible == c6.ansible == c5
import ansible_collections
assert ansible_collections == c6
# make sure 'from ... import ...' works on builtin synthetic collections
from ansible_collections.ansible import builtin
from ansible_collections.ansible.builtin import plugins
assert builtin.plugins == plugins
from ansible_collections.ansible.builtin.plugins import action
from ansible_collections.ansible.builtin.plugins.action import command
assert action.command == command
from ansible_collections.ansible.builtin.plugins.module_utils import basic
from ansible_collections.ansible.builtin.plugins.module_utils.basic import AnsibleModule
assert basic.AnsibleModule == AnsibleModule
# make sure relative imports work from collections code
# these require __package__ to be set correctly
import ansible_collections.my_namespace.my_collection.plugins.module_utils.my_other_util
import ansible_collections.my_namespace.my_collection.plugins.action.my_action
# verify that code loaded from a collection does not inherit __future__ statements from the collection loader
if sys.version_info[0] == 2:
# if the collection code inherits the division future feature from the collection loader this will fail
assert answer == 1
else:
assert answer == 1.5
# verify that the filename and line number reported by the trace is correct
# this makes sure that collection loading preserves file paths and line numbers
assert trace_log == expected_trace_log
@pytest.mark.parametrize(
'ref,ref_type,expected_collection,expected_subdirs,expected_resource,expected_python_pkg_name',
[
('ns.coll.myaction', 'action', 'ns.coll', '', 'myaction', 'ansible_collections.ns.coll.plugins.action'),
('ns.coll.subdir1.subdir2.myaction', 'action', 'ns.coll', 'subdir1.subdir2', 'myaction', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'),
('ns.coll.myrole', 'role', 'ns.coll', '', 'myrole', 'ansible_collections.ns.coll.roles.myrole'),
('ns.coll.subdir1.subdir2.myrole', 'role', 'ns.coll', 'subdir1.subdir2', 'myrole', 'ansible_collections.ns.coll.roles.subdir1.subdir2.myrole'),
])
def test_fqcr_parsing_valid(ref, ref_type, expected_collection,
expected_subdirs, expected_resource, expected_python_pkg_name):
assert AnsibleCollectionRef.is_valid_fqcr(ref, ref_type)
r = AnsibleCollectionRef.from_fqcr(ref, ref_type)
assert r.collection == expected_collection
assert r.subdirs == expected_subdirs
assert r.resource == expected_resource
assert r.n_python_package_name == expected_python_pkg_name
r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type)
assert r.collection == expected_collection
assert r.subdirs == expected_subdirs
assert r.resource == expected_resource
assert r.n_python_package_name == expected_python_pkg_name
@pytest.mark.parametrize(
'ref,ref_type,expected_error_type,expected_error_expression',
[
('no_dots_at_all_action', 'action', ValueError, 'is not a valid collection reference'),
('no_nscoll.myaction', 'action', ValueError, 'is not a valid collection reference'),
('ns.coll.myaction', 'bogus', ValueError, 'invalid collection ref_type'),
])
def test_fqcr_parsing_invalid(ref, ref_type, expected_error_type, expected_error_expression):
assert not AnsibleCollectionRef.is_valid_fqcr(ref, ref_type)
with pytest.raises(expected_error_type) as curerr:
AnsibleCollectionRef.from_fqcr(ref, ref_type)
assert re.search(expected_error_expression, str(curerr.value))
r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type)
assert r is None
@pytest.mark.parametrize(
'name,subdirs,resource,ref_type,python_pkg_name',
[
('ns.coll', None, 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments'),
('ns.coll', 'subdir1', 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments.subdir1'),
('ns.coll', 'subdir1.subdir2', 'res', 'action', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'),
])
def test_collectionref_components_valid(name, subdirs, resource, ref_type, python_pkg_name):
x = AnsibleCollectionRef(name, subdirs, resource, ref_type)
assert x.collection == name
if subdirs:
assert x.subdirs == subdirs
else:
assert x.subdirs == ''
assert x.resource == resource
assert x.ref_type == ref_type
assert x.n_python_package_name == python_pkg_name
@pytest.mark.parametrize(
'name,subdirs,resource,ref_type,expected_error_type,expected_error_expression',
[
('bad_ns', '', 'resource', 'action', ValueError, 'invalid collection name'),
('ns.coll.', '', 'resource', 'action', ValueError, 'invalid collection name'),
('ns.coll', 'badsubdir#', 'resource', 'action', ValueError, 'invalid subdirs entry'),
('ns.coll', 'badsubdir.', 'resource', 'action', ValueError, 'invalid subdirs entry'),
('ns.coll', '.badsubdir', 'resource', 'action', ValueError, 'invalid subdirs entry'),
('ns.coll', '', 'resource', 'bogus', ValueError, 'invalid collection ref_type'),
])
def test_collectionref_components_invalid(name, subdirs, resource, ref_type, expected_error_type, expected_error_expression):
with pytest.raises(expected_error_type) as curerr:
AnsibleCollectionRef(name, subdirs, resource, ref_type)
assert re.search(expected_error_expression, str(curerr.value))
| {
"repo_name": "thaim/ansible",
"path": "test/units/utils/test_collection_loader.py",
"copies": "27",
"size": "9917",
"license": "mit",
"hash": 6986271776616862000,
"line_mean": 45.1255813953,
"line_max": 159,
"alpha_frac": 0.710799637,
"autogenerated": false,
"ratio": 4.0676784249384745,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import requests
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def save_playbook(self, proj_path, remediation, content):
name = remediation.get('name', None) or 'insights-remediation'
name = re.sub(r'[^\w\s-]', '', name).strip().lower()
name = re.sub(r'[-\s]+', '-', name)
fname = '{}-{}.yml'.format(name, remediation['id'])
file_path = os.path.join(proj_path, fname)
with open(file_path, 'wb') as f:
f.write(content)
def is_stale(self, proj_path, etag):
file_path = os.path.join(proj_path, '.version')
try:
with open(file_path, 'r') as f:
version = f.read()
return version != etag
except IOError:
return True
def write_version(self, proj_path, etag):
file_path = os.path.join(proj_path, '.version')
with open(file_path, 'w') as f:
f.write(etag)
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
insights_url = self._task.args.get('insights_url', None)
username = self._task.args.get('username', None)
password = self._task.args.get('password', None)
proj_path = self._task.args.get('project_path', None)
license = self._task.args.get('awx_license_type', None)
awx_version = self._task.args.get('awx_version', None)
session = requests.Session()
session.auth = requests.auth.HTTPBasicAuth(username, password)
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format(
'AWX' if license == 'open' else 'Red Hat Ansible Tower',
awx_version,
license
)
}
url = '/api/remediations/v1/remediations'
while url:
res = session.get('{}{}'.format(insights_url, url), headers=headers, timeout=120)
if res.status_code != 200:
result['failed'] = True
result['msg'] = (
'Expected {} to return a status code of 200 but returned status '
'code "{}" instead with content "{}".'.format(url, res.status_code, res.content)
)
return result
# FIXME: ETags are (maybe?) not yet supported in the new
# API, and even if they are we'll need to put some thought
# into how to deal with them in combination with pagination.
if 'ETag' in res.headers:
version = res.headers['ETag']
if version.startswith('"') and version.endswith('"'):
version = version[1:-1]
else:
version = "ETAG_NOT_FOUND"
if not self.is_stale(proj_path, version):
result['changed'] = False
result['version'] = version
return result
url = res.json()['links']['next'] # will be None if we're on the last page
for item in res.json()['data']:
playbook_url = '{}/api/remediations/v1/remediations/{}/playbook'.format(
insights_url, item['id'])
res = session.get(playbook_url, timeout=120)
if res.status_code == 204:
continue
elif res.status_code != 200:
result['failed'] = True
result['msg'] = (
'Expected {} to return a status code of 200 but returned status '
'code "{}" instead with content "{}".'.format(
playbook_url, res.status_code, res.content)
)
return result
self.save_playbook(proj_path, item, res.content)
self.write_version(proj_path, version)
result['changed'] = True
result['version'] = version
return result
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx/playbooks/action_plugins/insights.py",
"copies": "1",
"size": "4143",
"license": "apache-2.0",
"hash": 3537726054017440000,
"line_mean": 37.3611111111,
"line_max": 100,
"alpha_frac": 0.527878349,
"autogenerated": false,
"ratio": 4.114200595829196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142078944829196,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import requests
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def save_playbook(self, proj_path, plan, content):
fname = '{}-{}.yml'.format(plan.get('name', None) or 'insights-plan', plan['maintenance_id'])
file_path = os.path.join(proj_path, fname)
with open(file_path, 'w') as f:
f.write(content)
def is_stale(self, proj_path, etag):
file_path = os.path.join(proj_path, '.version')
try:
f = open(file_path, 'r')
version = f.read()
f.close()
return version != etag
except IOError:
return True
def write_version(self, proj_path, etag):
file_path = os.path.join(proj_path, '.version')
with open(file_path, 'w') as f:
f.write(etag)
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
insights_url = self._task.args.get('insights_url', None)
username = self._task.args.get('username', None)
password = self._task.args.get('password', None)
proj_path = self._task.args.get('project_path', None)
session = requests.Session()
session.auth = requests.auth.HTTPBasicAuth(username, password)
headers = {'Content-Type': 'application/json'}
url = '{}/r/insights/v3/maintenance?ansible=true'.format(insights_url)
res = session.get(url, headers=headers, timeout=120)
if res.status_code != 200:
result['failed'] = True
result['msg'] = 'Expected {} to return a status code of 200 but returned status code "{}" instead with content "{}".'.format(url, res.status_code, res.content)
return result
if 'ETag' in res.headers:
version = res.headers['ETag']
if version.startswith('"') and version.endswith('"'):
version = version[1:-1]
else:
version = "ETAG_NOT_FOUND"
if not self.is_stale(proj_path, version):
result['changed'] = False
result['version'] = version
return result
for item in res.json():
url = '{}/r/insights/v3/maintenance/{}/playbook'.format(insights_url, item['maintenance_id'])
res = session.get(url, timeout=120)
if res.status_code != 200:
result['failed'] = True
result['msg'] = 'Expected {} to return a status code of 200 but returned status code "{}" instead with content "{}".'.format(url, res.status_code, res.content)
return result
self.save_playbook(proj_path, item, res.content)
self.write_version(proj_path, version)
result['changed'] = True
result['version'] = version
return result
| {
"repo_name": "snahelou/awx",
"path": "awx/playbooks/action_plugins/insights.py",
"copies": "1",
"size": "2970",
"license": "apache-2.0",
"hash": 9211688151219706000,
"line_mean": 35.2195121951,
"line_max": 175,
"alpha_frac": 0.5814814815,
"autogenerated": false,
"ratio": 3.877284595300261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9950117005665255,
"avg_score": 0.0017298142270009728,
"num_lines": 82
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import socket
def path_env_var_name(name):
'''
Get the ENV var name whose value (if any) will be the file system path
to the UNIX socket file for that IO stream.
The names in current use:
>>> path_env_var_name('out')
'QB_STDIO_OUT'
>>> path_env_var_name('err')
'QB_STDIO_ERR'
>>> path_env_var_name('log')
'QB_STDIO_LOG'
'''
return "QB_STDIO_{}".format(name.upper())
class Connection:
'''
Port of Ruby `QB::IPC::STDIO::Client::Connection` class.
'''
def __init__(self, name, type):
self.name = name
self.type = type
self.path = None
self.socket = None
self.env_var_name = path_env_var_name(self.name)
self.connected = False
def __str__(self):
attrs = ' '.join(
"{}={}".format(name, getattr(self, name))
for name in ('name', 'type', 'path', 'connected')
)
return "<qb.ipc.stdio.Connection {}>".format(attrs)
def get_path(self):
if self.env_var_name in os.environ:
self.path = os.environ[self.env_var_name]
return self.path
def connect(self, warnings=None):
if self.connected:
raise RuntimeError("{} is already connected!".format(self))
if self.get_path() is None:
return False
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self.socket.connect(self.path)
        except socket.error as msg:
            if warnings is not None:
                warning = 'Failed to connect to QB {} stream at {}: {}'
                warning = warning.format(self.name, self.path, msg)
warnings.append(warning)
self.socket = None
return False
self.connected = True
return True
def disconnect(self):
if not self.connected:
raise RuntimeError("{} is not connected!".format(self))
# if self.type == 'out':
# self.socket.flush()
self.socket.close()
self.socket = None
self.connected = False
def println(self, line):
if not line.endswith( u"\n" ):
line = line + u"\n"
self.socket.sendall(line.encode("utf-8"))
class Client:
def __init__(self):
        # I don't think we need STDIN or want to deal with what it means here
# self.stdin = Connection(name='in', type='in')
self.stdout = Connection(name='out', type='out')
self.stderr = Connection(name='err', type='out')
self.log = Connection(name='log', type='out')
def connections(self):
return [self.stdout, self.stderr, self.log]
def connect(self, warnings=None):
for connection in self.connections():
if not connection.connected:
connection.connect(warnings)
return self
    def disconnect(self):
for connection in self.connections():
if connection.connected:
connection.disconnect()
client = Client()
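# Illustrative usage sketch (assumes the QB master process exported the
# QB_STDIO_* socket paths into this process's environment):
#
#   warnings = []
#   client.connect(warnings)
#   if client.stdout.connected:
#       client.stdout.println(u"hello from the module side")
#   client.disconnect()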
| {
"repo_name": "nrser/qb",
"path": "lib/python/qb/ipc/stdio/__init__.py",
"copies": "1",
"size": "3250",
"license": "mit",
"hash": -5281586262381581000,
"line_mean": 27.0172413793,
"line_max": 77,
"alpha_frac": 0.5473846154,
"autogenerated": false,
"ratio": 4.067584480600751,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9994223210508949,
"avg_score": 0.0241491770983604,
"num_lines": 116
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import CredentialType
@pytest.mark.django_db
def test_create_custom_credential_type(run_module, admin_user, silence_deprecation):
# Example from docs
result = run_module('tower_credential_type', dict(
name='Nexus',
description='Credentials type for Nexus',
kind='cloud',
inputs={"fields": [{"id": "server", "type": "string", "default": "", "label": ""}], "required": []},
injectors={'extra_vars': {'nexus_credential': 'test'}},
state='present',
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.get('changed'), result
ct = CredentialType.objects.get(name='Nexus')
assert result['name'] == 'Nexus'
assert result['id'] == ct.pk
assert ct.inputs == {"fields": [{"id": "server", "type": "string", "default": "", "label": ""}], "required": []}
assert ct.injectors == {'extra_vars': {'nexus_credential': 'test'}}
@pytest.mark.django_db
def test_changed_false_with_api_changes(run_module, admin_user):
result = run_module('tower_credential_type', dict(
name='foo',
kind='cloud',
inputs={"fields": [{"id": "env_value", "label": "foo", "default": "foo"}]},
injectors={'env': {'TEST_ENV_VAR': '{{ env_value }}'}},
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.get('changed'), result
result = run_module('tower_credential_type', dict(
name='foo',
inputs={"fields": [{"id": "env_value", "label": "foo", "default": "foo"}]},
injectors={'env': {'TEST_ENV_VAR': '{{ env_value }}'}},
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert not result.get('changed'), result
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_credential_type.py",
"copies": "1",
"size": "1893",
"license": "apache-2.0",
"hash": -515940035179227600,
"line_mean": 37.6326530612,
"line_max": 116,
"alpha_frac": 0.6043317485,
"autogenerated": false,
"ratio": 3.5649717514124295,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46693034999124294,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import NotificationTemplate
def compare_with_encrypted(model_config, param_config):
'''Given a model_config from the database, assure that this is consistent
with the config given in the notification_configuration parameter
this requires handling of password fields
'''
for key, model_val in model_config.items():
param_val = param_config.get(key, 'missing')
if isinstance(model_val, str) and (model_val.startswith('$encrypted$') or param_val.startswith('$encrypted$')):
assert model_val.startswith('$encrypted$') # must be saved as encrypted
assert len(model_val) > len('$encrypted$')
else:
assert model_val == param_val, 'Config key {0} did not match, (model: {1}, input: {2})'.format(
key, model_val, param_val
)
@pytest.mark.django_db
def test_create_modify_notification_template(run_module, admin_user, organization):
nt_config = {
'username': 'user',
'password': 'password',
'sender': 'foo@invalid.com',
'recipients': ['foo2@invalid.com'],
'host': 'smtp.example.com',
'port': 25,
'use_tls': False, 'use_ssl': False,
'timeout': 4
}
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='email',
notification_configuration=nt_config,
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.pop('changed', None), result
nt = NotificationTemplate.objects.get(id=result['id'])
compare_with_encrypted(nt.notification_configuration, nt_config)
assert nt.organization == organization
# Test no-op, this is impossible if the notification_configuration is given
# because we cannot determine if password fields changed
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='email',
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert not result.pop('changed', None), result
# Test a change in the configuration
nt_config['timeout'] = 12
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='email',
notification_configuration=nt_config,
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.pop('changed', None), result
nt.refresh_from_db()
compare_with_encrypted(nt.notification_configuration, nt_config)
@pytest.mark.django_db
def test_invalid_notification_configuration(run_module, admin_user, organization):
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='email',
notification_configuration={},
), admin_user)
assert result.get('failed', False), result.get('msg', result)
assert 'Missing required fields for Notification Configuration' in result['msg']
@pytest.mark.django_db
def test_deprecated_to_modern_no_op(run_module, admin_user, organization):
nt_config = {
'url': 'http://www.example.com/hook',
'headers': {
'X-Custom-Header': 'value123'
}
}
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='webhook',
notification_configuration=nt_config,
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert result.pop('changed', None), result
result = run_module('tower_notification', dict(
name='foo-notification-template',
organization=organization.name,
notification_type='webhook',
notification_configuration=nt_config,
), admin_user)
assert not result.get('failed', False), result.get('msg', result)
assert not result.pop('changed', None), result
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_notification.py",
"copies": "1",
"size": "4279",
"license": "apache-2.0",
"hash": 8022781793701620000,
"line_mean": 37.5495495495,
"line_max": 119,
"alpha_frac": 0.6599672821,
"autogenerated": false,
"ratio": 4.114423076923077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5274390359023077,
"avg_score": null,
"num_lines": null
} |