from __future__ import absolute_import, division
from collections import defaultdict
from hashlib import md5
from lxml import etree
from sqlalchemy.exc import IntegrityError
from changes.artifacts.xml import DelegateParser
from changes.config import db, redis
from changes.lib.coverage import merge_coverage, get_coverage_stats
from changes.models.filecoverage import FileCoverage
from changes.utils.diff_parser import DiffParser
from .base import ArtifactHandler
class CoverageHandler(ArtifactHandler):
FILENAMES = ('coverage.xml', '*.coverage.xml')
def process(self, fp, artifact):
results = self.get_coverage(fp)
for result in results:
try:
with db.session.begin_nested():
db.session.add(result)
except IntegrityError:
lock_key = 'coverage:{job_id}:{file_hash}'.format(
job_id=result.job_id.hex,
file_hash=md5(result.filename).hexdigest(),
)
with redis.lock(lock_key):
result = self.merge_coverage(result)
db.session.add(result)
db.session.commit()
return results
def merge_coverage(self, new):
existing = FileCoverage.query.filter(
FileCoverage.job_id == new.job_id,
FileCoverage.filename == new.filename,
).first()
existing.data = merge_coverage(existing.data, new.data)
self.add_file_stats(existing)
return existing
def process_diff(self):
lines_by_file = defaultdict(set)
try:
source = self.step.job.build.source
except AttributeError:
return lines_by_file
diff = source.generate_diff()
if not diff:
return lines_by_file
diff_parser = DiffParser(diff)
return diff_parser.get_lines_by_file()
def get_processed_diff(self):
if not hasattr(self, '_processed_diff'):
self._processed_diff = self.process_diff()
return self._processed_diff
def add_file_stats(self, result):
diff_lines = self.get_processed_diff()[result.filename]
(result.lines_covered,
result.lines_uncovered,
result.diff_lines_covered,
result.diff_lines_uncovered) = get_coverage_stats(diff_lines, result.data)
def get_coverage(self, fp):
"""
Return a phabricator-capable coverage mapping.
>>> {
>>> 'foo.py': 'NNNUUUUUUUUUUUUCCCUUUUUCCCCCCCCCNNCNCNCCCNNNN',
>>> }
Line flags consist of a single-character coverage indicator for each line in the file.
- N: no coverage available
- U: uncovered
- C: covered
"""
try:
parser = etree.XMLParser(target=CoverageParser(self))
return etree.parse(fp, parser)
except etree.XMLSyntaxError:
self.logger.warn("Failed to parse coverage", exc_info=True)
return []
class CoverageParser(DelegateParser):
"""Parses a Cobertura or Jacoco XML file into a list of FileCoverage objects."""
def __init__(self, coverage_handler):
super(CoverageParser, self).__init__()
self.coverage_handler = coverage_handler
def _start(self, tag, attrib):
# check the root tag name to determine which type of coverage file this is
if tag == 'coverage':
self._set_subparser(CoberturaCoverageParser(self.coverage_handler))
elif tag == 'report':
self._set_subparser(JacocoCoverageParser(self.coverage_handler))
else:
# the root tag is not any of the known coverage types
raise NotImplementedError('Unsupported coverage format')
def _close(self):
# because we choose a subparser after seeing the root element, the only
# way we'll get here is if the document is empty
raise etree.XMLSyntaxError("Empty file", None, 1, 1)
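# Illustrative sketch of the dispatch above, not taken from the original source:
# a Cobertura report starts with a <coverage> root element, e.g.
#   <coverage line-rate="0.5"><packages>...</packages></coverage>
# while a Jacoco report starts with a <report> root element, e.g.
#   <report name="example"><package name="com/example">...</package></report>
# Any other root tag raises NotImplementedError.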
class CoberturaCoverageParser(object):
"""Parses a Cobertura XML file into a list of FileCoverage objects."""
def __init__(self, coverage_handler):
self.coverage_handler = coverage_handler
self.step = coverage_handler.step
self.job = coverage_handler.step.job
self.results = []
self.in_file = False
def start(self, tag, attrib):
if tag == 'class':
if 'filename' not in attrib:
self.coverage_handler.logger.warn(
'Unable to determine filename for class node with attributes: %s', attrib)
else:
self.filename = attrib['filename']
self.file_coverage = []
self.current_lineno = 0
self.in_file = True
elif tag == 'line':
if self.in_file:
number = int(attrib['number'])
hits = int(attrib['hits'])
branch = attrib.get('branch') == 'true'
# the line numbers in the file should be strictly increasing
assert self.current_lineno < number
if self.current_lineno < number - 1:
for self.current_lineno in range(self.current_lineno, number - 1):
self.file_coverage.append('N')
# count partial branch coverage as uncovered
if branch:
# condition-coverage attrib looks something like '50% (2/4)'
if 'condition-coverage' not in attrib:
# condition-coverage should always be present if branch="true". if it's
# not, log a warning and mark the line uncovered (to avoid false positives)
self.coverage_handler.logger.warn(
'Line node with branch="true" has no condition-coverage attribute. ' +
'Node attributes: %s', attrib)
self.file_coverage.append('U')
elif attrib['condition-coverage'].startswith('100%'):
self.file_coverage.append('C')
else:
self.file_coverage.append('U')
else:
if hits > 0:
self.file_coverage.append('C')
else:
self.file_coverage.append('U')
self.current_lineno = number
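# Illustrative example of the Cobertura input handled above (assumed shape,
# not taken from the original source):
#   <class filename="foo.py">
#     <lines>
#       <line number="1" hits="3"/>
#       <line number="3" hits="0" branch="true" condition-coverage="50% (1/2)"/>
#     </lines>
#   </class>
# would produce the coverage string 'CNU': line 1 covered, line 2 padded with
# 'N', and line 3 uncovered because its branch coverage is only partial.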
def end(self, tag):
if tag == 'class':
if self.in_file:
result = FileCoverage(
step_id=self.step.id,
job_id=self.job.id,
project_id=self.job.project_id,
filename=self.filename,
data=''.join(self.file_coverage),
)
self.coverage_handler.add_file_stats(result)
self.results.append(result)
self.in_file = False
def data(self, data):
pass
def close(self):
return self.results
class JacocoCoverageParser(object):
"""Parses a Jacoco XML file into a list of FileCoverage objects."""
def __init__(self, coverage_handler):
self.coverage_handler = coverage_handler
self.step = coverage_handler.step
self.job = coverage_handler.step.job
self.results = []
self.in_file = False
def start(self, tag, attrib):
if tag == 'package':
if 'name' not in attrib:
self.coverage_handler.logger.warn(
'Unable to determine name for package node with attributes: %s', attrib)
else:
self.package_path = 'src/main/java/{}'.format(attrib['name'])
elif tag == 'sourcefile':
if 'name' not in attrib:
self.coverage_handler.logger.warn(
'Unable to determine name for sourcefile node with attributes: %s', attrib)
else:
self.filename = '{}/{}'.format(self.package_path, attrib['name'])
self.file_coverage = []
self.current_lineno = 0
self.in_file = True
elif tag == 'line':
if self.in_file:
number = int(attrib['nr'])
hits = int(attrib['ci'])
# the line numbers in the file should be strictly increasing
assert self.current_lineno < number
if self.current_lineno < number - 1:
for self.current_lineno in range(self.current_lineno, number - 1):
self.file_coverage.append('N')
if hits > 0:
self.file_coverage.append('C')
else:
self.file_coverage.append('U')
self.current_lineno = number
def end(self, tag):
if tag == 'class':
if self.in_file:
result = FileCoverage(
step_id=self.step.id,
job_id=self.job.id,
project_id=self.job.project_id,
filename=self.filename,
data=''.join(self.file_coverage),
)
self.coverage_handler.add_file_stats(result)
self.results.append(result)
self.in_file = False
def data(self, data):
pass
def close(self):
return self.results
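# Minimal usage sketch, assuming the base ArtifactHandler is constructed from a
# JobStep and that the surrounding artifact pipeline supplies an open file
# object with the XML report (`fp`) and the artifact record (`artifact`); these
# names are assumptions, not part of this module:
#
#   handler = CoverageHandler(step)
#   results = handler.process(fp, artifact)
#   for file_coverage in results:
#       print(file_coverage.filename, file_coverage.diff_lines_covered)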
| {
"repo_name": "dropbox/changes",
"path": "changes/artifacts/coverage.py",
"copies": "1",
"size": "9431",
"license": "apache-2.0",
"hash": 7263816397557960000,
"line_mean": 35.5542635659,
"line_max": 99,
"alpha_frac": 0.5523274308,
"autogenerated": false,
"ratio": 4.51676245210728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.556908988290728,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from collections import defaultdict
from hashlib import md5
from lxml import etree
from sqlalchemy.exc import IntegrityError
from changes.config import db, redis
from changes.models.filecoverage import FileCoverage
from changes.utils.diff_parser import DiffParser
from .base import ArtifactHandler
class CoverageHandler(ArtifactHandler):
def process(self, fp):
results = self.get_coverage(fp)
for result in results:
try:
with db.session.begin_nested():
db.session.add(result)
except IntegrityError:
lock_key = 'coverage:{job_id}:{file_hash}'.format(
job_id=result.job_id.hex,
file_hash=md5(result.filename).hexdigest(),
)
with redis.lock(lock_key):
result = self.merge_coverage(result)
db.session.add(result)
db.session.commit()
return results
def merge_coverage(self, new):
existing = FileCoverage.query.filter(
FileCoverage.job_id == new.job_id,
FileCoverage.filename == new.filename,
).first()
cov_data = []
for lineno in range(max(len(existing.data), len(new.data))):
try:
old_cov = existing.data[lineno]
except IndexError:
old_cov = 'N'
try:
new_cov = new.data[lineno]
except IndexError:
new_cov = 'N'
if old_cov == 'C' or new_cov == 'C':
cov_data.append('C')
elif old_cov == 'U' or new_cov == 'U':
cov_data.append('U')
else:
cov_data.append('N')
existing.data = ''.join(cov_data)
self.add_file_stats(existing)
return existing
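# Illustrative example of the merge above, not from the original source:
# merging existing data 'CUN' with new data 'UCNC' yields 'CCNC', because a
# 'C' on either side wins, then 'U', and 'N' only when neither side has data.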
def process_diff(self):
lines_by_file = defaultdict(set)
try:
source = self.step.job.build.source
except AttributeError:
return lines_by_file
diff = source.generate_diff()
if not diff:
return lines_by_file
diff_parser = DiffParser(diff)
parsed_diff = diff_parser.parse()
for file_diff in parsed_diff:
for diff_chunk in file_diff['chunks']:
if not file_diff['new_filename']:
continue
lines_by_file[file_diff['new_filename'][2:]].update(
d['new_lineno'] for d in diff_chunk if d['action'] in ('add', 'del')
)
return lines_by_file
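# Illustrative shape of the return value (assumed, not from the original
# source): {'changes/models/filecoverage.py': {12, 13, 47}}, i.e. a mapping of
# post-diff filenames to the set of line numbers touched by the diff.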
def get_processed_diff(self):
if not hasattr(self, '_processed_diff'):
self._processed_diff = self.process_diff()
return self._processed_diff
def add_file_stats(self, result):
diff_lines = self.get_processed_diff()[result.filename]
lines_covered = 0
lines_uncovered = 0
diff_lines_covered = 0
diff_lines_uncovered = 0
for lineno, code in enumerate(result.data):
# lineno is 1-based in diff
line_in_diff = bool((lineno + 1) in diff_lines)
if code == 'C':
lines_covered += 1
if line_in_diff:
diff_lines_covered += 1
elif code == 'U':
lines_uncovered += 1
if line_in_diff:
diff_lines_uncovered += 1
result.lines_covered = lines_covered
result.lines_uncovered = lines_uncovered
result.diff_lines_covered = diff_lines_covered
result.diff_lines_uncovered = diff_lines_uncovered
def get_coverage(self, fp):
"""
Return a phabricator-capable coverage mapping.
>>> {
>>> 'foo.py': 'NNNUUUUUUUUUUUUCCCUUUUUCCCCCCCCCNNCNCNCCCNNNN',
>>> }
Line flags consist of a single-character coverage indicator for each line in the file.
- N: no coverage available
- U: uncovered
- C: covered
"""
step = self.step
job = self.step.job
root = etree.fromstring(fp.read())
results = []
for node in root.iter('class'):
filename = node.get('filename')
file_coverage = []
for lineset in node.iterchildren('lines'):
lineno = 0
for line in lineset.iterchildren('line'):
number, hits = int(line.get('number')), int(line.get('hits'))
if lineno < number - 1:
for lineno in range(lineno, number - 1):
file_coverage.append('N')
if hits > 0:
file_coverage.append('C')
else:
file_coverage.append('U')
lineno = number
result = FileCoverage(
step_id=step.id,
job_id=job.id,
project_id=job.project_id,
filename=filename,
data=''.join(file_coverage),
)
self.add_file_stats(result)
results.append(result)
return results
| {
"repo_name": "alex/changes",
"path": "changes/artifacts/coverage.py",
"copies": "1",
"size": "5220",
"license": "apache-2.0",
"hash": 8763046951428787000,
"line_mean": 30.0714285714,
"line_max": 95,
"alpha_frac": 0.5203065134,
"autogenerated": false,
"ratio": 4.393939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5414245907339393,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from collections import defaultdict
import numpy as np
import pandas as pd
from six import iteritems
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Rect, Segment, Line, Patches, Arc
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import (Float, String, Datetime, Bool, Instance,
List, Either, Int, Enum, Color, Override, Any, Angle)
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import (Stat, Quantile, Sum, Min, Max, Bins, stats, Histogram,
BinnedStat)
from .data_source import ChartDataSource
from .utils import marker_types, generate_patch_base, label_from_index_dict
class NestedCompositeGlyph(CompositeGlyph):
"""A composite glyph that consists of other composite glyphs.
An important responsibility of any `CompositeGlyph` is to understand the bounds
of the glyph renderers that make it up. This class is used to provide convenient
properties that return the bounds from the child `CompositeGlyphs`.
"""
children = List(Instance(CompositeGlyph))
@property
def y_max(self):
return max([renderer.y_max for renderer in self.children])
@property
def y_min(self):
return min([renderer.y_min for renderer in self.children])
@property
def x_min(self):
return min([renderer.x_min for renderer in self.children])
@property
def x_max(self):
return max([renderer.x_max for renderer in self.children])
class XyGlyph(CompositeGlyph):
"""Composite glyph that plots in cartesian coordinates."""
x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
def build_source(self):
labels = self._build_label_array(('x', 'y'), self.label)
str_labels = [str(label) for label in labels]
if self.x is None:
data = dict(x_values=str_labels, y_values=self.y)
elif self.y is None:
data = dict(x_values=self.x, y_values=str_labels)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def _build_label_array(self, props, value):
for prop in props:
if getattr(self, prop) is not None:
return [value] * len(getattr(self, prop))
@property
def x_max(self):
# TODO(fpliger): since CompositeGlyphs are not exposed in general we
# should expect to always have a Series but in case
# it's not we just use the default min/max instead
# of just failing. When/If we end up exposing
# CompositeGlyphs we should consider making this
# more robust (either enforcing data or checking)
try:
return self.source.data['x_values'].max()
except AttributeError:
return max(self.source.data['x_values'])
@property
def x_min(self):
try:
return self.source.data['x_values'].min()
except AttributeError:
return min(self.source.data['x_values'])
@property
def y_max(self):
try:
return self.source.data['y_values'].max()
except AttributeError:
return max(self.source.data['y_values'])
@property
def y_min(self):
try:
return self.source.data['y_values'].min()
except AttributeError:
return min(self.source.data['y_values'])
class PointGlyph(XyGlyph):
"""A set of glyphs placed in x,y coordinates with the same attributes."""
fill_color = Override(default=DEFAULT_PALETTE[1])
fill_alpha = Override(default=0.7)
marker = String(default='circle')
size = Float(default=8)
def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
marker=None, size=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if marker is not None: kwargs['marker'] = marker
if size is not None: kwargs['size'] = size
if color:
line_color = color
fill_color = color
kwargs['line_color'] = line_color
kwargs['fill_color'] = fill_color
super(PointGlyph, self).__init__(**kwargs)
self.setup()
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
glyph_type = self.get_glyph()
glyph = glyph_type(x='x_values', y='y_values',
line_color=self.line_color,
fill_color=self.fill_color,
size=self.size,
fill_alpha=self.fill_alpha,
line_alpha=self.line_alpha)
yield GlyphRenderer(glyph=glyph)
class LineGlyph(XyGlyph):
"""Represents a group of data as a line."""
width = Int(default=2)
dash = Enum(DashPattern, default='solid')
def __init__(self, x=None, y=None, color=None, line_color=None,
width=None, dash=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if color is not None and line_color is None:
line_color = color
if dash is not None:
kwargs['dash'] = dash
if width is not None:
kwargs['width'] = width
if line_color is not None:
kwargs['line_color'] = line_color
super(LineGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
if self.x is None:
x = self.y.index
data = dict(x_values=x, y_values=self.y)
elif self.y is None:
y = self.x.index
data = dict(x_values=self.x, y_values=y)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Line(x='x_values', y='y_values',
line_color=self.line_color,
line_alpha=self.line_alpha,
line_width=self.width,
line_dash=self.dash)
yield GlyphRenderer(glyph=glyph)
class AreaGlyph(LineGlyph):
# ToDo: should these be added to composite glyph?
stack = Bool(default=False)
dodge = Bool(default=False)
base = Float(default=0.0, help="""Lower bound of area.""")
def __init__(self, **kwargs):
line_color = kwargs.get('line_color', None)
fill_color = kwargs.get('fill_color', None)
color = kwargs.get('color', None)
if color is not None:
# apply color to line and fill
kwargs['fill_color'] = color
kwargs['line_color'] = color
elif line_color is not None and fill_color is None:
# apply line color to fill color by default
kwargs['fill_color'] = line_color
super(AreaGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
data = super(AreaGlyph, self).build_source()
x0, y0 = generate_patch_base(pd.Series(list(data['x_values'])),
pd.Series(list(data['y_values'])))
data['x_values'] = [x0]
data['y_values'] = [y0]
return data
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color=self.fill_color,
line_color=self.line_color
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
def __stack__(self, glyphs):
# ToDo: need to handle case of non-aligned indices, see pandas concat
# ToDo: need to address how to aggregate on an index when required
# build a list of series
areas = []
for glyph in glyphs:
areas.append(pd.Series(glyph.source.data['y_values'][0],
index=glyph.source.data['x_values'][0]))
# concat the list of indexed y values into dataframe
df = pd.concat(areas, axis=1)
# calculate stacked values along the rows
stacked_df = df.cumsum(axis=1)
# lower bounds of each area series are diff between stacked and orig values
lower_bounds = stacked_df - df
# reverse the df so the patch is drawn in correct order
lower_bounds = lower_bounds.iloc[::-1]
# concat the upper and lower bounds together
stacked_df = pd.concat([stacked_df, lower_bounds])
# update the data in the glyphs
for i, glyph in enumerate(glyphs):
glyph.source.data['x_values'] = [stacked_df.index.values]
glyph.source.data['y_values'] = [stacked_df.ix[:, i].values]
def get_nested_extent(self, col, func):
return [getattr(arr, func)() for arr in self.source.data[col]]
@property
def x_max(self):
return max(self.get_nested_extent('x_values', 'max'))
@property
def x_min(self):
return min(self.get_nested_extent('x_values', 'min'))
@property
def y_max(self):
return max(self.get_nested_extent('y_values', 'max'))
@property
def y_min(self):
return min(self.get_nested_extent('y_values', 'min'))
class HorizonGlyph(AreaGlyph):
num_folds = Int(default=3, help="""The count of times the data is overlapped.""")
series = Int(default=0, help="""The index of the series, in the order it will
appear, starting from 0.""")
series_count = Int()
fold_height = Float(help="""The height of one fold.""")
bins = List(Float, help="""The binedges calculated from the number of folds,
and the maximum value of the entire source data.""")
graph_ratio = Float(help="""Scales heights of each series based on number of folds
and the number of total series being plotted.
""")
pos_color = Color("#006400", help="""The color used for positive values.""")
neg_color = Color("#6495ed", help="""The color used for negative values.""")
flip_neg = Bool(default=True, help="""When True, the negative values will be
plotted as their absolute value, then their individual axis is flipped. If False,
then the negative values will still be taken as their absolute value, but the base
of their shape will start from the same origin as the positive values.
""")
def __init__(self, bins=None, **kwargs):
# fill alpha depends on how many folds will be layered
kwargs['fill_alpha'] = 1.0/kwargs['num_folds']
if bins is not None:
kwargs['bins'] = bins
# each series is shifted up to a synthetic y-axis
kwargs['base'] = kwargs['series'] * max(bins) / kwargs['series_count']
kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['series_count'])
super(HorizonGlyph, self).__init__(**kwargs)
def build_source(self):
data = {}
# Build columns for the positive values
pos_y = self.y.copy()
pos_y[pos_y < 0] = 0
xs, ys = self._build_dims(self.x, pos_y)
# list of positive colors and alphas
colors = [self.pos_color] * len(ys)
alphas = [(bin_idx * self.fill_alpha) for bin_idx in
range(0, len(self.bins))]
# If we have negative values at all, add the values for those as well
if self.y.min() < 0:
neg_y = self.y.copy()
neg_y[neg_y > 0] = 0
neg_y = abs(neg_y)
neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)
xs += neg_xs
ys += neg_ys
colors += ([self.neg_color] * len(neg_ys))
alphas += alphas
# create clipped representation of each band
data['x_values'] = xs
data['y_values'] = ys
data['fill_color'] = colors
data['fill_alpha'] = colors
data['line_color'] = colors
return data
def _build_dims(self, x, y, flip=False):
""" Creates values needed to plot each fold of the horizon glyph.
Bins the data based on the binning passed into the glyph, then copies and clips
the values for each bin.
Args:
x (`pandas.Series`): array of x values
y (`pandas.Series`): array of y values
flip (bool): whether to flip values, used when handling negative values
Returns:
tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
arrays for the x values and list of arrays for the y values. The data
has been folded and transformed so the patches glyph presents the data
in a way that looks like an area chart.
"""
# assign bins to each y value
bin_idx = pd.cut(y, bins=self.bins, labels=False, include_lowest=True)
xs, ys = [], []
for idx, bin in enumerate(self.bins[0:-1]):
# subtract off values associated with lower bins, to get into this bin
temp_vals = y.copy() - (idx * self.fold_height)
# clip the values between the fold range and zero
temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
temp_vals[bin_idx < idx] = 0
temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]
# if flipping, we must start the values from the top of each fold's range
if flip:
temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
base = self.base + (self.fold_height * self.graph_ratio)
else:
base = self.base
# shift values up based on index of series
temp_vals += self.base
val_idx = temp_vals > 0
if pd.Series.any(val_idx):
ys.append(temp_vals)
xs.append(x)
# transform clipped data so it always starts and ends at its base value
if len(ys) > 0:
xs, ys = map(list, zip(*[generate_patch_base(x, y, base=base) for
x, y in zip(xs, ys)]))
return xs, ys
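# Illustrative fold arithmetic (assumed values, not from the original source):
# with num_folds=3, series_count=1 and bins=[0, 10, 20, 30], fold_height is 10
# and graph_ratio is 3. A y value of 25 falls in the third bin, so for the
# first two folds it is clipped to the full fold height (10 * 3 = 30), and in
# the third fold it contributes (25 - 2 * 10) * 3 = 15 above the series base.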
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color='fill_color',
line_color='line_color'
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
class StepGlyph(LineGlyph):
"""Represents a group of data as a stepped line."""
def build_source(self):
x = self.x
y = self.y
if self.x is None:
x = self.y.index
elif self.y is None:
y = self.x.index
dtype = x.dtype if hasattr(x, 'dtype') else np.int
xs = np.empty(2*len(x)-1, dtype=dtype)
xs[::2] = x[:]
xs[1::2] = x[1:]
dtype = y.dtype if hasattr(y, 'dtype') else np.float64
ys = np.empty(2*len(y)-1, dtype=dtype)
ys[::2] = y[:]
ys[1::2] = y[:-1]
data = dict(x_values=xs, y_values=ys)
return data
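# Illustrative example of the interleaving above, not from the original source:
# x = [1, 2, 3], y = [5, 7, 9] becomes xs = [1, 2, 2, 3, 3] and
# ys = [5, 5, 7, 7, 9], so each y value is held flat until the next x value.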
class AggregateGlyph(NestedCompositeGlyph):
"""A base composite glyph for aggregating an array.
Implements default stacking and dodging behavior that other composite
glyphs can inherit.
"""
x_label = String()
x_label_value = Any()
stack_label = String()
stack_shift = Float(default=0.0)
dodge_label = String(help="""Where on the scale the glyph should be placed.""")
dodge_shift = Float(default=None)
agg = Instance(Stat, default=Sum())
span = Float(help="""The range of values represented by the aggregate.""")
def __init__(self, x_label=None, **kwargs):
label = kwargs.get('label')
if x_label is not None:
kwargs['x_label_value'] = x_label
if not isinstance(x_label, str):
x_label = str(x_label)
kwargs['x_label'] = x_label
elif label is not None:
kwargs['x_label'] = str(label)
super(AggregateGlyph, self).__init__(**kwargs)
def get_dodge_label(self, shift=0.0):
"""Generate the label defining an offset in relation to a position on a scale."""
if self.dodge_shift is None:
shift_str = ':' + str(0.5 + shift)
elif self.dodge_shift is not None:
shift_str = ':' + str(self.dodge_shift + shift)
else:
shift_str = ''
return str(label_from_index_dict(self.x_label)) + shift_str
def filter_glyphs(self, glyphs):
"""Return only the glyphs that are of the same class."""
return [glyph for glyph in glyphs if isinstance(glyph, self.__class__)]
@staticmethod
def groupby(glyphs, prop):
"""Returns a dict of `CompositeGlyph`s, grouped by unique values of prop.
For example, if all glyphs had a value of 'a' or 'b' for glyph.prop, the dict
would contain two keys, 'a' and 'b', where each value is a list of the glyphs
that had each of the values.
"""
grouped = defaultdict(list)
labels = [getattr(glyph, prop) for glyph in glyphs]
labels = [tuple(label.values()) if isinstance(label, dict) else label for label
in labels]
[grouped[label].append(glyph) for label, glyph in zip(labels, glyphs)]
labels = pd.Series(labels).drop_duplicates().values
return labels, grouped
def __stack__(self, glyphs):
"""Apply relative shifts to the composite glyphs for stacking."""
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'x_label')
for label in labels:
group = grouped[label]
# separate the negative and positive aggregates into separate groups
neg_group = [glyph for glyph in group if glyph.span < 0]
pos_group = [glyph for glyph in group if glyph.span >= 0]
# apply stacking to each group separately
for group in [neg_group, pos_group]:
shift = []
for i, glyph in enumerate(group):
# save off the top of each rect's height
shift.append(glyph.span)
if i > 0:
glyph.stack_shift = sum(shift[0:i])
glyph.refresh()
def __dodge__(self, glyphs):
"""Apply relative shifts to the composite glyphs for dodging."""
if self.dodge_label is not None:
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'dodge_label')
# calculate transformations
step = np.linspace(0, 1.0, len(grouped.keys()) + 1, endpoint=False)
width = min(0.2, (1. / len(grouped.keys())) ** 1.1)
# set bar attributes and re-aggregate
for i, label in enumerate(labels):
group = grouped[label]
for glyph in group:
glyph.dodge_shift = step[i + 1]
glyph.width = width
glyph.refresh()
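# Illustrative dodge arithmetic (assumed values, not from the original source):
# with 4 dodge groups, np.linspace(0, 1.0, 5, endpoint=False) gives steps
# [0.0, 0.2, 0.4, 0.6, 0.8], so the groups are centred at 0.2, 0.4, 0.6 and
# 0.8 within the category, each with width min(0.2, (1. / 4) ** 1.1) = 0.2.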
class Interval(AggregateGlyph):
"""A rectangle representing aggregated values.
The interval is a rect glyph where two of the parallel sides represent a
summary of values. Each of the two sides is derived from a separate aggregation of
the values provided to the interval.
.. note::
A bar is a special case interval where one side is pinned and used to
communicate a value relative to it.
"""
width = Float(default=0.8)
start_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Min(), help="""
The stat used to derive the starting point of the composite glyph.""")
end_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Max(), help="""
The stat used to derive the end point of the composite glyph.""")
start = Float(default=0.0)
end = Float()
def __init__(self, label, values, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
super(Interval, self).__init__(**kwargs)
self.setup()
def get_start(self):
"""Get the value for the start of the glyph."""
if len(self.values.index) == 1:
self.start_agg = None
return self.values[0]
elif isinstance(self.start_agg, str):
self.start_agg = stats[self.start_agg]()
self.start_agg.set_data(self.values)
return self.start_agg.value
def get_end(self):
"""Get the value for the end of the glyph."""
if isinstance(self.end_agg, str):
self.end_agg = stats[self.end_agg]()
self.end_agg.set_data(self.values)
return self.end_agg.value
def get_span(self):
"""The total range between the start and end."""
return self.end - self.start
def build_source(self):
# ToDo: Handle rotation
self.start = self.get_start()
self.end = self.get_end()
self.span = self.get_span()
width = [self.width]
if self.dodge_shift is not None:
x = [self.get_dodge_label()]
else:
x = [self.x_label]
height = [self.span]
y = [self.stack_shift + (self.span / 2.0) + self.start]
color = [self.color]
fill_alpha = [self.fill_alpha]
line_color = [self.line_color]
line_alpha = [self.line_alpha]
label = [self.label]
return dict(x=x, y=y, width=width, height=height, color=color,
fill_alpha=fill_alpha, line_color=line_color,
line_alpha=line_alpha, label=label)
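# Note on the geometry above: Rect glyphs are positioned by their center, so y
# is the stack offset plus half the span plus the start value, while height is
# the full span between the start and end aggregates.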
@property
def x_max(self):
"""The maximum extent of the glyph in x.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) + (self.width / 2.0)
@property
def x_min(self):
"""The maximum extent of the glyph in y.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) - (self.width / 2.0)
@property
def y_max(self):
"""Maximum extent of all `Glyph`s.
How much we are stacking + the height of the interval + the base of the interval
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return max(self.bottom, self.top)
@property
def y_min(self):
"""The minimum extent of all `Glyph`s in y.
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return min(self.bottom, self.top)
@property
def bottom(self):
"""The value associated with the start of the stacked glyph."""
return self.stack_shift + self.start
@property
def top(self):
"""The value associated with the end of the stacked glyph."""
return self.stack_shift + self.span + self.start
def build_renderers(self):
"""Yields a `GlyphRenderer` associated with a `Rect` glyph."""
glyph = Rect(x='x', y='y', width='width', height='height', fill_color='color',
fill_alpha='fill_alpha', line_color='line_color')
yield GlyphRenderer(glyph=glyph)
class BarGlyph(Interval):
"""Special case of Interval where the span represents a value.
A bar always begins from 0, or the value that is being compared to, and
extends to some positive or negative value.
"""
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
kwargs['start_agg'] = None
super(BarGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
class DotGlyph(Interval):
"""Special case of Interval where the span represents a value.
Like a bar, the value is measured from 0 (or the value being compared to) and
extends to some positive or negative value; the dot marks the end of that span.
"""
marker = String(default='circle')
size = Float(default=8)
stem = Bool(False, help="""
Whether to draw a stem from each dot to the axis.
""")
stem_line_width = Float(default=1)
stem_color = String(default='black')
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
super(DotGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
if self.stem:
yield GlyphRenderer(glyph=Segment(
x0='x', y0=0, x1='x', y1='height',
line_width=self.stem_line_width,
line_color=self.stem_color,
line_alpha='fill_alpha')
)
glyph_type = self.get_glyph()
glyph = glyph_type(x='x', y='height',
line_color=self.line_color,
fill_color=self.color,
size=self.size,
fill_alpha='fill_alpha',
line_alpha='line_alpha'
)
yield GlyphRenderer(glyph=glyph)
class QuartileGlyph(Interval):
"""An interval that has start and end aggregations of quartiles."""
def __init__(self, label, values, interval1, interval2, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
kwargs['start_agg'] = Quantile(interval=interval1)
kwargs['end_agg'] = Quantile(interval=interval2)
super(QuartileGlyph, self).__init__(**kwargs)
self.setup()
class BoxGlyph(AggregateGlyph):
"""Summarizes the distribution with a collection of glyphs.
A box glyph produces one "box" for a given array of values. The box
is made up of multiple other child composite glyphs (intervals,
scatter) and directly produces glyph renderers for the whiskers,
as well.
"""
q1 = Float(help="""Derived value for 25% of all values.""")
q2 = Float(help="""Derived value for 50% of all values.""")
q3 = Float(help="""Derived value for 75% of all values.""")
iqr = Float()
w0 = Float(help='Lower whisker')
w1 = Float(help='Upper whisker')
q2_glyph = Instance(QuartileGlyph)
q3_glyph = Instance(QuartileGlyph)
whisker_glyph = Instance(GlyphRenderer)
outliers = Either(Bool, Instance(PointGlyph))
marker = String(default='circle')
whisker_width = Float(default=0.3)
whisker_line_width = Float(default=2)
whisker_span_line_width = Float(default=2)
whisker_color = String(default='black')
outlier_fill_color = String(default='red')
outlier_line_color = String(default='red')
outlier_size = Float(default=5)
bar_color = String(default='DimGrey')
def __init__(self, label, values, outliers=True, **kwargs):
width = kwargs.pop('width', None)
bar_color = kwargs.pop('color', None) or kwargs.get('bar_color', None) or self.lookup('bar_color').class_default()
kwargs['outliers'] = kwargs.pop('outliers', None) or outliers
kwargs['label'] = label
kwargs['values'] = values
x_label = kwargs.get('x_label')
kwargs['q2_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.25, interval2=0.5, width=width,
color=bar_color)
kwargs['q3_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.5, interval2=0.75, width=width,
color=bar_color)
super(BoxGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yields all renderers that make up the BoxGlyph."""
self.calc_quartiles()
outlier_values = self.values[((self.values < self.w0) | (self.values > self.w1))]
self.whisker_glyph = GlyphRenderer(glyph=Segment(x0='x0s', y0='y0s', x1='x1s', y1='y1s',
line_width=self.whisker_line_width,
line_color=self.whisker_color))
if len(outlier_values) > 0 and self.outliers:
self.outliers = PointGlyph(label=self.label, y=outlier_values,
x=[self.get_dodge_label()] * len(outlier_values),
line_color=self.outlier_line_color,
fill_color=self.outlier_fill_color,
size=self.outlier_size, marker=self.marker)
for comp_glyph in self.composite_glyphs:
for renderer in comp_glyph.renderers:
yield renderer
yield self.whisker_glyph
def calc_quartiles(self):
"""Sets all derived stat properties of the BoxGlyph."""
self.q1 = self.q2_glyph.start
self.q2 = self.q2_glyph.end
self.q3 = self.q3_glyph.end
self.iqr = self.q3 - self.q1
mx = Max()
mx.set_data(self.values)
mn = Min()
mn.set_data(self.values)
self.w0 = max(self.q1 - (1.5 * self.iqr), mn.value)
self.w1 = min(self.q3 + (1.5 * self.iqr), mx.value)
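# Illustrative whisker calculation (assumed values, not from the original
# source): with q1=2 and q3=6 the IQR is 4, so the whiskers extend to at most
# 2 - 1.5 * 4 = -4 and 6 + 1.5 * 4 = 12, clamped to the observed min and max of
# the data; anything outside [w0, w1] is drawn as an outlier point.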
def build_source(self):
"""Calculate stats and builds and returns source for whiskers."""
self.calc_quartiles()
x_label = self.get_dodge_label()
x_w0_label = self.get_dodge_label(shift=(self.whisker_width / 2.0))
x_w1_label = self.get_dodge_label(shift=-(self.whisker_width / 2.0))
# span0, whisker bar0, span1, whisker bar1
x0s = [x_label, x_w0_label, x_label, x_w0_label]
y0s = [self.w0, self.w0, self.q3, self.w1]
x1s = [x_label, x_w1_label, x_label, x_w1_label]
y1s = [self.q1, self.w0, self.w1, self.w1]
return dict(x0s=x0s, y0s=y0s, x1s=x1s, y1s=y1s)
def _set_sources(self):
"""Set the column data source on the whisker glyphs."""
self.whisker_glyph.data_source = self.source
def get_extent(self, func, prop_name):
return func([getattr(renderer, prop_name) for renderer in self.composite_glyphs])
@property
def composite_glyphs(self):
"""Returns list of composite glyphs, excluding the regular glyph renderers."""
comp_glyphs = [self.q2_glyph, self.q3_glyph]
if isinstance(self.outliers, PointGlyph):
comp_glyphs.append(self.outliers)
return comp_glyphs
@property
def x_max(self):
return self.get_extent(max, 'x_max') + self.right_buffer
@property
def x_min(self):
return self.get_extent(min, 'x_min') - self.left_buffer
@property
def y_max(self):
return max(self.w1, self.get_extent(max, 'y_max')) + self.top_buffer
@property
def y_min(self):
return min(self.w0, self.get_extent(min, 'y_min')) - self.bottom_buffer
class HistogramGlyph(AggregateGlyph):
"""Depicts the distribution of values using rectangles created by binning.
The histogram represents a distribution, so will likely include other
options for displaying it, such as KDE and cumulative density.
"""
# derived models
bins = Instance(BinnedStat, help="""A stat used to calculate the bins. The bins stat
includes attributes about each composite bin.""")
bars = List(Instance(BarGlyph), help="""The histogram is comprised of many
BarGlyphs that are derived from the values.""")
density = Bool(False, help="""
Whether to normalize the histogram.
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check :class:`~bokeh.charts.stats.Histogram` documentation.
(default: False)
""")
def __init__(self, values, label=None, color=None, bins=None, **kwargs):
if label is not None:
kwargs['label'] = label
kwargs['values'] = values
if color is not None:
kwargs['color'] = color
# remove width, since this is handled automatically
kwargs.pop('width', None)
# keep original bins setting private since it just needs to be
# delegated to the Histogram stat
self._bins = bins
super(HistogramGlyph, self).__init__(**kwargs)
self.setup()
def _set_sources(self):
# No need to set sources, since composite glyphs handle this
pass
def build_source(self):
# No need to build source, since composite glyphs handle this
return None
def build_renderers(self):
"""Yield a bar glyph for each bin."""
# TODO(fpliger): We should expose the bin stat class so we could let
# users specify bin stats other than the Histogram stat
self.bins = Histogram(values=self.values, bins=self._bins,
density=self.density)
bars = []
for bin in self.bins.bins:
bars.append(BarGlyph(label=bin.label[0], x_label=bin.center,
values=bin.values, color=self.color,
fill_alpha=self.fill_alpha,
agg=bin.stat, width=bin.width))
# provide access to bars as children for bounds properties
self.bars = self.children = bars
for comp_glyph in self.bars:
for renderer in comp_glyph.renderers:
yield renderer
@property
def y_min(self):
return 0.0
class BinGlyph(XyGlyph):
"""Represents a group of data that was aggregated and is represented by a glyph.
"""
bins = Instance(Bins)
column = String()
stat = String()
glyph_name = String()
width = Float()
height = Float()
def __init__(self, x, y, values, column=None, stat='count', glyph='rect', width=1,
height=1, **kwargs):
df = pd.DataFrame(dict(x_vals=x, y_vals=y, values_vals=values))
df.drop_duplicates(inplace=True)
kwargs['x'] = df.x_vals
kwargs['y'] = df.y_vals
kwargs['values'] = df.values_vals
kwargs['column'] = column
kwargs['stat'] = stat
kwargs['glyph_name'] = glyph
kwargs['height'] = height
kwargs['width'] = width
if 'glyphs' not in kwargs:
kwargs['glyphs'] = {'rect': Rect}
super(XyGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
return {'x': self.x, 'y': self.y, 'values': self.values}
def build_renderers(self):
glyph_class = self.glyphs[self.glyph_name]
glyph = glyph_class(x='x', y='y', height=self.height, width=self.width,
fill_color=self.fill_color, line_color=self.line_color,
dilate=True)
yield GlyphRenderer(glyph=glyph)
@property
def x_max(self):
return self.get_data_range('x')[1] + self.width / 2.0
@property
def x_min(self):
return self.get_data_range('x')[0] - self.width / 2.0
@property
def y_max(self):
return self.get_data_range('y')[1] + self.height / 2.0
@property
def y_min(self):
return self.get_data_range('y')[0] - self.height / 2.0
def get_data_range(self, col):
data = self.source.data[col]
if ChartDataSource.is_number(data):
return min(data), max(data)
else:
return 1, len(data.drop_duplicates())
class ArcGlyph(LineGlyph):
"""Represents a group of data as an arc."""
start_angle = Angle()
end_angle = Angle()
def __init__(self, **kwargs):
super(ArcGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Arc(x='x', y='y', radius=1,
start_angle='_end_angle',
end_angle='_start_angle',
line_color='line_color')
yield GlyphRenderer(glyph=glyph)
| {
"repo_name": "quasiben/bokeh",
"path": "bokeh/charts/glyphs.py",
"copies": "3",
"size": "36943",
"license": "bsd-3-clause",
"hash": -4883119939429555000,
"line_mean": 33.6232427366,
"line_max": 122,
"alpha_frac": 0.5810573045,
"autogenerated": false,
"ratio": 3.8716202054076714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5952677509907671,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from collections import defaultdict
import numpy as np
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Rect, Segment, Line, Patches, Arc
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import (Float, String, Datetime, Bool, Instance,
List, Either, Int, Enum, Color, Override, Any, Angle)
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import (Stat, Quantile, Sum, Min, Max, Bins, stats, Histogram,
BinnedStat)
from .data_source import ChartDataSource
from .utils import marker_types, generate_patch_base, label_from_index_dict
class NestedCompositeGlyph(CompositeGlyph):
"""A composite glyph that consists of other composite glyphs.
An important responsibility of any `CompositeGlyph` is to understand the bounds
of the glyph renderers that make it up. This class is used to provide convenient
properties that return the bounds from the child `CompositeGlyphs`.
"""
children = List(Instance(CompositeGlyph))
@property
def y_max(self):
return max([renderer.y_max for renderer in self.children])
@property
def y_min(self):
return min([renderer.y_min for renderer in self.children])
@property
def x_min(self):
return min([renderer.x_min for renderer in self.children])
@property
def x_max(self):
return max([renderer.x_max for renderer in self.children])
class XyGlyph(CompositeGlyph):
"""Composite glyph that plots in cartesian coordinates."""
x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
def build_source(self):
labels = self._build_label_array(('x', 'y'), self.label)
str_labels = [str(label) for label in labels]
if self.x is None:
data = dict(x_values=str_labels, y_values=self.y)
elif self.y is None:
data = dict(x_values=self.x, y_values=str_labels)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def _build_label_array(self, props, value):
for prop in props:
if getattr(self, prop) is not None:
return [value] * len(getattr(self, prop))
@property
def x_max(self):
# TODO(fpliger): since CompositeGlyphs are not exposed in general we
# should expect to always have a Series but in case
# it's not we just use the default min/max instead
# of just failing. When/If we end up exposing
# CompositeGlyphs we should consider making this
# more robust (either enforcing data or checking)
try:
return self.source.data['x_values'].max()
except AttributeError:
return max(self.source.data['x_values'])
@property
def x_min(self):
try:
return self.source.data['x_values'].min()
except AttributeError:
return min(self.source.data['x_values'])
@property
def y_max(self):
try:
return self.source.data['y_values'].max()
except AttributeError:
return max(self.source.data['y_values'])
@property
def y_min(self):
try:
return self.source.data['y_values'].min()
except AttributeError:
return min(self.source.data['y_values'])
class PointGlyph(XyGlyph):
"""A set of glyphs placed in x,y coordinates with the same attributes."""
fill_color = Override(default=DEFAULT_PALETTE[1])
fill_alpha = Override(default=0.7)
marker = String(default='circle')
size = Float(default=8)
def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
marker=None, size=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if marker is not None: kwargs['marker'] = marker
if size is not None: kwargs['size'] = size
if color:
line_color = color
fill_color = color
kwargs['line_color'] = line_color
kwargs['fill_color'] = fill_color
super(PointGlyph, self).__init__(**kwargs)
self.setup()
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
glyph_type = self.get_glyph()
glyph = glyph_type(x='x_values', y='y_values',
line_color=self.line_color,
fill_color=self.fill_color,
size=self.size,
fill_alpha=self.fill_alpha,
line_alpha=self.line_alpha)
yield GlyphRenderer(glyph=glyph)
class LineGlyph(XyGlyph):
"""Represents a group of data as a line."""
width = Int(default=2)
dash = Enum(DashPattern, default='solid')
def __init__(self, x=None, y=None, color=None, line_color=None,
width=None, dash=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if color is not None and line_color is None:
line_color = color
if dash is not None:
kwargs['dash'] = dash
if width is not None:
kwargs['width'] = width
if line_color is not None:
kwargs['line_color'] = line_color
super(LineGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
if self.x is None:
x = self.y.index
data = dict(x_values=x, y_values=self.y)
elif self.y is None:
y = self.x.index
data = dict(x_values=self.x, y_values=y)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Line(x='x_values', y='y_values',
line_color=self.line_color,
line_alpha=self.line_alpha,
line_width=self.width,
line_dash=self.dash)
yield GlyphRenderer(glyph=glyph)
class AreaGlyph(LineGlyph):
# ToDo: should these be added to composite glyph?
stack = Bool(default=False)
dodge = Bool(default=False)
base = Float(default=0.0, help="""Lower bound of area.""")
def __init__(self, **kwargs):
line_color = kwargs.get('line_color')
fill_color = kwargs.get('fill_color')
color = kwargs.get('color')
if color is not None:
# apply color to line and fill
kwargs['fill_color'] = color
kwargs['line_color'] = color
elif line_color is not None and fill_color is None:
# apply line color to fill color by default
kwargs['fill_color'] = line_color
super(AreaGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
data = super(AreaGlyph, self).build_source()
x0, y0 = generate_patch_base(pd.Series(list(data['x_values'])),
pd.Series(list(data['y_values'])))
data['x_values'] = [x0]
data['y_values'] = [y0]
return data
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color=self.fill_color,
line_color=self.line_color
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
def __stack__(self, glyphs):
# ToDo: need to handle case of non-aligned indices, see pandas concat
# ToDo: need to address how to aggregate on an index when required
# build a list of series
areas = []
for glyph in glyphs:
areas.append(pd.Series(glyph.source.data['y_values'][0],
index=glyph.source.data['x_values'][0]))
# concat the list of indexed y values into dataframe
df = pd.concat(areas, axis=1)
# calculate stacked values along the rows
stacked_df = df.cumsum(axis=1)
# lower bounds of each area series are diff between stacked and orig values
lower_bounds = stacked_df - df
# reverse the df so the patch is drawn in correct order
lower_bounds = lower_bounds.iloc[::-1]
# concat the upper and lower bounds together
stacked_df = pd.concat([stacked_df, lower_bounds])
# update the data in the glyphs
for i, glyph in enumerate(glyphs):
glyph.source.data['x_values'] = [stacked_df.index.values]
glyph.source.data['y_values'] = [stacked_df.ix[:, i].values]
def get_nested_extent(self, col, func):
return [getattr(arr, func)() for arr in self.source.data[col]]
@property
def x_max(self):
return max(self.get_nested_extent('x_values', 'max'))
@property
def x_min(self):
return min(self.get_nested_extent('x_values', 'min'))
@property
def y_max(self):
return max(self.get_nested_extent('y_values', 'max'))
@property
def y_min(self):
return min(self.get_nested_extent('y_values', 'min'))
class HorizonGlyph(AreaGlyph):
num_folds = Int(default=3, help="""The count of times the data is overlapped.""")
series = Int(default=0, help="""The index of the series, in the order it will
appear, starting from 0.""")
series_count = Int()
fold_height = Float(help="""The height of one fold.""")
bins = List(Float, help="""The binedges calculated from the number of folds,
and the maximum value of the entire source data.""")
graph_ratio = Float(help="""Scales heights of each series based on number of folds
and the number of total series being plotted.
""")
pos_color = Color("#006400", help="""The color used for positive values.""")
neg_color = Color("#6495ed", help="""The color used for negative values.""")
flip_neg = Bool(default=True, help="""When True, the negative values will be
plotted as their absolute value, then their individual axis is flipped. If False,
then the negative values will still be taken as their absolute value, but the base
of their shape will start from the same origin as the positive values.
""")
def __init__(self, bins=None, **kwargs):
# fill alpha depends on how many folds will be layered
kwargs['fill_alpha'] = 1.0/kwargs['num_folds']
if bins is not None:
kwargs['bins'] = bins
# each series is shifted up to a synthetic y-axis
kwargs['base'] = kwargs['series'] * max(bins) / kwargs['series_count']
kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['series_count'])
super(HorizonGlyph, self).__init__(**kwargs)
def build_source(self):
data = {}
# Build columns for the positive values
pos_y = self.y.copy()
pos_y[pos_y < 0] = 0
xs, ys = self._build_dims(self.x, pos_y)
# list of positive colors and alphas
colors = [self.pos_color] * len(ys)
alphas = [(bin_idx * self.fill_alpha) for bin_idx in
range(0, len(self.bins))]
# If we have negative values at all, add the values for those as well
if self.y.min() < 0:
neg_y = self.y.copy()
neg_y[neg_y > 0] = 0
neg_y = abs(neg_y)
neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)
xs += neg_xs
ys += neg_ys
colors += ([self.neg_color] * len(neg_ys))
alphas += alphas
# create clipped representation of each band
data['x_values'] = xs
data['y_values'] = ys
data['fill_color'] = colors
data['fill_alpha'] = colors
data['line_color'] = colors
return data
def _build_dims(self, x, y, flip=False):
""" Creates values needed to plot each fold of the horizon glyph.
Bins the data based on the binning passed into the glyph, then copies and clips
the values for each bin.
Args:
x (`pandas.Series`): array of x values
y (`pandas.Series`): array of y values
flip (bool): whether to flip values, used when handling negative values
Returns:
tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
arrays for the x values and list of arrays for the y values. The data
has been folded and transformed so the patches glyph presents the data
in a way that looks like an area chart.
"""
# assign bins to each y value
bin_idx = pd.cut(y, bins=self.bins, labels=False, include_lowest=True)
xs, ys = [], []
for idx, bin in enumerate(self.bins[0:-1]):
# subtract off values associated with lower bins, to get into this bin
temp_vals = y.copy() - (idx * self.fold_height)
# clip the values between the fold range and zero
temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
temp_vals[bin_idx < idx] = 0
temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]
# if flipping, we must start the values from the top of each fold's range
if flip:
temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
base = self.base + (self.fold_height * self.graph_ratio)
else:
base = self.base
# shift values up based on index of series
temp_vals += self.base
val_idx = temp_vals > 0
if pd.Series.any(val_idx):
ys.append(temp_vals)
xs.append(x)
# transform clipped data so it always starts and ends at its base value
if len(ys) > 0:
xs, ys = map(list, zip(*[generate_patch_base(xx, yy, base=base) for
xx, yy in zip(xs, ys)]))
return xs, ys
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color='fill_color',
line_color='line_color'
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
class StepGlyph(LineGlyph):
"""Represents a group of data as a stepped line."""
def build_source(self):
x = self.x
y = self.y
if self.x is None:
x = self.y.index
elif self.y is None:
y = self.x.index
dtype = x.dtype if hasattr(x, 'dtype') else np.int
xs = np.empty(2*len(x)-1, dtype=dtype)
xs[::2] = x[:]
xs[1::2] = x[1:]
dtype = y.dtype if hasattr(y, 'dtype') else np.float64
ys = np.empty(2*len(y)-1, dtype=dtype)
ys[::2] = y[:]
ys[1::2] = y[:-1]
data = dict(x_values=xs, y_values=ys)
return data
class AggregateGlyph(NestedCompositeGlyph):
"""A base composite glyph for aggregating an array.
Implements default stacking and dodging behavior that other composite
glyphs can inherit.
"""
x_label = String()
x_label_value = Any()
stack_label = String()
stack_shift = Float(default=0.0)
dodge_label = String(help="""Where on the scale the glyph should be placed.""")
dodge_shift = Float(default=None)
agg = Instance(Stat, default=Sum())
span = Float(help="""The range of values represented by the aggregate.""")
def __init__(self, x_label=None, **kwargs):
label = kwargs.get('label')
if x_label is not None:
kwargs['x_label_value'] = x_label
if not isinstance(x_label, str):
x_label = str(x_label)
kwargs['x_label'] = x_label
elif label is not None:
kwargs['x_label'] = str(label)
super(AggregateGlyph, self).__init__(**kwargs)
def get_dodge_label(self, shift=0.0):
"""Generate the label defining an offset in relation to a position on a scale."""
if self.dodge_shift is None:
shift_str = ':' + str(0.5 + shift)
elif self.dodge_shift is not None:
shift_str = ':' + str(self.dodge_shift + shift)
else:
shift_str = ''
return str(label_from_index_dict(self.x_label)) + shift_str
def filter_glyphs(self, glyphs):
"""Return only the glyphs that are of the same class."""
return [glyph for glyph in glyphs if isinstance(glyph, self.__class__)]
@staticmethod
def groupby(glyphs, prop):
"""Returns a dict of `CompositeGlyph`s, grouped by unique values of prop.
For example, if all glyphs had a value of 'a' or 'b' for glyph.prop, the dict
would contain two keys, 'a' and 'b', where each value is a list of the glyphs
that had each of the values.
"""
grouped = defaultdict(list)
labels = [getattr(glyph, prop) for glyph in glyphs]
labels = [tuple(label.values()) if isinstance(label, dict) else label for label
in labels]
[grouped[label].append(glyph) for label, glyph in zip(labels, glyphs)]
labels = pd.Series(labels).drop_duplicates().values
return labels, grouped
def __stack__(self, glyphs):
"""Apply relative shifts to the composite glyphs for stacking."""
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'x_label')
for label in labels:
group = grouped[label]
# separate the negative and positive aggregates into separate groups
neg_group = [glyph for glyph in group if glyph.span < 0]
pos_group = [glyph for glyph in group if glyph.span >= 0]
# apply stacking to each group separately
for group in [neg_group, pos_group]:
shift = []
for i, glyph in enumerate(group):
# save off the top of each rect's height
shift.append(glyph.span)
if i > 0:
glyph.stack_shift = sum(shift[0:i])
glyph.refresh()
def __dodge__(self, glyphs):
"""Apply relative shifts to the composite glyphs for dodging."""
if self.dodge_label is not None:
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'dodge_label')
# calculate transformations
step = np.linspace(0, 1.0, len(grouped.keys()) + 1, endpoint=False)
width = min(0.2, (1. / len(grouped.keys())) ** 1.1)
# set bar attributes and re-aggregate
for i, label in enumerate(labels):
group = grouped[label]
for glyph in group:
glyph.dodge_shift = step[i + 1]
glyph.width = width
glyph.refresh()
class Interval(AggregateGlyph):
"""A rectangle representing aggregated values.
The interval is a rect glyph where two of the parallel sides represent a
summary of values. Each of the two sides is derived from a separate aggregation of
the values provided to the interval.
.. note::
A bar is a special case interval where one side is pinned and used to
communicate a value relative to it.
"""
width = Float(default=0.8)
start_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Min(), help="""
The stat used to derive the starting point of the composite glyph.""")
end_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Max(), help="""
The stat used to derive the end point of the composite glyph.""")
start = Float(default=0.0)
end = Float()
def __init__(self, label, values, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
super(Interval, self).__init__(**kwargs)
self.setup()
def get_start(self):
"""Get the value for the start of the glyph."""
if len(self.values.index) == 1:
self.start_agg = None
return self.values[0]
elif isinstance(self.start_agg, str):
self.start_agg = stats[self.start_agg]()
self.start_agg.set_data(self.values)
return self.start_agg.value
def get_end(self):
"""Get the value for the end of the glyph."""
if isinstance(self.end_agg, str):
self.end_agg = stats[self.end_agg]()
self.end_agg.set_data(self.values)
return self.end_agg.value
def get_span(self):
"""The total range between the start and end."""
return self.end - self.start
def build_source(self):
# ToDo: Handle rotation
self.start = self.get_start()
self.end = self.get_end()
self.span = self.get_span()
width = [self.width]
if self.dodge_shift is not None:
x = [self.get_dodge_label()]
else:
x = [self.x_label]
height = [self.span]
y = [self.stack_shift + (self.span / 2.0) + self.start]
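        # Added note: Rect glyphs are positioned by their centre, so the y
        # computed above is the start plus half the span, offset by any
        # stacking shift.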
color = [self.color]
fill_alpha = [self.fill_alpha]
line_color = [self.line_color]
line_alpha = [self.line_alpha]
label = [self.label]
return dict(x=x, y=y, width=width, height=height, color=color,
fill_alpha=fill_alpha, line_color=line_color,
line_alpha=line_alpha, label=label)
@property
def x_max(self):
"""The maximum extent of the glyph in x.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) + (self.width / 2.0)
@property
def x_min(self):
"""The maximum extent of the glyph in y.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) - (self.width / 2.0)
@property
def y_max(self):
"""Maximum extent of all `Glyph`s.
How much we are stacking + the height of the interval + the base of the interval
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return max(self.bottom, self.top)
@property
def y_min(self):
"""The minimum extent of all `Glyph`s in y.
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return min(self.bottom, self.top)
@property
def bottom(self):
"""The value associated with the start of the stacked glyph."""
return self.stack_shift + self.start
@property
def top(self):
"""The value associated with the end of the stacked glyph."""
return self.stack_shift + self.span + self.start
def build_renderers(self):
"""Yields a `GlyphRenderer` associated with a `Rect` glyph."""
glyph = Rect(x='x', y='y', width='width', height='height', fill_color='color',
fill_alpha='fill_alpha', line_color='line_color')
yield GlyphRenderer(glyph=glyph)
class BarGlyph(Interval):
"""Special case of Interval where the span represents a value.
A bar always begins from 0, or the value that is being compared to, and
extends to some positive or negative value.
"""
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
kwargs['start_agg'] = None
super(BarGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
class DotGlyph(Interval):
"""Special case of Interval where the span represents a value.
    A dot marks the end of a span that begins from 0, or the value that is
    being compared to, and extends to some positive or negative value.
"""
marker = String(default='circle')
size = Float(default=8)
stem = Bool(False, help="""
    Whether to draw a stem from each dot to the axis.
""")
stem_line_width = Float(default=1)
stem_color = String(default='black')
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
super(DotGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
if self.stem:
yield GlyphRenderer(glyph=Segment(
x0='x', y0=0, x1='x', y1='height',
line_width=self.stem_line_width,
line_color=self.stem_color,
line_alpha='fill_alpha')
)
glyph_type = self.get_glyph()
glyph = glyph_type(x='x', y='height',
line_color=self.line_color,
fill_color=self.color,
size=self.size,
fill_alpha='fill_alpha',
line_alpha='line_alpha'
)
yield GlyphRenderer(glyph=glyph)
class QuartileGlyph(Interval):
"""An interval that has start and end aggregations of quartiles."""
def __init__(self, label, values, interval1, interval2, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
kwargs['start_agg'] = Quantile(interval=interval1)
kwargs['end_agg'] = Quantile(interval=interval2)
super(QuartileGlyph, self).__init__(**kwargs)
self.setup()
class BoxGlyph(AggregateGlyph):
"""Summarizes the distribution with a collection of glyphs.
    A box glyph produces one "box" for a given array of values. The box
is made up of multiple other child composite glyphs (intervals,
scatter) and directly produces glyph renderers for the whiskers,
as well.
"""
q1 = Float(help="""Derived value for 25% of all values.""")
q2 = Float(help="""Derived value for 50% of all values.""")
q3 = Float(help="""Derived value for 75% of all values.""")
iqr = Float()
w0 = Float(help='Lower whisker')
w1 = Float(help='Upper whisker')
q2_glyph = Instance(QuartileGlyph)
q3_glyph = Instance(QuartileGlyph)
whisker_glyph = Instance(GlyphRenderer)
outliers = Either(Bool, Instance(PointGlyph))
marker = String(default='circle')
whisker_width = Float(default=0.3)
whisker_line_width = Float(default=2)
whisker_span_line_width = Float(default=2)
whisker_color = String(default='black')
outlier_fill_color = String(default='red')
outlier_line_color = String(default='red')
outlier_size = Float(default=5)
bar_color = String(default='DimGrey')
def __init__(self, label, values, outliers=True, **kwargs):
width = kwargs.pop('width', None)
bar_color = kwargs.pop('color', None) or kwargs.get('bar_color') or self.lookup('bar_color').class_default()
kwargs['outliers'] = kwargs.pop('outliers', None) or outliers
kwargs['label'] = label
kwargs['values'] = values
x_label = kwargs.get('x_label')
kwargs['q2_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.25, interval2=0.5, width=width,
color=bar_color)
kwargs['q3_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.5, interval2=0.75, width=width,
color=bar_color)
super(BoxGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yields all renderers that make up the BoxGlyph."""
self.calc_quartiles()
outlier_values = self.values[((self.values < self.w0) | (self.values > self.w1))]
self.whisker_glyph = GlyphRenderer(glyph=Segment(x0='x0s', y0='y0s', x1='x1s', y1='y1s',
line_width=self.whisker_line_width,
line_color=self.whisker_color))
if len(outlier_values) > 0 and self.outliers:
self.outliers = PointGlyph(label=self.label, y=outlier_values,
x=[self.get_dodge_label()] * len(outlier_values),
line_color=self.outlier_line_color,
fill_color=self.outlier_fill_color,
size=self.outlier_size, marker=self.marker)
for comp_glyph in self.composite_glyphs:
for renderer in comp_glyph.renderers:
yield renderer
yield self.whisker_glyph
def calc_quartiles(self):
"""Sets all derived stat properties of the BoxGlyph."""
self.q1 = self.q2_glyph.start
self.q2 = self.q2_glyph.end
self.q3 = self.q3_glyph.end
self.iqr = self.q3 - self.q1
mx = Max()
mx.set_data(self.values)
mn = Min()
mn.set_data(self.values)
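        # Added note: the whiskers below follow the usual 1.5 * IQR convention,
        # clipped to the observed minimum and maximum so they never extend past
        # the data itself.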
self.w0 = max(self.q1 - (1.5 * self.iqr), mn.value)
self.w1 = min(self.q3 + (1.5 * self.iqr), mx.value)
def build_source(self):
"""Calculate stats and builds and returns source for whiskers."""
self.calc_quartiles()
x_label = self.get_dodge_label()
x_w0_label = self.get_dodge_label(shift=(self.whisker_width / 2.0))
x_w1_label = self.get_dodge_label(shift=-(self.whisker_width / 2.0))
# span0, whisker bar0, span1, whisker bar1
x0s = [x_label, x_w0_label, x_label, x_w0_label]
y0s = [self.w0, self.w0, self.q3, self.w1]
x1s = [x_label, x_w1_label, x_label, x_w1_label]
y1s = [self.q1, self.w0, self.w1, self.w1]
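        # Added note: the four segments above are, in order, the lower span
        # (w0 to q1), the lower cap at w0, the upper span (q3 to w1), and the
        # upper cap at w1.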
return dict(x0s=x0s, y0s=y0s, x1s=x1s, y1s=y1s)
def _set_sources(self):
"""Set the column data source on the whisker glyphs."""
self.whisker_glyph.data_source = self.source
def get_extent(self, func, prop_name):
return func([getattr(renderer, prop_name) for renderer in self.composite_glyphs])
@property
def composite_glyphs(self):
"""Returns list of composite glyphs, excluding the regular glyph renderers."""
comp_glyphs = [self.q2_glyph, self.q3_glyph]
if isinstance(self.outliers, PointGlyph):
comp_glyphs.append(self.outliers)
return comp_glyphs
@property
def x_max(self):
return self.get_extent(max, 'x_max') + self.right_buffer
@property
def x_min(self):
return self.get_extent(min, 'x_min') - self.left_buffer
@property
def y_max(self):
return max(self.w1, self.get_extent(max, 'y_max')) + self.top_buffer
@property
def y_min(self):
return min(self.w0, self.get_extent(min, 'y_min')) - self.bottom_buffer
class HistogramGlyph(AggregateGlyph):
"""Depicts the distribution of values using rectangles created by binning.
    The histogram represents a distribution, so it will likely grow to include
    other options for displaying it, such as KDE and cumulative density.
"""
# derived models
bins = Instance(BinnedStat, help="""A stat used to calculate the bins. The bins stat
includes attributes about each composite bin.""")
bars = List(Instance(BarGlyph), help="""The histogram is comprised of many
BarGlyphs that are derived from the values.""")
density = Bool(False, help="""
Whether to normalize the histogram.
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check :class:`~bokeh.charts.stats.Histogram` documentation.
(default: False)
""")
def __init__(self, values, label=None, color=None, bins=None, **kwargs):
if label is not None:
kwargs['label'] = label
kwargs['values'] = values
if color is not None:
kwargs['color'] = color
# remove width, since this is handled automatically
kwargs.pop('width', None)
# keep original bins setting private since it just needs to be
# delegated to the Histogram stat
self._bins = bins
super(HistogramGlyph, self).__init__(**kwargs)
self.setup()
def _set_sources(self):
# No need to set sources, since composite glyphs handle this
pass
def build_source(self):
# No need to build source, since composite glyphs handle this
return None
def build_renderers(self):
"""Yield a bar glyph for each bin."""
# TODO(fpliger): We should expose the bin stat class so we could let
        #               users specify bin stats other than the Histogram Stat
self.bins = Histogram(values=self.values, bins=self._bins,
density=self.density)
bars = []
for bin in self.bins.bins:
bars.append(BarGlyph(label=bin.label[0], x_label=bin.center,
values=bin.values, color=self.color,
fill_alpha=self.fill_alpha,
agg=bin.stat, width=bin.width))
# provide access to bars as children for bounds properties
self.bars = self.children = bars
for comp_glyph in self.bars:
for renderer in comp_glyph.renderers:
yield renderer
@property
def y_min(self):
return 0.0
class BinGlyph(XyGlyph):
"""Represents a group of data that was aggregated and is represented by a glyph.
"""
bins = Instance(Bins)
column = String()
stat = String()
glyph_name = String()
width = Float()
height = Float()
def __init__(self, x, y, values, column=None, stat='count', glyph='rect', width=1,
height=1, **kwargs):
df = pd.DataFrame(dict(x_vals=x, y_vals=y, values_vals=values))
df.drop_duplicates(inplace=True)
kwargs['x'] = df.x_vals
kwargs['y'] = df.y_vals
kwargs['values'] = df.values_vals
kwargs['column'] = column
kwargs['stat'] = stat
kwargs['glyph_name'] = glyph
kwargs['height'] = height
kwargs['width'] = width
if 'glyphs' not in kwargs:
kwargs['glyphs'] = {'rect': Rect}
super(XyGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
return {'x': self.x, 'y': self.y, 'values': self.values}
def build_renderers(self):
glyph_class = self.glyphs[self.glyph_name]
glyph = glyph_class(x='x', y='y', height=self.height, width=self.width,
fill_color=self.fill_color, line_color=self.line_color,
dilate=True)
yield GlyphRenderer(glyph=glyph)
@property
def x_max(self):
return self.get_data_range('x')[1] + self.width / 2.0
@property
def x_min(self):
return self.get_data_range('x')[0] - self.width / 2.0
@property
def y_max(self):
return self.get_data_range('y')[1] + self.height / 2.0
@property
def y_min(self):
return self.get_data_range('y')[0] - self.height / 2.0
def get_data_range(self, col):
data = self.source.data[col]
if ChartDataSource.is_number(data):
return min(data), max(data)
else:
return 1, len(data.drop_duplicates())
class ArcGlyph(LineGlyph):
"""Represents a group of data as an arc."""
start_angle = Angle()
end_angle = Angle()
def __init__(self, **kwargs):
        super(ArcGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Arc(x='x', y='y', radius=1,
start_angle='_end_angle',
end_angle='_start_angle',
line_color='line_color')
yield GlyphRenderer(glyph=glyph)
| {
"repo_name": "azjps/bokeh",
"path": "bokeh/charts/glyphs.py",
"copies": "2",
"size": "36897",
"license": "bsd-3-clause",
"hash": -4083295950435120000,
"line_mean": 33.6125703565,
"line_max": 116,
"alpha_frac": 0.5808602325,
"autogenerated": false,
"ratio": 3.8720747192779936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006689601324938535,
"num_lines": 1066
} |
from __future__ import absolute_import, division
from collections import OrderedDict
from . import utils
class DefaultPerformance(object):
def __init__(self):
pass
def update(self, trial, status):
pass
    def display(self, output=True):
        pass
class Performance2AFC(object):
def __init__(self):
self.decisions = []
self.corrects = []
self.choices = []
self.t_choices = []
#self.rewards = []
def update(self, trial, status):
#self.rewards.append(reward)
if 'correct' in status:
self.decisions.append(True)
self.corrects.append(status['correct'])
if 'choice' in status:
self.choices.append(status['choice'])
else:
self.choices.append(None)
if 't_choice' in status:
self.t_choices.append(status['t_choice'])
else:
self.t_choices.append(None)
else:
self.decisions.append(False)
self.corrects.append(False)
self.choices.append(None)
self.t_choices.append(None)
@property
def n_trials(self):
return len(self.decisions)
@property
def n_decision(self):
return sum(self.decisions)
@property
def n_correct(self):
return sum(self.corrects)
def display(self, output=True):
n_trials = self.n_trials
n_decision = self.n_decision
n_correct = self.n_correct
items = OrderedDict()
items['P(choice)'] = '{}/{} = {:.3f}'.format(n_decision, n_trials,
n_decision/n_trials)
if n_decision > 0:
items['P(correct|choice)'] = '{}/{} = {:.3f}'.format(n_correct, n_decision,
n_correct/n_decision)
if output:
utils.print_dict(items)
return items
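# Added usage sketch (not part of the original module): the trial dict and the
# status keys below are illustrative assumptions based on how update() reads
# them, not a documented API. Run in the package context, e.g.
# `python -m pyrl.performance`.
if __name__ == '__main__':
    perf = Performance2AFC()
    perf.update({'stim': 1}, {'correct': True, 'choice': 'L', 't_choice': 0.42})
    perf.update({'stim': 2}, {})  # a trial on which no decision was made
    perf.display()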
class PerformancePostdecisionWager(object):
def __init__(self):
self.wagers = []
self.corrects = []
self.choices = []
self.t_choices = []
def update(self, trial, status):
self.wagers.append(trial['wager'])
self.corrects.append(status.get('correct'))
self.choices.append(status.get('choice'))
self.t_choices.append(status.get('t_choice'))
@property
def n_correct(self):
return sum([c for c in self.corrects if c is not None])
@property
def n_sure_decision(self):
return len([1 for w, c in zip(self.wagers, self.choices) if w and c is not None])
@property
def n_trials(self):
return len(self.choices)
@property
def n_decision(self):
return len([1 for c in self.choices if c in ['L', 'R']])
@property
def n_sure(self):
return len([1 for c in self.choices if c == 'S'])
@property
def n_answer(self):
return len([1 for c in self.choices if c is not None])
@property
def n_wager(self):
return sum(self.wagers)
def display(self, output=True):
n_trials = self.n_trials
n_decision = self.n_decision
n_correct = self.n_correct
n_sure_decision = self.n_sure_decision
n_sure = self.n_sure
n_answer = self.n_answer
n_wager = self.n_wager
items = OrderedDict()
items['P(answer)'] = '{}/{} = {:.3f}'.format(n_answer, n_trials,
n_answer/n_trials)
items['P(decision)'] = '{}/{} = {:.3f}'.format(n_decision, n_trials,
n_decision/n_trials)
if n_decision > 0:
items['P(correct|decision)'] = '{}/{} = {:.3f}'.format(n_correct, n_decision,
n_correct/n_decision)
items['P(wager trials)'] = '{}/{} = {:.3f}'.format(n_wager, n_trials,
n_wager/n_trials)
if n_sure_decision > 0:
items['P(sure)'] = '{}/{} = {:.3f}'.format(n_sure, n_sure_decision,
n_sure/n_sure_decision)
if output:
utils.print_dict(items)
return items
| {
"repo_name": "frsong/pyrl",
"path": "pyrl/performance.py",
"copies": "1",
"size": "4362",
"license": "mit",
"hash": 3924310844509342000,
"line_mean": 30.6086956522,
"line_max": 89,
"alpha_frac": 0.5018340211,
"autogenerated": false,
"ratio": 3.8842386464826357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864793675848581,
"avg_score": 0.004255798346810942,
"num_lines": 138
} |
from __future__ import absolute_import, division
from copy import copy
from functools import partial
from .auto import tqdm as tqdm_auto
try:
import keras
except ImportError as e:
try:
from tensorflow import keras
except ImportError:
raise e
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TqdmCallback']
class TqdmCallback(keras.callbacks.Callback):
"""Keras callback for epoch and batch progress."""
@staticmethod
def bar2callback(bar, pop=None, delta=(lambda logs: 1)):
def callback(_, logs=None):
n = delta(logs)
if logs:
if pop:
logs = copy(logs)
                    for i in pop:
                        logs.pop(i, 0)
bar.set_postfix(logs, refresh=False)
bar.update(n)
return callback
def __init__(self, epochs=None, data_size=None, batch_size=None, verbose=1,
tqdm_class=tqdm_auto, **tqdm_kwargs):
"""
Parameters
----------
epochs : int, optional
data_size : int, optional
Number of training pairs.
batch_size : int, optional
Number of training pairs per batch.
verbose : int
0: epoch, 1: batch (transient), 2: batch. [default: 1].
Will be set to `0` unless both `data_size` and `batch_size`
are given.
tqdm_class : optional
`tqdm` class to use for bars [default: `tqdm.auto.tqdm`].
tqdm_kwargs : optional
Any other arguments used for all bars.
"""
if tqdm_kwargs:
tqdm_class = partial(tqdm_class, **tqdm_kwargs)
self.tqdm_class = tqdm_class
self.epoch_bar = tqdm_class(total=epochs, unit='epoch')
self.on_epoch_end = self.bar2callback(self.epoch_bar)
if data_size and batch_size:
self.batches = batches = (data_size + batch_size - 1) // batch_size
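            # Added note: the expression above is ceiling division, e.g. 1000
            # samples at batch_size 32 gives (1000 + 31) // 32 = 32 batches.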
else:
self.batches = batches = None
self.verbose = verbose
if verbose == 1:
self.batch_bar = tqdm_class(total=batches, unit='batch', leave=False)
self.on_batch_end = self.bar2callback(
self.batch_bar, pop=['batch', 'size'],
delta=lambda logs: logs.get('size', 1))
def on_train_begin(self, *_, **__):
params = self.params.get
auto_total = params('epochs', params('nb_epoch', None))
if auto_total is not None:
self.epoch_bar.reset(total=auto_total)
def on_epoch_begin(self, *_, **__):
if self.verbose:
params = self.params.get
total = params('samples', params(
'nb_sample', params('steps', None))) or self.batches
if self.verbose == 2:
if hasattr(self, 'batch_bar'):
self.batch_bar.close()
self.batch_bar = self.tqdm_class(
total=total, unit='batch', leave=True,
unit_scale=1 / (params('batch_size', 1) or 1))
self.on_batch_end = self.bar2callback(
self.batch_bar, pop=['batch', 'size'],
delta=lambda logs: logs.get('size', 1))
elif self.verbose == 1:
self.batch_bar.unit_scale = 1 / (params('batch_size', 1) or 1)
self.batch_bar.reset(total=total)
else:
raise KeyError('Unknown verbosity')
def on_train_end(self, *_, **__):
if self.verbose:
self.batch_bar.close()
self.epoch_bar.close()
def display(self):
"""Displays in the current cell in Notebooks."""
container = getattr(self.epoch_bar, 'container', None)
if container is None:
return
from .notebook import display
display(container)
batch_bar = getattr(self, 'batch_bar', None)
if batch_bar is not None:
display(batch_bar.container)
@staticmethod
def _implements_train_batch_hooks():
return True
@staticmethod
def _implements_test_batch_hooks():
return True
@staticmethod
def _implements_predict_batch_hooks():
return True
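# Added usage sketch (not part of the original module): a minimal, hedged
# example of attaching the callback to a toy model; the layer sizes and random
# data are arbitrary illustrations, and the module is assumed to be importable
# as part of the tqdm package.
if __name__ == '__main__':
    import numpy as np
    x = np.random.rand(256, 4)
    y = np.random.rand(256, 1)
    model = keras.models.Sequential([
        keras.layers.Dense(8, activation='relu', input_shape=(4,)),
        keras.layers.Dense(1)])
    model.compile(optimizer='adam', loss='mse')
    # verbose=0 silences Keras' own logging so only the tqdm bars are shown.
    model.fit(x, y, epochs=5, batch_size=32, verbose=0,
              callbacks=[TqdmCallback(verbose=1, data_size=256, batch_size=32)])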
| {
"repo_name": "kayhayen/Nuitka",
"path": "nuitka/build/inline_copy/tqdm/tqdm/keras.py",
"copies": "1",
"size": "4211",
"license": "apache-2.0",
"hash": 1940361953200048000,
"line_mean": 33.8016528926,
"line_max": 81,
"alpha_frac": 0.5469009736,
"autogenerated": false,
"ratio": 3.899074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49459750476740744,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.shortcuts import render
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Callable, Dict, List, Optional, Set, Text, \
Tuple, Type, Union
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render(request,
'analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True]
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True, False]
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = [True, True]
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
# these are further re-written by client_label_map
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
labels_sort_function = sort_client_labels
include_empty_subgroups = [False, False]
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
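    # Added example: for {'human': [1, 2], 'bot': [5, 5]} this returns
    # ['bot', 'human'], since 10 > 3.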
totals = []
for label, values in value_arrays.items():
totals.append((label, sum(values)))
totals.sort(key=lambda label_total: label_total[1], reverse=True)
return [label for label, total in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise AssertionError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "New iOS app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
# Clients in dev environment autogenerated data start with _ so
# that it's easy to manually drop without affecting other data.
if settings.DEVELOPMENT and name.startswith("_"):
return name[1:]
return name
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
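    # Added example: 'desktop app 1.0' and 'desktop app 2.0' both map to
    # 'Old desktop app', so their count arrays are summed element-wise.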
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
if len(subgroups) != len(labels):
raise AssertionError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title, is_home=True),
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    #: `Any` used above should be `Union[int, datetime]`.
#: However current version of `Union` does not work inside other function.
#: We could use something like:
# `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
#: but that would require this long `Union` to carry on throughout inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
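                    # Added example: a query like '/api/v1/external/github'
                    # re-labels the client as 'github'.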
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = timezone.now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render(
request,
'analytics/activity.html',
context=dict(data=data, realm_link=realm_link, title=title),
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title),
)
| {
"repo_name": "susansls/zulip",
"path": "analytics/views.py",
"copies": "3",
"size": "36986",
"license": "apache-2.0",
"hash": 4410646678229862400,
"line_mean": 32.4412296564,
"line_max": 123,
"alpha_frac": 0.5574000973,
"autogenerated": false,
"ratio": 3.889169295478444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5946569392778445,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Tuple, Optional, Sequence, Callable, Type, \
Union, Text
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render_to_response('analytics/stats.html')
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
realm = user_profile.realm
# These are implicitly relying on realm.date_created and timezone.now being in UTC.
if start is None:
start = realm.date_created
if end is None:
end = timezone.now()
if start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_humans_and_bots':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
include_empty_subgroups = False
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table in tables:
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups)
return json_success(data=data)
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise ValueError("Unknown table: %s" % (table,))
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], Optional[List[str]], bool) -> Dict[str, List[int]]
if labels is None:
labels = subgroups
if len(subgroups) != len(labels):
raise ValueError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
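# Shape of the result built above (realm name and counts are illustrative): one entry
# per realm string_id whose 'cnts' value is a run of <td> cells for ages 0..7, with the
# realm's minimum day marked 'bad' and its maximum marked 'good':
#
#     {'zulip': {'cnts': '<td class="number good">42</td>'
#                        '<td class="number bad">0</td>...'}}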
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
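# The accumulation above clamps each interval to the 24-hour window before summing.
# A worked example with hypothetical times:
#
#     day_start = 2017-01-02 00:00, day_end = 2017-01-03 00:00
#     interval  = (2017-01-01 23:00, 2017-01-02 01:30)
#     start = max(day_start, interval.start) -> 2017-01-02 00:00
#     end   = min(day_end, interval.end)     -> 2017-01-02 01:30
#     duration += 1:30:00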
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: The `Any` used above should really be `Union[int, datetime]`.
#: However, the current version of `Union` does not work inside nested functions.
#: We could use something like
#: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
#: but that long `Union` would then have to be carried through all the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
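# Sketch of the summary structure built above (keys and numbers are illustrative):
# every record feeds the 'use' bucket, send/pointer queries feed 'send'/'pointer',
# and each client name gets a bucket of its own:
#
#     {'name': 'Full Name',
#      'use':     {'count': 120, 'last_visit': <datetime>},
#      'send':    {'count': 40,  'last_visit': <datetime>},
#      'website': {'count': 80,  'last_visit': <datetime>}}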
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Sequence[str]]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| {
"repo_name": "isht3/zulip",
"path": "analytics/views.py",
"copies": "1",
"size": "33085",
"license": "apache-2.0",
"hash": -1240770248560341000,
"line_mean": 31.4045053869,
"line_max": 133,
"alpha_frac": 0.5450808524,
"autogenerated": false,
"ratio": 3.9098321909714016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9949005275201953,
"avg_score": 0.0011815536338897312,
"num_lines": 1021
} |
from __future__ import absolute_import, division
from django.core import urlresolvers
from django.db import connection
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import RealmCount, UserCount
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, UserActivityInterval
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Tuple, Optional, Sequence, Callable, Union, Text
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render_to_response('analytics/stats.html')
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
realm = user_profile.realm
if chart_name == 'messages_sent_to_realm':
data = get_messages_sent_to_realm(realm, min_length=min_length, start=start, end=end)
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
return json_success(data=data)
def get_messages_sent_to_realm(realm, min_length=None, start=None, end=None):
# type: (Realm, Optional[int], Optional[datetime], Optional[datetime]) -> Dict[str, Any]
# These are implicitly relying on realm.date_created and timezone.now being in UTC.
if start is None:
start = realm.date_created
if end is None:
end = timezone.now()
if start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
frequency = CountStat.DAY
end_times = time_range(start, end, frequency, min_length)
indices = {}
for i, end_time in enumerate(end_times):
indices[end_time] = i
filter_set = RealmCount.objects.filter(
realm=realm, property='messages_sent:is_bot', interval=frequency) \
.values_list('end_time', 'value')
humans = [0]*len(end_times)
for end_time, value in filter_set.filter(subgroup='false'):
humans[indices[end_time]] = value
bots = [0]*len(end_times)
for end_time, value in filter_set.filter(subgroup='true'):
bots[indices[end_time]] = value
return {'end_times': end_times, 'humans': humans, 'bots': bots,
'frequency': frequency, 'interval': frequency}
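# Illustrative sketch of the indexing above (values hypothetical): with three daily
# end_times, indices maps each end_time to its slot, and any end_time missing from
# the RealmCount rows keeps its zero:
#
#     end_times = [d1, d2, d3]                        # indices == {d1: 0, d2: 1, d3: 2}
#     rows for subgroup 'false': (d1, 10), (d3, 7)    -> humans == [10, 0, 7]
#     rows for subgroup 'true':  (d2, 4)              -> bots   == [0, 4, 0]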
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: The `Any` used above should really be `Union[int, datetime]`.
#: However, the current version of `Union` does not work inside nested functions.
#: We could use something like
#: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
#: but that long `Union` would then have to be carried through all the inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Sequence[str]]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| {
"repo_name": "Diptanshu8/zulip",
"path": "analytics/views.py",
"copies": "2",
"size": "30695",
"license": "apache-2.0",
"hash": -5163053249069646000,
"line_mean": 30.5467625899,
"line_max": 115,
"alpha_frac": 0.5391106043,
"autogenerated": false,
"ratio": 3.915678020155632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001142126483196238,
"num_lines": 973
} |
from __future__ import absolute_import, division
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from sentry.models import Group, GroupMeta
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class GroupPluginActionView(ProjectView):
required_scope = 'event:read'
def handle(self, request, organization, team, project, group_id, slug):
group = get_object_or_404(Group, pk=group_id, project=project)
try:
plugin = plugins.get(slug)
except KeyError:
raise Http404('Plugin not found')
GroupMeta.objects.populate_cache([group])
response = plugin.get_view_response(request, group)
if response:
return response
redirect = request.META.get('HTTP_REFERER') or reverse('sentry-stream', kwargs={
'organization_slug': organization.slug,
'project_id': group.project.slug
})
return HttpResponseRedirect(redirect)
| {
"repo_name": "mitsuhiko/sentry",
"path": "src/sentry/web/frontend/group_plugin_action.py",
"copies": "4",
"size": "1079",
"license": "bsd-3-clause",
"hash": -7151608810416036000,
"line_mean": 31.696969697,
"line_max": 88,
"alpha_frac": 0.6895273401,
"autogenerated": false,
"ratio": 4.333333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7022860673433333,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from sentry.models import Event, Group, GroupMeta, get_group_with_redirect
from sentry.utils import json
from sentry.web.frontend.base import ProjectView
class GroupEventJsonView(ProjectView):
required_scope = 'event:read'
def get(self, request, organization, project, group_id, event_id_or_latest):
try:
# TODO(tkaemming): This should *actually* redirect, see similar
# comment in ``GroupEndpoint.convert_args``.
group, _ = get_group_with_redirect(
group_id,
queryset=Group.objects.filter(project=project),
)
except Group.DoesNotExist:
raise Http404
if event_id_or_latest == 'latest':
# It's possible that a message would not be created under certain
# circumstances (such as a post_save signal failing)
event = group.get_latest_event() or Event(group=group)
else:
event = get_object_or_404(group.event_set, pk=event_id_or_latest)
Event.objects.bind_nodes([event], 'data')
GroupMeta.objects.populate_cache([group])
return HttpResponse(json.dumps(event.as_dict()), content_type='application/json')
| {
"repo_name": "ifduyue/sentry",
"path": "src/sentry/web/frontend/group_event_json.py",
"copies": "2",
"size": "1352",
"license": "bsd-3-clause",
"hash": -4902960208101146000,
"line_mean": 37.6285714286,
"line_max": 89,
"alpha_frac": 0.6568047337,
"autogenerated": false,
"ratio": 4.035820895522388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006701940035273369,
"num_lines": 35
} |
from __future__ import absolute_import, division
from django.http import Http404, HttpResponse
from sentry import eventstore
from sentry.models import Event, Group, GroupMeta, get_group_with_redirect
from sentry.utils import json
from sentry.web.frontend.base import OrganizationView
class GroupEventJsonView(OrganizationView):
required_scope = "event:read"
def get(self, request, organization, group_id, event_id_or_latest):
try:
# TODO(tkaemming): This should *actually* redirect, see similar
# comment in ``GroupEndpoint.convert_args``.
group, _ = get_group_with_redirect(group_id)
except Group.DoesNotExist:
raise Http404
if event_id_or_latest == "latest":
event = group.get_latest_event()
else:
event = eventstore.get_event_by_id(group.project.id, event_id_or_latest)
if event is None:
raise Http404
Event.objects.bind_nodes([event], "data")
GroupMeta.objects.populate_cache([group])
return HttpResponse(json.dumps(event.as_dict()), content_type="application/json")
| {
"repo_name": "mvaled/sentry",
"path": "src/sentry/web/frontend/group_event_json.py",
"copies": "1",
"size": "1137",
"license": "bsd-3-clause",
"hash": 2701015835722951000,
"line_mean": 32.4411764706,
"line_max": 89,
"alpha_frac": 0.6684256816,
"autogenerated": false,
"ratio": 3.9894736842105263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5157899365810525,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.http import Http404, HttpResponse
from sentry import eventstore
from sentry.models import Group, GroupMeta, get_group_with_redirect
from sentry.utils import json
from sentry.web.frontend.base import OrganizationView
class GroupEventJsonView(OrganizationView):
required_scope = "event:read"
def get(self, request, organization, group_id, event_id_or_latest):
try:
# TODO(tkaemming): This should *actually* redirect, see similar
# comment in ``GroupEndpoint.convert_args``.
group, _ = get_group_with_redirect(group_id)
except Group.DoesNotExist:
raise Http404
if event_id_or_latest == "latest":
event = group.get_latest_event()
else:
event = eventstore.get_event_by_id(group.project.id, event_id_or_latest)
if event is None:
raise Http404
GroupMeta.objects.populate_cache([group])
return HttpResponse(json.dumps(event.as_dict()), content_type="application/json")
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/web/frontend/group_event_json.py",
"copies": "1",
"size": "1080",
"license": "bsd-3-clause",
"hash": -107375942649704210,
"line_mean": 31.7272727273,
"line_max": 89,
"alpha_frac": 0.6712962963,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51712962963,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.http import is_safe_url
from sentry.models import Group, GroupMeta
from sentry.plugins.base import plugins
from sentry.web.frontend.base import ProjectView
class GroupPluginActionView(ProjectView):
required_scope = "event:read"
def handle(self, request, organization, project, group_id, slug):
group = get_object_or_404(Group, pk=group_id, project=project)
try:
plugin = plugins.get(slug)
except KeyError:
raise Http404("Plugin not found")
GroupMeta.objects.populate_cache([group])
response = plugin.get_view_response(request, group)
if response:
return response
redirect = request.META.get("HTTP_REFERER", "")
if not is_safe_url(redirect, host=request.get_host()):
redirect = u"/{}/{}/".format(organization.slug, group.project.slug)
return HttpResponseRedirect(redirect)
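# Behaviour sketch for the fallback above (hosts and paths are hypothetical): a referer
# on the current host is kept, anything else is replaced by the group's project page.
#
#     is_safe_url('https://sentry.example.com/org/proj/', host='sentry.example.com')  # kept
#     is_safe_url('https://evil.example.net/', host='sentry.example.com')             # falls back to u"/org/proj/"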
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/web/frontend/group_plugin_action.py",
"copies": "2",
"size": "1077",
"license": "bsd-3-clause",
"hash": 7793431919279023000,
"line_mean": 32.65625,
"line_max": 79,
"alpha_frac": 0.6889507892,
"autogenerated": false,
"ratio": 4.126436781609195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5815387570809195,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from sudo.utils import is_safe_url
from sentry.models import Group, GroupMeta
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class GroupPluginActionView(ProjectView):
required_scope = 'event:read'
def handle(self, request, organization, project, group_id, slug):
group = get_object_or_404(Group, pk=group_id, project=project)
try:
plugin = plugins.get(slug)
except KeyError:
raise Http404('Plugin not found')
GroupMeta.objects.populate_cache([group])
response = plugin.get_view_response(request, group)
if response:
return response
redirect = request.META.get('HTTP_REFERER', '')
if not is_safe_url(redirect, host=request.get_host()):
redirect = '/{}/{}/'.format(
organization.slug,
group.project.slug,
)
return HttpResponseRedirect(redirect)
| {
"repo_name": "ifduyue/sentry",
"path": "src/sentry/web/frontend/group_plugin_action.py",
"copies": "2",
"size": "1111",
"license": "bsd-3-clause",
"hash": -8767747537867899000,
"line_mean": 30.7428571429,
"line_max": 70,
"alpha_frac": 0.6579657966,
"autogenerated": false,
"ratio": 4.273076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5931042719676922,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from sentry.models import Group, GroupMeta, Event
from sentry.utils import json
from sentry.web.frontend.base import ProjectView
class GroupEventJsonView(ProjectView):
required_scope = 'event:read'
def get(self, request, organization, project, team, group_id, event_id_or_latest):
group = get_object_or_404(Group, pk=group_id, project=project)
if event_id_or_latest == 'latest':
# It's possible that a message would not be created under certain
# circumstances (such as a post_save signal failing)
event = group.get_latest_event() or Event(group=group)
else:
event = get_object_or_404(group.event_set, pk=event_id_or_latest)
Event.objects.bind_nodes([event], 'data')
GroupMeta.objects.populate_cache([group])
return HttpResponse(json.dumps(event.as_dict()), mimetype='application/json')
| {
"repo_name": "nicholasserra/sentry",
"path": "src/sentry/web/frontend/group_event_json.py",
"copies": "3",
"size": "1032",
"license": "bsd-3-clause",
"hash": -8794861496284620000,
"line_mean": 37.2222222222,
"line_max": 86,
"alpha_frac": 0.6947674419,
"autogenerated": false,
"ratio": 3.8222222222222224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6016989664122223,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from itertools import chain
from PySide import QtGui
from .. import wave_functions
class WaveScene(QtGui.QGraphicsScene):
def __init__(self, waveTable):
super(WaveScene, self).__init__()
self._table = waveTable
self.gridStep = 0x10
self.updatePaths()
self.gridPen = QtGui.QPen(QtGui.QColor.fromRgb(0x00, 0x40, 0xFF, 0x40))
self.originPen = QtGui.QPen(QtGui.QColor.fromRgb(0xFF, 0x40, 0x00, 0x80))
def updatePaths(self):
"""
Generate the painter paths for the background grid and the origin axes.
"""
grid_step = self.gridStep
grid_x_min = 0x00
grid_x_max = wave_functions.WAVE_LENGTH
grid_x_origin = 0x00
grid_y_max = 0x80 + grid_step
grid_y_min = -grid_y_max
grid_y_origin = 0x00
path = QtGui.QPainterPath()
# Draw x-grid
for x in range(grid_x_min, grid_x_max + 1, grid_step):
path.moveTo(x, grid_y_min)
path.lineTo(x, grid_y_max)
# Draw y-grid
for y in range(grid_step, grid_y_max + 1, grid_step):
path.moveTo(grid_x_min, y)
path.lineTo(grid_x_max, y)
path.moveTo(grid_x_min, -y)
path.lineTo(grid_x_max, -y)
self.gridPath = path
path = QtGui.QPainterPath()
path.moveTo(grid_x_origin, grid_y_min)
path.lineTo(grid_x_origin, grid_y_max)
path.moveTo(grid_x_min, grid_y_origin)
path.lineTo(grid_x_max, grid_y_origin)
self.originPath = path
def render(self):
self.clear()
# Re-draw paths
self.addPath(self.gridPath, self.gridPen)
self.addPath(self.originPath, self.originPen)
x1, y1 = None, None
for x2, y2 in enumerate(chain(self._table, self._table, self._table)):
y2 = -y2
x2 -= wave_functions.WAVE_LENGTH
# Translate into a center position
if x1 is not None:
self.addLine(x1, y1, x2, y2)
x1, y1 = x2, y2
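# A short note on the loop above (sketch, not from the original source): chaining the
# table three times and subtracting WAVE_LENGTH from the enumerate index plots the
# waveform over [-WAVE_LENGTH, 2*WAVE_LENGTH), i.e. one copy to the left of the origin,
# one centred copy and one to the right, so the periodic continuation stays visible.
#
#     index 0            -> plotted at x = -WAVE_LENGTH   (start of the left copy)
#     index WAVE_LENGTH  -> plotted at x = 0              (start of the centre copy)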
| {
"repo_name": "timsavage/funktion-generator",
"path": "utils/wave-editor/wave_editor/gui/widgets.py",
"copies": "1",
"size": "2064",
"license": "bsd-3-clause",
"hash": 135686952714937060,
"line_mean": 28.0704225352,
"line_max": 81,
"alpha_frac": 0.5683139535,
"autogenerated": false,
"ratio": 3.2149532710280373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9282408413123021,
"avg_score": 0.00017176228100309173,
"num_lines": 71
} |
from __future__ import absolute_import, division
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.figure_factory import utils
from plotly.graph_objs import graph_objs
from plotly.validators.heatmap import ColorscaleValidator
# Optional imports, may be None for users that only use our core functionality.
np = optional_imports.get_module("numpy")
def validate_annotated_heatmap(z, x, y, annotation_text):
"""
Annotated-heatmap-specific validations
Check that if a text matrix is supplied, it has the same
dimensions as the z matrix.
See FigureFactory.create_annotated_heatmap() for params
:raises: (PlotlyError) If z and text matrices do not have the same
dimensions.
"""
if annotation_text is not None and isinstance(annotation_text, list):
utils.validate_equal_length(z, annotation_text)
for lst in range(len(z)):
if len(z[lst]) != len(annotation_text[lst]):
raise exceptions.PlotlyError(
"z and text should have the " "same dimensions"
)
if x:
if len(x) != len(z[0]):
raise exceptions.PlotlyError(
"oops, the x list that you "
"provided does not match the "
"width of your z matrix "
)
if y:
if len(y) != len(z):
raise exceptions.PlotlyError(
"oops, the y list that you "
"provided does not match the "
"length of your z matrix "
)
def create_annotated_heatmap(
z,
x=None,
y=None,
annotation_text=None,
colorscale="Plasma",
font_colors=None,
showscale=False,
reversescale=False,
**kwargs
):
"""
Function that creates annotated heatmaps
This function adds annotations to each cell of the heatmap.
:param (list[list]|ndarray) z: z matrix to create heatmap.
:param (list) x: x axis labels.
:param (list) y: y axis labels.
:param (list[list]|ndarray) annotation_text: Text strings for
annotations. Should have the same dimensions as the z matrix. If no
text is added, the values of the z matrix are annotated. Default =
z matrix values.
:param (list|str) colorscale: heatmap colorscale.
:param (list) font_colors: List of two color strings: [min_text_color,
max_text_color] where min_text_color is applied to annotations for
heatmap values < (max_value + min_value)/2. If font_colors is not
defined, the colors are defined logically as black or white
depending on the heatmap's colorscale.
:param (bool) showscale: Display colorscale. Default = False
:param (bool) reversescale: Reverse colorscale. Default = False
:param kwargs: kwargs passed through plotly.graph_objs.Heatmap.
These kwargs describe other attributes about the annotated Heatmap
trace such as the colorscale. For more information on valid kwargs
call help(plotly.graph_objs.Heatmap)
Example 1: Simple annotated heatmap with default configuration
>>> import plotly.figure_factory as ff
>>> z = [[0.300000, 0.00000, 0.65, 0.300000],
... [1, 0.100005, 0.45, 0.4300],
... [0.300000, 0.00000, 0.65, 0.300000],
... [1, 0.100005, 0.45, 0.00000]]
>>> fig = ff.create_annotated_heatmap(z)
>>> fig.show()
"""
# Avoiding mutables in the call signature
font_colors = font_colors if font_colors is not None else []
validate_annotated_heatmap(z, x, y, annotation_text)
# validate colorscale
colorscale_validator = ColorscaleValidator()
colorscale = colorscale_validator.validate_coerce(colorscale)
annotations = _AnnotatedHeatmap(
z, x, y, annotation_text, colorscale, font_colors, reversescale, **kwargs
).make_annotations()
if x or y:
trace = dict(
type="heatmap",
z=z,
x=x,
y=y,
colorscale=colorscale,
showscale=showscale,
reversescale=reversescale,
**kwargs
)
layout = dict(
annotations=annotations,
xaxis=dict(ticks="", dtick=1, side="top", gridcolor="rgb(0, 0, 0)"),
yaxis=dict(ticks="", dtick=1, ticksuffix=" "),
)
else:
trace = dict(
type="heatmap",
z=z,
colorscale=colorscale,
showscale=showscale,
reversescale=reversescale,
**kwargs
)
layout = dict(
annotations=annotations,
xaxis=dict(
ticks="", side="top", gridcolor="rgb(0, 0, 0)", showticklabels=False
),
yaxis=dict(ticks="", ticksuffix=" ", showticklabels=False),
)
data = [trace]
return graph_objs.Figure(data=data, layout=layout)
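# Usage sketch (illustrative, complementing the docstring example above):
# passing font_colors explicitly overrides the automatic black/white choice.
# >>> import plotly.figure_factory as ff
# >>> z = [[0.1, 0.9], [0.6, 0.3]]
# >>> fig = ff.create_annotated_heatmap(
# ...     z, colorscale="Viridis", font_colors=["white", "black"], showscale=True)
# >>> fig.show()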
def to_rgb_color_list(color_str, default):
if "rgb" in color_str:
return [int(v) for v in color_str.strip("rgb()").split(",")]
elif "#" in color_str:
return clrs.hex_to_rgb(color_str)
else:
return default
def should_use_black_text(background_color):
return (
background_color[0] * 0.299
+ background_color[1] * 0.587
+ background_color[2] * 0.114
) > 186
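# Worked example: pure white (255, 255, 255) scores
# 255*0.299 + 255*0.587 + 255*0.114 = 255 > 186, so black text is chosen,
# while a dark navy (0, 0, 128) scores 128*0.114 ~= 14.6 and gets white text.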
class _AnnotatedHeatmap(object):
"""
Refer to FigureFactory.create_annotated_heatmap() for docstring
"""
def __init__(
self, z, x, y, annotation_text, colorscale, font_colors, reversescale, **kwargs
):
self.z = z
if x:
self.x = x
else:
self.x = range(len(z[0]))
if y:
self.y = y
else:
self.y = range(len(z))
if annotation_text is not None:
self.annotation_text = annotation_text
else:
self.annotation_text = self.z
self.colorscale = colorscale
self.reversescale = reversescale
self.font_colors = font_colors
if np and isinstance(self.z, np.ndarray):
self.zmin = np.amin(self.z)
self.zmax = np.amax(self.z)
else:
self.zmin = min([v for row in self.z for v in row])
self.zmax = max([v for row in self.z for v in row])
if kwargs.get("zmin", None) is not None:
self.zmin = kwargs["zmin"]
if kwargs.get("zmax", None) is not None:
self.zmax = kwargs["zmax"]
self.zmid = (self.zmax + self.zmin) / 2
if kwargs.get("zmid", None) is not None:
self.zmid = kwargs["zmid"]
def get_text_color(self):
"""
Get font color for annotations.
The annotated heatmap can feature two text colors: min_text_color and
max_text_color. The min_text_color is applied to annotations for
heatmap values < (max_value + min_value)/2. The user can define these
two colors. Otherwise the colors are defined logically as black or
white depending on the heatmap's colorscale.
:rtype (string, string) min_text_color, max_text_color: text
color for annotations for heatmap values <
(max_value + min_value)/2 and text color for annotations for
heatmap values >= (max_value + min_value)/2
"""
# Plotly colorscales ranging from a lighter shade to a darker shade
colorscales = [
"Greys",
"Greens",
"Blues",
"YIGnBu",
"YIOrRd",
"RdBu",
"Picnic",
"Jet",
"Hot",
"Blackbody",
"Earth",
"Electric",
"Viridis",
"Cividis",
]
# Plotly colorscales ranging from a darker shade to a lighter shade
colorscales_reverse = ["Reds"]
white = "#FFFFFF"
black = "#000000"
if self.font_colors:
min_text_color = self.font_colors[0]
max_text_color = self.font_colors[-1]
elif self.colorscale in colorscales and self.reversescale:
min_text_color = black
max_text_color = white
elif self.colorscale in colorscales:
min_text_color = white
max_text_color = black
elif self.colorscale in colorscales_reverse and self.reversescale:
min_text_color = white
max_text_color = black
elif self.colorscale in colorscales_reverse:
min_text_color = black
max_text_color = white
elif isinstance(self.colorscale, list):
min_col = to_rgb_color_list(self.colorscale[0][1], [255, 255, 255])
max_col = to_rgb_color_list(self.colorscale[-1][1], [255, 255, 255])
# swap min/max colors if reverse scale
if self.reversescale:
min_col, max_col = max_col, min_col
if should_use_black_text(min_col):
min_text_color = black
else:
min_text_color = white
if should_use_black_text(max_col):
max_text_color = black
else:
max_text_color = white
else:
min_text_color = black
max_text_color = black
return min_text_color, max_text_color
def make_annotations(self):
"""
Get annotations for each cell of the heatmap with graph_objs.Annotation
:rtype (list[dict]) annotations: list of annotations for each cell of
the heatmap
"""
min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)
annotations = []
for n, row in enumerate(self.z):
for m, val in enumerate(row):
font_color = min_text_color if val < self.zmid else max_text_color
annotations.append(
graph_objs.layout.Annotation(
text=str(self.annotation_text[n][m]),
x=self.x[m],
y=self.y[n],
xref="x1",
yref="y1",
font=dict(color=font_color),
showarrow=False,
)
)
return annotations
| {
"repo_name": "plotly/plotly.py",
"path": "packages/python/plotly/plotly/figure_factory/_annotated_heatmap.py",
"copies": "1",
"size": "10279",
"license": "mit",
"hash": -164766190271062240,
"line_mean": 32.4820846906,
"line_max": 87,
"alpha_frac": 0.5679540811,
"autogenerated": false,
"ratio": 3.9292813455657494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4997235426665749,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from scout.constants import CONSEQUENCE, FEATURE_TYPES, SO_TERM_KEYS
from .transcript import Transcript
gene = dict(
# The hgnc gene id
hgnc_id=int, # required
hgnc_symbol=str,
# A list of Transcript objects
transcripts=list, # list of <transcript>
# This is the worst functional impact of all transcripts
functional_annotation=str, # choices=SO_TERM_KEYS
# This is the region of the most severe functional impact
region_annotation=str, # choices=FEATURE_TYPES
# This is most severe sift prediction of all transcripts
sift_prediction=str, # choices=CONSEQUENCE
# This is most severe polyphen prediction of all transcripts
polyphen_prediction=str, # choices=CONSEQUENCE
# The SpliceAI predictions for the transcript with the most severe consequence
spliceai_score=float, # highest delta score
spliceai_position=int, # position relative to the variant for prediction with highest delta score
spliceai_prediction=list, # list of str, with more detailed spliceai info
)
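# Illustrative example (not part of the schema; values below are made up):
# gene = dict(
#     hgnc_id=12345,
#     hgnc_symbol="EXAMPLE1",
#     transcripts=[],
#     functional_annotation="missense_variant",
#     region_annotation="exonic",
#     sift_prediction="deleterious",
#     polyphen_prediction="probably_damaging",
#     spliceai_score=0.12,
#     spliceai_position=-4,
#     spliceai_prediction=["acceptor_gain 0.05", "donor_loss 0.12"],
# )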
| {
"repo_name": "Clinical-Genomics/scout",
"path": "scout/models/variant/gene.py",
"copies": "1",
"size": "1095",
"license": "bsd-3-clause",
"hash": -5490195804594078000,
"line_mean": 42.8,
"line_max": 102,
"alpha_frac": 0.7415525114,
"autogenerated": false,
"ratio": 3.7889273356401385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030479847040139,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from sqlalchemy.engine.base import Engine
from sqlalchemy.exc import StatementError
from twisted._threads import ThreadWorker, AlreadyQuit
from twisted.internet.defer import Deferred, fail
from twisted.python.failure import Failure
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
def _threaded_worker():
def _start_thread(target):
thread = Thread(target=target)
thread.daemon = True
return thread.start()
return ThreadWorker(_start_thread, Queue())
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
deferred = Deferred()
@worker.do
def container():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
return deferred
class TwistedEngine(object):
def __init__(self, pool, dialect, url, reactor=None,
create_worker=_threaded_worker,
**kwargs):
if reactor is None:
raise TypeError("Must provide a reactor")
self._engine = Engine(pool, dialect, url, **kwargs)
self._reactor = reactor
self._create_worker = create_worker
self._engine_worker = self._create_worker()
@classmethod
def from_sqlalchemy_engine(cls, reactor, engine,
create_worker=_threaded_worker):
# Leaving the existing __init__ in place for compatibility reasons,
# this is a completely alternate constructor.
self = cls.__new__(cls)
self._reactor = reactor
self._engine = engine
self._create_worker = create_worker
self._engine_worker = create_worker()
return self
def _defer_to_engine(self, f, *a, **k):
return _defer_to_worker(self._reactor.callFromThread,
self._engine_worker, f, *a, **k)
@property
def dialect(self):
return self._engine.dialect
@property
def _has_events(self):
return self._engine._has_events
@property
def _execution_options(self):
return self._engine._execution_options
def _should_log_info(self):
return self._engine._should_log_info()
def execute(self, *args, **kwargs):
return (self._defer_to_engine(self._engine.execute, *args, **kwargs)
.addCallback(TwistedResultProxy, self._defer_to_engine))
def has_table(self, table_name, schema=None):
return self._defer_to_engine(
self._engine.has_table, table_name, schema)
def table_names(self, schema=None, connection=None):
if connection is not None:
connection = connection._connection
return self._defer_to_engine(
self._engine.table_names, schema, connection)
def connect(self):
worker = self._create_worker()
return (_defer_to_worker(self._reactor.callFromThread, worker,
self._engine.connect)
.addCallback(TwistedConnection, self, worker))
class TwistedConnection(object):
def __init__(self, connection, engine, worker):
self._connection = connection
self._engine = engine
self._cxn_worker = worker
def _defer_to_cxn(self, f, *a, **k):
return _defer_to_worker(self._engine._reactor.callFromThread,
self._cxn_worker, f, *a, **k)
def execute(self, *args, **kwargs):
try:
return (
self._defer_to_cxn(self._connection.execute, *args, **kwargs)
.addCallback(TwistedResultProxy, self._defer_to_cxn)
)
except AlreadyQuit:
return fail(StatementError("This Connection is closed.",
None, None, None))
def close(self, *args, **kwargs):
result = self._defer_to_cxn(self._connection.close, *args, **kwargs)
self._cxn_worker.quit()
return result
@property
def closed(self):
return self._connection.closed
def begin(self, *args, **kwargs):
return (self._defer_to_cxn(self._connection.begin, *args, **kwargs)
.addCallback(lambda txn: TwistedTransaction(txn, self)))
def begin_nested(self, *args, **kwargs):
return (
self._defer_to_cxn(self._connection.begin_nested, *args, **kwargs)
.addCallback(lambda txn: TwistedTransaction(txn, self))
)
def in_transaction(self):
return self._connection.in_transaction()
class TwistedTransaction(object):
def __init__(self, transaction, cxn):
self._transaction = transaction
self._cxn = cxn
def commit(self):
return self._cxn._defer_to_cxn(self._transaction.commit)
def rollback(self):
return self._cxn._defer_to_cxn(self._transaction.rollback)
def close(self):
return self._cxn._defer_to_cxn(self._transaction.close)
class TwistedResultProxy(object):
def __init__(self, result_proxy, deferrer):
self._result_proxy = result_proxy
self._deferrer = deferrer
def fetchone(self):
return self._deferrer(self._result_proxy.fetchone)
def fetchall(self):
return self._deferrer(self._result_proxy.fetchall)
def scalar(self):
return self._deferrer(self._result_proxy.scalar)
def first(self):
return self._deferrer(self._result_proxy.first)
def keys(self):
return self._deferrer(self._result_proxy.keys)
def close(self):
return self._deferrer(self._result_proxy.close)
@property
def returns_rows(self):
return self._result_proxy.returns_rows
@property
def rowcount(self):
return self._result_proxy.rowcount
@property
def inserted_primary_key(self):
return self._result_proxy.inserted_primary_key
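# Usage sketch (illustrative; not part of alchimia). It wraps an existing
# SQLAlchemy engine so blocking calls run on a worker thread and results
# come back as Deferreds. The function below is hypothetical and is not
# called anywhere in this module.
def _usage_sketch():
    from sqlalchemy import create_engine
    from twisted.internet import reactor
    engine = TwistedEngine.from_sqlalchemy_engine(
        reactor, create_engine("sqlite://"))
    d = engine.execute("SELECT 1")
    d.addCallback(lambda proxy: proxy.scalar())
    return d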
| {
"repo_name": "alex/alchimia",
"path": "alchimia/engine.py",
"copies": "1",
"size": "6024",
"license": "mit",
"hash": 5682922467735901000,
"line_mean": 29.578680203,
"line_max": 78,
"alpha_frac": 0.6133798141,
"autogenerated": false,
"ratio": 4.117566643882434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 197
} |
from __future__ import absolute_import, division
from sqlalchemy.sql import func, case
from changes.constants import Result
from changes.models import Build, TestCase, Source, Job
from changes.utils.http import build_uri
def get_flaky_tests(start_period, end_period, projects, maxFlakyTests):
test_queryset = TestCase.query.filter(
TestCase.project_id.in_(p.id for p in projects),
TestCase.result == Result.passed,
TestCase.date_created >= start_period,
TestCase.date_created < end_period,
).join(
Job, Job.id == TestCase.job_id,
).join(
Build, Build.id == Job.build_id,
).join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
)
flaky_test_queryset = test_queryset.with_entities(
TestCase.name_sha,
TestCase.project_id,
func.sum(case([(TestCase.reruns > 0, 1)], else_=0)).label('reruns'),
func.sum(case([(TestCase.reruns > 1, 1)], else_=0)).label('double_reruns'),
func.count('*').label('count')
).group_by(
TestCase.name_sha,
TestCase.project_id
).order_by(
func.sum(TestCase.reruns).desc()
).limit(maxFlakyTests)
project_names = {p.id: p.name for p in projects}
flaky_list = []
for name_sha, project_id, reruns, double_reruns, count in flaky_test_queryset:
if reruns == 0:
continue
rerun = test_queryset.filter(
TestCase.name_sha == name_sha,
TestCase.project_id == project_id,
TestCase.reruns > 0,
).order_by(
TestCase.date_created.desc()
).first()
flaky_list.append({
'id': rerun.id,
'name': rerun.name,
'short_name': rerun.short_name,
'package': rerun.package,
'hash': name_sha,
'project_id': rerun.project_id,
'project_name': project_names[rerun.project_id],
'flaky_runs': reruns,
'double_reruns': double_reruns,
'passing_runs': count,
'link': build_uri('/projects/{0}/builds/{1}/jobs/{2}/tests/{3}/'.format(
rerun.project.slug,
rerun.job.build.id.hex,
rerun.job.id.hex,
rerun.id.hex)),
})
return flaky_list
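# Roughly, the conditional counts above render to SQL along the lines of
#   SUM(CASE WHEN testcase.reruns > 0 THEN 1 ELSE 0 END) AS reruns,
#   SUM(CASE WHEN testcase.reruns > 1 THEN 1 ELSE 0 END) AS double_reruns,
#   COUNT(*) AS count
# grouped by (name_sha, project_id); a test therefore only counts as flaky if
# it eventually passed but needed at least one rerun.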
| {
"repo_name": "wfxiang08/changes",
"path": "changes/lib/flaky_tests.py",
"copies": "2",
"size": "2345",
"license": "apache-2.0",
"hash": 6890584507589434000,
"line_mean": 31.5694444444,
"line_max": 84,
"alpha_frac": 0.568869936,
"autogenerated": false,
"ratio": 3.5801526717557253,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149022607755726,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from sys import stdout
from os import remove
from os.path import join, abspath, isdir
import os.path
from time import time, sleep
from multiprocessing import RawValue, Lock, Process, cpu_count
from string import Template
import numpy as np
from numpy.fft import irfftn as np_irfftn, rfftn as np_rfftn
from scipy.ndimage import binary_erosion, laplace
try:
from pyfftw import zeros_aligned, simd_alignment
from pyfftw.builders import rfftn as rfftn_builder, irfftn as irfftn_builder
PYFFTW = True
except ImportError:
PYFFTW = False
try:
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.elementwise import ElementwiseKernel
from gpyfft import GpyFFT
OPENCL = True
except:
OPENCL = False
from ._powerfit import conj_multiply, calc_lcc, dilate_points
from ._extensions import rotate_grid3d
class _Counter(object):
"""Thread-safe counter object to follow PowerFit progress"""
def __init__(self):
self.val = RawValue('i', 0)
self.lock = Lock()
def increment(self):
with self.lock:
self.val.value += 1
def value(self):
with self.lock:
return self.val.value
class PowerFitter(object):
"""Wrapper around the Correlator classes for multiprocessing and GPU
accelerated searches providing an easy interface.
"""
def __init__(self, target, laplace=False):
self._target = target
self._rotations = None
self._template = None
self._mask = None
self._rotations = None
self._queues = None
self._nproc = 1
self._directory = abspath('./')
self._laplace = laplace
@property
def directory(self):
return self._directory
@directory.setter
def directory(self, directory):
if isdir(directory):
self._directory = abspath(directory)
else:
raise ValueError("Directory does not exist.")
def scan(self):
if self._queues is None:
self._cpu_scan()
else:
self._gpu_scan()
def _gpu_scan(self):
self._corr = GPUCorrelator(self._target.array, self._queues[0],
laplace=self._laplace)
self._corr.template = self._template.array
self._corr.mask = self._mask.array
self._corr.rotations = self._rotations
self._corr.scan()
self._lcc = self._corr.lcc
self._rot = self._corr.rot
def _cpu_scan(self):
nrot = self._rotations.shape[0]
self._nrot_per_job = nrot // self._nproc
processes = []
self._counter = _Counter()
self._njobs = self._nproc
if self._queues is not None:
self._njobs = len(self._queues)
for n in xrange(self._njobs):
init_rot = n * self._nrot_per_job
end_rot = init_rot + self._nrot_per_job
if n == self._njobs - 1:
end_rot = None
sub_rotations = self._rotations[init_rot: end_rot]
processes.append(Process(
target=self._run_correlator_instance,
args=(self._target, self._template, self._mask,
sub_rotations, self._laplace, self._counter, n,
self._queues, self._directory)
))
time0 = time()
for n in xrange(self._njobs):
processes[n].start()
while self._counter.value() < nrot:
n = self._counter.value()
p_done = (n + 1) / float(nrot) * 100
now = time()
eta = ((now - time0) / p_done) * (100 - p_done)
total = (now - time0) / p_done * (100)
stdout.write('{:7.2%} {:.0f}s {:.0f}s \r'.format(n / float(nrot), eta, total))
stdout.flush()
sleep(0.5)
stdout.write('\n')
for n in xrange(self._njobs):
processes[n].join()
self._combine()
@staticmethod
def _run_correlator_instance(target, template, mask, rotations, laplace,
counter, jobid, queues, directory):
correlator = CPUCorrelator(target.array, laplace=laplace)
correlator.template = template.array
correlator.mask = mask.array
correlator.rotations = rotations
correlator._counter = counter
correlator.scan()
np.save(join(directory, '_lcc_part_{:d}.npy').format(jobid), correlator._lcc)
np.save(join(directory, '_rot_part_{:d}.npy').format(jobid), correlator._rot)
def _combine(self):
# Combine all the intermediate results
lcc = np.zeros(self._target.shape)
rot = np.zeros(self._target.shape)
ind = np.zeros(lcc.shape, dtype=np.bool)
for n in range(self._njobs):
lcc_file = join(self._directory, '_lcc_part_{:d}.npy').format(n)
rot_file = join(self._directory, '_rot_part_{:d}.npy').format(n)
part_lcc = np.load(lcc_file)
part_rot = np.load(rot_file)
np.greater(part_lcc, lcc, ind)
lcc[ind] = part_lcc[ind]
# take care of the rotation index offset for each independent job
rot[ind] = part_rot[ind] + self._nrot_per_job * n
remove(lcc_file)
remove(rot_file)
self._lcc = lcc
self._rot = rot
class BaseCorrelator(object):
"""Base class that calculates the local cross-correlation"""
def __init__(self, target, laplace=False):
self._target = target / target.max()
self._rotations = None
self._template = None
self._mask = None
self._laplace = laplace
self._lcc_mask = self._get_lcc_mask(self._target)
self._rmax = min(target.shape) // 2
@staticmethod
def _get_lcc_mask(target):
return (target > target.max() * 0.05).astype(np.uint8)
@property
def target(self):
return self._target
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, mask):
if self._template is None:
raise ValueError("First set the template.")
if self._target.shape != mask.shape:
raise ValueError("Shape of the mask is different from target.")
ind = mask != 0
# remember the normalization factor for the cross-correlation
self._norm_factor = ind.sum()
# If mask is only zeros, raise error
if self._norm_factor == 0:
raise ValueError('Zero-filled mask is not allowed.')
self._mask = mask.copy()
if self._laplace:
self._template = self._laplace_filter(self._template)
self._template *= self._mask
self._normalize_template(ind)
# multiply again for core-weighted correlation score
self._template *= self._mask
@staticmethod
def _laplace_filter(array):
"""Laplace transform"""
return laplace(array, mode='wrap')
def _normalize_template(self, ind):
# normalize the template over the mask
self._template[ind] -= self._template[ind].mean()
self._template[ind] /= self._template[ind].std()
@property
def rotations(self):
return self._rotations
@rotations.setter
def rotations(self, rotations):
"""Set the rotations that will be sampled."""
rotations = np.asarray(rotations, dtype=np.float64).reshape(-1, 3, 3)
self._rotations = rotations
@property
def template(self):
return self._template
@template.setter
def template(self, template):
if template.shape != self._target.shape:
raise ValueError("Shape of template does not match the target.")
# reset the mask
self._mask = None
self._template = template.copy()
@property
def lcc(self):
return self._lcc
@property
def rot(self):
return self._rot
def scan(self):
if any([req is None for req in (self._template, self._mask, self._rotations)]):
raise ValueError("First set the template, mask, and rotations.")
class CPUCorrelator(BaseCorrelator):
"""CPU implementation for calculating the local cross-correlation."""
def __init__(self, target, laplace=False, fftw=True):
super(CPUCorrelator, self).__init__(target, laplace=laplace)
self._fftw = PYFFTW and fftw
self._allocate_arrays(self._target.shape)
self._build_ffts()
target = self._target
if self._laplace:
target = self._laplace_filter(self._target)
# pre-calculate the FFTs of the target
if self._fftw:
self._rfftn(target, self._ft_target)
self._rfftn(target**2, self._ft_target2)
else:
self._ft_target = self._rfftn(target)
self._ft_target2 = self._rfftn(target**2)
def _allocate_arrays(self, shape):
# allocate all the required arrays
# real arrays
arrays = '_rot_template _rot_mask _rot_mask2 _gcc _ave _ave2 _lcc_scan _lcc _rot'.split()
for arr in arrays:
setattr(self, arr, self._allocate_array(shape, np.float64, self._fftw))
self._ind = np.zeros(shape, dtype=np.bool)
# complex arrays
self._ft_shape = self._get_ft_shape(shape)
arrays = '_target _target2 _template _mask _mask2 _gcc _ave _ave2'.split()
for arr in arrays:
setattr(self, '_ft' + arr,
self._allocate_array(self._ft_shape, np.complex128, self._fftw))
@staticmethod
def _allocate_array(shape, dtype, fftw):
if fftw:
return zeros_aligned(shape, dtype=dtype, n=simd_alignment)
else:
return np.zeros(shape, dtype)
@staticmethod
def _get_ft_shape(shape):
return list(shape[:-1]) + [shape[-1] // 2 + 1]
def _build_ffts(self):
# build the ffts
if self._fftw:
self._rfftn = rfftn_builder(self._gcc)
self._irfftn = irfftn_builder(self._ft_gcc, s=self._target.shape)
else:
# monkey patch the numpy fft interface
self._rfftn = np_rfftn
self._irfftn = np_irfftn
def scan(self):
super(CPUCorrelator, self).scan()
self._lcc.fill(0)
self._rot.fill(0)
for n in xrange(self._rotations.shape[0]):
# rotate template and mask
self._translational_scan(self._rotations[n])
# get the indices where the scanned lcc is greater
np.greater(self._lcc_scan, self._lcc, self._ind)
# remember lcc and rotation index
self._lcc[self._ind] = self._lcc_scan[self._ind]
self._rot[self._ind] = n
if hasattr(self, '_counter'):
self._counter.increment()
def _translational_scan(self, rotmat):
self._rotate_grids(rotmat)
self._get_lcc()
def _rotate_grids(self, rotmat):
rotate_grid3d(
self._template, rotmat, self._rmax,
self._rot_template, False
)
rotate_grid3d(
self._mask, rotmat, self._rmax,
self._rot_mask, True
)
def _get_lcc(self):
np.multiply(self._rot_mask, self._rot_mask, self._rot_mask2)
self._forward_ffts()
conj_multiply(
self._ft_template.ravel(), self._ft_target.ravel(),
self._ft_gcc.ravel()
)
conj_multiply(
self._ft_mask.ravel(), self._ft_target.ravel(),
self._ft_ave.ravel()
)
conj_multiply(
self._ft_mask2.ravel(), self._ft_target2.ravel(),
self._ft_ave2.ravel()
)
self._backward_ffts()
self._ave2 *= self._norm_factor
calc_lcc(
self._gcc.ravel(), self._ave.ravel(), self._ave2.ravel(),
self._lcc_mask.ravel(), self._lcc_scan.ravel()
)
def _forward_ffts(self):
if self._fftw:
self._rfftn(self._rot_template, self._ft_template)
self._rfftn(self._rot_mask, self._ft_mask)
self._rfftn(self._rot_mask2, self._ft_mask2)
else:
self._ft_template = self._rfftn(self._rot_template)
self._ft_mask = self._rfftn(self._rot_mask)
self._ft_mask2 = self._rfftn(self._rot_mask2)
def _backward_ffts(self):
if self._fftw:
self._irfftn(self._ft_gcc, self._gcc)
self._irfftn(self._ft_ave, self._ave)
self._irfftn(self._ft_ave2, self._ave2)
else:
self._gcc = self._irfftn(self._ft_gcc, s=self.target.shape)
self._ave = self._irfftn(self._ft_ave, s=self.target.shape)
self._ave2 = self._irfftn(self._ft_ave2, s=self.target.shape)
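# Note: the OpenCL kernel further below spells the local cross-correlation out
# explicitly as lcc = gcc / sqrt(norm_factor * ave2 - ave * ave); the CPU path
# computes the same quantity through the calc_lcc extension, with norm_factor
# already folded into ave2 in _get_lcc above.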
if OPENCL:
class GPUCorrelator(BaseCorrelator):
def __init__(self, target, queue, laplace=False):
super(GPUCorrelator, self).__init__(target, laplace=laplace)
self._queue = queue
self._ctx = self._queue.context
self._gpu = self._queue.device
self._allocate_arrays()
self._build_ffts()
self._generate_kernels()
target = self._target
if self._laplace:
target = self._laplace_filter(self._target)
# move some arrays to the GPU
self._gtarget = cl_array.to_device(self._queue, target.astype(np.float32))
self._lcc_mask = cl_array.to_device(self._queue,
self._lcc_mask.astype(np.int32))
# Do some one-time precalculations
self._rfftn(self._gtarget, self._ft_target)
self._k.multiply(self._gtarget, self._gtarget, self._target2)
self._rfftn(self._target2, self._ft_target2)
self._gshape = np.asarray(
list(self._target.shape) + [np.product(self._target.shape)],
dtype=np.int32)
def _allocate_arrays(self):
# Determine the required shape and size of an array
self._ft_shape = tuple(
[self._target.shape[0] // 2 + 1] + list(self._target.shape[1:])
)
self._shape = self._target.shape
# Allocate arrays on CPU
self._lcc = np.zeros(self._target.shape, dtype=np.float32)
self._rot = np.zeros(self._target.shape, dtype=np.int32)
# Allocate arrays on GPU
arrays = '_target2 _rot_template _rot_mask _rot_mask2 _gcc _ave _ave2 _glcc'.split()
for array in arrays:
setattr(self, array,
cl_array.zeros( self._queue, self._shape, dtype=np.float32)
)
self._grot = cl_array.zeros(self._queue, self._shape, dtype=np.int32)
# Allocate all complex arrays
ft_arrays = 'target target2 template mask mask2 gcc ave ave2 lcc'.split()
for ft_array in ft_arrays:
setattr(self, '_ft_' + ft_array,
cl_array.to_device(self._queue,
np.zeros(self._ft_shape, dtype=np.complex64))
)
def _build_ffts(self, batch_size=1):
self._rfftn = grfftn_builder(self._ctx, self._target.shape,
batch_size=batch_size)
self._irfftn = grfftn_builder(self._ctx, self._target.shape,
forward=False, batch_size=batch_size)
self._rfftn.bake(self._queue)
self._irfftn.bake(self._queue)
@property
def mask(self):
return BaseCorrelator.mask.fget(self)
@mask.setter
def mask(self, mask):
BaseCorrelator.mask.fset(self, mask)
self._norm_factor = np.float32(self._norm_factor)
self._rmax = np.int32(self._rmax)
self._gtemplate = cl.image_from_array(self._queue.context,
self._template.astype(np.float32))
self._gmask = cl.image_from_array(self._queue.context,
self._mask.astype(np.float32))
@property
def rotations(self):
return BaseCorrelator.rotations.fget(self)
@rotations.setter
def rotations(self, rotations):
BaseCorrelator.rotations.fset(self, rotations)
self._cl_rotations = np.zeros((self._rotations.shape[0], 16),
dtype=np.float32)
self._cl_rotations[:, :9] = self._rotations.reshape(-1, 9)
def _cl_rotate_grids(self, rotmat):
self._k.rotate_image3d(self._queue, self._gtemplate, rotmat,
self._rot_template)
self._k.rotate_image3d(self._queue, self._gmask, rotmat,
self._rot_mask, nearest=True)
self._queue.finish()
def _cl_get_gcc(self):
self._rfftn(self._rot_template, self._ft_template)
self._k.conj_multiply(self._ft_template, self._ft_target, self._ft_gcc)
self._irfftn(self._ft_gcc, self._gcc)
self._queue.finish()
def _cl_get_ave(self):
self._rfftn(self._rot_mask, self._ft_mask)
self._k.conj_multiply(self._ft_mask, self._ft_target, self._ft_ave)
self._irfftn(self._ft_ave, self._ave)
self._queue.finish()
def _cl_get_ave2(self):
self._k.multiply(self._rot_mask, self._rot_mask, self._rot_mask2)
self._rfftn(self._rot_mask2, self._ft_mask2)
self._k.conj_multiply(self._ft_mask2, self._ft_target2, self._ft_ave2)
self._irfftn(self._ft_ave2, self._ave2)
self._queue.finish()
def scan(self):
super(GPUCorrelator, self).scan()
self._glcc.fill(0)
self._grot.fill(0)
time0 = time()
for n in xrange(0, self._rotations.shape[0]):
rotmat = self._cl_rotations[n]
self._cl_rotate_grids(rotmat)
self._cl_get_gcc()
self._cl_get_ave()
self._cl_get_ave2()
self._k.calc_lcc_and_take_best(self._gcc, self._ave,
self._ave2, self._lcc_mask, self._norm_factor,
np.int32(n), self._glcc, self._grot)
self._queue.finish()
self._print_progress(n, self._rotations.shape[0], time0)
self._glcc.get(ary=self._lcc)
self._grot.get(ary=self._rot)
self._queue.finish()
@staticmethod
def _print_progress(n, nrot, time0):
p_done = (n + 1) / float(nrot) * 100
now = time()
eta = ((now - time0) / p_done) * (100 - p_done)
total = (now - time0) / p_done * (100)
stdout.write('{:7.2%} {:.0f}s {:.0f}s \r'.format(n / float(nrot), eta, total))
stdout.flush()
def _generate_kernels(self):
kernel_values = {'shape_x': self._shape[2],
'shape_y': self._shape[1],
'shape_z': self._shape[0],
'llength': self._rmax,
}
self._k = CLKernels(self._ctx, kernel_values)
class CLKernels(object):
def __init__(self, ctx, values):
self.sampler_nearest = cl.Sampler(ctx, True,
cl.addressing_mode.REPEAT, cl.filter_mode.NEAREST)
self.sampler_linear = cl.Sampler(ctx, True,
cl.addressing_mode.REPEAT, cl.filter_mode.LINEAR)
self.multiply = ElementwiseKernel(ctx,
"float *x, float *y, float *z",
"z[i] = x[i] * y[i];"
)
self.conj_multiply = ElementwiseKernel(ctx,
"cfloat_t *x, cfloat_t *y, cfloat_t *z",
"z[i] = cfloat_mul(cfloat_conj(x[i]), y[i]);"
)
self.calc_lcc_and_take_best = ElementwiseKernel(ctx,
"""float *gcc, float *ave, float *ave2, int *mask,
float norm_factor, int nrot, float *lcc, int *grot""",
"""float _lcc;
if (mask[i] > 0) {
_lcc = gcc[i] / sqrt(ave2[i] * norm_factor - ave[i] * ave[i]);
if (_lcc > lcc[i]) {
lcc[i] = _lcc;
grot[i] = nrot;
};
};
"""
)
kernel_file = os.path.join(os.path.dirname(__file__), 'kernels.cl')
with open(kernel_file) as f:
t = Template(f.read()).substitute(**values)
self._program = cl.Program(ctx, t).build()
self._gws_rotate_grid3d = (96, 64, 1)
def rotate_grid3d(self, queue, grid, rotmat, out, nearest=False):
args = (grid.data, rotmat, out.data, np.int32(nearest))
self._program.rotate_grid3d(queue, self._gws_rotate_grid3d, None, *args)
def rotate_image3d(self, queue, image, rotmat, out, nearest=False):
if nearest:
args = (image, self.sampler_nearest, rotmat, out.data)
else:
args = (image, self.sampler_linear, rotmat, out.data)
self._program.rotate_image3d(queue, self._gws_rotate_grid3d, None, *args)
class grfftn_builder(object):
_G = GpyFFT()
CLFFT_HERMITIAN_INTERLEAVED = 3
CLFFT_REAL = 5
def __init__(self, ctx, shape, forward=True, batch_size=1):
self.ctx = ctx
self.shape = shape
self.plan = self._G.create_plan(self.ctx, shape)
if forward:
layouts = (self.CLFFT_REAL, self.CLFFT_HERMITIAN_INTERLEAVED)
else:
layouts = (self.CLFFT_HERMITIAN_INTERLEAVED, self.CLFFT_REAL)
self.plan.layouts = layouts
self.plan.inplace = False
size = np.prod(shape)
ft_size = np.prod([shape[0] // 2 + 1] + list(shape)[1:])
if forward:
self.distances = (size, ft_size)
else:
self.distances = (ft_size, size)
self.plan.batch_size = batch_size
strides = (shape[-2] * shape[-1], shape[-1], 1)
self.plan.strides_in = strides
self.plan.strides_out = strides
self.forward = forward
def bake(self, queue):
self.queue = queue
self.plan.bake(queue)
def __call__(self, inarray, outarray):
self.plan.enqueue_transform(self.queue, inarray.data,
outarray.data, direction_forward=self.forward)
| {
"repo_name": "haddocking/powerfit",
"path": "powerfit/powerfitter.py",
"copies": "1",
"size": "22830",
"license": "apache-2.0",
"hash": -530803999587287000,
"line_mean": 34.8398744113,
"line_max": 97,
"alpha_frac": 0.5423127464,
"autogenerated": false,
"ratio": 3.6651147856798842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4707427532079884,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from sys import stdout
import numpy as np
def vectorize_labels(labels, vec_len):
vectorized = np.zeros((len(labels), vec_len))
for i in xrange(len(labels)):
vectorized[i][labels[i]] = 1
return vectorized
def devectorize_labels(vectorized):
devectorized = np.zeros((len(vectorized),))
for i in xrange(len(vectorized)):
devectorized[i] = np.argmax(vectorized[i])
return devectorized.astype(np.int)
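# Worked example: with vec_len=3, labels [1, 0, 2] become
# [[0, 1, 0], [1, 0, 0], [0, 0, 1]], and devectorize_labels() recovers
# array([1, 0, 2]) by taking the argmax of each row.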
def shuffle_data_labels(data, labels):
assert len(data) == len(labels)
indices = np.random.permutation(len(data))
return data[indices], labels[indices]
def iterate_with_progress(collections):
cursor = '.'
last_percent = -1
length = len(collections)
for index, item in enumerate(collections):
cur_percent = int(100.0 * ((index+1) / length))
if cur_percent > last_percent:
last_percent = cur_percent
stdout.write('\r' + cursor * cur_percent + " %d%%" % cur_percent)
if cur_percent == 100:
stdout.write('\n')
stdout.flush()
yield item
| {
"repo_name": "zhaoyan1117/NeuralNet",
"path": "nnet/util.py",
"copies": "1",
"size": "1140",
"license": "bsd-2-clause",
"hash": -6609278364835120000,
"line_mean": 29.8108108108,
"line_max": 77,
"alpha_frac": 0.6289473684,
"autogenerated": false,
"ratio": 3.619047619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.973393284838548,
"avg_score": 0.0028124278124278122,
"num_lines": 37
} |
from __future__ import absolute_import, division
from twisted.internet import defer
from twisted.python import log, failure
from twisted.web import server
from twisted.web.iweb import IRenderable
from twisted.web.resource import Resource, IResource, getChildForRequest
from twisted.web.template import flattenString
from twisted.python.compat import unicode, intToBytes
from werkzeug.exceptions import HTTPException
from klein.interfaces import IKleinRequest
__all__ = ["KleinResource", "ensure_utf8_bytes"]
def ensure_utf8_bytes(v):
"""
Coerces a value which is either a C{unicode} or C{str} to a C{str}.
If ``v`` is a C{unicode} object it is encoded as utf-8.
"""
if isinstance(v, unicode):
v = v.encode("utf-8")
return v
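# Example: ensure_utf8_bytes(u"caf\xe9") == b"caf\xc3\xa9", while values that
# are already byte strings are returned unchanged.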
class _StandInResource(object):
"""
A standin for a Resource.
This is a sentinel value for L{KleinResource}, to say that we are rendering
a L{Resource}, which may close the connection itself later.
"""
class _URLDecodeError(Exception):
"""
Raised if one or more string parts of the URL could not be decoded.
"""
__slots__ = ["errors"]
def __init__(self, errors):
"""
@param errors: List of decoding errors.
@type errors: L{list} of L{tuple} of L{str},
L{twisted.python.failure.Failure}
"""
self.errors = errors
def __repr__(self):
return "<URLDecodeError(errors={0!r})>".format(self.errors)
def _extractURLparts(request):
"""
Extracts and decodes URI parts from C{request}.
All strings must be UTF8-decodable.
@param request: A Twisted Web request.
@type request: L{twisted.web.iweb.IRequest}
@raise URLDecodeError: If one of the parts could not be decoded as UTF-8.
@return: L{tuple} of the URL scheme, the server name, the server port, the
path info and the script name.
@rtype: L{tuple} of L{unicode}, L{unicode}, L{int}, L{unicode}, L{unicode}
"""
server_name = request.getRequestHostname()
server_port = request.getHost().port
if (bool(request.isSecure()), server_port) not in [
(True, 443), (False, 80)]:
server_name = server_name + b":" + intToBytes(server_port)
script_name = b''
if request.prepath:
script_name = b'/'.join(request.prepath)
if not script_name.startswith(b'/'):
script_name = b'/' + script_name
path_info = b''
if request.postpath:
path_info = b'/'.join(request.postpath)
if not path_info.startswith(b'/'):
path_info = b'/' + path_info
url_scheme = u'https' if request.isSecure() else u'http'
utf8Failures = []
try:
server_name = server_name.decode("utf-8")
except UnicodeDecodeError:
utf8Failures.append(("SERVER_NAME", failure.Failure()))
try:
path_info = path_info.decode("utf-8")
except UnicodeDecodeError:
utf8Failures.append(("PATH_INFO", failure.Failure()))
try:
script_name = script_name.decode("utf-8")
except UnicodeDecodeError:
utf8Failures.append(("SCRIPT_NAME", failure.Failure()))
if utf8Failures:
raise _URLDecodeError(utf8Failures)
return url_scheme, server_name, server_port, path_info, script_name
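# Worked example: for a request to https://example.com:8443/app/users with
# prepath [b'app'] and postpath [b'users'], this returns
# (u'https', u'example.com:8443', 8443, u'/users', u'/app'); the port is kept
# in the host string because (secure, 8443) is not one of the default
# scheme/port pairs checked above.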
class KleinResource(Resource):
"""
A ``Resource`` that can do URL routing.
"""
isLeaf = True
def __init__(self, app):
Resource.__init__(self)
self._app = app
def __eq__(self, other):
if isinstance(other, KleinResource):
return vars(self) == vars(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def render(self, request):
# Stuff we need to know for the mapper.
try:
url_scheme, server_name, server_port, path_info, script_name = \
_extractURLparts(request)
except _URLDecodeError as e:
for what, fail in e.errors:
log.err(fail, "Invalid encoding in {what}.".format(what=what))
request.setResponseCode(400)
return b"Non-UTF-8 encoding in URL."
# Bind our mapper.
mapper = self._app.url_map.bind(
server_name,
script_name,
path_info=path_info,
default_method=request.method,
url_scheme=url_scheme,
)
# Make the mapper available to the view.
kleinRequest = IKleinRequest(request)
kleinRequest.mapper = mapper
# Make sure we'll notice when the connection goes away unambiguously.
request_finished = [False]
def _finish(result):
request_finished[0] = True
def _execute():
# Actually doing the match right here. This can cause an exception
# to percolate up. If that happens it will be handled below in
# processing_failed, either by a user-registered error handler or
# one of our defaults.
(rule, kwargs) = mapper.match(return_rule=True)
endpoint = rule.endpoint
# Try pretty hard to fix up prepath and postpath.
segment_count = self._app.endpoints[endpoint].segment_count
request.prepath.extend(request.postpath[:segment_count])
request.postpath = request.postpath[segment_count:]
request.notifyFinish().addBoth(_finish)
# Standard Twisted Web stuff. Defer the method action, giving us
# something renderable or printable. Return NOT_DONE_YET and set up
# the incremental renderer.
d = defer.maybeDeferred(self._app.execute_endpoint,
endpoint,
request,
**kwargs)
request.notifyFinish().addErrback(lambda _: d.cancel())
return d
d = defer.maybeDeferred(_execute)
def write_response(r):
if r is not _StandInResource:
if isinstance(r, unicode):
r = r.encode('utf-8')
if r is not None:
request.write(r)
if not request_finished[0]:
request.finish()
def process(r):
if IResource.providedBy(r):
request.render(getChildForRequest(r, request))
return _StandInResource
if IRenderable.providedBy(r):
return flattenString(request, r).addCallback(process)
return r
d.addCallback(process)
def processing_failed(failure, error_handlers):
# The failure processor writes to the request. If the
# request is already finished we should suppress failure
# processing. We don't return failure here because there
# is no way to surface this failure to the user if the
# request is finished.
if request_finished[0]:
if not failure.check(defer.CancelledError):
log.err(failure, "Unhandled Error Processing Request.")
return
# If there are no more registered handlers, apply some defaults
if len(error_handlers) == 0:
if failure.check(HTTPException):
he = failure.value
request.setResponseCode(he.code)
resp = he.get_response({})
for header, value in resp.headers:
request.setHeader(ensure_utf8_bytes(header), ensure_utf8_bytes(value))
return ensure_utf8_bytes(he.get_body({}))
else:
request.processingFailed(failure)
return
error_handler = error_handlers[0]
# Each error handler is a tuple of (list_of_exception_types, handler_fn)
if failure.check(*error_handler[0]):
d = defer.maybeDeferred(self._app.execute_error_handler,
error_handler[1],
request,
failure)
return d.addErrback(processing_failed, error_handlers[1:])
return processing_failed(failure, error_handlers[1:])
d.addErrback(processing_failed, self._app._error_handlers)
d.addCallback(write_response).addErrback(log.err, _why="Unhandled Error writing response")
return server.NOT_DONE_YET
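# Usage sketch (illustrative; mirrors klein's own tests): the error handlers
# consulted above are registered as (exception_types, handler) pairs via
# Klein.handle_errors, e.g.
#
#     app = Klein()
#
#     @app.handle_errors(ValueError)
#     def bad_value(request, failure):
#         request.setResponseCode(400)
#         return b"Bad value."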
| {
"repo_name": "brighid/klein",
"path": "klein/resource.py",
"copies": "1",
"size": "8627",
"license": "mit",
"hash": -7650919095263021000,
"line_mean": 31.4323308271,
"line_max": 98,
"alpha_frac": 0.5854874232,
"autogenerated": false,
"ratio": 4.292039800995025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00137996744642988,
"num_lines": 266
} |
from __future__ import absolute_import, division
from twisted.trial import unittest
import copy
import sys
from mock import Mock, patch
from twisted.python.components import registerAdapter
from klein import Klein
from klein.app import KleinRequest
from klein.interfaces import IKleinRequest
from klein.test.util import EqualityTestsMixin
class DummyRequest(object):
def __init__(self, n):
self.n = n
def __eq__(self, other):
return other.n == self.n
def __repr__(self):
return '<DummyRequest({n})>'.format(n=self.n)
registerAdapter(KleinRequest, DummyRequest, IKleinRequest)
class KleinEqualityTestCase(unittest.TestCase, EqualityTestsMixin):
"""
Tests for L{Klein}'s implementation of C{==} and C{!=}.
"""
class _One(object):
app = Klein()
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __hash__(self):
return id(self)
_another = Klein()
def anInstance(self):
# This is actually a new Klein instance every time since Klein.__get__
# creates a new Klein instance for every instance it is retrieved from.
# The different _One instance, at least, will not cause the Klein
# instances to be not-equal to each other since an instance of _One is
# equal to everything.
return self._One().app
def anotherInstance(self):
return self._another
class KleinTestCase(unittest.TestCase):
def test_route(self):
"""
L{Klein.route} adds functions as routable endpoints.
"""
app = Klein()
@app.route("/foo")
def foo(request):
return "foo"
c = app.url_map.bind("foo")
self.assertEqual(c.match("/foo"), ("foo", {}))
self.assertEqual(len(app.endpoints), 1)
self.assertEqual(app.execute_endpoint("foo", DummyRequest(1)), "foo")
def test_submountedRoute(self):
"""
L{Klein.subroute} adds functions as routable endpoints.
"""
app = Klein()
with app.subroute("/sub") as app:
@app.route("/prefixed_uri")
def foo_endpoint(request):
return b"foo"
c = app.url_map.bind("sub/prefixed_uri")
self.assertEqual(
c.match("/sub/prefixed_uri"), ("foo_endpoint", {}))
self.assertEqual(
len(app.endpoints), 1)
self.assertEqual(
app.execute_endpoint("foo_endpoint", DummyRequest(1)), b"foo")
def test_stackedRoute(self):
"""
L{Klein.route} can be stacked to create multiple endpoints of
a single function.
"""
app = Klein()
@app.route("/foo")
@app.route("/bar", endpoint="bar")
def foobar(request):
return "foobar"
self.assertEqual(len(app.endpoints), 2)
c = app.url_map.bind("foo")
self.assertEqual(c.match("/foo"), ("foobar", {}))
self.assertEqual(app.execute_endpoint("foobar", DummyRequest(1)), "foobar")
self.assertEqual(c.match("/bar"), ("bar", {}))
self.assertEqual(app.execute_endpoint("bar", DummyRequest(2)), "foobar")
def test_branchRoute(self):
"""
L{Klein.route} should create a branch path which consumes all children
when the branch keyword argument is True.
"""
app = Klein()
@app.route("/foo/", branch=True)
def foo(request):
return "foo"
c = app.url_map.bind("foo")
self.assertEqual(c.match("/foo/"), ("foo", {}))
self.assertEqual(
c.match("/foo/bar"),
("foo_branch", {'__rest__': 'bar'}))
self.assertEquals(app.endpoints["foo"].__name__, "foo")
self.assertEquals(
app.endpoints["foo_branch"].__name__,
"foo")
def test_classicalRoute(self):
"""
L{Klein.route} may be used as a method decorator when a L{Klein} instance
is defined as a class variable.
"""
bar_calls = []
class Foo(object):
app = Klein()
@app.route("/bar")
def bar(self, request):
bar_calls.append((self, request))
return "bar"
foo = Foo()
c = foo.app.url_map.bind("bar")
self.assertEqual(c.match("/bar"), ("bar", {}))
self.assertEquals(foo.app.execute_endpoint("bar", DummyRequest(1)), "bar")
self.assertEqual(bar_calls, [(foo, DummyRequest(1))])
def test_classicalRouteWithTwoInstances(self):
"""
Multiple instances of a class with a L{Klein} attribute and
L{Klein.route}'d methods can be created and their L{Klein}s used
independently.
"""
class Foo(object):
app = Klein()
def __init__(self):
self.bar_calls = []
@app.route("/bar")
def bar(self, request):
self.bar_calls.append((self, request))
return "bar"
foo_1 = Foo()
foo_1_app = foo_1.app
foo_2 = Foo()
foo_2_app = foo_2.app
dr1 = DummyRequest(1)
dr2 = DummyRequest(2)
foo_1_app.execute_endpoint('bar', dr1)
foo_2_app.execute_endpoint('bar', dr2)
self.assertEqual(foo_1.bar_calls, [(foo_1, dr1)])
self.assertEqual(foo_2.bar_calls, [(foo_2, dr2)])
def test_classicalRouteWithBranch(self):
"""
Multiple instances of a class with a L{Klein} attribute and
L{Klein.route}'d methods can be created and their L{Klein}s used
independently.
"""
class Foo(object):
app = Klein()
def __init__(self):
self.bar_calls = []
@app.route("/bar/", branch=True)
def bar(self, request):
self.bar_calls.append((self, request))
return "bar"
foo_1 = Foo()
foo_1_app = foo_1.app
foo_2 = Foo()
foo_2_app = foo_2.app
dr1 = DummyRequest(1)
dr2 = DummyRequest(2)
foo_1_app.execute_endpoint('bar_branch', dr1)
foo_2_app.execute_endpoint('bar_branch', dr2)
self.assertEqual(foo_1.bar_calls, [(foo_1, dr1)])
self.assertEqual(foo_2.bar_calls, [(foo_2, dr2)])
def test_branchDoesntRequireTrailingSlash(self):
"""
L{Klein.route} should create a branch path which consumes all children,
when the branch keyword argument is True and there is no trailing /
on the path.
"""
app = Klein()
@app.route("/foo", branch=True)
def foo(request):
return "foo"
c = app.url_map.bind("foo")
self.assertEqual(c.match("/foo/bar"),
("foo_branch", {"__rest__": "bar"}))
@patch('klein.app.KleinResource')
@patch('klein.app.Site')
@patch('klein.app.log')
@patch('klein.app.reactor')
def test_run(self, reactor, mock_log, mock_site, mock_kr):
"""
L{Klein.run} configures a L{KleinResource} and a L{Site}
listening on the specified interface and port, and logs
to stdout.
"""
app = Klein()
app.run("localhost", 8080)
reactor.listenTCP.assert_called_with(
8080, mock_site.return_value, backlog=50, interface="localhost")
reactor.run.assert_called_with()
mock_site.assert_called_with(mock_kr.return_value)
mock_kr.assert_called_with(app)
mock_log.startLogging.assert_called_with(sys.stdout)
@patch('klein.app.KleinResource')
@patch('klein.app.Site')
@patch('klein.app.log')
@patch('klein.app.reactor')
def test_runWithLogFile(self, reactor, mock_log, mock_site, mock_kr):
"""
L{Klein.run} logs to the specified C{logFile}.
"""
app = Klein()
logFile = Mock()
app.run("localhost", 8080, logFile=logFile)
reactor.listenTCP.assert_called_with(
8080, mock_site.return_value, backlog=50, interface="localhost")
reactor.run.assert_called_with()
mock_site.assert_called_with(mock_kr.return_value)
mock_kr.assert_called_with(app)
mock_log.startLogging.assert_called_with(logFile)
@patch('klein.app.KleinResource')
@patch('klein.app.log')
@patch('klein.app.endpoints.serverFromString')
@patch('klein.app.reactor')
def test_runTCP6(self, reactor, mock_sfs, mock_log, mock_kr):
"""
L{Klein.run} called with tcp6 endpoint description.
"""
app = Klein()
interface = "2001\:0DB8\:f00e\:eb00\:\:1"
spec = "tcp6:8080:interface={0}".format(interface)
app.run(endpoint_description=spec)
reactor.run.assert_called_with()
mock_sfs.assert_called_with(reactor, spec)
mock_log.startLogging.assert_called_with(sys.stdout)
mock_kr.assert_called_with(app)
@patch('klein.app.KleinResource')
@patch('klein.app.log')
@patch('klein.app.endpoints.serverFromString')
@patch('klein.app.reactor')
def test_runSSL(self, reactor, mock_sfs, mock_log, mock_kr):
"""
L{Klein.run} called with SSL endpoint specification.
"""
app = Klein()
key = "key.pem"
cert = "cert.pem"
dh_params = "dhparam.pem"
spec_template = "ssl:443:privateKey={0}:certKey={1}"
spec = spec_template.format(key, cert, dh_params)
app.run(endpoint_description=spec)
reactor.run.assert_called_with()
mock_sfs.assert_called_with(reactor, spec)
mock_log.startLogging.assert_called_with(sys.stdout)
mock_kr.assert_called_with(app)
@patch('klein.app.KleinResource')
def test_resource(self, mock_kr):
"""
L{Klein.resource} returns a L{KleinResource}.
"""
app = Klein()
resource = app.resource()
mock_kr.assert_called_with(app)
self.assertEqual(mock_kr.return_value, resource)
def test_copy(self):
"""
L{Klein.__copy__} returns a new L{Klein} with all the registered endpoints
"""
app = Klein()
@app.route("/foo")
def foo(request):
return "foo"
app_copy = copy.copy(app)
@app.route('/bar')
def bar(request):
return 'bar'
dr1 = DummyRequest(1)
dr2 = DummyRequest(2)
dr3 = DummyRequest(3)
self.assertEquals(app.execute_endpoint('foo', dr1), 'foo')
self.assertEquals(app.execute_endpoint('bar', dr2), 'bar')
self.assertRaises(KeyError, app_copy.execute_endpoint, 'bar', dr3)
def test_error_handlers_list_is_copied(self):
"""
L{Klein.__copy__} returns a new L{Klein} with all the error handlers
"""
app = Klein()
app.handle_errors(ValueError)(lambda request, failure: 'foo')
app_copy = copy.copy(app)
self.assertEquals(app._error_handlers, app_copy._error_handlers)
app.handle_errors(KeyError)(lambda request, failure: 'foo')
self.assertNotEquals(app._error_handlers, app_copy._error_handlers)
| {
"repo_name": "joac/klein",
"path": "src/klein/test/test_app.py",
"copies": "1",
"size": "11218",
"license": "mit",
"hash": -6502252046380933000,
"line_mean": 28.2135416667,
"line_max": 83,
"alpha_frac": 0.5727402389,
"autogenerated": false,
"ratio": 3.6245557350565427,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4697295973956543,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from uuid import uuid4
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, String
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index, UniqueConstraint
from changes.config import db
from changes.db.types.guid import GUID
from changes.db.types.json import JSONEncodedDict
class FailureReason(db.Model):
"""
Always associated with a single jobstep. failurereason is not required to
fail a build. But if a jobstep fails, it can record why here. reason
column can be: [test_failures, missing_test, missing_artifact, timeout,
malformed_artifact, duplicate_test_name]
"""
__tablename__ = 'failurereason'
__table_args__ = (
Index('idx_failurereason_job_id', 'job_id'),
Index('idx_failurereason_build_id', 'build_id'),
Index('idx_failurereason_project_id', 'project_id'),
UniqueConstraint('step_id', 'reason', name='unq_failurereason_key'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid4)
step_id = Column(GUID, ForeignKey('jobstep.id', ondelete="CASCADE"), nullable=False)
job_id = Column(GUID, ForeignKey('job.id', ondelete="CASCADE"), nullable=False)
build_id = Column(GUID, ForeignKey('build.id', ondelete="CASCADE"), nullable=False)
project_id = Column(GUID, ForeignKey('project.id', ondelete="CASCADE"), nullable=False)
reason = Column(String(32), nullable=False)
date_created = Column(DateTime, default=datetime.utcnow, server_default='now()', nullable=False)
data = Column(JSONEncodedDict) # extra metadata/info about the failure
step = relationship('JobStep')
job = relationship('Job')
build = relationship('Build')
project = relationship('Project')
def __init__(self, **kwargs):
super(FailureReason, self).__init__(**kwargs)
if self.id is None:
self.id = uuid4()
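# Illustrative example (hypothetical objects, not part of the model): record a
# timeout against a job step.
# db.session.add(FailureReason(
#     step_id=jobstep.id, job_id=jobstep.job_id,
#     build_id=job.build_id, project_id=job.project_id,
#     reason='timeout',
# ))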
| {
"repo_name": "bowlofstew/changes",
"path": "changes/models/failurereason.py",
"copies": "3",
"size": "1947",
"license": "apache-2.0",
"hash": 6459041088107098000,
"line_mean": 40.4255319149,
"line_max": 100,
"alpha_frac": 0.6979969183,
"autogenerated": false,
"ratio": 3.957317073170732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6155313991470732,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import abc
import copy
import logging
import threading
import time
import weakref
from kafka.vendor import six
from kafka.coordinator.heartbeat import Heartbeat
from kafka import errors as Errors
from kafka.future import Future
from kafka.metrics import AnonMeasurable
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetCommitRequest
from kafka.protocol.group import (HeartbeatRequest, JoinGroupRequest,
LeaveGroupRequest, SyncGroupRequest)
log = logging.getLogger('kafka.coordinator')
class MemberState(object):
UNJOINED = '<unjoined>' # the client is not part of a group
REBALANCING = '<rebalancing>' # the client has begun rebalancing
STABLE = '<stable>' # the client has joined and is sending heartbeats
class Generation(object):
def __init__(self, generation_id, member_id, protocol):
self.generation_id = generation_id
self.member_id = member_id
self.protocol = protocol
Generation.NO_GENERATION = Generation(
OffsetCommitRequest[2].DEFAULT_GENERATION_ID,
JoinGroupRequest[0].UNKNOWN_MEMBER_ID,
None)
class UnjoinedGroupException(Errors.KafkaError):
retriable = True
class BaseCoordinator(object):
"""
BaseCoordinator implements group management for a single group member
by interacting with a designated Kafka broker (the coordinator). Group
semantics are provided by extending this class. See ConsumerCoordinator
for example usage.
From a high level, Kafka's group management protocol consists of the
following sequence of actions:
1. Group Registration: Group members register with the coordinator providing
their own metadata (such as the set of topics they are interested in).
2. Group/Leader Selection: The coordinator selects the members of the group
and chooses one member as the leader.
3. State Assignment: The leader collects the metadata from all the members
of the group and assigns state.
4. Group Stabilization: Each member receives the state assigned by the
leader and begins processing.
To leverage this protocol, an implementation must define the format of
metadata provided by each member for group registration in
:meth:`.group_protocols` and the format of the state assignment provided by
the leader in :meth:`._perform_assignment` and which becomes available to
members in :meth:`._on_join_complete`.
Note on locking: this class shares state between the caller and a background
thread which is used for sending heartbeats after the client has joined the
group. All mutable state as well as state transitions are protected with the
class's monitor. Generally this means acquiring the lock before reading or
writing the state of the group (e.g. generation, member_id) and holding the
lock when sending a request that affects the state of the group
(e.g. JoinGroup, LeaveGroup).
"""
DEFAULT_CONFIG = {
'group_id': 'kafka-python-default-group',
'session_timeout_ms': 10000,
'heartbeat_interval_ms': 3000,
'max_poll_interval_ms': 300000,
'retry_backoff_ms': 100,
'api_version': (0, 10, 1),
'metric_group_prefix': '',
}
def __init__(self, client, metrics, **configs):
"""
Keyword Arguments:
group_id (str): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. Default: 'kafka-python-default-group'
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. Default: 10000
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
"""
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
if self.config['api_version'] < (0, 10, 1):
if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
                raise Errors.KafkaConfigurationError("Broker version %s does not support "
                                                     "different values for max_poll_interval_ms "
                                                     "and session_timeout_ms"
                                                     % (self.config['api_version'],))
self._client = client
self.group_id = self.config['group_id']
self.heartbeat = Heartbeat(**self.config)
self._heartbeat_thread = None
self._lock = threading.Condition()
self.rejoin_needed = True
self.rejoining = False # renamed / complement of java needsJoinPrepare
self.state = MemberState.UNJOINED
self.join_future = None
self.coordinator_id = None
self._find_coordinator_future = None
self._generation = Generation.NO_GENERATION
self.sensors = GroupCoordinatorMetrics(self.heartbeat, metrics,
self.config['metric_group_prefix'])
@abc.abstractmethod
def protocol_type(self):
"""
Unique identifier for the class of supported protocols
(e.g. "consumer" or "connect").
Returns:
str: protocol type name
"""
pass
@abc.abstractmethod
def group_protocols(self):
"""Return the list of supported group protocols and metadata.
This list is submitted by each group member via a JoinGroupRequest.
The order of the protocols in the list indicates the preference of the
protocol (the first entry is the most preferred). The coordinator takes
this preference into account when selecting the generation protocol
(generally more preferred protocols will be selected as long as all
members support them and there is no disagreement on the preference).
Note: metadata must be type bytes or support an encode() method
Returns:
list: [(protocol, metadata), ...]
"""
pass
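    # Illustrative return value (not part of the original source), assuming the
    # subclass supports two assignment protocols and already has their metadata
    # encoded as bytes; the first entry is the most preferred:
    #
    #     [('range', range_metadata_bytes), ('roundrobin', roundrobin_metadata_bytes)]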
@abc.abstractmethod
def _on_join_prepare(self, generation, member_id):
"""Invoked prior to each group join or rejoin.
This is typically used to perform any cleanup from the previous
generation (such as committing offsets for the consumer)
Arguments:
generation (int): The previous generation or -1 if there was none
member_id (str): The identifier of this member in the previous group
or '' if there was none
"""
pass
@abc.abstractmethod
def _perform_assignment(self, leader_id, protocol, members):
"""Perform assignment for the group.
This is used by the leader to push state to all the members of the group
(e.g. to push partition assignments in the case of the new consumer)
Arguments:
leader_id (str): The id of the leader (which is this member)
protocol (str): the chosen group protocol (assignment strategy)
members (list): [(member_id, metadata_bytes)] from
JoinGroupResponse. metadata_bytes are associated with the chosen
group protocol, and the Coordinator subclass is responsible for
decoding metadata_bytes based on that protocol.
Returns:
dict: {member_id: assignment}; assignment must either be bytes
or have an encode() method to convert to bytes
"""
pass
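    # Illustrative return value (not part of the original source): one entry per
    # member listed in the JoinGroupResponse, e.g.
    #
    #     {'member-1': assignment_bytes_1, 'member-2': assignment_bytes_2}
    #
    # where each value is bytes or an object with an encode() method.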
@abc.abstractmethod
def _on_join_complete(self, generation, member_id, protocol,
member_assignment_bytes):
"""Invoked when a group member has successfully joined a group.
Arguments:
generation (int): the generation that was joined
member_id (str): the identifier for the local member in the group
protocol (str): the protocol selected by the coordinator
member_assignment_bytes (bytes): the protocol-encoded assignment
propagated from the group leader. The Coordinator instance is
responsible for decoding based on the chosen protocol.
"""
pass
def coordinator_unknown(self):
"""Check if we know who the coordinator is and have an active connection
Side-effect: reset coordinator_id to None if connection failed
Returns:
bool: True if the coordinator is unknown
"""
return self.coordinator() is None
def coordinator(self):
"""Get the current coordinator
Returns: the current coordinator id or None if it is unknown
"""
if self.coordinator_id is None:
return None
elif self._client.is_disconnected(self.coordinator_id):
self.coordinator_dead('Node Disconnected')
return None
else:
return self.coordinator_id
def ensure_coordinator_ready(self):
"""Block until the coordinator for this group is known
(and we have an active connection -- java client uses unsent queue).
"""
with self._lock:
while self.coordinator_unknown():
# Prior to 0.8.2 there was no group coordinator
# so we will just pick a node at random and treat
# it as the "coordinator"
if self.config['api_version'] < (0, 8, 2):
self.coordinator_id = self._client.least_loaded_node()
if self.coordinator_id is not None:
self._client.maybe_connect(self.coordinator_id)
continue
future = self.lookup_coordinator()
self._client.poll(future=future)
if future.failed():
if future.retriable():
if getattr(future.exception, 'invalid_metadata', False):
log.debug('Requesting metadata for group coordinator request: %s', future.exception)
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update)
else:
time.sleep(self.config['retry_backoff_ms'] / 1000)
else:
raise future.exception # pylint: disable-msg=raising-bad-type
def _reset_find_coordinator_future(self, result):
self._find_coordinator_future = None
def lookup_coordinator(self):
with self._lock:
if self._find_coordinator_future is not None:
return self._find_coordinator_future
# If there is an error sending the group coordinator request
# then _reset_find_coordinator_future will immediately fire and
# set _find_coordinator_future = None
# To avoid returning None, we capture the future in a local variable
future = self._send_group_coordinator_request()
self._find_coordinator_future = future
self._find_coordinator_future.add_both(self._reset_find_coordinator_future)
return future
def need_rejoin(self):
"""Check whether the group should be rejoined (e.g. if metadata changes)
Returns:
bool: True if it should, False otherwise
"""
return self.rejoin_needed
def poll_heartbeat(self):
"""
Check the status of the heartbeat thread (if it is active) and indicate
the liveness of the client. This must be called periodically after
joining with :meth:`.ensure_active_group` to ensure that the member stays
in the group. If an interval of time longer than the provided rebalance
timeout (max_poll_interval_ms) expires without calling this method, then
the client will proactively leave the group.
Raises: RuntimeError for unexpected errors raised from the heartbeat thread
"""
with self._lock:
if self._heartbeat_thread is not None:
if self._heartbeat_thread.failed:
# set the heartbeat thread to None and raise an exception.
# If the user catches it, the next call to ensure_active_group()
# will spawn a new heartbeat thread.
cause = self._heartbeat_thread.failed
self._heartbeat_thread = None
raise cause # pylint: disable-msg=raising-bad-type
            # Wake the heartbeat thread if needed
if self.heartbeat.should_heartbeat():
self._lock.notify()
self.heartbeat.poll()
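    # Illustrative caller loop (not part of the original source): after
    # ensure_active_group(), a consumer-style caller is expected to keep the
    # member alive roughly like this, bounding its poll timeout with
    # time_to_next_heartbeat(); `running`, `poll_timeout` and `client` are
    # placeholders:
    #
    #     coordinator.ensure_active_group()
    #     while running:
    #         coordinator.poll_heartbeat()
    #         timeout = min(poll_timeout, coordinator.time_to_next_heartbeat())
    #         client.poll(timeout_ms=timeout * 1000)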
def time_to_next_heartbeat(self):
"""Returns seconds (float) remaining before next heartbeat should be sent
Note: Returns infinite if group is not joined
"""
with self._lock:
# if we have not joined the group, we don't need to send heartbeats
if self.state is MemberState.UNJOINED:
return float('inf')
return self.heartbeat.time_to_next_heartbeat()
def _handle_join_success(self, member_assignment_bytes):
with self._lock:
log.info("Successfully joined group %s with generation %s",
self.group_id, self._generation.generation_id)
self.state = MemberState.STABLE
self.rejoin_needed = False
if self._heartbeat_thread:
self._heartbeat_thread.enable()
def _handle_join_failure(self, _):
with self._lock:
self.state = MemberState.UNJOINED
def ensure_active_group(self):
"""Ensure that the group is active (i.e. joined and synced)"""
with self._lock:
if self._heartbeat_thread is None:
self._start_heartbeat_thread()
while self.need_rejoin() or self._rejoin_incomplete():
self.ensure_coordinator_ready()
# call on_join_prepare if needed. We set a flag
# to make sure that we do not call it a second
# time if the client is woken up before a pending
# rebalance completes. This must be called on each
# iteration of the loop because an event requiring
# a rebalance (such as a metadata refresh which
# changes the matched subscription set) can occur
# while another rebalance is still in progress.
if not self.rejoining:
self._on_join_prepare(self._generation.generation_id,
self._generation.member_id)
self.rejoining = True
# ensure that there are no pending requests to the coordinator.
# This is important in particular to avoid resending a pending
# JoinGroup request.
while not self.coordinator_unknown():
if not self._client.in_flight_request_count(self.coordinator_id):
break
self._client.poll()
else:
continue
# we store the join future in case we are woken up by the user
# after beginning the rebalance in the call to poll below.
# This ensures that we do not mistakenly attempt to rejoin
# before the pending rebalance has completed.
if self.join_future is None:
# Fence off the heartbeat thread explicitly so that it cannot
# interfere with the join group. Note that this must come after
# the call to _on_join_prepare since we must be able to continue
# sending heartbeats if that callback takes some time.
self._heartbeat_thread.disable()
self.state = MemberState.REBALANCING
future = self._send_join_group_request()
self.join_future = future # this should happen before adding callbacks
# handle join completion in the callback so that the
# callback will be invoked even if the consumer is woken up
# before finishing the rebalance
future.add_callback(self._handle_join_success)
# we handle failures below after the request finishes.
# If the join completes after having been woken up, the
# exception is ignored and we will rejoin
future.add_errback(self._handle_join_failure)
else:
future = self.join_future
self._client.poll(future=future)
if future.succeeded():
self._on_join_complete(self._generation.generation_id,
self._generation.member_id,
self._generation.protocol,
future.value)
self.join_future = None
self.rejoining = False
else:
self.join_future = None
exception = future.exception
if isinstance(exception, (Errors.UnknownMemberIdError,
Errors.RebalanceInProgressError,
Errors.IllegalGenerationError)):
continue
elif not future.retriable():
raise exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000)
def _rejoin_incomplete(self):
return self.join_future is not None
def _send_join_group_request(self):
"""Join the group and return the assignment for the next generation.
This function handles both JoinGroup and SyncGroup, delegating to
:meth:`._perform_assignment` if elected leader by the coordinator.
Returns:
Future: resolves to the encoded-bytes assignment returned from the
group leader
"""
if self.coordinator_unknown():
e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
return Future().failure(e)
elif not self._client.ready(self.coordinator_id, metadata_priority=False):
e = Errors.NodeNotReadyError(self.coordinator_id)
return Future().failure(e)
# send a join group request to the coordinator
log.info("(Re-)joining group %s", self.group_id)
member_metadata = [
(protocol, metadata if isinstance(metadata, bytes) else metadata.encode())
for protocol, metadata in self.group_protocols()
]
if self.config['api_version'] < (0, 9):
raise Errors.KafkaError('JoinGroupRequest api requires 0.9+ brokers')
elif (0, 9) <= self.config['api_version'] < (0, 10, 1):
request = JoinGroupRequest[0](
self.group_id,
self.config['session_timeout_ms'],
self._generation.member_id,
self.protocol_type(),
member_metadata)
elif (0, 10, 1) <= self.config['api_version'] < (0, 11, 0):
request = JoinGroupRequest[1](
self.group_id,
self.config['session_timeout_ms'],
self.config['max_poll_interval_ms'],
self._generation.member_id,
self.protocol_type(),
member_metadata)
else:
request = JoinGroupRequest[2](
self.group_id,
self.config['session_timeout_ms'],
self.config['max_poll_interval_ms'],
self._generation.member_id,
self.protocol_type(),
member_metadata)
# create the request for the coordinator
log.debug("Sending JoinGroup (%s) to coordinator %s", request, self.coordinator_id)
future = Future()
_f = self._client.send(self.coordinator_id, request)
_f.add_callback(self._handle_join_group_response, future, time.time())
_f.add_errback(self._failed_request, self.coordinator_id,
request, future)
return future
def _failed_request(self, node_id, request, future, error):
# Marking coordinator dead
# unless the error is caused by internal client pipelining
if not isinstance(error, (Errors.NodeNotReadyError,
Errors.TooManyInFlightRequests)):
log.error('Error sending %s to node %s [%s]',
request.__class__.__name__, node_id, error)
self.coordinator_dead(error)
else:
log.debug('Error sending %s to node %s [%s]',
request.__class__.__name__, node_id, error)
future.failure(error)
def _handle_join_group_response(self, future, send_time, response):
error_type = Errors.for_code(response.error_code)
if error_type is Errors.NoError:
log.debug("Received successful JoinGroup response for group %s: %s",
self.group_id, response)
self.sensors.join_latency.record((time.time() - send_time) * 1000)
with self._lock:
if self.state is not MemberState.REBALANCING:
# if the consumer was woken up before a rebalance completes,
# we may have already left the group. In this case, we do
# not want to continue with the sync group.
future.failure(UnjoinedGroupException())
else:
self._generation = Generation(response.generation_id,
response.member_id,
response.group_protocol)
if response.leader_id == response.member_id:
log.info("Elected group leader -- performing partition"
" assignments using %s", self._generation.protocol)
self._on_join_leader(response).chain(future)
else:
self._on_join_follower().chain(future)
elif error_type is Errors.GroupLoadInProgressError:
log.debug("Attempt to join group %s rejected since coordinator %s"
" is loading the group.", self.group_id, self.coordinator_id)
# backoff and retry
future.failure(error_type(response))
elif error_type is Errors.UnknownMemberIdError:
# reset the member id and retry immediately
error = error_type(self._generation.member_id)
self.reset_generation()
log.debug("Attempt to join group %s failed due to unknown member id",
self.group_id)
future.failure(error)
elif error_type in (Errors.GroupCoordinatorNotAvailableError,
Errors.NotCoordinatorForGroupError):
# re-discover the coordinator and retry with backoff
self.coordinator_dead(error_type())
log.debug("Attempt to join group %s failed due to obsolete "
"coordinator information: %s", self.group_id,
error_type.__name__)
future.failure(error_type())
elif error_type in (Errors.InconsistentGroupProtocolError,
Errors.InvalidSessionTimeoutError,
Errors.InvalidGroupIdError):
# log the error and re-throw the exception
error = error_type(response)
log.error("Attempt to join group %s failed due to fatal error: %s",
self.group_id, error)
future.failure(error)
elif error_type is Errors.GroupAuthorizationFailedError:
future.failure(error_type(self.group_id))
else:
# unexpected error, throw the exception
error = error_type()
log.error("Unexpected error in join group response: %s", error)
future.failure(error)
def _on_join_follower(self):
# send follower's sync group with an empty assignment
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = SyncGroupRequest[version](
self.group_id,
self._generation.generation_id,
self._generation.member_id,
{})
log.debug("Sending follower SyncGroup for group %s to coordinator %s: %s",
self.group_id, self.coordinator_id, request)
return self._send_sync_group_request(request)
def _on_join_leader(self, response):
"""
Perform leader synchronization and send back the assignment
for the group via SyncGroupRequest
Arguments:
response (JoinResponse): broker response to parse
Returns:
Future: resolves to member assignment encoded-bytes
"""
try:
group_assignment = self._perform_assignment(response.leader_id,
response.group_protocol,
response.members)
except Exception as e:
return Future().failure(e)
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = SyncGroupRequest[version](
self.group_id,
self._generation.generation_id,
self._generation.member_id,
[(member_id,
assignment if isinstance(assignment, bytes) else assignment.encode())
for member_id, assignment in six.iteritems(group_assignment)])
log.debug("Sending leader SyncGroup for group %s to coordinator %s: %s",
self.group_id, self.coordinator_id, request)
return self._send_sync_group_request(request)
def _send_sync_group_request(self, request):
if self.coordinator_unknown():
e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
return Future().failure(e)
# We assume that coordinator is ready if we're sending SyncGroup
# as it typically follows a successful JoinGroup
# Also note that if client.ready() enforces a metadata priority policy,
# we can get into an infinite loop if the leader assignment process
# itself requests a metadata update
future = Future()
_f = self._client.send(self.coordinator_id, request)
_f.add_callback(self._handle_sync_group_response, future, time.time())
_f.add_errback(self._failed_request, self.coordinator_id,
request, future)
return future
def _handle_sync_group_response(self, future, send_time, response):
error_type = Errors.for_code(response.error_code)
if error_type is Errors.NoError:
self.sensors.sync_latency.record((time.time() - send_time) * 1000)
future.success(response.member_assignment)
return
# Always rejoin on error
self.request_rejoin()
if error_type is Errors.GroupAuthorizationFailedError:
future.failure(error_type(self.group_id))
elif error_type is Errors.RebalanceInProgressError:
log.debug("SyncGroup for group %s failed due to coordinator"
" rebalance", self.group_id)
future.failure(error_type(self.group_id))
elif error_type in (Errors.UnknownMemberIdError,
Errors.IllegalGenerationError):
error = error_type()
log.debug("SyncGroup for group %s failed due to %s", self.group_id, error)
self.reset_generation()
future.failure(error)
elif error_type in (Errors.GroupCoordinatorNotAvailableError,
Errors.NotCoordinatorForGroupError):
error = error_type()
log.debug("SyncGroup for group %s failed due to %s", self.group_id, error)
self.coordinator_dead(error)
future.failure(error)
else:
error = error_type()
log.error("Unexpected error from SyncGroup: %s", error)
future.failure(error)
def _send_group_coordinator_request(self):
"""Discover the current coordinator for the group.
Returns:
Future: resolves to the node id of the coordinator
"""
node_id = self._client.least_loaded_node()
if node_id is None:
return Future().failure(Errors.NoBrokersAvailable())
elif not self._client.ready(node_id, metadata_priority=False):
e = Errors.NodeNotReadyError(node_id)
return Future().failure(e)
log.debug("Sending group coordinator request for group %s to broker %s",
self.group_id, node_id)
request = GroupCoordinatorRequest[0](self.group_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_group_coordinator_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future
def _handle_group_coordinator_response(self, future, response):
log.debug("Received group coordinator response %s", response)
error_type = Errors.for_code(response.error_code)
if error_type is Errors.NoError:
with self._lock:
coordinator_id = self._client.cluster.add_group_coordinator(self.group_id, response)
if not coordinator_id:
# This could happen if coordinator metadata is different
# than broker metadata
future.failure(Errors.IllegalStateError())
return
self.coordinator_id = coordinator_id
log.info("Discovered coordinator %s for group %s",
self.coordinator_id, self.group_id)
self._client.maybe_connect(self.coordinator_id)
self.heartbeat.reset_timeouts()
future.success(self.coordinator_id)
elif error_type is Errors.GroupCoordinatorNotAvailableError:
log.debug("Group Coordinator Not Available; retry")
future.failure(error_type())
elif error_type is Errors.GroupAuthorizationFailedError:
error = error_type(self.group_id)
log.error("Group Coordinator Request failed: %s", error)
future.failure(error)
else:
error = error_type()
log.error("Group coordinator lookup for group %s failed: %s",
self.group_id, error)
future.failure(error)
def coordinator_dead(self, error):
"""Mark the current coordinator as dead."""
if self.coordinator_id is not None:
log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
self.coordinator_id, self.group_id, error)
self.coordinator_id = None
def generation(self):
"""Get the current generation state if the group is stable.
Returns: the current generation or None if the group is unjoined/rebalancing
"""
with self._lock:
if self.state is not MemberState.STABLE:
return None
return self._generation
def reset_generation(self):
"""Reset the generation and memberId because we have fallen out of the group."""
with self._lock:
self._generation = Generation.NO_GENERATION
self.rejoin_needed = True
self.state = MemberState.UNJOINED
def request_rejoin(self):
self.rejoin_needed = True
def _start_heartbeat_thread(self):
if self._heartbeat_thread is None:
log.info('Starting new heartbeat thread')
self._heartbeat_thread = HeartbeatThread(weakref.proxy(self))
self._heartbeat_thread.daemon = True
self._heartbeat_thread.start()
def _close_heartbeat_thread(self):
if self._heartbeat_thread is not None:
log.info('Stopping heartbeat thread')
try:
self._heartbeat_thread.close()
except ReferenceError:
pass
self._heartbeat_thread = None
def __del__(self):
self._close_heartbeat_thread()
def close(self):
"""Close the coordinator, leave the current group,
and reset local generation / member_id"""
self._close_heartbeat_thread()
self.maybe_leave_group()
def maybe_leave_group(self):
"""Leave the current group and reset local generation/memberId."""
with self._lock:
if (not self.coordinator_unknown()
and self.state is not MemberState.UNJOINED
and self._generation is not Generation.NO_GENERATION):
# this is a minimal effort attempt to leave the group. we do not
# attempt any resending if the request fails or times out.
log.info('Leaving consumer group (%s).', self.group_id)
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = LeaveGroupRequest[version](self.group_id, self._generation.member_id)
future = self._client.send(self.coordinator_id, request)
future.add_callback(self._handle_leave_group_response)
future.add_errback(log.error, "LeaveGroup request failed: %s")
self._client.poll(future=future)
self.reset_generation()
def _handle_leave_group_response(self, response):
error_type = Errors.for_code(response.error_code)
if error_type is Errors.NoError:
log.debug("LeaveGroup request for group %s returned successfully",
self.group_id)
else:
log.error("LeaveGroup request for group %s failed with error: %s",
self.group_id, error_type())
def _send_heartbeat_request(self):
"""Send a heartbeat request"""
if self.coordinator_unknown():
e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
return Future().failure(e)
elif not self._client.ready(self.coordinator_id, metadata_priority=False):
e = Errors.NodeNotReadyError(self.coordinator_id)
return Future().failure(e)
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = HeartbeatRequest[version](self.group_id,
self._generation.generation_id,
self._generation.member_id)
log.debug("Heartbeat: %s[%s] %s", request.group, request.generation_id, request.member_id) # pylint: disable-msg=no-member
future = Future()
_f = self._client.send(self.coordinator_id, request)
_f.add_callback(self._handle_heartbeat_response, future, time.time())
_f.add_errback(self._failed_request, self.coordinator_id,
request, future)
return future
def _handle_heartbeat_response(self, future, send_time, response):
self.sensors.heartbeat_latency.record((time.time() - send_time) * 1000)
error_type = Errors.for_code(response.error_code)
if error_type is Errors.NoError:
log.debug("Received successful heartbeat response for group %s",
self.group_id)
future.success(None)
elif error_type in (Errors.GroupCoordinatorNotAvailableError,
Errors.NotCoordinatorForGroupError):
log.warning("Heartbeat failed for group %s: coordinator (node %s)"
" is either not started or not valid", self.group_id,
self.coordinator())
self.coordinator_dead(error_type())
future.failure(error_type())
elif error_type is Errors.RebalanceInProgressError:
log.warning("Heartbeat failed for group %s because it is"
" rebalancing", self.group_id)
self.request_rejoin()
future.failure(error_type())
elif error_type is Errors.IllegalGenerationError:
log.warning("Heartbeat failed for group %s: generation id is not "
" current.", self.group_id)
self.reset_generation()
future.failure(error_type())
elif error_type is Errors.UnknownMemberIdError:
log.warning("Heartbeat: local member_id was not recognized;"
" this consumer needs to re-join")
self.reset_generation()
            future.failure(error_type())
elif error_type is Errors.GroupAuthorizationFailedError:
error = error_type(self.group_id)
log.error("Heartbeat failed: authorization error: %s", error)
future.failure(error)
else:
error = error_type()
log.error("Heartbeat failed: Unhandled error: %s", error)
future.failure(error)
class GroupCoordinatorMetrics(object):
def __init__(self, heartbeat, metrics, prefix, tags=None):
self.heartbeat = heartbeat
self.metrics = metrics
self.metric_group_name = prefix + "-coordinator-metrics"
self.heartbeat_latency = metrics.sensor('heartbeat-latency')
self.heartbeat_latency.add(metrics.metric_name(
'heartbeat-response-time-max', self.metric_group_name,
'The max time taken to receive a response to a heartbeat request',
tags), Max())
self.heartbeat_latency.add(metrics.metric_name(
'heartbeat-rate', self.metric_group_name,
'The average number of heartbeats per second',
tags), Rate(sampled_stat=Count()))
self.join_latency = metrics.sensor('join-latency')
self.join_latency.add(metrics.metric_name(
'join-time-avg', self.metric_group_name,
'The average time taken for a group rejoin',
tags), Avg())
self.join_latency.add(metrics.metric_name(
'join-time-max', self.metric_group_name,
'The max time taken for a group rejoin',
tags), Max())
self.join_latency.add(metrics.metric_name(
'join-rate', self.metric_group_name,
'The number of group joins per second',
tags), Rate(sampled_stat=Count()))
self.sync_latency = metrics.sensor('sync-latency')
self.sync_latency.add(metrics.metric_name(
'sync-time-avg', self.metric_group_name,
'The average time taken for a group sync',
tags), Avg())
self.sync_latency.add(metrics.metric_name(
'sync-time-max', self.metric_group_name,
'The max time taken for a group sync',
tags), Max())
self.sync_latency.add(metrics.metric_name(
'sync-rate', self.metric_group_name,
'The number of group syncs per second',
tags), Rate(sampled_stat=Count()))
metrics.add_metric(metrics.metric_name(
'last-heartbeat-seconds-ago', self.metric_group_name,
            'The number of seconds since the last coordinator heartbeat was sent',
tags), AnonMeasurable(
lambda _, now: (now / 1000) - self.heartbeat.last_send))
class HeartbeatThread(threading.Thread):
def __init__(self, coordinator):
super(HeartbeatThread, self).__init__()
self.name = coordinator.group_id + '-heartbeat'
self.coordinator = coordinator
self.enabled = False
self.closed = False
self.failed = None
def enable(self):
with self.coordinator._lock:
self.enabled = True
self.coordinator.heartbeat.reset_timeouts()
self.coordinator._lock.notify()
def disable(self):
self.enabled = False
def close(self):
self.closed = True
with self.coordinator._lock:
self.coordinator._lock.notify()
if self.is_alive():
self.join(self.coordinator.config['heartbeat_interval_ms'] / 1000)
if self.is_alive():
log.warning("Heartbeat thread did not fully terminate during close")
def run(self):
try:
log.debug('Heartbeat thread started')
while not self.closed:
self._run_once()
except ReferenceError:
log.debug('Heartbeat thread closed due to coordinator gc')
except RuntimeError as e:
log.error("Heartbeat thread for group %s failed due to unexpected error: %s",
self.coordinator.group_id, e)
self.failed = e
finally:
log.debug('Heartbeat thread closed')
def _run_once(self):
with self.coordinator._lock:
if not self.enabled:
log.debug('Heartbeat disabled. Waiting')
self.coordinator._lock.wait()
log.debug('Heartbeat re-enabled.')
return
if self.coordinator.state is not MemberState.STABLE:
# the group is not stable (perhaps because we left the
# group or because the coordinator kicked us out), so
# disable heartbeats and wait for the main thread to rejoin.
log.debug('Group state is not stable, disabling heartbeats')
self.disable()
return
# TODO: When consumer.wakeup() is implemented, we need to
# disable here to prevent propagating an exception to this
# heartbeat thread
self.coordinator._client.poll(timeout_ms=0)
if self.coordinator.coordinator_unknown():
future = self.coordinator.lookup_coordinator()
if not future.is_done or future.failed():
# the immediate future check ensures that we backoff
# properly in the case that no brokers are available
# to connect to (and the future is automatically failed).
self.coordinator._lock.wait(self.coordinator.config['retry_backoff_ms'] / 1000)
elif self.coordinator.heartbeat.session_timeout_expired():
# the session timeout has expired without seeing a
# successful heartbeat, so we should probably make sure
# the coordinator is still healthy.
log.warning('Heartbeat session expired, marking coordinator dead')
self.coordinator.coordinator_dead('Heartbeat session expired')
elif self.coordinator.heartbeat.poll_timeout_expired():
# the poll timeout has expired, which means that the
# foreground thread has stalled in between calls to
# poll(), so we explicitly leave the group.
log.warning('Heartbeat poll expired, leaving group')
self.coordinator.maybe_leave_group()
elif not self.coordinator.heartbeat.should_heartbeat():
# poll again after waiting for the retry backoff in case
# the heartbeat failed or the coordinator disconnected
log.log(0, 'Not ready to heartbeat, waiting')
self.coordinator._lock.wait(self.coordinator.config['retry_backoff_ms'] / 1000)
else:
self.coordinator.heartbeat.sent_heartbeat()
future = self.coordinator._send_heartbeat_request()
future.add_callback(self._handle_heartbeat_success)
future.add_errback(self._handle_heartbeat_failure)
def _handle_heartbeat_success(self, result):
with self.coordinator._lock:
self.coordinator.heartbeat.received_heartbeat()
def _handle_heartbeat_failure(self, exception):
with self.coordinator._lock:
if isinstance(exception, Errors.RebalanceInProgressError):
# it is valid to continue heartbeating while the group is
# rebalancing. This ensures that the coordinator keeps the
# member in the group for as long as the duration of the
# rebalance timeout. If we stop sending heartbeats, however,
# then the session timeout may expire before we can rejoin.
self.coordinator.heartbeat.received_heartbeat()
else:
self.coordinator.heartbeat.fail_heartbeat()
# wake up the thread if it's sleeping to reschedule the heartbeat
self.coordinator._lock.notify()
| {
"repo_name": "scrapinghub/kafka-python",
"path": "kafka/coordinator/base.py",
"copies": "1",
"size": "45793",
"license": "apache-2.0",
"hash": -8793720392441014000,
"line_mean": 43.9391560353,
"line_max": 131,
"alpha_frac": 0.5956150503,
"autogenerated": false,
"ratio": 4.703954802259887,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007307664313413409,
"num_lines": 1019
} |
from __future__ import absolute_import, division
import AppKit
import Quartz
import os
from .pdfContext import PDFContext
from .baseContext import Color
def _nsDataConverter(value):
if isinstance(value, AppKit.NSData):
return value
return AppKit.NSData.dataWithBytes_length_(value, len(value))
def _nsColorConverter(color):
if isinstance(color, AppKit.NSColor):
return color
color = Color(*color)
return color.getNSObject()
def _tiffCompressionConverter(value):
if value is None:
return AppKit.NSTIFFCompressionNone
elif isinstance(value, int):
return value
else:
t = dict(lzw=AppKit.NSTIFFCompressionLZW, packbits=AppKit.NSTIFFCompressionPackBits)
return t.get(value.lower(), AppKit.NSTIFFCompressionNone)
_nsImageOptions = {
    # DrawBot key: (NSImage property key, converter or None, documentation)
"imageColorSyncProfileData": (AppKit.NSImageColorSyncProfileData, _nsDataConverter, "A bytes or NSData object containing the ColorSync profile data."),
"imageTIFFCompressionMethod": (AppKit.NSImageCompressionMethod, _tiffCompressionConverter, "None, or 'lzw' or 'packbits', or an NSTIFFCompression constant"),
"imagePNGGamma": (AppKit.NSImageGamma, None, "The gamma value for the image. It is a floating-point number between 0.0 and 1.0, with 0.0 being black and 1.0 being the maximum color."),
"imagePNGInterlaced": (AppKit.NSImageInterlaced, None, "Boolean value that indicates whether the image should be interlaced."), # XXX doesn't seem to work
"imageJPEGCompressionFactor": (AppKit.NSImageCompressionFactor, None, "A float between 0.0 and 1.0, with 1.0 resulting in no compression and 0.0 resulting in the maximum compression possible"), # number
"imageJPEGProgressive": (AppKit.NSImageProgressive, None, "Boolean that indicates whether the image should use progressive encoding."),
# "imageJPEGEXIFData": (AppKit.NSImageEXIFData, None, ""), # dict XXX Doesn't seem to work
"imageFallbackBackgroundColor": (AppKit.NSImageFallbackBackgroundColor, _nsColorConverter, "The background color to use when writing to an image format (such as JPEG) that doesn't support alpha. The color's alpha value is ignored. The default background color, when this property is not specified, is white. The value of the property should be an NSColor object or a DrawBot RGB color tuple."),
"imageGIFDitherTransparency": (AppKit.NSImageDitherTransparency, None, "Boolean that indicates whether the image is dithered"),
"imageGIFRGBColorTable": (AppKit.NSImageRGBColorTable, _nsDataConverter, "A bytes or NSData object containing the RGB color table."),
}
def getSaveImageOptions(options):
return ImageContext.saveImageOptions + [
(dbKey, _nsImageOptions[dbKey][-1]) for dbKey in options if dbKey in _nsImageOptions
]
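# Illustrative usage (not part of the original source, and assuming the
# top-level drawBot.saveImage() forwards keyword arguments to the matching
# context): the option names listed in _nsImageOptions are passed straight
# through, e.g.
#
#     drawBot.saveImage("out.png", imageResolution=144, imagePNGInterlaced=True)
#     drawBot.saveImage("frames.png", multipage=True)  # frames_1.png, frames_2.png, ...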
class ImageContext(PDFContext):
_saveImageFileTypes = {
"jpg": AppKit.NSJPEGFileType,
"jpeg": AppKit.NSJPEGFileType,
"tiff": AppKit.NSTIFFFileType,
"tif": AppKit.NSTIFFFileType,
"gif": AppKit.NSGIFFileType,
"png": AppKit.NSPNGFileType,
"bmp": AppKit.NSBMPFileType
}
fileExtensions = []
saveImageOptions = [
("imageResolution", "The resolution of the output image in PPI. Default is 72."),
("multipage", "Output a numbered image for each page or frame in the document."),
]
def _writeDataToFile(self, data, path, options):
multipage = options.get("multipage")
if multipage is None:
multipage = False
fileName, fileExt = os.path.splitext(path)
ext = fileExt[1:]
pdfDocument = Quartz.PDFDocument.alloc().initWithData_(data)
firstPage = 0
pageCount = pdfDocument.pageCount()
pathAdd = "_1"
if not multipage:
firstPage = pageCount - 1
pathAdd = ""
outputPaths = []
imageResolution = options.get("imageResolution", 72.0)
properties = {}
for key, value in options.items():
if key in _nsImageOptions:
nsKey, converter, _ = _nsImageOptions[key]
if converter is not None:
value = converter(value)
properties[nsKey] = value
for index in range(firstPage, pageCount):
pool = AppKit.NSAutoreleasePool.alloc().init()
try:
page = pdfDocument.pageAtIndex_(index)
image = AppKit.NSImage.alloc().initWithData_(page.dataRepresentation())
imageRep = _makeBitmapImageRep(image, imageResolution)
imageData = imageRep.representationUsingType_properties_(self._saveImageFileTypes[ext], properties)
imagePath = fileName + pathAdd + fileExt
imageData.writeToFile_atomically_(imagePath, True)
pathAdd = "_%s" % (index + 2)
outputPaths.append(imagePath)
del page, imageRep, imageData
finally:
del pool
return outputPaths
def _makeBitmapImageRep(image, imageResolution=72.0):
"""Construct a bitmap image representation at a given resolution."""
scaleFactor = max(1.0, imageResolution) / 72.0
rep = AppKit.NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
None, # planes
int(image.size().width * scaleFactor), # pixelsWide
int(image.size().height * scaleFactor), # pixelsHigh
8, # bitsPerSample
4, # samplesPerPixel
True, # hasAlpha
False, # isPlanar
AppKit.NSDeviceRGBColorSpace, # colorSpaceName
0, # bytesPerRow
0 # bitsPerPixel
)
rep.setSize_(image.size())
AppKit.NSGraphicsContext.saveGraphicsState()
try:
AppKit.NSGraphicsContext.setCurrentContext_(
AppKit.NSGraphicsContext.graphicsContextWithBitmapImageRep_(rep))
image.drawAtPoint_fromRect_operation_fraction_((0, 0), AppKit.NSZeroRect, AppKit.NSCompositeSourceOver, 1.0)
finally:
AppKit.NSGraphicsContext.restoreGraphicsState()
return rep
# ================================
# = contexts for file extensions =
# ================================
class JPEGContext(ImageContext):
fileExtensions = ["jpg", "jpeg"]
saveImageOptions = getSaveImageOptions([
"imageJPEGCompressionFactor",
"imageJPEGProgressive",
"imageFallbackBackgroundColor",
"imageColorSyncProfileData",
])
class BMPContext(ImageContext):
fileExtensions = ["bmp"]
class PNGContext(ImageContext):
fileExtensions = ["png"]
saveImageOptions = getSaveImageOptions([
"imagePNGGamma",
"imagePNGInterlaced",
"imageColorSyncProfileData",
])
class TIFFContext(ImageContext):
fileExtensions = ["tif", "tiff"]
saveImageOptions = getSaveImageOptions([
"imageTIFFCompressionMethod",
"imageColorSyncProfileData",
])
| {
"repo_name": "schriftgestalt/drawbot",
"path": "drawBot/context/imageContext.py",
"copies": "1",
"size": "7678",
"license": "bsd-2-clause",
"hash": 5102690850530563000,
"line_mean": 41.8938547486,
"line_max": 406,
"alpha_frac": 0.6202135973,
"autogenerated": false,
"ratio": 4.234969663541092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5355183260841092,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import base64
import datetime
import hmac
import json
from hashlib import sha256
import phonenumbers
import pkg_resources
import six
from django.conf import settings
from django_redis import get_redis_connection
from rest_framework.exceptions import AuthenticationFailed
from seed_services_client.identity_store import IdentityStoreApiClient
from seed_services_client.message_sender import MessageSenderApiClient
from seed_services_client.stage_based_messaging import StageBasedMessagingApiClient
from temba_client.v2 import TembaClient
from wabclient import Client as WABClient
from ndoh_hub.auth import CachedTokenAuthentication
from ndoh_hub.constants import ( # noqa:F401
ID_TYPES,
JEMBI_LANGUAGES,
LANGUAGES,
PASSPORT_ORIGINS,
WHATSAPP_LANGUAGE_MAP,
)
sbm_client = StageBasedMessagingApiClient(
api_url=settings.STAGE_BASED_MESSAGING_URL,
auth_token=settings.STAGE_BASED_MESSAGING_TOKEN,
)
is_client = IdentityStoreApiClient(
api_url=settings.IDENTITY_STORE_URL, auth_token=settings.IDENTITY_STORE_TOKEN
)
ms_client = MessageSenderApiClient(
api_url=settings.MESSAGE_SENDER_URL, auth_token=settings.MESSAGE_SENDER_TOKEN
)
wab_client = WABClient(url=settings.ENGAGE_URL)
wab_client.connection.set_token(settings.ENGAGE_TOKEN)
rapidpro = None
if settings.EXTERNAL_REGISTRATIONS_V2:
rapidpro = TembaClient(settings.RAPIDPRO_URL, settings.RAPIDPRO_TOKEN)
VERSION = pkg_resources.require("ndoh-hub")[0].version
redis = get_redis_connection("redis")
def get_identity_msisdn(registrant_id):
"""
Given an identity UUID, returns the msisdn for the identity. Takes into
account default addresses, opted out addresses, and missing identities
or addresses. Returns None when it cannot find an MSISDN address.
"""
identity = is_client.get_identity(registrant_id)
if not identity:
return
msisdns = identity["details"].get("addresses", {}).get("msisdn", {})
identity_msisdn = None
for msisdn, details in msisdns.items():
if "default" in details and details["default"]:
return msisdn
if not ("optedout" in details and details["optedout"]):
identity_msisdn = msisdn
return identity_msisdn
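# Illustrative example (not part of the original module): given an identity
# whose details contain
#
#     {"addresses": {"msisdn": {
#         "+27820001001": {"optedout": True},
#         "+27820001002": {},
#         "+27820001003": {"default": True}}}}
#
# get_identity_msisdn() returns "+27820001003" (a default address wins);
# without the default entry it would return an address that is not opted out.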
def validate_signature(request):
secret = settings.TURN_HMAC_SECRET
try:
signature = request.META["HTTP_X_TURN_HOOK_SIGNATURE"]
except KeyError:
raise AuthenticationFailed("X-Turn-Hook-Signature header required")
h = hmac.new(secret.encode(), request.body, sha256)
if not hmac.compare_digest(base64.b64encode(h.digest()).decode(), signature):
raise AuthenticationFailed("Invalid hook signature")
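# Illustrative helper (not part of the original module): how a webhook caller
# could compute the value validate_signature() above expects.
def _example_turn_signature(secret, body):
    """
    Return the value to send in the X-Turn-Hook-Signature header, assuming
    `secret` is the shared settings.TURN_HMAC_SECRET string and `body` is the
    raw request body as bytes.
    """
    digest = hmac.new(secret.encode(), body, sha256).digest()
    return base64.b64encode(digest).decode()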
def is_valid_uuid(id):
return len(id) == 36 and id[14] == "4" and id[19] in ["a", "b", "8", "9"]
def is_valid_date(date):
try:
datetime.datetime.strptime(date, "%Y-%m-%d")
return True
except Exception:
return False
def is_valid_edd_date(edd):
"""
Checks given Estimated Due Date is in the future but not more than
9 months away
"""
return edd > get_today() and edd < get_today() + datetime.timedelta(weeks=43)
def is_valid_edd(date):
"""
Checks given Estimated Due Date is in the future but not more than
9 months away
"""
if is_valid_date(date):
edd = datetime.datetime.strptime(date, "%Y-%m-%d").date()
return is_valid_edd_date(edd)
return False
def is_valid_baby_dob_date(dob: datetime.date) -> bool:
"""
Checks given baby date of birth is in the past but not more than 2 years old
"""
return dob < get_today() and dob > get_today() - datetime.timedelta(days=365 * 2)
def is_valid_lang(lang):
return lang in LANGUAGES
# TODO 15: Improve validation functions
def is_valid_msisdn(msisdn):
"""A very basic msisdn validation check"""
return msisdn[0] == "+" and len(msisdn) == 12
def is_valid_faccode(faccode):
"""A very basic faccode validation check"""
return len(faccode) >= 1
def is_valid_sanc_no(sanc_no):
"""A very basic sanc_no validation check"""
return len(sanc_no) >= 1
def is_valid_persal_no(persal_no):
"""A very basic persal_no validation check"""
return len(persal_no) >= 1
def is_valid_sa_id_no(sa_id_no):
"""A very basic sa_id_no validation check"""
return len(sa_id_no) == 13
def is_valid_passport_no(passport_no):
"""A very basic passport_no validation check"""
return len(passport_no) >= 1
def is_valid_passport_origin(passport_origin):
"""A passport_origin validation check"""
return passport_origin in PASSPORT_ORIGINS
def is_valid_id_type(id_type):
"""A ID type check"""
return id_type in ID_TYPES
def get_today():
return datetime.date.today()
def get_mom_age(today, mom_dob):
"""Calculate the mother's age in years"""
born = datetime.datetime.strptime(mom_dob, "%Y-%m-%d")
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
def get_available_metrics():
available_metrics = []
available_metrics.extend(settings.METRICS_REALTIME)
available_metrics.extend(settings.METRICS_SCHEDULED)
return available_metrics
def json_decode(data):
"""
Decodes the given JSON as primitives
"""
if isinstance(data, six.binary_type):
data = data.decode("utf-8")
return json.loads(data)
def normalise_msisdn(msisdn: str) -> str:
"""
Takes the MSISDN input, and normalises it to E164 format
"""
return phonenumbers.format_number(
phonenumbers.parse(msisdn, "ZA"), phonenumbers.PhoneNumberFormat.E164
)
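# Example (illustrative): numbers are parsed with the "ZA" region, so
# normalise_msisdn("082 000 1001") returns "+27820001001", while an already
# international "+27820001001" is returned unchanged.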
class TokenAuthQueryString(CachedTokenAuthentication):
"""
Look for the token in the querystring parameter "token"
"""
def authenticate(self, request):
token = request.query_params.get("token", None)
if token is not None:
return self.authenticate_credentials(token)
return None
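# Illustrative request (not part of the original module): with this class in a
# view's authentication_classes, a client can authenticate by appending its DRF
# token to the querystring instead of sending an Authorization header, e.g.
#
#     GET /api/v2/some-endpoint/?token=<auth-token>
#
# (the path here is hypothetical).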
| {
"repo_name": "praekeltfoundation/ndoh-hub",
"path": "ndoh_hub/utils.py",
"copies": "1",
"size": "5990",
"license": "bsd-3-clause",
"hash": -1241683130017856000,
"line_mean": 26.6036866359,
"line_max": 87,
"alpha_frac": 0.6941569282,
"autogenerated": false,
"ratio": 3.4092202618099035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4603377190009903,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import collections
import copy
import errno
import io
import logging
from random import shuffle, uniform
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from kafka.vendor import selectors34 as selectors
import socket
import struct
import sys
import threading
import time
from kafka.vendor import six
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.oauth.abstract import AbstractTokenProvider
from kafka.protocol.admin import SaslHandShakeRequest
from kafka.protocol.commit import OffsetFetchRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.parser import KafkaProtocol
from kafka.protocol.types import Int32, Int8
from kafka.version import __version__
if six.PY2:
ConnectionError = socket.error
TimeoutError = socket.error
BlockingIOError = Exception
log = logging.getLogger(__name__)
DEFAULT_KAFKA_PORT = 9092
SASL_QOP_AUTH = 1
SASL_QOP_AUTH_INT = 2
SASL_QOP_AUTH_CONF = 4
try:
import ssl
ssl_available = True
try:
SSLEOFError = ssl.SSLEOFError
SSLWantReadError = ssl.SSLWantReadError
SSLWantWriteError = ssl.SSLWantWriteError
SSLZeroReturnError = ssl.SSLZeroReturnError
except AttributeError:
# support older ssl libraries
log.debug('Old SSL module detected.'
' SSL error handling may not operate cleanly.'
' Consider upgrading to Python 3.3 or 2.7.9')
SSLEOFError = ssl.SSLError
SSLWantReadError = ssl.SSLError
SSLWantWriteError = ssl.SSLError
SSLZeroReturnError = ssl.SSLError
except ImportError:
# support Python without ssl libraries
ssl_available = False
class SSLWantReadError(Exception):
pass
class SSLWantWriteError(Exception):
pass
# needed for SASL_GSSAPI authentication:
try:
import gssapi
from gssapi.raw.misc import GSSError
except ImportError:
#no gssapi available, will disable gssapi mechanism
gssapi = None
GSSError = None
AFI_NAMES = {
socket.AF_UNSPEC: "unspecified",
socket.AF_INET: "IPv4",
socket.AF_INET6: "IPv6",
}
class ConnectionStates(object):
DISCONNECTING = '<disconnecting>'
DISCONNECTED = '<disconnected>'
CONNECTING = '<connecting>'
HANDSHAKE = '<handshake>'
CONNECTED = '<connected>'
AUTHENTICATING = '<authenticating>'
class BrokerConnection(object):
"""Initialize a Kafka broker connection
Keyword Arguments:
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. To avoid connection storms, a
randomization factor of 0.2 will be applied to the backoff
resulting in a random range between 20% below and 20% above
the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
default: True.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: None.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: None.
ssl_keyfile (str): optional filename containing the client private key.
default: None.
ssl_password (callable, str, bytes, bytearray): optional password or
callable function that returns a password, for decrypting the
client private key. Default: None.
ssl_crlfile (str): optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use.
Accepted values are: (0, 8, 0), (0, 8, 1), (0, 8, 2), (0, 9),
(0, 10). Default: (0, 8, 2)
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER.
sasl_plain_username (str): username for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_plain_password (str): password for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
'client_id': 'kafka-python-' + __version__,
'node_id': 0,
'request_timeout_ms': 30000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': (0, 8, 2), # default to most restrictive
'selector': selectors.DefaultSelector,
'state_change_callback': lambda node_id, sock, conn: True,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
SECURITY_PROTOCOLS = ('PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL')
SASL_MECHANISMS = ('PLAIN', 'GSSAPI', 'OAUTHBEARER')
def __init__(self, host, port, afi, **configs):
self.host = host
self.port = port
self.afi = afi
self._sock_afi = afi
self._sock_addr = None
self._api_versions = None
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self.node_id = self.config.pop('node_id')
if self.config['receive_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_RCVBUF,
self.config['receive_buffer_bytes']))
if self.config['send_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_SNDBUF,
self.config['send_buffer_bytes']))
assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
            'security_protocol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
assert ssl_available, "Python wasn't built with SSL support"
if self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL'):
assert self.config['sasl_mechanism'] in self.SASL_MECHANISMS, (
'sasl_mechanism must be in ' + ', '.join(self.SASL_MECHANISMS))
if self.config['sasl_mechanism'] == 'PLAIN':
assert self.config['sasl_plain_username'] is not None, 'sasl_plain_username required for PLAIN sasl'
assert self.config['sasl_plain_password'] is not None, 'sasl_plain_password required for PLAIN sasl'
if self.config['sasl_mechanism'] == 'GSSAPI':
assert gssapi is not None, 'GSSAPI lib not available'
assert self.config['sasl_kerberos_service_name'] is not None, 'sasl_kerberos_service_name required for GSSAPI sasl'
if self.config['sasl_mechanism'] == 'OAUTHBEARER':
token_provider = self.config['sasl_oauth_token_provider']
assert token_provider is not None, 'sasl_oauth_token_provider required for OAUTHBEARER sasl'
assert callable(getattr(token_provider, "token", None)), 'sasl_oauth_token_provider must implement method #token()'
# This is not a general lock / this class is not generally thread-safe yet
# However, to avoid pushing responsibility for maintaining
# per-connection locks to the upstream client, we will use this lock to
# make sure that access to the protocol buffer is synchronized
# when sends happen on multiple threads
self._lock = threading.Lock()
# the protocol parser instance manages actual tracking of the
# sequence of in-flight requests to responses, which should
# function like a FIFO queue. For additional request data,
# including tracking request futures and timestamps, we
# can use a simple dictionary of correlation_id => request data
self.in_flight_requests = dict()
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
self.state = ConnectionStates.DISCONNECTED
self._reset_reconnect_backoff()
self._sock = None
self._send_buffer = b''
self._ssl_context = None
if self.config['ssl_context'] is not None:
self._ssl_context = self.config['ssl_context']
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = []
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
def _dns_lookup(self):
self._gai = dns_lookup(self.host, self.port, self.afi)
if not self._gai:
log.error('DNS lookup failed for %s:%i (%s)',
self.host, self.port, self.afi)
return False
return True
def _next_afi_sockaddr(self):
if not self._gai:
if not self._dns_lookup():
return
afi, _, __, ___, sockaddr = self._gai.pop(0)
return (afi, sockaddr)
def connect_blocking(self, timeout=float('inf')):
if self.connected():
return True
timeout += time.time()
# First attempt to perform dns lookup
# note that the underlying interface, socket.getaddrinfo,
# has no explicit timeout so we may exceed the user-specified timeout
self._dns_lookup()
# Loop once over all returned dns entries
selector = None
while self._gai:
while time.time() < timeout:
self.connect()
if self.connected():
if selector is not None:
selector.close()
return True
elif self.connecting():
if selector is None:
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_WRITE)
selector.select(1)
elif self.disconnected():
if selector is not None:
selector.close()
selector = None
break
else:
break
return False
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED and not self.blacked_out():
self.last_attempt = time.time()
next_lookup = self._next_afi_sockaddr()
if not next_lookup:
self.close(Errors.KafkaConnectionError('DNS failure'))
return self.state
else:
log.debug('%s: creating new socket', self)
assert self._sock is None
self._sock_afi, self._sock_addr = next_lookup
self._sock = socket.socket(self._sock_afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
self.state = ConnectionStates.CONNECTING
self.config['state_change_callback'](self.node_id, self._sock, self)
log.info('%s: connecting to %s:%d [%s %s]', self, self.host,
self.port, self._sock_addr, AFI_NAMES[self._sock_afi])
if self.state is ConnectionStates.CONNECTING:
# in non-blocking mode, use repeated calls to socket.connect_ex
# to check connection status
ret = None
try:
ret = self._sock.connect_ex(self._sock_addr)
except socket.error as err:
ret = err.errno
# Connection succeeded
if not ret or ret == errno.EISCONN:
log.debug('%s: established TCP connection', self)
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
log.debug('%s: initiating SSL handshake', self)
self.state = ConnectionStates.HANDSHAKE
self.config['state_change_callback'](self.node_id, self._sock, self)
# _wrap_ssl can alter the connection state -- disconnects on failure
self._wrap_ssl()
elif self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
self.config['state_change_callback'](self.node_id, self._sock, self)
else:
# security_protocol PLAINTEXT
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
# Connection failed
# WSAEINVAL == 10022, but errno.WSAEINVAL is not available on non-win systems
elif ret not in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK, 10022):
log.error('Connect attempt to %s returned error %s.'
' Disconnecting.', self, ret)
errstr = errno.errorcode.get(ret, 'UNKNOWN')
self.close(Errors.KafkaConnectionError('{} {}'.format(ret, errstr)))
return self.state
# Needs retry
else:
pass
if self.state is ConnectionStates.HANDSHAKE:
if self._try_handshake():
log.debug('%s: completed SSL handshake.', self)
if self.config['security_protocol'] == 'SASL_SSL':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
else:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state is ConnectionStates.AUTHENTICATING:
assert self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL')
if self._try_authenticate():
# _try_authenticate has side-effects: possibly disconnected on socket errors
if self.state is ConnectionStates.AUTHENTICATING:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state not in (ConnectionStates.CONNECTED,
ConnectionStates.DISCONNECTED):
# Connection timed out
request_timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() > request_timeout + self.last_attempt:
log.error('Connection attempt to %s timed out', self)
self.close(Errors.KafkaConnectionError('timeout'))
return self.state
return self.state
def _wrap_ssl(self):
assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
if self._ssl_context is None:
log.debug('%s: configuring default SSL Context', self)
self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv2 # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv3 # pylint: disable=no-member
self._ssl_context.verify_mode = ssl.CERT_OPTIONAL
if self.config['ssl_check_hostname']:
self._ssl_context.check_hostname = True
if self.config['ssl_cafile']:
log.info('%s: Loading SSL CA from %s', self, self.config['ssl_cafile'])
self._ssl_context.load_verify_locations(self.config['ssl_cafile'])
self._ssl_context.verify_mode = ssl.CERT_REQUIRED
else:
log.info('%s: Loading system default SSL CAs from %s', self, ssl.get_default_verify_paths())
self._ssl_context.load_default_certs()
if self.config['ssl_certfile'] and self.config['ssl_keyfile']:
log.info('%s: Loading SSL Cert from %s', self, self.config['ssl_certfile'])
log.info('%s: Loading SSL Key from %s', self, self.config['ssl_keyfile'])
self._ssl_context.load_cert_chain(
certfile=self.config['ssl_certfile'],
keyfile=self.config['ssl_keyfile'],
password=self.config['ssl_password'])
if self.config['ssl_crlfile']:
if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'):
raise RuntimeError('This version of Python does not support ssl_crlfile!')
log.info('%s: Loading SSL CRL from %s', self, self.config['ssl_crlfile'])
self._ssl_context.load_verify_locations(self.config['ssl_crlfile'])
# pylint: disable=no-member
self._ssl_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
if self.config['ssl_ciphers']:
log.info('%s: Setting SSL Ciphers: %s', self, self.config['ssl_ciphers'])
self._ssl_context.set_ciphers(self.config['ssl_ciphers'])
log.debug('%s: wrapping socket in ssl context', self)
try:
self._sock = self._ssl_context.wrap_socket(
self._sock,
server_hostname=self.host,
do_handshake_on_connect=False)
except ssl.SSLError as e:
log.exception('%s: Failed to wrap socket in SSLContext!', self)
self.close(e)
def _try_handshake(self):
assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
try:
self._sock.do_handshake()
return True
# old ssl in python2.6 will swallow all SSLErrors here...
except (SSLWantReadError, SSLWantWriteError):
pass
except (SSLZeroReturnError, ConnectionError, TimeoutError, SSLEOFError):
log.warning('SSL connection closed by server during handshake.')
self.close(Errors.KafkaConnectionError('SSL connection closed by server during handshake'))
# Other SSLErrors will be raised to user
return False
def _try_authenticate(self):
assert self.config['api_version'] is None or self.config['api_version'] >= (0, 10)
if self._sasl_auth_future is None:
# Build a SaslHandShakeRequest message
request = SaslHandShakeRequest[0](self.config['sasl_mechanism'])
future = Future()
sasl_response = self._send(request)
sasl_response.add_callback(self._handle_sasl_handshake_response, future)
sasl_response.add_errback(lambda f, e: f.failure(e), future)
self._sasl_auth_future = future
for r, f in self.recv():
f.success(r)
# A connection error could trigger close() which will reset the future
if self._sasl_auth_future is None:
return False
elif self._sasl_auth_future.failed():
ex = self._sasl_auth_future.exception
if not isinstance(ex, Errors.KafkaConnectionError):
raise ex # pylint: disable-msg=raising-bad-type
return self._sasl_auth_future.succeeded()
def _handle_sasl_handshake_response(self, future, response):
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
error = error_type(self)
self.close(error=error)
return future.failure(error_type(self))
if self.config['sasl_mechanism'] not in response.enabled_mechanisms:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'Kafka broker does not support %s sasl mechanism. Enabled mechanisms are: %s'
% (self.config['sasl_mechanism'], response.enabled_mechanisms)))
elif self.config['sasl_mechanism'] == 'PLAIN':
return self._try_authenticate_plain(future)
elif self.config['sasl_mechanism'] == 'GSSAPI':
return self._try_authenticate_gssapi(future)
elif self.config['sasl_mechanism'] == 'OAUTHBEARER':
return self._try_authenticate_oauth(future)
else:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'kafka-python does not support SASL mechanism %s' %
self.config['sasl_mechanism']))
def _send_bytes(self, data):
"""Send some data via non-blocking IO
Note: this method is not synchronized internally; you should
always hold the _lock before calling
Returns: number of bytes
Raises: socket exception
"""
total_sent = 0
while total_sent < len(data):
try:
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
except (SSLWantReadError, SSLWantWriteError):
break
except (ConnectionError, TimeoutError) as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break
raise
except BlockingIOError:
if six.PY3:
break
raise
return total_sent
def _send_bytes_blocking(self, data):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
total_sent = 0
try:
while total_sent < len(data):
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
if total_sent != len(data):
raise ConnectionError('Buffer overrun during socket send')
return total_sent
finally:
self._sock.settimeout(0.0)
def _recv_bytes_blocking(self, n):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
try:
data = b''
while len(data) < n:
fragment = self._sock.recv(n - len(data))
if not fragment:
raise ConnectionError('Connection reset during recv')
data += fragment
return data
finally:
self._sock.settimeout(0.0)
def _try_authenticate_plain(self, future):
if self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.warning('%s: Sending username and password in the clear', self)
data = b''
# Send PLAIN credentials per RFC-4616
msg = bytes('\0'.join([self.config['sasl_plain_username'],
self.config['sasl_plain_username'],
self.config['sasl_plain_password']]).encode('utf-8'))
size = Int32.encode(len(msg))
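# Illustrative sketch of the payload built above (credentials here are
# hypothetical, not taken from any real config): with
# sasl_plain_username='alice' and sasl_plain_password='secret', msg is
# b'alice\x00alice\x00secret' (authzid NUL authcid NUL passwd per RFC 4616),
# and the bytes written to the socket below are the 4-byte big-endian length
# prefix followed by that payload, i.e. b'\x00\x00\x00\x12alice\x00alice\x00secret'.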
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
try:
self._send_bytes_blocking(size + msg)
# The server will send a zero sized message (that is Int32(0)) on success.
# The connection is closed on failure
data = self._recv_bytes_blocking(4)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
if data != b'\x00\x00\x00\x00':
error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
return future.failure(error)
log.info('%s: Authenticated as %s via PLAIN', self, self.config['sasl_plain_username'])
return future.success(True)
def _try_authenticate_gssapi(self, future):
kerberos_domain_name = self.config['sasl_kerberos_domain_name'] or self.host
auth_id = self.config['sasl_kerberos_service_name'] + '@' + kerberos_domain_name
gssapi_name = gssapi.Name(
auth_id,
name_type=gssapi.NameType.hostbased_service
).canonicalize(gssapi.MechType.kerberos)
log.debug('%s: GSSAPI name: %s', self, gssapi_name)
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
# Establish security context and negotiate protection level
# For reference RFC 2222, section 7.2.1
try:
# Exchange tokens until authentication either succeeds or fails
client_ctx = gssapi.SecurityContext(name=gssapi_name, usage='initiate')
received_token = None
while not client_ctx.complete:
# calculate an output token from kafka token (or None if first iteration)
output_token = client_ctx.step(received_token)
# pass output token to kafka, or send empty response if the security
# context is complete (output token is None in that case)
if output_token is None:
self._send_bytes_blocking(Int32.encode(0))
else:
msg = output_token
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
# The server will send a token back. Processing of this token either
# establishes a security context, or it needs further token exchange.
# The gssapi will be able to identify the needed next step.
# The connection is closed on failure.
header = self._recv_bytes_blocking(4)
(token_size,) = struct.unpack('>i', header)
received_token = self._recv_bytes_blocking(token_size)
# Process the security layer negotiation token, sent by the server
# once the security context is established.
# unwraps message containing supported protection levels and msg size
msg = client_ctx.unwrap(received_token).message
# Kafka currently doesn't support integrity or confidentiality security layers, so we
# simply set QoP to 'auth' only (first octet). We reuse the max message size proposed
# by the server
msg = Int8.encode(SASL_QOP_AUTH & Int8.decode(io.BytesIO(msg[0:1]))) + msg[1:]
# add authorization identity to the response, GSS-wrap and send it
msg = client_ctx.wrap(msg + auth_id.encode(), False).message
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
except Exception as e:
err = e
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
log.info('%s: Authenticated as %s via GSSAPI', self, gssapi_name)
return future.success(True)
def _try_authenticate_oauth(self, future):
data = b''
msg = bytes(self._build_oauth_client_request().encode("utf-8"))
size = Int32.encode(len(msg))
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
try:
# Send SASL OAuthBearer request with OAuth token
self._send_bytes_blocking(size + msg)
# The server will send a zero sized message (that is Int32(0)) on success.
# The connection is closed on failure
data = self._recv_bytes_blocking(4)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
if data != b'\x00\x00\x00\x00':
error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
return future.failure(error)
log.info('%s: Authenticated via OAuth', self)
return future.success(True)
def _build_oauth_client_request(self):
token_provider = self.config['sasl_oauth_token_provider']
return "n,,\x01auth=Bearer {}{}\x01\x01".format(token_provider.token(), self._token_extensions())
def _token_extensions(self):
"""
Return a string representation of the OPTIONAL key-value pairs that can be sent with an OAUTHBEARER
initial request.
"""
token_provider = self.config['sasl_oauth_token_provider']
# Only run if the #extensions() method is implemented by the client's token provider class
# Builds up a string separated by \x01 via a dict of key value pairs
if callable(getattr(token_provider, "extensions", None)) and len(token_provider.extensions()) > 0:
msg = "\x01".join(["{}={}".format(k, v) for k, v in token_provider.extensions().items()])
return "\x01" + msg
else:
return ""
def blacked_out(self):
"""
Return True if this connection is disconnected and the reconnect
backoff has not yet elapsed, so we can't re-establish a connection yet
"""
if self.state is ConnectionStates.DISCONNECTED:
if time.time() < self.last_attempt + self._reconnect_backoff:
return True
return False
def connection_delay(self):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting or connected, returns a very
large number to handle slow/stalled connections.
"""
time_waited = time.time() - (self.last_attempt or 0)
if self.state is ConnectionStates.DISCONNECTED:
return max(self._reconnect_backoff - time_waited, 0) * 1000
else:
# When connecting or connected, we should be able to delay
# indefinitely since other events (connection or data acked) will
# cause a wakeup once data can be sent.
return float('inf')
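# Worked example of the calculation above (numbers are hypothetical): if the
# connection is DISCONNECTED, the current reconnect backoff is 0.05s and 0.02s
# have elapsed since the last attempt, connection_delay() returns
# (0.05 - 0.02) * 1000 == 30.0 ms; in any connecting/connected state it
# returns float('inf').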
def connected(self):
"""Return True iff socket is connected."""
return self.state is ConnectionStates.CONNECTED
def connecting(self):
"""Returns True if still connecting (this may encompass several
different states, such as SSL handshake, authorization, etc)."""
return self.state in (ConnectionStates.CONNECTING,
ConnectionStates.HANDSHAKE,
ConnectionStates.AUTHENTICATING)
def disconnected(self):
"""Return True iff socket is closed"""
return self.state is ConnectionStates.DISCONNECTED
def _reset_reconnect_backoff(self):
self._failures = 0
self._reconnect_backoff = self.config['reconnect_backoff_ms'] / 1000.0
def _update_reconnect_backoff(self):
# Do not mark as failure if there are more dns entries available to try
if len(self._gai) > 0:
return
if self.config['reconnect_backoff_max_ms'] > self.config['reconnect_backoff_ms']:
self._failures += 1
self._reconnect_backoff = self.config['reconnect_backoff_ms'] * 2 ** (self._failures - 1)
self._reconnect_backoff = min(self._reconnect_backoff, self.config['reconnect_backoff_max_ms'])
self._reconnect_backoff *= uniform(0.8, 1.2)
self._reconnect_backoff /= 1000.0
log.debug('%s: reconnect backoff %s after %s failures', self, self._reconnect_backoff, self._failures)
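# Worked example of the backoff above (settings are hypothetical): with
# reconnect_backoff_ms=50 and reconnect_backoff_max_ms=1000, the third
# consecutive failure yields 50 * 2**2 == 200 ms, capped at 1000 ms, jittered
# by uniform(0.8, 1.2) to roughly 160-240 ms, then stored in seconds
# (~0.16-0.24).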
def _close_socket(self):
if hasattr(self, '_sock') and self._sock is not None:
self._sock.close()
self._sock = None
def __del__(self):
self._close_socket()
def close(self, error=None):
"""Close socket and fail all in-flight-requests.
Arguments:
error (Exception, optional): pending in-flight-requests
will be failed with this exception.
Default: kafka.errors.KafkaConnectionError.
"""
if self.state is ConnectionStates.DISCONNECTED:
return
with self._lock:
if self.state is ConnectionStates.DISCONNECTED:
return
log.info('%s: Closing connection. %s', self, error or '')
self._update_reconnect_backoff()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
self._send_buffer = b''
if error is None:
error = Errors.Cancelled(str(self))
ifrs = list(self.in_flight_requests.items())
self.in_flight_requests.clear()
self.state = ConnectionStates.DISCONNECTED
# To avoid race conditions and/or deadlocks
# keep a reference to the socket but leave it
# open until after the state_change_callback
# This should give clients a chance to deregister
# the socket fd from selectors cleanly.
sock = self._sock
self._sock = None
# drop lock before state change callback and processing futures
self.config['state_change_callback'](self.node_id, sock, self)
sock.close()
for (_correlation_id, (future, _timestamp)) in ifrs:
future.failure(error)
def _can_send_recv(self):
"""Return True iff socket is ready for requests / responses"""
return self.state in (ConnectionStates.AUTHENTICATING,
ConnectionStates.CONNECTED)
def send(self, request, blocking=True):
"""Queue request for async network send, return Future()"""
future = Future()
if self.connecting():
return future.failure(Errors.NodeNotReadyError(str(self)))
elif not self.connected():
return future.failure(Errors.KafkaConnectionError(str(self)))
elif not self.can_send_more():
return future.failure(Errors.TooManyInFlightRequests(str(self)))
return self._send(request, blocking=blocking)
def _send(self, request, blocking=True):
future = Future()
with self._lock:
if not self._can_send_recv():
# In this case, since we created the future above,
# we know there are no callbacks/errbacks that could fire w/
# lock. So failing + returning inline should be safe
return future.failure(Errors.NodeNotReadyError(str(self)))
correlation_id = self._protocol.send_request(request)
log.debug('%s Request %d: %s', self, correlation_id, request)
if request.expect_response():
sent_time = time.time()
assert correlation_id not in self.in_flight_requests, 'Correlation ID already in-flight!'
self.in_flight_requests[correlation_id] = (future, sent_time)
else:
future.success(None)
# Attempt to replicate behavior from prior to introduction of
# send_pending_requests() / async sends
if blocking:
self.send_pending_requests()
return future
def send_pending_requests(self):
"""Attempts to send pending requests messages via blocking IO
If all requests have been sent, return True
Otherwise, if the socket is blocked and there are more bytes to send,
return False.
"""
try:
with self._lock:
if not self._can_send_recv():
return False
data = self._protocol.send_bytes()
total_bytes = self._send_bytes_blocking(data)
if self._sensors:
self._sensors.bytes_sent.record(total_bytes)
return True
except (ConnectionError, TimeoutError) as e:
log.exception("Error sending request data to %s", self)
error = Errors.KafkaConnectionError("%s: %s" % (self, e))
self.close(error=error)
return False
def send_pending_requests_v2(self):
"""Attempts to send pending requests messages via non-blocking IO
If all requests have been sent, return True
Otherwise, if the socket is blocked and there are more bytes to send,
return False.
"""
try:
with self._lock:
if not self._can_send_recv():
return False
# _protocol.send_bytes returns encoded requests to send
# we send them via _send_bytes()
# and hold leftover bytes in _send_buffer
if not self._send_buffer:
self._send_buffer = self._protocol.send_bytes()
total_bytes = 0
if self._send_buffer:
total_bytes = self._send_bytes(self._send_buffer)
self._send_buffer = self._send_buffer[total_bytes:]
if self._sensors:
self._sensors.bytes_sent.record(total_bytes)
# Return True iff send buffer is empty
return len(self._send_buffer) == 0
except (ConnectionError, TimeoutError, Exception) as e:
log.exception("Error sending request data to %s", self)
error = Errors.KafkaConnectionError("%s: %s" % (self, e))
self.close(error=error)
return False
def can_send_more(self):
"""Return True unless there are max_in_flight_requests_per_connection."""
max_ifrs = self.config['max_in_flight_requests_per_connection']
return len(self.in_flight_requests) < max_ifrs
def recv(self):
"""Non-blocking network receive.
Return list of (response, future) tuples
"""
responses = self._recv()
if not responses and self.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
self, self.config['request_timeout_ms'])
self.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
self.config['request_timeout_ms']))
return ()
# augment responses w/ correlation_id, future, and timestamp
for i, (correlation_id, response) in enumerate(responses):
try:
with self._lock:
(future, timestamp) = self.in_flight_requests.pop(correlation_id)
except KeyError:
self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
return ()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
return responses
def _recv(self):
"""Take all available bytes from socket, return list of any responses from parser"""
recvd = []
err = None
with self._lock:
if not self._can_send_recv():
log.warning('%s cannot recv: socket not connected', self)
return ()
while len(recvd) < self.config['sock_chunk_buffer_count']:
try:
data = self._sock.recv(self.config['sock_chunk_bytes'])
# We expect socket.recv to raise an exception if there are no
# bytes available to read from the socket in non-blocking mode.
# but if the socket is disconnected, we will get empty data
# without an exception raised
if not data:
log.error('%s: socket disconnected', self)
err = Errors.KafkaConnectionError('socket disconnected')
break
else:
recvd.append(data)
except (SSLWantReadError, SSLWantWriteError):
break
except (ConnectionError, TimeoutError) as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break
log.exception('%s: Error receiving network data'
' closing socket', self)
err = Errors.KafkaConnectionError(e)
break
except BlockingIOError:
if six.PY3:
break
# For PY2 this is a catchall and should be re-raised
raise
# Only process bytes if there was no connection exception
if err is None:
recvd_data = b''.join(recvd)
if self._sensors:
self._sensors.bytes_received.record(len(recvd_data))
# We need to keep the lock through protocol receipt
# so that we ensure that the processed byte order is the
# same as the received byte order
try:
return self._protocol.receive_bytes(recvd_data)
except Errors.KafkaProtocolError as e:
err = e
self.close(error=err)
return ()
def requests_timed_out(self):
with self._lock:
if self.in_flight_requests:
get_timestamp = lambda v: v[1]
oldest_at = min(map(get_timestamp,
self.in_flight_requests.values()))
timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() >= oldest_at + timeout:
return True
return False
def _handle_api_version_response(self, response):
error_type = Errors.for_code(response.error_code)
assert error_type is Errors.NoError, "API version check failed"
self._api_versions = dict([
(api_key, (min_version, max_version))
for api_key, min_version, max_version in response.api_versions
])
return self._api_versions
def get_api_versions(self):
if self._api_versions is not None:
return self._api_versions
version = self.check_version()
if version < (0, 10, 0):
raise Errors.UnsupportedVersionError(
"ApiVersion not supported by cluster version {} < 0.10.0"
.format(version))
# _api_versions is set as a side effect of check_version() on a cluster
# that supports 0.10.0 or later
return self._api_versions
def _infer_broker_version_from_api_versions(self, api_versions):
# The logic here is to check the list of supported request versions
# in reverse order. As soon as we find one that works, return it
test_cases = [
# format (<broker version>, <needed struct>)
((1, 0, 0), MetadataRequest[5]),
((0, 11, 0), MetadataRequest[4]),
((0, 10, 2), OffsetFetchRequest[2]),
((0, 10, 1), MetadataRequest[2]),
]
# Get the best match of test cases
for broker_version, struct in sorted(test_cases, reverse=True):
if struct.API_KEY not in api_versions:
continue
min_version, max_version = api_versions[struct.API_KEY]
if min_version <= struct.API_VERSION <= max_version:
return broker_version
# We know that ApiVersionResponse is only supported in 0.10+
# so if all else fails, choose that
return (0, 10, 0)
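# Example of the inference above (API key numbers are per the Kafka protocol
# and stated here as an assumption): MetadataRequest uses API_KEY 3, so a
# broker reporting api_versions[3] == (0, 5) matches the first test case
# (MetadataRequest[5].API_VERSION == 5 falls within 0..5) and is inferred as
# (1, 0, 0); a broker whose Metadata support tops out at v2, without
# OffsetFetch v2, falls through to the MetadataRequest[2] case -> (0, 10, 1).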
def check_version(self, timeout=2, strict=False, topics=[]):
"""Attempt to guess the broker version.
Note: This is a blocking call.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
"""
timeout_at = time.time() + timeout
log.info('Probing node %s broker version', self.node_id)
# Monkeypatch some connection configurations to avoid timeouts
override_config = {
'request_timeout_ms': timeout * 1000,
'max_in_flight_requests_per_connection': 5
}
stashed = {}
for key in override_config:
stashed[key] = self.config[key]
self.config[key] = override_config[key]
# kafka kills the connection when it doesn't recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
test_cases = [
# All cases starting from 0.10 will be based on ApiVersionResponse
((0, 10), ApiVersionRequest[0]()),
((0, 9), ListGroupsRequest[0]()),
((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
((0, 8, 0), MetadataRequest[0](topics)),
]
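# Illustrative walk-through of the probe loop below (version numbers follow
# the test_cases above): against a 0.9 broker, ApiVersionRequest[0] is not
# recognized, the broker closes the socket, the follow-up MetadataRequest also
# fails, and the next iteration sends ListGroupsRequest[0], which succeeds, so
# check_version() returns (0, 9).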
for version, request in test_cases:
if not self.connect_blocking(timeout_at - time.time()):
raise Errors.NodeNotReadyError()
f = self.send(request)
# HACK: sleeping to wait for socket to send bytes
time.sleep(0.1)
# when broker receives an unrecognized request API
# it abruptly closes our socket.
# so we attempt to send a second request immediately
# that we believe it will definitely recognize (metadata)
# the attempt to write to a disconnected socket should
# immediately fail and allow us to infer that the prior
# request was unrecognized
mr = self.send(MetadataRequest[0](topics))
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_READ)
while not (f.is_done and mr.is_done):
selector.select(1)
for response, future in self.recv():
future.success(response)
selector.close()
if f.succeeded():
if isinstance(request, ApiVersionRequest[0]):
# Starting from 0.10 kafka broker we determine version
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
log.info('Broker version identified as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
# Only enable strict checking to verify that we understand failure
# modes. For most users, the fact that the request failed should be
# enough to rule out a particular broker version.
if strict:
# If the socket flush hack did not work (which should force the
# connection to close and fail all pending requests), then we
# get a basic Request Timeout. This is not ideal, but we'll deal
if isinstance(f.exception, Errors.RequestTimedOutError):
pass
# 0.9 brokers do not close the socket on unrecognized api
# requests (bug...). In this case we expect to see a correlation
# id mismatch
elif (isinstance(f.exception, Errors.CorrelationIdError) and
version == (0, 10)):
pass
elif six.PY2:
assert isinstance(f.exception.args[0], socket.error)
assert f.exception.args[0].errno in (32, 54, 104)
else:
assert isinstance(f.exception.args[0], ConnectionError)
log.info("Broker is not v%s -- it did not recognize %s",
version, request.__class__.__name__)
else:
raise Errors.UnrecognizedBrokerVersion()
for key in stashed:
self.config[key] = stashed[key]
return version
def __str__(self):
return "<BrokerConnection node_id=%s host=%s:%d %s [%s %s]>" % (
self.node_id, self.host, self.port, self.state,
AFI_NAMES[self._sock_afi], self._sock_addr)
class BrokerConnectionMetrics(object):
def __init__(self, metrics, metric_group_prefix, node_id):
self.metrics = metrics
# Any broker may have registered summary metrics already
# but if not, we need to create them so we can set as parents below
all_conns_transferred = metrics.get_sensor('bytes-sent-received')
if not all_conns_transferred:
metric_group_name = metric_group_prefix + '-metrics'
bytes_transferred = metrics.sensor('bytes-sent-received')
bytes_transferred.add(metrics.metric_name(
'network-io-rate', metric_group_name,
'The average number of network operations (reads or writes) on all'
' connections per second.'), Rate(sampled_stat=Count()))
bytes_sent = metrics.sensor('bytes-sent',
parents=[bytes_transferred])
bytes_sent.add(metrics.metric_name(
'outgoing-byte-rate', metric_group_name,
'The average number of outgoing bytes sent per second to all'
' servers.'), Rate())
bytes_sent.add(metrics.metric_name(
'request-rate', metric_group_name,
'The average number of requests sent per second.'),
Rate(sampled_stat=Count()))
bytes_sent.add(metrics.metric_name(
'request-size-avg', metric_group_name,
'The average size of all requests in the window.'), Avg())
bytes_sent.add(metrics.metric_name(
'request-size-max', metric_group_name,
'The maximum size of any request sent in the window.'), Max())
bytes_received = metrics.sensor('bytes-received',
parents=[bytes_transferred])
bytes_received.add(metrics.metric_name(
'incoming-byte-rate', metric_group_name,
'Bytes/second read off all sockets'), Rate())
bytes_received.add(metrics.metric_name(
'response-rate', metric_group_name,
'Responses received per second.'),
Rate(sampled_stat=Count()))
request_latency = metrics.sensor('request-latency')
request_latency.add(metrics.metric_name(
'request-latency-avg', metric_group_name,
'The average request latency in ms.'),
Avg())
request_latency.add(metrics.metric_name(
'request-latency-max', metric_group_name,
'The maximum request latency in ms.'),
Max())
# if one sensor of the metrics has been registered for the connection,
# then all other sensors should have been registered; and vice versa
node_str = 'node-{0}'.format(node_id)
node_sensor = metrics.get_sensor(node_str + '.bytes-sent')
if not node_sensor:
metric_group_name = metric_group_prefix + '-node-metrics.' + node_str
bytes_sent = metrics.sensor(
node_str + '.bytes-sent',
parents=[metrics.get_sensor('bytes-sent')])
bytes_sent.add(metrics.metric_name(
'outgoing-byte-rate', metric_group_name,
'The average number of outgoing bytes sent per second.'),
Rate())
bytes_sent.add(metrics.metric_name(
'request-rate', metric_group_name,
'The average number of requests sent per second.'),
Rate(sampled_stat=Count()))
bytes_sent.add(metrics.metric_name(
'request-size-avg', metric_group_name,
'The average size of all requests in the window.'),
Avg())
bytes_sent.add(metrics.metric_name(
'request-size-max', metric_group_name,
'The maximum size of any request sent in the window.'),
Max())
bytes_received = metrics.sensor(
node_str + '.bytes-received',
parents=[metrics.get_sensor('bytes-received')])
bytes_received.add(metrics.metric_name(
'incoming-byte-rate', metric_group_name,
'Bytes/second read off node-connection socket'),
Rate())
bytes_received.add(metrics.metric_name(
'response-rate', metric_group_name,
'The average number of responses received per second.'),
Rate(sampled_stat=Count()))
request_time = metrics.sensor(
node_str + '.latency',
parents=[metrics.get_sensor('request-latency')])
request_time.add(metrics.metric_name(
'request-latency-avg', metric_group_name,
'The average request latency in ms.'),
Avg())
request_time.add(metrics.metric_name(
'request-latency-max', metric_group_name,
'The maximum request latency in ms.'),
Max())
self.bytes_sent = metrics.sensor(node_str + '.bytes-sent')
self.bytes_received = metrics.sensor(node_str + '.bytes-received')
self.request_time = metrics.sensor(node_str + '.latency')
def _address_family(address):
"""
Attempt to determine the family of an address (or hostname)
:return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
could not be determined
"""
if address.startswith('[') and address.endswith(']'):
return socket.AF_INET6
for af in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(af, address)
return af
except (ValueError, AttributeError, socket.error):
continue
return socket.AF_UNSPEC
def get_ip_port_afi(host_and_port_str):
"""
Parse the IP and port from a string in the format of:
* host_or_ip <- Can be either IPv4 address literal or hostname/fqdn
* host_or_ipv4:port <- Can be either IPv4 address literal or hostname/fqdn
* [host_or_ip] <- IPv6 address literal
* [host_or_ip]:port <- IPv6 address literal
.. note:: IPv6 address literals with ports *must* be enclosed in brackets
.. note:: If the port is not specified, the default will be returned.
:return: tuple (host, port, afi), afi will be socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC
"""
host_and_port_str = host_and_port_str.strip()
if host_and_port_str.startswith('['):
af = socket.AF_INET6
host, rest = host_and_port_str[1:].split(']')
if rest:
port = int(rest[1:])
else:
port = DEFAULT_KAFKA_PORT
return host, port, af
else:
if ':' not in host_and_port_str:
af = _address_family(host_and_port_str)
return host_and_port_str, DEFAULT_KAFKA_PORT, af
else:
# now we have something with a colon in it and no square brackets. It could be
# either an IPv6 address literal (e.g., "::1") or an IP:port pair or a host:port pair
try:
# if it decodes as an IPv6 address, use that
socket.inet_pton(socket.AF_INET6, host_and_port_str)
return host_and_port_str, DEFAULT_KAFKA_PORT, socket.AF_INET6
except AttributeError:
log.warning('socket.inet_pton not available on this platform.'
' consider `pip install win_inet_pton`')
pass
except (ValueError, socket.error):
# it's a host:port pair
pass
host, port = host_and_port_str.rsplit(':', 1)
port = int(port)
af = _address_family(host)
return host, port, af
def collect_hosts(hosts, randomize=True):
"""
Collects a comma-separated set of hosts (host:port) and optionally
randomize the returned list.
"""
if isinstance(hosts, six.string_types):
hosts = hosts.strip().split(',')
result = []
afi = socket.AF_INET
for host_port in hosts:
host, port, afi = get_ip_port_afi(host_port)
if port < 0:
port = DEFAULT_KAFKA_PORT
result.append((host, port, afi))
if randomize:
shuffle(result)
return result
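# Illustrative sketch (hostnames are hypothetical):
#     collect_hosts('broker1:9092,broker2', randomize=False)
#     -> [('broker1', 9092, socket.AF_UNSPEC), ('broker2', 9092, socket.AF_UNSPEC)]
# where 'broker2' falls back to DEFAULT_KAFKA_PORT because no port was given.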
def is_inet_4_or_6(gai):
"""Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
return gai[0] in (socket.AF_INET, socket.AF_INET6)
def dns_lookup(host, port, afi=socket.AF_UNSPEC):
"""Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
# XXX: all DNS functions in Python are blocking. If we really
# want to be non-blocking here, we need to use a 3rd-party
# library like python-adns, or move resolution onto its
# own thread. This will be subject to the default libc
# name resolution timeout (5s on most Linux boxes)
try:
return list(filter(is_inet_4_or_6,
socket.getaddrinfo(host, port, afi,
socket.SOCK_STREAM)))
except socket.gaierror as ex:
log.warning('DNS lookup failed for %s:%d,'
' exception was %s. Is your'
' advertised.listeners (called'
' advertised.host.name before Kafka 9)'
' correct and resolvable?',
host, port, ex)
return []
| {
"repo_name": "Yelp/kafka-python",
"path": "kafka/conn.py",
"copies": "1",
"size": "65180",
"license": "apache-2.0",
"hash": 7639884263143005000,
"line_mean": 43.4914675768,
"line_max": 131,
"alpha_frac": 0.5793648358,
"autogenerated": false,
"ratio": 4.388634527336386,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467999363136385,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import collections
import copy
import functools
import logging
import time
from kafka.vendor import six
from kafka.coordinator.base import BaseCoordinator, Generation
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.coordinator.protocol import ConsumerProtocol
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics import AnonMeasurable
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.protocol.commit import OffsetCommitRequest, OffsetFetchRequest
from kafka.structs import OffsetAndMetadata, TopicPartition
from kafka.util import WeakMethod
log = logging.getLogger(__name__)
class ConsumerCoordinator(BaseCoordinator):
"""This class manages the coordination process with the consumer coordinator."""
DEFAULT_CONFIG = {
'group_id': 'kafka-python-default-group',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': None,
'assignors': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'session_timeout_ms': 10000,
'heartbeat_interval_ms': 3000,
'max_poll_interval_ms': 300000,
'retry_backoff_ms': 100,
'api_version': (0, 10, 1),
'exclude_internal_topics': True,
'metric_group_prefix': 'consumer'
}
def __init__(self, client, subscription, metrics, **configs):
"""Initialize the coordination manager.
Keyword Arguments:
group_id (str): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. Default: 'kafka-python-default-group'
enable_auto_commit (bool): If true the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): called as
callback(offsets, exception); response will be either an Exception
or None. This callback can be used to trigger custom actions when
a commit request completes.
assignors (list): List of objects to use to distribute partition
ownership amongst consumer instances when group management is
used. Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. Default: 10000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to
True the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+. Default: True
"""
super(ConsumerCoordinator, self).__init__(client, metrics, **configs)
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self._subscription = subscription
self._is_leader = False
self._joined_subscription = set()
self._metadata_snapshot = self._build_metadata_snapshot(subscription, client.cluster)
self._assignment_snapshot = None
self._cluster = client.cluster
self.auto_commit_interval = self.config['auto_commit_interval_ms'] / 1000
self.next_auto_commit_deadline = None
self.completed_offset_commits = collections.deque()
if self.config['default_offset_commit_callback'] is None:
self.config['default_offset_commit_callback'] = self._default_offset_commit_callback
if self.config['group_id'] is not None:
if self.config['api_version'] >= (0, 9):
if not self.config['assignors']:
raise Errors.KafkaConfigurationError('Coordinator requires assignors')
if self.config['api_version'] < (0, 10, 1):
if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
raise Errors.KafkaConfigurationError("Broker version %s does not support "
"different values for max_poll_interval_ms "
"and session_timeout_ms")
if self.config['enable_auto_commit']:
if self.config['api_version'] < (0, 8, 1):
log.warning('Broker version (%s) does not support offset'
' commits; disabling auto-commit.',
self.config['api_version'])
self.config['enable_auto_commit'] = False
elif self.config['group_id'] is None:
log.warning('group_id is None: disabling auto-commit.')
self.config['enable_auto_commit'] = False
else:
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
self.consumer_sensors = ConsumerCoordinatorMetrics(
metrics, self.config['metric_group_prefix'], self._subscription)
self._cluster.request_update()
self._cluster.add_listener(WeakMethod(self._handle_metadata_update))
def __del__(self):
if hasattr(self, '_cluster') and self._cluster:
self._cluster.remove_listener(WeakMethod(self._handle_metadata_update))
super(ConsumerCoordinator, self).__del__()
def protocol_type(self):
return ConsumerProtocol.PROTOCOL_TYPE
def group_protocols(self):
"""Returns list of preferred (protocols, metadata)"""
if self._subscription.subscription is None:
raise Errors.IllegalStateError('Consumer has not subscribed to topics')
# dpkp note: I really dislike this.
# why? because we are using this strange method group_protocols,
# which is seemingly innocuous, to set internal state (_joined_subscription)
# that is later used to check whether metadata has changed since we joined a group
# but there is no guarantee that this method, group_protocols, will get called
# in the correct sequence or that it will only be called when we want it to be.
# So this really should be moved elsewhere, but I don't have the energy to
# work that out right now. If you read this at some later date after the mutable
# state has bitten you... I'm sorry! It mimics the java client, and that's the
# best I've got for now.
self._joined_subscription = set(self._subscription.subscription)
metadata_list = []
for assignor in self.config['assignors']:
metadata = assignor.metadata(self._joined_subscription)
group_protocol = (assignor.name, metadata)
metadata_list.append(group_protocol)
return metadata_list
def _handle_metadata_update(self, cluster):
# if we encounter any unauthorized topics, raise an exception
if cluster.unauthorized_topics:
raise Errors.TopicAuthorizationFailedError(cluster.unauthorized_topics)
if self._subscription.subscribed_pattern:
topics = []
for topic in cluster.topics(self.config['exclude_internal_topics']):
if self._subscription.subscribed_pattern.match(topic):
topics.append(topic)
if set(topics) != self._subscription.subscription:
self._subscription.change_subscription(topics)
self._client.set_topics(self._subscription.group_subscription())
# check if there are any changes to the metadata which should trigger
# a rebalance
if self._subscription.partitions_auto_assigned():
metadata_snapshot = self._build_metadata_snapshot(self._subscription, cluster)
if self._metadata_snapshot != metadata_snapshot:
self._metadata_snapshot = metadata_snapshot
# If we haven't got group coordinator support,
# just assign all partitions locally
if self._auto_assign_all_partitions():
self._subscription.assign_from_subscribed([
TopicPartition(topic, partition)
for topic in self._subscription.subscription
for partition in self._metadata_snapshot[topic]
])
def _auto_assign_all_partitions(self):
# For users that use "subscribe" without group support,
# we will simply assign all partitions to this consumer
if self.config['api_version'] < (0, 9):
return True
elif self.config['group_id'] is None:
return True
else:
return False
def _build_metadata_snapshot(self, subscription, cluster):
metadata_snapshot = {}
for topic in subscription.group_subscription():
partitions = cluster.partitions_for_topic(topic) or []
metadata_snapshot[topic] = set(partitions)
return metadata_snapshot
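# Example of the snapshot shape (topic name and partition count are
# hypothetical): for a group subscription of ['orders'] where the cluster
# currently reports partitions 0-2, the snapshot is {'orders': {0, 1, 2}}.
# Comparing successive snapshots is how _handle_metadata_update() and
# need_rejoin() detect partition-count changes that require a rebalance.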
def _lookup_assignor(self, name):
for assignor in self.config['assignors']:
if assignor.name == name:
return assignor
return None
def _on_join_complete(self, generation, member_id, protocol,
member_assignment_bytes):
# only the leader is responsible for monitoring for metadata changes
# (i.e. partition changes)
if not self._is_leader:
self._assignment_snapshot = None
assignor = self._lookup_assignor(protocol)
assert assignor, 'Coordinator selected invalid assignment protocol: %s' % (protocol,)
assignment = ConsumerProtocol.ASSIGNMENT.decode(member_assignment_bytes)
# set the flag to refresh last committed offsets
self._subscription.needs_fetch_committed_offsets = True
# update partition assignment
self._subscription.assign_from_subscribed(assignment.partitions())
# give the assignor a chance to update internal state
# based on the received assignment
assignor.on_assignment(assignment)
# reschedule the auto commit starting from now
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
assigned = set(self._subscription.assigned_partitions())
log.info("Setting newly assigned partitions %s for group %s",
assigned, self.group_id)
# execute the user's callback after rebalance
if self._subscription.listener:
try:
self._subscription.listener.on_partitions_assigned(assigned)
except Exception:
log.exception("User provided listener %s for group %s"
" failed on partition assignment: %s",
self._subscription.listener, self.group_id,
assigned)
def poll(self):
"""
Poll for coordinator events. Only applicable if group_id is set, and
broker version supports GroupCoordinators. This ensures that the
coordinator is known, and if using automatic partition assignment,
ensures that the consumer has joined the group. This also handles
periodic offset commits if they are enabled.
"""
if self.group_id is None:
return
self._invoke_completed_offset_commit_callbacks()
self.ensure_coordinator_ready()
if self.config['api_version'] >= (0, 9) and self._subscription.partitions_auto_assigned():
if self.need_rejoin():
# due to a race condition between the initial metadata fetch and the
# initial rebalance, we need to ensure that the metadata is fresh
# before joining initially, and then request the metadata update. If
# metadata update arrives while the rebalance is still pending (for
# example, when the join group is still inflight), then we will lose
# track of the fact that we need to rebalance again to reflect the
# change to the topic subscription. Without ensuring that the
# metadata is fresh, any metadata update that changes the topic
# subscriptions and arrives while a rebalance is in progress will
# essentially be ignored. See KAFKA-3949 for the complete
# description of the problem.
if self._subscription.subscribed_pattern:
metadata_update = self._client.cluster.request_update()
self._client.poll(future=metadata_update)
self.ensure_active_group()
self.poll_heartbeat()
self._maybe_auto_commit_offsets_async()
def time_to_next_poll(self):
"""Return seconds (float) remaining until :meth:`.poll` should be called again"""
if not self.config['enable_auto_commit']:
return self.time_to_next_heartbeat()
if time.time() > self.next_auto_commit_deadline:
return 0
return min(self.next_auto_commit_deadline - time.time(),
self.time_to_next_heartbeat())
def _perform_assignment(self, leader_id, assignment_strategy, members):
assignor = self._lookup_assignor(assignment_strategy)
assert assignor, 'Invalid assignment protocol: %s' % (assignment_strategy,)
member_metadata = {}
all_subscribed_topics = set()
for member_id, metadata_bytes in members:
metadata = ConsumerProtocol.METADATA.decode(metadata_bytes)
member_metadata[member_id] = metadata
all_subscribed_topics.update(metadata.subscription) # pylint: disable-msg=no-member
# the leader will begin watching for changes to any of the topics
# the group is interested in, which ensures that all metadata changes
# will eventually be seen
# Because assignment typically happens within response callbacks,
# we cannot block on metadata updates here (no recursion into poll())
self._subscription.group_subscribe(all_subscribed_topics)
self._client.set_topics(self._subscription.group_subscription())
# keep track of the metadata used for assignment so that we can check
# after rebalance completion whether anything has changed
self._cluster.request_update()
self._is_leader = True
self._assignment_snapshot = self._metadata_snapshot
log.debug("Performing assignment for group %s using strategy %s"
" with subscriptions %s", self.group_id, assignor.name,
member_metadata)
assignments = assignor.assign(self._cluster, member_metadata)
log.debug("Finished assignment for group %s: %s", self.group_id, assignments)
group_assignment = {}
for member_id, assignment in six.iteritems(assignments):
group_assignment[member_id] = assignment
return group_assignment
def _on_join_prepare(self, generation, member_id):
# commit offsets prior to rebalance if auto-commit enabled
self._maybe_auto_commit_offsets_sync()
# execute the user's callback before rebalance
log.info("Revoking previously assigned partitions %s for group %s",
self._subscription.assigned_partitions(), self.group_id)
if self._subscription.listener:
try:
revoked = set(self._subscription.assigned_partitions())
self._subscription.listener.on_partitions_revoked(revoked)
except Exception:
log.exception("User provided subscription listener %s"
" for group %s failed on_partitions_revoked",
self._subscription.listener, self.group_id)
self._is_leader = False
self._subscription.reset_group_subscription()
def need_rejoin(self):
"""Check whether the group should be rejoined
Returns:
bool: True if consumer should rejoin group, False otherwise
"""
if not self._subscription.partitions_auto_assigned():
return False
if self._auto_assign_all_partitions():
return False
# we need to rejoin if we performed the assignment and metadata has changed
if (self._assignment_snapshot is not None
and self._assignment_snapshot != self._metadata_snapshot):
return True
# we need to join if our subscription has changed since the last join
if (self._joined_subscription is not None
and self._joined_subscription != self._subscription.subscription):
return True
return super(ConsumerCoordinator, self).need_rejoin()
def refresh_committed_offsets_if_needed(self):
"""Fetch committed offsets for assigned partitions."""
if self._subscription.needs_fetch_committed_offsets:
offsets = self.fetch_committed_offsets(self._subscription.assigned_partitions())
for partition, offset in six.iteritems(offsets):
# verify assignment is still active
if self._subscription.is_assigned(partition):
self._subscription.assignment[partition].committed = offset.offset
self._subscription.needs_fetch_committed_offsets = False
def fetch_committed_offsets(self, partitions):
"""Fetch the current committed offsets for specified partitions
Arguments:
partitions (list of TopicPartition): partitions to fetch
Returns:
dict: {TopicPartition: OffsetAndMetadata}
"""
if not partitions:
return {}
while True:
self.ensure_coordinator_ready()
# contact coordinator to fetch committed offsets
future = self._send_offset_fetch_request(partitions)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000)
def close(self, autocommit=True):
"""Close the coordinator, leave the current group,
and reset local generation / member_id.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
"""
try:
if autocommit:
self._maybe_auto_commit_offsets_sync()
finally:
super(ConsumerCoordinator, self).close()
def _invoke_completed_offset_commit_callbacks(self):
while self.completed_offset_commits:
callback, offsets, exception = self.completed_offset_commits.popleft()
callback(offsets, exception)
def commit_offsets_async(self, offsets, callback=None):
"""Commit specific offsets asynchronously.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
callback (callable, optional): called as callback(offsets, response)
response will be either an Exception or a OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
self._invoke_completed_offset_commit_callbacks()
if not self.coordinator_unknown():
future = self._do_commit_offsets_async(offsets, callback)
else:
# we don't know the current coordinator, so try to find it and then
# send the commit or fail (we don't want recursive retries which can
# cause offset commits to arrive out of order). Note that there may
# be multiple offset commits chained to the same coordinator lookup
# request. This is fine because the listeners will be invoked in the
# same order that they were added. Note also that BaseCoordinator
# prevents multiple concurrent coordinator lookup requests.
future = self.lookup_coordinator()
future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)())
if callback:
future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))
# ensure the commit has a chance to be transmitted (without blocking on
# its completion). Note that commits are treated as heartbeats by the
# coordinator, so there is no need to explicitly allow heartbeats
# through delayed task execution.
self._client.poll(timeout_ms=0) # no wakeup if we add that feature
return future
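# Hedged usage sketch (topic, partition, and offset values are made up):
#     tp = TopicPartition('orders', 0)
#     coordinator.commit_offsets_async(
#         {tp: OffsetAndMetadata(42, '')},
#         callback=lambda offsets, response: log.info('commit result: %s', response))
# Per the docstring above, the callback's second argument is either an
# OffsetCommitResponse struct or an Exception.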
def _do_commit_offsets_async(self, offsets, callback=None):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
if callback is None:
callback = self.config['default_offset_commit_callback']
self._subscription.needs_fetch_committed_offsets = True
future = self._send_offset_commit_request(offsets)
future.add_both(lambda res: self.completed_offset_commits.appendleft((callback, offsets, res)))
return future
def commit_offsets_sync(self, offsets):
"""Commit specific offsets synchronously.
This method will retry until the commit completes successfully or an
unrecoverable error is encountered.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
Raises error on failure
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
self._invoke_completed_offset_commit_callbacks()
if not offsets:
return
while True:
self.ensure_coordinator_ready()
future = self._send_offset_commit_request(offsets)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000)
def _maybe_auto_commit_offsets_sync(self):
if self.config['enable_auto_commit']:
try:
self.commit_offsets_sync(self._subscription.all_consumed_offsets())
# The three main group membership errors are known and should not
# require a stacktrace -- just a warning
except (Errors.UnknownMemberIdError,
Errors.IllegalGenerationError,
Errors.RebalanceInProgressError):
log.warning("Offset commit failed: group membership out of date"
" This is likely to cause duplicate message"
" delivery.")
except Exception:
log.exception("Offset commit failed: This is likely to cause"
" duplicate message delivery")
def _send_offset_commit_request(self, offsets):
"""Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
if not offsets:
log.debug('No offsets to commit')
return Future().success(None)
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# create the offset commit request
offset_data = collections.defaultdict(dict)
for tp, offset in six.iteritems(offsets):
offset_data[tp.topic][tp.partition] = offset
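# Illustrative note (added; not in the original source): this loop regroups
# the flat {TopicPartition: OffsetAndMetadata} mapping by topic, e.g.
#   {TopicPartition('t', 0): om0, TopicPartition('t', 1): om1}
# becomes
#   {'t': {0: om0, 1: om1}}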
if self._subscription.partitions_auto_assigned():
generation = self.generation()
else:
generation = Generation.NO_GENERATION
# if the generation is None, we are not part of an active group
# (and we expect to be). The only thing we can do is fail the commit
# and let the user rejoin the group in poll()
if self.config['api_version'] >= (0, 9) and generation is None:
return Future().failure(Errors.CommitFailedError())
if self.config['api_version'] >= (0, 9):
request = OffsetCommitRequest[2](
self.group_id,
generation.generation_id,
generation.member_id,
OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 2):
request = OffsetCommitRequest[1](
self.group_id, -1, '',
[(
topic, [(
partition,
offset.offset,
-1,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 1):
request = OffsetCommitRequest[0](
self.group_id,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
log.debug("Sending offset-commit request with %s for group %s to %s",
offsets, self.group_id, node_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
_f.add_errback(self._failed_request, node_id, request, future)
return future
def _handle_offset_commit_response(self, offsets, future, send_time, response):
# TODO look at adding request_latency_ms to response (like java kafka)
self.consumer_sensors.commit_latency.record((time.time() - send_time) * 1000)
unauthorized_topics = set()
for topic, partitions in response.topics:
for partition, error_code in partitions:
tp = TopicPartition(topic, partition)
offset = offsets[tp]
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
log.debug("Group %s committed offset %s for partition %s",
self.group_id, offset, tp)
if self._subscription.is_assigned(tp):
self._subscription.assignment[tp].committed = offset.offset
elif error_type is Errors.GroupAuthorizationFailedError:
log.error("Not authorized to commit offsets for group %s",
self.group_id)
future.failure(error_type(self.group_id))
return
elif error_type is Errors.TopicAuthorizationFailedError:
unauthorized_topics.add(topic)
elif error_type in (Errors.OffsetMetadataTooLargeError,
Errors.InvalidCommitOffsetSizeError):
# raise the error to the user
log.debug("OffsetCommit for group %s failed on partition %s"
" %s", self.group_id, tp, error_type.__name__)
future.failure(error_type())
return
elif error_type is Errors.GroupLoadInProgressError:
# just retry
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error_type.__name__)
future.failure(error_type(self.group_id))
return
elif error_type in (Errors.GroupCoordinatorNotAvailableError,
Errors.NotCoordinatorForGroupError,
Errors.RequestTimedOutError):
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error_type.__name__)
self.coordinator_dead(error_type())
future.failure(error_type(self.group_id))
return
elif error_type in (Errors.UnknownMemberIdError,
Errors.IllegalGenerationError,
Errors.RebalanceInProgressError):
# need to re-join group
error = error_type(self.group_id)
log.debug("OffsetCommit for group %s failed: %s",
self.group_id, error)
self.reset_generation()
future.failure(Errors.CommitFailedError())
return
else:
log.error("Group %s failed to commit partition %s at offset"
" %s: %s", self.group_id, tp, offset,
error_type.__name__)
future.failure(error_type())
return
if unauthorized_topics:
log.error("Not authorized to commit to topics %s for group %s",
unauthorized_topics, self.group_id)
future.failure(Errors.TopicAuthorizationFailedError(unauthorized_topics))
else:
future.success(None)
def _send_offset_fetch_request(self, partitions):
"""Fetch the committed offsets for a set of partitions.
This is a non-blocking call. The returned future can be polled to get
the actual offsets returned from the broker.
Arguments:
partitions (list of TopicPartition): the partitions to fetch
Returns:
Future: resolves to dict of offsets: {TopicPartition: int}
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
if not partitions:
return Future().success({})
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# Verify node is ready
if not self._client.ready(node_id):
log.debug("Node %s not ready -- failing offset fetch request",
node_id)
return Future().failure(Errors.NodeNotReadyError)
log.debug("Group %s fetching committed offsets for partitions: %s",
self.group_id, partitions)
# construct the request
topic_partitions = collections.defaultdict(set)
for tp in partitions:
topic_partitions[tp.topic].add(tp.partition)
if self.config['api_version'] >= (0, 8, 2):
request = OffsetFetchRequest[1](
self.group_id,
list(topic_partitions.items())
)
else:
request = OffsetFetchRequest[0](
self.group_id,
list(topic_partitions.items())
)
# send the request with a callback
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_fetch_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future
def _handle_offset_fetch_response(self, future, response):
offsets = {}
for topic, partitions in response.topics:
for partition, offset, metadata, error_code in partitions:
tp = TopicPartition(topic, partition)
error_type = Errors.for_code(error_code)
if error_type is not Errors.NoError:
error = error_type()
log.debug("Group %s failed to fetch offset for partition"
" %s: %s", self.group_id, tp, error)
if error_type is Errors.GroupLoadInProgressError:
# just retry
future.failure(error)
elif error_type is Errors.NotCoordinatorForGroupError:
# re-discover the coordinator and retry
self.coordinator_dead(error_type())
future.failure(error)
elif error_type is Errors.UnknownTopicOrPartitionError:
log.warning("OffsetFetchRequest -- unknown topic %s"
" (have you committed any offsets yet?)",
topic)
continue
else:
log.error("Unknown error fetching offsets for %s: %s",
tp, error)
future.failure(error)
return
elif offset >= 0:
# record the position with the offset
# (-1 indicates no committed offset to fetch)
offsets[tp] = OffsetAndMetadata(offset, metadata)
else:
log.debug("Group %s has no committed offset for partition"
" %s", self.group_id, tp)
future.success(offsets)
def _default_offset_commit_callback(self, offsets, exception):
if exception is not None:
log.error("Offset commit failed: %s", exception)
def _commit_offsets_async_on_complete(self, offsets, exception):
if exception is not None:
log.warning("Auto offset commit failed for group %s: %s",
self.group_id, exception)
if getattr(exception, 'retriable', False):
self.next_auto_commit_deadline = min(time.time() + self.config['retry_backoff_ms'] / 1000, self.next_auto_commit_deadline)
else:
log.debug("Completed autocommit of offsets %s for group %s",
offsets, self.group_id)
def _maybe_auto_commit_offsets_async(self):
if self.config['enable_auto_commit']:
if self.coordinator_unknown():
self.next_auto_commit_deadline = time.time() + self.config['retry_backoff_ms'] / 1000
elif time.time() > self.next_auto_commit_deadline:
self.next_auto_commit_deadline = time.time() + self.auto_commit_interval
self.commit_offsets_async(self._subscription.all_consumed_offsets(),
self._commit_offsets_async_on_complete)
class ConsumerCoordinatorMetrics(object):
def __init__(self, metrics, metric_group_prefix, subscription):
self.metrics = metrics
self.metric_group_name = '%s-coordinator-metrics' % (metric_group_prefix,)
self.commit_latency = metrics.sensor('commit-latency')
self.commit_latency.add(metrics.metric_name(
'commit-latency-avg', self.metric_group_name,
'The average time taken for a commit request'), Avg())
self.commit_latency.add(metrics.metric_name(
'commit-latency-max', self.metric_group_name,
'The max time taken for a commit request'), Max())
self.commit_latency.add(metrics.metric_name(
'commit-rate', self.metric_group_name,
'The number of commit calls per second'), Rate(sampled_stat=Count()))
num_parts = AnonMeasurable(lambda config, now:
len(subscription.assigned_partitions()))
metrics.add_metric(metrics.metric_name(
'assigned-partitions', self.metric_group_name,
'The number of partitions currently assigned to this consumer'),
num_parts)
| {
"repo_name": "Aloomaio/kafka-python",
"path": "kafka/coordinator/consumer.py",
"copies": "1",
"size": "38546",
"license": "apache-2.0",
"hash": -4062537523424029000,
"line_mean": 45.6658595642,
"line_max": 138,
"alpha_frac": 0.6001141493,
"autogenerated": false,
"ratio": 4.829115509897269,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011379412500610898,
"num_lines": 826
} |
from __future__ import absolute_import, division
import collections
import copy
import logging
import random
import socket
import threading
import time
import weakref
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from kafka.vendor import selectors34 as selectors
from kafka.vendor import six
from kafka.cluster import ClusterMetadata
from kafka.conn import BrokerConnection, ConnectionStates, collect_hosts, get_ip_port_afi
from kafka import errors as Errors
from kafka.future import Future
from kafka.metrics import AnonMeasurable
from kafka.metrics.stats import Avg, Count, Rate
from kafka.metrics.stats.rate import TimeUnit
from kafka.protocol.metadata import MetadataRequest
from kafka.util import Dict, WeakMethod
# Although this looks unused, it actually monkey-patches socket.socketpair()
# and should be left in as long as we're using socket.socketpair() in this file
from kafka.vendor import socketpair
from kafka.version import __version__
if six.PY2:
ConnectionError = None
log = logging.getLogger('kafka.client')
class KafkaClient(object):
"""
A network client for asynchronous request/response network I/O.
This is an internal class used to implement the user-facing producer and
consumer clients.
This class is not thread-safe!
Attributes:
cluster (:any:`ClusterMetadata`): Local cache of cluster metadata, retrieved
via MetadataRequests during :meth:`~kafka.KafkaClient.poll`.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the client should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to backoff/wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. Once the maximum is reached,
reconnection attempts will continue periodically with this fixed
rate. To avoid connection storms, a randomization factor of 0.2
will be applied to the backoff resulting in a random range between
20% below and 20% above the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
connections_max_idle_ms: Close idle connections after the number of
milliseconds specified by this config. The broker closes idle
connections after connections.max.idle.ms, so this avoids hitting
unexpected socket disconnected errors on the client.
Default: 540000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether SSL handshake
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use. If set
to None, KafkaClient will attempt to infer the broker version by
probing various APIs. Example: (0, 10, 2). Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'bootstrap_topics_filter': set(),
'client_id': 'kafka-python-' + __version__,
'request_timeout_ms': 30000,
'wakeup_timeout_ms': 3000,
'connections_max_idle_ms': 9 * 60 * 1000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'retry_backoff_ms': 100,
'metadata_max_age_ms': 300000,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_password': None,
'ssl_crlfile': None,
'ssl_ciphers': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'selector': selectors.DefaultSelector,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
def __init__(self, **configs):
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
# these properties need to be set at the top of the initialization pipeline
# because they are used when the __del__ method is called
self._closed = False
self._wake_r, self._wake_w = socket.socketpair()
self._selector = self.config['selector']()
self.cluster = ClusterMetadata(**self.config)
self._topics = set() # empty set will fetch all topic metadata
self._metadata_refresh_in_progress = False
self._conns = Dict() # object to support weakrefs
self._api_versions = None
self._connecting = set()
self._sending = set()
self._refresh_on_disconnects = True
self._last_bootstrap = 0
self._bootstrap_fails = 0
self._wake_r.setblocking(False)
self._wake_w.settimeout(self.config['wakeup_timeout_ms'] / 1000.0)
self._wake_lock = threading.Lock()
self._lock = threading.RLock()
# when requests complete, they are transferred to this queue prior to
# invocation. The purpose is to avoid invoking them while holding the
# lock above.
self._pending_completion = collections.deque()
self._selector.register(self._wake_r, selectors.EVENT_READ)
self._idle_expiry_manager = IdleConnectionManager(self.config['connections_max_idle_ms'])
self._sensors = None
if self.config['metrics']:
self._sensors = KafkaClientMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
weakref.proxy(self._conns))
self._num_bootstrap_hosts = len(collect_hosts(self.config['bootstrap_servers']))
# Check Broker Version if not set explicitly
if self.config['api_version'] is None:
check_timeout = self.config['api_version_auto_timeout_ms'] / 1000
self.config['api_version'] = self.check_version(timeout=check_timeout)
def _can_bootstrap(self):
effective_failures = self._bootstrap_fails // self._num_bootstrap_hosts
backoff_factor = 2 ** effective_failures
backoff_ms = min(self.config['reconnect_backoff_ms'] * backoff_factor,
self.config['reconnect_backoff_max_ms'])
backoff_ms *= random.uniform(0.8, 1.2)
next_at = self._last_bootstrap + backoff_ms / 1000.0
now = time.time()
if next_at > now:
return False
return True
def _can_connect(self, node_id):
if node_id not in self._conns:
if self.cluster.broker_metadata(node_id):
return True
return False
conn = self._conns[node_id]
return conn.disconnected() and not conn.blacked_out()
def _conn_state_change(self, node_id, sock, conn):
with self._lock:
if conn.connecting():
# SSL connections can enter this state 2x (second during Handshake)
if node_id not in self._connecting:
self._connecting.add(node_id)
try:
self._selector.register(sock, selectors.EVENT_WRITE, conn)
except KeyError:
self._selector.modify(sock, selectors.EVENT_WRITE, conn)
if self.cluster.is_bootstrap(node_id):
self._last_bootstrap = time.time()
elif conn.connected():
log.debug("Node %s connected", node_id)
if node_id in self._connecting:
self._connecting.remove(node_id)
try:
self._selector.modify(sock, selectors.EVENT_READ, conn)
except KeyError:
self._selector.register(sock, selectors.EVENT_READ, conn)
if self._sensors:
self._sensors.connection_created.record()
self._idle_expiry_manager.update(node_id)
if self.cluster.is_bootstrap(node_id):
self._bootstrap_fails = 0
else:
for node_id in list(self._conns.keys()):
if self.cluster.is_bootstrap(node_id):
self._conns.pop(node_id).close()
# Connection failures imply that our metadata is stale, so let's refresh
elif conn.state is ConnectionStates.DISCONNECTED:
if node_id in self._connecting:
self._connecting.remove(node_id)
try:
self._selector.unregister(sock)
except KeyError:
pass
if self._sensors:
self._sensors.connection_closed.record()
idle_disconnect = False
if self._idle_expiry_manager.is_expired(node_id):
idle_disconnect = True
self._idle_expiry_manager.remove(node_id)
# If the connection has already been popped from self._conns,
# we can assume the disconnect was intentional and not a failure
if node_id not in self._conns:
pass
elif self.cluster.is_bootstrap(node_id):
self._bootstrap_fails += 1
elif self._refresh_on_disconnects and not self._closed and not idle_disconnect:
log.warning("Node %s connection failed -- refreshing metadata", node_id)
self.cluster.request_update()
def maybe_connect(self, node_id, wakeup=True):
"""Queues a node for asynchronous connection during the next .poll()"""
if self._can_connect(node_id):
self._connecting.add(node_id)
# Wakeup signal is useful in case another thread is
# blocked waiting for incoming network traffic while holding
# the client lock in poll().
if wakeup:
self.wakeup()
return True
return False
def _should_recycle_connection(self, conn):
# Never recycle unless disconnected
if not conn.disconnected():
return False
# Otherwise, only recycle when broker metadata has changed
broker = self.cluster.broker_metadata(conn.node_id)
if broker is None:
return False
host, _, afi = get_ip_port_afi(broker.host)
if conn.host != host or conn.port != broker.port:
log.info("Broker metadata change detected for node %s"
" from %s:%s to %s:%s", conn.node_id, conn.host, conn.port,
broker.host, broker.port)
return True
return False
def _maybe_connect(self, node_id):
"""Idempotent non-blocking connection attempt to the given node id."""
with self._lock:
conn = self._conns.get(node_id)
if conn is None:
broker = self.cluster.broker_metadata(node_id)
assert broker, 'Broker id %s not in current metadata' % (node_id,)
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
host, port, afi = get_ip_port_afi(broker.host)
cb = WeakMethod(self._conn_state_change)
conn = BrokerConnection(host, broker.port, afi,
state_change_callback=cb,
node_id=node_id,
**self.config)
self._conns[node_id] = conn
# Check if existing connection should be recreated because host/port changed
elif self._should_recycle_connection(conn):
self._conns.pop(node_id)
return False
elif conn.connected():
return True
conn.connect()
return conn.connected()
def ready(self, node_id, metadata_priority=True):
"""Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
"""
self.maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority)
def connected(self, node_id):
"""Return True iff the node_id is connected."""
conn = self._conns.get(node_id)
if conn is None:
return False
return conn.connected()
def _close(self):
if not self._closed:
self._closed = True
self._wake_r.close()
self._wake_w.close()
self._selector.close()
def close(self, node_id=None):
"""Close one or all broker connections.
Arguments:
node_id (int, optional): the id of the node to close
"""
with self._lock:
if node_id is None:
self._close()
conns = list(self._conns.values())
self._conns.clear()
for conn in conns:
conn.close()
elif node_id in self._conns:
self._conns.pop(node_id).close()
else:
log.warning("Node %s not found in current connection list; skipping", node_id)
return
def __del__(self):
self._close()
def is_disconnected(self, node_id):
"""Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
call, but there are cases where transient failures need to be caught
and re-acted upon.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True iff the node exists and is disconnected
"""
conn = self._conns.get(node_id)
if conn is None:
return False
return conn.disconnected()
def connection_delay(self, node_id):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
Arguments:
node_id (int): The id of the node to check
Returns:
int: The number of milliseconds to wait.
"""
conn = self._conns.get(node_id)
if conn is None:
return 0
return conn.connection_delay()
def is_ready(self, node_id, metadata_priority=True):
"""Check whether a node is ready to send more requests.
In addition to connection-level checks, this method also is used to
block additional requests from being sent during a metadata refresh.
Arguments:
node_id (int): id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if the node is ready and metadata is not refreshing
"""
if not self._can_send_request(node_id):
return False
# if we need to update our metadata now declare all requests unready to
# make metadata requests first priority
if metadata_priority:
if self._metadata_refresh_in_progress:
return False
if self.cluster.ttl() == 0:
return False
return True
def _can_send_request(self, node_id):
conn = self._conns.get(node_id)
if not conn:
return False
return conn.connected() and conn.can_send_more()
def send(self, node_id, request, wakeup=True):
"""Send a request to a specific node. Bytes are placed on an
internal per-connection send-queue. Actual network I/O will be
triggered in a subsequent call to .poll()
Arguments:
node_id (int): destination node
request (Struct): request object (not-encoded)
wakeup (bool): optional flag to disable thread-wakeup
Raises:
AssertionError: if node_id is not in current cluster metadata
Returns:
Future: resolves to Response struct or Error
"""
conn = self._conns.get(node_id)
if not conn or not self._can_send_request(node_id):
self.maybe_connect(node_id, wakeup=wakeup)
return Future().failure(Errors.NodeNotReadyError(node_id))
# conn.send will queue the request internally
# we will need to call send_pending_requests()
# to trigger network I/O
future = conn.send(request, blocking=False)
self._sending.add(conn)
# Wakeup signal is useful in case another thread is
# blocked waiting for incoming network traffic while holding
# the client lock in poll().
if wakeup:
self.wakeup()
return future
def poll(self, timeout_ms=None, future=None):
"""Try to read and write to sockets.
This method will also attempt to complete node connections, refresh
stale metadata, and run previously-scheduled tasks.
Arguments:
timeout_ms (int, optional): maximum amount of time to wait (in ms)
for at least one response. Must be non-negative. The actual
timeout will be the minimum of timeout, request timeout and
metadata timeout. Default: request_timeout_ms
future (Future, optional): if provided, blocks until future.is_done
Returns:
list: responses received (can be empty)
"""
if future is not None:
timeout_ms = 100
elif timeout_ms is None:
timeout_ms = self.config['request_timeout_ms']
elif not isinstance(timeout_ms, (int, float)):
raise TypeError('Invalid type for timeout: %s' % type(timeout_ms))
# Loop for futures, break after first loop if None
responses = []
while True:
with self._lock:
if self._closed:
break
# Attempt to complete pending connections
for node_id in list(self._connecting):
self._maybe_connect(node_id)
# Send a metadata request if needed
metadata_timeout_ms = self._maybe_refresh_metadata()
# If we got a future that is already done, don't block in _poll
if future is not None and future.is_done:
timeout = 0
else:
idle_connection_timeout_ms = self._idle_expiry_manager.next_check_ms()
timeout = min(
timeout_ms,
metadata_timeout_ms,
idle_connection_timeout_ms,
self.config['request_timeout_ms'])
# if there are no requests in flight, do not block longer than the retry backoff
if self.in_flight_request_count() == 0:
timeout = min(timeout, self.config['retry_backoff_ms'])
timeout = max(0, timeout) # avoid negative timeouts
self._poll(timeout / 1000)
# called without the lock to avoid deadlock potential
# if handlers need to acquire locks
responses.extend(self._fire_pending_completed_requests())
# If all we had was a timeout (future is None) - only do one poll
# If we do have a future, we keep looping until it is done
if future is None or future.is_done:
break
return responses
def _register_send_sockets(self):
while self._sending:
conn = self._sending.pop()
try:
key = self._selector.get_key(conn._sock)
events = key.events | selectors.EVENT_WRITE
self._selector.modify(key.fileobj, events, key.data)
except KeyError:
self._selector.register(conn._sock, selectors.EVENT_WRITE, conn)
def _poll(self, timeout):
# This needs to be locked, but since it is only called from within the
# locked section of poll(), there is no additional lock acquisition here
processed = set()
# Send pending requests first, before polling for responses
self._register_send_sockets()
start_select = time.time()
ready = self._selector.select(timeout)
end_select = time.time()
if self._sensors:
self._sensors.select_time.record((end_select - start_select) * 1000000000)
for key, events in ready:
if key.fileobj is self._wake_r:
self._clear_wake_fd()
continue
# Send pending requests if socket is ready to write
if events & selectors.EVENT_WRITE:
conn = key.data
if conn.connecting():
conn.connect()
else:
if conn.send_pending_requests_v2():
# If send is complete, we don't need to track write readiness
# for this socket anymore
if key.events ^ selectors.EVENT_WRITE:
self._selector.modify(
key.fileobj,
key.events ^ selectors.EVENT_WRITE,
key.data)
else:
self._selector.unregister(key.fileobj)
if not (events & selectors.EVENT_READ):
continue
conn = key.data
processed.add(conn)
if not conn.in_flight_requests:
# if we got an EVENT_READ but there were no in-flight requests, one of
# two things has happened:
#
# 1. The remote end closed the connection (because it died, or because
# a firewall timed out, or whatever)
# 2. The protocol is out of sync.
#
# either way, we can no longer safely use this connection
#
# Do a 1-byte read to check the protocol didn't get out of sync, and then close the conn
try:
unexpected_data = key.fileobj.recv(1)
if unexpected_data: # anything other than a 0-byte read means protocol issues
log.warning('Protocol out of sync on %r, closing', conn)
except socket.error:
pass
conn.close(Errors.KafkaConnectionError('Socket EVENT_READ without in-flight-requests'))
continue
self._idle_expiry_manager.update(conn.node_id)
self._pending_completion.extend(conn.recv())
# Check for additional pending SSL bytes
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
# TODO: optimize
for conn in self._conns.values():
if conn not in processed and conn.connected() and conn._sock.pending():
self._pending_completion.extend(conn.recv())
for conn in six.itervalues(self._conns):
if conn.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
conn, conn.config['request_timeout_ms'])
conn.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
conn.config['request_timeout_ms']))
if self._sensors:
self._sensors.io_time.record((time.time() - end_select) * 1000000000)
self._maybe_close_oldest_connection()
def in_flight_request_count(self, node_id=None):
"""Get the number of in-flight requests for a node or all nodes.
Arguments:
node_id (int, optional): a specific node to check. If unspecified,
return the total for all nodes
Returns:
int: pending in-flight requests for the node, or all nodes if None
"""
if node_id is not None:
conn = self._conns.get(node_id)
if conn is None:
return 0
return len(conn.in_flight_requests)
else:
return sum([len(conn.in_flight_requests)
for conn in list(self._conns.values())])
def _fire_pending_completed_requests(self):
responses = []
while True:
try:
# We rely on deque.popleft remaining threadsafe
# to allow both the heartbeat thread and the main thread
# to process responses
response, future = self._pending_completion.popleft()
except IndexError:
break
future.success(response)
responses.append(response)
return responses
def least_loaded_node(self):
"""Choose the node with fewest outstanding requests, with fallbacks.
This method will prefer a node with an existing connection and no
in-flight-requests. If no such node is found, a node will be chosen
randomly from disconnected nodes that are not "blacked out" (i.e.,
are not subject to a reconnect backoff). If no node metadata has been
obtained, will return a bootstrap node (subject to exponential backoff).
Returns:
node_id or None if no suitable node was found
"""
nodes = [broker.nodeId for broker in self.cluster.brokers()]
random.shuffle(nodes)
inflight = float('inf')
found = None
for node_id in nodes:
conn = self._conns.get(node_id)
connected = conn is not None and conn.connected()
blacked_out = conn is not None and conn.blacked_out()
curr_inflight = len(conn.in_flight_requests) if conn is not None else 0
if connected and curr_inflight == 0:
# if we find an established connection
# with no in-flight requests, we can stop right away
return node_id
elif not blacked_out and curr_inflight < inflight:
# otherwise if this is the best we have found so far, record that
inflight = curr_inflight
found = node_id
return found
def set_topics(self, topics):
"""Set specific topics to track for metadata.
Arguments:
topics (list of str): topics to check for metadata
Returns:
Future: resolves after metadata request/response
"""
if set(topics).difference(self._topics):
future = self.cluster.request_update()
else:
future = Future().success(set(topics))
self._topics = set(topics)
return future
def add_topic(self, topic):
"""Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
Returns:
Future: resolves after metadata request/response
"""
if topic in self._topics:
return Future().success(set(self._topics))
self._topics.add(topic)
return self.cluster.request_update()
# This method should be locked when running multi-threaded
def _maybe_refresh_metadata(self, wakeup=False):
"""Send a metadata request if needed.
Returns:
int: milliseconds until next refresh
"""
ttl = self.cluster.ttl()
wait_for_in_progress_ms = self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0
metadata_timeout = max(ttl, wait_for_in_progress_ms)
if metadata_timeout > 0:
return metadata_timeout
# Beware that the behavior of this method and the computation of
# timeouts for poll() are highly dependent on the behavior of
# least_loaded_node()
node_id = self.least_loaded_node()
if node_id is None:
log.debug("Give up sending metadata request since no node is available");
return self.config['reconnect_backoff_ms']
if self._can_send_request(node_id):
topics = list(self._topics)
if not topics and self.cluster.is_bootstrap(node_id):
topics = list(self.config['bootstrap_topics_filter'])
if self.cluster.need_all_topic_metadata or not topics:
topics = [] if self.config['api_version'] < (0, 10) else None
api_version = 0 if self.config['api_version'] < (0, 10) else 1
request = MetadataRequest[api_version](topics)
log.debug("Sending metadata request %s to node %s", request, node_id)
future = self.send(node_id, request, wakeup=wakeup)
future.add_callback(self.cluster.update_metadata)
future.add_errback(self.cluster.failed_update)
self._metadata_refresh_in_progress = True
def refresh_done(val_or_error):
self._metadata_refresh_in_progress = False
future.add_callback(refresh_done)
future.add_errback(refresh_done)
return self.config['request_timeout_ms']
# If there's any connection establishment underway, wait until it completes. This prevents
# the client from unnecessarily connecting to additional nodes while a previous connection
# attempt has not been completed.
if self._connecting:
return self.config['reconnect_backoff_ms']
if self.maybe_connect(node_id, wakeup=wakeup):
log.debug("Initializing connection to node %s for metadata request", node_id)
return self.config['reconnect_backoff_ms']
# connected but can't send more, OR connecting
# In either case we just need to wait for a network event
# to let us know the selected connection might be usable again.
return float('inf')
def get_api_versions(self):
"""Return the ApiVersions map, if available.
Note: A call to check_version must previously have succeeded and returned
version 0.10.0 or later
Returns: a dict mapping {api_key: (min_version, max_version)},
or None if ApiVersion is not supported by the kafka cluster.
"""
return self._api_versions
def check_version(self, node_id=None, timeout=2, strict=False):
"""Attempt to guess the version of a Kafka broker.
Note: It is possible that this method blocks longer than the
specified timeout. This can happen if the entire cluster
is down and the client enters a bootstrap backoff sleep.
This is only possible if node_id is None.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
Raises:
NodeNotReadyError (if node_id is provided)
NoBrokersAvailable (if node_id is None)
UnrecognizedBrokerVersion: please file bug if seen!
AssertionError (if strict=True): please file bug if seen!
"""
self._lock.acquire()
end = time.time() + timeout
while time.time() < end:
# It is possible that least_loaded_node falls back to bootstrap,
# which can block for an increasing backoff period
try_node = node_id or self.least_loaded_node()
if try_node is None:
self._lock.release()
raise Errors.NoBrokersAvailable()
self._maybe_connect(try_node)
conn = self._conns[try_node]
# We will intentionally cause socket failures
# These should not trigger metadata refresh
self._refresh_on_disconnects = False
try:
remaining = end - time.time()
version = conn.check_version(timeout=remaining, strict=strict, topics=list(self.config['bootstrap_topics_filter']))
if version >= (0, 10, 0):
# cache the api versions map if it's available (starting
# in 0.10 cluster version)
self._api_versions = conn.get_api_versions()
self._lock.release()
return version
except Errors.NodeNotReadyError:
# Only raise to user if this is a node-specific request
if node_id is not None:
self._lock.release()
raise
finally:
self._refresh_on_disconnects = True
# Timeout
else:
self._lock.release()
raise Errors.NoBrokersAvailable()
def wakeup(self):
with self._wake_lock:
try:
self._wake_w.sendall(b'x')
except socket.timeout:
log.warning('Timed out sending to wakeup socket!')
raise Errors.KafkaTimeoutError()
except socket.error:
log.warning('Unable to send to wakeup socket!')
def _clear_wake_fd(self):
# reading from wake socket should only happen in a single thread
while True:
try:
self._wake_r.recv(1024)
except socket.error:
break
def _maybe_close_oldest_connection(self):
expired_connection = self._idle_expiry_manager.poll_expired_connection()
if expired_connection:
conn_id, ts = expired_connection
idle_ms = (time.time() - ts) * 1000
log.info('Closing idle connection %s, last active %d ms ago', conn_id, idle_ms)
self.close(node_id=conn_id)
def bootstrap_connected(self):
"""Return True if a bootstrap node is connected"""
for node_id in self._conns:
if not self.cluster.is_bootstrap(node_id):
continue
if self._conns[node_id].connected():
return True
else:
return False
# OrderedDict requires python2.7+
try:
from collections import OrderedDict
except ImportError:
# If we don't have OrderedDict, we'll fall back to dict with O(n) priority reads
OrderedDict = dict
class IdleConnectionManager(object):
def __init__(self, connections_max_idle_ms):
if connections_max_idle_ms > 0:
self.connections_max_idle = connections_max_idle_ms / 1000
else:
self.connections_max_idle = float('inf')
self.next_idle_close_check_time = None
self.update_next_idle_close_check_time(time.time())
self.lru_connections = OrderedDict()
def update(self, conn_id):
# order should reflect last-update
if conn_id in self.lru_connections:
del self.lru_connections[conn_id]
self.lru_connections[conn_id] = time.time()
def remove(self, conn_id):
if conn_id in self.lru_connections:
del self.lru_connections[conn_id]
def is_expired(self, conn_id):
if conn_id not in self.lru_connections:
return None
return time.time() >= self.lru_connections[conn_id] + self.connections_max_idle
def next_check_ms(self):
now = time.time()
if not self.lru_connections:
return float('inf')
elif self.next_idle_close_check_time <= now:
return 0
else:
return int((self.next_idle_close_check_time - now) * 1000)
def update_next_idle_close_check_time(self, ts):
self.next_idle_close_check_time = ts + self.connections_max_idle
def poll_expired_connection(self):
if time.time() < self.next_idle_close_check_time:
return None
if not len(self.lru_connections):
return None
oldest_conn_id = None
oldest_ts = None
if OrderedDict is dict:
for conn_id, ts in self.lru_connections.items():
if oldest_conn_id is None or ts < oldest_ts:
oldest_conn_id = conn_id
oldest_ts = ts
else:
(oldest_conn_id, oldest_ts) = next(iter(self.lru_connections.items()))
self.update_next_idle_close_check_time(oldest_ts)
if time.time() >= oldest_ts + self.connections_max_idle:
return (oldest_conn_id, oldest_ts)
else:
return None
class KafkaClientMetrics(object):
def __init__(self, metrics, metric_group_prefix, conns):
self.metrics = metrics
self.metric_group_name = metric_group_prefix + '-metrics'
self.connection_closed = metrics.sensor('connections-closed')
self.connection_closed.add(metrics.metric_name(
'connection-close-rate', self.metric_group_name,
'Connections closed per second in the window.'), Rate())
self.connection_created = metrics.sensor('connections-created')
self.connection_created.add(metrics.metric_name(
'connection-creation-rate', self.metric_group_name,
'New connections established per second in the window.'), Rate())
self.select_time = metrics.sensor('select-time')
self.select_time.add(metrics.metric_name(
'select-rate', self.metric_group_name,
'Number of times the I/O layer checked for new I/O to perform per'
' second'), Rate(sampled_stat=Count()))
self.select_time.add(metrics.metric_name(
'io-wait-time-ns-avg', self.metric_group_name,
'The average length of time the I/O thread spent waiting for a'
' socket ready for reads or writes in nanoseconds.'), Avg())
self.select_time.add(metrics.metric_name(
'io-wait-ratio', self.metric_group_name,
'The fraction of time the I/O thread spent waiting.'),
Rate(time_unit=TimeUnit.NANOSECONDS))
self.io_time = metrics.sensor('io-time')
self.io_time.add(metrics.metric_name(
'io-time-ns-avg', self.metric_group_name,
'The average length of time for I/O per select call in nanoseconds.'),
Avg())
self.io_time.add(metrics.metric_name(
'io-ratio', self.metric_group_name,
'The fraction of time the I/O thread spent doing I/O'),
Rate(time_unit=TimeUnit.NANOSECONDS))
metrics.add_metric(metrics.metric_name(
'connection-count', self.metric_group_name,
'The current number of active connections.'), AnonMeasurable(
lambda config, now: len(conns)))
| {
"repo_name": "dpkp/kafka-python",
"path": "kafka/client_async.py",
"copies": "3",
"size": "45265",
"license": "apache-2.0",
"hash": -4958425075171728000,
"line_mean": 41.0287836583,
"line_max": 131,
"alpha_frac": 0.5944548768,
"autogenerated": false,
"ratio": 4.489239313696321,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6583694190496321,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import collections
import copy
import logging
import threading
import time
from kafka.vendor import six
from kafka import errors as Errors
from kafka.metrics.measurable import AnonMeasurable
from kafka.metrics.stats import Avg, Max, Rate
from kafka.protocol.produce import ProduceRequest
from kafka.structs import TopicPartition
from kafka.version import __version__
log = logging.getLogger(__name__)
class Sender(threading.Thread):
"""
The background thread that handles the sending of produce requests to the
Kafka cluster. This thread makes metadata requests to renew its view of the
cluster and then sends produce requests to the appropriate nodes.
"""
DEFAULT_CONFIG = {
'max_request_size': 1048576,
'acks': 1,
'retries': 0,
'request_timeout_ms': 30000,
'guarantee_message_order': False,
'client_id': 'kafka-python-' + __version__,
'api_version': (0, 8, 0),
}
def __init__(self, client, metadata, accumulator, metrics, **configs):
super(Sender, self).__init__()
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs.pop(key)
self.name = self.config['client_id'] + '-network-thread'
self._client = client
self._accumulator = accumulator
self._metadata = client.cluster
self._running = True
self._force_close = False
self._topics_to_add = set()
self._sensors = SenderMetrics(metrics, self._client, self._metadata)
def run(self):
"""The main run loop for the sender thread."""
log.debug("Starting Kafka producer I/O thread.")
# main loop, runs until close is called
while self._running:
try:
self.run_once()
except Exception:
log.exception("Uncaught error in kafka producer I/O thread")
log.debug("Beginning shutdown of Kafka producer I/O thread, sending"
" remaining records.")
# okay we stopped accepting requests but there may still be
# requests in the accumulator or waiting for acknowledgment,
# wait until these are completed.
while (not self._force_close
and (self._accumulator.has_unsent()
or self._client.in_flight_request_count() > 0)):
try:
self.run_once()
except Exception:
log.exception("Uncaught error in kafka producer I/O thread")
if self._force_close:
# We need to fail all the incomplete batches and wake up the
# threads waiting on the futures.
self._accumulator.abort_incomplete_batches()
try:
self._client.close()
except Exception:
log.exception("Failed to close network client")
log.debug("Shutdown of Kafka producer I/O thread has completed.")
def run_once(self):
"""Run a single iteration of sending."""
while self._topics_to_add:
self._client.add_topic(self._topics_to_add.pop())
# get the list of partitions with data ready to send
result = self._accumulator.ready(self._metadata)
ready_nodes, next_ready_check_delay, unknown_leaders_exist = result
# if there are any partitions whose leaders are not known yet, force
# metadata update
if unknown_leaders_exist:
log.debug('Unknown leaders exist, requesting metadata update')
self._metadata.request_update()
# remove any nodes we aren't ready to send to
not_ready_timeout = float('inf')
for node in list(ready_nodes):
if not self._client.is_ready(node):
log.debug('Node %s not ready; delaying produce of accumulated batch', node)
self._client.maybe_connect(node, wakeup=False)
ready_nodes.remove(node)
not_ready_timeout = min(not_ready_timeout,
self._client.connection_delay(node))
# create produce requests
batches_by_node = self._accumulator.drain(
self._metadata, ready_nodes, self.config['max_request_size'])
if self.config['guarantee_message_order']:
# Mute all the partitions drained
for batch_list in six.itervalues(batches_by_node):
for batch in batch_list:
self._accumulator.muted.add(batch.topic_partition)
expired_batches = self._accumulator.abort_expired_batches(
self.config['request_timeout_ms'], self._metadata)
for expired_batch in expired_batches:
self._sensors.record_errors(expired_batch.topic_partition.topic, expired_batch.record_count)
self._sensors.update_produce_request_metrics(batches_by_node)
requests = self._create_produce_requests(batches_by_node)
# If we have any nodes that are ready to send + have sendable data,
# poll with 0 timeout so this can immediately loop and try sending more
# data. Otherwise, the timeout is determined by nodes that have
# partitions with data that isn't yet sendable (e.g. lingering, backing
# off). Note that this specifically does not include nodes with
# sendable data that aren't ready to send since they would cause busy
# looping.
poll_timeout_ms = min(next_ready_check_delay * 1000, not_ready_timeout)
if ready_nodes:
log.debug("Nodes with data ready to send: %s", ready_nodes) # trace
log.debug("Created %d produce requests: %s", len(requests), requests) # trace
poll_timeout_ms = 0
for node_id, request in six.iteritems(requests):
batches = batches_by_node[node_id]
log.debug('Sending Produce Request: %r', request)
(self._client.send(node_id, request, wakeup=False)
.add_callback(
self._handle_produce_response, node_id, time.time(), batches)
.add_errback(
self._failed_produce, batches, node_id))
# if some partitions are already ready to be sent, the select time
# would be 0; otherwise if some partition already has some data
# accumulated but not ready yet, the select time will be the time
# difference between now and its linger expiry time; otherwise the
# select time will be the time difference between now and the
# metadata expiry time
self._client.poll(timeout_ms=poll_timeout_ms)
def initiate_close(self):
"""Start closing the sender (won't complete until all data is sent)."""
self._running = False
self._accumulator.close()
self.wakeup()
def force_close(self):
"""Closes the sender without sending out any pending messages."""
self._force_close = True
self.initiate_close()
def add_topic(self, topic):
# This is generally called from a separate thread
# so this needs to be a thread-safe operation
# we assume that checking set membership across threads
# is ok because self._client._topics should never
# remove topics for a producer instance, only add them.
if topic not in self._client._topics:
self._topics_to_add.add(topic)
self.wakeup()
def _failed_produce(self, batches, node_id, error):
log.debug("Error sending produce request to node %d: %s", node_id, error) # trace
for batch in batches:
self._complete_batch(batch, error, -1, None)
def _handle_produce_response(self, node_id, send_time, batches, response):
"""Handle a produce response."""
# if we have a response, parse it
log.debug('Parsing produce response: %r', response)
if response:
batches_by_partition = dict([(batch.topic_partition, batch)
for batch in batches])
for topic, partitions in response.topics:
for partition_info in partitions:
if response.API_VERSION < 2:
partition, error_code, offset = partition_info
ts = None
else:
partition, error_code, offset, ts = partition_info
tp = TopicPartition(topic, partition)
error = Errors.for_code(error_code)
batch = batches_by_partition[tp]
self._complete_batch(batch, error, offset, ts)
if response.API_VERSION > 0:
self._sensors.record_throttle_time(response.throttle_time_ms, node=node_id)
else:
# this is the acks = 0 case, just complete all requests
for batch in batches:
self._complete_batch(batch, None, -1, None)
def _complete_batch(self, batch, error, base_offset, timestamp_ms=None):
"""Complete or retry the given batch of records.
Arguments:
batch (RecordBatch): The record batch
error (Exception): The error (or None if none)
base_offset (int): The base offset assigned to the records if successful
timestamp_ms (int, optional): The timestamp returned by the broker for this batch
"""
# Standardize no-error to None
if error is Errors.NoError:
error = None
if error is not None and self._can_retry(batch, error):
# retry
log.warning("Got error produce response on topic-partition %s,"
" retrying (%d attempts left). Error: %s",
batch.topic_partition,
self.config['retries'] - batch.attempts - 1,
error)
self._accumulator.reenqueue(batch)
self._sensors.record_retries(batch.topic_partition.topic, batch.record_count)
else:
if error is Errors.TopicAuthorizationFailedError:
error = error(batch.topic_partition.topic)
# tell the user the result of their request
batch.done(base_offset, timestamp_ms, error)
self._accumulator.deallocate(batch)
if error is not None:
self._sensors.record_errors(batch.topic_partition.topic, batch.record_count)
if getattr(error, 'invalid_metadata', False):
self._metadata.request_update()
# Unmute the completed partition.
if self.config['guarantee_message_order']:
self._accumulator.muted.remove(batch.topic_partition)
def _can_retry(self, batch, error):
"""
We can retry a send if the error is transient and the number of
attempts taken is fewer than the maximum allowed
"""
return (batch.attempts < self.config['retries']
and getattr(error, 'retriable', False))
def _create_produce_requests(self, collated):
"""
Transfer the record batches into a list of produce requests on a
per-node basis.
Arguments:
collated: {node_id: [RecordBatch]}
Returns:
dict: {node_id: ProduceRequest} (version depends on api_version)
"""
requests = {}
for node_id, batches in six.iteritems(collated):
requests[node_id] = self._produce_request(
node_id, self.config['acks'],
self.config['request_timeout_ms'], batches)
return requests
def _produce_request(self, node_id, acks, timeout, batches):
"""Create a produce request from the given record batches.
Returns:
ProduceRequest (version depends on api_version)
"""
produce_records_by_partition = collections.defaultdict(dict)
for batch in batches:
topic = batch.topic_partition.topic
partition = batch.topic_partition.partition
buf = batch.records.buffer()
produce_records_by_partition[topic][partition] = buf
kwargs = {}
if self.config['api_version'] >= (0, 11):
version = 3
kwargs = dict(transactional_id=None)
elif self.config['api_version'] >= (0, 10):
version = 2
elif self.config['api_version'] == (0, 9):
version = 1
else:
version = 0
return ProduceRequest[version](
required_acks=acks,
timeout=timeout,
topics=[(topic, list(partition_info.items()))
for topic, partition_info
in six.iteritems(produce_records_by_partition)],
**kwargs
)
def wakeup(self):
"""Wake up the selector associated with this send thread."""
self._client.wakeup()
def bootstrap_connected(self):
return self._client.bootstrap_connected()
class SenderMetrics(object):
def __init__(self, metrics, client, metadata):
self.metrics = metrics
self._client = client
self._metadata = metadata
sensor_name = 'batch-size'
self.batch_size_sensor = self.metrics.sensor(sensor_name)
self.add_metric('batch-size-avg', Avg(),
sensor_name=sensor_name,
description='The average number of bytes sent per partition per-request.')
self.add_metric('batch-size-max', Max(),
sensor_name=sensor_name,
description='The max number of bytes sent per partition per-request.')
sensor_name = 'compression-rate'
self.compression_rate_sensor = self.metrics.sensor(sensor_name)
self.add_metric('compression-rate-avg', Avg(),
sensor_name=sensor_name,
description='The average compression rate of record batches.')
sensor_name = 'queue-time'
self.queue_time_sensor = self.metrics.sensor(sensor_name)
self.add_metric('record-queue-time-avg', Avg(),
sensor_name=sensor_name,
description='The average time in ms record batches spent in the record accumulator.')
self.add_metric('record-queue-time-max', Max(),
sensor_name=sensor_name,
description='The maximum time in ms record batches spent in the record accumulator.')
sensor_name = 'produce-throttle-time'
self.produce_throttle_time_sensor = self.metrics.sensor(sensor_name)
self.add_metric('produce-throttle-time-avg', Avg(),
sensor_name=sensor_name,
description='The average throttle time in ms')
self.add_metric('produce-throttle-time-max', Max(),
sensor_name=sensor_name,
description='The maximum throttle time in ms')
sensor_name = 'records-per-request'
self.records_per_request_sensor = self.metrics.sensor(sensor_name)
self.add_metric('record-send-rate', Rate(),
sensor_name=sensor_name,
description='The average number of records sent per second.')
self.add_metric('records-per-request-avg', Avg(),
sensor_name=sensor_name,
description='The average number of records per request.')
sensor_name = 'bytes'
self.byte_rate_sensor = self.metrics.sensor(sensor_name)
self.add_metric('byte-rate', Rate(),
sensor_name=sensor_name,
description='The average number of bytes sent per second.')
sensor_name = 'record-retries'
self.retry_sensor = self.metrics.sensor(sensor_name)
self.add_metric('record-retry-rate', Rate(),
sensor_name=sensor_name,
description='The average per-second number of retried record sends')
sensor_name = 'errors'
self.error_sensor = self.metrics.sensor(sensor_name)
self.add_metric('record-error-rate', Rate(),
sensor_name=sensor_name,
description='The average per-second number of record sends that resulted in errors')
sensor_name = 'record-size-max'
self.max_record_size_sensor = self.metrics.sensor(sensor_name)
self.add_metric('record-size-max', Max(),
sensor_name=sensor_name,
description='The maximum record size across all batches')
self.add_metric('record-size-avg', Avg(),
sensor_name=sensor_name,
description='The average maximum record size per batch')
self.add_metric('requests-in-flight',
AnonMeasurable(lambda *_: self._client.in_flight_request_count()),
description='The current number of in-flight requests awaiting a response.')
self.add_metric('metadata-age',
AnonMeasurable(lambda _, now: (now - self._metadata._last_successful_refresh_ms) / 1000),
description='The age in seconds of the current producer metadata being used.')
def add_metric(self, metric_name, measurable, group_name='producer-metrics',
description=None, tags=None,
sensor_name=None):
m = self.metrics
metric = m.metric_name(metric_name, group_name, description, tags)
if sensor_name:
sensor = m.sensor(sensor_name)
sensor.add(metric, measurable)
else:
m.add_metric(metric, measurable)
def maybe_register_topic_metrics(self, topic):
def sensor_name(name):
return 'topic.{0}.{1}'.format(topic, name)
# if one sensor of the metrics has been registered for the topic,
# then all other sensors should have been registered; and vice versa
if not self.metrics.get_sensor(sensor_name('records-per-batch')):
self.add_metric('record-send-rate', Rate(),
sensor_name=sensor_name('records-per-batch'),
group_name='producer-topic-metrics.' + topic,
description= 'Records sent per second for topic ' + topic)
self.add_metric('byte-rate', Rate(),
sensor_name=sensor_name('bytes'),
group_name='producer-topic-metrics.' + topic,
description='Bytes per second for topic ' + topic)
self.add_metric('compression-rate', Avg(),
sensor_name=sensor_name('compression-rate'),
group_name='producer-topic-metrics.' + topic,
description='Average Compression ratio for topic ' + topic)
self.add_metric('record-retry-rate', Rate(),
sensor_name=sensor_name('record-retries'),
group_name='producer-topic-metrics.' + topic,
description='Record retries per second for topic ' + topic)
self.add_metric('record-error-rate', Rate(),
sensor_name=sensor_name('record-errors'),
group_name='producer-topic-metrics.' + topic,
description='Record errors per second for topic ' + topic)
def update_produce_request_metrics(self, batches_map):
for node_batch in batches_map.values():
records = 0
total_bytes = 0
for batch in node_batch:
# register all per-topic metrics at once
topic = batch.topic_partition.topic
self.maybe_register_topic_metrics(topic)
# per-topic record send rate
topic_records_count = self.metrics.get_sensor(
'topic.' + topic + '.records-per-batch')
topic_records_count.record(batch.record_count)
# per-topic bytes send rate
topic_byte_rate = self.metrics.get_sensor(
'topic.' + topic + '.bytes')
topic_byte_rate.record(batch.records.size_in_bytes())
# per-topic compression rate
topic_compression_rate = self.metrics.get_sensor(
'topic.' + topic + '.compression-rate')
topic_compression_rate.record(batch.records.compression_rate())
# global metrics
self.batch_size_sensor.record(batch.records.size_in_bytes())
if batch.drained:
self.queue_time_sensor.record(batch.drained - batch.created)
self.compression_rate_sensor.record(batch.records.compression_rate())
self.max_record_size_sensor.record(batch.max_record_size)
records += batch.record_count
total_bytes += batch.records.size_in_bytes()
self.records_per_request_sensor.record(records)
self.byte_rate_sensor.record(total_bytes)
def record_retries(self, topic, count):
self.retry_sensor.record(count)
sensor = self.metrics.get_sensor('topic.' + topic + '.record-retries')
if sensor:
sensor.record(count)
def record_errors(self, topic, count):
self.error_sensor.record(count)
sensor = self.metrics.get_sensor('topic.' + topic + '.record-errors')
if sensor:
sensor.record(count)
def record_throttle_time(self, throttle_time_ms, node=None):
self.produce_throttle_time_sensor.record(throttle_time_ms)
| {
"repo_name": "Yelp/kafka-python",
"path": "kafka/producer/sender.py",
"copies": "2",
"size": "21856",
"license": "apache-2.0",
"hash": -4967074597308093000,
"line_mean": 42.712,
"line_max": 113,
"alpha_frac": 0.5858345534,
"autogenerated": false,
"ratio": 4.489728841413312,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6075563394813311,
"avg_score": null,
"num_lines": null
} |
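The version-selection logic in _produce_request above is the part of the Sender most often adjusted when targeting different broker releases. Below is a minimal standalone sketch of just that mapping; the helper name and the plain-tuple return value are illustrative, not part of kafka-python, whose real method builds ProduceRequest[version](...).

# Sketch of the api_version -> ProduceRequest schema version mapping used in
# _produce_request above (illustrative helper, not the library API).
def select_produce_request_version(api_version):
    """Return (request_version, extra_kwargs) for a broker api_version tuple."""
    if api_version >= (0, 11):
        return 3, {'transactional_id': None}  # v3 added the transactional_id field
    elif api_version >= (0, 10):
        return 2, {}                          # v2 added message timestamps
    elif api_version == (0, 9):
        return 1, {}
    return 0, {}

assert select_produce_request_version((1, 0)) == (3, {'transactional_id': None})
assert select_produce_request_version((0, 8, 2)) == (0, {})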
from __future__ import absolute_import, division
import collections
import io
import threading
import time
from ..codec import (has_gzip, has_snappy, has_lz4,
gzip_encode, snappy_encode,
lz4_encode, lz4_encode_old_kafka)
from .. import errors as Errors
from ..metrics.stats import Rate
from ..protocol.types import Int32, Int64
from ..protocol.message import MessageSet, Message
class MessageSetBuffer(object):
"""Wrap a buffer for writing MessageSet batches.
Arguments:
buf (IO stream): a buffer for writing data. Typically BytesIO.
batch_size (int): maximum number of bytes to write to the buffer.
Keyword Arguments:
compression_type ('gzip', 'snappy', None): compress messages before
publishing. Default: None.
"""
_COMPRESSORS = {
'gzip': (has_gzip, gzip_encode, Message.CODEC_GZIP),
'snappy': (has_snappy, snappy_encode, Message.CODEC_SNAPPY),
'lz4': (has_lz4, lz4_encode, Message.CODEC_LZ4),
'lz4-old-kafka': (has_lz4, lz4_encode_old_kafka, Message.CODEC_LZ4),
}
def __init__(self, buf, batch_size, compression_type=None, message_version=0):
if compression_type is not None:
assert compression_type in self._COMPRESSORS, 'Unrecognized compression type'
# Kafka 0.8/0.9 had a quirky lz4...
if compression_type == 'lz4' and message_version == 0:
compression_type = 'lz4-old-kafka'
checker, encoder, attributes = self._COMPRESSORS[compression_type]
assert checker(), 'Compression Libraries Not Found'
self._compressor = encoder
self._compression_attributes = attributes
else:
self._compressor = None
self._compression_attributes = None
self._message_version = message_version
self._buffer = buf
# Init MessageSetSize to 0 -- update on close
self._buffer.seek(0)
self._buffer.write(Int32.encode(0))
self._batch_size = batch_size
self._closed = False
self._messages = 0
self._bytes_written = 4 # Int32 header is 4 bytes
self._final_size = None
def append(self, offset, message):
"""Append a Message to the MessageSet.
Arguments:
offset (int): offset of the message
message (Message or bytes): message struct or encoded bytes
Returns: bytes written
"""
if isinstance(message, Message):
encoded = message.encode()
else:
encoded = bytes(message)
msg = Int64.encode(offset) + Int32.encode(len(encoded)) + encoded
self._buffer.write(msg)
self._messages += 1
self._bytes_written += len(msg)
return len(msg)
def has_room_for(self, key, value):
if self._closed:
return False
if not self._messages:
return True
needed_bytes = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
if key is not None:
needed_bytes += len(key)
if value is not None:
needed_bytes += len(value)
return self._buffer.tell() + needed_bytes < self._batch_size
def is_full(self):
if self._closed:
return True
return self._buffer.tell() >= self._batch_size
def close(self):
# This method may be called multiple times on the same batch
# i.e., on retries
# we need to make sure we only close it out once
# otherwise compressed messages may be double-compressed
# see Issue 718
if not self._closed:
if self._compressor:
# TODO: avoid copies with bytearray / memoryview
uncompressed_size = self._buffer.tell()
self._buffer.seek(4)
msg = Message(self._compressor(self._buffer.read(uncompressed_size - 4)),
attributes=self._compression_attributes,
magic=self._message_version)
encoded = msg.encode()
self._buffer.seek(4)
self._buffer.write(Int64.encode(0)) # offset 0 for wrapper msg
self._buffer.write(Int32.encode(len(encoded)))
self._buffer.write(encoded)
# Update the message set size (less the 4 byte header),
# and return with buffer ready for full read()
self._final_size = self._buffer.tell()
self._buffer.seek(0)
self._buffer.write(Int32.encode(self._final_size - 4))
self._buffer.seek(0)
self._closed = True
def size_in_bytes(self):
return self._final_size or self._buffer.tell()
def compression_rate(self):
return self.size_in_bytes() / self._bytes_written
def buffer(self):
return self._buffer
class SimpleBufferPool(object):
"""A simple pool of BytesIO objects with a weak memory ceiling."""
def __init__(self, memory, poolable_size, metrics=None, metric_group_prefix='producer-metrics'):
"""Create a new buffer pool.
Arguments:
memory (int): maximum memory that this buffer pool can allocate
poolable_size (int): memory size per buffer to cache in the free
list rather than deallocating
"""
self._poolable_size = poolable_size
self._lock = threading.RLock()
buffers = int(memory / poolable_size) if poolable_size else 0
self._free = collections.deque([io.BytesIO() for _ in range(buffers)])
self._waiters = collections.deque()
self.wait_time = None
if metrics:
self.wait_time = metrics.sensor('bufferpool-wait-time')
self.wait_time.add(metrics.metric_name(
'bufferpool-wait-ratio', metric_group_prefix,
'The fraction of time an appender waits for space allocation.'),
Rate())
def allocate(self, size, max_time_to_block_ms):
"""
Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes [ignored]
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
io.BytesIO
"""
with self._lock:
# check if we have a free buffer of the right size pooled
if self._free:
return self._free.popleft()
elif self._poolable_size == 0:
return io.BytesIO()
else:
# we are out of buffers and will have to block
buf = None
more_memory = threading.Condition(self._lock)
self._waiters.append(more_memory)
# loop over and over until we have a buffer or have reserved
# enough memory to allocate one
while buf is None:
start_wait = time.time()
more_memory.wait(max_time_to_block_ms / 1000.0)
end_wait = time.time()
if self.wait_time:
self.wait_time.record(end_wait - start_wait)
if self._free:
buf = self._free.popleft()
else:
self._waiters.remove(more_memory)
raise Errors.KafkaTimeoutError(
"Failed to allocate memory within the configured"
" max blocking time")
# remove the condition for this thread to let the next thread
# in line start getting memory
removed = self._waiters.popleft()
assert removed is more_memory, 'Wrong condition'
# signal any additional waiters if there is more memory left
# over for them
if self._free and self._waiters:
self._waiters[0].notify()
# unlock and return the buffer
return buf
def deallocate(self, buf):
"""
Return buffers to the pool. If they are of the poolable size add them
to the free list, otherwise just mark the memory as free.
Arguments:
buffer_ (io.BytesIO): The buffer to return
"""
with self._lock:
# BytesIO.truncate here makes the pool somewhat pointless
# but we stick with the BufferPool API until migrating to
# bytearray / memoryview. The buffer we return must not
# expose any prior data on read().
buf.truncate(0)
self._free.append(buf)
if self._waiters:
self._waiters[0].notify()
def queued(self):
"""The number of threads blocked waiting on memory."""
with self._lock:
return len(self._waiters)
'''
class BufferPool(object):
"""
A pool of ByteBuffers kept under a given memory limit. This class is fairly
specific to the needs of the producer. In particular it has the following
properties:
* There is a special "poolable size" and buffers of this size are kept in a
free list and recycled
* It is fair. That is, all memory is given to the longest waiting thread
until it has sufficient memory. This prevents starvation or deadlock when
a thread asks for a large chunk of memory and needs to block until
multiple buffers are deallocated.
"""
def __init__(self, memory, poolable_size):
"""Create a new buffer pool.
Arguments:
memory (int): maximum memory that this buffer pool can allocate
poolable_size (int): memory size per buffer to cache in the free
list rather than deallocating
"""
self._poolable_size = poolable_size
self._lock = threading.RLock()
self._free = collections.deque()
self._waiters = collections.deque()
self._total_memory = memory
self._available_memory = memory
#self.metrics = metrics;
#self.waitTime = this.metrics.sensor("bufferpool-wait-time");
#MetricName metricName = metrics.metricName("bufferpool-wait-ratio", metricGrpName, "The fraction of time an appender waits for space allocation.");
#this.waitTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));
def allocate(self, size, max_time_to_block_ms):
"""
Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
buffer
Raises:
InterruptedException If the thread is interrupted while blocked
IllegalArgumentException if size is larger than the total memory
controlled by the pool (and hence we would block forever)
"""
assert size <= self._total_memory, (
"Attempt to allocate %d bytes, but there is a hard limit of %d on"
" memory allocations." % (size, self._total_memory))
with self._lock:
# check if we have a free buffer of the right size pooled
if (size == self._poolable_size and len(self._free) > 0):
return self._free.popleft()
# now check if the request is immediately satisfiable with the
# memory on hand or if we need to block
free_list_size = len(self._free) * self._poolable_size
if self._available_memory + free_list_size >= size:
# we have enough unallocated or pooled memory to immediately
# satisfy the request
self._free_up(size)
self._available_memory -= size
raise NotImplementedError()
#return ByteBuffer.allocate(size)
else:
# we are out of memory and will have to block
accumulated = 0
buf = None
more_memory = threading.Condition(self._lock)
self._waiters.append(more_memory)
# loop over and over until we have a buffer or have reserved
# enough memory to allocate one
while (accumulated < size):
start_wait = time.time()
if not more_memory.wait(max_time_to_block_ms / 1000.0):
raise Errors.KafkaTimeoutError(
"Failed to allocate memory within the configured"
" max blocking time")
end_wait = time.time()
#this.waitTime.record(endWait - startWait, time.milliseconds());
# check if we can satisfy this request from the free list,
# otherwise allocate memory
if (accumulated == 0
and size == self._poolable_size
and self._free):
# just grab a buffer from the free list
buf = self._free.popleft()
accumulated = size
else:
# we'll need to allocate memory, but we may only get
# part of what we need on this iteration
self._free_up(size - accumulated)
got = min(size - accumulated, self._available_memory)
self._available_memory -= got
accumulated += got
# remove the condition for this thread to let the next thread
# in line start getting memory
removed = self._waiters.popleft()
assert removed is more_memory, 'Wrong condition'
# signal any additional waiters if there is more memory left
# over for them
if (self._available_memory > 0 or len(self._free) > 0):
if len(self._waiters) > 0:
self._waiters[0].notify()
# unlock and return the buffer
if buf is None:
raise NotImplementedError()
#return ByteBuffer.allocate(size)
else:
return buf
def _free_up(self, size):
"""
Attempt to ensure we have at least the requested number of bytes of
memory for allocation by deallocating pooled buffers (if needed)
"""
while self._free and self._available_memory < size:
self._available_memory += self._free.pop().capacity
def deallocate(self, buffer_, size=None):
"""
Return buffers to the pool. If they are of the poolable size add them
to the free list, otherwise just mark the memory as free.
Arguments:
buffer (io.BytesIO): The buffer to return
size (int): The size of the buffer to mark as deallocated, note
that this may be smaller than buffer.capacity since the buffer
may re-allocate itself during in-place compression
"""
with self._lock:
if size is None:
size = buffer_.capacity
if (size == self._poolable_size and size == buffer_.capacity):
buffer_.seek(0)
buffer_.truncate()
self._free.append(buffer_)
else:
self._available_memory += size
if self._waiters:
more_mem = self._waiters[0]
more_mem.notify()
def available_memory(self):
"""The total free memory both unallocated and in the free list."""
with self._lock:
return self._available_memory + len(self._free) * self._poolable_size
def unallocated_memory(self):
"""Get the unallocated memory (not in the free list or in use)."""
with self._lock:
return self._available_memory
def queued(self):
"""The number of threads blocked waiting on memory."""
with self._lock:
return len(self._waiters)
def poolable_size(self):
"""The buffer size that will be retained in the free list after use."""
return self._poolable_size
def total_memory(self):
"""The total memory managed by this pool."""
return self._total_memory
'''
| {
"repo_name": "andresher/bigdata-project2",
"path": "kafka/producer/buffer.py",
"copies": "3",
"size": "16780",
"license": "mit",
"hash": -3322086831085530000,
"line_mean": 39.1435406699,
"line_max": 156,
"alpha_frac": 0.5672228844,
"autogenerated": false,
"ratio": 4.66759388038943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009809668388022904,
"num_lines": 418
} |
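As a quick illustration of the MessageSetBuffer lifecycle described above (append encoded messages, then close() to compress the set and patch the 4-byte size header), here is a minimal sketch. The import paths assume the same kafka-python vintage as the file above; newer releases dropped this legacy message format, so treat the example as illustrative.

import io
# Assumes the legacy classes shown above are importable (older kafka-python).
from kafka.producer.buffer import MessageSetBuffer
from kafka.protocol.message import Message

buf = MessageSetBuffer(io.BytesIO(), batch_size=16384, compression_type='gzip')
for offset, payload in enumerate([b'hello', b'world']):
    buf.append(offset, Message(payload))      # writes offset + size + message bytes
buf.close()                                   # gzip-wraps the set, fixes the header
print(buf.size_in_bytes(), buf.compression_rate())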
from __future__ import absolute_import, division
import collections
import io
import threading
import time
from kafka.metrics.stats import Rate
import kafka.errors as Errors
class SimpleBufferPool(object):
"""A simple pool of BytesIO objects with a weak memory ceiling."""
def __init__(self, memory, poolable_size, metrics=None, metric_group_prefix='producer-metrics'):
"""Create a new buffer pool.
Arguments:
memory (int): maximum memory that this buffer pool can allocate
poolable_size (int): memory size per buffer to cache in the free
list rather than deallocating
"""
self._poolable_size = poolable_size
self._lock = threading.RLock()
buffers = int(memory / poolable_size) if poolable_size else 0
self._free = collections.deque([io.BytesIO() for _ in range(buffers)])
self._waiters = collections.deque()
self.wait_time = None
if metrics:
self.wait_time = metrics.sensor('bufferpool-wait-time')
self.wait_time.add(metrics.metric_name(
'bufferpool-wait-ratio', metric_group_prefix,
'The fraction of time an appender waits for space allocation.'),
Rate())
def allocate(self, size, max_time_to_block_ms):
"""
Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes [ignored]
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
io.BytesIO
"""
with self._lock:
# check if we have a free buffer of the right size pooled
if self._free:
return self._free.popleft()
elif self._poolable_size == 0:
return io.BytesIO()
else:
# we are out of buffers and will have to block
buf = None
more_memory = threading.Condition(self._lock)
self._waiters.append(more_memory)
# loop over and over until we have a buffer or have reserved
# enough memory to allocate one
while buf is None:
start_wait = time.time()
more_memory.wait(max_time_to_block_ms / 1000.0)
end_wait = time.time()
if self.wait_time:
self.wait_time.record(end_wait - start_wait)
if self._free:
buf = self._free.popleft()
else:
self._waiters.remove(more_memory)
raise Errors.KafkaTimeoutError(
"Failed to allocate memory within the configured"
" max blocking time")
# remove the condition for this thread to let the next thread
# in line start getting memory
removed = self._waiters.popleft()
assert removed is more_memory, 'Wrong condition'
# signal any additional waiters if there is more memory left
# over for them
if self._free and self._waiters:
self._waiters[0].notify()
# unlock and return the buffer
return buf
def deallocate(self, buf):
"""
Return buffers to the pool. If they are of the poolable size add them
to the free list, otherwise just mark the memory as free.
Arguments:
buffer_ (io.BytesIO): The buffer to return
"""
with self._lock:
# BytesIO.truncate here makes the pool somewhat pointless
# but we stick with the BufferPool API until migrating to
# bytearray / memoryview. The buffer we return must not
# expose any prior data on read().
buf.truncate(0)
self._free.append(buf)
if self._waiters:
self._waiters[0].notify()
def queued(self):
"""The number of threads blocked waiting on memory."""
with self._lock:
return len(self._waiters)
| {
"repo_name": "mumrah/kafka-python",
"path": "kafka/producer/buffer.py",
"copies": "5",
"size": "4370",
"license": "apache-2.0",
"hash": 6262416520791315000,
"line_mean": 37,
"line_max": 100,
"alpha_frac": 0.5581235698,
"autogenerated": false,
"ratio": 4.818081587651599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7876205157451599,
"avg_score": null,
"num_lines": null
} |
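The exhaustion path of SimpleBufferPool.allocate() above (block up to max_time_to_block_ms, then raise KafkaTimeoutError) can be exercised directly. A small sketch follows; the import paths are taken from the file path shown in the metadata and are an assumption about the installed kafka-python.

from kafka.producer.buffer import SimpleBufferPool
from kafka.errors import KafkaTimeoutError

pool = SimpleBufferPool(memory=4096, poolable_size=4096)  # exactly one pooled buffer
first = pool.allocate(4096, max_time_to_block_ms=10)      # takes the only free buffer
try:
    pool.allocate(4096, max_time_to_block_ms=10)          # waits ~10 ms, then raises
except KafkaTimeoutError:
    print('pool exhausted')
pool.deallocate(first)                                    # truncated, back on the free list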
from __future__ import absolute_import, division
import collections
try:
basestring #PY2
bytes = str
range = xrange
except NameError:
basestring = str #PY3
unicode = str
def setdefault(obj, attr, value, search_mro=False, overwrite_none=False):
if search_mro:
exists = hasattr(obj, attr)
else:
exists = attr in obj.__dict__
if exists and overwrite_none:
if getattr(obj, attr) is None:
exists = False
if exists:
value = getattr(obj, attr)
else:
setattr(obj, attr, value)
return value
class Constant(int):
def __new__(cls, name, value):
return int.__new__(cls, value)
def __init__(self, name, value):
self.name = name
def __repr__(self):
return self.name
__str__ = __repr__
def listify(value):
if isinstance(value, list):
return value
elif value is None:
return []
elif isinstance(value, collections.Sequence) \
and not isinstance(value, basestring):
return list(value)
else:
return [value]
| {
"repo_name": "mlyundin/schematics",
"path": "schematics/util.py",
"copies": "1",
"size": "1091",
"license": "bsd-3-clause",
"hash": -8750721991879476000,
"line_mean": 19.5849056604,
"line_max": 73,
"alpha_frac": 0.5912007333,
"autogenerated": false,
"ratio": 3.9672727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058473460572727,
"avg_score": null,
"num_lines": null
} |
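The three helpers above are small enough that a usage sketch makes their contracts obvious. This assumes the module imports as schematics.util (per the path in the metadata) and runs on a Python version where collections.Sequence still exists, as the file itself requires.

from schematics.util import Constant, listify, setdefault

assert listify(None) == []
assert listify('abc') == ['abc']      # strings are not exploded into characters
assert listify((1, 2)) == [1, 2]

class Box(object):
    pass

box = Box()
setdefault(box, 'name', 'default')    # attribute absent: set it and return the value
assert box.name == 'default'
setdefault(box, 'name', 'other')      # attribute present: existing value wins
assert box.name == 'default'

ACTIVE = Constant('ACTIVE', 1)        # compares like int(1), reprs as its name
assert ACTIVE == 1 and repr(ACTIVE) == 'ACTIVE'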
from __future__ import absolute_import, division
import contextlib
import itertools
import logging
import sys
import time
from signal import SIGINT, default_int_handler, signal
from pip._vendor import six
from pip._vendor.progress.bar import (
Bar, ChargingBar, FillingCirclesBar, FillingSquaresBar, IncrementalBar,
ShadyBar,
)
from pip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin
from pip._vendor.progress.spinner import Spinner
from pip._internal.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class SilentBar(Bar):
def update(self):
pass
class BlueEmojiBar(IncrementalBar):
suffix = "%(percent)d%%"
bar_prefix = " "
bar_suffix = " "
phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535") # type: Any
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
# NOTE: The "type: ignore" comments on the following classes are there to
# work around https://github.com/python/typing/issues/241
class DefaultDownloadProgressBar(BaseDownloadProgressBar,
_BaseBar): # type: ignore
pass
class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): # type: ignore
pass
class DownloadIncrementalBar(BaseDownloadProgressBar, # type: ignore
IncrementalBar):
pass
class DownloadChargingBar(BaseDownloadProgressBar, # type: ignore
ChargingBar):
pass
class DownloadShadyBar(BaseDownloadProgressBar, ShadyBar): # type: ignore
pass
class DownloadFillingSquaresBar(BaseDownloadProgressBar, # type: ignore
FillingSquaresBar):
pass
class DownloadFillingCirclesBar(BaseDownloadProgressBar, # type: ignore
FillingCirclesBar):
pass
class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, # type: ignore
BlueEmojiBar):
pass
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadIncrementalBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner)
}
def DownloadProgressProvider(progress_bar, max=None):
if max is None or max == 0:
return BAR_TYPES[progress_bar][1]().iter
else:
return BAR_TYPES[progress_bar][0](max=max).iter
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| {
"repo_name": "zvezdan/pip",
"path": "src/pip/_internal/utils/ui.py",
"copies": "7",
"size": "13637",
"license": "mit",
"hash": -1685821907862129000,
"line_mean": 31.3919239905,
"line_max": 79,
"alpha_frac": 0.6352570213,
"autogenerated": false,
"ratio": 4.151293759512938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8286550780812938,
"avg_score": null,
"num_lines": null
} |
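DownloadProgressProvider above is the entry point the rest of pip uses: it returns a bound iter() that wraps a chunk iterator in either a bar (known total size) or a spinner (unknown size). A hedged usage sketch follows; pip._internal.utils.ui is not a stable API, and the import path matches only the old pip vintage of the file above.

from pip._internal.utils.ui import DownloadProgressProvider

chunks = (b'x' * 1024 for _ in range(100))

progress = DownloadProgressProvider('on', max=100 * 1024)  # known size -> progress bar
for chunk in progress(chunks, 1024):                       # each chunk advances by 1024
    pass                                                   # write chunk to disk here

spinner_iter = DownloadProgressProvider('on', max=None)    # unknown size -> spinner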
from __future__ import absolute_import, division
import contextlib
import itertools
import logging
import sys
import time
from pip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import IO, Iterator
logger = logging.getLogger(__name__)
class SpinnerInterface(object):
def spin(self):
# type: () -> None
raise NotImplementedError()
def finish(self, final_status):
# type: (str) -> None
raise NotImplementedError()
class InteractiveSpinner(SpinnerInterface):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
# type: (str, IO[str], str, float) -> None
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
# type: (str) -> None
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
# type: () -> None
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
# type: (str) -> None
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(SpinnerInterface):
def __init__(self, message, min_update_interval_seconds=60):
# type: (str, float) -> None
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
# type: (str) -> None
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
# type: () -> None
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
# type: (str) -> None
if self._finished:
return
self._update(
"finished with status '{final_status}'".format(**locals()))
self._finished = True
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
# type: (float) -> None
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0 # type: float
def ready(self):
# type: () -> bool
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
# type: () -> None
self._last_update = time.time()
@contextlib.contextmanager
def open_spinner(message):
# type: (str) -> Iterator[SpinnerInterface]
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message) # type: SpinnerInterface
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
@contextlib.contextmanager
def hidden_cursor(file):
# type: (IO[str]) -> Iterator[None]
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
| {
"repo_name": "jsirois/pex",
"path": "pex/vendor/_vendored/pip/pip/_internal/cli/spinners.py",
"copies": "4",
"size": "5509",
"license": "apache-2.0",
"hash": -5204615404775641000,
"line_mean": 30.8439306358,
"line_max": 78,
"alpha_frac": 0.6159012525,
"autogenerated": false,
"ratio": 4.006545454545455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 173
} |
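open_spinner above is the usual way callers consume this module: it picks the interactive or non-interactive spinner, hides the cursor where possible, and reports done/canceled/error depending on how the block exits. A minimal usage sketch; the import path mirrors the file path shown and is pip-internal, so treat it as illustrative.

import time
from pip._internal.cli.spinners import open_spinner  # internal API, per the path above

with open_spinner('Doing slow work') as spinner:
    for _ in range(20):
        time.sleep(0.05)
        spinner.spin()   # rate-limited: redraws at most every 0.125 s on a tty
# normal exit prints "done"; Ctrl-C prints "canceled"; an exception prints "error"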
from __future__ import absolute_import, division
import copy
import errno
import io
import logging
from random import shuffle, uniform
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from kafka.vendor import selectors34 as selectors
import socket
import struct
import threading
import time
from kafka.vendor import six
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.oauth.abstract import AbstractTokenProvider
from kafka.protocol.admin import SaslHandShakeRequest, DescribeAclsRequest_v2
from kafka.protocol.commit import OffsetFetchRequest
from kafka.protocol.offset import OffsetRequest
from kafka.protocol.produce import ProduceRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.fetch import FetchRequest
from kafka.protocol.parser import KafkaProtocol
from kafka.protocol.types import Int32, Int8
from kafka.scram import ScramClient
from kafka.version import __version__
if six.PY2:
ConnectionError = socket.error
TimeoutError = socket.error
BlockingIOError = Exception
log = logging.getLogger(__name__)
DEFAULT_KAFKA_PORT = 9092
SASL_QOP_AUTH = 1
SASL_QOP_AUTH_INT = 2
SASL_QOP_AUTH_CONF = 4
try:
import ssl
ssl_available = True
try:
SSLEOFError = ssl.SSLEOFError
SSLWantReadError = ssl.SSLWantReadError
SSLWantWriteError = ssl.SSLWantWriteError
SSLZeroReturnError = ssl.SSLZeroReturnError
except AttributeError:
# support older ssl libraries
log.warning('Old SSL module detected.'
' SSL error handling may not operate cleanly.'
' Consider upgrading to Python 3.3 or 2.7.9')
SSLEOFError = ssl.SSLError
SSLWantReadError = ssl.SSLError
SSLWantWriteError = ssl.SSLError
SSLZeroReturnError = ssl.SSLError
except ImportError:
# support Python without ssl libraries
ssl_available = False
class SSLWantReadError(Exception):
pass
class SSLWantWriteError(Exception):
pass
# needed for SASL_GSSAPI authentication:
try:
import gssapi
from gssapi.raw.misc import GSSError
except ImportError:
#no gssapi available, will disable gssapi mechanism
gssapi = None
GSSError = None
AFI_NAMES = {
socket.AF_UNSPEC: "unspecified",
socket.AF_INET: "IPv4",
socket.AF_INET6: "IPv6",
}
class ConnectionStates(object):
DISCONNECTING = '<disconnecting>'
DISCONNECTED = '<disconnected>'
CONNECTING = '<connecting>'
HANDSHAKE = '<handshake>'
CONNECTED = '<connected>'
AUTHENTICATING = '<authenticating>'
class BrokerConnection(object):
"""Initialize a Kafka broker connection
Keyword Arguments:
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to backoff/wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. Once the maximum is reached,
reconnection attempts will continue periodically with this fixed
rate. To avoid connection storms, a randomization factor of 0.2
will be applied to the backoff resulting in a random range between
20% below and 20% above the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the broker's hostname.
default: True.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: None.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: None.
ssl_keyfile (str): optional filename containing the client private key.
default: None.
ssl_password (callable, str, bytes, bytearray): optional password or
callable function that returns a password, for decrypting the
client private key. Default: None.
ssl_crlfile (str): optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use.
Accepted values are: (0, 8, 0), (0, 8, 1), (0, 8, 2), (0, 9),
(0, 10). Default: (0, 8, 2)
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
'client_id': 'kafka-python-' + __version__,
'node_id': 0,
'request_timeout_ms': 30000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': (0, 8, 2), # default to most restrictive
'selector': selectors.DefaultSelector,
'state_change_callback': lambda node_id, sock, conn: True,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
SECURITY_PROTOCOLS = ('PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL')
SASL_MECHANISMS = ('PLAIN', 'GSSAPI', 'OAUTHBEARER', "SCRAM-SHA-256", "SCRAM-SHA-512")
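# Illustrative example (not part of the original file): given the keyword
# arguments documented in the class docstring above, a hypothetical SASL_SSL
# connection could be configured like this; host and credentials are placeholders.
#
#   conn = BrokerConnection('broker1.example.com', 9092, socket.AF_UNSPEC,
#                           security_protocol='SASL_SSL',
#                           sasl_mechanism='SCRAM-SHA-512',
#                           sasl_plain_username='alice',
#                           sasl_plain_password='secret')
#   conn.connect_blocking(timeout=10)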
def __init__(self, host, port, afi, **configs):
self.host = host
self.port = port
self.afi = afi
self._sock_afi = afi
self._sock_addr = None
self._api_versions = None
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self.node_id = self.config.pop('node_id')
if self.config['receive_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_RCVBUF,
self.config['receive_buffer_bytes']))
if self.config['send_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_SNDBUF,
self.config['send_buffer_bytes']))
assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
'security_protocol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
assert ssl_available, "Python wasn't built with SSL support"
if self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL'):
assert self.config['sasl_mechanism'] in self.SASL_MECHANISMS, (
'sasl_mechanism must be in ' + ', '.join(self.SASL_MECHANISMS))
if self.config['sasl_mechanism'] in ('PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'):
assert self.config['sasl_plain_username'] is not None, (
'sasl_plain_username required for PLAIN or SCRAM sasl'
)
assert self.config['sasl_plain_password'] is not None, (
'sasl_plain_password required for PLAIN or SCRAM sasl'
)
if self.config['sasl_mechanism'] == 'GSSAPI':
assert gssapi is not None, 'GSSAPI lib not available'
assert self.config['sasl_kerberos_service_name'] is not None, 'sasl_kerberos_service_name required for GSSAPI sasl'
if self.config['sasl_mechanism'] == 'OAUTHBEARER':
token_provider = self.config['sasl_oauth_token_provider']
assert token_provider is not None, 'sasl_oauth_token_provider required for OAUTHBEARER sasl'
assert callable(getattr(token_provider, "token", None)), 'sasl_oauth_token_provider must implement method #token()'
# This is not a general lock / this class is not generally thread-safe yet
# However, to avoid pushing responsibility for maintaining
# per-connection locks to the upstream client, we will use this lock to
# make sure that access to the protocol buffer is synchronized
# when sends happen on multiple threads
self._lock = threading.Lock()
# the protocol parser instance manages actual tracking of the
# sequence of in-flight requests to responses, which should
# function like a FIFO queue. For additional request data,
# including tracking request futures and timestamps, we
# can use a simple dictionary of correlation_id => request data
self.in_flight_requests = dict()
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
self.state = ConnectionStates.DISCONNECTED
self._reset_reconnect_backoff()
self._sock = None
self._send_buffer = b''
self._ssl_context = None
if self.config['ssl_context'] is not None:
self._ssl_context = self.config['ssl_context']
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = []
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
def _dns_lookup(self):
self._gai = dns_lookup(self.host, self.port, self.afi)
if not self._gai:
log.error('DNS lookup failed for %s:%i (%s)',
self.host, self.port, self.afi)
return False
return True
def _next_afi_sockaddr(self):
if not self._gai:
if not self._dns_lookup():
return
afi, _, __, ___, sockaddr = self._gai.pop(0)
return (afi, sockaddr)
def connect_blocking(self, timeout=float('inf')):
if self.connected():
return True
timeout += time.time()
# First attempt to perform dns lookup
# note that the underlying interface, socket.getaddrinfo,
# has no explicit timeout so we may exceed the user-specified timeout
self._dns_lookup()
# Loop once over all returned dns entries
selector = None
while self._gai:
while time.time() < timeout:
self.connect()
if self.connected():
if selector is not None:
selector.close()
return True
elif self.connecting():
if selector is None:
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_WRITE)
selector.select(1)
elif self.disconnected():
if selector is not None:
selector.close()
selector = None
break
else:
break
return False
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED and not self.blacked_out():
self.last_attempt = time.time()
next_lookup = self._next_afi_sockaddr()
if not next_lookup:
self.close(Errors.KafkaConnectionError('DNS failure'))
return self.state
else:
log.debug('%s: creating new socket', self)
assert self._sock is None
self._sock_afi, self._sock_addr = next_lookup
self._sock = socket.socket(self._sock_afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
self.state = ConnectionStates.CONNECTING
self.config['state_change_callback'](self.node_id, self._sock, self)
log.info('%s: connecting to %s:%d [%s %s]', self, self.host,
self.port, self._sock_addr, AFI_NAMES[self._sock_afi])
if self.state is ConnectionStates.CONNECTING:
# in non-blocking mode, use repeated calls to socket.connect_ex
# to check connection status
ret = None
try:
ret = self._sock.connect_ex(self._sock_addr)
except socket.error as err:
ret = err.errno
# Connection succeeded
if not ret or ret == errno.EISCONN:
log.debug('%s: established TCP connection', self)
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
log.debug('%s: initiating SSL handshake', self)
self.state = ConnectionStates.HANDSHAKE
self.config['state_change_callback'](self.node_id, self._sock, self)
# _wrap_ssl can alter the connection state -- disconnects on failure
self._wrap_ssl()
elif self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
self.config['state_change_callback'](self.node_id, self._sock, self)
else:
# security_protocol PLAINTEXT
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
# Connection failed
# WSAEINVAL == 10022, but errno.WSAEINVAL is not available on non-win systems
elif ret not in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK, 10022):
log.error('Connect attempt to %s returned error %s.'
' Disconnecting.', self, ret)
errstr = errno.errorcode.get(ret, 'UNKNOWN')
self.close(Errors.KafkaConnectionError('{} {}'.format(ret, errstr)))
return self.state
# Needs retry
else:
pass
if self.state is ConnectionStates.HANDSHAKE:
if self._try_handshake():
log.debug('%s: completed SSL handshake.', self)
if self.config['security_protocol'] == 'SASL_SSL':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
else:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state is ConnectionStates.AUTHENTICATING:
assert self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL')
if self._try_authenticate():
# _try_authenticate has side-effects: possibly disconnected on socket errors
if self.state is ConnectionStates.AUTHENTICATING:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state not in (ConnectionStates.CONNECTED,
ConnectionStates.DISCONNECTED):
# Connection timed out
request_timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() > request_timeout + self.last_attempt:
log.error('Connection attempt to %s timed out', self)
self.close(Errors.KafkaConnectionError('timeout'))
return self.state
return self.state
def _wrap_ssl(self):
assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
if self._ssl_context is None:
log.debug('%s: configuring default SSL Context', self)
self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv2 # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv3 # pylint: disable=no-member
self._ssl_context.verify_mode = ssl.CERT_OPTIONAL
if self.config['ssl_check_hostname']:
self._ssl_context.check_hostname = True
if self.config['ssl_cafile']:
log.info('%s: Loading SSL CA from %s', self, self.config['ssl_cafile'])
self._ssl_context.load_verify_locations(self.config['ssl_cafile'])
self._ssl_context.verify_mode = ssl.CERT_REQUIRED
else:
log.info('%s: Loading system default SSL CAs from %s', self, ssl.get_default_verify_paths())
self._ssl_context.load_default_certs()
if self.config['ssl_certfile'] and self.config['ssl_keyfile']:
log.info('%s: Loading SSL Cert from %s', self, self.config['ssl_certfile'])
log.info('%s: Loading SSL Key from %s', self, self.config['ssl_keyfile'])
self._ssl_context.load_cert_chain(
certfile=self.config['ssl_certfile'],
keyfile=self.config['ssl_keyfile'],
password=self.config['ssl_password'])
if self.config['ssl_crlfile']:
if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'):
raise RuntimeError('This version of Python does not support ssl_crlfile!')
log.info('%s: Loading SSL CRL from %s', self, self.config['ssl_crlfile'])
self._ssl_context.load_verify_locations(self.config['ssl_crlfile'])
# pylint: disable=no-member
self._ssl_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
if self.config['ssl_ciphers']:
log.info('%s: Setting SSL Ciphers: %s', self, self.config['ssl_ciphers'])
self._ssl_context.set_ciphers(self.config['ssl_ciphers'])
log.debug('%s: wrapping socket in ssl context', self)
try:
self._sock = self._ssl_context.wrap_socket(
self._sock,
server_hostname=self.host,
do_handshake_on_connect=False)
except ssl.SSLError as e:
log.exception('%s: Failed to wrap socket in SSLContext!', self)
self.close(e)
def _try_handshake(self):
assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
try:
self._sock.do_handshake()
return True
# old ssl in python2.6 will swallow all SSLErrors here...
except (SSLWantReadError, SSLWantWriteError):
pass
except (SSLZeroReturnError, ConnectionError, TimeoutError, SSLEOFError):
log.warning('SSL connection closed by server during handshake.')
self.close(Errors.KafkaConnectionError('SSL connection closed by server during handshake'))
# Other SSLErrors will be raised to user
return False
def _try_authenticate(self):
assert self.config['api_version'] is None or self.config['api_version'] >= (0, 10)
if self._sasl_auth_future is None:
# Build a SaslHandShakeRequest message
request = SaslHandShakeRequest[0](self.config['sasl_mechanism'])
future = Future()
sasl_response = self._send(request)
sasl_response.add_callback(self._handle_sasl_handshake_response, future)
sasl_response.add_errback(lambda f, e: f.failure(e), future)
self._sasl_auth_future = future
for r, f in self.recv():
f.success(r)
# A connection error could trigger close() which will reset the future
if self._sasl_auth_future is None:
return False
elif self._sasl_auth_future.failed():
ex = self._sasl_auth_future.exception
if not isinstance(ex, Errors.KafkaConnectionError):
raise ex # pylint: disable-msg=raising-bad-type
return self._sasl_auth_future.succeeded()
def _handle_sasl_handshake_response(self, future, response):
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
error = error_type(self)
self.close(error=error)
return future.failure(error_type(self))
if self.config['sasl_mechanism'] not in response.enabled_mechanisms:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'Kafka broker does not support %s sasl mechanism. Enabled mechanisms are: %s'
% (self.config['sasl_mechanism'], response.enabled_mechanisms)))
elif self.config['sasl_mechanism'] == 'PLAIN':
return self._try_authenticate_plain(future)
elif self.config['sasl_mechanism'] == 'GSSAPI':
return self._try_authenticate_gssapi(future)
elif self.config['sasl_mechanism'] == 'OAUTHBEARER':
return self._try_authenticate_oauth(future)
elif self.config['sasl_mechanism'].startswith("SCRAM-SHA-"):
return self._try_authenticate_scram(future)
else:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'kafka-python does not support SASL mechanism %s' %
self.config['sasl_mechanism']))
def _send_bytes(self, data):
"""Send some data via non-blocking IO
Note: this method is not synchronized internally; you should
always hold the _lock before calling
Returns: number of bytes sent
Raises: socket exception
"""
total_sent = 0
while total_sent < len(data):
try:
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
except (SSLWantReadError, SSLWantWriteError):
break
except (ConnectionError, TimeoutError) as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break
raise
except BlockingIOError:
if six.PY3:
break
raise
return total_sent
def _send_bytes_blocking(self, data):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
total_sent = 0
try:
while total_sent < len(data):
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
if total_sent != len(data):
raise ConnectionError('Buffer overrun during socket send')
return total_sent
finally:
self._sock.settimeout(0.0)
def _recv_bytes_blocking(self, n):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
try:
data = b''
while len(data) < n:
fragment = self._sock.recv(n - len(data))
if not fragment:
raise ConnectionError('Connection reset during recv')
data += fragment
return data
finally:
self._sock.settimeout(0.0)
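# Illustrative sketch (not part of the original source): the SASL helpers
# below all exchange messages over the raw socket using a 4-byte big-endian
# length prefix, built with the same primitives used in this module:
#
#     payload = b'some-sasl-token'                      # hypothetical token
#     self._send_bytes_blocking(Int32.encode(len(payload)) + payload)
#     (resp_len,) = struct.unpack('>i', self._recv_bytes_blocking(4))
#     response = self._recv_bytes_blocking(resp_len)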
def _try_authenticate_plain(self, future):
if self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.warning('%s: Sending username and password in the clear', self)
data = b''
# Send PLAIN credentials per RFC-4616
msg = bytes('\0'.join([self.config['sasl_plain_username'],
self.config['sasl_plain_username'],
self.config['sasl_plain_password']]).encode('utf-8'))
size = Int32.encode(len(msg))
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
try:
self._send_bytes_blocking(size + msg)
# The server will send a zero sized message (that is Int32(0)) on success.
# The connection is closed on failure
data = self._recv_bytes_blocking(4)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
if data != b'\x00\x00\x00\x00':
error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
return future.failure(error)
log.info('%s: Authenticated as %s via PLAIN', self, self.config['sasl_plain_username'])
return future.success(True)
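# Illustrative sketch (not part of the original source): per RFC 4616 the
# PLAIN payload is 'authzid \0 authcid \0 password'; here the configured
# username is sent for both identities, e.g. with hypothetical credentials:
#
#     >>> '\0'.join(['alice', 'alice', 's3cret']).encode('utf-8')
#     b'alice\x00alice\x00s3cret'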
def _try_authenticate_scram(self, future):
if self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.warning('%s: Exchanging credentials in the clear', self)
scram_client = ScramClient(
self.config['sasl_plain_username'], self.config['sasl_plain_password'], self.config['sasl_mechanism']
)
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
try:
client_first = scram_client.first_message().encode('utf-8')
size = Int32.encode(len(client_first))
self._send_bytes_blocking(size + client_first)
(data_len,) = struct.unpack('>i', self._recv_bytes_blocking(4))
server_first = self._recv_bytes_blocking(data_len).decode('utf-8')
scram_client.process_server_first_message(server_first)
client_final = scram_client.final_message().encode('utf-8')
size = Int32.encode(len(client_final))
self._send_bytes_blocking(size + client_final)
(data_len,) = struct.unpack('>i', self._recv_bytes_blocking(4))
server_final = self._recv_bytes_blocking(data_len).decode('utf-8')
scram_client.process_server_final_message(server_final)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
log.info(
'%s: Authenticated as %s via %s', self, self.config['sasl_plain_username'], self.config['sasl_mechanism']
)
return future.success(True)
def _try_authenticate_gssapi(self, future):
kerberos_domain_name = self.config['sasl_kerberos_domain_name'] or self.host
auth_id = self.config['sasl_kerberos_service_name'] + '@' + kerberos_domain_name
gssapi_name = gssapi.Name(
auth_id,
name_type=gssapi.NameType.hostbased_service
).canonicalize(gssapi.MechType.kerberos)
log.debug('%s: GSSAPI name: %s', self, gssapi_name)
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
# Establish security context and negotiate protection level
# For reference RFC 2222, section 7.2.1
try:
# Exchange tokens until authentication either succeeds or fails
client_ctx = gssapi.SecurityContext(name=gssapi_name, usage='initiate')
received_token = None
while not client_ctx.complete:
# calculate an output token from kafka token (or None if first iteration)
output_token = client_ctx.step(received_token)
# pass output token to kafka, or send empty response if the security
# context is complete (output token is None in that case)
if output_token is None:
self._send_bytes_blocking(Int32.encode(0))
else:
msg = output_token
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
# The server will send a token back. Processing of this token either
# establishes a security context, or it needs further token exchange.
# The gssapi will be able to identify the needed next step.
# The connection is closed on failure.
header = self._recv_bytes_blocking(4)
(token_size,) = struct.unpack('>i', header)
received_token = self._recv_bytes_blocking(token_size)
# Process the security layer negotiation token, sent by the server
# once the security context is established.
# unwraps message containing supported protection levels and msg size
msg = client_ctx.unwrap(received_token).message
# Kafka currently doesn't support integrity or confidentiality security layers, so we
# simply set QoP to 'auth' only (first octet). We reuse the max message size proposed
# by the server
msg = Int8.encode(SASL_QOP_AUTH & Int8.decode(io.BytesIO(msg[0:1]))) + msg[1:]
# add authorization identity to the response, GSS-wrap and send it
msg = client_ctx.wrap(msg + auth_id.encode(), False).message
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
except Exception as e:
err = e
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
log.info('%s: Authenticated as %s via GSSAPI', self, gssapi_name)
return future.success(True)
def _try_authenticate_oauth(self, future):
data = b''
msg = bytes(self._build_oauth_client_request().encode("utf-8"))
size = Int32.encode(len(msg))
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
try:
# Send SASL OAuthBearer request with OAuth token
self._send_bytes_blocking(size + msg)
# The server will send a zero sized message (that is Int32(0)) on success.
# The connection is closed on failure
data = self._recv_bytes_blocking(4)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
if data != b'\x00\x00\x00\x00':
error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
return future.failure(error)
log.info('%s: Authenticated via OAuth', self)
return future.success(True)
def _build_oauth_client_request(self):
token_provider = self.config['sasl_oauth_token_provider']
return "n,,\x01auth=Bearer {}{}\x01\x01".format(token_provider.token(), self._token_extensions())
def _token_extensions(self):
"""
Return a string representation of the OPTIONAL key-value pairs that can be sent with an OAUTHBEARER
initial request.
"""
token_provider = self.config['sasl_oauth_token_provider']
# Only run if the #extensions() method is implemented by the client's Token Provider class
# Builds up a string separated by \x01 via a dict of key value pairs
if callable(getattr(token_provider, "extensions", None)) and len(token_provider.extensions()) > 0:
msg = "\x01".join(["{}={}".format(k, v) for k, v in token_provider.extensions().items()])
return "\x01" + msg
else:
return ""
def blacked_out(self):
"""
Return True if we are disconnected from this node and can't
re-establish a connection yet (the reconnect backoff has not elapsed)
"""
if self.state is ConnectionStates.DISCONNECTED:
if time.time() < self.last_attempt + self._reconnect_backoff:
return True
return False
def connection_delay(self):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting or connected, returns a very
large number to handle slow/stalled connections.
"""
time_waited = time.time() - (self.last_attempt or 0)
if self.state is ConnectionStates.DISCONNECTED:
return max(self._reconnect_backoff - time_waited, 0) * 1000
else:
# When connecting or connected, we should be able to delay
# indefinitely since other events (connection or data acked) will
# cause a wakeup once data can be sent.
return float('inf')
def connected(self):
"""Return True iff socket is connected."""
return self.state is ConnectionStates.CONNECTED
def connecting(self):
"""Returns True if still connecting (this may encompass several
different states, such as SSL handshake, authentication, etc)."""
return self.state in (ConnectionStates.CONNECTING,
ConnectionStates.HANDSHAKE,
ConnectionStates.AUTHENTICATING)
def disconnected(self):
"""Return True iff socket is closed"""
return self.state is ConnectionStates.DISCONNECTED
def _reset_reconnect_backoff(self):
self._failures = 0
self._reconnect_backoff = self.config['reconnect_backoff_ms'] / 1000.0
def _update_reconnect_backoff(self):
# Do not mark as failure if there are more dns entries available to try
if len(self._gai) > 0:
return
if self.config['reconnect_backoff_max_ms'] > self.config['reconnect_backoff_ms']:
self._failures += 1
self._reconnect_backoff = self.config['reconnect_backoff_ms'] * 2 ** (self._failures - 1)
self._reconnect_backoff = min(self._reconnect_backoff, self.config['reconnect_backoff_max_ms'])
self._reconnect_backoff *= uniform(0.8, 1.2)
self._reconnect_backoff /= 1000.0
log.debug('%s: reconnect backoff %s after %s failures', self, self._reconnect_backoff, self._failures)
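# Illustrative sketch (not part of the original source): with the defaults
# reconnect_backoff_ms=50 and reconnect_backoff_max_ms=1000, consecutive
# failures back off as roughly 50, 100, 200, 400, 800, 1000 ms, each scaled
# by a random jitter factor drawn from [0.8, 1.2]:
#
#     backoff_ms = min(50 * 2 ** (failures - 1), 1000) * uniform(0.8, 1.2)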
def _close_socket(self):
if hasattr(self, '_sock') and self._sock is not None:
self._sock.close()
self._sock = None
def __del__(self):
self._close_socket()
def close(self, error=None):
"""Close socket and fail all in-flight-requests.
Arguments:
error (Exception, optional): pending in-flight-requests
will be failed with this exception.
Default: kafka.errors.KafkaConnectionError.
"""
if self.state is ConnectionStates.DISCONNECTED:
return
with self._lock:
if self.state is ConnectionStates.DISCONNECTED:
return
log.info('%s: Closing connection. %s', self, error or '')
self._update_reconnect_backoff()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
self._send_buffer = b''
if error is None:
error = Errors.Cancelled(str(self))
ifrs = list(self.in_flight_requests.items())
self.in_flight_requests.clear()
self.state = ConnectionStates.DISCONNECTED
# To avoid race conditions and/or deadlocks
# keep a reference to the socket but leave it
# open until after the state_change_callback
# This should give clients a chance to deregister
# the socket fd from selectors cleanly.
sock = self._sock
self._sock = None
# drop lock before state change callback and processing futures
self.config['state_change_callback'](self.node_id, sock, self)
sock.close()
for (_correlation_id, (future, _timestamp)) in ifrs:
future.failure(error)
def _can_send_recv(self):
"""Return True iff socket is ready for requests / responses"""
return self.state in (ConnectionStates.AUTHENTICATING,
ConnectionStates.CONNECTED)
def send(self, request, blocking=True):
"""Queue request for async network send, return Future()"""
future = Future()
if self.connecting():
return future.failure(Errors.NodeNotReadyError(str(self)))
elif not self.connected():
return future.failure(Errors.KafkaConnectionError(str(self)))
elif not self.can_send_more():
return future.failure(Errors.TooManyInFlightRequests(str(self)))
return self._send(request, blocking=blocking)
def _send(self, request, blocking=True):
future = Future()
with self._lock:
if not self._can_send_recv():
# In this case, since we created the future above,
# we know there are no callbacks/errbacks that could fire w/
# lock. So failing + returning inline should be safe
return future.failure(Errors.NodeNotReadyError(str(self)))
correlation_id = self._protocol.send_request(request)
log.debug('%s Request %d: %s', self, correlation_id, request)
if request.expect_response():
sent_time = time.time()
assert correlation_id not in self.in_flight_requests, 'Correlation ID already in-flight!'
self.in_flight_requests[correlation_id] = (future, sent_time)
else:
future.success(None)
# Attempt to replicate behavior from prior to introduction of
# send_pending_requests() / async sends
if blocking:
self.send_pending_requests()
return future
def send_pending_requests(self):
"""Attempts to send pending requests messages via blocking IO
If all requests have been sent, return True
Otherwise, if the socket is blocked and there are more bytes to send,
return False.
"""
try:
with self._lock:
if not self._can_send_recv():
return False
data = self._protocol.send_bytes()
total_bytes = self._send_bytes_blocking(data)
if self._sensors:
self._sensors.bytes_sent.record(total_bytes)
return True
except (ConnectionError, TimeoutError) as e:
log.exception("Error sending request data to %s", self)
error = Errors.KafkaConnectionError("%s: %s" % (self, e))
self.close(error=error)
return False
def send_pending_requests_v2(self):
"""Attempts to send pending requests messages via non-blocking IO
If all requests have been sent, return True
Otherwise, if the socket is blocked and there are more bytes to send,
return False.
"""
try:
with self._lock:
if not self._can_send_recv():
return False
# _protocol.send_bytes returns encoded requests to send
# we send them via _send_bytes()
# and hold leftover bytes in _send_buffer
if not self._send_buffer:
self._send_buffer = self._protocol.send_bytes()
total_bytes = 0
if self._send_buffer:
total_bytes = self._send_bytes(self._send_buffer)
self._send_buffer = self._send_buffer[total_bytes:]
if self._sensors:
self._sensors.bytes_sent.record(total_bytes)
# Return True iff send buffer is empty
return len(self._send_buffer) == 0
except (ConnectionError, TimeoutError, Exception) as e:
log.exception("Error sending request data to %s", self)
error = Errors.KafkaConnectionError("%s: %s" % (self, e))
self.close(error=error)
return False
def can_send_more(self):
"""Return True unless there are max_in_flight_requests_per_connection."""
max_ifrs = self.config['max_in_flight_requests_per_connection']
return len(self.in_flight_requests) < max_ifrs
def recv(self):
"""Non-blocking network receive.
Return list of (response, future) tuples
"""
responses = self._recv()
if not responses and self.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
self, self.config['request_timeout_ms'])
self.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
self.config['request_timeout_ms']))
return ()
# augment responses w/ correlation_id, future, and timestamp
for i, (correlation_id, response) in enumerate(responses):
try:
with self._lock:
(future, timestamp) = self.in_flight_requests.pop(correlation_id)
except KeyError:
self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
return ()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
return responses
def _recv(self):
"""Take all available bytes from socket, return list of any responses from parser"""
recvd = []
err = None
with self._lock:
if not self._can_send_recv():
log.warning('%s cannot recv: socket not connected', self)
return ()
while len(recvd) < self.config['sock_chunk_buffer_count']:
try:
data = self._sock.recv(self.config['sock_chunk_bytes'])
# We expect socket.recv to raise an exception if there are no
# bytes available to read from the socket in non-blocking mode,
# but if the socket is disconnected, we will get empty data
# without an exception raised
if not data:
log.error('%s: socket disconnected', self)
err = Errors.KafkaConnectionError('socket disconnected')
break
else:
recvd.append(data)
except (SSLWantReadError, SSLWantWriteError):
break
except (ConnectionError, TimeoutError) as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break
log.exception('%s: Error receiving network data'
' closing socket', self)
err = Errors.KafkaConnectionError(e)
break
except BlockingIOError:
if six.PY3:
break
# For PY2 this is a catchall and should be re-raised
raise
# Only process bytes if there was no connection exception
if err is None:
recvd_data = b''.join(recvd)
if self._sensors:
self._sensors.bytes_received.record(len(recvd_data))
# We need to keep the lock through protocol receipt
# so that we ensure that the processed byte order is the
# same as the received byte order
try:
return self._protocol.receive_bytes(recvd_data)
except Errors.KafkaProtocolError as e:
err = e
self.close(error=err)
return ()
def requests_timed_out(self):
with self._lock:
if self.in_flight_requests:
get_timestamp = lambda v: v[1]
oldest_at = min(map(get_timestamp,
self.in_flight_requests.values()))
timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() >= oldest_at + timeout:
return True
return False
def _handle_api_version_response(self, response):
error_type = Errors.for_code(response.error_code)
assert error_type is Errors.NoError, "API version check failed"
self._api_versions = dict([
(api_key, (min_version, max_version))
for api_key, min_version, max_version in response.api_versions
])
return self._api_versions
def get_api_versions(self):
if self._api_versions is not None:
return self._api_versions
version = self.check_version()
if version < (0, 10, 0):
raise Errors.UnsupportedVersionError(
"ApiVersion not supported by cluster version {} < 0.10.0"
.format(version))
# _api_versions is set as a side effect of check_version() on a cluster
# that supports 0.10.0 or later
return self._api_versions
def _infer_broker_version_from_api_versions(self, api_versions):
# The logic here is to check the list of supported request versions
# in reverse order. As soon as we find one that works, return it
test_cases = [
# format (<broker version>, <needed struct>)
((2, 5, 0), DescribeAclsRequest_v2),
((2, 4, 0), ProduceRequest[8]),
((2, 3, 0), FetchRequest[11]),
((2, 2, 0), OffsetRequest[5]),
((2, 1, 0), FetchRequest[10]),
((2, 0, 0), FetchRequest[8]),
((1, 1, 0), FetchRequest[7]),
((1, 0, 0), MetadataRequest[5]),
((0, 11, 0), MetadataRequest[4]),
((0, 10, 2), OffsetFetchRequest[2]),
((0, 10, 1), MetadataRequest[2]),
]
# Get the best match of test cases
for broker_version, struct in sorted(test_cases, reverse=True):
if struct.API_KEY not in api_versions:
continue
min_version, max_version = api_versions[struct.API_KEY]
if min_version <= struct.API_VERSION <= max_version:
return broker_version
# We know that ApiVersionResponse is only supported in 0.10+
# so if all else fails, choose that
return (0, 10, 0)
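# Illustrative sketch (not part of the original source): given a broker that
# advertises Fetch (api_key 1) up to version 11, the newest matching test
# case is ((2, 3, 0), FetchRequest[11]), so the inferred version would be:
#
#     >>> conn._infer_broker_version_from_api_versions({1: (0, 11)})
#     (2, 3, 0)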
def check_version(self, timeout=2, strict=False, topics=[]):
"""Attempt to guess the broker version.
Note: This is a blocking call.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
"""
timeout_at = time.time() + timeout
log.info('Probing node %s broker version', self.node_id)
# Monkeypatch some connection configurations to avoid timeouts
override_config = {
'request_timeout_ms': timeout * 1000,
'max_in_flight_requests_per_connection': 5
}
stashed = {}
for key in override_config:
stashed[key] = self.config[key]
self.config[key] = override_config[key]
def reset_override_configs():
for key in stashed:
self.config[key] = stashed[key]
# kafka kills the connection when it doesn't recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
test_cases = [
# All cases starting from 0.10 will be based on ApiVersionResponse
((0, 10), ApiVersionRequest[0]()),
((0, 9), ListGroupsRequest[0]()),
((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
((0, 8, 0), MetadataRequest[0](topics)),
]
for version, request in test_cases:
if not self.connect_blocking(timeout_at - time.time()):
reset_override_configs()
raise Errors.NodeNotReadyError()
f = self.send(request)
# HACK: sleeping to wait for socket to send bytes
time.sleep(0.1)
# when broker receives an unrecognized request API
# it abruptly closes our socket.
# so we attempt to send a second request immediately
# that we believe it will definitely recognize (metadata)
# the attempt to write to a disconnected socket should
# immediately fail and allow us to infer that the prior
# request was unrecognized
mr = self.send(MetadataRequest[0](topics))
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_READ)
while not (f.is_done and mr.is_done):
selector.select(1)
for response, future in self.recv():
future.success(response)
selector.close()
if f.succeeded():
if isinstance(request, ApiVersionRequest[0]):
# Starting from 0.10 kafka broker we determine version
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
log.info('Broker version identified as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
# Only enable strict checking to verify that we understand failure
# modes. For most users, the fact that the request failed should be
# enough to rule out a particular broker version.
if strict:
# If the socket flush hack did not work (which should force the
# connection to close and fail all pending requests), then we
# get a basic Request Timeout. This is not ideal, but we'll deal
if isinstance(f.exception, Errors.RequestTimedOutError):
pass
# 0.9 brokers do not close the socket on unrecognized api
# requests (bug...). In this case we expect to see a correlation
# id mismatch
elif (isinstance(f.exception, Errors.CorrelationIdError) and
version == (0, 10)):
pass
elif six.PY2:
assert isinstance(f.exception.args[0], socket.error)
assert f.exception.args[0].errno in (32, 54, 104)
else:
assert isinstance(f.exception.args[0], ConnectionError)
log.info("Broker is not v%s -- it did not recognize %s",
version, request.__class__.__name__)
else:
reset_override_configs()
raise Errors.UnrecognizedBrokerVersion()
reset_override_configs()
return version
def __str__(self):
return "<BrokerConnection node_id=%s host=%s:%d %s [%s %s]>" % (
self.node_id, self.host, self.port, self.state,
AFI_NAMES[self._sock_afi], self._sock_addr)
class BrokerConnectionMetrics(object):
def __init__(self, metrics, metric_group_prefix, node_id):
self.metrics = metrics
# Any broker may have registered summary metrics already
# but if not, we need to create them so we can set as parents below
all_conns_transferred = metrics.get_sensor('bytes-sent-received')
if not all_conns_transferred:
metric_group_name = metric_group_prefix + '-metrics'
bytes_transferred = metrics.sensor('bytes-sent-received')
bytes_transferred.add(metrics.metric_name(
'network-io-rate', metric_group_name,
'The average number of network operations (reads or writes) on all'
' connections per second.'), Rate(sampled_stat=Count()))
bytes_sent = metrics.sensor('bytes-sent',
parents=[bytes_transferred])
bytes_sent.add(metrics.metric_name(
'outgoing-byte-rate', metric_group_name,
'The average number of outgoing bytes sent per second to all'
' servers.'), Rate())
bytes_sent.add(metrics.metric_name(
'request-rate', metric_group_name,
'The average number of requests sent per second.'),
Rate(sampled_stat=Count()))
bytes_sent.add(metrics.metric_name(
'request-size-avg', metric_group_name,
'The average size of all requests in the window.'), Avg())
bytes_sent.add(metrics.metric_name(
'request-size-max', metric_group_name,
'The maximum size of any request sent in the window.'), Max())
bytes_received = metrics.sensor('bytes-received',
parents=[bytes_transferred])
bytes_received.add(metrics.metric_name(
'incoming-byte-rate', metric_group_name,
'Bytes/second read off all sockets'), Rate())
bytes_received.add(metrics.metric_name(
'response-rate', metric_group_name,
'Responses received per second.'),
Rate(sampled_stat=Count()))
request_latency = metrics.sensor('request-latency')
request_latency.add(metrics.metric_name(
'request-latency-avg', metric_group_name,
'The average request latency in ms.'),
Avg())
request_latency.add(metrics.metric_name(
'request-latency-max', metric_group_name,
'The maximum request latency in ms.'),
Max())
# if one sensor of the metrics has been registered for the connection,
# then all other sensors should have been registered; and vice versa
node_str = 'node-{0}'.format(node_id)
node_sensor = metrics.get_sensor(node_str + '.bytes-sent')
if not node_sensor:
metric_group_name = metric_group_prefix + '-node-metrics.' + node_str
bytes_sent = metrics.sensor(
node_str + '.bytes-sent',
parents=[metrics.get_sensor('bytes-sent')])
bytes_sent.add(metrics.metric_name(
'outgoing-byte-rate', metric_group_name,
'The average number of outgoing bytes sent per second.'),
Rate())
bytes_sent.add(metrics.metric_name(
'request-rate', metric_group_name,
'The average number of requests sent per second.'),
Rate(sampled_stat=Count()))
bytes_sent.add(metrics.metric_name(
'request-size-avg', metric_group_name,
'The average size of all requests in the window.'),
Avg())
bytes_sent.add(metrics.metric_name(
'request-size-max', metric_group_name,
'The maximum size of any request sent in the window.'),
Max())
bytes_received = metrics.sensor(
node_str + '.bytes-received',
parents=[metrics.get_sensor('bytes-received')])
bytes_received.add(metrics.metric_name(
'incoming-byte-rate', metric_group_name,
'Bytes/second read off node-connection socket'),
Rate())
bytes_received.add(metrics.metric_name(
'response-rate', metric_group_name,
'The average number of responses received per second.'),
Rate(sampled_stat=Count()))
request_time = metrics.sensor(
node_str + '.latency',
parents=[metrics.get_sensor('request-latency')])
request_time.add(metrics.metric_name(
'request-latency-avg', metric_group_name,
'The average request latency in ms.'),
Avg())
request_time.add(metrics.metric_name(
'request-latency-max', metric_group_name,
'The maximum request latency in ms.'),
Max())
self.bytes_sent = metrics.sensor(node_str + '.bytes-sent')
self.bytes_received = metrics.sensor(node_str + '.bytes-received')
self.request_time = metrics.sensor(node_str + '.latency')
def _address_family(address):
"""
Attempt to determine the family of an address (or hostname)
:return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
could not be determined
"""
if address.startswith('[') and address.endswith(']'):
return socket.AF_INET6
for af in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(af, address)
return af
except (ValueError, AttributeError, socket.error):
continue
return socket.AF_UNSPEC
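# Illustrative examples (not part of the original source):
#
#     _address_family('10.0.0.1')            -> socket.AF_INET
#     _address_family('::1')                 -> socket.AF_INET6
#     _address_family('[fe80::1]')           -> socket.AF_INET6  (bracketed literal)
#     _address_family('broker.example.com')  -> socket.AF_UNSPEC (hostname)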
def get_ip_port_afi(host_and_port_str):
"""
Parse the IP and port from a string in the format of:
* host_or_ip <- Can be either IPv4 address literal or hostname/fqdn
* host_or_ipv4:port <- Can be either IPv4 address literal or hostname/fqdn
* [host_or_ip] <- IPv6 address literal
* [host_or_ip]:port <- IPv6 address literal
.. note:: IPv6 address literals with ports *must* be enclosed in brackets
.. note:: If the port is not specified, default will be returned.
:return: tuple (host, port, afi), afi will be socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC
"""
host_and_port_str = host_and_port_str.strip()
if host_and_port_str.startswith('['):
af = socket.AF_INET6
host, rest = host_and_port_str[1:].split(']')
if rest:
port = int(rest[1:])
else:
port = DEFAULT_KAFKA_PORT
return host, port, af
else:
if ':' not in host_and_port_str:
af = _address_family(host_and_port_str)
return host_and_port_str, DEFAULT_KAFKA_PORT, af
else:
# now we have something with a colon in it and no square brackets. It could be
# either an IPv6 address literal (e.g., "::1") or an IP:port pair or a host:port pair
try:
# if it decodes as an IPv6 address, use that
socket.inet_pton(socket.AF_INET6, host_and_port_str)
return host_and_port_str, DEFAULT_KAFKA_PORT, socket.AF_INET6
except AttributeError:
log.warning('socket.inet_pton not available on this platform.'
' consider `pip install win_inet_pton`')
pass
except (ValueError, socket.error):
# it's a host:port pair
pass
host, port = host_and_port_str.rsplit(':', 1)
port = int(port)
af = _address_family(host)
return host, port, af
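# Illustrative examples (not part of the original source); hosts are
# hypothetical and DEFAULT_KAFKA_PORT is assumed to be 9092:
#
#     get_ip_port_afi('10.0.0.1:9093')       -> ('10.0.0.1', 9093, socket.AF_INET)
#     get_ip_port_afi('broker.example.com')  -> ('broker.example.com', 9092, socket.AF_UNSPEC)
#     get_ip_port_afi('[::1]:9093')          -> ('::1', 9093, socket.AF_INET6)
#     get_ip_port_afi('::1')                 -> ('::1', 9092, socket.AF_INET6)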
def collect_hosts(hosts, randomize=True):
"""
Collects a comma-separated set of hosts (host:port) and optionally
randomize the returned list.
"""
if isinstance(hosts, six.string_types):
hosts = hosts.strip().split(',')
result = []
afi = socket.AF_INET
for host_port in hosts:
host, port, afi = get_ip_port_afi(host_port)
if port < 0:
port = DEFAULT_KAFKA_PORT
result.append((host, port, afi))
if randomize:
shuffle(result)
return result
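# Illustrative example (not part of the original source); hostnames are
# hypothetical:
#
#     collect_hosts('k1.example.com:9092,k2.example.com:9093', randomize=False)
#     -> [('k1.example.com', 9092, socket.AF_UNSPEC),
#         ('k2.example.com', 9093, socket.AF_UNSPEC)]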
def is_inet_4_or_6(gai):
"""Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
return gai[0] in (socket.AF_INET, socket.AF_INET6)
def dns_lookup(host, port, afi=socket.AF_UNSPEC):
"""Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
# XXX: all DNS functions in Python are blocking. If we really
# want to be non-blocking here, we need to use a 3rd-party
# library like python-adns, or move resolution onto its
# own thread. This will be subject to the default libc
# name resolution timeout (5s on most Linux boxes)
try:
return list(filter(is_inet_4_or_6,
socket.getaddrinfo(host, port, afi,
socket.SOCK_STREAM)))
except socket.gaierror as ex:
log.warning('DNS lookup failed for %s:%d,'
' exception was %s. Is your'
' advertised.listeners (called'
' advertised.host.name before Kafka 0.9)'
' correct and resolvable?',
host, port, ex)
return []
| {
"repo_name": "scrapinghub/kafka-python",
"path": "kafka/conn.py",
"copies": "1",
"size": "68402",
"license": "apache-2.0",
"hash": -4989628922058063000,
"line_mean": 43.6196999348,
"line_max": 131,
"alpha_frac": 0.5782140873,
"autogenerated": false,
"ratio": 4.36599221293164,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008768483497951125,
"num_lines": 1533
} |
from __future__ import absolute_import, division
import copy
import logging
import socket
import time
from kafka.errors import KafkaConfigurationError, UnsupportedVersionError
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
from kafka.consumer.fetcher import Fetcher
from kafka.consumer.subscription_state import SubscriptionState
from kafka.coordinator.consumer import ConsumerCoordinator
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.offset import OffsetResetStrategy
from kafka.structs import TopicPartition
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaConsumer(six.Iterator):
"""Consume records from a Kafka cluster.
The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (requires
kafka >= 0.9.0.0).
The consumer is not thread safe and should not be shared across threads.
Arguments:
*topics (str): optional list of topics to subscribe to. If not set,
call :meth:`~kafka.KafkaConsumer.subscribe` or
:meth:`~kafka.KafkaConsumer.assign` before consuming records.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): A name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
group_id (str or None): The name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: None
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if the
first message in the first non-empty partition of the fetch is
larger than this value, the message will still be returned to
ensure that the consumer can make progress. NOTE: consumer performs
fetches to multiple brokers in parallel so memory usage will depend
on the number of brokers containing partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 MB).
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 305000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. To avoid connection storms, a
randomization factor of 0.2 will be applied to the backoff
resulting in a random range between 20% below and 20% above
the computed value. Default: 1000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If True, the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): Number of milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): Called as
callback(offsets, response); response will be either an Exception
or an OffsetCommitResponse struct. This callback can be used to
trigger custom actions when a commit request completes.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata, even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
partition_assignment_strategy (list): List of objects to use to
distribute partition ownership amongst consumer instances when
group management is used.
Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
max_poll_records (int): The maximum number of records returned in a
single call to :meth:`~kafka.KafkaConsumer.poll`. Default: 500
max_poll_interval_ms (int): The maximum delay between invocations of
:meth:`~kafka.KafkaConsumer.poll` when using consumer group
management. This places an upper bound on the amount of time that
the consumer can be idle before fetching more records. If
:meth:`~kafka.KafkaConsumer.poll` is not called before expiration
of this timeout, then the consumer is considered failed and the
group will rebalance in order to reassign the partitions to another
member. Default 300000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. The consumer sends
periodic heartbeats to indicate its liveness to the broker. If
no heartbeats are received by the broker before the expiration of
this session timeout, then the broker will remove this consumer
from the group and initiate a rebalance. Note that the value must
be in the allowable range as configured in the broker configuration
by group.min.session.timeout.ms and group.max.session.timeout.ms.
Default: 10000
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management facilities. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). The java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). The java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
consumer_timeout_ms (int): number of milliseconds to block during
message iteration before raising StopIteration (i.e., ending the
iterator). Default: block forever [float('inf')].
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
Default: True.
ssl_cafile (str): Optional filename of ca file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use. If set to
None, the client will attempt to infer the broker version by probing
various APIs. Different versions enable different functionality.
Examples:
(0, 9) enables full group coordination features with automatic
partition assignment and rebalancing,
(0, 8, 2) enables kafka-storage offset commits with manual
partition assignment only,
(0, 8, 1) enables zookeeper-storage offset commits with manual
partition assignment only,
(0, 8, 0) enables basic functionality but requires manual
partition assignment and offset management.
Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version set to None.
connections_max_idle_ms: Close idle connections after the number of
milliseconds specified by this config. The broker closes idle
connections after connections.max.idle.ms, so this avoids hitting
unexpected socket disconnected errors on the client.
Default: 540000
metric_reporters (list): A list of classes to use as metrics reporters.
Implementing the AbstractMetricsReporter interface allows plugging
in classes that will be notified of new metric creation. Default: []
metrics_num_samples (int): The number of samples maintained to compute
metrics. Default: 2
metrics_sample_window_ms (int): The maximum age in milliseconds of
samples used to compute metrics. Default: 30000
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to True
the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+ Default: True
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER.
sasl_plain_username (str): Username for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_plain_password (str): Password for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of the bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
Note:
Configuration parameters are described in more detail at
https://kafka.apache.org/documentation/#consumerconfigs
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'group_id': None,
'key_deserializer': None,
'value_deserializer': None,
'fetch_max_wait_ms': 500,
'fetch_min_bytes': 1,
'fetch_max_bytes': 52428800,
'max_partition_fetch_bytes': 1 * 1024 * 1024,
'request_timeout_ms': 305000, # chosen to be higher than the default of max_poll_interval_ms
'retry_backoff_ms': 100,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'auto_offset_reset': 'latest',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': lambda offsets, response: True,
'check_crcs': True,
'metadata_max_age_ms': 5 * 60 * 1000,
'partition_assignment_strategy': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'max_poll_records': 500,
'max_poll_interval_ms': 300000,
'session_timeout_ms': 10000,
'heartbeat_interval_ms': 3000,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'consumer_timeout_ms': float('inf'),
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'connections_max_idle_ms': 9 * 60 * 1000,
'metric_reporters': [],
'metrics_num_samples': 2,
'metrics_sample_window_ms': 30000,
'metric_group_prefix': 'consumer',
'selector': selectors.DefaultSelector,
'exclude_internal_topics': True,
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
DEFAULT_SESSION_TIMEOUT_MS_0_9 = 30000
def __init__(self, *topics, **configs):
# Only check for extra config keys in top-level class
extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
if extra_configs:
raise KafkaConfigurationError("Unrecognized configs: %s" % (extra_configs,))
self.config = copy.copy(self.DEFAULT_CONFIG)
self.config.update(configs)
deprecated = {'smallest': 'earliest', 'largest': 'latest'}
if self.config['auto_offset_reset'] in deprecated:
new_config = deprecated[self.config['auto_offset_reset']]
log.warning('use auto_offset_reset=%s (%s is deprecated)',
new_config, self.config['auto_offset_reset'])
self.config['auto_offset_reset'] = new_config
connections_max_idle_ms = self.config['connections_max_idle_ms']
request_timeout_ms = self.config['request_timeout_ms']
fetch_max_wait_ms = self.config['fetch_max_wait_ms']
if not (fetch_max_wait_ms < request_timeout_ms < connections_max_idle_ms):
raise KafkaConfigurationError(
"connections_max_idle_ms ({}) must be larger than "
"request_timeout_ms ({}) which must be larger than "
"fetch_max_wait_ms ({})."
.format(connections_max_idle_ms, request_timeout_ms, fetch_max_wait_ms))
metrics_tags = {'client-id': self.config['client_id']}
metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
time_window_ms=self.config['metrics_sample_window_ms'],
tags=metrics_tags)
reporters = [reporter() for reporter in self.config['metric_reporters']]
self._metrics = Metrics(metric_config, reporters)
# TODO _metrics likely needs to be passed to KafkaClient, etc.
# api_version was previously a str. Accept old format for now
if isinstance(self.config['api_version'], str):
str_version = self.config['api_version']
if str_version == 'auto':
self.config['api_version'] = None
else:
self.config['api_version'] = tuple(map(int, str_version.split('.')))
log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
str(self.config['api_version']), str_version)
self._client = KafkaClient(metrics=self._metrics, **self.config)
# Get auto-discovered version from client if necessary
if self.config['api_version'] is None:
self.config['api_version'] = self._client.config['api_version']
# Coordinator configurations are different for older brokers
# max_poll_interval_ms is not supported directly -- it must be
# the same as session_timeout_ms. If the user provides one of them,
# use it for both. Otherwise use the old default of 30secs
if self.config['api_version'] < (0, 10, 1):
if 'session_timeout_ms' not in configs:
if 'max_poll_interval_ms' in configs:
self.config['session_timeout_ms'] = configs['max_poll_interval_ms']
else:
self.config['session_timeout_ms'] = self.DEFAULT_SESSION_TIMEOUT_MS_0_9
if 'max_poll_interval_ms' not in configs:
self.config['max_poll_interval_ms'] = self.config['session_timeout_ms']
if self.config['group_id'] is not None:
if self.config['request_timeout_ms'] <= self.config['session_timeout_ms']:
raise KafkaConfigurationError(
"Request timeout (%s) must be larger than session timeout (%s)" %
(self.config['request_timeout_ms'], self.config['session_timeout_ms']))
self._subscription = SubscriptionState(self.config['auto_offset_reset'])
self._fetcher = Fetcher(
self._client, self._subscription, self._metrics, **self.config)
self._coordinator = ConsumerCoordinator(
self._client, self._subscription, self._metrics,
assignors=self.config['partition_assignment_strategy'],
**self.config)
self._closed = False
self._iterator = None
self._consumer_timeout = float('inf')
if topics:
self._subscription.subscribe(topics=topics)
self._client.set_topics(topics)
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
"""
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions])
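# Illustrative usage sketch (not part of the original source); topic and
# partition numbers are hypothetical:
#
#     consumer.assign([TopicPartition('my-topic', 0),
#                      TopicPartition('my-topic', 1)])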
def assignment(self):
"""Get the TopicPartitions currently assigned to this consumer.
If partitions were directly assigned using
:meth:`~kafka.KafkaConsumer.assign`, then this will simply return the
same partitions that were previously assigned. If topics were
subscribed using :meth:`~kafka.KafkaConsumer.subscribe`, then this will
give the set of topic partitions currently assigned to the consumer
(which may be None if the assignment hasn't happened yet, or if the
partitions are in the process of being reassigned).
Returns:
set: {TopicPartition, ...}
"""
return self._subscription.assigned_partitions()
def close(self, autocommit=True):
"""Close the consumer, waiting indefinitely for any needed cleanup.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
"""
if self._closed:
return
log.debug("Closing the KafkaConsumer.")
self._closed = True
self._coordinator.close(autocommit=autocommit)
self._metrics.close()
self._client.close()
try:
self.config['key_deserializer'].close()
except AttributeError:
pass
try:
self.config['value_deserializer'].close()
except AttributeError:
pass
log.debug("The KafkaConsumer has closed.")
def commit_async(self, offsets=None, callback=None):
"""Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
log.debug("Committing offsets: %s", offsets)
future = self._coordinator.commit_offsets_async(
offsets, callback=callback)
return future
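# Editor's sketch (not part of the original module): asynchronous commit with a
# completion callback. The group, topic, and the two-field OffsetAndMetadata
# form are assumptions matching this module's vintage.
def _example_commit_async():
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition, OffsetAndMetadata

    consumer = KafkaConsumer('example-topic',
                             group_id='example-group',
                             bootstrap_servers='localhost:9092',
                             enable_auto_commit=False)

    def on_commit(offsets, response):
        # response is either an Exception or an OffsetCommitResponse struct.
        print('commit finished:', offsets, response)

    # Commit an explicit offset (the *next* message to consume) for partition 0.
    offsets = {TopicPartition('example-topic', 0): OffsetAndMetadata(42, '')}
    consumer.commit_async(offsets=offsets, callback=on_commit)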
def commit(self, offsets=None):
"""Commit offsets to kafka, blocking until success or error.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
Blocks until either the commit succeeds or an unrecoverable error is
encountered (in which case it is thrown to the caller).
Currently only supports kafka-topic offset storage (not zookeeper).
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
self._coordinator.commit_offsets_sync(offsets)
def committed(self, partition):
"""Get the last committed offset for the given partition.
This offset will be used as the position for the consumer
in the event of a failure.
This call may block to do a remote call if the partition in question
isn't assigned to this consumer or if the consumer hasn't yet
initialized its cache of committed offsets.
Arguments:
partition (TopicPartition): The partition to check.
Returns:
The last committed offset, or None if there was no prior commit.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
if self._subscription.is_assigned(partition):
committed = self._subscription.assignment[partition].committed
if committed is None:
self._coordinator.refresh_committed_offsets_if_needed()
committed = self._subscription.assignment[partition].committed
else:
commit_map = self._coordinator.fetch_committed_offsets([partition])
if partition in commit_map:
committed = commit_map[partition].offset
else:
committed = None
return committed
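# Editor's sketch (not part of the original module): blocking commit followed
# by a committed-offset lookup. `handle` is a hypothetical record handler and
# all names are illustrative.
def _example_commit_and_committed():
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition, OffsetAndMetadata

    tp = TopicPartition('example-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092',
                             group_id='example-group',
                             enable_auto_commit=False)
    consumer.assign([tp])
    for record in consumer:
        handle(record)  # hypothetical application logic
        # Commit the *next* offset to consume, i.e. last_offset + 1.
        consumer.commit({tp: OffsetAndMetadata(record.offset + 1, '')})
        break
    print(consumer.committed(tp))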
def _fetch_all_topic_metadata(self):
"""A blocking call that fetches topic metadata for all topics in the
cluster that the user is authorized to view.
"""
cluster = self._client.cluster
if self._client._metadata_refresh_in_progress and self._client._topics:
future = cluster.request_update()
self._client.poll(future=future)
stash = cluster.need_all_topic_metadata
cluster.need_all_topic_metadata = True
future = cluster.request_update()
self._client.poll(future=future)
cluster.need_all_topic_metadata = stash
def topics(self):
"""Get all topics the user is authorized to view.
This will always issue a remote call to the cluster to fetch the latest
information.
Returns:
set: topics
"""
self._fetch_all_topic_metadata()
return self._client.cluster.topics()
def partitions_for_topic(self, topic):
"""This method first checks the local metadata cache for information
about the topic. If the topic is not found (either because the topic
does not exist, the user is not authorized to view the topic, or the
metadata cache is not populated), then it will issue a metadata update
call to the cluster.
Arguments:
topic (str): Topic to check.
Returns:
set: Partition ids
"""
cluster = self._client.cluster
partitions = cluster.partitions_for_topic(topic)
if partitions is None:
self._fetch_all_topic_metadata()
partitions = cluster.partitions_for_topic(topic)
return partitions
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
assert isinstance(max_records, int), 'max_records must be an integer'
assert max_records > 0, 'max_records must be positive'
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
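# Editor's sketch (not part of the original module): a basic poll loop as an
# alternative to the iterator interface; broker and topic names are assumptions.
def _example_poll_loop():
    from kafka import KafkaConsumer

    consumer = KafkaConsumer('example-topic',
                             bootstrap_servers='localhost:9092',
                             group_id='example-group')
    try:
        while True:
            # Block for up to one second waiting for records.
            batches = consumer.poll(timeout_ms=1000, max_records=500)
            for tp, records in batches.items():
                for record in records:
                    print(tp.topic, tp.partition, record.offset, record.value)
    finally:
        consumer.close()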
def _poll_once(self, timeout_ms, max_records):
"""Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block.
Returns:
dict: Map of topic to list of records (may be empty).
"""
self._coordinator.poll()
# Fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# If data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
records, partial = self._fetcher.fetched_records(max_records)
if records:
# Before returning the fetched records, we can send off the
# next round of fetches, avoiding a blocking wait for their
# responses and enabling pipelining while the user is handling the
# fetched records.
if not partial:
self._fetcher.send_fetches()
return records
# Send any new fetches (won't resend pending fetches)
self._fetcher.send_fetches()
timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)
self._client.poll(timeout_ms=timeout_ms)
# after the long poll, we should check whether the group needs to rebalance
# prior to returning data so that the group can stabilize faster
if self._coordinator.need_rejoin():
return {}
records, _ = self._fetcher.fetched_records(max_records)
return records
def position(self, partition):
"""Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
offset = self._subscription.assignment[partition].position
if offset is None:
self._update_fetch_positions([partition])
offset = self._subscription.assignment[partition].position
return offset
def highwater(self, partition):
"""Last known highwater offset for a partition.
A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
partition (TopicPartition): Partition to check
Returns:
int or None: Offset if available
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
return self._subscription.assignment[partition].highwater
def pause(self, *partitions):
"""Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Pausing partition %s", partition)
self._subscription.pause(partition)
def paused(self):
"""Get the partitions that were previously paused using
:meth:`~kafka.KafkaConsumer.pause`.
Returns:
set: {partition (TopicPartition), ...}
"""
return self._subscription.paused_partitions()
def resume(self, *partitions):
"""Resume fetching from the specified (paused) partitions.
Arguments:
*partitions (TopicPartition): Partitions to resume.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Resuming partition %s", partition)
self._subscription.resume(partition)
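# Editor's sketch (not part of the original module): temporarily pausing a
# partition to apply backpressure, then resuming it; names are illustrative.
def _example_pause_resume():
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition

    tp = TopicPartition('example-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
    consumer.assign([tp])
    consumer.pause(tp)           # poll() will return no records for tp
    assert tp in consumer.paused()
    consumer.resume(tp)          # fetching resumes on the next poll()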
def seek(self, partition, offset):
"""Manually specify the fetch offset for a TopicPartition.
Overrides the fetch offsets that the consumer will use on the next
:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same
partition more than once, the latest offset will be used on the next
:meth:`~kafka.KafkaConsumer.poll`.
Note: You may lose data if this API is arbitrarily used in the middle of
consumption to reset the fetch offsets.
Arguments:
partition (TopicPartition): Partition for seek operation
offset (int): Message offset in partition
Raises:
AssertionError: If offset is not an int >= 0; or if partition is not
currently assigned.
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
assert partition in self._subscription.assigned_partitions(), 'Unassigned partition'
log.debug("Seeking to offset %s for partition %s", offset, partition)
self._subscription.assignment[partition].seek(offset)
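# Editor's sketch (not part of the original module): overriding fetch positions
# with seek() and seek_to_beginning() (defined just below); names are
# illustrative.
def _example_seek():
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition

    tp = TopicPartition('example-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
    consumer.assign([tp])
    consumer.seek(tp, 100)            # next poll() starts at offset 100
    consumer.seek_to_beginning(tp)    # or rewind to the earliest offset
    print(consumer.position(tp))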
def seek_to_beginning(self, *partitions):
"""Seek to the oldest available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to beginning of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
def seek_to_end(self, *partitions):
"""Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
def subscribe(self, topics=(), pattern=None, listener=None):
"""Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with :meth:`~kafka.KafkaConsumer.assign`.
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if one of the following events
trigger:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call.
Raises:
IllegalStateError: If called after previously calling
:meth:`~kafka.KafkaConsumer.assign`.
AssertionError: If neither topics or pattern is provided.
TypeError: If listener is not a ConsumerRebalanceListener.
"""
# SubscriptionState handles error checking
self._subscription.subscribe(topics=topics,
pattern=pattern,
listener=listener)
# Regex will need all topic metadata
if pattern is not None:
self._client.cluster.need_all_topic_metadata = True
self._client.set_topics([])
self._client.cluster.request_update()
log.debug("Subscribed to topic pattern: %s", pattern)
else:
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics(self._subscription.group_subscription())
log.debug("Subscribed to topic(s): %s", topics)
def subscription(self):
"""Get the current topic subscription.
Returns:
set: {topic, ...}
"""
if self._subscription.subscription is None:
return None
return self._subscription.subscription.copy()
def unsubscribe(self):
"""Unsubscribe from all topics and clear all assigned partitions."""
self._subscription.unsubscribe()
self._coordinator.close()
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics([])
log.debug("Unsubscribed all topics or patterns and assigned partitions")
def metrics(self, raw=False):
"""Get metrics on consumer performance.
This is ported from the Java Consumer, for details see:
https://kafka.apache.org/documentation/#consumer_monitoring
Warning:
This is an unstable interface. It may change in future
releases without warning.
"""
if raw:
return self._metrics.metrics.copy()
metrics = {}
for k, v in six.iteritems(self._metrics.metrics.copy()):
if k.group not in metrics:
metrics[k.group] = {}
if k.name not in metrics[k.group]:
metrics[k.group][k.name] = {}
metrics[k.group][k.name] = v.value()
return metrics
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in six.iteritems(timestamps):
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms'])
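# Editor's sketch (not part of the original module): finding the first offsets
# at or after a wall-clock timestamp, then seeking there to replay; names are
# illustrative.
def _example_offsets_for_times():
    import time
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition

    tp = TopicPartition('example-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
    one_hour_ago_ms = int((time.time() - 3600) * 1000)
    result = consumer.offsets_for_times({tp: one_hour_ago_ms})
    if result.get(tp) is not None:
        # result[tp] is an OffsetAndTimestamp; seek to replay from that point.
        consumer.assign([tp])
        consumer.seek(tp, result[tp].offset)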
def beginning_offsets(self, partitions):
"""Get the first offset for the given partitions.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The earliest available offsets for the
given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms.
"""
offsets = self._fetcher.beginning_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def end_offsets(self, partitions):
"""Get the last offset for the given partitions. The last offset of a
partition is the offset of the upcoming message, i.e. the offset of the
last available message + 1.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The end offsets for the given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
offsets = self._fetcher.end_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
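# Editor's sketch (not part of the original module): computing per-partition
# consumer lag from end_offsets() and position(); names are illustrative.
def _example_lag():
    from kafka import KafkaConsumer
    from kafka.structs import TopicPartition

    tp = TopicPartition('example-topic', 0)
    consumer = KafkaConsumer(bootstrap_servers='localhost:9092',
                             group_id='example-group')
    consumer.assign([tp])
    end = consumer.end_offsets([tp])[tp]
    # position() is the offset of the next record to fetch, so lag is simply:
    lag = end - consumer.position(tp)
    print('lag for %s: %d' % (tp, lag))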
def _use_consumer_group(self):
"""Return True iff this consumer can/should join a broker-coordinated group."""
if self.config['api_version'] < (0, 9):
return False
elif self.config['group_id'] is None:
return False
elif not self._subscription.partitions_auto_assigned():
return False
return True
def _update_fetch_positions(self, partitions):
"""Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
Arguments:
partitions (List[TopicPartition]): The partitions that need
updating fetch positions.
Raises:
NoOffsetForPartitionError: If no offset is stored for a given
partition and no offset reset policy is defined.
"""
# Lookup any positions for partitions which are awaiting reset (which may be the
# case if the user called :meth:`seek_to_beginning` or :meth:`seek_to_end`). We do
# this check first to avoid an unnecessary lookup of committed offsets (which
# typically occurs when the user is manually assigning partitions and managing
# their own offsets).
self._fetcher.reset_offsets_if_needed(partitions)
if not self._subscription.has_all_fetch_positions():
# if we still don't have offsets for all partitions, then we should either seek
# to the last committed position or reset using the auto reset policy
if (self.config['api_version'] >= (0, 8, 1) and
self.config['group_id'] is not None):
# first refresh commits for all assigned partitions
self._coordinator.refresh_committed_offsets_if_needed()
# Then, do any offset lookups in case some positions are not known
self._fetcher.update_fetch_positions(partitions)
def _message_generator(self):
assert self.assignment() or self.subscription() is not None, 'No topic subscription or manual partition assignment'
while time.time() < self._consumer_timeout:
self._coordinator.poll()
# Fetch offsets for any subscribed partitions that we aren't tracking yet
if not self._subscription.has_all_fetch_positions():
partitions = self._subscription.missing_fetch_positions()
self._update_fetch_positions(partitions)
poll_ms = min((1000 * (self._consumer_timeout - time.time())), self.config['retry_backoff_ms'])
self._client.poll(timeout_ms=poll_ms)
# after the long poll, we should check whether the group needs to rebalance
# prior to returning data so that the group can stabilize faster
if self._coordinator.need_rejoin():
continue
# We need to make sure we at least keep up with scheduled tasks,
# like heartbeats, auto-commits, and metadata refreshes
timeout_at = self._next_timeout()
# Short-circuit the fetch iterator if we are already timed out
# to avoid any unintentional interaction with fetcher setup
if time.time() > timeout_at:
continue
for msg in self._fetcher:
yield msg
if time.time() > timeout_at:
log.debug("internal iterator timeout - breaking for poll")
break
self._client.poll(timeout_ms=0)
# An else block on a for loop only executes if there was no break
# so this should only be called on a StopIteration from the fetcher
# We assume that it is safe to init_fetches when fetcher is done
# i.e., there are no more records stored internally
else:
self._fetcher.send_fetches()
def _next_timeout(self):
timeout = min(self._consumer_timeout,
self._client.cluster.ttl() / 1000.0 + time.time(),
self._coordinator.time_to_next_poll() + time.time())
return timeout
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
if not self._iterator:
self._iterator = self._message_generator()
self._set_consumer_timeout()
try:
return next(self._iterator)
except StopIteration:
self._iterator = None
raise
def _set_consumer_timeout(self):
# consumer_timeout_ms can be used to stop iteration early
if self.config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (
self.config['consumer_timeout_ms'] / 1000.0)
# Old KafkaConsumer methods are deprecated
def configure(self, **configs):
raise NotImplementedError(
'deprecated -- initialize a new consumer')
def set_topic_partitions(self, *topics):
raise NotImplementedError(
'deprecated -- use subscribe() or assign()')
def fetch_messages(self):
raise NotImplementedError(
'deprecated -- use poll() or iterator interface')
def get_partition_offsets(self, topic, partition,
request_time_ms, max_num_offsets):
raise NotImplementedError(
'deprecated -- send an OffsetRequest with KafkaClient')
def offsets(self, group=None):
raise NotImplementedError('deprecated -- use committed(partition)')
def task_done(self, message):
raise NotImplementedError(
'deprecated -- commit offsets manually if needed')
| {
"repo_name": "Aloomaio/kafka-python",
"path": "kafka/consumer/group.py",
"copies": "1",
"size": "55616",
"license": "apache-2.0",
"hash": -9064776579291590000,
"line_mean": 46.5757057314,
"line_max": 123,
"alpha_frac": 0.6398878021,
"autogenerated": false,
"ratio": 4.830293555671356,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5970181357771356,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import cudamat as cm
import numpy as np
from ._base import LayerBase
from .._neural_net_exception import NeuralNetException
import nnet.activation_func as af
class OutputLayer(LayerBase):
SUPPORTED_LOSS_FUNC = [
'MSE',
'CEE'
]
SUPPORTED_CEE_ACT_FUNC = [
af.Sigmoid
]
def __init__(self, level, size, activation_func, loss_func):
self.level = level
self.size = size
self.loss_func = loss_func
if self.loss_func not in OutputLayer.SUPPORTED_LOSS_FUNC:
raise NeuralNetException('Loss func {0} is not supported.'.format(self.loss_func))
self.activation_func = activation_func
if self.loss_func == 'CEE' \
and (not any([type(activation_func) == f for f in OutputLayer.SUPPORTED_CEE_ACT_FUNC])):
raise NeuralNetException('Activation func {0} is not supported '
'with loss func CEE.'.format(self.activation_func))
def set_next_layer_size(self, next_size):
# Output layer does not have next layer.
pass
def init(self, batch_size):
self.batch_size = batch_size
self.my_delta = cm.empty((batch_size, self.size))
def forward_p(self, z, predict=False):
self.z = z
self.activation_func.apply(self.z)
return self.z
def forward_p_single(self, single_z):
return self.forward_p(single_z, True)
def backward_p(self, y):
self.z.subtract(y, self.my_delta)
self.my_delta.divide(float(self.my_delta.shape[0]))
if self.loss_func == 'MSE':
self.activation_func\
.mult_with_derivative(self.my_delta, self.z)
elif self.loss_func == 'CEE':
# Currently only Sigmoid is supported as the activation function
# for an output layer with CEE loss; in that case the sigmoid
# derivative cancels with the CEE gradient, so no extra scaling is needed here.
pass
else:
raise NeuralNetException('Loss func {0} is not supported.'.format(self.loss_func))
return self.my_delta
def update(self, lr):
# No weights to update for output layer.
pass
def compute_loss(self, y):
if self.loss_func == 'MSE':
return self._compute_loss_MSE(y)
elif self.loss_func == 'CEE':
return self._compute_loss_CEE(y)
else:
raise NeuralNetException('Loss func {0} is not supported.'.format(self.loss_func))
def _compute_loss_MSE(self, y):
# Copy to CPU to compute the loss, to avoid numerical issues on the GPU.
# This should not be a huge performance bottleneck
# since we don't compute loss at every iteration.
cpu_y = y.asarray().astype(np.double)
cpu_y_hat = self.z.asarray().astype(np.double)
diff = cpu_y - cpu_y_hat
return np.sum(diff**2) \
/ float(2*self.batch_size)
def _compute_loss_CEE(self, y):
# Copy to CPU to compute the loss, to avoid numerical issues on the GPU.
# This should not be a huge performance bottleneck
# since we don't compute loss at every iteration.
cpu_y = y.asarray().astype(np.double)
cpu_y_hat = self.z.asarray().astype(np.double)
cpu_y_hat[np.nonzero(cpu_y_hat==0)] = 1e-8
cpu_y_hat[np.nonzero(cpu_y_hat==1)] = 1-1e-8
entropy = cpu_y * np.log(cpu_y_hat) \
+ (1.0 - cpu_y) * np.log(1.0 - cpu_y_hat)
return -np.sum(entropy) \
/ float(self.batch_size)
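# Editor's sketch (not part of the original class): the same clipped
# cross-entropy computation on plain NumPy arrays, to make the formula above
# concrete. The values are illustrative.
def _example_cee_loss():
    import numpy as np

    y = np.array([[1.0, 0.0], [0.0, 1.0]])
    y_hat = np.array([[0.9, 0.1], [0.2, 0.8]])
    y_hat = np.clip(y_hat, 1e-8, 1 - 1e-8)       # avoid log(0)
    entropy = y * np.log(y_hat) + (1.0 - y) * np.log(1.0 - y_hat)
    loss = -np.sum(entropy) / float(y.shape[0])  # average over the batch
    print(loss)                                  # ~0.33 for these values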
def dump_params(self):
del self.z
self._dump_np('my_delta')
def load_params(self):
self._load_np('my_delta')
| {
"repo_name": "zhaoyan1117/NeuralNet",
"path": "nnet/layer/_output_layer.py",
"copies": "1",
"size": "3626",
"license": "bsd-2-clause",
"hash": 1521059551373180000,
"line_mean": 32.2660550459,
"line_max": 104,
"alpha_frac": 0.5890788748,
"autogenerated": false,
"ratio": 3.541015625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9622925203006873,
"avg_score": 0.0014338593586252994,
"num_lines": 109
} |
from __future__ import absolute_import, division
import enum
from zeus.config import db
from zeus.db.mixins import RepositoryBoundMixin, StandardAttributes
from zeus.db.types import GUID, StrEnum
from zeus.db.utils import model_repr
class FailureReason(RepositoryBoundMixin, StandardAttributes, db.Model):
class Reason(enum.Enum):
failing_tests = "failing_tests"
missing_tests = "missing_tests"
no_jobs = "no_jobs"
unresolvable_ref = "unresolvable_ref"
timeout = "timeout"
internal_error = "internal_error"
build_id = db.Column(
GUID, db.ForeignKey("build.id", ondelete="CASCADE"), nullable=True
)
job_id = db.Column(GUID, db.ForeignKey("job.id", ondelete="CASCADE"), nullable=True)
reason = db.Column(StrEnum(Reason), nullable=False)
build = db.relationship("Build")
job = db.relationship("Job")
__tablename__ = "failurereason"
__table_args__ = (
db.UniqueConstraint(
"build_id", "job_id", "reason", name="unq_failurereason_key"
),
db.Index(
"unq_failurereason_buildonly",
build_id,
reason,
unique=True,
postgresql_where=job_id.is_(None),
),
)
__repr__ = model_repr("reason")
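# Editor's note (illustrative sketch, not part of the original module): Reason
# is a plain Python enum whose string values are what StrEnum persists, and the
# partial unique index above allows one build-level row per reason alongside
# one row per (build, job, reason). A minimal enum check:
def _example_reason_enum():
    assert FailureReason.Reason.timeout.value == "timeout"
    assert FailureReason.Reason("failing_tests") is FailureReason.Reason.failing_tests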
| {
"repo_name": "getsentry/zeus",
"path": "zeus/models/failurereason.py",
"copies": "1",
"size": "1288",
"license": "apache-2.0",
"hash": 3234852153573868500,
"line_mean": 29.6666666667,
"line_max": 88,
"alpha_frac": 0.6242236025,
"autogenerated": false,
"ratio": 3.722543352601156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4846766955101156,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import hashlib
import logging
import random as insecure_random
import re
from datetime import datetime
from flask import current_app
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import func
from changes.config import db
from changes.constants import Result
from changes.db.utils import create_or_update
from changes.lib.artifact_store_lib import ArtifactStoreClient
from changes.models.failurereason import FailureReason
from changes.models.itemstat import ItemStat
from changes.models.test import TestCase
from changes.models.testartifact import TestArtifact
from changes.models.testmessage import TestMessage
logger = logging.getLogger('changes.testresult')
class TestResult(object):
"""
A helper class which ensures that TestSuite instances are
managed correctly when TestCase's are created.
:param message_offsets: list of (label, start_offset, length) tuples
"""
def __init__(self, step, name, message=None, package=None,
result=None, duration=None, date_created=None,
reruns=None, artifacts=None, owner=None, message_offsets=None):
self.step = step
self._name = name
self._package = package
self.message = message
self.result = result or Result.unknown
self.duration = duration # ms
self.date_created = date_created or datetime.utcnow()
self.reruns = reruns or 0
self.artifacts = artifacts
self.owner = owner
self.message_offsets = message_offsets or []
@property
def sep(self):
name = (self._package or self._name)
# handle the case where it might begin with some special character
if not re.match(r'^[a-zA-Z0-9]', name):
return '/'
elif '/' in name:
return '/'
return '.'
@property
def name_sha(self):
return TestCase.calculate_name_sha(self.name)
@property
def package(self):
return None
@property
def name(self):
if self._package:
return "%s%s%s" % (self._package, self.sep, self._name)
return self._name
id = name
class TestSuite(object):
"""A test suite is a collection of test results.
"""
def __init__(self, step, name=None, result=None, duration=None, date_created=None):
self.step = step
self.name = name
self.duration = duration
self.date_created = date_created
self.result = result or Result.unknown
self.test_results = []
class TestResultManager(object):
def __init__(self, step, artifact):
self.step = step
self.artifact = artifact
def clear(self):
"""
Removes all existing test data from this job.
"""
TestCase.query.filter(
TestCase.step_id == self.step.id,
).delete(synchronize_session=False)
def save(self, test_list):
if not test_list:
return
step = self.step
job = step.job
project = job.project
# Create all test cases.
testcase_list = []
# For tracking the name of any test we see with a bad
# duration, typically the first one if we see multiple.
bad_duration_test_name = None
bad_duration_value = None
for test in test_list:
duration = test.duration
# Maximum value for the Integer column type
if duration is not None and (duration > 2147483647 or duration < 0):
# If it is very large (>~25 days) or negative set it to 0
# since it is almost certainly wrong, and keeping it or truncating
# to max will give misleading total values.
if not bad_duration_test_name:
bad_duration_test_name = test.name
bad_duration_value = duration
duration = 0
testcase = TestCase(
job=job,
step=step,
name_sha=test.name_sha,
project=project,
name=test.name,
duration=duration,
message=test.message,
result=test.result,
date_created=test.date_created,
reruns=test.reruns,
owner=test.owner,
)
testcase_list.append(testcase)
if bad_duration_test_name:
# Include the project slug in the warning so project warnings aren't bucketed together.
logger.warning("Got bad test duration for " + project.slug + "; %s: %s",
bad_duration_test_name, bad_duration_value)
# Try an optimistic commit of all cases at once.
for testcase in testcase_list:
db.session.add(testcase)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
create_or_update(FailureReason, where={
'step_id': step.id,
'reason': 'duplicate_test_name',
}, values={
'project_id': step.project_id,
'build_id': step.job.build_id,
'job_id': step.job_id,
})
db.session.commit()
# Slowly make separate commits, to uncover duplicate test cases:
for i, testcase in enumerate(testcase_list):
db.session.add(testcase)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
original = _record_duplicate_testcase(testcase)
db.session.commit()
testcase_list[i] = original # so artifacts get stored
_record_test_failures(original.step) # so count is right
# Test artifacts and messages do not operate under a unique constraint, so
# they should insert cleanly without an integrity error.
as_client = ArtifactStoreClient(current_app.config['ARTIFACTS_SERVER'])
for test, testcase in zip(test_list, testcase_list):
if test.artifacts:
m = hashlib.md5()
m.update(test.id)
bucket_name_base = '{}_{}'.format(step.id.hex, m.hexdigest())
bucket_name = bucket_name_base
num_attempts = 0
max_duplicate_attempts = 5
while num_attempts < max_duplicate_attempts:
try:
as_client.create_bucket(bucket_name)
break
except Exception as e:
bucket_name = '{}_dup_{}'.format(bucket_name_base, "%05x" % insecure_random.getrandbits(4 * 5))
num_attempts += 1
if num_attempts == max_duplicate_attempts:
raise e
for ta in test.artifacts:
testartifact = TestArtifact(
name=ta['name'],
type=ta['type'],
test=testcase,)
testartifact.save_base64_content(ta['base64'], bucket_name)
db.session.add(testartifact)
as_client.close_bucket(bucket_name)
for (label, start, length) in test.message_offsets:
testmessage = TestMessage(
label=label,
start_offset=start,
length=length,
test=testcase,
artifact=self.artifact,
)
db.session.add(testmessage)
try:
db.session.commit()
except Exception:
db.session.rollback()
logger.exception('Failed to save artifacts and messages'
' for step {}'.format(step.id.hex))
try:
_record_test_counts(self.step)
_record_test_failures(self.step)
_record_test_duration(self.step)
_record_test_rerun_counts(self.step)
except Exception:
db.session.rollback()
logger.exception('Failed to record aggregate test statistics'
' for step {}'.format(step.id.hex))
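# Editor's sketch (not part of the original module): the optimistic bulk commit
# with a row-by-row fallback used in save() above, reduced to a generic
# SQLAlchemy pattern. `session`, `rows`, and `handle_duplicate` are assumptions
# for illustration.
def _example_optimistic_bulk_insert(session, rows, handle_duplicate):
    from sqlalchemy.exc import IntegrityError

    # Fast path: try to insert everything in a single transaction.
    for row in rows:
        session.add(row)
    try:
        session.commit()
        return
    except IntegrityError:
        session.rollback()

    # Slow path: insert one at a time so the offending rows can be identified.
    for row in rows:
        session.add(row)
        try:
            session.commit()
        except IntegrityError:
            session.rollback()
            handle_duplicate(row)
            session.commit()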
def _record_test_counts(step):
create_or_update(ItemStat, where={
'item_id': step.id,
'name': 'test_count',
}, values={
'value': db.session.query(func.count(TestCase.id)).filter(
TestCase.step_id == step.id,
).as_scalar(),
})
db.session.commit()
def _record_test_failures(step):
create_or_update(ItemStat, where={
'item_id': step.id,
'name': 'test_failures',
}, values={
'value': db.session.query(func.count(TestCase.id)).filter(
TestCase.step_id == step.id,
TestCase.result == Result.failed,
).as_scalar(),
})
db.session.commit()
def _record_test_duration(step):
create_or_update(ItemStat, where={
'item_id': step.id,
'name': 'test_duration',
}, values={
'value': db.session.query(func.coalesce(func.sum(TestCase.duration), 0)).filter(
TestCase.step_id == step.id,
).as_scalar(),
})
def _record_test_rerun_counts(step):
create_or_update(ItemStat, where={
'item_id': step.id,
'name': 'test_rerun_count',
}, values={
'value': db.session.query(func.count(TestCase.id)).filter(
TestCase.step_id == step.id,
TestCase.reruns > 0,
).as_scalar(),
})
_DUPLICATE_TEST_COMPLAINT = """Error: Duplicate Test
Your test suite is reporting multiple results for this test, but Changes
can only store a single success or failure for each test.
* If you did not intend to run this test several times, simply repair
your scripts so that this test is only discovered and invoked once.
* If you intended to run this test several times and then report a
single success or failure, then run it inside of a loop yourself,
aggregate the results, and deliver a single verdict to Changes.
* If you want to invoke this test several times and have each result
reported separately, then give each run a unique name. Many testing
frameworks will do this automatically, appending a unique suffix
like "#1" or "[1]", when a test is invoked through a fixture.
Here are the job steps that reported a result for this test:
"""
def _record_duplicate_testcase(duplicate):
"""Find the TestCase that already exists for `duplicate` and update it.
Because of the unique constraint on TestCase, we cannot record the
`duplicate`. Instead, we go back and mark the first instance as
having failed because of the duplication, but discard all of the
other data delivered with the `duplicate`.
"""
original = (
TestCase.query
.filter_by(job_id=duplicate.job_id, name_sha=duplicate.name_sha)
.with_for_update().first()
)
prefix = _DUPLICATE_TEST_COMPLAINT
if (original.message is None) or not original.message.startswith(prefix):
original.message = '{}{}\n'.format(prefix, original.step.label)
original.result = Result.failed
if duplicate.step.label not in original.message:
original.message += '{}\n'.format(duplicate.step.label)
return original
| {
"repo_name": "dropbox/changes",
"path": "changes/models/testresult.py",
"copies": "1",
"size": "11482",
"license": "apache-2.0",
"hash": -4147516400314370600,
"line_mean": 33.6888217523,
"line_max": 119,
"alpha_frac": 0.5823027347,
"autogenerated": false,
"ratio": 4.360805165210786,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033537517742045914,
"num_lines": 331
} |
from __future__ import absolute_import, division
import itertools
import mock
import pytz
from datetime import datetime, timedelta
from sentry.testutils import TestCase
from sentry.tsdb.base import BaseTSDB, ONE_MINUTE, ONE_HOUR, ONE_DAY
from sentry.utils.dates import to_timestamp
from six.moves import xrange
class BaseTSDBTest(TestCase):
def setUp(self):
self.tsdb = BaseTSDB(
rollups=(
# time in seconds, samples to keep
(10, 30), # 5 minutes at 10 seconds
(ONE_MINUTE, 120), # 2 hours at 1 minute
(ONE_HOUR, 24), # 1 days at 1 hour
(ONE_DAY, 30), # 30 days at 1 day
)
)
def test_normalize_to_epoch(self):
timestamp = datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=pytz.UTC)
normalize_to_epoch = self.tsdb.normalize_to_epoch
result = normalize_to_epoch(timestamp, 60)
assert result == 1368889980
result = normalize_to_epoch(timestamp + timedelta(seconds=20), 60)
assert result == 1368890040
result = normalize_to_epoch(timestamp + timedelta(seconds=30), 60)
assert result == 1368890040
result = normalize_to_epoch(timestamp + timedelta(seconds=70), 60)
assert result == 1368890100
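# Editor's sketch (not part of the original tests): the bucketing behaviour the
# assertions above expect -- the timestamp is converted to epoch seconds and
# floored to the rollup interval. This helper is an assumption, not the real
# BaseTSDB implementation.
def _example_normalize_to_epoch(timestamp, seconds):
    import calendar
    epoch = calendar.timegm(timestamp.utctimetuple())
    return epoch - (epoch % seconds)
# e.g. 2013-05-18 15:13:58 UTC floors to 1368889980 with a 60 second rollup.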
def test_rollup(self):
pre_results = {
1: [(1368889980, 5), (1368890040, 10), (1368893640, 7)],
}
post_results = self.tsdb.rollup(pre_results, 3600)
assert len(post_results) == 1
assert post_results[1] == [[1368889200, 15], [1368892800, 7]]
def test_calculate_expiry(self):
timestamp = datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=pytz.UTC)
result = self.tsdb.calculate_expiry(10, 30, timestamp)
assert result == 1368890330
@mock.patch('django.utils.timezone.now')
def test_get_optimal_rollup_series_aligned_intervals(self, now):
now.return_value = datetime(2016, 8, 1, tzinfo=pytz.utc)
start = now() - timedelta(seconds=30)
assert self.tsdb.get_optimal_rollup_series(start) == (
10, [to_timestamp(start + timedelta(seconds=10) * i) for i in xrange(4)],
)
start = now() - timedelta(minutes=30)
assert self.tsdb.get_optimal_rollup_series(start) == (
ONE_MINUTE, [to_timestamp(start + timedelta(minutes=1) * i) for i in xrange(31)],
)
start = now() - timedelta(hours=5)
assert self.tsdb.get_optimal_rollup_series(start) == (
ONE_HOUR, [to_timestamp(start + timedelta(hours=1) * i) for i in xrange(6)],
)
start = now() - timedelta(days=7)
assert self.tsdb.get_optimal_rollup_series(start) == (
ONE_DAY, [to_timestamp(start + timedelta(hours=24) * i) for i in xrange(8)],
)
@mock.patch('django.utils.timezone.now')
def test_get_optimal_rollup_series_offset_intervals(self, now):
# This test is a funny one (notice it doesn't return a range that
# includes the start position.) This occurs because the algorithm for
# determining the series to be returned will attempt to return the same
# duration of time as represented by the start and end timestamps, but
# doesn't necessarily return data *from that specific interval* (the
# end timestamp is always included.)
now.return_value = datetime(2016, 8, 1, 0, 0, 15, tzinfo=pytz.utc)
start = now() - timedelta(seconds=19)
assert self.tsdb.get_optimal_rollup_series(
start, rollup=10
) == (
10, [
to_timestamp(datetime(2016, 8, 1, 0, 0, 0, tzinfo=pytz.utc)),
to_timestamp(datetime(2016, 8, 1, 0, 0, 10, tzinfo=pytz.utc)),
]
)
now.return_value = datetime(2016, 8, 1, 0, 0, 30, tzinfo=pytz.utc)
start = now() - timedelta(seconds=ONE_MINUTE - 1)
assert self.tsdb.get_optimal_rollup_series(
start, rollup=ONE_MINUTE
) == (ONE_MINUTE, [to_timestamp(datetime(2016, 8, 1, 0, 0, 0, tzinfo=pytz.utc))])
now.return_value = datetime(2016, 8, 1, 12, tzinfo=pytz.utc)
start = now() - timedelta(seconds=ONE_DAY - 1)
assert self.tsdb.get_optimal_rollup_series(
start, rollup=ONE_DAY
) == (ONE_DAY, [to_timestamp(datetime(2016, 8, 1, 0, tzinfo=pytz.utc))])
@mock.patch('django.utils.timezone.now')
def test_make_series_aligned_intervals(self, now):
now.return_value = datetime(2016, 8, 1, tzinfo=pytz.utc)
start = now() - timedelta(seconds=30)
assert self.tsdb.make_series(0, start) == [
(to_timestamp(start + timedelta(seconds=10) * i), 0) for i in xrange(4)
]
start = now() - timedelta(minutes=30)
assert self.tsdb.make_series(lambda timestamp: 1, start) == [
(to_timestamp(start + timedelta(minutes=1) * i), 1) for i in xrange(31)
]
counter = itertools.count()
start = now() - timedelta(hours=5)
assert self.tsdb.make_series(lambda timestamp: next(counter), start) == [
(to_timestamp(start + timedelta(hours=1) * i), i) for i in xrange(6)
]
start = now() - timedelta(days=7)
assert self.tsdb.make_series(0, start) == [
(to_timestamp(start + timedelta(hours=24) * i), 0) for i in xrange(8)
]
| {
"repo_name": "ifduyue/sentry",
"path": "tests/sentry/tsdb/test_base.py",
"copies": "3",
"size": "5410",
"license": "bsd-3-clause",
"hash": -3726191629301871000,
"line_mean": 39.9848484848,
"line_max": 93,
"alpha_frac": 0.6001848429,
"autogenerated": false,
"ratio": 3.499353169469599,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000975305695524314,
"num_lines": 132
} |
from __future__ import absolute_import, division
import json
import logging
import random
import re
import requests
import sys
import time
import uuid
from cStringIO import StringIO
from contextlib import closing
from datetime import datetime
from flask import current_app
from lxml import etree, objectify
from typing import Any # NOQA
from changes.artifacts.analytics_json import AnalyticsJsonHandler
from changes.artifacts.coverage import CoverageHandler
from changes.artifacts.dummylogfile import DummyLogFileHandler
from changes.artifacts.manager import Manager
from changes.artifacts.manifest_json import ManifestJsonHandler
from changes.artifacts.xunit import XunitHandler
from changes.backends.base import BaseBackend, UnrecoverableException
from changes.buildsteps.base import BuildStep
from changes.config import db, redis, statsreporter
from changes.constants import Result, Status
from changes.db.utils import get_or_create
from changes.jobs.sync_job_step import sync_job_step
from changes.lib.artifact_store_lib import ArtifactStoreClient
from changes.models.artifact import Artifact
from changes.models.failurereason import FailureReason
from changes.models.jobphase import JobPhase
from changes.models.jobstep import JobStep
from changes.models.log import LogSource, LOG_CHUNK_SIZE
from changes.models.node import Cluster, ClusterNode, Node
from changes.storage.artifactstore import ArtifactStoreFileStorage
from changes.utils.http import build_patch_uri
from changes.utils.text import chunked
RESULT_MAP = {
'SUCCESS': Result.passed,
'ABORTED': Result.aborted,
'FAILURE': Result.failed,
'REGRESSION': Result.failed,
'UNSTABLE': Result.failed,
}
QUEUE_ID_XPATH = '/queue/item[action/parameter/name="CHANGES_BID" and action/parameter/value="{job_id}"]/id'
BUILD_ID_XPATH = ('/freeStyleProject/build[action/parameter/name="CHANGES_BID" and '
'action/parameter/value="{job_id}"]/number')
ID_XML_RE = re.compile(r'<id>(\d+)</id>')
LOG_SYNC_TIMEOUT_SECS = 30
# Redis key for storing the master blacklist set
# The blacklist is used to temporarily remove jenkins masters from the pool of available masters.
MASTER_BLACKLIST_KEY = 'jenkins_master_blacklist'
# Default name for the Jenkins console log.
# Note that artifactstore may alter the name for deduplication, so this cannot directly be used.
JENKINS_LOG_NAME = 'jenkins-console'
class NotFound(Exception):
"""Indicates a 404 response from the Jenkins API."""
pass
class JenkinsBuilder(BaseBackend):
def __init__(self, master_urls=None, diff_urls=None, job_name=None,
auth_keyname=None, verify=True,
cluster=None, debug_config=None,
*args, **kwargs):
super(JenkinsBuilder, self).__init__(*args, **kwargs)
self.master_urls = master_urls
self.diff_urls = diff_urls
assert self.master_urls, 'No Jenkins masters specified'
self.logger = logging.getLogger('jenkins')
self.job_name = job_name
self.http_session = requests.Session()
self.auth = self.app.config[auth_keyname] if auth_keyname else None
self.verify = verify
self.cluster = cluster
self.debug_config = debug_config or {}
self.artifact_store_client = ArtifactStoreClient(current_app.config['ARTIFACTS_SERVER'])
def report_response_status(r, *args, **kwargs):
statsreporter.stats().incr('jenkins_api_response_{}'.format(r.status_code))
self.http_session.hooks['response'].append(report_response_status)
def _get_text_response(self, master_base_url, path, method='GET',
params=None, data=None):
"""Make an HTTP request and return a text response.
Params:
master_base_url (str): Jenkins master URL, in scheme://host form.
path (str): URL path on the master to access.
method (str): HTTP verb to use; Either 'GET' or 'POST'; 'GET' is the default.
params (dict): Optional dictionary of URL parameters to append to the URL.
data (dict): Optional body to attach to the request. If a dict is provided, it will be form-encoded.
Returns:
Content of the response, in unicode.
Raises:
NotFound if the server responded with a 404 status.
Exception for other error status codes.
"""
url = '{}/{}'.format(master_base_url, path.lstrip('/'))
if params is None:
params = {}
self.logger.info('Fetching %r', url)
resp = getattr(self.http_session, method.lower())(url, params=params,
data=data,
allow_redirects=False,
timeout=30,
auth=self.auth,
verify=self.verify)
if resp.status_code == 404:
raise NotFound
elif not (200 <= resp.status_code < 400):
exception_msg = 'Invalid response. Status code for %s was %s'
attrs = url, resp.status_code
self.logger.exception(exception_msg, *attrs)
raise Exception(exception_msg % attrs)
return resp.text
def _get_json_response(self, master_base_url, path):
"""Makes a Jenkins API request and returns the JSON response
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
path (str): URL path on the master to access.
Returns:
Parsed JSON from the request.
Raises:
NotFound if the server responded with a 404 status.
Exception for other error status codes.
ValueError if the response wasn't valid JSON.
"""
path = '{}/api/json/'.format(path.strip('/'))
text = self._get_text_response(master_base_url, path, method='GET')
return json.loads(text)
def _parse_parameters(self, json):
params = {}
for action in json['actions']:
params.update(
(p['name'], p.get('value'))
for p in action.get('parameters', [])
)
return params
def _get_artifactstore_bucket(self, step):
# Create the artifactstore bucket, if it doesn't already exist
bucket_name = step.data.get('jenkins_bucket_name')
if not bucket_name:
bucket_name = self.artifact_store_client.create_bucket(step.id.hex + '-jenkins').name
step.data['jenkins_bucket_name'] = bucket_name
db.session.add(step)
db.session.commit()
return bucket_name
def _create_job_step(self, phase, data, force_create=False, cluster=None, **defaults):
"""
Gets or creates the primary JobStep for a Jenkins Job.
Args:
phase (JobPhase): JobPhase the JobStep should be part of.
data (dict): JSON-serializable data associated with the Jenkins build.
force_create (bool): Force this JobStep to be created (rather than
retrieved). This is used when replacing a JobStep to make sure
we don't just get the old one.
cluster (Optional[str]): Cluster in which the JobStep will be run.
Returns:
JobStep: The JobStep that was retrieved or created.
"""
defaults['data'] = data
if cluster:
defaults['cluster'] = cluster
# TODO(kylec): Get rid of the kwargs.
if not defaults.get('label'):
# we update this once we have the build_no for this jobstep
defaults['label'] = '<Creating Jenkins build>'
where = {
'job': phase.job,
'project': phase.project,
'phase': phase,
}
if force_create:
# uuid is unique which forces jobstep to be created
where['id'] = uuid.uuid4()
step, created = get_or_create(JobStep, where=where, defaults=defaults)
assert created or not force_create
BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')
return step
def fetch_artifact(self, jobstep, artifact_data):
"""
Fetch an artifact from a Jenkins job.
Args:
jobstep (JobStep): The JobStep associated with the artifact.
artifact_data (dict): Jenkins job artifact metadata dictionary.
Returns:
A streamed requests Response object.
Raises:
HTTPError: if the response code didn't indicate success.
Timeout: if the server took too long to respond.
"""
url = '{base}/job/{job}/{build}/artifact/{artifact}'.format(
base=jobstep.data['master'],
job=jobstep.data['job_name'],
build=jobstep.data['build_no'],
artifact=artifact_data['relativePath'],
)
return self._streaming_get(url)
def sync_artifact(self, artifact):
jobstep = artifact.step
resp = self.fetch_artifact(jobstep, artifact.data)
# NB: Accessing Response.content results in the entire artifact
# being loaded into memory.
if len(resp.content) == 0:
# Artifact store does not support empty artifacts, and they're not very useful, so just discard them.
self.logger.info('Artifact %s from jobstep %s is empty, discarding' % (artifact.name, jobstep.id.hex))
return
bucket_name = self._get_artifactstore_bucket(jobstep)
artifact.file.storage = 'changes.storage.artifactstore.ArtifactStoreFileStorage'
filename = ArtifactStoreFileStorage.get_filename_from_artifact_name(bucket_name, artifact.id.hex)
artifact.file.save(StringIO(resp.content), filename, path=artifact.name)
# commit file save regardless of whether handler is successful
db.session.commit()
# TODO(dcramer): requests doesn't seem to provide a non-binary file-like
# API, so we're stuffing it into StringIO
try:
self.get_artifact_manager(jobstep).process(artifact, StringIO(resp.content))
except Exception:
self.logger.exception(
'Failed to sync test results for job step %s', jobstep.id)
def _sync_log(self, jobstep):
bucket_name = self._get_artifactstore_bucket(jobstep)
# Note: artifactstore may alter the log name to deduplicate it, so always use data.get('log_artifact_name')
artifact_name = jobstep.data.get('log_artifact_name')
if not artifact_name:
artifact_name = self.artifact_store_client\
.create_chunked_artifact(bucket_name, artifact_name=JENKINS_LOG_NAME).name
jobstep.data['log_artifact_name'] = artifact_name
db.session.add(jobstep)
db.session.commit()
logsource, created = get_or_create(LogSource, where={
'name': artifact_name,
'step': jobstep,
}, defaults={
'job': jobstep.job,
'project': jobstep.project,
'date_created': jobstep.date_started,
'in_artifact_store': True,
})
if created:
offset = 0
else:
offset = jobstep.data.get('log_offset', 0)
url = '{base}/job/{job}/{build}/logText/progressiveText/'.format(
base=jobstep.data['master'],
job=jobstep.data['job_name'],
build=jobstep.data['build_no'],
)
start_time = time.time()
with closing(self._streaming_get(url, params={'start': offset})) as resp:
log_length = int(resp.headers['X-Text-Size'])
# When you request an offset that doesn't exist in the build log, Jenkins
# will instead return the entire log. Jenkins also seems to provide us
# with X-Text-Size which indicates the total size of the log
if offset > log_length:
return
# Jenkins will suggest to us that there is more data when the job has
# yet to complete
has_more = resp.headers.get('X-More-Data') == 'true'
# XXX: requests doesn't seem to guarantee chunk_size, so we force it
# with our own helper
iterator = resp.iter_content()
for chunk in chunked(iterator, LOG_CHUNK_SIZE):
chunk_size = len(chunk)
try:
self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, chunk)
offset += chunk_size
if time.time() > start_time + LOG_SYNC_TIMEOUT_SECS:
raise RuntimeError('TOO LONG TO DOWNLOAD LOG: %s' % logsource.get_url())
except Exception as e:
# On an exception or a timeout, attempt to truncate the log
# Catch all exceptions, including timeouts and HTTP errors
self.logger.warning('Exception when uploading logchunks: %s', e.message)
has_more = False
warning = ("\nLOG TRUNCATED. SEE FULL LOG AT "
"{base}/job/{job}/{build}/consoleText\n").format(
base=jobstep.data['master'],
job=jobstep.data['job_name'],
build=jobstep.data['build_no'])
self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, warning)
break
# We **must** track the log offset externally as Jenkins embeds encoded
# links and we can't accurately predict the next `start` param.
jobstep.data['log_offset'] = log_length
db.session.add(jobstep)
if not has_more:
self.artifact_store_client.close_chunked_artifact(bucket_name, artifact_name)
return True if has_more else None
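# Editor's sketch (not part of the original module): the offset-tracked,
# chunked download pattern used by _sync_log above, reduced to plain requests.
# The header names mirror Jenkins' progressiveText endpoint; the base URL and
# the upload callback are assumptions for illustration.
def _example_progressive_log(build_base_url, offset, upload_chunk):
    import requests

    resp = requests.get(build_base_url + '/logText/progressiveText/',
                        params={'start': offset}, stream=True, timeout=30)
    log_length = int(resp.headers['X-Text-Size'])
    has_more = resp.headers.get('X-More-Data') == 'true'
    for chunk in resp.iter_content(chunk_size=4096):
        upload_chunk(offset, chunk)
        offset += len(chunk)
    # Jenkins reports the total size, so persist it as the next start offset.
    return log_length, has_more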
def _pick_master(self, job_name, is_diff=False):
"""
Identify a master to run the given job on.
The master with the fewest queued builds for the given job is chosen.
Candidates are shuffled first, so ties between empty queues are broken
at random (the first master found with an empty queue wins).
"""
candidate_urls = self.master_urls
if is_diff and self.diff_urls:
candidate_urls = self.diff_urls
blacklist = redis.smembers(MASTER_BLACKLIST_KEY)
master_urls = [c for c in candidate_urls if c not in blacklist]
if len(master_urls) == 0:
raise ValueError("No masters to pick from.")
if len(master_urls) == 1:
return master_urls[0]
random.shuffle(master_urls)
best_match = (sys.maxint, None)
for url in master_urls:
try:
queued_jobs = self._count_queued_jobs(url, job_name)
except:
self.logger.exception("Couldn't count queued jobs on master %s", url)
continue
if queued_jobs == 0:
return url
if best_match[0] > queued_jobs:
best_match = (queued_jobs, url)
best = best_match[1]
if not best:
raise Exception("Unable to successfully pick a master from {}.".format(master_urls))
return best
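    # Illustrative sketch (not part of the original module; URLs and queue
    # depths are hypothetical): if the non-blacklisted candidates report
    # queue depths of
    #
    #   {'https://ci-a.example.com': 3,
    #    'https://ci-b.example.com': 0,
    #    'https://ci-c.example.com': 1}
    #
    # for the requested job, _pick_master returns ci-b as soon as its empty
    # queue is seen; if no queue were empty, it would fall back to ci-c, the
    # shortest non-empty queue.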
def _count_queued_jobs(self, master_base_url, job_name):
response = self._get_json_response(
master_base_url=master_base_url,
path='/queue/',
)
return sum((
1 for i in response['items']
if i['task']['name'] == job_name
))
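    # For reference, the queue JSON consumed above is accessed as
    # response['items'][i]['task']['name']; a minimal (hypothetical) payload
    # would look like {"items": [{"task": {"name": "generic_build"}}]}.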
def _find_job(self, master_base_url, job_name, changes_bid):
"""
Given a job identifier, we attempt to poll the various endpoints
for a limited amount of time, trying to match up either a queued item
or a running job that has the CHANGES_BID parameter.
This is necessary because Jenkins does not give us any identifying
information when we create a job initially.
The changes_bid parameter should be the corresponding value to look for in
the CHANGES_BID parameter.
The result is a mapping with the following keys:
- queued: is it currently present in the queue
- item_id: the queued item ID, if available
- build_no: the build number, if available
"""
# Check the queue first to ensure that we don't miss a transition
# from queue -> active jobs
item_id = self._find_queue_item_id(master_base_url, changes_bid)
build_no = None
if item_id:
# Saw it in the queue, so we don't know the build number yet.
build_no = None
else:
# Didn't see it in the queue, look for the build number on the assumption that it has begun.
build_no = self._find_build_no(master_base_url, job_name, changes_bid)
if build_no or item_id:
# If we found either, we know the Jenkins build exists and we can probably find it again.
return {
'job_name': job_name,
'queued': bool(item_id),
'item_id': item_id,
'build_no': build_no,
'uri': None,
}
return None
def _find_queue_item_id(self, master_base_url, changes_bid):
"""Looks in a Jenkins master's queue for an item, and returns the ID if found.
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID.
Returns:
str: Queue item id if found, otherwise None.
"""
xpath = QUEUE_ID_XPATH.format(job_id=changes_bid)
try:
response = self._get_text_response(
master_base_url=master_base_url,
path='/queue/api/xml/',
params={
'xpath': xpath,
'wrapper': 'x',
},
)
except NotFound:
return None
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('id').next()
except StopIteration:
return None
return match.text
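    # Because of the 'wrapper' parameter above, Jenkins wraps all XPath matches
    # in a single <x> element, e.g. <x><id>1234</id></x> (id hypothetical),
    # from which the .iter('id') lookup above extracts '1234'.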
def _find_build_no(self, master_base_url, job_name, changes_bid):
"""Looks in a Jenkins master's list of current/recent builds for one with the given CHANGES_BID,
and returns the build number if found.
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
job_name (str): Name of the Jenkins project/job to look for the build in; ex: 'generic_build'.
changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID.
Returns:
str: build number of the build if found, otherwise None.
"""
xpath = BUILD_ID_XPATH.format(job_id=changes_bid)
try:
response = self._get_text_response(
master_base_url=master_base_url,
path='/job/{job_name}/api/xml/'.format(job_name=job_name),
params={
'depth': 1,
'xpath': xpath,
'wrapper': 'x',
},
)
except NotFound:
return None
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('number').next()
except StopIteration:
return None
return match.text
def _get_node(self, master_base_url, label):
node, created = get_or_create(Node, {'label': label})
if not created:
return node
try:
response = self._get_text_response(
master_base_url=master_base_url,
path='/computer/{}/config.xml'.format(label),
)
except NotFound:
return node
        # lxml expects the response to be in bytes, so assume it's utf-8 and
        # encode it back to bytes before parsing
response = response.encode('utf-8')
xml = objectify.fromstring(response)
cluster_names = xml.label.text.split(' ')
for cluster_name in cluster_names:
            # remove swarm client as a cluster label, as it's not useful
if cluster_name == 'swarm':
continue
cluster, _ = get_or_create(Cluster, {'label': cluster_name})
get_or_create(ClusterNode, {'node': node, 'cluster': cluster})
return node
def _sync_step_from_queue(self, step):
try:
item = self._get_json_response(
step.data['master'],
'/queue/item/{}'.format(step.data['item_id']),
)
except NotFound:
# The build might've left the Jenkins queue since we last checked; look for the build_no of the
# running build.
build_no = self._find_build_no(step.data['master'], step.data['job_name'], changes_bid=step.id.hex)
if build_no:
step.data['queued'] = False
step.data['build_no'] = build_no
db.session.add(step)
self._sync_step_from_active(step)
return
step.status = Status.finished
step.result = Result.infra_failed
db.session.add(step)
self.logger.exception("Queued step not found in queue: {} (build: {})".format(step.id, step.job.build_id))
statsreporter.stats().incr('jenkins_item_not_found_in_queue')
return
if item.get('executable'):
build_no = item['executable']['number']
step.data['queued'] = False
step.data['build_no'] = build_no
step.data['uri'] = item['executable']['url']
db.session.add(step)
if item['blocked']:
step.status = Status.queued
db.session.add(step)
elif item.get('cancelled') and not step.data.get('build_no'):
step.status = Status.finished
step.result = Result.aborted
db.session.add(step)
elif item.get('executable'):
self._sync_step_from_active(step)
return
def _get_jenkins_job(self, step):
try:
job_name = step.data['job_name']
build_no = step.data['build_no']
except KeyError:
raise UnrecoverableException('Missing Jenkins job information')
try:
return self._get_json_response(
step.data['master'],
'/job/{}/{}'.format(job_name, build_no),
)
except NotFound:
raise UnrecoverableException('Unable to find job in Jenkins')
def _sync_step_from_active(self, step):
item = self._get_jenkins_job(step)
if not step.data.get('uri'):
step.data['uri'] = item['url']
        # TODO(dcramer): we're doing a lot of work here that we might not
        # need to, since it may have been synced previously
node = self._get_node(step.data['master'], item['builtOn'])
step.node = node
step.date_started = datetime.utcfromtimestamp(
item['timestamp'] / 1000)
if item['building']:
step.status = Status.in_progress
else:
step.status = Status.finished
step.result = RESULT_MAP[item['result']]
step.date_finished = datetime.utcfromtimestamp(
(item['timestamp'] + item['duration']) / 1000)
if step.status == Status.finished:
self._sync_results(step, item)
if db.session.is_modified(step):
db.session.add(step)
db.session.commit()
def _sync_results(self, step, item):
artifacts = item.get('artifacts', ())
# Detect and warn if there are duplicate artifact file names as we were relying on
# uniqueness before.
artifact_filenames = set()
for artifact in artifacts:
if artifact['fileName'] in artifact_filenames:
self.logger.warning('Duplicate artifact filename found: %s', artifact['fileName'])
artifact_filenames.add(artifact['fileName'])
self._sync_generic_results(step, artifacts)
# sync console log
self.logger.info('Syncing console log for %s', step.id)
try:
result = True
while result:
result = self._sync_log(step)
except Exception:
db.session.rollback()
current_app.logger.exception(
'Unable to sync console log for job step %r',
step.id.hex)
def verify_final_artifacts(self, step, artifacts):
# If the Jenkins run was aborted or timed out, we don't expect a manifest file.
if (step.result != Result.aborted and
not step.data.get('timed_out', False) and
not any(ManifestJsonHandler.can_process(a.name) for a in artifacts)):
db.session.add(FailureReason(
step_id=step.id,
job_id=step.job.id,
build_id=step.job.build_id,
project_id=step.job.project_id,
reason='missing_manifest_json',
))
step.result = Result.infra_failed
db.session.add(step)
db.session.commit()
def _get_artifact_path(self, artifact_data):
"""Given the artifact's info from Jenkins, return a relative path
to be used as a unique name in the database.
This assumes that Jenkins is set up to collect artifacts from a directory
named "artifacts" if Jenkins says the relative path starts with "artifacts/".
In those cases, remove the "artifacts/" prefix.
"""
artifact_dir = 'artifacts/'
if artifact_data['relativePath'].startswith(artifact_dir):
return artifact_data['relativePath'][len(artifact_dir):]
return artifact_data['relativePath']
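    # Example for _get_artifact_path above (paths hypothetical):
    # {'relativePath': 'artifacts/junit.xml'} maps to 'junit.xml', while
    # {'relativePath': 'logs/build.log'} is returned unchanged.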
def _handle_generic_artifact(self, jobstep, artifact):
artifact, created = get_or_create(Artifact, where={
'step': jobstep,
'name': self._get_artifact_path(artifact),
}, defaults={
'project': jobstep.project,
'job': jobstep.job,
'data': artifact,
})
if not created:
db.session.commit()
def _sync_generic_results(self, step, artifacts):
# sync artifacts
self.logger.info('Syncing artifacts for %s', step.id)
for artifact in artifacts:
self._handle_generic_artifact(jobstep=step, artifact=artifact)
def sync_job(self, job):
"""
Steps get created during the create_job and sync_step phases so we only
rely on those steps syncing.
"""
def sync_step(self, step):
if step.data.get('queued'):
self._sync_step_from_queue(step)
else:
self._sync_step_from_active(step)
def cancel_step(self, step):
# The Jenkins build_no won't exist if the job is still queued.
if step.data.get('build_no'):
url = '/job/{}/{}/stop/'.format(
step.data['job_name'], step.data['build_no'])
elif step.data.get('item_id'):
url = '/queue/cancelItem?id={}'.format(step.data['item_id'])
else:
url = None
step.status = Status.finished
step.result = Result.aborted
step.date_finished = datetime.utcnow()
db.session.add(step)
db.session.flush()
if not url:
# We don't know how to cancel the step or even if it is running, so
# we've done all we can.
return
try:
self._get_text_response(
master_base_url=step.data['master'],
path=url,
method='POST',
)
except NotFound:
return
except Exception:
self.logger.exception('Unable to cancel build upstream')
# If the build timed out and is in progress (off the Jenkins queue),
# try to grab the logs.
if not step.data.get('queued') and step.data.get('timed_out', False):
try:
self._sync_log(step)
except Exception:
self.logger.exception(
'Unable to fully sync console log for job step %r',
step.id.hex)
def get_job_parameters(self, job, changes_bid):
# type: (Any, str) -> Dict[str, str]
# TODO(kylec): Take a Source rather than a Job; we don't need a Job.
"""
Args:
job (Job): Job to use.
changes_bid (str): Changes BID; typically JobStep ID.
Returns:
dict: Parameters to be supplied to Jenkins for the job.
"""
params = {'CHANGES_BID': changes_bid}
source = job.build.source
if source.revision_sha:
params['REVISION'] = source.revision_sha
if source.patch:
params['PATCH_URL'] = build_patch_uri(source.patch.id)
phab_diff_id = source.data.get('phabricator.diffID')
if phab_diff_id:
params['PHAB_DIFF_ID'] = phab_diff_id
phab_revision_id = source.data.get('phabricator.revisionID')
if phab_revision_id:
params['PHAB_REVISION_ID'] = phab_revision_id
if self.cluster:
params['CLUSTER'] = self.cluster
return params
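    # Example return value of get_job_parameters above for a Phabricator diff
    # build on a cluster (all values hypothetical; the PATCH_URL format is
    # whatever build_patch_uri produces):
    #
    #   {'CHANGES_BID': '8d0dd7d8...', 'REVISION': '1f0c9a2...',
    #    'PATCH_URL': '<uri returned by build_patch_uri>',
    #    'PHAB_DIFF_ID': '5678', 'PHAB_REVISION_ID': '1234',
    #    'CLUSTER': 'linux-small'}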
def create_jenkins_job_from_params(self, changes_bid, params, job_name=None, is_diff=False):
if job_name is None:
job_name = self.job_name
if not job_name:
raise UnrecoverableException('Missing Jenkins project configuration')
json_data = {
'parameter': params
}
master = self._pick_master(job_name, is_diff)
# TODO: Jenkins will return a 302 if it cannot queue the job which I
# believe implies that there is already a job with the same parameters
# queued.
self._get_text_response(
master_base_url=master,
path='/job/{}/build'.format(job_name),
method='POST',
data={
'json': json.dumps(json_data),
},
)
# we retry for a period of time as Jenkins doesn't have strong consistency
# guarantees and the job may not show up right away
t = time.time() + 5
job_data = None
while time.time() < t:
job_data = self._find_job(master, job_name, changes_bid)
if job_data:
break
time.sleep(0.3)
if job_data is None:
raise Exception('Unable to find matching job after creation. GLHF')
job_data['master'] = master
return job_data
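    # The 'json' form field posted above ends up looking roughly like
    # (parameter values hypothetical):
    #
    #   {"parameter": [{"name": "CHANGES_BID", "value": "8d0dd7d8..."},
    #                  {"name": "REVISION", "value": "1f0c9a2..."}]}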
def get_default_job_phase_label(self, job, job_name):
return 'Build {0}'.format(job_name)
def create_job(self, job, replaces=None):
"""
Creates a job within Jenkins.
Due to the way the API works, this consists of two steps:
- Submitting the job
- Polling for the newly created job to associate either a queue ID
or a finalized build number.
"""
phase, created = get_or_create(JobPhase, where={
'job': job,
'label': self.get_default_job_phase_label(job, self.job_name),
'project': job.project,
}, defaults={
'status': job.status,
})
assert not created or not replaces
step = self._create_job_step(
phase=phase,
data={'job_name': self.job_name},
status=job.status,
force_create=bool(replaces),
cluster=self.cluster
)
if replaces:
replaces.replacement_id = step.id
db.session.add(replaces)
db.session.commit()
# now create the jenkins build
# we don't commit immediately because we also want to update the job
# and jobstep using the job_data we get from jenkins
job_data = self.create_jenkins_build(step, commit=False)
if job_data['queued']:
job.status = Status.queued
else:
job.status = Status.in_progress
db.session.add(job)
assert 'master' in step.data
assert 'job_name' in step.data
assert 'build_no' in step.data or 'item_id' in step.data
# now we have the build_no/item_id and can set the full jobstep label
step.label = '{0} #{1}'.format(step.data['job_name'], step.data['build_no'] or step.data['item_id'])
db.session.add(step)
db.session.commit()
sync_job_step.delay(
step_id=step.id.hex,
task_id=step.id.hex,
parent_task_id=job.id.hex,
)
return step
def create_jenkins_build(self, step, job_name=None, commit=True, **kwargs):
"""
Create a jenkins build for the given jobstep.
If the given step already has a jenkins build associated with it, this
will not perform any work. If not, this creates the build, updates the
step to refer to the new build, optionally committing these changes.
Args:
step (JobStep): The shard we'd like to launch a jenkins build for.
job_name (str): Job's name. Default is self.job_name.
commit (bool): Whether to commit changes to database at the end.
kwargs: Additional arguments to be passed to get_job_parameters()
"""
if step.data.get('build_no') or step.data.get('item_id'):
return
params_dict = self.get_job_parameters(step.job, changes_bid=step.id.hex, **kwargs)
jenkins_params = [{'name': k, 'value': v} for k, v in params_dict.iteritems()]
is_diff = not step.job.source.is_commit()
job_data = self.create_jenkins_job_from_params(
changes_bid=step.id.hex,
params=jenkins_params,
job_name=job_name,
is_diff=is_diff
)
step.data.update(job_data)
db.session.add(step)
# Hook that allows other builders to add commands for the jobstep
# which tells changes-client what to run.
# TODO(kylec): Stop passing the params as env once the data is available
# in changes-client by other means.
self.create_commands(step, env=params_dict)
if commit:
db.session.commit()
return job_data
def get_artifact_manager(self, jobstep):
handlers = [CoverageHandler, XunitHandler, ManifestJsonHandler, AnalyticsJsonHandler]
if self.debug_config.get('fetch_jenkins_logs'):
handlers.append(DummyLogFileHandler)
return Manager(handlers)
def create_commands(self, step, env):
"""
Args:
step (JobStep): The JobStep to create commands under.
env (dict): Environment variables for the commands.
"""
pass
def can_snapshot(self):
return False
def _streaming_get(self, url, params=None):
"""
Perform an HTTP GET request with a streaming response.
Args:
url (str): The url to fetch.
params (dict): Optional dictionary of query parameters.
Returns:
A streamed requests Response object.
Raises:
HTTPError: if the response code didn't indicate success.
Timeout: if the server took too long to respond.
"""
resp = self.http_session.get(url, stream=True, timeout=15,
params=params, auth=self.auth,
verify=self.verify)
resp.raise_for_status()
return resp
| {
"repo_name": "dropbox/changes",
"path": "changes/backends/jenkins/builder.py",
"copies": "1",
"size": "36175",
"license": "apache-2.0",
"hash": 705409330078255600,
"line_mean": 36.9192872117,
"line_max": 118,
"alpha_frac": 0.5829716655,
"autogenerated": false,
"ratio": 4.242904058174994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325875723674993,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import json
import logging
import re
import requests
import time
from cStringIO import StringIO
from contextlib import closing
from datetime import datetime
from flask import current_app
from lxml import etree, objectify
from changes.artifacts.coverage import CoverageHandler
from changes.artifacts.xunit import XunitHandler
from changes.backends.base import BaseBackend, UnrecoverableException
from changes.config import db
from changes.constants import Result, Status
from changes.db.utils import create_or_update, get_or_create
from changes.jobs.sync_artifact import sync_artifact
from changes.jobs.sync_job_step import sync_job_step
from changes.models import (
Artifact, Cluster, ClusterNode, TestResult,
LogSource, LogChunk, Node, JobPhase, JobStep, LOG_CHUNK_SIZE
)
from changes.utils.http import build_uri
from changes.utils.text import chunked
RESULT_MAP = {
'SUCCESS': Result.passed,
'ABORTED': Result.aborted,
'FAILURE': Result.failed,
'REGRESSION': Result.failed,
'UNSTABLE': Result.failed,
}
QUEUE_ID_XPATH = '/queue/item[action/parameter/name="CHANGES_BID" and action/parameter/value="{job_id}"]/id'
BUILD_ID_XPATH = '/freeStyleProject/build[action/parameter/name="CHANGES_BID" and action/parameter/value="{job_id}"]/number'
XUNIT_FILENAMES = ('junit.xml', 'xunit.xml', 'nosetests.xml')
COVERAGE_FILENAMES = ('coverage.xml',)
ID_XML_RE = re.compile(r'<id>(\d+)</id>')
class NotFound(Exception):
pass
class JenkinsBuilder(BaseBackend):
provider = 'jenkins'
def __init__(self, base_url=None, job_name=None, token=None, auth=None,
sync_phase_artifacts=True, *args, **kwargs):
super(JenkinsBuilder, self).__init__(*args, **kwargs)
self.base_url = base_url or self.app.config['JENKINS_URL']
self.token = token or self.app.config['JENKINS_TOKEN']
self.auth = auth or self.app.config['JENKINS_AUTH']
self.logger = logging.getLogger('jenkins')
self.job_name = job_name
# disabled by default as it's expensive
self.sync_phase_artifacts = sync_phase_artifacts
self.sync_log_artifacts = self.app.config.get('JENKINS_SYNC_LOG_ARTIFACTS', False)
self.sync_xunit_artifacts = self.app.config.get('JENKINS_SYNC_XUNIT_ARTIFACTS', True)
self.sync_coverage_artifacts = self.app.config.get('JENKINS_SYNC_COVERAGE_ARTIFACTS', True)
self.sync_file_artifacts = self.app.config.get('JENKINS_SYNC_FILE_ARTIFACTS', True)
def _get_raw_response(self, path, method='GET', params=None, **kwargs):
url = '{}/{}'.format(self.base_url, path.lstrip('/'))
kwargs.setdefault('allow_redirects', False)
kwargs.setdefault('timeout', 30)
kwargs.setdefault('auth', self.auth)
if params is None:
params = {}
if self.token is not None:
params.setdefault('token', self.token)
self.logger.info('Fetching %r', url)
resp = getattr(requests, method.lower())(url, params=params, **kwargs)
if resp.status_code == 404:
raise NotFound
elif not (200 <= resp.status_code < 400):
raise Exception('Invalid response. Status code was %s' % resp.status_code)
return resp.text
def _get_json_response(self, path, *args, **kwargs):
path = '{}/api/json/'.format(path.strip('/'))
data = self._get_raw_response(path, *args, **kwargs)
if not data:
return
try:
return json.loads(data)
except ValueError:
raise Exception('Invalid JSON data')
_get_response = _get_json_response
def _parse_parameters(self, json):
params = {}
for action in json['actions']:
params.update(
(p['name'], p.get('value'))
for p in action.get('parameters', [])
)
return params
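    # Example for _parse_parameters above (payload hypothetical): an input of
    #
    #   {'actions': [{'parameters': [{'name': 'CHANGES_BID',
    #                                 'value': 'abc123'}]},
    #                {'causes': []}]}
    #
    # yields {'CHANGES_BID': 'abc123'}; actions without a 'parameters' key
    # contribute nothing.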
def _create_job_step(self, phase, job_name=None, build_no=None,
label=None, uri=None, **kwargs):
# TODO(dcramer): we make an assumption that the job step label is unique
        # but it's not guaranteed to be the case. We can ignore this assumption
# by guaranteeing that the JobStep.id value is used for builds instead
# of the Job.id value.
defaults = {
'data': {
'job_name': job_name,
'build_no': build_no,
'uri': uri,
},
}
defaults.update(kwargs)
data = defaults['data']
if data['job_name'] and not label:
label = '{0} #{1}'.format(data['job_name'], data['build_no'] or data['item_id'])
assert label
step, created = get_or_create(JobStep, where={
'job': phase.job,
'project': phase.project,
'phase': phase,
'label': label,
}, defaults=defaults)
return step
def fetch_artifact(self, jobstep, artifact_data):
url = '{base}/job/{job}/{build}/artifact/{artifact}'.format(
base=self.base_url,
job=jobstep.data['job_name'],
build=jobstep.data['build_no'],
artifact=artifact_data['relativePath'],
)
return requests.get(url, stream=True, timeout=15)
def _sync_artifact_as_file(self, artifact):
jobstep = artifact.step
resp = self.fetch_artifact(jobstep, artifact.data)
step_id = jobstep.id.hex
artifact.file.save(
StringIO(resp.content), '{0}/{1}/{2}_{3}'.format(
step_id[:4], step_id[4:], artifact.id.hex, artifact.name
)
)
def _sync_artifact_as_xunit(self, artifact):
jobstep = artifact.step
resp = self.fetch_artifact(jobstep, artifact.data)
        # TODO(dcramer): requests doesn't seem to provide a non-binary file-like
        # API, so we're stuffing it into StringIO
try:
handler = XunitHandler(jobstep)
handler.process(StringIO(resp.content))
except Exception:
db.session.rollback()
self.logger.exception(
'Failed to sync test results for job step %s', jobstep.id)
else:
db.session.commit()
def _sync_artifact_as_coverage(self, artifact):
jobstep = artifact.step
resp = self.fetch_artifact(jobstep, artifact.data)
        # TODO(dcramer): requests doesn't seem to provide a non-binary file-like
        # API, so we're stuffing it into StringIO
try:
handler = CoverageHandler(jobstep)
handler.process(StringIO(resp.content))
except Exception:
db.session.rollback()
self.logger.exception(
'Failed to sync test results for job step %s', jobstep.id)
else:
db.session.commit()
def _sync_artifact_as_log(self, artifact):
jobstep = artifact.step
job = artifact.job
logsource, created = get_or_create(LogSource, where={
'name': artifact.data['displayPath'],
'job': job,
'step': jobstep,
}, defaults={
'job': job,
'project': job.project,
'date_created': job.date_started,
})
url = '{base}/job/{job}/{build}/artifact/{artifact}'.format(
base=self.base_url,
job=jobstep.data['job_name'],
build=jobstep.data['build_no'],
artifact=artifact.data['relativePath'],
)
offset = 0
session = requests.Session()
with closing(session.get(url, stream=True, timeout=15)) as resp:
iterator = resp.iter_content()
for chunk in chunked(iterator, LOG_CHUNK_SIZE):
chunk_size = len(chunk)
chunk, _ = create_or_update(LogChunk, where={
'source': logsource,
'offset': offset,
}, values={
'job': job,
'project': job.project,
'size': chunk_size,
'text': chunk,
})
offset += chunk_size
def _sync_log(self, jobstep, name, job_name, build_no):
job = jobstep.job
logsource, created = get_or_create(LogSource, where={
'name': name,
'step': jobstep,
}, defaults={
'job': job,
'project': jobstep.project,
'date_created': jobstep.date_started,
})
if created:
offset = 0
else:
offset = jobstep.data.get('log_offset', 0)
url = '{base}/job/{job}/{build}/logText/progressiveText/'.format(
base=self.base_url,
job=job_name,
build=build_no,
)
session = requests.Session()
with closing(session.get(url, params={'start': offset}, stream=True, timeout=15)) as resp:
log_length = int(resp.headers['X-Text-Size'])
            # When you request an offset that doesn't exist in the build log, Jenkins
            # will instead return the entire log. Jenkins also seems to provide us
            # with X-Text-Size, which indicates the total size of the log.
if offset > log_length:
return
            # XXX: requests doesn't seem to guarantee chunk_size, so we force it
            # with our own helper
iterator = resp.iter_content()
for chunk in chunked(iterator, LOG_CHUNK_SIZE):
chunk_size = len(chunk)
chunk, _ = create_or_update(LogChunk, where={
'source': logsource,
'offset': offset,
}, values={
'job': job,
'project': job.project,
'size': chunk_size,
'text': chunk,
})
offset += chunk_size
# Jenkins will suggest to us that there is more data when the job has
# yet to complete
has_more = resp.headers.get('X-More-Data') == 'true'
        # We **must** track the log offset externally as Jenkins embeds encoded
        # links and we can't accurately predict the next `start` param.
jobstep.data['log_offset'] = log_length
db.session.add(jobstep)
return True if has_more else None
def _process_test_report(self, step, test_report):
test_list = []
if not test_report:
return test_list
for suite_data in test_report['suites']:
for case in suite_data['cases']:
message = []
if case['errorDetails']:
message.append('Error\n-----')
message.append(case['errorDetails'] + '\n')
if case['errorStackTrace']:
message.append('Stacktrace\n----------')
message.append(case['errorStackTrace'] + '\n')
if case['skippedMessage']:
message.append(case['skippedMessage'] + '\n')
if case['status'] in ('PASSED', 'FIXED'):
result = Result.passed
elif case['status'] in ('FAILED', 'REGRESSION'):
result = Result.failed
elif case['status'] == 'SKIPPED':
result = Result.skipped
else:
raise ValueError('Invalid test result: %s' % (case['status'],))
test_result = TestResult(
step=step,
name=case['name'],
package=case['className'] or None,
duration=int(case['duration'] * 1000),
message='\n'.join(message).strip(),
result=result,
)
test_list.append(test_result)
return test_list
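    # A minimal (hypothetical) test_report payload for the loop above:
    #
    #   {'suites': [{'cases': [{'name': 'test_foo', 'className': 'tests.Foo',
    #                           'status': 'PASSED', 'duration': 0.25,
    #                           'errorDetails': None, 'errorStackTrace': None,
    #                           'skippedMessage': None}]}]}
    #
    # which produces a single passed TestResult with duration=250 (milliseconds).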
def _find_job(self, job_name, job_id):
"""
Given a job identifier, we attempt to poll the various endpoints
for a limited amount of time, trying to match up either a queued item
or a running job that has the CHANGES_BID parameter.
        This is necessary because Jenkins does not give us any identifying
information when we create a job initially.
The job_id parameter should be the corresponding value to look for in
the CHANGES_BID parameter.
The result is a mapping with the following keys:
- queued: is it currently present in the queue
- item_id: the queued item ID, if available
- build_no: the build number, if available
"""
# Check the queue first to ensure that we don't miss a transition
# from queue -> active jobs
item = self._find_job_in_queue(job_name, job_id)
if item:
return item
return self._find_job_in_active(job_name, job_id)
def _find_job_in_queue(self, job_name, job_id):
xpath = QUEUE_ID_XPATH.format(
job_id=job_id,
)
try:
response = self._get_raw_response('/queue/api/xml/', params={
'xpath': xpath,
'wrapper': 'x',
})
except NotFound:
return
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('id').next()
except StopIteration:
return
item_id = match.text
        # TODO: it's possible this isn't queued when this gets run
return {
'job_name': job_name,
'queued': True,
'item_id': item_id,
'build_no': None,
'uri': None,
}
def _find_job_in_active(self, job_name, job_id):
xpath = BUILD_ID_XPATH.format(
job_id=job_id,
)
try:
response = self._get_raw_response('/job/{job_name}/api/xml/'.format(
job_name=job_name,
), params={
'depth': 1,
'xpath': xpath,
'wrapper': 'x',
})
except NotFound:
return
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('number').next()
except StopIteration:
return
build_no = match.text
return {
'job_name': job_name,
'queued': False,
'item_id': None,
'build_no': build_no,
'uri': None,
}
def _get_node(self, label):
node, created = get_or_create(Node, {'label': label})
if not created:
return node
try:
response = self._get_raw_response('/computer/{}/config.xml'.format(
label
))
except NotFound:
return node
        # lxml expects the response to be in bytes, so assume it's utf-8 and
        # encode it back to bytes before parsing
response = response.encode('utf-8')
xml = objectify.fromstring(response)
cluster_names = xml.label.text.split(' ')
for cluster_name in cluster_names:
            # remove swarm client as a cluster label, as it's not useful
if cluster_name == 'swarm':
continue
cluster, _ = get_or_create(Cluster, {'label': cluster_name})
get_or_create(ClusterNode, {'node': node, 'cluster': cluster})
return node
def _sync_step_from_queue(self, step):
        # TODO(dcramer): when we hit a NotFound in the queue, maybe we should
        # attempt to scrape the list of jobs for a matching CHANGES_BID, as this
        # doesn't explicitly mean that the job doesn't exist
try:
item = self._get_response('/queue/item/{}'.format(
step.data['item_id']))
except NotFound:
step.status = Status.finished
step.result = Result.unknown
db.session.add(step)
return
if item.get('executable'):
build_no = item['executable']['number']
step.data['queued'] = False
step.data['build_no'] = build_no
step.data['uri'] = item['executable']['url']
db.session.add(step)
if item['blocked']:
step.status = Status.queued
db.session.add(step)
elif item.get('cancelled') and not step.data.get('build_no'):
step.status = Status.finished
step.result = Result.aborted
db.session.add(step)
elif item.get('executable'):
return self._sync_step_from_active(step)
def _sync_step_from_active(self, step):
try:
job_name = step.data['job_name']
build_no = step.data['build_no']
except KeyError:
raise UnrecoverableException('Missing Jenkins job information')
try:
item = self._get_response('/job/{}/{}'.format(
job_name, build_no))
except NotFound:
raise UnrecoverableException('Unable to find job in Jenkins')
if not step.data.get('uri'):
step.data['uri'] = item['url']
        # TODO(dcramer): we're doing a lot of work here that we might not
        # need to, since it may have been synced previously
node = self._get_node(item['builtOn'])
step.node = node
step.date_started = datetime.utcfromtimestamp(
item['timestamp'] / 1000)
if item['building']:
step.status = Status.in_progress
else:
step.status = Status.finished
step.result = RESULT_MAP[item['result']]
step.date_finished = datetime.utcfromtimestamp(
(item['timestamp'] + item['duration']) / 1000)
if step.status == Status.finished:
self._sync_results(step, item)
if db.session.is_modified(step):
db.session.add(step)
db.session.commit()
def _sync_results(self, step, item):
job_name = step.data['job_name']
build_no = step.data['build_no']
artifacts = item.get('artifacts', ())
if self.sync_phase_artifacts:
            # if we are allowing phase artifacts and we find *any* artifacts
            # that resemble a phase, we need to change the behavior of the
            # remainder of the tasks
phased_results = any(a['fileName'].endswith('phase.json') for a in artifacts)
else:
phased_results = False
# artifacts sync differently depending on the style of job results
if phased_results:
self._sync_phased_results(step, artifacts)
else:
self._sync_generic_results(step, artifacts)
# sync console log
self.logger.info('Syncing console log for %s', step.id)
try:
result = True
while result:
result = self._sync_log(
jobstep=step,
name=step.label,
job_name=job_name,
build_no=build_no,
)
except Exception:
db.session.rollback()
current_app.logger.exception(
'Unable to sync console log for job step %r',
step.id.hex)
def _handle_generic_artifact(self, jobstep, artifact, skip_checks=False):
artifact, created = get_or_create(Artifact, where={
'step': jobstep,
'name': artifact['fileName'],
}, defaults={
'project': jobstep.project,
'job': jobstep.job,
'data': artifact,
})
if not created:
db.session.commit()
sync_artifact.delay_if_needed(
artifact_id=artifact.id.hex,
task_id=artifact.id.hex,
parent_task_id=jobstep.id.hex,
skip_checks=skip_checks,
)
def _sync_phased_results(self, step, artifacts):
        # Due to the limitations of Jenkins and our requirement to have more
        # insight into the actual steps a build process takes, the best way to
        # do this is, unfortunately, to rewrite history within Changes.
job = step.job
project = step.project
artifacts_by_name = dict(
(a['fileName'], a)
for a in artifacts
)
pending_artifacts = set(artifacts_by_name.keys())
phase_steps = set()
phase_step_data = {
'job_name': step.data['job_name'],
'build_no': step.data['build_no'],
'generated': True,
}
phases = set()
# fetch each phase and create it immediately (as opposed to async)
for artifact_data in artifacts:
artifact_filename = artifact_data['fileName']
if not artifact_filename.endswith('phase.json'):
continue
pending_artifacts.remove(artifact_filename)
resp = self.fetch_artifact(step, artifact_data)
phase_data = resp.json()
if phase_data['retcode']:
result = Result.failed
else:
result = Result.passed
date_started = datetime.utcfromtimestamp(phase_data['startTime'])
date_finished = datetime.utcfromtimestamp(phase_data['endTime'])
jobphase, created = get_or_create(JobPhase, where={
'job': job,
'label': phase_data['name'],
}, defaults={
'project': project,
'result': result,
'status': Status.finished,
'date_started': date_started,
'date_finished': date_finished,
})
phases.add(jobphase)
jobstep, created = get_or_create(JobStep, where={
'phase': jobphase,
'label': step.label,
}, defaults={
'job': job,
'node': step.node,
'project': project,
'result': result,
'status': Status.finished,
'date_started': date_started,
'date_finished': date_finished,
'data': phase_step_data,
})
sync_job_step.delay_if_needed(
task_id=jobstep.id.hex,
parent_task_id=job.id.hex,
step_id=jobstep.id.hex,
)
phase_steps.add(jobstep)
# capture the log if available
try:
log_artifact = artifacts_by_name[phase_data['log']]
except KeyError:
self.logger.warning('Unable to find logfile for phase: %s', phase_data)
else:
pending_artifacts.remove(log_artifact['fileName'])
self._handle_generic_artifact(
jobstep=jobstep,
artifact=log_artifact,
skip_checks=True,
)
# ideally we don't mark the base step as a failure if any of the phases
# report more correct results
if phases and step.result == Result.failed and any(p.result == Result.failed for p in phases):
step.result = Result.passed
db.session.add(step)
if not pending_artifacts:
return
# all remaining artifacts get bound to the final phase
final_step = sorted(phase_steps, key=lambda x: x.date_finished, reverse=True)[0]
for artifact_name in pending_artifacts:
self._handle_generic_artifact(
jobstep=final_step,
artifact=artifacts_by_name[artifact_name],
)
def _sync_generic_results(self, step, artifacts):
# sync artifacts
self.logger.info('Syncing artifacts for %s', step.id)
for artifact in artifacts:
self._handle_generic_artifact(jobstep=step, artifact=artifact)
def sync_job(self, job):
"""
Steps get created during the create_job and sync_step phases so we only
rely on those steps syncing.
"""
def sync_step(self, step):
if step.data.get('generated'):
return
if step.data.get('queued'):
self._sync_step_from_queue(step)
else:
self._sync_step_from_active(step)
def sync_artifact(self, artifact, skip_checks=False):
if not skip_checks:
if artifact.name.endswith('.log') and not self.sync_log_artifacts:
return
elif artifact.name.endswith(XUNIT_FILENAMES) and not self.sync_xunit_artifacts:
return
elif artifact.name.endswith(COVERAGE_FILENAMES) and not self.sync_coverage_artifacts:
return
elif not self.sync_file_artifacts:
return
if artifact.name.endswith('.log'):
self._sync_artifact_as_log(artifact)
elif artifact.name.endswith(XUNIT_FILENAMES):
self._sync_artifact_as_xunit(artifact)
elif artifact.name.endswith(COVERAGE_FILENAMES):
self._sync_artifact_as_coverage(artifact)
else:
self._sync_artifact_as_file(artifact)
db.session.commit()
def cancel_step(self, step):
if step.data.get('build_no'):
url = '/job/{}/{}/stop/'.format(
step.data['job_name'], step.data['build_no'])
method = 'GET'
else:
url = '/queue/cancelItem?id={}'.format(step.data['item_id'])
method = 'POST'
try:
self._get_raw_response(url, method=method)
except NotFound:
pass
step.status = Status.finished
step.result = Result.aborted
step.date_finished = datetime.utcnow()
db.session.add(step)
def get_job_parameters(self, job, target_id=None):
if target_id is None:
target_id = job.id.hex
params = [
{'name': 'CHANGES_BID', 'value': target_id},
]
if job.build.source.revision_sha:
params.append(
{'name': 'REVISION', 'value': job.build.source.revision_sha},
)
if job.build.source.patch:
params.append(
{
'name': 'PATCH_URL',
'value': build_uri('/api/0/patches/{0}/?raw=1'.format(
job.build.source.patch.id.hex)),
}
)
return params
def create_job_from_params(self, target_id, params, job_name=None):
if job_name is None:
job_name = self.job_name
if not job_name:
raise UnrecoverableException('Missing Jenkins project configuration')
json_data = {
'parameter': params
}
# TODO: Jenkins will return a 302 if it cannot queue the job which I
# believe implies that there is already a job with the same parameters
# queued.
self._get_response('/job/{}/build'.format(job_name), method='POST', data={
'json': json.dumps(json_data),
})
# we retry for a period of time as Jenkins doesn't have strong consistency
# guarantees and the job may not show up right away
t = time.time() + 5
job_data = None
while time.time() < t:
job_data = self._find_job(job_name, target_id)
if job_data:
break
time.sleep(0.3)
if job_data is None:
raise Exception('Unable to find matching job after creation. GLHF')
return job_data
def get_default_job_phase_label(self, job, job_data):
return 'Build {0}'.format(job_data['job_name'])
def create_job(self, job):
"""
Creates a job within Jenkins.
Due to the way the API works, this consists of two steps:
- Submitting the job
- Polling for the newly created job to associate either a queue ID
or a finalized build number.
"""
params = self.get_job_parameters(job)
job_data = self.create_job_from_params(
target_id=job.id.hex,
params=params,
)
if job_data['queued']:
job.status = Status.queued
else:
job.status = Status.in_progress
db.session.add(job)
phase, created = get_or_create(JobPhase, where={
'job': job,
'label': self.get_default_job_phase_label(job, job_data),
'project': job.project,
}, defaults={
'status': job.status,
})
if not created:
return
# TODO(dcramer): due to no unique constraints this section of code
# presents a race condition when run concurrently
step = self._create_job_step(
phase=phase,
status=job.status,
data=job_data,
)
db.session.commit()
sync_job_step.delay(
step_id=step.id.hex,
task_id=step.id.hex,
parent_task_id=job.id.hex,
)
| {
"repo_name": "alex/changes",
"path": "changes/backends/jenkins/builder.py",
"copies": "1",
"size": "29270",
"license": "apache-2.0",
"hash": -584135660296518900,
"line_mean": 33.4758539458,
"line_max": 124,
"alpha_frac": 0.5485138367,
"autogenerated": false,
"ratio": 4.192208536236036,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240722372936036,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import logging
import re
from functools import wraps
from itertools import chain
from textwrap import dedent
import docker
import requests
from requests.exceptions import HTTPError, RequestException
DOCKER_PORT = 2375
DOCKER_VERSION = "1.15"
TIMEOUT = 15
def _ignore_request_exception(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except RequestException as e:
msg = "Error while contacting {}: {}".format(e.request.url, e)
logging.warning(msg)
return wrapper
def _is_no_such_container_exception(http_error):
return (http_error.response.status_code == requests.codes.not_found
and http_error.response.text.startswith("No such container: "))
@_ignore_request_exception
def _find_container_at_host(docker_client, container_id):
try:
info = docker_client.inspect_container(container_id)
except HTTPError as exc:
if not _is_no_such_container_exception(exc):
raise
else:
return info
def _get_repo_tags(docker_client, image_id):
    # Unfortunately, there does not seem to be a better way :(
for image in docker_client.images():
if image["Id"] == image_id:
return image["RepoTags"]
def _indent(lines, indentation):
return indentation + ("\n" + indentation).join(lines.splitlines())
def _pretty_format_ports(ports):
return "\n".join(
"* {IP}:{PublicPort} -> {PrivatePort} ({Type})".format(**port)
for port in ports if "IP" in port
)
def _pretty_format_container(host, container, repo_tags):
msg = dedent("""\
Found container {id} on host {host}:
- Tags: {tags}
- Exposed ports: {ports}""")
if container["Ports"]:
ports = _pretty_format_ports(container["Ports"])
ports = "\n" + _indent(ports, " " * 6)
else:
ports = "(none)"
return msg.format(
id=container["Id"][:12],
host=host,
tags=", ".join(repo_tags),
ports=ports)
def _get_client(host):
base_url = "tcp://{}:{}".format(host, DOCKER_PORT)
return docker.Client(
base_url=base_url,
version=DOCKER_VERSION,
timeout=TIMEOUT)
def _network_settings_to_ports(settings):
for (port_and_type, ports) in settings["Ports"].items():
if ports:
(port, _, port_type) = port_and_type.partition("/")
yield {
"PrivatePort": int(port),
"IP": ports[0]["HostIp"],
"PublicPort": ports[0]["HostPort"],
"Type": port_type,
}
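# Illustrative input/output for the generator above (values hypothetical): a
# NetworkSettings block of
#
#   {"Ports": {"6379/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49153"}],
#              "9000/udp": None}}
#
# yields a single {"PrivatePort": 6379, "IP": "0.0.0.0", "PublicPort": "49153",
# "Type": "tcp"} entry; ports with no host binding are skipped.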
def find_container(executor, host_list, container_id):
def query(host):
docker_client = _get_client(host)
container = _find_container_at_host(docker_client, container_id)
return (host, docker_client, container)
containers = filter(None, executor.map(query, host_list))
for (host, docker_client, container) in containers:
if container is not None:
# "Unify" container model with that of find_containers_by_image
ports = _network_settings_to_ports(container["NetworkSettings"])
container["Ports"] = list(ports)
repo_tags = _get_repo_tags(docker_client, container["Image"])
return _pretty_format_container(host, container, repo_tags)
def _create_image_matcher(image):
escaped_image = re.escape(image)
without_registry_matcher = re.compile(escaped_image + "(:|$)").match
with_registry_matcher = re.compile("/{}(:|$)".format(escaped_image)).search
def matcher(s):
return bool(without_registry_matcher(s) or with_registry_matcher(s))
return matcher
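# Illustrative behaviour of the matcher built above for "redis" (image names
# hypothetical): it accepts "redis", "redis:2.8" and
# "registry.example.com/redis:latest", but rejects "redis-sentinel" and
# "myredis:1", since the name must be followed by ":" or the end of the string
# and, when a registry prefix is present, preceded by "/".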
def find_containers_by_image(executor, host_list, image):
matcher = _create_image_matcher(image)
@_ignore_request_exception
def query(host):
images = []
docker_client = _get_client(host)
for image in docker_client.images():
if any(matcher(tag) for tag in image["RepoTags"]):
images.append(image)
containers_found = []
if images:
for container in docker_client.containers():
for image in images:
if container["Image"] in image["RepoTags"]:
formatted_container = _pretty_format_container(
host, container, image["RepoTags"])
containers_found.append(formatted_container)
return containers_found
containers = filter(None, executor.map(query, host_list))
return list(chain.from_iterable(containers))
def is_container_id(x):
"""Crude heuristic to decide whether the given string is a possible
container ID.
"""
digits = 0
letters = 0
for char in x:
if '0' <= char <= '9':
digits += 1
elif 'a' <= char <= 'f':
letters += 1
else:
return False
return (digits / (letters + digits)) > 0.3
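# Illustrative calls to is_container_id above (values hypothetical):
#
#   is_container_id("3f4e9ab2c1d0")  # -> True: all hex, half the chars are digits
#   is_container_id("redis")         # -> False: 'r' and 's' are not hex digits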
def find_containers(executor, host_list, image_or_container_id):
if is_container_id(image_or_container_id):
container = find_container(executor, host_list, image_or_container_id)
return [container] if container else []
else:
return find_containers_by_image(
executor, host_list, image_or_container_id)
| {
"repo_name": "Trundle/harpoon",
"path": "harpoon/docker.py",
"copies": "1",
"size": "5377",
"license": "mit",
"hash": 1120484453636972700,
"line_mean": 29.7257142857,
"line_max": 79,
"alpha_frac": 0.6062860331,
"autogenerated": false,
"ratio": 3.902031930333817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5008317963433817,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import logging
import re
import time
from record import Record
NULL_BSSID = "00:00:00:00:00:00"
MIN_ACCURACY, MAX_ACCURACY = 0, 20000 # radius in meters
MIN_ALTITUDE, MAX_ALTITUDE = -418, 8848 # meters, Dead Sea, Mount Everest :)
MIN_LATITUDE, MAX_LATITUDE = -90, +90 # degrees
MIN_LONGITUDE, MAX_LONGITUDE = -180, +180 # degrees
MIN_TIMESTAMP = 946684801 # 2000-01-01 00:00:01
def check(desc, val, ok):
if not ok:
raise ValueError("%s: \"%s\"" % (desc, str(val)))
def check_channel(channel):
check("channel", channel, 1 <= channel <= 14 or
36 <= channel <= 679 or
2816 <= channel <= 5580 or
16386 <= channel <= 18432 or
channel == 0)
# https://en.wikipedia.org/wiki/Received_signal_strength_indication
# Typical wireless signal range is -90 to -10 dBm.
def _dbm_from_rssi(rssi):
# Vendor RSSI_Max
# ====== ========
# Cisco 100
# Atheros 60 or 127?
# Symbol 31
# My MBP >= 45?
check("rssi", rssi, 0 <= rssi <= 100)
dbm = -99 + rssi # HACK
check("dbm", dbm, -99 <= dbm <= -10)
return dbm
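# Worked example (added for illustration, not in the original module): an RSSI
# of 30 maps to -99 + 30 = -69 dBm, which is inside the accepted [-99, -10] range.
assert _dbm_from_rssi(30) == -69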
_mac_colon_re = re.compile("""([0-9A-Za-z]{1,2}:){5} # xx:xx:xx:xx:xx:
[0-9A-Za-z]{1,2} # xx """, re.X)
_mac_dash_re = re.compile("""([0-9A-Za-z]{1,2}-){5} # xx-xx-xx-xx-xx-
[0-9A-Za-z]{1,2} # xx """, re.X)
def canonicalize_bssid(mac):
def split_mac(separator):
return tuple([int(xx, 16) for xx in mac.split(separator)])
if _mac_colon_re.match(mac):
bytes = split_mac(":")
elif _mac_dash_re.match(mac):
bytes = split_mac("-")
else:
raise ValueError("Bad BSSID: %s" % mac)
return "%02x:%02x:%02x:%02x:%02x:%02x" % bytes
_SSID_PREFIX_LIST = [
"ASUS",
"AndroidAP",
"AndroidTether",
"Galaxy Note",
"Galaxy S",
"Galaxy Tab",
"HTC ",
"HelloMoto",
"LG VS910 4G",
"MIFI",
"MiFi",
"Mifi",
"MOBILE",
"Mobile",
"PhoneAP",
"SAMSUNG",
"SCH-I",
"SPRINT",
"Samsung",
"Sprint",
"Verizon",
"VirginMobile",
"barnacle", # Android Barnacle Wifi Tether
"docomo",
"hellomoto",
"iPad",
"iPhone",
"ipad",
"mifi",
"mobile",
"myLGNet",
"myTouch 4G Hotspot",
"samsung",
"sprint",
"webOS Network",
# Transportation Wi-Fi
"AIRBUS FREE WIFI",
"AmtrakConnect",
"GBUS",
"GBusWifi",
"SF Shuttle Wireless",
"SST-PR-1", # Sears Home Service van hotspot?!
"Shuttle",
"Trimble ",
"VTA Free Wi-Fi",
"ac_transit_wifi_bus",
"airbusA380",
"amtrak_",
"shuttle",
]
_SSID_SUFFIX_LIST = [
# Mobile devices
" ASUS",
"-ASUS",
"_ASUS",
"MIFI",
"MiFi",
"Mifi",
"MyWi",
" Shuttle",
"Tether",
"iPad",
"iPhone",
"ipad",
"iphone",
"mifi",
"tether",
# Google's SSID opt-out
"_nomap",
]
_SSID_SUBSTRING_LIST = [
"MacBook",
"MiFi",
"Mifi",
]
def _is_mobile_ssid(ssid):
for prefix in _SSID_PREFIX_LIST:
if ssid.startswith(prefix):
return True
for suffix in _SSID_SUFFIX_LIST:
if ssid.endswith(suffix):
return True
for substring in _SSID_SUBSTRING_LIST:
if ssid.find(substring) != -1:
return True
return False
assert _is_mobile_ssid("Steve's iPhone") and _is_mobile_ssid("GBUS Turbo") and not _is_mobile_ssid("not a mobile ssid")
def AP(timestamp, bssid, latitude, longitude, accuracy=None, altitude=None, altitude_accuracy=None, channel=None, signal=None, ssid=""):
#
# Fixup measurements
#
if timestamp < MIN_TIMESTAMP or timestamp > time.time():
timestamp = 0
#
# Validate measurements
#
check("bssid", bssid, bssid == canonicalize_bssid(bssid))
check("latitude", latitude, MIN_LATITUDE <= latitude <= MAX_LATITUDE)
check("longitude", longitude, MIN_LONGITUDE <= longitude <= MAX_LONGITUDE)
check("ssid", ssid, 0 <= len(ssid) <= 32)
if accuracy:
check("accuracy", accuracy, MIN_ACCURACY <= accuracy <= MAX_ACCURACY)
if altitude:
check("altitude", altitude, MIN_ALTITUDE <= altitude <= MAX_ALTITUDE)
if altitude_accuracy:
check("altitude_accuracy", altitude_accuracy, MIN_ACCURACY <= altitude_accuracy <= MAX_ACCURACY)
if channel:
check_channel(channel)
if signal:
check("signal", signal, -120 <= signal <= 0)
#
# Filter out suspicious measurements
#
if ((latitude == 0 and longitude == 0) or
bssid == NULL_BSSID or
_is_mobile_ssid(ssid)):
return None
return Record(timestamp=timestamp,
bssid=bssid,
latitude=latitude,
longitude=longitude,
altitude=altitude,
accuracy=accuracy,
altitude_accuracy=altitude_accuracy,
channel=channel,
signal=signal,
ssid=ssid)
def test_re(r, group_count, tests):
for test in tests:
match = r.match(test)
if match is None:
logging.error(test)
assert False
groups = match.groups()
if len(groups) != group_count:
logging.warning(test)
logging.error(groups)
assert False
def print_ap(ap):
print "%s\t%d\t%f\t%f\t%s\t%s\t%s\t%s\t%s\t%s" % (ap.bssid,
ap.timestamp,
ap.latitude,
ap.longitude,
str(ap.accuracy) if ap.accuracy else "",
str(ap.altitude) if ap.altitude else "",
str(ap.altitude_accuracy) if ap.altitude_accuracy else "",
str(ap.channel) if ap.channel else "",
str(ap.signal) if ap.signal else "",
ap.ssid)
"""
BSSID;LAT;LON;SSID;Crypt;Beacon Interval;Connection Mode;Channel;RXL;Date;Time
"0:12:88:a8:28:69","298883372.481177","37.84261929","-122.24882423","89","0","-1","-1","-1","50"
# Latitude Longitude ( SSID ) Type ( BSSID ) Time (GMT) [ SNR Sig Noise ] # ( Name ) Flags Channelbits BcnIntvl
<gps-point bssid="00:01:E3:D2:F3:1B" source="00:01:E3:D2:F3:19" time-sec="1378290310" time-usec="122203" lat="52.456932" lon="5.876672" spd="4.213000" heading="215.389999" fix="3" alt="1.400000" signal_dbm="-69" noise_dbm="0"/>
N 50.8414667;E 4.3660500;( bombolong );ad-hoc;( 02:02:cf:87:27:b5 );01:00:00 (GMT);[ 76 ];# ( NULL );0002;0002;
bssid lat lon
MAC,SSID,AuthMode,FirstSeen,Channel,RSSI,CurrentLatitude,CurrentLongitude,AltitudeMeters,AccuracyMeters,Type
"""
def print_as_tsv(file, parse_line):
print "# BSSID\tTimestamp\tLatitude\tLongitude\tAccuracy\tAltitude\tAltitude_Accuracy\tChannel\tSignal_dBm\tSSID"
for line in file:
ap = parse_line(line.strip())
if ap is not None:
print_ap(ap)
| {
"repo_name": "cpeterso/stumbler-tsv",
"path": "tsv/wifi.py",
"copies": "1",
"size": "7414",
"license": "mpl-2.0",
"hash": 1310801603094752000,
"line_mean": 28.7751004016,
"line_max": 227,
"alpha_frac": 0.5319665498,
"autogenerated": false,
"ratio": 3.2403846153846154,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4272351165184616,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import logging
import six
from datetime import timedelta
from django.utils import timezone
from sentry.auth.exceptions import IdentityNotValid
from sentry.models import AuthIdentity, OrganizationMember
from sentry.tasks.base import instrumented_task
from sentry.utils import metrics
logger = logging.getLogger("sentry.auth")
AUTH_CHECK_INTERVAL = 3600
@instrumented_task(name="sentry.tasks.check_auth", queue="auth")
def check_auth(**kwargs):
"""
Iterates over all accounts which have not been verified in the required
interval and creates a new job to verify them.
"""
    # TODO(dcramer): we should remove identities if they've been inactive
    # for a reasonable interval
now = timezone.now()
cutoff = now - timedelta(seconds=AUTH_CHECK_INTERVAL)
identity_list = list(AuthIdentity.objects.filter(last_synced__lte=cutoff))
AuthIdentity.objects.filter(id__in=[i.id for i in identity_list]).update(last_synced=now)
for identity in identity_list:
check_auth_identity.apply_async(
kwargs={"auth_identity_id": identity.id}, expires=AUTH_CHECK_INTERVAL
)
@instrumented_task(name="sentry.tasks.check_auth_identity", queue="auth")
def check_auth_identity(auth_identity_id, **kwargs):
try:
auth_identity = AuthIdentity.objects.get(id=auth_identity_id)
except AuthIdentity.DoesNotExist:
logger.warning("AuthIdentity(id=%s) does not exist", auth_identity_id)
return
auth_provider = auth_identity.auth_provider
try:
om = OrganizationMember.objects.get(
user=auth_identity.user, organization=auth_provider.organization_id
)
except OrganizationMember.DoesNotExist:
logger.warning(
"Removing invalid AuthIdentity(id=%s) due to no organization access", auth_identity_id
)
auth_identity.delete()
return
prev_is_valid = not getattr(om.flags, "sso:invalid")
provider = auth_provider.get_provider()
try:
provider.refresh_identity(auth_identity)
except IdentityNotValid as exc:
if prev_is_valid:
logger.warning(
u"AuthIdentity(id=%s) notified as not valid: %s",
auth_identity_id,
six.text_type(exc),
exc_info=True,
)
metrics.incr("auth.identities.invalidated", skip_internal=False)
is_linked = False
is_valid = False
except Exception as exc:
# to ensure security we count any kind of error as an invalidation
# event
metrics.incr("auth.identities.refresh_error", skip_internal=False)
logger.exception(
u"AuthIdentity(id=%s) returned an error during validation: %s",
auth_identity_id,
six.text_type(exc),
)
is_linked = True
is_valid = False
else:
is_linked = True
is_valid = True
if getattr(om.flags, "sso:linked") != is_linked:
setattr(om.flags, "sso:linked", is_linked)
setattr(om.flags, "sso:invalid", not is_valid)
om.update(flags=om.flags)
now = timezone.now()
auth_identity.update(last_verified=now, last_synced=now)
| {
"repo_name": "mvaled/sentry",
"path": "src/sentry/tasks/check_auth.py",
"copies": "3",
"size": "3250",
"license": "bsd-3-clause",
"hash": -2367514026526764000,
"line_mean": 33.2105263158,
"line_max": 98,
"alpha_frac": 0.6593846154,
"autogenerated": false,
"ratio": 3.958587088915956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6117971704315956,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import logging
from lxml import etree
from changes.constants import Result
from changes.models import TestResult, TestResultManager
from .base import ArtifactHandler
class XunitHandler(ArtifactHandler):
logger = logging.getLogger('xunit')
def process(self, fp):
test_list = self.get_tests(fp)
manager = TestResultManager(self.step)
manager.save(test_list)
return test_list
def get_tests(self, fp):
try:
root = etree.fromstring(fp.read())
except Exception:
self.logger.exception('Failed to parse XML')
return []
if root.tag == 'unittest-results':
return self.get_bitten_tests(root)
return self.get_xunit_tests(root)
def get_bitten_tests(self, root):
step = self.step
results = []
        # XXX(dcramer): bitten xml syntax; no clue what this is
for node in root.iter('test'):
# classname, name, time
attrs = dict(node.items())
# AFAIK the spec says only one tag can be present
# http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
if attrs['status'] == 'success':
result = Result.passed
elif attrs['status'] == 'skipped':
result = Result.skipped
elif attrs['status'] in ('error', 'failure'):
result = Result.failed
else:
result = None
try:
message = list(node.iter('traceback'))[0].text
except IndexError:
message = ''
# no matching status tags were found
if result is None:
result = Result.passed
results.append(TestResult(
step=step,
name=attrs['name'],
package=attrs.get('fixture') or None,
duration=float(attrs['duration']) * 1000,
result=result,
message=message,
))
return results
def get_xunit_tests(self, root):
step = self.step
results = []
for node in root.iter('testcase'):
# classname, name, time
attrs = dict(node.items())
# AFAIK the spec says only one tag can be present
# http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
try:
r_node = list(node.iterchildren())[0]
except IndexError:
result = Result.passed
message = ''
else:
# TODO(cramer): whitelist tags that are not statuses
if r_node.tag == 'failure':
result = Result.failed
elif r_node.tag == 'skipped':
result = Result.skipped
elif r_node.tag == 'error':
result = Result.failed
else:
result = None
message = r_node.text
# no matching status tags were found
if result is None:
result = Result.passed
if attrs.get('time'):
duration = float(attrs['time']) * 1000
else:
duration = None
results.append(TestResult(
step=step,
name=attrs['name'],
package=attrs.get('classname') or None,
duration=duration,
result=result,
message=message,
reruns=int(attrs.get('rerun')) if attrs.get('rerun') else None
))
return results
| {
"repo_name": "alex/changes",
"path": "changes/artifacts/xunit.py",
"copies": "1",
"size": "3665",
"license": "apache-2.0",
"hash": 1408055693619089400,
"line_mean": 29.2892561983,
"line_max": 78,
"alpha_frac": 0.5091405184,
"autogenerated": false,
"ratio": 4.686700767263427,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5695841285663427,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import math
import numpy as np
import cudamat as cm
from ._base import LayerBase
from .._neural_net_exception import NeuralNetException
class FullyConnectedLayer(LayerBase):
def __init__(self, level, size, activation_func,
sigma='c', use_bias=True, **kwargs):
self.level = level
self.size = size
self.activation_func = activation_func
self.use_bias = use_bias
self.sigma = sigma
self.use_momentum = kwargs.get('use_momentum', False)
if self.use_momentum:
self.momentum = kwargs.get('momentum', 0.9)
self.use_rmsprop = kwargs.get('use_rmsprop', False)
if self.use_rmsprop:
self.rmsprop_dr = kwargs.get('rmsprop_dr', 0.99)
if self.use_momentum and self.use_rmsprop:
raise NeuralNetException('Cannot use momentum and rmsprop together.')
self.use_dropout = kwargs.get('use_dropout', False)
if self.use_dropout:
self.dropout_p = kwargs.get('dropout_p', 0.5)
self.use_max_norm = kwargs.get('use_max_norm', False)
if self.use_max_norm:
self.max_norm_c = kwargs.get('max_norm_c', 4.0)
def set_next_layer_size(self, next_size):
self.next_size = next_size
def init(self, batch_size):
# Weights.
self._init_weights()
# Bias.
if self.use_bias:
self._init_bias()
# Propagation.
self._init_params(batch_size)
# Dropout mask.
if self.use_dropout:
self._init_dropout_mask(batch_size)
# Max norm params.
if self.use_max_norm:
self._init_max_norm_params()
def _init_weights(self):
var = math.sqrt(2.0/self.size) if self.sigma == 'c' else self.sigma
self.weights = cm.CUDAMatrix(
var * np.random.randn(self.size, self.next_size)
)
self.weights_grad = cm.empty(self.weights.shape)
if self.use_momentum:
self.weights_update = \
cm.CUDAMatrix(np.zeros(self.weights_grad.shape))
if self.use_rmsprop:
self.weights_rmsprop_cache = \
cm.CUDAMatrix(np.zeros(self.weights_grad.shape))
self.weights_grad_square = \
cm.CUDAMatrix(np.zeros(self.weights_grad.shape))
def _init_bias(self):
assert self.use_bias
self.biases = cm.CUDAMatrix(
np.zeros((1, self.next_size))
)
self.active_biases = cm.empty(self.biases.shape)
self.biases_grad = cm.empty(self.biases.shape)
if self.use_momentum:
self.biases_update = \
cm.CUDAMatrix(np.zeros(self.biases_grad.shape))
if self.use_rmsprop:
self.biases_rmsprop_cache = \
cm.CUDAMatrix(np.zeros(self.biases_grad.shape))
self.biases_grad_square = \
cm.CUDAMatrix(np.zeros(self.biases_grad.shape))
def _init_params(self, batch_size):
self.next_z = cm.empty((batch_size, self.next_size))
self.next_single_z = cm.empty((1, self.next_size))
if self.level != 1:
self.my_delta = cm.empty((batch_size, self.size))
else:
self.my_delta = None
def _init_dropout_mask(self, batch_size):
assert self.use_dropout
self.dropout_mask = cm.empty((batch_size, self.size))
def _init_max_norm_params(self):
assert self.use_max_norm
self.weights_square = cm.empty(self.weights.shape)
self.weights_factor = cm.empty((1, self.next_size))
self.weights_factor_mask = cm.empty((1, self.next_size))
def forward_p(self, z, predict=False):
self.z = z
self.activation_func.apply(self.z)
# Dropout regularization.
if self.use_dropout and (not predict):
self.dropout_mask\
.fill_with_rand()\
.less_than(self.dropout_p)\
.divide(self.dropout_p)
self.z.mult(self.dropout_mask)
cm.dot(self.z, self.weights, self.next_z)
if self.use_bias:
self.biases.mult(
self.activation_func.apply_scalar(1),
self.active_biases
)
self.next_z.add_row_vec(self.active_biases)
return self.next_z
def forward_p_single(self, single_z):
self.single_z = single_z
self.activation_func.apply(self.single_z)
cm.dot(self.single_z, self.weights, self.next_single_z)
if self.use_bias:
self.biases.mult(
self.activation_func.apply_scalar(1),
self.active_biases
)
self.next_single_z.add_row_vec(self.active_biases)
return self.next_single_z
def backward_p(self, next_delta):
# Compute weights grad.
cm.dot(self.z.T, next_delta, self.weights_grad)
# Compute biases grad.
if self.use_bias:
next_delta.sum(0, self.biases_grad)
if self.level != 1:
cm.dot(next_delta, self.weights.T, self.my_delta)
self.activation_func.mult_with_derivative(self.my_delta, self.z)
return self.my_delta
def update(self, lr):
if self.use_momentum:
self.weights_update.mult(self.momentum)
self.weights_update.subtract_mult(self.weights_grad, lr)
self.weights.add(self.weights_update)
if self.use_bias:
self.biases_update.mult(self.momentum)
self.biases_update.subtract_mult(self.biases_grad, lr)
self.biases.add(self.biases_update)
elif self.use_rmsprop:
self.weights_rmsprop_cache.mult(self.rmsprop_dr)
cm.pow(self.weights_grad, self.weights_grad_square)
self.weights_grad_square.mult(1.0 - self.rmsprop_dr)
self.weights_rmsprop_cache.add(self.weights_grad_square)
self.weights_rmsprop_cache.add(1e-8)
cm.sqrt(self.weights_rmsprop_cache)
self.weights_grad.mult(lr).divide(self.weights_rmsprop_cache)
self.weights.subtract(self.weights_grad)
self.biases_rmsprop_cache.mult(self.rmsprop_dr)
cm.pow(self.biases_grad, self.biases_grad_square)
self.biases_grad_square.mult(1.0 - self.rmsprop_dr)
self.biases_rmsprop_cache.add(self.biases_grad_square)
self.biases_rmsprop_cache.add(1e-8)
cm.sqrt(self.biases_rmsprop_cache)
self.biases_grad.mult(lr).divide(self.biases_rmsprop_cache)
self.biases.subtract(self.biases_grad)
else:
self.weights.subtract_mult(self.weights_grad, lr)
if self.use_bias:
self.biases.subtract_mult(self.biases_grad, lr)
# Max-norm regularization.
if self.use_max_norm:
cm.pow(self.weights, 2, self.weights_square)
self.weights_square.sum(0, self.weights_factor)
cm.sqrt(self.weights_factor, self.weights_factor)
# Avoid zero weight mags.
self.weights_factor.add(1e-8)
self.weights_factor.reciprocal().mult(self.max_norm_c)
            # Keep only factors below 1.0; columns already within the norm limit need no scaling.
self.weights_factor.less_than(1.0, self.weights_factor_mask)
self.weights_factor.mult(self.weights_factor_mask)
            # Change zeroed entries to 1.0 so those columns stay unscaled.
self.weights_factor_mask.less_than(1.0)
self.weights_factor.add(self.weights_factor_mask)
            # Downscale oversized weights.
self.weights.mult_by_row(self.weights_factor)
def dump_params(self):
del self.z
del self.single_z
# Weights.
self._dump_np('weights')
self._dump_np('weights_grad')
if self.use_momentum:
self._dump_np('weights_update')
if self.use_rmsprop:
self._dump_np('weights_grad_square')
self._dump_np('weights_rmsprop_cache')
# Biases.
if self.use_bias:
self._dump_np('biases')
self._dump_np('active_biases')
self._dump_np('biases_grad')
if self.use_momentum:
self._dump_np('biases_update')
if self.use_rmsprop:
self._dump_np('biases_grad_square')
self._dump_np('biases_rmsprop_cache')
# Params.
self._dump_np('next_z')
self._dump_np('next_single_z')
if self.level != 1:
self._dump_np('my_delta')
# Dropout mask.
if self.use_dropout:
self._dump_np('dropout_mask')
# Max-norm.
if self.use_max_norm:
self._dump_np('weights_square')
self._dump_np('weights_factor')
self._dump_np('weights_factor_mask')
def load_params(self):
# Weights.
self._load_np('weights')
self._load_np('weights_grad')
if self.use_momentum:
self._load_np('weights_update')
# Backward compatibility.
if hasattr(self, 'use_rmsprop') and self.use_rmsprop:
self._load_np('weights_grad_square')
self._load_np('weights_rmsprop_cache')
# Biases.
if self.use_bias:
self._load_np('biases')
self._load_np('active_biases')
self._load_np('biases_grad')
if self.use_momentum:
self._load_np('biases_update')
# Backward compatibility.
if hasattr(self, 'use_rmsprop') and self.use_rmsprop:
self._load_np('biases_grad_square')
self._load_np('biases_rmsprop_cache')
# Params.
self._load_np('next_z')
self._load_np('next_single_z')
if self.level != 1:
self._load_np('my_delta')
# Dropout mask.
if self.use_dropout:
self._load_np('dropout_mask')
# Max-norm.
if self.use_max_norm:
self._load_np('weights_square')
self._load_np('weights_factor')
self._load_np('weights_factor_mask')
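# --- illustrative addendum, not part of the original artifact ---
# Plain-NumPy sketches of the two update rules implemented above with
# cudamat, to make the arithmetic easier to follow. Names and defaults are
# illustrative only; no GPU state is touched here.
def _example_momentum_step(w, grad, velocity, lr, momentum=0.9):
    # velocity <- momentum * velocity - lr * grad ; w <- w + velocity
    velocity = momentum * velocity - lr * grad
    return w + velocity, velocity
def _example_rmsprop_step(w, grad, cache, lr, decay=0.99, eps=1e-8):
    # cache <- decay * cache + (1 - decay) * grad**2
    # w <- w - lr * grad / sqrt(cache + eps)
    cache = decay * cache + (1.0 - decay) * grad ** 2
    return w - lr * grad / np.sqrt(cache + eps), cache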
| {
"repo_name": "zhaoyan1117/NeuralNet",
"path": "nnet/layer/_fully_connected_layer.py",
"copies": "1",
"size": "10157",
"license": "bsd-2-clause",
"hash": -2613913611071390000,
"line_mean": 33.3141891892,
"line_max": 81,
"alpha_frac": 0.5687703062,
"autogenerated": false,
"ratio": 3.5513986013986014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46201689075986013,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import mock
from treq.test.util import TestCase
import treq
from treq._utils import set_global_pool
class TreqAPITests(TestCase):
def setUp(self):
set_global_pool(None)
agent_patcher = mock.patch('treq.api.Agent')
self.Agent = agent_patcher.start()
self.addCleanup(agent_patcher.stop)
client_patcher = mock.patch('treq.api.HTTPClient')
self.HTTPClient = client_patcher.start()
self.addCleanup(client_patcher.stop)
pool_patcher = mock.patch('treq._utils.HTTPConnectionPool')
self.HTTPConnectionPool = pool_patcher.start()
self.addCleanup(pool_patcher.stop)
self.client = self.HTTPClient.return_value
def test_default_pool(self):
resp = treq.get('http://test.com')
self.Agent.assert_called_once_with(
mock.ANY,
pool=self.HTTPConnectionPool.return_value
)
self.assertEqual(self.client.get.return_value, resp)
def test_cached_pool(self):
pool = self.HTTPConnectionPool.return_value
treq.get('http://test.com')
self.HTTPConnectionPool.return_value = mock.Mock()
treq.get('http://test.com')
self.Agent.assert_called_with(mock.ANY, pool=pool)
def test_custom_agent(self):
"""
A custom Agent is used if specified.
"""
custom_agent = mock.Mock()
treq.get('https://www.example.org/', agent=custom_agent)
self.HTTPClient.assert_called_once_with(custom_agent)
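# --- illustrative addendum, not part of the original test module ---
# A minimal, standalone sketch of the patch-and-addCleanup pattern used in
# setUp above, applied to a stdlib function so it runs without treq or
# twisted. The patched target and the values are made up for illustration.
import os
import unittest
class PatchCleanupExample(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch('os.getcwd')
        self.getcwd = patcher.start()
        self.addCleanup(patcher.stop)  # undo the patch even if the test fails
    def test_patched_value_is_returned(self):
        self.getcwd.return_value = '/illustrative/path'
        self.assertEqual(os.getcwd(), '/illustrative/path')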
| {
"repo_name": "mithrandi/treq",
"path": "src/treq/test/test_api.py",
"copies": "3",
"size": "1562",
"license": "mit",
"hash": -2008709315475113700,
"line_mean": 27.4,
"line_max": 67,
"alpha_frac": 0.6434058899,
"autogenerated": false,
"ratio": 3.763855421686747,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5907261311586747,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import mock
from twisted.trial.unittest import TestCase
import treq
from treq._utils import set_global_pool
class TreqAPITests(TestCase):
def setUp(self):
set_global_pool(None)
agent_patcher = mock.patch('treq.api.Agent')
self.Agent = agent_patcher.start()
self.addCleanup(agent_patcher.stop)
client_patcher = mock.patch('treq.api.HTTPClient')
self.HTTPClient = client_patcher.start()
self.addCleanup(client_patcher.stop)
pool_patcher = mock.patch('treq._utils.HTTPConnectionPool')
self.HTTPConnectionPool = pool_patcher.start()
self.addCleanup(pool_patcher.stop)
self.client = self.HTTPClient.return_value
def test_default_pool(self):
resp = treq.get('http://test.com')
self.Agent.assert_called_once_with(
mock.ANY,
pool=self.HTTPConnectionPool.return_value
)
self.assertEqual(self.client.get.return_value, resp)
def test_cached_pool(self):
pool = self.HTTPConnectionPool.return_value
treq.get('http://test.com')
self.HTTPConnectionPool.return_value = mock.Mock()
treq.get('http://test.com')
self.Agent.assert_called_with(mock.ANY, pool=pool)
def test_custom_agent(self):
"""
A custom Agent is used if specified.
"""
custom_agent = mock.Mock()
treq.get('https://www.example.org/', agent=custom_agent)
self.HTTPClient.assert_called_once_with(custom_agent)
| {
"repo_name": "pexip/os-python-treq",
"path": "src/treq/test/test_api.py",
"copies": "2",
"size": "1571",
"license": "mit",
"hash": 3594283512873735000,
"line_mean": 27.0535714286,
"line_max": 67,
"alpha_frac": 0.6448122215,
"autogenerated": false,
"ratio": 3.7855421686746986,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
from __future__ import absolute_import, division
import numpy as np
from scipy.ndimage.interpolation import map_coordinates as sp_map_coordinates
import tensorflow as tf
def tf_flatten(a):
"""Flatten tensor"""
return tf.reshape(a, [-1])
def tf_repeat(a, repeats, axis=0):
"""TensorFlow version of np.repeat for 1D"""
# https://github.com/tensorflow/tensorflow/issues/8521
assert len(a.get_shape()) == 1
a = tf.expand_dims(a, -1)
a = tf.tile(a, [1, repeats])
a = tf_flatten(a)
return a
def tf_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.get_shape()) == 2
a = tf.expand_dims(a, 0)
a = tf.tile(a, [repeats, 1, 1])
return a
def tf_map_coordinates(input, coords, order=1):
"""Tensorflow verion of scipy.ndimage.map_coordinates
Note that coords is transposed and only 2D is supported
Parameters
----------
input : tf.Tensor. shape = (s, s)
coords : tf.Tensor. shape = (n_points, 2)
"""
assert order == 1
coords_lt = tf.cast(tf.floor(coords), 'int32')
coords_rb = tf.cast(tf.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[:, 0], coords_rb[:, 1]], axis=1)
coords_rt = tf.stack([coords_rb[:, 0], coords_lt[:, 1]], axis=1)
vals_lt = tf.gather_nd(input, coords_lt)
vals_rb = tf.gather_nd(input, coords_rb)
vals_lb = tf.gather_nd(input, coords_lb)
vals_rt = tf.gather_nd(input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
def sp_batch_map_coordinates(inputs, coords):
"""Reference implementation for batch_map_coordinates"""
coords = coords.clip(0, inputs.shape[1] - 1)
mapped_vals = np.array([
sp_map_coordinates(input, coord.T, mode='nearest', order=1)
for input, coord in zip(inputs, coords)
])
return mapped_vals
def tf_batch_map_coordinates(input, coords, order=1):
"""Batch version of tf_map_coordinates
Only supports 2D feature maps
Parameters
----------
input : tf.Tensor. shape = (b, s, s)
coords : tf.Tensor. shape = (b, n_points, 2)
Returns
-------
    tf.Tensor. shape = (b, n_points)
"""
input_shape = tf.shape(input)
batch_size = input_shape[0]
input_size = input_shape[1]
n_coords = tf.shape(coords)[1]
coords = tf.clip_by_value(coords, 0, tf.cast(input_size, 'float32') - 1)
coords_lt = tf.cast(tf.floor(coords), 'int32')
coords_rb = tf.cast(tf.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1)
coords_rt = tf.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1)
idx = tf_repeat(tf.range(batch_size), n_coords)
def _get_vals_by_coords(input, coords):
indices = tf.stack([
idx, tf_flatten(coords[..., 0]), tf_flatten(coords[..., 1])
], axis=-1)
vals = tf.gather_nd(input, indices)
vals = tf.reshape(vals, (batch_size, n_coords))
return vals
vals_lt = _get_vals_by_coords(input, coords_lt)
vals_rb = _get_vals_by_coords(input, coords_rb)
vals_lb = _get_vals_by_coords(input, coords_lb)
vals_rt = _get_vals_by_coords(input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[..., 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[..., 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[..., 1]
return mapped_vals
def sp_batch_map_offsets(input, offsets):
"""Reference implementation for tf_batch_map_offsets"""
batch_size = input.shape[0]
input_size = input.shape[1]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_size, :input_size], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
coords = coords.clip(0, input_size - 1)
mapped_vals = sp_batch_map_coordinates(input, coords)
return mapped_vals
def tf_batch_map_offsets(input, offsets, order=1):
"""Batch map offsets into input
Parameters
    ----------
input : tf.Tensor. shape = (b, s, s)
offsets: tf.Tensor. shape = (b, s, s, 2)
Returns
-------
    tf.Tensor. shape = (b, s * s)
"""
input_shape = tf.shape(input)
batch_size = input_shape[0]
input_size = input_shape[1]
offsets = tf.reshape(offsets, (batch_size, -1, 2))
grid = tf.meshgrid(
tf.range(input_size), tf.range(input_size), indexing='ij'
)
grid = tf.stack(grid, axis=-1)
grid = tf.cast(grid, 'float32')
grid = tf.reshape(grid, (-1, 2))
grid = tf_repeat_2d(grid, batch_size)
coords = offsets + grid
mapped_vals = tf_batch_map_coordinates(input, coords)
return mapped_vals
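# --- illustrative addendum, not part of the original artifact ---
# A tiny usage sketch of the NumPy/SciPy reference path above, with made-up
# shapes: with all-zero offsets the sampling grid is the identity, so the
# output simply equals the input flattened per image. The TensorFlow
# versions follow the same (b, n_points, 2) coordinate convention.
def _example_reference_usage():
    batch, size = 2, 4
    inputs = np.random.rand(batch, size, size)
    offsets = np.zeros((batch, size, size, 2))
    out = sp_batch_map_offsets(inputs, offsets)   # shape (batch, size * size)
    return np.allclose(out, inputs.reshape(batch, -1))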
| {
"repo_name": "andrewv587/pycharm-project",
"path": "deform-conv-dir/deform_conv.py",
"copies": "1",
"size": "4992",
"license": "apache-2.0",
"hash": 5017687186991629000,
"line_mean": 28.8922155689,
"line_max": 77,
"alpha_frac": 0.6061698718,
"autogenerated": false,
"ratio": 3.0701107011070112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9176280572907012,
"avg_score": 0,
"num_lines": 167
} |
from __future__ import absolute_import, division
import numpy as np
from shapely.geometry import box, Polygon
def center_error(rects1, rects2):
r"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
def normalized_center_error(rects1, rects2):
r"""Center error normalized by the size of ground truth.
Args:
rects1 (numpy.ndarray): prediction box. An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
        rects2 (numpy.ndarray): ground truth box. An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power((centers1 - centers2)/np.maximum(np.array([[1.,1.]]), rects2[:, 2:]), 2), axis=-1))
return errors
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
(min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
def poly_iou(polys1, polys2, bound=None):
r"""Intersection over union of polygons.
Args:
polys1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height); or an N x 8 numpy array, each line represent
the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
polys2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height); or an N x 8 numpy array, each line represent
the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
bound (numpy.ndarray, optional): A 2 dimensional array, denotes the image bound
(width, height) for ``rects1`` and ``rects2``.
"""
assert polys1.ndim in [1, 2]
if polys1.ndim == 1:
polys1 = np.array([polys1])
polys2 = np.array([polys2])
assert len(polys1) == len(polys2)
polys1 = _to_polygon(polys1)
polys2 = _to_polygon(polys2)
if bound is not None:
bound = box(0, 0, bound[0], bound[1])
polys1 = [p.intersection(bound) for p in polys1]
polys2 = [p.intersection(bound) for p in polys2]
eps = np.finfo(float).eps
ious = []
for poly1, poly2 in zip(polys1, polys2):
area_inter = poly1.intersection(poly2).area
area_union = poly1.union(poly2).area
ious.append(area_inter / (area_union + eps))
ious = np.clip(ious, 0.0, 1.0)
return ious
def _to_polygon(polys):
r"""Convert 4 or 8 dimensional array to Polygons
Args:
polys (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height); or an N x 8 numpy array, each line represent
the coordinates (x1, y1, x2, y2, x3, y3, x4, y4) of 4 corners.
"""
def to_polygon(x):
assert len(x) in [4, 8]
if len(x) == 4:
return box(x[0], x[1], x[0] + x[2], x[1] + x[3])
elif len(x) == 8:
return Polygon([(x[2 * i], x[2 * i + 1]) for i in range(4)])
if polys.ndim == 1:
return to_polygon(polys)
else:
return [to_polygon(t) for t in polys]
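# --- illustrative addendum, not part of the original artifact ---
# A small usage sketch of the rectangle metrics above with made-up boxes in
# (left, top, width, height) format; identical boxes give IoU 1.0 and a
# center error of 0.
def _example_metric_usage():
    pred = np.array([[10., 10., 20., 20.],
                     [ 0.,  0., 10., 10.]])
    gt = np.array([[10., 10., 20., 20.],
                   [ 5.,  5., 10., 10.]])
    errors = center_error(pred, gt)              # pixel distance between centers
    norm_errors = normalized_center_error(pred, gt)
    ious = rect_iou(pred, gt)                    # roughly array([1.0, 0.143])
    return errors, norm_errors, ious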
| {
"repo_name": "got-10k/toolkit",
"path": "got10k/utils/metrics.py",
"copies": "1",
"size": "5804",
"license": "mit",
"hash": 3400233062668201500,
"line_mean": 36.6883116883,
"line_max": 120,
"alpha_frac": 0.5689179876,
"autogenerated": false,
"ratio": 3.127155172413793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4196073160013793,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import numpy as np
import catboost
from eli5.explain import explain_weights
from eli5._feature_importances import get_feature_importance_explanation
DESCRIPTION_CATBOOST = """CatBoost feature importances;
values are numbers 0 <= x <= 1; all values sum to 1."""
@explain_weights.register(catboost.CatBoost)
@explain_weights.register(catboost.CatBoostClassifier)
@explain_weights.register(catboost.CatBoostRegressor)
def explain_weights_catboost(catb,
vec=None,
top=20,
importance_type='PredictionValuesChange',
feature_names=None,
pool=None
):
"""
Return an explanation of an CatBoost estimator (CatBoostClassifier,
CatBoost, CatBoostRegressor) as feature importances.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``,
``feature_re`` and ``feature_filter`` parameters.
``target_names`` and ``targets`` parameters are ignored.
Parameters
----------
:param 'importance_type' : str, optional
A way to get feature importance. Possible values are:
- 'PredictionValuesChange' (default) - The individual importance
values for each of the input features.
- 'LossFunctionChange' - The individual importance values for
each of the input features for ranking metrics
(requires training data to be passed or a similar dataset with Pool)
:param 'pool' : catboost.Pool, optional
To be passed if explain_weights_catboost has importance_type set
to LossFunctionChange. The catboost feature_importances uses the Pool
datatype to calculate the parameter for the specific importance_type.
"""
is_regression = _is_regression(catb)
catb_feature_names = catb.feature_names_
coef = _catb_feature_importance(catb, importance_type=importance_type, pool=pool)
return get_feature_importance_explanation(catb, vec, coef,
feature_names=feature_names,
estimator_feature_names=catb_feature_names,
feature_filter=None,
feature_re=None,
top=top,
description=DESCRIPTION_CATBOOST,
is_regression=is_regression,
num_features=coef.shape[-1]
)
def _is_regression(catb):
return isinstance(catb, catboost.CatBoostRegressor)
def _catb_feature_importance(catb, importance_type, pool=None):
if importance_type == "PredictionValuesChange":
fs = catb.get_feature_importance(type=importance_type)
elif importance_type == "LossFunctionChange":
if isinstance(pool, catboost.Pool):
fs = catb.get_feature_importance(data=pool, type=importance_type)
else:
raise ValueError(
'importance_type: "LossFunctionChange" requires catboost.Pool '
'datatype to be passed with parameter pool to calculate '
'metric. Either no datatype or invalid datatype was passed'
)
else:
raise ValueError(
'Only two importance_type "PredictionValuesChange" '
'and "LossFunctionChange" are supported. Invalid Parameter '
'{} for importance_type'.format(importance_type))
all_features = np.array(fs, dtype=np.float32)
    return all_features/all_features.sum()
| {
"repo_name": "TeamHG-Memex/eli5",
"path": "eli5/catboost.py",
"copies": "1",
"size": "3786",
"license": "mit",
"hash": -139734811684731220,
"line_mean": 43.5529411765,
"line_max": 89,
"alpha_frac": 0.5969360803,
"autogenerated": false,
"ratio": 4.605839416058394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026494530900895647,
"num_lines": 85
} |
from __future__ import absolute_import, division
import numpy as np
from ._base import PreprocessingBase
class NormalizeRmZeroStd(PreprocessingBase):
"""
    Not exactly a PCA; it only removes features whose std is 0.
"""
def fit(self, X):
self.means = np.empty((0,))
self.stds = np.empty((0,))
transformed = np.empty(X.shape)
transformed_i = 0
for c in xrange(X.shape[1]):
col = X[:,c]
mean_col = np.mean(col)
std_col = np.std(col)
self.means = np.append(self.means, mean_col)
self.stds = np.append(self.stds, std_col)
if std_col != 0.0:
col -= mean_col
col /= std_col
transformed[:, transformed_i] = col
transformed_i += 1
self.num_features = transformed_i
return transformed[:,:transformed_i]
def transform(self, X):
if len(X) == 0:
# TODO: handle edge case nicely.
return np.zeros((len(X), np.count_nonzero(self.stds)))
transformed = np.empty(X.shape)
transformed_i = 0
non_zero_counter = 0
for c in xrange(X.shape[1]):
col = X[:,c]
mean_col = self.means[c]
std_col = self.stds[c]
x_std_col = np.std(col)
if x_std_col != 0 and std_col == 0:
non_zero_counter += 1
if std_col != 0.0:
col -= mean_col
col /= std_col
transformed[:, transformed_i] = col
transformed_i += 1
if non_zero_counter != 0:
            print '{0} features with non-zero std in this data ' \
                  'were removed during this transform because their std was zero at fit time.'.format(non_zero_counter)
assert transformed_i == np.count_nonzero(self.stds)
return transformed[:,:transformed_i]
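# --- illustrative addendum, not part of the original artifact ---
# A vectorised NumPy sketch of the same idea as the class above: standardise
# each column and drop the columns whose std is zero in the fitted data.
# Names are illustrative only.
def _example_drop_zero_std_fit(X):
    means = X.mean(axis=0)
    stds = X.std(axis=0)
    keep = stds != 0.0
    transformed = (X[:, keep] - means[keep]) / stds[keep]
    return transformed, means, stds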
| {
"repo_name": "zhaoyan1117/NeuralNet",
"path": "nnet/preprocessing/_normalize_rm_zero_std.py",
"copies": "1",
"size": "1873",
"license": "bsd-2-clause",
"hash": -8554707934880720000,
"line_mean": 28.7301587302,
"line_max": 79,
"alpha_frac": 0.5125467165,
"autogenerated": false,
"ratio": 3.8302658486707566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48428125651707565,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import numpy as np
from ._base import PreprocessingBase
class Normalize(PreprocessingBase):
def fit(self, X):
self.means = np.empty((0,))
self.stds = np.empty((0,))
transformed = np.empty(X.shape)
for c in xrange(X.shape[1]):
transformed[:,c] = X[:,c]
mean_col = np.mean(transformed[:,c])
std_col = np.std(transformed[:,c])
self.means = np.append(self.means, mean_col)
self.stds = np.append(self.stds, std_col)
transformed[:,c] -= mean_col
if std_col != 0.0:
transformed[:,c] /= std_col
return transformed
def transform(self, X):
transformed = np.empty(X.shape)
for c in xrange(X.shape[1]):
transformed[:,c] = X[:,c]
mean_col = self.means[c]
std_col = self.stds[c]
transformed[:,c] -= mean_col
if std_col != 0.0:
transformed[:,c] /= std_col
return transformed
| {
"repo_name": "zhaoyan1117/NeuralNet",
"path": "nnet/preprocessing/_normalize.py",
"copies": "1",
"size": "1066",
"license": "bsd-2-clause",
"hash": 4449069613367455000,
"line_mean": 27.8108108108,
"line_max": 56,
"alpha_frac": 0.5262664165,
"autogenerated": false,
"ratio": 3.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9707959840222993,
"avg_score": 0.006518458112544522,
"num_lines": 37
} |
from __future__ import absolute_import, division
import numpy as np
from ..logger import msg
from ..constants import ZGLOBAL
def add_k0s(k0, mesh, prop_from_node, alpha, maxl_from_area, silent=True):
msg('Adding K0s to K0...', silent=silent)
dof = 5
for tria in mesh.elements.values():
# n1 -> n2 -> n3 -> n1
n1, n2, n3 = tria.nodes
if np.dot(np.cross((n2.xyz - n1.xyz), (n3.xyz - n1.xyz)), ZGLOBAL) < 0:
n1, n2, n3 = n2, n1, n3
Ac = tria.get_area()
x1, y1, z1 = n1.xyz
x2, y2, z2 = n2.xyz
x3, y3, z3 = n3.xyz
a1 = x2 - x1
b1 = y2 - y1
c1 = x3 - x1
d1 = y3 - y1
if prop_from_node:
pn1 = n1.prop
pn2 = n2.prop
pn3 = n3.prop
k13 = 1/3*pn1.scf_k13 + 1/3*pn2.scf_k13 + 1/3*pn3.scf_k13
k23 = 1/3*pn1.scf_k23 + 1/3*pn2.scf_k23 + 1/3*pn3.scf_k23
E = 1/3*pn1.E + 1/3*pn2.E + 1/3*pn3.E
h = 1/3*pn1.h + 1/3*pn2.h + 1/3*pn3.h
else:
k13 = tria.prop.scf_k13
k23 = tria.prop.scf_k23
E = tria.prop.E
h = tria.prop.h
E44 = k13 * E[0, 0]
E45 = 0 # min(k13, k23) * E[0, 1]
E55 = k23 * E[1, 1]
if maxl_from_area:
maxl = Ac**0.5
else:
maxl = max([np.sum((e.n1.xyz - e.n2.xyz)**2)**0.5 for e in tria.edges])
factor = alpha*maxl**2/h**2
E44 = 1 / (1 + factor) * E44
# E45 = 1 / (1 + factor) * E45
E55 = 1 / (1 + factor) * E55
i1 = n1.index
i2 = n2.index
i3 = n3.index
k0[i1*dof+2, i1*dof+2] += (-a1 + c1)*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(2*Ac) + (b1 - d1)*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(2*Ac)
k0[i1*dof+2, i1*dof+3] += E44*(b1/2 - d1/2)/2 + E45*(-a1/2 + c1/2)/2
k0[i1*dof+2, i1*dof+4] += E45*(b1/2 - d1/2)/2 + E55*(-a1/2 + c1/2)/2
k0[i1*dof+2, i2*dof+2] += -c1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(2*Ac) + d1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(2*Ac)
k0[i1*dof+2, i2*dof+3] += -a1*c1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(4*Ac) + a1*d1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(4*Ac)
k0[i1*dof+2, i2*dof+4] += -b1*c1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(4*Ac) + b1*d1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(4*Ac)
k0[i1*dof+2, i3*dof+2] += a1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(2*Ac) - b1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(2*Ac)
k0[i1*dof+2, i3*dof+3] += a1*c1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(4*Ac) - b1*c1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(4*Ac)
k0[i1*dof+2, i3*dof+4] += a1*d1*(E45*(b1/2 - d1/2) + E55*(-a1/2 + c1/2))/(4*Ac) - b1*d1*(E44*(b1/2 - d1/2) + E45*(-a1/2 + c1/2))/(4*Ac)
k0[i1*dof+3, i1*dof+2] += E44*(b1 - d1)/4 + E45*(-a1 + c1)/4
k0[i1*dof+3, i1*dof+3] += Ac*E44/4
k0[i1*dof+3, i1*dof+4] += Ac*E45/4
k0[i1*dof+3, i2*dof+2] += E44*d1/4 - E45*c1/4
k0[i1*dof+3, i2*dof+3] += E44*a1*d1/8 - E45*a1*c1/8
k0[i1*dof+3, i2*dof+4] += E44*b1*d1/8 - E45*b1*c1/8
k0[i1*dof+3, i3*dof+2] += -E44*b1/4 + E45*a1/4
k0[i1*dof+3, i3*dof+3] += -E44*b1*c1/8 + E45*a1*c1/8
k0[i1*dof+3, i3*dof+4] += -E44*b1*d1/8 + E45*a1*d1/8
k0[i1*dof+4, i1*dof+2] += E45*(b1 - d1)/4 + E55*(-a1 + c1)/4
k0[i1*dof+4, i1*dof+3] += Ac*E45/4
k0[i1*dof+4, i1*dof+4] += Ac*E55/4
k0[i1*dof+4, i2*dof+2] += E45*d1/4 - E55*c1/4
k0[i1*dof+4, i2*dof+3] += E45*a1*d1/8 - E55*a1*c1/8
k0[i1*dof+4, i2*dof+4] += E45*b1*d1/8 - E55*b1*c1/8
k0[i1*dof+4, i3*dof+2] += -E45*b1/4 + E55*a1/4
k0[i1*dof+4, i3*dof+3] += -E45*b1*c1/8 + E55*a1*c1/8
k0[i1*dof+4, i3*dof+4] += -E45*b1*d1/8 + E55*a1*d1/8
k0[i2*dof+2, i1*dof+2] += (-a1 + c1)*(E45*d1/2 - E55*c1/2)/(2*Ac) + (b1 - d1)*(E44*d1/2 - E45*c1/2)/(2*Ac)
k0[i2*dof+2, i1*dof+3] += E44*d1/4 - E45*c1/4
k0[i2*dof+2, i1*dof+4] += E45*d1/4 - E55*c1/4
k0[i2*dof+2, i2*dof+2] += -c1*(E45*d1/2 - E55*c1/2)/(2*Ac) + d1*(E44*d1/2 - E45*c1/2)/(2*Ac)
k0[i2*dof+2, i2*dof+3] += -a1*c1*(E45*d1/2 - E55*c1/2)/(4*Ac) + a1*d1*(E44*d1/2 - E45*c1/2)/(4*Ac)
k0[i2*dof+2, i2*dof+4] += -b1*c1*(E45*d1/2 - E55*c1/2)/(4*Ac) + b1*d1*(E44*d1/2 - E45*c1/2)/(4*Ac)
k0[i2*dof+2, i3*dof+2] += a1*(E45*d1/2 - E55*c1/2)/(2*Ac) - b1*(E44*d1/2 - E45*c1/2)/(2*Ac)
k0[i2*dof+2, i3*dof+3] += a1*c1*(E45*d1/2 - E55*c1/2)/(4*Ac) - b1*c1*(E44*d1/2 - E45*c1/2)/(4*Ac)
k0[i2*dof+2, i3*dof+4] += a1*d1*(E45*d1/2 - E55*c1/2)/(4*Ac) - b1*d1*(E44*d1/2 - E45*c1/2)/(4*Ac)
k0[i2*dof+3, i1*dof+2] += (-a1 + c1)*(E45*a1*d1/4 - E55*a1*c1/4)/(2*Ac) + (b1 - d1)*(E44*a1*d1/4 - E45*a1*c1/4)/(2*Ac)
k0[i2*dof+3, i1*dof+3] += E44*a1*d1/8 - E45*a1*c1/8
k0[i2*dof+3, i1*dof+4] += E45*a1*d1/8 - E55*a1*c1/8
k0[i2*dof+3, i2*dof+2] += -c1*(E45*a1*d1/4 - E55*a1*c1/4)/(2*Ac) + d1*(E44*a1*d1/4 - E45*a1*c1/4)/(2*Ac)
k0[i2*dof+3, i2*dof+3] += -a1*c1*(E45*a1*d1/4 - E55*a1*c1/4)/(4*Ac) + a1*d1*(E44*a1*d1/4 - E45*a1*c1/4)/(4*Ac)
k0[i2*dof+3, i2*dof+4] += -b1*c1*(E45*a1*d1/4 - E55*a1*c1/4)/(4*Ac) + b1*d1*(E44*a1*d1/4 - E45*a1*c1/4)/(4*Ac)
k0[i2*dof+3, i3*dof+2] += a1*(E45*a1*d1/4 - E55*a1*c1/4)/(2*Ac) - b1*(E44*a1*d1/4 - E45*a1*c1/4)/(2*Ac)
k0[i2*dof+3, i3*dof+3] += a1*c1*(E45*a1*d1/4 - E55*a1*c1/4)/(4*Ac) - b1*c1*(E44*a1*d1/4 - E45*a1*c1/4)/(4*Ac)
k0[i2*dof+3, i3*dof+4] += a1*d1*(E45*a1*d1/4 - E55*a1*c1/4)/(4*Ac) - b1*d1*(E44*a1*d1/4 - E45*a1*c1/4)/(4*Ac)
k0[i2*dof+4, i1*dof+2] += (-a1 + c1)*(E45*b1*d1/4 - E55*b1*c1/4)/(2*Ac) + (b1 - d1)*(E44*b1*d1/4 - E45*b1*c1/4)/(2*Ac)
k0[i2*dof+4, i1*dof+3] += E44*b1*d1/8 - E45*b1*c1/8
k0[i2*dof+4, i1*dof+4] += E45*b1*d1/8 - E55*b1*c1/8
k0[i2*dof+4, i2*dof+2] += -c1*(E45*b1*d1/4 - E55*b1*c1/4)/(2*Ac) + d1*(E44*b1*d1/4 - E45*b1*c1/4)/(2*Ac)
k0[i2*dof+4, i2*dof+3] += -a1*c1*(E45*b1*d1/4 - E55*b1*c1/4)/(4*Ac) + a1*d1*(E44*b1*d1/4 - E45*b1*c1/4)/(4*Ac)
k0[i2*dof+4, i2*dof+4] += -b1*c1*(E45*b1*d1/4 - E55*b1*c1/4)/(4*Ac) + b1*d1*(E44*b1*d1/4 - E45*b1*c1/4)/(4*Ac)
k0[i2*dof+4, i3*dof+2] += a1*(E45*b1*d1/4 - E55*b1*c1/4)/(2*Ac) - b1*(E44*b1*d1/4 - E45*b1*c1/4)/(2*Ac)
k0[i2*dof+4, i3*dof+3] += a1*c1*(E45*b1*d1/4 - E55*b1*c1/4)/(4*Ac) - b1*c1*(E44*b1*d1/4 - E45*b1*c1/4)/(4*Ac)
k0[i2*dof+4, i3*dof+4] += a1*d1*(E45*b1*d1/4 - E55*b1*c1/4)/(4*Ac) - b1*d1*(E44*b1*d1/4 - E45*b1*c1/4)/(4*Ac)
k0[i3*dof+2, i1*dof+2] += (-a1 + c1)*(-E45*b1/2 + E55*a1/2)/(2*Ac) + (b1 - d1)*(-E44*b1/2 + E45*a1/2)/(2*Ac)
k0[i3*dof+2, i1*dof+3] += -E44*b1/4 + E45*a1/4
k0[i3*dof+2, i1*dof+4] += -E45*b1/4 + E55*a1/4
k0[i3*dof+2, i2*dof+2] += -c1*(-E45*b1/2 + E55*a1/2)/(2*Ac) + d1*(-E44*b1/2 + E45*a1/2)/(2*Ac)
k0[i3*dof+2, i2*dof+3] += -a1*c1*(-E45*b1/2 + E55*a1/2)/(4*Ac) + a1*d1*(-E44*b1/2 + E45*a1/2)/(4*Ac)
k0[i3*dof+2, i2*dof+4] += -b1*c1*(-E45*b1/2 + E55*a1/2)/(4*Ac) + b1*d1*(-E44*b1/2 + E45*a1/2)/(4*Ac)
k0[i3*dof+2, i3*dof+2] += a1*(-E45*b1/2 + E55*a1/2)/(2*Ac) - b1*(-E44*b1/2 + E45*a1/2)/(2*Ac)
k0[i3*dof+2, i3*dof+3] += a1*c1*(-E45*b1/2 + E55*a1/2)/(4*Ac) - b1*c1*(-E44*b1/2 + E45*a1/2)/(4*Ac)
k0[i3*dof+2, i3*dof+4] += a1*d1*(-E45*b1/2 + E55*a1/2)/(4*Ac) - b1*d1*(-E44*b1/2 + E45*a1/2)/(4*Ac)
k0[i3*dof+3, i1*dof+2] += (-a1 + c1)*(-E45*b1*c1/4 + E55*a1*c1/4)/(2*Ac) + (b1 - d1)*(-E44*b1*c1/4 + E45*a1*c1/4)/(2*Ac)
k0[i3*dof+3, i1*dof+3] += -E44*b1*c1/8 + E45*a1*c1/8
k0[i3*dof+3, i1*dof+4] += -E45*b1*c1/8 + E55*a1*c1/8
k0[i3*dof+3, i2*dof+2] += -c1*(-E45*b1*c1/4 + E55*a1*c1/4)/(2*Ac) + d1*(-E44*b1*c1/4 + E45*a1*c1/4)/(2*Ac)
k0[i3*dof+3, i2*dof+3] += -a1*c1*(-E45*b1*c1/4 + E55*a1*c1/4)/(4*Ac) + a1*d1*(-E44*b1*c1/4 + E45*a1*c1/4)/(4*Ac)
k0[i3*dof+3, i2*dof+4] += -b1*c1*(-E45*b1*c1/4 + E55*a1*c1/4)/(4*Ac) + b1*d1*(-E44*b1*c1/4 + E45*a1*c1/4)/(4*Ac)
k0[i3*dof+3, i3*dof+2] += a1*(-E45*b1*c1/4 + E55*a1*c1/4)/(2*Ac) - b1*(-E44*b1*c1/4 + E45*a1*c1/4)/(2*Ac)
k0[i3*dof+3, i3*dof+3] += a1*c1*(-E45*b1*c1/4 + E55*a1*c1/4)/(4*Ac) - b1*c1*(-E44*b1*c1/4 + E45*a1*c1/4)/(4*Ac)
k0[i3*dof+3, i3*dof+4] += a1*d1*(-E45*b1*c1/4 + E55*a1*c1/4)/(4*Ac) - b1*d1*(-E44*b1*c1/4 + E45*a1*c1/4)/(4*Ac)
k0[i3*dof+4, i1*dof+2] += (-a1 + c1)*(-E45*b1*d1/4 + E55*a1*d1/4)/(2*Ac) + (b1 - d1)*(-E44*b1*d1/4 + E45*a1*d1/4)/(2*Ac)
k0[i3*dof+4, i1*dof+3] += -E44*b1*d1/8 + E45*a1*d1/8
k0[i3*dof+4, i1*dof+4] += -E45*b1*d1/8 + E55*a1*d1/8
k0[i3*dof+4, i2*dof+2] += -c1*(-E45*b1*d1/4 + E55*a1*d1/4)/(2*Ac) + d1*(-E44*b1*d1/4 + E45*a1*d1/4)/(2*Ac)
k0[i3*dof+4, i2*dof+3] += -a1*c1*(-E45*b1*d1/4 + E55*a1*d1/4)/(4*Ac) + a1*d1*(-E44*b1*d1/4 + E45*a1*d1/4)/(4*Ac)
k0[i3*dof+4, i2*dof+4] += -b1*c1*(-E45*b1*d1/4 + E55*a1*d1/4)/(4*Ac) + b1*d1*(-E44*b1*d1/4 + E45*a1*d1/4)/(4*Ac)
k0[i3*dof+4, i3*dof+2] += a1*(-E45*b1*d1/4 + E55*a1*d1/4)/(2*Ac) - b1*(-E44*b1*d1/4 + E45*a1*d1/4)/(2*Ac)
k0[i3*dof+4, i3*dof+3] += a1*c1*(-E45*b1*d1/4 + E55*a1*d1/4)/(4*Ac) - b1*c1*(-E44*b1*d1/4 + E45*a1*d1/4)/(4*Ac)
k0[i3*dof+4, i3*dof+4] += a1*d1*(-E45*b1*d1/4 + E55*a1*d1/4)/(4*Ac) - b1*d1*(-E44*b1*d1/4 + E45*a1*d1/4)/(4*Ac)
msg('finished!', silent=silent)
return k0
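# --- illustrative addendum, not part of the original artifact ---
# A standalone sketch of the node-ordering check at the top of the loop in
# add_k0s: the cross product of two edge vectors points along +Z for a
# counter-clockwise triangle, so a negative dot product with the global Z
# axis means the nodes are wound clockwise and get swapped. The coordinates
# and the Z axis below are made up for illustration.
def _example_winding_check():
    z_global = np.array([0., 0., 1.])
    n1 = np.array([0., 0., 0.])
    n2 = np.array([0., 1., 0.])
    n3 = np.array([1., 0., 0.])
    normal = np.cross(n2 - n1, n3 - n1)
    return np.dot(normal, z_global) < 0   # True here -> clockwise, swap n1 and n2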
| {
"repo_name": "compmech/meshless",
"path": "meshless/espim/plate2d_add_k0s_cell_based_no_smoothing.py",
"copies": "1",
"size": "9397",
"license": "bsd-2-clause",
"hash": -5724293151182580000,
"line_mean": 63.8068965517,
"line_max": 152,
"alpha_frac": 0.4770671491,
"autogenerated": false,
"ratio": 1.601670359638657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7563182264686354,
"avg_score": 0.0031110488104606126,
"num_lines": 145
} |
from __future__ import absolute_import, division
import numpy as np
from ..logger import msg
from ..utils import area_of_polygon, getMid
def calc_kG(d, mesh, prop_from_node, silent=True):
"""Calculate the geometric stiffness matrix for a given input mesh
Parameters
----------
d : (N) array-like
Result from a static analysis, are used to compute the current membrane
stress distribution
mesh : :class:`pyNastran.bdf.BDF` object
The object must have the proper edge references as those returned by
:func:`.read_mesh` or :func:`.read_delaunay`
prop_from_node : bool
If the constitutive properties are assigned per node. Otherwise they
are considered assigned per element
Returns
-------
kG : (N, N) array-like
The geometric stiffness matrix
"""
msg('Calculating KG...', silent=silent)
dof = 5
n = d.shape[0] // dof
#TODO allocate less memory here...
kG = np.zeros((n*dof, n*dof), dtype=np.float64)
for edge in mesh.edges.values():
tria1 = edge.trias[0]
Ac = edge.Ac
ipts = edge.ipts
mid1 = getMid(tria1)
tmp = np.array([mid1, edge.n2.xyz, edge.n1.xyz])
Ac1 = area_of_polygon(tmp[:, 0], tmp[:, 1])
if len(edge.trias) == 1:
tria2 = None
elif len(edge.trias) == 2:
tria2 = edge.trias[1]
mid2 = getMid(tria2)
tmp = np.array([mid2, edge.n1.xyz, edge.n2.xyz])
Ac2 = area_of_polygon(tmp[:, 0], tmp[:, 1])
else:
raise RuntimeError('Found %d trias for edge' % len(edge.trias))
indices = set()
for ipt in ipts:
indices.add(ipt.n1.index)
indices.add(ipt.n2.index)
indices.add(ipt.n3.index)
indices = sorted(list(indices))
if len(ipts) == 3:
indices.append(0) # fourth dummy index
indexpos = dict([[ind, i] for i, ind in enumerate(indices)])
i1, i2, i3, i4 = indices
f1 = np.array([0, 0, 0, 0], dtype=float)
f2 = np.array([0, 0, 0, 0], dtype=float)
f3 = np.array([0, 0, 0, 0], dtype=float)
f4 = np.array([0, 0, 0, 0], dtype=float)
nx1 = ipts[0].nx
ny1 = ipts[0].ny
le1 = ipts[0].le
f1[indexpos[ipts[0].n1.index]] = ipts[0].f1
f1[indexpos[ipts[0].n2.index]] = ipts[0].f2
f1[indexpos[ipts[0].n3.index]] = ipts[0].f3
nx2 = ipts[1].nx
ny2 = ipts[1].ny
le2 = ipts[1].le
f2[indexpos[ipts[1].n1.index]] = ipts[1].f1
f2[indexpos[ipts[1].n2.index]] = ipts[1].f2
f2[indexpos[ipts[1].n3.index]] = ipts[1].f3
nx3 = ipts[2].nx
ny3 = ipts[2].ny
le3 = ipts[2].le
f3[indexpos[ipts[2].n1.index]] = ipts[2].f1
f3[indexpos[ipts[2].n2.index]] = ipts[2].f2
f3[indexpos[ipts[2].n3.index]] = ipts[2].f3
if len(ipts) == 3:
nx4 = 0
ny4 = 0
le4 = 0
else:
nx4 = ipts[3].nx
ny4 = ipts[3].ny
le4 = ipts[3].le
f4[indexpos[ipts[3].n1.index]] = ipts[3].f1
f4[indexpos[ipts[3].n2.index]] = ipts[3].f2
f4[indexpos[ipts[3].n3.index]] = ipts[3].f3
f11, f12, f13, f14 = f1
f21, f22, f23, f24 = f2
f31, f32, f33, f34 = f3
f41, f42, f43, f44 = f4
if prop_from_node:
pn1 = edge.n1.prop
pn2 = edge.n2.prop
po1 = edge.othernode1.prop
if tria2 is None:
A = 4/9*pn1.A + 4/9*pn2.A + 1/9*po1.A
B = 4/9*pn1.B + 4/9*pn2.B + 1/9*po1.B
else:
po2 = edge.othernode2.prop
A = 5/12*pn1.A + 5/12*pn2.A + 1/12*po1.A + 1/12*po2.A
B = 5/12*pn1.B + 5/12*pn2.B + 1/12*po1.B + 1/12*po2.B
else:
prop1 = tria1.prop
if tria2 is None:
A = prop1.A
B = prop1.B
else:
prop2 = tria2.prop
A = (Ac1*prop1.A + Ac2*prop2.A)/Ac
B = (Ac1*prop1.B + Ac2*prop2.B)/Ac
d1 = d[i1*dof: i1*dof+5]
d2 = d[i2*dof: i2*dof+5]
d3 = d[i3*dof: i3*dof+5]
d4 = d[i4*dof: i4*dof+5]
        # stacked shape functions: [4, 4]; stacked nodal displacements: [4, 5]
dc = np.dot(np.array([f1, f2, f3, f4]), np.array([d1, d2, d3, d4]))
# dc is [4, 5]
u_c = dc[:, 0]
v_c = dc[:, 1]
phix_c = dc[:, 3]
phiy_c = dc[:, 4]
les = np.array([le1, le2, le3, le4])
nx = np.array([nx1, nx2, nx3, nx4])
ny = np.array([ny1, ny2, ny3, ny4])
em = np.zeros(3)
em[0] = 1/Ac*(les*nx*u_c).sum()
em[1] = 1/Ac*(les*ny*v_c).sum()
em[2] = 1/Ac*(les*(ny*u_c + nx*v_c)).sum()
eb = np.zeros(3)
eb[0] = 1/Ac*(les*nx*phix_c).sum()
eb[1] = 1/Ac*(les*ny*phiy_c).sum()
eb[2] = 1/Ac*(les*(ny*phix_c + nx*phiy_c)).sum()
Nxx, Nyy, Nxy = np.dot(A, em) + np.dot(B, eb)
#TODO calculate only upper triangle
kG[i1*dof+2, i1*dof+2] += Ac*((Nxx*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nxy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (Nxy*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nyy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
kG[i1*dof+2, i2*dof+2] += Ac*((Nxx*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nxy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (Nxy*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nyy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
kG[i1*dof+2, i3*dof+2] += Ac*((Nxx*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nxy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (Nxy*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nyy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
kG[i1*dof+2, i4*dof+2] += Ac*((Nxx*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nxy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (Nxy*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + Nyy*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
kG[i2*dof+2, i1*dof+2] += Ac*((Nxx*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nxy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (Nxy*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nyy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
kG[i2*dof+2, i2*dof+2] += Ac*((Nxx*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nxy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (Nxy*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nyy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
kG[i2*dof+2, i3*dof+2] += Ac*((Nxx*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nxy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (Nxy*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nyy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
kG[i2*dof+2, i4*dof+2] += Ac*((Nxx*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nxy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (Nxy*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + Nyy*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
kG[i3*dof+2, i1*dof+2] += Ac*((Nxx*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nxy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (Nxy*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nyy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
kG[i3*dof+2, i2*dof+2] += Ac*((Nxx*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nxy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (Nxy*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nyy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
kG[i3*dof+2, i3*dof+2] += Ac*((Nxx*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nxy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (Nxy*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nyy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
kG[i3*dof+2, i4*dof+2] += Ac*((Nxx*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nxy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (Nxy*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + Nyy*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
kG[i4*dof+2, i1*dof+2] += Ac*((Nxx*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nxy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (Nxy*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nyy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
kG[i4*dof+2, i2*dof+2] += Ac*((Nxx*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nxy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (Nxy*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nyy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
kG[i4*dof+2, i3*dof+2] += Ac*((Nxx*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nxy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (Nxy*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nyy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
kG[i4*dof+2, i4*dof+2] += Ac*((Nxx*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nxy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (Nxy*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + Nyy*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
msg('finished!', silent=silent)
return kG
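# --- illustrative addendum, not part of the original artifact ---
# A compact sketch of the strain-smoothing step inside calc_kG: membrane
# strains are boundary integrals over the smoothing cell (edge length times
# outward normal times interpolated displacement, divided by the cell area),
# and the membrane forces follow from the laminate A and B matrices. All
# arguments are placeholders with the same shapes as used above.
def _example_smoothed_membrane_forces(les, nx, ny, u_c, v_c, A, B, eb, Ac):
    em = np.array([
        (les * nx * u_c).sum() / Ac,
        (les * ny * v_c).sum() / Ac,
        (les * (ny * u_c + nx * v_c)).sum() / Ac,
    ])
    return np.dot(A, em) + np.dot(B, eb)   # -> (Nxx, Nyy, Nxy)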
| {
"repo_name": "compmech/meshless",
"path": "meshless/espim/plate2d_calc_kG.py",
"copies": "1",
"size": "11838",
"license": "bsd-2-clause",
"hash": 2069918762163999700,
"line_mean": 65.5056179775,
"line_max": 418,
"alpha_frac": 0.5601452948,
"autogenerated": false,
"ratio": 1.940655737704918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7969729057813248,
"avg_score": 0.006214394938334007,
"num_lines": 178
} |
from __future__ import absolute_import, division
import operator
import re
from functools import partial, reduce
from django.apps import apps
from django.db import connections
from django.db.models import Q
from django.utils.lru_cache import lru_cache
from django.utils.six.moves import zip_longest
from wagtail.wagtailsearch.index import Indexed, RelatedFields, SearchField
try:
# Only use the GPLv2 licensed unidecode if it's installed.
from unidecode import unidecode
except ImportError:
def unidecode(value):
return value
def get_postgresql_connections():
return [connection for connection in connections.all()
if connection.vendor == 'postgresql']
# Reduce any iterable to a single value using a logical OR e.g. (a | b | ...)
OR = partial(reduce, operator.or_)
# Reduce any iterable to a single value using a logical AND e.g. (a & b & ...)
AND = partial(reduce, operator.and_)
# Reduce any iterable to a single value using an addition
ADD = partial(reduce, operator.add)
def keyword_split(keywords):
"""
Return all the keywords in a keyword string.
Keeps keywords surrounded by quotes together, removing the surrounding quotes:
>>> keyword_split('Hello I\\'m looking for "something special"')
['Hello', "I'm", 'looking', 'for', 'something special']
Nested quoted strings are returned as is:
>>> keyword_split("He said \\"I'm looking for 'something special'\\" so I've given him the 'special item'")
['He', 'said', "I'm looking for 'something special'", 'so', "I've", 'given', 'him', 'the', 'special item']
"""
matches = re.findall(r'"([^"]+)"|\'([^\']+)\'|(\S+)', keywords)
return [match[0] or match[1] or match[2] for match in matches]
def get_descendant_models(model):
"""
Returns all descendants of a model, including the model itself.
"""
descendant_models = {other_model for other_model in apps.get_models()
if issubclass(other_model, model)}
descendant_models.add(model)
return descendant_models
def get_descendants_content_types_pks(models, db_alias):
return get_content_types_pks(
tuple(descendant_model for model in models
for descendant_model in get_descendant_models(model)), db_alias)
@lru_cache()
def get_content_types_pks(models, db_alias):
# We import it locally because this file is loaded before apps are ready.
from django.contrib.contenttypes.models import ContentType
return list(ContentType._default_manager.using(db_alias)
.filter(OR([Q(app_label=model._meta.app_label,
model=model._meta.model_name)
for model in models]))
.values_list('pk', flat=True))
def get_search_fields(search_fields):
for search_field in search_fields:
if isinstance(search_field, SearchField):
yield search_field
elif isinstance(search_field, RelatedFields):
for sub_field in get_search_fields(search_field.fields):
yield sub_field
WEIGHTS = 'ABCD'
WEIGHTS_COUNT = len(WEIGHTS)
# These are filled when apps are ready.
BOOSTS_WEIGHTS = []
WEIGHTS_VALUES = []
def get_boosts():
boosts = set()
for model in apps.get_models():
if issubclass(model, Indexed):
for search_field in get_search_fields(model.get_search_fields()):
boost = search_field.boost
if boost is not None:
boosts.add(boost)
return boosts
def determine_boosts_weights(boosts=()):
if not boosts:
boosts = get_boosts()
boosts = list(sorted(boosts, reverse=True))
min_boost = boosts[-1]
if len(boosts) <= WEIGHTS_COUNT:
return list(zip_longest(boosts, WEIGHTS, fillvalue=min(min_boost, 0)))
max_boost = boosts[0]
boost_step = (max_boost - min_boost) / (WEIGHTS_COUNT - 1)
return [(max_boost - (i * boost_step), weight)
for i, weight in enumerate(WEIGHTS)]
def get_weight(boost):
if boost is None:
return WEIGHTS[-1]
for max_boost, weight in BOOSTS_WEIGHTS:
if boost >= max_boost:
return weight
return weight
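# --- illustrative addendum, not part of the original artifact ---
# A tiny illustration of the OR/AND/ADD reducers defined above: they fold
# any iterable with a binary operator, which is how the Q objects are
# combined in get_content_types_pks. Plain integers are used here so the
# snippet runs without Django models.
def _example_reducers():
    assert OR([0b001, 0b010, 0b100]) == 0b111
    assert AND([0b111, 0b011, 0b001]) == 0b001
    assert ADD([1, 2, 3]) == 6
    return True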
| {
"repo_name": "wagtail/wagtail-pg-search-backend",
"path": "wagtail_pgsearchbackend/utils.py",
"copies": "1",
"size": "4196",
"license": "mit",
"hash": -8986085859116415000,
"line_mean": 32.0393700787,
"line_max": 111,
"alpha_frac": 0.6530028599,
"autogenerated": false,
"ratio": 3.797285067873303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9949107385520992,
"avg_score": 0.00023610845046192737,
"num_lines": 127
} |
from __future__ import absolute_import, division
import os
import unittest
from unittest import TestCase
from unittest.mock import patch
from dnaStreaming.config import Config
class TestConfig(TestCase):
def tearDown(self):
self.ensure_remove_environment_variable(Config.ENV_VAR_USER_KEY)
self.ensure_remove_environment_variable(Config.ENV_VAR_SUBSCRIPTION_ID)
self.ensure_remove_environment_variable(Config.ENV_VAR_API_HOST)
self.ensure_remove_environment_variable(Config.ENV_VAR_USER_ID)
self.ensure_remove_environment_variable(Config.ENV_VAR_CLIENT_ID)
self.ensure_remove_environment_variable(Config.ENV_VAR_PASSWORD)
def ensure_remove_environment_variable(self, key):
if key in os.environ:
os.environ.pop(key)
def test_customer_config_not_found_success(self):
# Arrange
config = Config()
path_bogus = '\\does\\not\\exist'
config.customer_config_path = path_bogus
error_message_expected = 'No such file or directory'
# Act
was_exception_thrown = False
error_message_actual = None
try:
config._validate()
except FileNotFoundError as ex:
error_message_actual = ex.strerror
error_message_filename = ex.filename
was_exception_thrown = True
# Assert
assert was_exception_thrown
assert error_message_expected == error_message_actual
assert path_bogus == error_message_filename
def test_get_vals_from_file_success(self):
# Arrange
config = Config()
fileFolder = os.path.dirname(os.path.realpath(__file__))
config._set_customer_config_path(os.path.join(fileFolder, 'test_customer_config.json'))
# Act
user_key = config.get_user_key()
subscription = config.subscription()
oauth2_credentials = config.oauth2_credentials()
# Assert
assert user_key
assert subscription == 'bar'
assert oauth2_credentials.get('user_id')
assert oauth2_credentials.get('password')
assert oauth2_credentials.get('client_id')
def test_environment_variables_success(self):
# Arrange
os.environ[Config.ENV_VAR_USER_KEY] = '123'
os.environ[Config.ENV_VAR_SUBSCRIPTION_ID] = 'ABC'
os.environ[Config.ENV_VAR_USER_ID] = 'user'
os.environ[Config.ENV_VAR_CLIENT_ID] = 'client'
os.environ[Config.ENV_VAR_PASSWORD] = 'password'
# Act
config = Config()
fileFolder = os.path.dirname(os.path.realpath(__file__))
config._set_customer_config_path(os.path.join(fileFolder, 'test_customer_config.json'))
config._initialize()
# Assert
assert os.environ[Config.ENV_VAR_USER_KEY] == config.get_user_key()
subscription_id = config.subscription()
assert subscription_id == 'ABC'
assert os.environ[Config.ENV_VAR_USER_ID] == config.oauth2_credentials().get('user_id')
assert os.environ[Config.ENV_VAR_CLIENT_ID] == config.oauth2_credentials().get('client_id')
assert os.environ[Config.ENV_VAR_PASSWORD] == config.oauth2_credentials().get('password')
def test_environment_variable_service_account_id_success(self):
# Arrange
os.environ[Config.ENV_VAR_SERVICE_ACCOUNT_ID] = 'lemme_in'
os.environ[Config.ENV_VAR_SUBSCRIPTION_ID] = 'ABC'
# Act
config = Config()
fileFolder = os.path.dirname(os.path.realpath(__file__))
config._set_customer_config_path(os.path.join(fileFolder, 'test_customer_config.json'))
config._initialize()
# Assert
assert os.environ[Config.ENV_VAR_SERVICE_ACCOUNT_ID] == config.get_user_key()
subscription_id = config.subscription()
assert subscription_id == 'ABC'
def test_oauth2_creds_not_provided(self):
# Arrange
config = Config()
# Act
creds = config.oauth2_credentials()
# Assert
assert creds is None
def test_user_key_passed_success(self):
# Arrange
# Act
config = Config(user_key='123')
# Assert
assert config.get_user_key() == '123'
def test_service_account_id_passed_success(self):
# Arrange
# Act
config = Config(service_account_id='123')
# Assert
assert config.get_user_key() == '123'
@patch.object(Config, '_fetch_jwt', return_value='test_jwt_value')
def test_get_headers_jwt(self, fetch_jwt_mock):
# Arrange
config = Config()
fileFolder = os.path.dirname(os.path.realpath(__file__))
config._set_customer_config_path(os.path.join(fileFolder, 'test_customer_config.json'))
headers_expected = {
'Authorization': 'test_jwt_value'
}
# Act
headers_actual = config.get_authentication_headers()
# Assert
assert headers_actual == headers_expected
fetch_jwt_mock.assert_called_once()
@patch.object(Config, '_fetch_jwt', return_value='test_jwt_value')
def test_get_headers_user_key(self, fetch_jwt_mock):
# Arrange
user_key = "just some user key"
config = Config(user_key)
headers_expected = {
'user-key': user_key
}
# Act
headers_actual = config.get_authentication_headers()
# Assert
assert headers_actual == headers_expected
fetch_jwt_mock.assert_not_called()
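# --- illustrative addendum, not part of the original test module ---
# A tiny standalone sketch of the precedence exercised above: an environment
# variable, when present, wins over a value read from the customer config
# file. The variable name and fallback are made up for illustration.
def _example_env_override(file_value, env_name='EXAMPLE_USER_KEY'):
    return os.environ.get(env_name) or file_value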
if __name__ == '__main__' and __package__ is None:
unittest.main()
| {
"repo_name": "dowjones/dj-dna-streams-python",
"path": "dnaStreaming/test/test_config.py",
"copies": "1",
"size": "5590",
"license": "mit",
"hash": 1336545254640459800,
"line_mean": 32.0769230769,
"line_max": 99,
"alpha_frac": 0.624686941,
"autogenerated": false,
"ratio": 3.823529411764706,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4948216352764706,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import os
from io import BytesIO
from six.moves.urllib.parse import parse_qs
from mock import Mock, call
from twisted.internet.defer import succeed, Deferred, fail, CancelledError
from twisted.internet.error import ConnectionLost
from twisted.internet.unix import Server
from twisted.web import server
from twisted.web.http_headers import Headers
from twisted.web.resource import Resource
from twisted.web.static import File
from twisted.web.template import Element, XMLString, renderer
from twisted.web.test.test_web import DummyChannel
from twisted.python.compat import unicode, _PY3
from werkzeug.exceptions import NotFound
from klein import Klein
from klein.interfaces import IKleinRequest
from klein.resource import (
KleinResource,
_URLDecodeError,
_extractURLparts,
ensure_utf8_bytes,
)
from klein.test.util import TestCase, EqualityTestsMixin
def requestMock(path, method=b"GET", host=b"localhost", port=8080,
isSecure=False, body=None, headers=None):
if not headers:
headers = {}
if not body:
body = b''
path, qpath = (path.split(b"?", 1) + [b""])[:2]
request = server.Request(DummyChannel(), False)
request.site = Mock(server.Site)
request.gotLength(len(body))
request.content = BytesIO()
request.content.write(body)
request.content.seek(0)
request.args = parse_qs(qpath)
request.requestHeaders = Headers(headers)
request.setHost(host, port, isSecure)
request.uri = path
request.prepath = []
request.postpath = path.split(b'/')[1:]
request.method = method
request.clientproto = b'HTTP/1.1'
request.setHeader = Mock(wraps=request.setHeader)
request.setResponseCode = Mock(wraps=request.setResponseCode)
request._written = BytesIO()
request.finishCount = 0
request.writeCount = 0
def registerProducer(producer, streaming):
request.producer = producer
for x in range(2):
if request.producer:
request.producer.resumeProducing()
def unregisterProducer():
request.producer = None
def finish():
request.finishCount += 1
if not request.startedWriting:
request.write(b'')
if not request.finished:
request.finished = True
request._cleanup()
def write(data):
request.writeCount += 1
request.startedWriting = True
if not request.finished:
request._written.write(data)
else:
raise RuntimeError('Request.write called on a request after '
'Request.finish was called.')
def getWrittenData():
return request._written.getvalue()
request.finish = finish
request.write = write
request.getWrittenData = getWrittenData
request.registerProducer = registerProducer
request.unregisterProducer = unregisterProducer
request.processingFailed = Mock(wraps=request.processingFailed)
return request
def _render(resource, request, notifyFinish=True):
result = resource.render(request)
if isinstance(result, bytes):
request.write(result)
request.finish()
return succeed(None)
elif result is server.NOT_DONE_YET:
if request.finished or not notifyFinish:
return succeed(None)
else:
return request.notifyFinish()
else:
raise ValueError("Unexpected return value: %r" % (result,))
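# Added usage sketch (not part of the original tests): requestMock() builds an
# instrumented twisted.web Request whose write/finish calls are recorded, and
# _render() pushes it through a resource's render() protocol, returning a Deferred
# that fires once the response is complete. A hypothetical snippet:
#
#     request = requestMock(b"/", method=b"GET")
#     d = _render(KleinResource(app), request)
#     d.addCallback(lambda _: request.getWrittenData())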
class SimpleElement(Element):
loader = XMLString("""
<h1 xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1" t:render="name" />
""")
def __init__(self, name):
self._name = name
@renderer
def name(self, request, tag):
return tag(self._name)
class DeferredElement(SimpleElement):
@renderer
def name(self, request, tag):
self.deferred = Deferred()
self.deferred.addCallback(lambda ignored: tag(self._name))
return self.deferred
class LeafResource(Resource):
isLeaf = True
def render(self, request):
return b"I am a leaf in the wind."
class ChildResource(Resource):
isLeaf = True
def __init__(self, name):
self._name = name
def render(self, request):
return b"I'm a child named " + self._name + b"!"
class ChildrenResource(Resource):
def render(self, request):
return b"I have children!"
def getChild(self, path, request):
if path == b'':
return self
return ChildResource(path)
class ProducingResource(Resource):
def __init__(self, path, strings):
self.path = path
self.strings = strings
def render_GET(self, request):
producer = MockProducer(request, self.strings)
producer.start()
return server.NOT_DONE_YET
class MockProducer(object):
def __init__(self, request, strings):
self.request = request
self.strings = strings
def start(self):
self.request.registerProducer(self, False)
def resumeProducing(self):
if self.strings:
self.request.write(self.strings.pop(0))
else:
self.request.unregisterProducer()
self.request.finish()
class KleinResourceEqualityTests(TestCase, EqualityTestsMixin):
"""
Tests for L{KleinResource}'s implementation of C{==} and C{!=}.
"""
class _One(object):
oneKlein = Klein()
@oneKlein.route("/foo")
def foo(self):
pass
_one = _One()
class _Another(object):
anotherKlein = Klein()
@anotherKlein.route("/bar")
def bar(self):
pass
_another = _Another()
def anInstance(self):
return self._one.oneKlein
def anotherInstance(self):
return self._another.anotherKlein
class KleinResourceTests(TestCase):
def setUp(self):
self.app = Klein()
self.kr = KleinResource(self.app)
def assertFired(self, deferred, result=None):
"""
Assert that the given deferred has fired with the given result.
"""
self.assertEqual(self.successResultOf(deferred), result)
def assertNotFired(self, deferred):
"""
Assert that the given deferred has not fired with a result.
"""
_pawn = object()
result = getattr(deferred, 'result', _pawn)
if result != _pawn:
self.fail("Expected deferred not to have fired, "
"but it has: %r" % (deferred,))
def test_simplePost(self):
app = self.app
# The order in which these functions are defined
# matters. If the more generic one is defined first
# then it will eat requests that should have been handled
# by the more specific handler.
@app.route("/", methods=["POST"])
def handle_post(request):
return b'posted'
@app.route("/")
def handle(request):
return b'gotted'
request = requestMock(b'/', b'POST')
request2 = requestMock(b'/')
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'posted')
d2 = _render(self.kr, request2)
self.assertFired(d2)
self.assertEqual(request2.getWrittenData(), b'gotted')
def test_simpleRouting(self):
app = self.app
@app.route("/")
def slash(request):
return b'ok'
request = requestMock(b'/')
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'ok')
def test_branchRendering(self):
app = self.app
@app.route("/", branch=True)
def slash(request):
return b'ok'
request = requestMock(b'/foo')
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'ok')
def test_branchWithExplicitChildrenRouting(self):
app = self.app
@app.route("/")
def slash(request):
return b'ok'
@app.route("/zeus")
def wooo(request):
return b'zeus'
request = requestMock(b'/zeus')
request2 = requestMock(b'/')
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'zeus')
d2 = _render(self.kr, request2)
self.assertFired(d2)
self.assertEqual(request2.getWrittenData(), b'ok')
def test_branchWithExplicitChildBranch(self):
app = self.app
@app.route("/", branch=True)
def slash(request):
return b'ok'
@app.route("/zeus/", branch=True)
def wooo(request):
return b'zeus'
request = requestMock(b'/zeus/foo')
request2 = requestMock(b'/')
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'zeus')
d2 = _render(self.kr, request2)
self.assertFired(d2)
self.assertEqual(request2.getWrittenData(), b'ok')
def test_deferredRendering(self):
app = self.app
deferredResponse = Deferred()
@app.route("/deferred")
def deferred(request):
return deferredResponse
request = requestMock(b"/deferred")
d = _render(self.kr, request)
self.assertNotFired(d)
deferredResponse.callback(b'ok')
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'ok')
def test_elementRendering(self):
app = self.app
@app.route("/element/<string:name>")
def element(request, name):
return SimpleElement(name)
request = requestMock(b"/element/foo")
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
b"<!DOCTYPE html>\n<h1>foo</h1>")
def test_deferredElementRendering(self):
app = self.app
elements = []
@app.route("/element/<string:name>")
def element(request, name):
it = DeferredElement(name)
elements.append(it)
return it
request = requestMock(b"/element/bar")
d = _render(self.kr, request)
self.assertEqual(len(elements), 1)
[oneElement] = elements
self.assertNoResult(d)
oneElement.deferred.callback(None)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
b"<!DOCTYPE html>\n<h1>bar</h1>")
def test_leafResourceRendering(self):
app = self.app
request = requestMock(b"/resource/leaf")
@app.route("/resource/leaf")
def leaf(request):
return LeafResource()
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
b"I am a leaf in the wind.")
def test_childResourceRendering(self):
app = self.app
request = requestMock(b"/resource/children/betty")
@app.route("/resource/children/", branch=True)
def children(request):
return ChildrenResource()
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
b"I'm a child named betty!")
def test_childrenResourceRendering(self):
app = self.app
request = requestMock(b"/resource/children/")
@app.route("/resource/children/", branch=True)
def children(request):
return ChildrenResource()
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b"I have children!")
def test_producerResourceRendering(self):
"""
Test that Klein will correctly handle producing L{Resource}s.
Producing Resources close the connection by themselves, sometimes after
Klein has 'finished'. This test lets Klein finish its handling of the
request before doing more producing.
"""
app = self.app
request = requestMock(b"/resource")
@app.route("/resource", branch=True)
def producer(request):
return ProducingResource(request, [b"a", b"b", b"c", b"d"])
d = _render(self.kr, request, notifyFinish=False)
self.assertNotEqual(request.getWrittenData(), b"abcd", "The full "
"response should not have been written at this "
"point.")
while request.producer:
request.producer.resumeProducing()
self.assertEqual(self.successResultOf(d), None)
self.assertEqual(request.getWrittenData(), b"abcd")
self.assertEqual(request.writeCount, 4)
self.assertEqual(request.finishCount, 1)
self.assertEqual(request.producer, None)
def test_notFound(self):
request = requestMock(b"/fourohofour")
d = _render(self.kr, request)
self.assertFired(d)
request.setResponseCode.assert_called_with(404)
self.assertIn(b"404 Not Found", request.getWrittenData())
def test_renderUnicode(self):
app = self.app
request = requestMock(b"/snowman")
@app.route("/snowman")
def snowman(request):
return u'\u2603'
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b"\xE2\x98\x83")
def test_renderNone(self):
app = self.app
request = requestMock(b"/None")
@app.route("/None")
def none(request):
return None
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(), b'')
self.assertEqual(request.finishCount, 1)
self.assertEqual(request.writeCount, 1)
def test_staticRoot(self):
app = self.app
request = requestMock(b"/__init__.py")
@app.route("/", branch=True)
def root(request):
return File(os.path.dirname(__file__))
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
open(
os.path.join(
os.path.dirname(__file__), "__init__.py"), 'rb').read())
self.assertEqual(request.finishCount, 1)
def test_explicitStaticBranch(self):
app = self.app
request = requestMock(b"/static/__init__.py")
@app.route("/static/", branch=True)
def root(request):
return File(os.path.dirname(__file__))
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.getWrittenData(),
open(
os.path.join(
os.path.dirname(__file__), "__init__.py"), 'rb').read())
self.assertEqual(request.writeCount, 1)
self.assertEqual(request.finishCount, 1)
def test_staticDirlist(self):
app = self.app
request = requestMock(b"/")
@app.route("/", branch=True)
def root(request):
return File(os.path.dirname(__file__))
d = _render(self.kr, request)
self.assertFired(d)
self.assertIn(b'Directory listing', request.getWrittenData())
self.assertEqual(request.writeCount, 1)
self.assertEqual(request.finishCount, 1)
def test_addSlash(self):
app = self.app
request = requestMock(b"/foo")
@app.route("/foo/")
def foo(request):
return "foo"
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.setHeader.call_count, 3)
request.setHeader.assert_has_calls(
[call(b'Content-Type', b'text/html; charset=utf-8'),
call(b'Content-Length', b'259'),
call(b'Location', b'http://localhost:8080/foo/')])
def test_methodNotAllowed(self):
app = self.app
request = requestMock(b"/foo", method=b'DELETE')
@app.route("/foo", methods=['GET'])
def foo(request):
return "foo"
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.code, 405)
def test_methodNotAllowedWithRootCollection(self):
app = self.app
request = requestMock(b"/foo/bar", method=b'DELETE')
@app.route("/foo/bar", methods=['GET'])
def foobar(request):
return b"foo/bar"
@app.route("/foo/", methods=['DELETE'])
def foo(request):
return b"foo"
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.code, 405)
def test_noImplicitBranch(self):
app = self.app
request = requestMock(b"/foo")
@app.route("/")
def root(request):
return b"foo"
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.code, 404)
def test_strictSlashes(self):
app = self.app
request = requestMock(b"/foo/bar")
request_url = [None]
@app.route("/foo/bar/", strict_slashes=False)
def root(request):
request_url[0] = request.URLPath()
return b"foo"
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(str(request_url[0]),
"http://localhost:8080/foo/bar")
self.assertEqual(request.getWrittenData(), b'foo')
self.assertEqual(request.code, 200)
def test_URLPath(self):
app = self.app
request = requestMock(b'/egg/chicken')
request_url = [None]
@app.route("/egg/chicken")
def wooo(request):
request_url[0] = request.URLPath()
return b'foo'
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(str(request_url[0]),
'http://localhost:8080/egg/chicken')
def test_URLPath_root(self):
app = self.app
request = requestMock(b'/')
request_url = [None]
@app.route("/")
def root(request):
request_url[0] = request.URLPath()
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(str(request_url[0]), 'http://localhost:8080/')
def test_URLPath_traversedResource(self):
app = self.app
request = requestMock(b'/resource/foo')
request_url = [None]
class URLPathResource(Resource):
def render(self, request):
request_url[0] = request.URLPath()
def getChild(self, request, segment):
return self
@app.route("/resource/", branch=True)
def root(request):
return URLPathResource()
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(str(request_url[0]),
'http://localhost:8080/resource/foo')
def test_handlerRaises(self):
app = self.app
request = requestMock(b"/")
failures = []
class RouteFailureTest(Exception):
pass
@app.route("/")
def root(request):
def _capture_failure(f):
failures.append(f)
return f
return fail(RouteFailureTest("die")).addErrback(_capture_failure)
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.code, 500)
request.processingFailed.assert_called_once_with(failures[0])
self.flushLoggedErrors(RouteFailureTest)
def test_genericErrorHandler(self):
app = self.app
request = requestMock(b"/")
failures = []
class RouteFailureTest(Exception):
pass
@app.route("/")
def root(request):
raise RouteFailureTest("not implemented")
@app.handle_errors
def handle_errors(request, failure):
failures.append(failure)
request.setResponseCode(501)
return
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.code, 501)
assert not request.processingFailed.called
def test_typeSpecificErrorHandlers(self):
app = self.app
request = requestMock(b"/")
type_error_handled = False
generic_error_handled = False
failures = []
class TypeFilterTestError(Exception):
pass
@app.route("/")
def root(request):
return fail(TypeFilterTestError("not implemented"))
@app.handle_errors(TypeError)
def handle_type_error(request, failure):
global type_error_handled
type_error_handled = True
return
@app.handle_errors(TypeFilterTestError)
def handle_type_filter_test_error(request, failure):
failures.append(failure)
request.setResponseCode(501)
return
@app.handle_errors
def handle_generic_error(request, failure):
global generic_error_handled
generic_error_handled = True
return
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.processingFailed.called, False)
self.assertEqual(type_error_handled, False)
self.assertEqual(generic_error_handled, False)
self.assertEqual(len(failures), 1)
self.assertEqual(request.code, 501)
def test_notFoundException(self):
app = self.app
request = requestMock(b"/foo")
generic_error_handled = False
@app.route("/")
def root(request):
pass
@app.handle_errors(NotFound)
def handle_not_found(request, failure):
request.setResponseCode(404)
return b'Custom Not Found'
@app.handle_errors
def handle_generic_error(request, failure):
global generic_error_handled
generic_error_handled = True
return
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.processingFailed.called, False)
self.assertEqual(generic_error_handled, False)
self.assertEqual(request.code, 404)
self.assertEqual(request.getWrittenData(), b'Custom Not Found')
self.assertEqual(request.writeCount, 1)
def test_requestWriteAfterFinish(self):
app = self.app
request = requestMock(b"/")
@app.route("/")
def root(request):
request.finish()
return b'foo'
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(request.writeCount, 2)
self.assertEqual(request.getWrittenData(), b'')
[failure] = self.flushLoggedErrors(RuntimeError)
self.assertEqual(
str(failure.value),
("Request.write called on a request after Request.finish was "
"called."))
def test_requestFinishAfterConnectionLost(self):
app = self.app
request = requestMock(b"/")
finished = Deferred()
@app.route("/")
def root(request):
request.notifyFinish().addBoth(lambda _: finished.callback(b'foo'))
return finished
d = _render(self.kr, request)
def _eb(result):
[failure] = self.flushLoggedErrors(RuntimeError)
self.assertEqual(
str(failure.value),
("Request.finish called on a request after its connection was "
"lost; use Request.notifyFinish to keep track of this."))
d.addErrback(lambda _: finished)
d.addErrback(_eb)
self.assertNotFired(d)
request.connectionLost(ConnectionLost())
self.assertFired(d)
def test_routeHandlesRequestFinished(self):
app = self.app
request = requestMock(b"/")
cancelled = []
@app.route("/")
def root(request):
_d = Deferred()
_d.addErrback(cancelled.append)
request.notifyFinish().addCallback(lambda _: _d.cancel())
return _d
d = _render(self.kr, request)
request.finish()
self.assertFired(d)
cancelled[0].trap(CancelledError)
self.assertEqual(request.getWrittenData(), b'')
self.assertEqual(request.writeCount, 1)
self.assertEqual(request.processingFailed.call_count, 0)
def test_url_for(self):
app = self.app
request = requestMock(b'/foo/1')
relative_url = [None]
@app.route("/foo/<int:bar>")
def foo(request, bar):
krequest = IKleinRequest(request)
relative_url[0] = krequest.url_for('foo', {'bar': bar + 1})
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(relative_url[0], '/foo/2')
def test_cancelledDeferred(self):
app = self.app
request = requestMock(b"/")
inner_d = Deferred()
@app.route("/")
def root(request):
return inner_d
d = _render(self.kr, request)
inner_d.cancel()
self.assertFired(d)
self.flushLoggedErrors(CancelledError)
def test_external_url_for(self):
app = self.app
request = requestMock(b'/foo/1')
relative_url = [None]
@app.route("/foo/<int:bar>")
def foo(request, bar):
krequest = IKleinRequest(request)
relative_url[0] = krequest.url_for('foo', {'bar': bar + 1},
force_external=True)
d = _render(self.kr, request)
self.assertFired(d)
self.assertEqual(relative_url[0], 'http://localhost:8080/foo/2')
def test_cancelledIsEatenOnConnectionLost(self):
app = self.app
request = requestMock(b"/")
@app.route("/")
def root(request):
_d = Deferred()
request.notifyFinish().addErrback(lambda _: _d.cancel())
return _d
d = _render(self.kr, request)
self.assertNotFired(d)
request.connectionLost(ConnectionLost())
def _cb(result):
self.assertEqual(request.processingFailed.call_count, 0)
d.addErrback(lambda f: f.trap(ConnectionLost))
d.addCallback(_cb)
self.assertFired(d)
def test_cancelsOnConnectionLost(self):
app = self.app
request = requestMock(b"/")
handler_d = Deferred()
@app.route("/")
def root(request):
return handler_d
d = _render(self.kr, request)
self.assertNotFired(d)
request.connectionLost(ConnectionLost())
handler_d.addErrback(lambda f: f.trap(CancelledError))
d.addErrback(lambda f: f.trap(ConnectionLost))
d.addCallback(lambda _: handler_d)
self.assertFired(d)
def test_ensure_utf8_bytes(self):
self.assertEqual(ensure_utf8_bytes(u"abc"), b"abc")
self.assertEqual(ensure_utf8_bytes(u"\u2202"), b"\xe2\x88\x82")
self.assertEqual(ensure_utf8_bytes(b"\xe2\x88\x82"), b"\xe2\x88\x82")
def test_decodesPath(self):
"""
server_name, path_info, and script_name are decoded as UTF-8 before
being handed to werkzeug.
"""
request = requestMock(b"/f\xc3\xb6\xc3\xb6")
_render(self.kr, request)
kreq = IKleinRequest(request)
self.assertIsInstance(kreq.mapper.server_name, unicode)
self.assertIsInstance(kreq.mapper.path_info, unicode)
self.assertIsInstance(kreq.mapper.script_name, unicode)
def test_failedDecodePathInfo(self):
"""
If decoding of one of the URL parts (in this case PATH_INFO) fails, the
error is logged and 400 returned.
"""
request = requestMock(b"/f\xc3\xc3\xb6")
_render(self.kr, request)
rv = request.getWrittenData()
self.assertEqual(b"Non-UTF-8 encoding in URL.", rv)
self.assertEqual(1, len(self.flushLoggedErrors(UnicodeDecodeError)))
def test_urlDecodeErrorReprPy2(self):
"""
URLDecodeError.__repr__ formats properly.
"""
self.assertEqual(
"<URLDecodeError(errors=<type 'exceptions.ValueError'>)>",
repr(_URLDecodeError(ValueError)),
)
def test_urlDecodeErrorReprPy3(self):
"""
URLDecodeError.__repr__ formats properly.
"""
self.assertEqual(
"<URLDecodeError(errors=<class 'ValueError'>)>",
repr(_URLDecodeError(ValueError)),
)
if _PY3:
test_urlDecodeErrorReprPy2.skip = "Only works on Py2"
else:
test_urlDecodeErrorReprPy3.skip = "Only works on Py3"
class ExtractURLpartsTests(TestCase):
"""
Tests for L{klein.resource._extractURLparts}.
"""
def test_types(self):
"""
Returns the correct types.
"""
url_scheme, server_name, server_port, path_info, script_name = \
_extractURLparts(requestMock(b"/f\xc3\xb6\xc3\xb6"))
self.assertIsInstance(url_scheme, unicode)
self.assertIsInstance(server_name, unicode)
self.assertIsInstance(server_port, int)
self.assertIsInstance(path_info, unicode)
self.assertIsInstance(script_name, unicode)
def assertDecodingFailure(self, exception, part):
"""
Checks whether C{exception} consists of a single L{UnicodeDecodeError}
for C{part}.
"""
self.assertEqual(1, len(exception.errors))
actualPart, actualFail = exception.errors[0]
self.assertEqual(part, actualPart)
self.assertIsInstance(actualFail.value, UnicodeDecodeError)
def test_failServerName(self):
"""
Raises URLDecodeError if SERVER_NAME can't be decoded.
"""
request = requestMock(b"/foo")
request.getRequestHostname = lambda: b"f\xc3\xc3\xb6"
e = self.assertRaises(_URLDecodeError, _extractURLparts, request)
self.assertDecodingFailure(e, "SERVER_NAME")
def test_failPathInfo(self):
"""
Raises URLDecodeError if PATH_INFO can't be decoded.
"""
request = requestMock(b"/f\xc3\xc3\xb6")
e = self.assertRaises(_URLDecodeError, _extractURLparts, request)
self.assertDecodingFailure(e, "PATH_INFO")
def test_failScriptName(self):
"""
Raises URLDecodeError if SCRIPT_NAME can't be decoded.
"""
request = requestMock(b"/foo")
request.prepath = [b"f\xc3\xc3\xb6"]
e = self.assertRaises(_URLDecodeError, _extractURLparts, request)
self.assertDecodingFailure(e, "SCRIPT_NAME")
def test_failAll(self):
"""
If multiple parts fail, they all get appended to the errors list of
URLDecodeError.
"""
request = requestMock(b"/f\xc3\xc3\xb6")
request.prepath = [b"f\xc3\xc3\xb6"]
request.getRequestHostname = lambda: b"f\xc3\xc3\xb6"
e = self.assertRaises(_URLDecodeError, _extractURLparts, request)
self.assertEqual(
set(["SERVER_NAME", "PATH_INFO", "SCRIPT_NAME"]),
set(part for part, _ in e.errors)
)
def test_afUnixSocket(self):
"""
        Test proper handling of AF_UNIX sockets.
"""
request = requestMock(b"/f\xc3\xb6\xc3\xb6")
server_mock = Mock(Server)
server_mock.getRequestHostname = u'/var/run/twisted.socket'
request.host = server_mock
url_scheme, server_name, server_port, path_info, script_name = _extractURLparts(request)
self.assertIsInstance(url_scheme, unicode)
self.assertIsInstance(server_name, unicode)
self.assertIsInstance(server_port, int)
self.assertIsInstance(path_info, unicode)
self.assertIsInstance(script_name, unicode)
| {
"repo_name": "joac/klein",
"path": "src/klein/test/test_resource.py",
"copies": "2",
"size": "32047",
"license": "mit",
"hash": 4703557324631047000,
"line_mean": 26.7463203463,
"line_max": 96,
"alpha_frac": 0.5943146004,
"autogenerated": false,
"ratio": 3.992898081235983,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587212681635984,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import os
import numpy as np
from pyrl import fittools, runtools, tasktools, utils
from pyrl.figtools import apply_alpha, Figure
#/////////////////////////////////////////////////////////////////////////////////////////
def plot_trial(pg, m, init, init_b, rng, figspath, name):
context = {}
if 0 not in m.cohs:
context['cohs'] = [0] + m.cohs
trial = m.generate_trial_condition(rng, context)
U, Z, A, R, M, init, states_0, perf = pg.run_trials([trial], init=init)
if pg.baseline_net is not None:
(init_b, baseline_states_0, b,
rpe) = pg.baseline_run_trials(U, A, R, M, init=init_b)
else:
b = None
U = U[:,0,:]
Z = Z[:,0,:]
A = A[:,0,:]
R = R[:,0]
M = M[:,0]
t = int(np.sum(M))
w = 0.65
h = 0.18
x = 0.17
dy = h + 0.05
y0 = 0.08
y1 = y0 + dy
y2 = y1 + dy
y3 = y2 + dy
fig = Figure(h=6)
plots = {'observables': fig.add([x, y3, w, h]),
'policy': fig.add([x, y2, w, h]),
'actions': fig.add([x, y1, w, h]),
'rewards': fig.add([x, y0, w, h])}
time = trial['time']
dt = time[1] - time[0]
act_time = time[:t]
obs_time = time[:t] + dt
reward_time = act_time + dt
xlim = (0, max(time))
#-------------------------------------------------------------------------------------
# Observables
#-------------------------------------------------------------------------------------
plot = plots['observables']
plot.plot(obs_time, U[:t,0], 'o', ms=5, mew=0, mfc=Figure.colors('blue'))
plot.plot(obs_time, U[:t,0], lw=1.25, color=Figure.colors('blue'), label='Fixation')
plot.plot(obs_time, U[:t,1], 'o', ms=5, mew=0, mfc=Figure.colors('orange'))
plot.plot(obs_time, U[:t,1], lw=1.25, color=Figure.colors('orange'), label='Left')
plot.plot(obs_time, U[:t,2], 'o', ms=5, mew=0, mfc=Figure.colors('purple'))
plot.plot(obs_time, U[:t,2], lw=1.25, color=Figure.colors('purple'), label='Right')
try:
plot.plot(obs_time, U[:t,3], 'o', ms=5, mew=0, mfc=Figure.colors('green'))
plot.plot(obs_time, U[:t,3], lw=1.25, color=Figure.colors('green'), label='Sure')
except IndexError:
pass
plot.xlim(*xlim)
plot.ylim(0, 1)
plot.ylabel('Observables')
coh = trial['left_right']*trial['coh']
if coh < 0:
color = Figure.colors('orange')
elif coh > 0:
color = Figure.colors('purple')
else:
color = Figure.colors('k')
plot.text_upper_right('Coh = {:.1f}\%'.format(coh), color=color)
props = {'prop': {'size': 7}, 'handlelength': 1.2,
'handletextpad': 1.2, 'labelspacing': 0.8}
plot.legend(bbox_to_anchor=(1.2, 0.8), **props)
plot.highlight(0, m.iti)
#-------------------------------------------------------------------------------------
# Policy
#-------------------------------------------------------------------------------------
plot = plots['policy']
plot.plot(act_time, Z[:t,0], 'o', ms=5, mew=0, mfc=Figure.colors('blue'))
plot.plot(act_time, Z[:t,0], lw=1.25, color=Figure.colors('blue'),
label='Fixate')
plot.plot(act_time, Z[:t,1], 'o', ms=5, mew=0, mfc=Figure.colors('orange'))
plot.plot(act_time, Z[:t,1], lw=1.25, color=Figure.colors('orange'),
label='Saccade LEFT')
plot.plot(act_time, Z[:t,2], 'o', ms=5, mew=0, mfc=Figure.colors('purple'))
plot.plot(act_time, Z[:t,2], lw=1.25, color=Figure.colors('purple'),
label='Saccade RIGHT')
try:
plot.plot(act_time, Z[:t,3], 'o', ms=5, mew=0, mfc=Figure.colors('green'))
plot.plot(act_time, Z[:t,3], lw=1.25, color=Figure.colors('green'),
label='Saccade SURE')
except IndexError:
pass
plot.xlim(*xlim)
plot.ylim(0, 1)
plot.ylabel('Action probabilities')
props = {'prop': {'size': 7}, 'handlelength': 1.2,
'handletextpad': 1.2, 'labelspacing': 0.8}
plot.legend(bbox_to_anchor=(1.27, 0.8), **props)
plot.highlight(0, m.iti)
#-------------------------------------------------------------------------------------
# Actions
#-------------------------------------------------------------------------------------
plot = plots['actions']
actions = [np.argmax(a) for a in A[:t]]
plot.plot(act_time, actions, 'o', ms=5, mew=0, mfc=Figure.colors('red'))
plot.plot(act_time, actions, lw=1.25, color=Figure.colors('red'))
plot.xlim(*xlim)
yticklabels = ['Fixate', 'Saccade LEFT', 'Saccade RIGHT']
if A.shape[1] == 4:
yticklabels += ['Saccade sure']
plot.yticklabels(yticklabels)
plot.ylim(0, len(yticklabels)-1)
plot.yticks(range(len(yticklabels)))
plot.ylabel('Action')
plot.highlight(0, m.iti)
#-------------------------------------------------------------------------------------
# Rewards
#-------------------------------------------------------------------------------------
plot = plots['rewards']
plot.plot(reward_time, R[:t], 'o', ms=5, mew=0, mfc=Figure.colors('red'))
plot.plot(reward_time, R[:t], lw=1.25, color=Figure.colors('red'))
# Prediction
if b is not None:
plot.plot(reward_time, b[:t], 'o', ms=5, mew=0, mfc=Figure.colors('orange'))
plot.plot(reward_time, b[:t], lw=1.25, color=Figure.colors('orange'))
plot.xlim(*xlim)
plot.ylim(m.R_TERMINATE, m.R_CORRECT)
plot.xlabel('Time (ms)')
plot.ylabel('Reward')
plot.highlight(0, m.iti)
#-------------------------------------------------------------------------------------
fig.save(path=figspath, name=name)
fig.close()
#-------------------------------------------------------------------------------------
return init, init_b
def compute_dprime(trials, perf, r):
"""
Compute d' for choice.
"""
N = r.shape[-1]
L = np.zeros(N)
L2 = np.zeros(N)
R = np.zeros(N)
R2 = np.zeros(N)
nL = 0
nR = 0
for n, trial in enumerate(trials):
if perf.choices[n] is None:
continue
stimulus = trial['epochs']['stimulus']
r_n = r[stimulus,n]
if perf.choices[n] == 'L':
L += np.sum(r_n, axis=0)
L2 += np.sum(r_n**2, axis=0)
nL += r_n.shape[0]
else:
R += np.sum(r_n, axis=0)
R2 += np.sum(r_n**2, axis=0)
nR += r_n.shape[0]
mean_L = L/nL
var_L = L2/nL - mean_L**2
mean_R = R/nR
var_R = R2/nR - mean_R**2
return -utils.div(mean_L - mean_R, np.sqrt((var_L + var_R)/2))
def get_preferred_targets(trials, perf, r, verbose=False):
"""
Determine preferred targets.
"""
dprime = compute_dprime(trials, perf, r)
if verbose:
for i in xrange(len(dprime)):
if abs(dprime[i]) > 0.5:
print(i, dprime[i])
return 2*(dprime > 0) - 1
def sort_func(s, preferred_targets, target, trial):
choices = preferred_targets*target
if s == 'choice':
return [(choice,) for choice in choices]
elif s == 'motion-choice':
cohs = preferred_targets*trial['left_right_m']*trial['coh_m']
return [(choice, coh, trial['context']) for choice, coh in zip(choices, cohs)]
elif s == 'color-choice':
cohs = preferred_targets*trial['left_right_c']*trial['coh_c']
return [(choice, coh, trial['context']) for choice, coh in zip(choices, cohs)]
elif s == 'context-choice':
return [(choice, trial['context']) for choice in choices]
elif s == 'all':
cohs_m = preferred_targets*trial['left_right_m']*trial['coh_m']
cohs_c = preferred_targets*trial['left_right_c']*trial['coh_c']
return [(choice, coh_m, coh_c, trial['context'])
for choice, coh_m, coh_c in zip(choices, cohs_m, cohs_c)]
else:
raise ValueError
def sort(trialsfile, all_plots, units=None, network='p', **kwargs):
"""
Sort trials.
"""
# Load trials
trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = utils.load(trialsfile)
# Which network?
if network == 'p':
r = r_p
else:
r = r_v
# Number of units
N = r.shape[-1]
# Same for every trial
time = trials[0]['time']
Ntime = len(time)
# Aligned time
time_a = np.concatenate((-time[1:][::-1], time))
Ntime_a = len(time_a)
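    # Added note: time_a mirrors the trial's time axis around the alignment point,
    # so the array has length 2*Ntime - 1 and index Ntime - 1 is where each trial's
    # alignment sample (taken from the stimulus epoch below) is written; earlier
    # samples fill to the left of that index and later samples to the right.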
#=====================================================================================
# Preferred targets
#=====================================================================================
preferred_targets = get_preferred_targets(trials, perf, r)
#=====================================================================================
# Sort trials
#=====================================================================================
sortby = ['choice', 'motion-choice', 'color-choice', 'context-choice', 'all']
#-------------------------------------------------------------------------------------
# Sort
#-------------------------------------------------------------------------------------
sorted_trials = {s: {} for s in sortby}
X = 0
X2 = 0
NX = 0
for n, trial in enumerate(trials):
if perf.choices[n] == 'R':
target = +1
else:
target = -1
if perf.corrects[n]:
for s in sortby:
sorted_trial = sort_func(s, preferred_targets, target, trial)
for u, cond in enumerate(sorted_trial):
sorted_trials[s].setdefault(cond, []).append((n, u))
# For normalizing
Mn = np.tile(M[:,n], (N,1)).T
Rn = r[:,n]*Mn
X += np.sum(Rn)
X2 += np.sum(Rn**2)
NX += np.sum(Mn)
mean = X/NX
sd = np.sqrt(X2/NX - mean**2)
#-------------------------------------------------------------------------------------
# Average within conditions
#-------------------------------------------------------------------------------------
for s in sorted_trials:
# Collect
trials_by_cond = {}
for cond, n_u in sorted_trials[s].items():
# Storage
trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
'n': np.zeros((Ntime_a, N))})
for n, u in n_u:
# Firing rates
Mn = M[:,n]
Rnu = r[:,n,u]*Mn
# Normalize
Rnu = (Rnu - mean)/sd
# Align point
t0 = trials[n]['epochs']['stimulus'][0] - 1
# Before
n_b = Rnu[:t0].shape[0]
trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1,u] += Rnu[:t0]
trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1,u] += Mn[:t0]
# After
n_a = Rnu[t0:].shape[0]
trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a,u] += Rnu[t0:]
trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a,u] += Mn[t0:]
# Average
for cond in trials_by_cond:
trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
trials_by_cond[cond]['n'])
# Save
sorted_trials[s] = trials_by_cond
if all_plots is None:
return time_a, sorted_trials
#=====================================================================================
# Plot functions
#=====================================================================================
lw = kwargs.get('lw', 1)
linestyles = {
+1: '-',
-1: '--'
}
def plot_choice(plot, unit, w):
t = time_a[w]
y = [[0, 0.5]]
for (choice,), r_cond in sorted_trials['choice'].items():
plot.plot(t, r_cond[w,unit], linestyles[choice], color=Figure.colors('red'), lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_motion_choice(plot, unit, w):
cohs = []
for (choice, signed_coh, context) in sorted_trials['motion-choice']:
cohs.append(abs(signed_coh))
cohs = sorted(list(set(cohs)))
t = time_a[w]
y = [[0, 0.5]]
for (choice, signed_coh, context), r_cond in sorted_trials['motion-choice'].items():
if context != 'm':
continue
idx = cohs.index(abs(signed_coh))
basecolor = 'k'
if idx == 0:
color = apply_alpha(basecolor, 0.4)
elif idx == 1:
color = apply_alpha(basecolor, 0.7)
else:
color = apply_alpha(basecolor, 1)
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_color_choice(plot, unit, w):
cohs = []
for (choice, signed_coh, context) in sorted_trials['color-choice']:
cohs.append(abs(signed_coh))
cohs = sorted(list(set(cohs)))
t = time_a[w]
y = [[0, 0.5]]
for (choice, signed_coh, context), r_cond in sorted_trials['color-choice'].items():
if context != 'c':
continue
idx = cohs.index(abs(signed_coh))
basecolor = Figure.colors('darkblue')
if idx == 0:
color = apply_alpha(basecolor, 0.4)
elif idx == 1:
color = apply_alpha(basecolor, 0.7)
else:
color = apply_alpha(basecolor, 1)
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_context_choice(plot, unit, w):
t = time_a[w]
y = [[0, 0.5]]
for (choice, context), r_cond in sorted_trials['context-choice'].items():
if context == 'm':
color = 'k'
else:
color = Figure.colors('darkblue')
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w, unit])
plot.lim('y', y)
return t, y
#=====================================================================================
# Plot
#=====================================================================================
if units is not None:
tmin = kwargs.get('tmin', 100)
tmax = kwargs.get('tmax', 850)
w, = np.where((tmin <= time_a ) & (time_a <= tmax))
for plots, unit in zip(all_plots, units):
yall = []
plot = plots['choice']
t, y = plot_choice(plot, unit, w)
yall += y
plot = plots['motion-choice']
t, y = plot_motion_choice(plot, unit, w)
yall += y
plot = plots['color-choice']
t, y = plot_color_choice(plot, unit, w)
yall += y
plot = plots['context-choice']
t, y = plot_context_choice(plot, unit, w)
yall += y
else:
figspath, name = all_plots
for unit in xrange(N):
w = 2.5
h = 6
fig = Figure(w=w, h=h, axislabelsize=7.5, ticklabelsize=6.5)
w = 0.55
h = 0.17
x0 = 0.3
y0 = 0.77
dy = 0.06
fig.add('choice', [x0, y0, w, h])
fig.add('motion-choice', [x0, fig['choice'].y-dy-h, w, h])
fig.add('color-choice', [x0, fig['motion-choice'].y-dy-h, w, h])
fig.add('context-choice', [x0, fig['color-choice'].y-dy-h, w, h])
#-----------------------------------------------------------------------------
w, = np.where((-100 <= time_a ) & (time_a <= 750))
yall = []
plot = fig['choice']
t, y = plot_choice(plot, unit, w)
yall += y
plot = fig['motion-choice']
t, y = plot_motion_choice(plot, unit, w)
yall += y
plot = fig['color-choice']
t, y = plot_color_choice(plot, unit, w)
yall += y
plot = fig['context-choice']
t, y = plot_context_choice(plot, unit, w)
yall += y
for plot in fig.plots.values():
plot.lim('y', yall)
#-----------------------------------------------------------------------------
fig.save(path=figspath, name=name+'_{}{:03d}'.format(network, unit))
fig.close()
#/////////////////////////////////////////////////////////////////////////////////////////
def sort_statespace(trialsfile, all_plots, units=None, network='p', **kwargs):
"""
Sort trials (for state-space analysis).
"""
# Load trials
trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = utils.load(trialsfile)
# Which network?
if network == 'p':
r = r_p
else:
r = r_v
# Number of units
N = r.shape[-1]
# Same for every trial
time = trials[0]['time']
Ntime = len(time)
# Aligned time
time_a = np.concatenate((-time[1:][::-1], time))
Ntime_a = len(time_a)
#=====================================================================================
# Preferred targets
#=====================================================================================
preferred_targets = get_preferred_targets(trials, perf, r)
#=====================================================================================
# Sort trials
#=====================================================================================
sortby = ['choice', 'motion-choice', 'color-choice', 'context-choice', 'all']
#-------------------------------------------------------------------------------------
# Sort
#-------------------------------------------------------------------------------------
sorted_trials = {s: {} for s in sortby}
X = 0
X2 = 0
NX = 0
for n, trial in enumerate(trials):
if perf.choices[n] == 'R':
target = +1
else:
target = -1
if perf.corrects[n]:
for s in sortby:
sorted_trial = sort_func(s, preferred_targets, target, trial)
for u, cond in enumerate(sorted_trial):
sorted_trials[s].setdefault(cond, []).append((n, u))
# For normalizing
Mn = np.tile(M[:,n], (N,1)).T
Rn = r[:,n]*Mn
X += np.sum(Rn, axis=0)
X2 += np.sum(Rn**2, axis=0)
NX += np.sum(Mn, axis=0)
mean = X/NX
sd = np.sqrt(X2/NX - mean**2)
#-------------------------------------------------------------------------------------
# Average within conditions
#-------------------------------------------------------------------------------------
for s in sorted_trials:
# Collect
trials_by_cond = {}
for cond, n_u in sorted_trials[s].items():
# Storage
trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
'n': np.zeros((Ntime_a, N))})
for n, u in n_u:
# Firing rates
Mn = M[:,n]
Rnu = r[:,n,u]*Mn
# Normalize
Rnu = (Rnu - mean[u])/sd[u]
# Align point
t0 = trials[n]['epochs']['stimulus'][0] - 1
# Before
n_b = Rnu[:t0].shape[0]
trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1,u] += Rnu[:t0]
trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1,u] += Mn[:t0]
# After
n_a = Rnu[t0:].shape[0]
trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a,u] += Rnu[t0:]
trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a,u] += Mn[t0:]
# Average
for cond in trials_by_cond:
trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
trials_by_cond[cond]['n'])
# Save
sorted_trials[s] = trials_by_cond
if all_plots is None:
return time_a, sorted_trials
#=====================================================================================
# Plot functions
#=====================================================================================
lw = kwargs.get('lw', 1)
linestyles = {
+1: '-',
-1: '--'
}
def plot_choice(plot, unit, w):
t = time_a[w]
y = [[0, 0.5]]
for (choice,), r_cond in sorted_trials['choice'].items():
plot.plot(t, r_cond[w,unit], linestyles[choice], color=Figure.colors('red'), lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_motion_choice(plot, unit, w):
cohs = []
for (choice, signed_coh, context) in sorted_trials['motion-choice']:
cohs.append(abs(signed_coh))
cohs = sorted(list(set(cohs)))
t = time_a[w]
y = [[0, 0.5]]
for (choice, signed_coh, context), r_cond in sorted_trials['motion-choice'].items():
if context != 'm':
continue
idx = cohs.index(abs(signed_coh))
basecolor = 'k'
if idx == 0:
color = apply_alpha(basecolor, 0.4)
elif idx == 1:
color = apply_alpha(basecolor, 0.7)
else:
color = apply_alpha(basecolor, 1)
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_color_choice(plot, unit, w):
cohs = []
for (choice, signed_coh, context) in sorted_trials['color-choice']:
cohs.append(abs(signed_coh))
cohs = sorted(list(set(cohs)))
t = time_a[w]
y = [[0, 0.5]]
for (choice, signed_coh, context), r_cond in sorted_trials['color-choice'].items():
if context != 'c':
continue
idx = cohs.index(abs(signed_coh))
basecolor = Figure.colors('darkblue')
if idx == 0:
color = apply_alpha(basecolor, 0.4)
elif idx == 1:
color = apply_alpha(basecolor, 0.7)
else:
color = apply_alpha(basecolor, 1)
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w,unit])
plot.lim('y', y)
return t, y
def plot_context_choice(plot, unit, w):
t = time_a[w]
y = [[0, 0.5]]
for (choice, context), r_cond in sorted_trials['context-choice'].items():
if context == 'm':
color = 'k'
else:
color = Figure.colors('darkblue')
plot.plot(t, r_cond[w,unit], linestyles[choice], color=color, lw=lw)
y.append(r_cond[w, unit])
plot.lim('y', y)
return t, y
#=====================================================================================
# Plot
#=====================================================================================
if units is not None:
tmin = kwargs.get('tmin', 100)
tmax = kwargs.get('tmax', 850)
w, = np.where((tmin <= time_a ) & (time_a <= tmax))
for plots, unit in zip(all_plots, units):
yall = []
plot = plots['choice']
t, y = plot_choice(plot, unit, w)
yall += y
plot = plots['motion-choice']
t, y = plot_motion_choice(plot, unit, w)
yall += y
plot = plots['color-choice']
t, y = plot_color_choice(plot, unit, w)
yall += y
plot = plots['context-choice']
t, y = plot_context_choice(plot, unit, w)
yall += y
else:
figspath, name = all_plots
for unit in xrange(N):
w = 2.5
h = 6
fig = Figure(w=w, h=h, axislabelsize=7.5, ticklabelsize=6.5)
w = 0.55
h = 0.17
x0 = 0.3
y0 = 0.77
dy = 0.06
fig.add('choice', [x0, y0, w, h])
fig.add('motion-choice', [x0, fig['choice'].y-dy-h, w, h])
fig.add('color-choice', [x0, fig['motion-choice'].y-dy-h, w, h])
fig.add('context-choice', [x0, fig['color-choice'].y-dy-h, w, h])
#-----------------------------------------------------------------------------
w, = np.where((-100 <= time_a ) & (time_a <= 750))
yall = []
plot = fig['choice']
t, y = plot_choice(plot, unit, w)
yall += y
plot = fig['motion-choice']
t, y = plot_motion_choice(plot, unit, w)
yall += y
plot = fig['color-choice']
t, y = plot_color_choice(plot, unit, w)
yall += y
plot = fig['context-choice']
t, y = plot_context_choice(plot, unit, w)
yall += y
for plot in fig.plots.values():
plot.lim('y', yall)
#-----------------------------------------------------------------------------
fig.save(path=figspath, name=name+'_{}{:03d}'.format(network, unit))
fig.close()
#/////////////////////////////////////////////////////////////////////////////////////////
def is_active(r):
return np.std(r, axis=0) > 0.1
def get_active_units(r, M):
N = r.shape[-1]
M_ = (np.tile(M.T, (N, 1, 1))).T
r_ = r*M_
n = np.sum(M)
var = (r_**2).sum(axis=0).sum(axis=0)/n - (r_.sum(axis=0).sum(axis=0)/n)**2
return np.where(np.sqrt(var) > 0.2)[0]
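# Added note: get_active_units keeps units whose masked firing-rate standard
# deviation, sqrt(E[r^2] - E[r]^2) computed only over valid (M == 1) samples
# pooled across time and trials, exceeds 0.2.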
# Regression coefficients
CHOICE = 0
MOTION = 1
COLOUR = 2
CONTEXT = 3
CONSTANT = 4
CHOICE_MOTION = 5
CHOICE_COLOUR = 6
CHOICE_CONTEXT = 7
MOTION_COLOUR = 8
MOTION_CONTEXT = 9
COLOUR_CONTEXT = 10
nreg = 11
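# Added sketch of the regression model assumed by statespace() below: for every unit i
# and time step k, the normalized rate on trial j is fit as
#     r[i, k, j] ~ sum_v beta[i, k, v] * F[i, v, j],
# where the regressors F are the four signed task variables (choice, motion, colour,
# context), their pairwise interactions, and a constant term.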
def plot_taskaxes(plot, yax, p_vc, basecolor):
abscohs = []
for choice, coh, context in p_vc:
abscohs.append(abs(coh))
abscohs = sorted(list(set(abscohs)))
#-------------------------------------------------------------------------------------
# Subtract mean
#-------------------------------------------------------------------------------------
p = p_vc.values()[0]
Xchoice = np.zeros_like(p[CHOICE])
Xmotion = np.zeros_like(p[MOTION])
Xcolour = np.zeros_like(p[COLOUR])
for p in p_vc.values():
Xchoice += p[CHOICE]
Xmotion += p[MOTION]
Xcolour += p[COLOUR]
mean_choice = Xchoice/len(p_vc)
mean_motion = Xmotion/len(p_vc)
mean_colour = Xcolour/len(p_vc)
for cond, p in p_vc.items():
p[CHOICE] -= mean_choice
p[MOTION] -= mean_motion
p[COLOUR] -= mean_colour
#-------------------------------------------------------------------------------------
xall = []
yall = []
for cond, p in p_vc.items():
idx = abscohs.index(abs(cond[1]))
if idx == 0:
color = apply_alpha(basecolor, 0.4)
elif idx == 1:
color = apply_alpha(basecolor, 0.7)
else:
color = apply_alpha(basecolor, 1)
if cond[1] > 0:
prop = dict(mfc=color, mec=color, ms=2.5, mew=0.5)
else:
prop = dict(mfc='w', mec=color, ms=2.5, mew=0.5)
plot.plot(p[CHOICE], p[yax], '-', color=color, lw=0.75)
plot.plot(p[CHOICE][::2], p[yax][::2], 'o', color=color, **prop)
xall.append(p[CHOICE])
yall.append(p[yax])
if yax == MOTION:
plot.ylabel('Motion')
elif yax == COLOUR:
plot.ylabel('Color')
return np.concatenate(xall), np.concatenate(yall)
def plot_statespace(units, t, sorted_trials, Q, plots):
# Task axes
M = Q.T
# Epoch to plot
w, = np.where((0 <= t) & (t <= 800))
# Down-sample
dt = t[1] - t[0]
step = int(50/dt)
w = w[::step]
# Colors
color_m = 'k'
color_c = Figure.colors('darkblue')
xall = []
yall = []
#-------------------------------------------------------------------------------------
# Labels
#-------------------------------------------------------------------------------------
plots['c1'].xlabel('Choice')
#-------------------------------------------------------------------------------------
# Motion context: motion vs. choice, sorted by coherence
#-------------------------------------------------------------------------------------
plot = plots['m1']
p_vc = {}
for cond, r in sorted_trials['motion-choice'].items():
if cond[2] == 'm':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, MOTION, p_vc, color_m)
xall.append(x)
yall.append(y)
plot.ylabel('Motion')
#-------------------------------------------------------------------------------------
# Motion context: motion vs. choice, sorted by coherence
#-------------------------------------------------------------------------------------
plot = plots['m2']
p_vc = {}
for cond, r in sorted_trials['motion-choice'].items():
if cond[2] == 'm':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, COLOUR, p_vc, color_m)
xall.append(x)
yall.append(y)
#-------------------------------------------------------------------------------------
# Motion context: colour vs. choice, sorted by colour
#-------------------------------------------------------------------------------------
plot = plots['m3']
p_vc = {}
for cond, r in sorted_trials['color-choice'].items():
if cond[2] == 'm':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, COLOUR, p_vc, color_c)
xall.append(x)
yall.append(y)
#-------------------------------------------------------------------------------------
# Colour context: motion vs. choice, sorted by motion
#-------------------------------------------------------------------------------------
plot = plots['c1']
p_vc = {}
for cond, r in sorted_trials['motion-choice'].items():
if cond[2] == 'c':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, MOTION, p_vc, color_m)
xall.append(x)
yall.append(y)
#-------------------------------------------------------------------------------------
# Colour context: motion vs. choice, sorted by colour
#-------------------------------------------------------------------------------------
plot = plots['c2']
p_vc = {}
for cond, r in sorted_trials['color-choice'].items():
if cond[2] == 'c':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, MOTION, p_vc, color_c)
xall.append(x)
yall.append(y)
#-------------------------------------------------------------------------------------
# Colour context: colour vs. choice, sorted by colour
#-------------------------------------------------------------------------------------
plot = plots['c3']
p_vc = {}
for cond, r in sorted_trials['color-choice'].items():
if cond[2] == 'c':
p_vc[cond] = M.dot(r.T[units,:][:,w])
x, y = plot_taskaxes(plot, COLOUR, p_vc, color_c)
xall.append(x)
yall.append(y)
#-------------------------------------------------------------------------------------
# Shared axes
#-------------------------------------------------------------------------------------
xall = np.concatenate(xall)
yall = np.concatenate(yall)
for plot in plots.values():
#plot.aspect(1.5)
plot.lim('x', xall)
plot.lim('y', yall)
def statespace(trialsfile, plots=None, dt_reg=50, **kwargs):
"""
State-space analysis.
"""
# Load trials
trials_, U, Z, Z_b, A, P, M, perf, r_p, r_v = utils.load(trialsfile)
# Use policy network for this analysis
r = r_p
N = r.shape[-1]
# Time step
time = trials_[0]['time']
Ntime = len(time)
dt = time[1] - time[0]
step = int(dt_reg/dt)
#=====================================================================================
# Setup
#=====================================================================================
# Active units
units = get_active_units(r, M)
print("[ mante.statespace ] Performing regression on {} active units."
.format(len(units)))
# Preferred targets for active units
preferred_targets = get_preferred_targets(trials_, perf, r)[units]
# Stimulus period
stimulus = np.asarray(trials_[0]['epochs']['stimulus'])[::step]
trials = []
cohs_m = []
cohs_c = []
for n, trial_ in enumerate(trials_):
if perf.choices[n] is None:
continue
cohs_m.append(trial_['coh_m'])
cohs_c.append(trial_['coh_c'])
trial = {}
trial['target'] = +1 if perf.choices[n] == 'R' else -1
trial['t'] = time[stimulus]
trial['r'] = r[stimulus,n,:][:,units].T
trials.append(trial)
maxcoh_m = max(cohs_m)
maxcoh_c = max(cohs_c)
#-------------------------------------------------------------------------------------
# Normalize
#-------------------------------------------------------------------------------------
X = 0
X2 = 0
n = 0
for trial in trials:
r = trial['r']
X += np.sum(r, axis=1)
X2 += np.sum(r**2, axis=1)
n += r.shape[1]
mean = X/n
sd = np.sqrt(X2/n - mean**2)
mean = np.tile(mean, (r.shape[1], 1)).T
sd = np.tile(sd, (r.shape[1], 1)).T
for trial in trials:
trial['r'] = (trial['r'] - mean)/sd
#-------------------------------------------------------------------------------------
# Regress
#-------------------------------------------------------------------------------------
nunits, ntime = trials[0]['r'].shape
ntrials = len(trials)
# Coefficient matrix
r = np.zeros((nunits, ntime, ntrials))
F = np.zeros((nunits, nreg, ntrials))
for i, trial in enumerate(trials):
info = trials_[i]
# First-order terms
r[:,:,i] = trial['r']
F[:,CHOICE,i] = preferred_targets*trial['target']
F[:,MOTION,i] = preferred_targets*info['left_right_m']*info['coh_m']/maxcoh_m
F[:,COLOUR,i] = preferred_targets*info['left_right_c']*info['coh_c']/maxcoh_c
F[:,CONTEXT,i] = +1 if info['context'] == 'm' else -1
# Interaction terms
F[:,CHOICE_MOTION, i] = F[:,CHOICE,i]*F[:,MOTION,i]
F[:,CHOICE_COLOUR, i] = F[:,CHOICE,i]*F[:,COLOUR,i]
F[:,CHOICE_CONTEXT,i] = F[:,CHOICE,i]*F[:,CONTEXT,i]
F[:,MOTION_COLOUR, i] = F[:,MOTION,i]*F[:,COLOUR,i]
F[:,MOTION_CONTEXT,i] = F[:,MOTION,i]*F[:,CONTEXT,i]
F[:,COLOUR_CONTEXT,i] = F[:,COLOUR,i]*F[:,CONTEXT,i]
F[:,CONSTANT,:] = 1
# Regression coefficients
beta = np.zeros((nunits, ntime, nreg))
for i in xrange(nunits):
A = np.linalg.inv(F[i].dot(F[i].T)).dot(F[i])
for k in xrange(ntime):
beta[i,k] = A.dot(r[i,k])
if np.any(np.isnan(beta[i,k])):
raise RuntimeError("[ mante.regress ] Regression failed.")
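    # Added note: the loop above solves per-unit ordinary least squares via the normal
    # equations, beta[i, k] = (F_i F_i^T)^{-1} F_i r[i, k], with trials as observations;
    # np.linalg.pinv would be a more forgiving alternative if F_i F_i^T were singular.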
#-------------------------------------------------------------------------------------
# Sort trials
#-------------------------------------------------------------------------------------
utils.println("[ mante.statespace ] Sorting trials ...")
time_a, sorted_trials = sort_statespace(trialsfile, None)
print(" done!")
#-------------------------------------------------------------------------------------
# Normalize within conditions
#-------------------------------------------------------------------------------------
'''
for s in sorted_trials:
# Normalize
X = 0
X2 = 0
n = 0
for cond, r_ in sorted_trials[s].items():
r = r_.T
X += np.sum(r, axis=1)
X2 += np.sum(r**2, axis=1)
n += r.shape[1]
mean = X/n
std = np.sqrt(X2/n - mean**2)
mean = np.tile(mean, (ntime, 1)).T
        std = np.tile(std, (ntime, 1)).T
for cond, r in sorted_trials[s].items():
sorted_trials[s][cond] = (r - mean)/std
'''
#-------------------------------------------------------------------------------------
# Denoising matrix
#-------------------------------------------------------------------------------------
'''
all_conditions = sorted_trials['all']
for cond, r in all_conditions.items():
all_conditions[cond] = r.T[units,::step]
# Data matrix
X = np.zeros((all_conditions.values()[0].shape[0],
len(all_conditions)*all_conditions.values()[0].shape[1]))
c = 0
for cond, r in sorted_trials['all'].items():
X[:,c:c+r.shape[1]] = r
c += r.shape[1]
U, S, V = np.linalg.svd(X.T)
assert np.all(S[:-1] >= S[1:])
npca = 12
W = V[:npca,:]
D = (W.T).dot(W)
assert np.all(D.T == D)
'''
#-------------------------------------------------------------------------------------
# Task axes
#-------------------------------------------------------------------------------------
# Rearrange from (units, time, reg) to (reg, time, units)
beta = np.swapaxes(beta, 0, 2)
# Denoise
# beta = beta.dot(D.T)
# Time-independent regression vectors
beta_max = np.zeros((nreg, nunits))
for v in xrange(nreg):
imax = np.argmax(np.linalg.norm(beta[v], axis=1))
beta_max[v] = beta[v,imax]
Bmax = beta_max[:4].T
Q, R = np.linalg.qr(Bmax)
Q = Q*np.sign(np.diag(R))
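    # Added note: QR-orthogonalizing the choice/motion/colour/context regression
    # vectors yields the task axes; multiplying each column of Q by sign(diag(R))
    # keeps every axis pointing in the same direction as its original beta vector.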
#=====================================================================================
# Plot regression coefficients
#=====================================================================================
if isinstance(plots, tuple):
figspath, name = plots
w = 4.5
r = 1.4
fig = Figure(w=w, r=r)
x0 = 0.15
y0 = 0.1
w = 0.3
h = w/r
DX = 0.17
DY = 0.1
fig.add('context-motion', [x0, y0, w, h])
fig.add('context-color', [fig[-1].right+DX, y0, w, h])
fig.add('context-choice', [x0, fig[-1].top+DY, w, h])
fig.add('color-motion', [fig[-1].right+DX, fig[-1].y, w, h])
fig.add('motion-choice', [x0, fig[-1].top+DY, w, h])
fig.add('color-choice', [fig[-1].right+DX, fig[-1].y, w, h])
#---------------------------------------------------------------------------------
regaxes = {'choice': CHOICE, 'motion': MOTION,
'color': COLOUR, 'context': CONTEXT}
xall = []
yall = []
for k, plot in fig.plots.items():
Y, X = k.split('-')
plot.equal()
plot.xlabel(X.capitalize())
plot.ylabel(Y.capitalize())
x = Q[:,regaxes[X]]
y = Q[:,regaxes[Y]]
plot.plot(x, y, 'o', mfc='0.2', mec='w', ms=2.5, mew=0.3, zorder=10)
xall += [x, -x]
yall += [y, -y]
#M = 0.4
#assert np.all(abs(x) <= M)
#assert np.all(abs(y) <= M)
#plot.xlim(-M, M)
#plot.xticks([-M, 0, M])
#plot.ylim(-M, M)
#plot.yticks([-M, 0, M])
# Limits
for k, plot in fig.plots.items():
plot.lim('x', xall)
plot.lim('y', yall)
plot.hline(0, lw=0.5, color='k', zorder=1)
plot.vline(0, lw=0.5, color='k', zorder=1)
#---------------------------------------------------------------------------------
fig.save(path=figspath, name='regress_coeffs')
fig.close()
#=====================================================================================
if isinstance(plots, dict):
plot_statespace(units, time_a, sorted_trials, Q, plots)
else:
figspath, name = plots
w = utils.mm_to_inch(174)
r = 0.7
fig = Figure(w=w, r=r, thickness=0.8, axislabelsize=9, ticklabelsize=7.5,
labelpadx=4.5, labelpady=4.5)
w = 0.24
h = 0.35
x0 = 0.1
y0 = 0.1
dx = 0.07
dy = 0.15
fig.add('c1', [x0, y0, w, h])
fig.add('c2', [fig[-1].right+dx, fig[-1].y, w, h])
fig.add('c3', [fig[-1].right+dx, fig[-1].y, w, h])
fig.add('m1', [fig['c1'].x, fig['c1'].top+dy, w, h])
fig.add('m2', [fig[-1].right+dx, fig[-1].y, w, h])
fig.add('m3', [fig[-1].right+dx, fig[-1].y, w, h])
#---------------------------------------------------------------------------------
plot_statespace(units, time_a, sorted_trials, Q, fig.plots)
#---------------------------------------------------------------------------------
# Legends
#---------------------------------------------------------------------------------
fig['m2'].text_upper_center('Motion context', dy=0.03, fontsize=8.5, color='k')
fig['c2'].text_upper_center('Color context', dy=0.03, fontsize=8.5,
color=Figure.colors('darkblue'))
ms_filled = 3
ms_empty = 3
mew_filled = 0.5
mew_empty = 0.5
y = 1.13
dx = 0.08
dy = 0.06
fontsize = 6
for context, plot, basecolor in zip(['Motion', 'Color'],
[fig['c1'], fig['c3']],
['k', Figure.colors('darkblue')]):
transform = plot.ax.transAxes
colors = [apply_alpha(basecolor, alpha) for alpha in [0.4, 0.7, 1]]
for i in xrange(3):
plot.plot(0.5+(i+0.5)*dx, y, 'o', mfc=colors[i], mec=colors[i],
ms=ms_filled, mew=mew_filled, transform=transform)
plot.plot(0.5-(i+0.5)*dx, y, 'o', mfc='none', mec=colors[i],
ms=ms_empty, mew=mew_empty, transform=transform)
# Strength label
plot.text(0.5, y+dy, 'Weak', ha='center', va='bottom', fontsize=fontsize,
color=colors[0], transform=transform)
plot.text(0.5+2.5*dx, y+dy, 'Strong', ha='center', va='bottom',
fontsize=fontsize, color=colors[-1], transform=transform)
plot.text(0.5-2.5*dx, y+dy, 'Strong', ha='center', va='bottom',
fontsize=fontsize, color=colors[-1], transform=transform)
if context == 'Motion':
plot.text(0.5-5*dx, y, context, ha='right', va='center',
fontsize=1.2*fontsize, color=colors[-1], transform=transform)
else:
plot.text(0.5+5*dx, y, context, ha='left', va='center',
fontsize=1.2*fontsize, color=colors[-1], transform=transform)
# Choice label
plot.text(0.5+2.5*dx, y-dy, 'To choice 1', ha='center', va='top',
fontsize=fontsize, color='k', transform=transform)
plot.text(0.5-2.5*dx, y-dy, 'To choice 2', ha='center', va='top',
fontsize=fontsize, color='k', transform=transform)
#-----------------------------------------------------------------------------
fig.save(path=figspath, name=name)
fig.close()
#/////////////////////////////////////////////////////////////////////////////////////////
def psychometric(trialsfile, plots=None, **kwargs):
"""
Compute and plot the psychometric functions.
"""
# Load trials
trials, A, R, M, perf = utils.load(trialsfile)
# Sort results by context, coherence
results = {cond: {} for cond in ['mm', 'mc', 'cm', 'cc']}
for n, trial in enumerate(trials):
if not perf.decisions[n]:
continue
coh_m = trial['left_right_m']*trial['coh_m']
coh_c = trial['left_right_c']*trial['coh_c']
if perf.choices[n] == 'R':
choice = 1
else:
choice = 0
if trial['context'] == 'm':
motion_choices = results['mm'].setdefault(coh_m, [])
color_choices = results['mc'].setdefault(coh_c, [])
else:
motion_choices = results['cm'].setdefault(coh_m, [])
color_choices = results['cc'].setdefault(coh_c, [])
motion_choices.append(choice)
color_choices.append(choice)
# Convert to P(right)
for k, choices_by_coh in results.items():
cohs = np.sort(choices_by_coh.keys())
p1 = np.zeros(len(cohs))
for i, coh in enumerate(cohs):
choices = choices_by_coh[coh]
p1[i] = sum(choices)/len(choices)
results[k] = (cohs, p1)
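# For example (numbers hypothetical), results['mm'] could end up as
# (array([-0.5, 0., 0.5]), array([0.1, 0.5, 0.9])): the signed motion
# coherences seen in the motion context, paired with the fraction of
# rightward choices at each coherence.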
#-------------------------------------------------------------------------------------
# Plot
#-------------------------------------------------------------------------------------
if plots is not None:
lw = kwargs.get('lw', 1.25)
ms = kwargs.get('ms', 5)
color_m = 'k'
color_c = Figure.colors('darkblue')
x_all = {'m': [], 'c': []}
for k, v in results.items():
# Context
if k[0] == 'm':
color = color_m
label = 'Motion context'
else:
color = color_c
label = 'Color context'
plot = plots[k[1]]
cohs, p1 = v
plot.plot(cohs, 100*p1, 'o', ms=ms, mew=0, mfc=color, zorder=10)
props = dict(color=color, lw=lw, zorder=5, label=label)
try:
popt, func = fittools.fit_psychometric(cohs, p1)
fit_cohs = np.linspace(min(cohs), max(cohs), 201)
fit_p1 = func(fit_cohs, **popt)
plot.plot(fit_cohs, 100*fit_p1, **props)
except RuntimeError:
print("Unable to fit, drawing a line through the points.")
plot.plot(cohs, 100*p1, **props)
x_all[k[1]].append(cohs)
for s in ['m', 'c']:
plots[s].lim('x', x_all[s])
plots[s].ylim(0, 100)
plots[s].yticks([0, 50, 100])
#/////////////////////////////////////////////////////////////////////////////////////////
def performance(savefile, plot, **kwargs):
perf_history = utils.load(savefile)['perf_history']
all_trials = []
all_corrects = []
best_trials = []
best_corrects = []
for niters, ntrials, perf, is_best in perf_history:
if is_best:
if perf.n_decision > 0:
p_correct = perf.n_correct/perf.n_decision
best_trials.append(ntrials)
best_corrects.append(p_correct)
if perf.n_decision > 0:
p_correct = perf.n_correct/perf.n_decision
all_trials.append(ntrials)
all_corrects.append(p_correct)
all_trials = np.asarray(all_trials)
all_corrects = np.asarray(all_corrects)
best_trials = np.asarray(best_trials)
best_corrects = np.asarray(best_corrects)
M = 100
plot.plot(all_trials/M, 100*all_corrects, color='0.8', lw=1.5)
plot.plot(all_trials/M, 100*all_corrects, 'o', mfc='0.8', mew=0)
plot.plot(best_trials/M, 100*best_corrects, color='k', lw=1.5)
plot.plot(best_trials/M, 100*best_corrects, 'o', mfc='k', mew=0)
plot.xlim(0, max(all_trials/M))
plot.ylim(0, 100)
plot.xlabel(r'Number of trials ($\times$ 10$^2$)')
plot.ylabel('Percent correct')
#/////////////////////////////////////////////////////////////////////////////////////////
def do(action, args, config):
"""
Manage tasks.
"""
print("ACTION*: " + str(action))
print("ARGS*: " + str(args))
#=====================================================================================
if action == 'performance':
fig = Figure(axislabelsize=10, ticklabelsize=9)
plot = fig.add()
performance(config['savefile'], plot)
fig.save(path=config['figspath'], name='performance')
fig.close()
#=====================================================================================
elif 'trials' in action:
try:
trials_per_condition = int(args[0])
except (IndexError, ValueError):
trials_per_condition = 1000
model = config['model']
pg = model.get_pg(config['savefile'], config['seed'], config['dt'])
spec = model.spec
mcs = spec.contexts
cohs = spec.cohs
left_rights = spec.left_rights
n_conditions = spec.n_conditions
n_trials = n_conditions * trials_per_condition
print("{} trials".format(n_trials))
task = model.Task()
trials = []
for n in xrange(n_trials):
k = tasktools.unravel_index(n, (len(mcs),
len(left_rights), len(left_rights),
len(cohs), len(cohs)))
context = {
'context': mcs[k.pop(0)],
'left_right_m': left_rights[k.pop(0)],
'left_right_c': left_rights[k.pop(0)],
'coh_m': cohs[k.pop(0)],
'coh_c': cohs[k.pop(0)]
}
trials.append(task.get_condition(pg.rng, pg.dt, context))
runtools.run(action, trials, pg, config['trialspath'])
#=====================================================================================
elif action == 'psychometric':
trialsfile = runtools.behaviorfile(config['trialspath'])
fig = Figure(w=6, h=2.7)
x0 = 0.12
y0 = 0.2
w = 0.36
h = 0.7
dx = 0.1
fig.add('m', [x0, y0, w, h])
fig.add('c', [fig[-1].right+dx, y0, w, h])
psychometric(trialsfile, fig.plots)
fig['m'].xlabel('Percent motion coherence')
fig['m'].ylabel('Percent right')
fig['c'].xlabel('Percent color coherence')
fig.save(path=config['figspath'], name='psychometric')
fig.close()
#=====================================================================================
elif action == 'sort':
if 'value' in args:
network = 'v'
else:
network = 'p'
trialsfile = runtools.activityfile(config['trialspath'])
sort(trialsfile, (config['figspath'], 'sorted'), network=network)
#=====================================================================================
elif action == 'statespace':
trialsfile = runtools.activityfile(config['trialspath'])
statespace(trialsfile, (config['figspath'], 'statespace'))
| {
"repo_name": "frsong/pyrl",
"path": "examples/analysis/mante.py",
"copies": "1",
"size": "51395",
"license": "mit",
"hash": -1213279458656972000,
"line_mean": 32.0089916506,
"line_max": 95,
"alpha_frac": 0.4163440023,
"autogenerated": false,
"ratio": 3.6160557236332935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9500711844778824,
"avg_score": 0.006337576230893955,
"num_lines": 1557
} |
from __future__ import absolute_import, division
import os
import numpy as np
from pyrl import runtools, tasktools, utils
from pyrl.figtools import Figure, mpl
#/////////////////////////////////////////////////////////////////////////////////////////
cmap = mpl.cm.jet
norm = mpl.colors.Normalize(vmin=10, vmax=34)
smap = mpl.cm.ScalarMappable(norm, cmap)
#/////////////////////////////////////////////////////////////////////////////////////////
def performance(trialsfile, plot, **kwargs):
# Load trials
trials, A, R, M, perf = utils.load(trialsfile)
correct_by_cond = {}
for n, trial in enumerate(trials):
if not perf.decisions[n]:
continue
gt_lt = trial['gt_lt']
fpair = trial['fpair']
if gt_lt == '>':
f1, f2 = fpair
else:
f2, f1 = fpair
cond = (f1, f2)
correct_by_cond.setdefault(cond, []).append(perf.corrects[n])
pcorrect_by_cond = {}
for c in correct_by_cond:
corrects = correct_by_cond[c]
pcorrect_by_cond[c] = utils.divide(sum(corrects), len(corrects))
#-------------------------------------------------------------------------------------
# Plot
#-------------------------------------------------------------------------------------
plot.equal()
lw = kwargs.get('lw', 1)
fontsize = kwargs.get('fontsize', 10)
_min, _max = kwargs.get('lims', (10-4, 34+4))
r = kwargs.get('r', 1.5)
for (f1, f2), pcorrect in pcorrect_by_cond.items():
plot.circle((f1, f2), r, ec='none', fc=smap.to_rgba(f1))
plot.text(f1, f2, '{}'.format(int(100*pcorrect)), color='w', fontsize=fontsize,
ha='center', va='center')
plot.xlim(_min, _max)
plot.ylim(_min, _max)
plot.plot([_min, _max], [_min, _max], color='k', lw=lw)
def sort(trialsfile, plots, units=None, network='p', **kwargs):
"""
Sort trials.
"""
# Load trials
data = utils.load(trialsfile)
if len(data) == 9:
trials, U, Z, A, P, M, perf, r_p, r_v = data
else:
trials, U, Z, Z_b, A, P, M, perf, r_p, r_v = data
# Which network?
if network == 'p':
r = r_p
else:
r = r_v
# Data shape
Ntime = r.shape[0]
N = r.shape[-1]
# Same for every trial
time = trials[0]['time']
# Aligned time
time_a = np.concatenate((-time[1:][::-1], time))
Ntime_a = len(time_a)
#=====================================================================================
# Sort trials
#=====================================================================================
# Sort
trials_by_cond = {}
for n, trial in enumerate(trials):
if perf.choices[n] is None or not perf.corrects[n]:
continue
# Condition
gt_lt = trial['gt_lt']
fpair = trial['fpair']
if gt_lt == '>':
f1, f2 = fpair
else:
f2, f1 = fpair
cond = (f1, f2)
# Firing rates
Mn = np.tile(M[:,n], (N,1)).T
Rn = r[:,n]*Mn
# Align point
t0 = trial['epochs']['f1'][0] - 1
# Storage
trials_by_cond.setdefault(cond, {'r': np.zeros((Ntime_a, N)),
'n': np.zeros((Ntime_a, N))})
# Before
n_b = Rn[:t0].shape[0]
trials_by_cond[cond]['r'][Ntime-1-n_b:Ntime-1] += Rn[:t0]
trials_by_cond[cond]['n'][Ntime-1-n_b:Ntime-1] += Mn[:t0]
# After
n_a = Rn[t0:].shape[0]
trials_by_cond[cond]['r'][Ntime-1:Ntime-1+n_a] += Rn[t0:]
trials_by_cond[cond]['n'][Ntime-1:Ntime-1+n_a] += Mn[t0:]
# Average
for cond in trials_by_cond:
trials_by_cond[cond] = utils.div(trials_by_cond[cond]['r'],
trials_by_cond[cond]['n'])
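# The accumulation above re-indexes every trial so that its align point t0
# (one step before f1 onset) always lands on row Ntime-1 of the (Ntime_a, N)
# buffers.  As a small illustration (numbers hypothetical): with Ntime = 5 and
# t0 = 2, the pre-stimulus samples Rn[:2] fill aligned rows 2-3 and the
# post-stimulus samples Rn[2:] fill rows 4-6; dividing 'r' by 'n' then yields
# the per-condition, per-unit average over the trials contributing to each bin.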
#=====================================================================================
# Plot
#=====================================================================================
lw = kwargs.get('lw', 1.5)
w, = np.where((time_a >= -500) & (time_a <= 4000))
def plot_sorted(plot, unit):
t = 1e-3*time_a[w]
yall = [[1]]
for (f1, f2), r in trials_by_cond.items():
plot.plot(t, r[w,unit], color=smap.to_rgba(f1), lw=lw)
yall.append(r[w,unit])
return t, yall
if units is not None:
for plot, unit in zip(plots, units):
plot_sorted(plot, unit)
else:
figspath, name = plots
for unit in xrange(N):
fig = Figure()
plot = fig.add()
#-----------------------------------------------------------------------------
t, yall = plot_sorted(plot, unit)
plot.xlim(t[0], t[-1])
plot.lim('y', yall, lower=0)
plot.highlight(0, 0.5)
plot.highlight(3.5, 4)
#-----------------------------------------------------------------------------
fig.save(path=figspath, name=name+'_{}{:03d}'.format(network, unit))
fig.close()
#/////////////////////////////////////////////////////////////////////////////////////////
def do(action, args, config):
"""
Manage tasks.
"""
print("ACTION*: " + str(action))
print("ARGS*: " + str(args))
if 'trials' in action:
try:
trials_per_condition = int(args[0])
except (IndexError, ValueError):
trials_per_condition = 100
model = config['model']
pg = model.get_pg(config['savefile'], config['seed'], config['dt'])
spec = model.spec
gt_lts = spec.gt_lts
fpairs = spec.fpairs
n_conditions = spec.n_conditions
n_trials = trials_per_condition * n_conditions
print("{} trials".format(n_trials))
task = model.Task()
trials = []
for n in xrange(n_trials):
k = tasktools.unravel_index(n, (len(gt_lts), len(fpairs)))
context = {
'delay': 3000,
'gt_lt': gt_lts[k.pop(0)],
'fpair': fpairs[k.pop(0)]
}
trials.append(task.get_condition(pg.rng, pg.dt, context))
runtools.run(action, trials, pg, config['trialspath'])
#=====================================================================================
elif action == 'performance':
trialsfile = runtools.behaviorfile(config['trialspath'])
fig = Figure()
plot = fig.add()
performance(trialsfile, plot)
plot.xlabel('$f_1$ (Hz)')
plot.ylabel('$f_2$ (Hz)')
fig.save(os.path.join(config['figspath'], action))
#=====================================================================================
elif action == 'sort':
if 'value' in args:
network = 'v'
else:
network = 'p'
trialsfile = runtools.activityfile(config['trialspath'])
sort(trialsfile, (config['figspath'], 'sorted'), network=network)
| {
"repo_name": "frsong/pyrl",
"path": "examples/analysis/romo.py",
"copies": "1",
"size": "7096",
"license": "mit",
"hash": 8372435807800582000,
"line_mean": 28.8151260504,
"line_max": 90,
"alpha_frac": 0.4199549042,
"autogenerated": false,
"ratio": 3.6483290488431876,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9546680031852014,
"avg_score": 0.004320784238234655,
"num_lines": 238
} |
from __future__ import absolute_import, division
import posixpath
from braces.views import AjaxResponseMixin
from django import http, shortcuts
from django.conf import settings
from django.contrib import messages
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from django.views.generic import RedirectView
from django.utils.translation import ugettext_lazy as _
from django_downloadview import VirtualDownloadView
from cobra.apps.svnkit.utils.util import get_readme
from .markup.hightlighter import get_pygmentize_diff
from cobra.core.decorators import has_access
from cobra.core.loading import get_model
from cobra.views.mixins import ExtraContextMixin
from . import exceptions
from .decorators import autosync_repositories
from .utils import diff
Repository = get_model('svnkit', 'Repository')
Changeset = get_model('svnkit', 'Changeset')
Change = get_model('svnkit', 'Change')
Content = get_model('svnkit', 'Content')
class SvnChangesetListView(ExtraContextMixin, generic.ListView):
context_object_name = "changesets"
template_name = 'svnkit/changeset_list.html'
paginate_by = settings.COBRA_SVNKIT_CHANGESETS_PER_PAGE
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
return super(SvnChangesetListView, self).dispatch(request, *args, **kwargs)
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_queryset(self):
return self.repository.changesets.all()
class SvnChangesetView(ExtraContextMixin, generic.DetailView):
context_object_name = "changeset"
template_name = 'svnkit/changeset.html'
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
return super(SvnChangesetView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
if hasattr(self, 'object'):
return self.object
else:
self.revision = self.kwargs.get('revision') or self.repository.get_latest_revision()
changeset = shortcuts.get_object_or_404(Changeset, repository=self.repository, revision=self.revision)
return changeset
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_context_data(self, **kwargs):
ctx = {}
ctx.update(kwargs)
ctx['revision'] = self.revision
return super(SvnChangesetView, self).get_context_data(**ctx)
class SvnNodeView(ExtraContextMixin, generic.TemplateView):
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
return super(SvnNodeView, self).dispatch(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# r = request.POST.get('revision', '').lower()
# if r.startswith('r'):
# r = r[1:]
# if r.isdigit():
# return http.HttpResponseRedirect(reverse(
# 'svnkit:node-revision',
# args=(kwargs['organization'].slug, kwargs['project'].slug, r, self.kwargs.get('path'))))
def get(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
self.revision = self.kwargs.get('revision') or self.repository.get_latest_revision()
self.changeset = shortcuts.get_object_or_404(Changeset, repository=self.repository, revision=self.revision)
self.path = self.kwargs.get('path') or posixpath.sep
try:
self.node = self.repository.get_node(self.path, self.revision)
if self.node.is_directory():
self.readme = get_readme(self.repository, path=self.path, revision=self.revision)
except exceptions.InvalidNode:
self.node = None
return super(SvnNodeView, self).get(request, *args, **kwargs)
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_context_data(self, **kwargs):
ctx = {}
ctx.update(kwargs)
ctx['revision'] = self.revision
ctx['changeset'] = self.changeset
ctx['path'] = self.path
ctx['node'] = self.node
ctx['readme'] = getattr(self, 'readme', None)
return super(SvnNodeView, self).get_context_data(**ctx)
def get_template_names(self):
if not self.node:
return 'svnkit/node_invalid.html'
if self.node.is_directory():
return 'svnkit/node_directory.html'
else:
return 'svnkit/node_file.html'
def get_readme_content(self):
if self.node.is_directory():
children = self.node.children.all()
class SvnNodeHistoryView(ExtraContextMixin, generic.ListView):
context_object_name = "changesets"
template_name = 'svnkit/node_history_list.html'
paginate_by = settings.COBRA_SVNKIT_NODE_HISTORY_PER_PAGE
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
return super(SvnNodeHistoryView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
self.path = self.kwargs['path']
try:
self.node = self.repository.get_node(self.path)
except exceptions.InvalidNode:
self.node = None
return super(SvnNodeHistoryView, self).get(request, *args, **kwargs)
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_queryset(self):
changeset_list = [c.changeset for c in Change.objects.filter(path=self.repository.prefix + self.path,
changeset__in=self.repository.changesets.all())]
return changeset_list
def get_context_data(self, **kwargs):
ctx = {}
ctx.update(kwargs)
ctx['path'] = self.path
ctx['node'] = self.node
return super(SvnNodeHistoryView, self).get_context_data(**ctx)
class SvnContentView(VirtualDownloadView):
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
return super(SvnContentView, self).dispatch(request, *args, **kwargs)
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_file(self):
"""Return :class:`django.core.files.base.ContentFile` object."""
content = shortcuts.get_object_or_404(Content, pk=self.kwargs['content_id'])
return ContentFile(content.get_data(), name=self.kwargs['path'])
class SvnNodeDiffView(AjaxResponseMixin, ExtraContextMixin, generic.TemplateView):
"""View a diff of two revisions at a node."""
template_name = 'svnkit/node_diff.html'
@method_decorator(has_access)
@method_decorator(autosync_repositories)
def dispatch(self, request, *args, **kwargs):
self.organization = kwargs.get('organization')
self.project = kwargs.get('project')
self.repository = self.get_repository()
return super(SvnNodeDiffView, self).dispatch(request, *args, **kwargs)
def get_ajax(self, request, *args, **kwargs):
self.template_name = 'svnkit/node_diff_data.html'
return super(SvnNodeDiffView, self).get_ajax(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
self.path = self.kwargs['path']
self.revision = self.kwargs['to_revision']
self.from_changeset = shortcuts.get_object_or_404(
Changeset, repository=self.repository, revision=self.kwargs['from_revision'])
self.to_changeset = shortcuts.get_object_or_404(
Changeset, repository=self.repository, revision=self.kwargs['to_revision'])
try:
self.from_node = self.repository.get_node(self.kwargs['path'], self.kwargs['from_revision'])
except exceptions.InvalidNode:
self.revision = self.kwargs['from_revision']
self.template_name = 'svnkit/node_invalid.html'
return super(SvnNodeDiffView, self).get(request, *args, **kwargs)
try:
self.to_node = self.repository.get_node(self.kwargs['path'], self.kwargs['to_revision'])
except exceptions.InvalidNode:
self.revision = self.kwargs['to_revision']
self.template_name = 'svnkit/node_invalid.html'
return super(SvnNodeDiffView, self).get(request, *args, **kwargs)
if not (self.from_node.is_file() and self.to_node.is_file()):
raise http.Http404('Invalid node type for diff.')
if self.from_node.content.is_binary() or self.to_node.content.is_binary():
raise http.Http404('Cannot diff binary nodes.')
try:
content_from = self.from_node.content.get_data().decode('utf-8')
except UnicodeDecodeError:
content_from = self.from_node.content.get_data().decode('gbk')
try:
content_to = self.to_node.content.get_data().decode('utf-8')
except UnicodeDecodeError:
content_to = self.to_node.content.get_data().decode('gbk')
self.diff_data, self.addition_num, self.deletion_num = diff.diff_lines(
content_from,
content_to, self.from_node.get_basename())
self.addition_stats, self.deletion_stats, self.nil_stats = self._calc_diffstas(self.addition_num, self.deletion_num)
self.ajax_diff_html = get_pygmentize_diff(content_from, content_to)
return super(SvnNodeDiffView, self).get(request, *args, **kwargs)
def _calc_diffstas(self, addition_num, deletion_num):
total = addition_num + deletion_num
if total <= 5:
return (range(addition_num), range(deletion_num), range(5-total))
else:
addition = int((addition_num / total)*5)
deletion = int((deletion_num / total)*5)
return (range(addition), range(deletion), range(5-addition-deletion))
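# Illustration of the five-block diff indicator computed above (hypothetical
# counts): with addition_num=7 and deletion_num=3 the total is 10, so the
# method returns (range(3), range(1), range(1)), i.e. three "addition" blocks,
# one "deletion" block and one neutral filler block; when the total is 5 or
# fewer, each changed line gets its own block and the remainder is filler.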
def get_repository(self):
repository_lookup = {'project': self.project}
repository = shortcuts.get_object_or_404(
Repository, **repository_lookup)
return repository
def get_context_data(self, **kwargs):
ctx = {}
ctx.update(kwargs)
ctx['revision'] = getattr(self, 'revision', None)
ctx['path'] = getattr(self, 'path', None)
ctx['from_changeset'] = self.from_changeset
ctx['to_changeset'] = self.to_changeset
ctx['from_node'] = getattr(self, 'from_node', None)
ctx['to_node'] = getattr(self, 'to_node', None)
ctx['diff_data'] = getattr(self, 'diff_data', None)
ctx['addition_num'] = getattr(self, 'addition_num', None)
ctx['deletion_num'] = getattr(self, 'deletion_num', None)
ctx['addition_stats'] = getattr(self, 'addition_stats', None)
ctx['deletion_stats'] = getattr(self, 'deletion_stats', None)
ctx['nil_stats'] = getattr(self, 'nil_stats', None)
ctx['ajax_diff_html'] = self.ajax_diff_html
return super(SvnNodeDiffView, self).get_context_data(**ctx)
class SvnJumpRevisionView(RedirectView):
pattern_name = 'svnkit:node-revision'
def post(self, request, *args, **kwargs):
current_request_path = request.POST.get('current_request_path')
p = request.POST.get('path', '/')
r = request.POST.get('revision', '').lower()
if r.startswith('r'):
r = r[1:]
if r.isdigit():
kwargs.update({
'revision': r,
'path': p
})
messages.add_message(
request, messages.SUCCESS,
_('Jumped to revision "%s" successfully.') % (r,))
else:
messages.add_message(
request, messages.ERROR,
_('The revision "%s" is not in a valid format; please enter a number, optionally prefixed with "r".') % (r,))
self.url = current_request_path
return super(SvnJumpRevisionView, self).post(request, *args, **kwargs) | {
"repo_name": "lyoniionly/django-cobra",
"path": "src/cobra/apps/svnkit/views.py",
"copies": "1",
"size": "13501",
"license": "apache-2.0",
"hash": 4986688258263391000,
"line_mean": 39.6686746988,
"line_max": 124,
"alpha_frac": 0.6408414192,
"autogenerated": false,
"ratio": 3.8508271534512266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9986452044696507,
"avg_score": 0.0010433055909439765,
"num_lines": 332
} |
from __future__ import absolute_import, division
import pprint
import re
import copy
import itertools
import operator
import pymongo
import six
from bson.code import Code
from . import signals
from functools import reduce
__all__ = ['queryset_manager', 'Q', 'InvalidQueryError',
'DO_NOTHING', 'NULLIFY', 'CASCADE', 'DENY']
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Delete rules
DO_NOTHING = 0
NULLIFY = 1
CASCADE = 2
DENY = 3
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
RE_TYPE = type(re.compile(''))
class QNodeVisitor(object):
"""Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""Called by (New)Q objects.
"""
return query
class SimplificationVisitor(QNodeVisitor):
"""Simplifies query trees by combinging unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# The simplification only applies to 'simple' queries
if all(isinstance(node, Q) for node in combination.children):
queries = [node.query for node in combination.children]
return Q(**self._query_conjunction(queries))
return combination
def _query_conjunction(self, queries):
"""Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops.intersection(query_ops)
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
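# Example of the simplification performed above (field names are illustrative):
# Q(age__gte=18) & Q(name='Ross') collapses into the single query
# Q(**{'age__gte': 18, 'name': 'Ross'}), whereas Q(age__gte=18) & Q(age__gte=21)
# raises InvalidQueryError at simplification time because the same
# field/operator key appears in both children.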
class QueryTreeTransformerVisitor(QNodeVisitor):
"""Transforms the query tree in to a form that may be used with MongoDB.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# MongoDB doesn't allow us to have too many $or operations in our
# queries, so the aim is to move the ORs up the tree to one
# 'master' $or. Firstly, we must find all the necessary parts (part
# of an AND combination or just standard Q object), and store them
# separately from the OR parts.
or_groups = []
and_parts = []
for node in combination.children:
if isinstance(node, QCombination):
if node.operation == node.OR:
# Any of the children in an $or component may cause
# the query to succeed
or_groups.append(node.children)
elif node.operation == node.AND:
and_parts.append(node)
elif isinstance(node, Q):
and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in itertools.product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses in to one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
# Crush any nested ORs in to this combination as MongoDB doesn't
# support nested $or operations
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
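# Example of the transformation performed above (field names are illustrative):
# the AND branch rewrites (Q(a=1) | Q(b=1)) & Q(c=1) into
# (Q(c=1) & Q(a=1)) | (Q(c=1) & Q(b=1)), so MongoDB only ever sees one
# top-level $or, and the OR branch flattens any nested ORs into a single flat
# list of children.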
class QueryCompilerVisitor(QNodeVisitor):
"""Compiles the nodes in a query tree to a PyMongo-compatible query
dictionary.
"""
def __init__(self, document):
self.document = document
def visit_combination(self, combination):
if combination.operation == combination.OR:
return {'$or': combination.children}
elif combination.operation == combination.AND:
return self._mongo_query_conjunction(combination.children)
return combination
def visit_query(self, query):
return QuerySet._transform_query(self.document, **query.query)
def _mongo_query_conjunction(self, queries):
"""Merges Mongo query dicts - effectively &ing them together.
"""
combined_query = {}
for query in queries:
for field, ops in query.items():
if field not in combined_query:
combined_query[field] = ops
else:
# The field is already present in the query; the only way
# we can merge is if both the existing value and the new
# value are operation dicts. Reject anything else.
if (not isinstance(combined_query[field], dict) or
not isinstance(ops, dict)):
message = 'Conflicting values for ' + field
raise InvalidQueryError(message)
current_ops = set(combined_query[field].keys())
new_ops = set(ops.keys())
# Make sure that the same operation isn't applied more than
# once to a single field
intersection = current_ops.intersection(new_ops)
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
# Right! We've got two non-overlapping dicts of operations!
combined_query[field].update(copy.deepcopy(ops))
return combined_query
class QNode(object):
"""Base class for nodes in query trees.
"""
AND = 0
OR = 1
def to_query(self, document):
query = self.accept(SimplificationVisitor())
query = query.accept(QueryTreeTransformerVisitor())
query = query.accept(QueryCompilerVisitor(document))
return query
def accept(self, visitor):
raise NotImplementedError
def _combine(self, other, operation):
"""Combine this node with another node into a QCombination object.
"""
if other.empty:
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
class QCombination(QNode):
"""Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not bool(self.children)
class Q(QNode):
"""A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not bool(self.query)
class QueryFieldList(object):
"""Object that handles combinations of .only() and .exclude() calls"""
ONLY = 1
EXCLUDE = 0
def __init__(self, fields=[], value=ONLY, always_include=[]):
self.value = value
self.fields = set(fields)
self.always_include = set(always_include)
self._id = None
def as_dict(self):
field_list = dict((field, self.value) for field in self.fields)
if self._id is not None:
field_list['_id'] = self._id
return field_list
def __add__(self, f):
if not self.fields:
self.fields = f.fields
self.value = f.value
elif self.value is self.ONLY and f.value is self.ONLY:
self.fields = self.fields.intersection(f.fields)
elif self.value is self.EXCLUDE and f.value is self.EXCLUDE:
self.fields = self.fields.union(f.fields)
elif self.value is self.ONLY and f.value is self.EXCLUDE:
self.fields -= f.fields
elif self.value is self.EXCLUDE and f.value is self.ONLY:
self.value = self.ONLY
self.fields = f.fields - self.fields
if '_id' in f.fields:
self._id = f.value
if self.always_include:
if self.value is self.ONLY and self.fields:
self.fields = self.fields.union(self.always_include)
else:
self.fields -= self.always_include
return self
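# Example of how successive projections merge (field names are illustrative):
# only('title', 'author') followed by only('author') keeps {'author'} (the
# intersection of two ONLY sets), while only('title', 'author') followed by
# exclude('author') keeps {'title'} (ONLY minus EXCLUDE); '_id' and any
# always_include fields are then layered on top of the result.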
def reset(self):
self.fields = set([])
self.value = self.ONLY
def __bool__(self):
return bool(self.fields)
__nonzero__ = __bool__
class QuerySet(six.Iterator):
"""A set of results returned from a query. Wraps a MongoDB cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
__already_indexed = set()
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._mongo_query = None
self._query_obj = Q()
self._initial_query = {}
self._where_clause = None
self._loaded_fields = QueryFieldList()
self._ordering = []
self._class_check = True
self._read_preference = None
self._scalar = []
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
if document._meta.get('allow_inheritance'):
cls_list = document._get_subdocuments() + [document]
cls_list = [c._class_name for c in cls_list]
self._initial_query = {'_cls': {'$in': cls_list}}
self._loaded_fields = QueryFieldList(always_include=['_cls'])
self._cursor_obj = None
self._limit = None
self._skip = None
self._hint = -1 # Using -1 as None is a valid value for hint
self._batch_size = None
self._no_cursor_timeout = None
def clone(self):
"""Creates a copy of the current :class:`~mongoengine.queryset.QuerySet`
.. versionadded:: 0.5
"""
c = self.__class__(self._document, self._collection_obj)
copy_props = ('_initial_query', '_query_obj', '_where_clause',
'_loaded_fields', '_ordering',
'_limit', '_skip', '_hint', '_batch_size',
'_read_preference',)
for prop in copy_props:
val = getattr(self, prop)
setattr(c, prop, copy.copy(val))
return c
@property
def _query(self):
if self._mongo_query is None:
self._mongo_query = self._query_obj.to_query(self._document)
if self._class_check:
self._mongo_query.update(self._initial_query)
return self._mongo_query
def ensure_index(self, key_or_list, drop_dups=False, background=False,
**kwargs):
"""Ensure that the given indexes are in place.
:param key_or_list: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
"""
index_spec = QuerySet._build_index_spec(self._document, key_or_list)
self._collection.ensure_index(
index_spec['fields'],
drop_dups=drop_dups,
background=background,
sparse=index_spec.get('sparse', False),
unique=index_spec.get('unique', False))
return self
@classmethod
def _build_index_spec(cls, doc_cls, spec):
"""Build a PyMongo index spec from a MongoEngine index spec.
"""
if isinstance(spec, six.string_types):
spec = {'fields': [spec]}
if isinstance(spec, (list, tuple)):
spec = {'fields': spec}
index_list = []
for key in spec['fields']:
# Get ASCENDING direction from +, DESCENDING from -, GEO2D from *
direction = pymongo.ASCENDING
if key.startswith("-"):
direction = pymongo.DESCENDING
elif key.startswith("*"):
direction = pymongo.GEO2D
if key.startswith(("+", "-", "*")):
key = key[1:]
# Use real field name, do it manually because we need field
# objects for the next part (list field checking)
parts = key.split('.')
fields = QuerySet._lookup_field(doc_cls, parts)
parts = [field.db_field for field in fields]
key = '.'.join(parts)
index_list.append((key, direction))
spec['fields'] = index_list
if spec.get('sparse', False) and len(spec['fields']) > 1:
raise ValueError(
'Sparse indexes can only have one field in them. '
'See https://jira.mongodb.org/browse/SERVER-2193')
return spec
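# Example of the prefix handling above (field names are illustrative): a spec
# of ['-created', '*location'] becomes
# {'fields': [('created', pymongo.DESCENDING), ('location', pymongo.GEO2D)]}
# once each key has been translated to its db_field name.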
@classmethod
def _reset_already_indexed(cls, document=None):
"""Helper to reset already indexed, can be useful for testing
purposes"""
if document:
cls.__already_indexed.discard(document)
return
cls.__already_indexed.clear()
def __call__(self, q_obj=None, class_check=True, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used
:param class_check: If set to False bypass class name check when
querying collection
:param query: Django-style query keyword arguments
"""
query = Q(**query)
if q_obj:
query &= q_obj
self._query_obj &= query
self._mongo_query = None
self._cursor_obj = None
self._class_check = class_check
return self
def filter(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__(*q_objs, **query)
def all(self):
"""Returns all documents."""
return self.__call__()
@property
def _collection(self):
"""Property that returns the collection object. This allows us to
perform operations only if the collection is accessed.
"""
if self._document not in QuerySet.__already_indexed:
# Ensure collection exists
QuerySet.__already_indexed.add(self._document)
return self._collection_obj
@property
def _cursor_args(self):
cursor_args = {
}
if self._loaded_fields:
cursor_args['projection'] = self._loaded_fields.as_dict()
if self._no_cursor_timeout is not None:
cursor_args['no_cursor_timeout'] = self._no_cursor_timeout
return cursor_args
@property
def _cursor(self):
if self._cursor_obj is None:
collection = self._collection
if self._read_preference:
collection = collection.with_options(
read_preference=self._read_preference)
self._cursor_obj = collection.find(
self._query, **self._cursor_args)
# Apply where clauses to cursor
if self._where_clause:
self._cursor_obj.where(self._where_clause)
# apply default ordering
if self._ordering:
self._cursor_obj.sort(self._ordering)
elif self._document._meta['ordering']:
self.order_by(*self._document._meta['ordering'])
self._cursor_obj.sort(self._ordering)
if self._limit is not None:
self._cursor_obj.limit(self._limit)
if self._skip is not None:
self._cursor_obj.skip(self._skip)
if self._hint != -1:
self._cursor_obj.hint(self._hint)
if self._batch_size is not None:
self._cursor_obj.batch_size(self._batch_size)
return self._cursor_obj
@classmethod
def _lookup_field(cls, document, parts):
"""Lookup a field based on its attribute and return a list containing
the field's parents and the field.
"""
if not isinstance(parts, (list, tuple)):
parts = [parts]
fields = []
field = None
for field_name in parts:
# Handle ListField indexing:
if field_name.isdigit():
try:
new_field = field.field
except AttributeError as err:
raise InvalidQueryError(
"Can't use index on unsubscriptable field (%s)" % err)
fields.append(field_name)
continue
if field is None:
# Look up first field from the document
if field_name == 'pk':
# Deal with "primary key" alias
field_name = document._meta['id_field']
if field_name in document._fields:
field = document._fields[field_name]
elif document._dynamic:
from .base import BaseDynamicField
field = BaseDynamicField(db_field=field_name)
else:
raise InvalidQueryError('Cannot resolve field "%s"'
% field_name)
else:
from .fields import ReferenceField, GenericReferenceField # noqa
if isinstance(field, (ReferenceField, GenericReferenceField)):
raise InvalidQueryError('Cannot perform join in mongoDB: %s'
% '__'.join(parts))
# Look up subfield on the previous field
new_field = field.lookup_member(field_name)
from .base import ComplexBaseField
if not new_field and isinstance(field, ComplexBaseField):
fields.append(field_name)
continue
elif not new_field:
raise InvalidQueryError('Cannot resolve field "%s"'
% field_name)
field = new_field # update field to the new field type
fields.append(field)
return fields
@classmethod
def _translate_field_name(cls, doc_cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.db_field for f in QuerySet._lookup_field(doc_cls, parts)]
return '.'.join(parts)
@classmethod
def _transform_query(cls, _doc_cls=None, _field_operation=False, **query):
"""Transform a query from Django-style format to Mongo format.
"""
operators = ['ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod',
'all', 'size', 'exists', 'not']
geo_operators = ['within_distance', 'within_spherical_distance',
'within_box', 'within_polygon', 'near', 'near_sphere']
match_operators = ['contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith',
'exact', 'iexact']
custom_operators = ['match']
mongo_query = {}
for key, value in query.items():
if key == "__raw__":
mongo_query.update(value)
continue
parts = key.split('__')
indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
parts = [part for part in parts if not part.isdigit()]
# Check for an operator and transform to mongo-style if there is one
op = None
if parts[-1] in (operators + match_operators +
geo_operators + custom_operators):
op = parts.pop()
negate = False
if parts[-1] == 'not':
parts.pop()
negate = True
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = []
cleaned_fields = []
for field in fields:
append_field = True
if isinstance(field, str):
parts.append(field)
append_field = False
else:
parts.append(field.db_field)
if append_field:
cleaned_fields.append(field)
# Convert value to proper value
field = cleaned_fields[-1]
singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not']
singular_ops += match_operators
if op in singular_ops:
if not isinstance(field, six.string_types):
value = field.prepare_query_value(op, value)
elif op in match_operators and isinstance(
value, six.string_types):
from . import StringField
value = StringField.prepare_query_value(op, value)
else:
value = field
elif op in ('in', 'nin', 'all', 'near'):
# 'in', 'nin' and 'all' require a list of values
value = [field.prepare_query_value(op, v) for v in value]
# if op and op not in match_operators:
if op:
if op in geo_operators:
if op == "within_distance":
value = {'$within': {'$center': value}}
elif op == "within_spherical_distance":
value = {'$within': {'$centerSphere': value}}
elif op == "within_polygon":
value = {'$within': {'$polygon': value}}
elif op == "near":
value = {'$near': value}
elif op == "near_sphere":
value = {'$nearSphere': value}
elif op == 'within_box':
value = {'$within': {'$box': value}}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented" % op)
elif op in custom_operators:
if op == 'match':
value = {"$elemMatch": value}
else:
raise NotImplementedError("Custom method '%s' has not "
"been implemented" % op)
elif op not in match_operators:
value = {'$' + op: value}
if negate:
value = {'$not': value}
for i, part in indices:
parts.insert(i, part)
key = '.'.join(parts)
if op is None or key not in mongo_query:
mongo_query[key] = value
elif key in mongo_query and isinstance(mongo_query[key], dict):
mongo_query[key].update(value)
return mongo_query
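# Example of the transformation above (names and values are illustrative):
# _transform_query(Doc, age__gte=18, name__in=['Ross', 'Demi']) produces
# roughly {'age': {'$gte': 18}, 'name': {'$in': ['Ross', 'Demi']}}, after
# attribute names are mapped to db_field names and each value is passed
# through the field's prepare_query_value().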
def get(self, *q_objs, **query):
"""Retrieve the the matching object raising
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` exception if multiple results and
:class:`~mongoengine.queryset.DoesNotExist` or
`DocumentName.DoesNotExist` if no results are found.
.. versionadded:: 0.3
"""
self.limit(2)
self.__call__(*q_objs, **query)
try:
result1 = next(self)
except StopIteration:
raise self._document.DoesNotExist(
"%s matching query does not exist."
% self._document._class_name)
try:
next(self)
except StopIteration:
return result1
self.rewind()
message = u'%d items returned, instead of 1' % self.count()
raise self._document.MultipleObjectsReturned(message)
def get_or_create(self, write_options=None, auto_save=True,
*q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist. Returns a
tuple of ``(object, created)``, where ``object`` is the retrieved or
created object and ``created`` is a boolean specifying whether a new
object was created.
Raises :class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` if multiple results are found.
A new document will be created if the document doesn't exist; a
dictionary of default values for the new document may be provided as a
keyword argument called :attr:`defaults`.
:param write_options: optional extra keyword arguments used if we
have to create a new document.
Passes any write_options onto :meth:`~mongoengine.Document.save`
.. versionadded:: 0.3
:param auto_save: if the object to be saved automatically if not found.
.. versionadded:: 0.6
"""
defaults = query.get('defaults', {})
if 'defaults' in query:
del query['defaults']
try:
doc = self.get(*q_objs, **query)
return doc, False
except self._document.DoesNotExist:
query.update(defaults)
doc = self._document(**query)
if auto_save:
doc.save(write_options=write_options)
return doc, True
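# Illustrative usage, assuming the usual ``objects`` manager and a hypothetical
# User document:
# >>> user, created = User.objects.get_or_create(name='Bob', defaults={'age': 30})
# This returns the existing matching document with created=False, or saves and
# returns a new User(name='Bob', age=30) with created=True (when auto_save is
# left at its default of True).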
def create(self, **kwargs):
"""Create new object. Returns the saved object instance.
.. versionadded:: 0.4
"""
doc = self._document(**kwargs)
doc.save()
return doc
def first(self):
"""Retrieve the first object matching the query.
"""
try:
result = self[0]
except IndexError:
result = None
return result
def insert(self, doc_or_docs, load_bulk=True):
"""bulk insert documents
:param docs_or_doc: a document or list of documents to be inserted
:param load_bulk (optional): If True returns list of document instances
By default returns document instances, set ``load_bulk`` to False to
return just ``ObjectIds``
.. versionadded:: 0.5
"""
from .document import Document
docs = doc_or_docs
return_one = False
if isinstance(docs, Document) or issubclass(docs.__class__, Document):
return_one = True
docs = [docs]
raw = []
for doc in docs:
if not isinstance(doc, self._document):
msg = "Some documents inserted aren't instances of %s" \
% str(self._document)
raise OperationError(msg)
if doc.pk:
msg = "Some documents have ObjectIds use doc.update() instead"
raise OperationError(msg)
raw.append(doc.to_mongo())
signals.pre_bulk_insert.send(self._document, documents=docs)
ids = self._collection.insert(raw)
if not load_bulk:
signals.post_bulk_insert.send(
self._document, documents=docs, loaded=False)
return return_one and ids[0] or ids
documents = self.in_bulk(ids)
results = []
for obj_id in ids:
results.append(documents.get(obj_id))
signals.post_bulk_insert.send(
self._document, documents=results, loaded=True)
return return_one and results[0] or results
def with_id(self, object_id):
"""Retrieve the object matching the id provided. Uses `object_id` only
and raises InvalidQueryError if a filter has been applied.
:param object_id: the value for the id of the document to look up
.. versionchanged:: 0.6 Raises InvalidQueryError if filter has been set
"""
if not self._query_obj.empty:
raise InvalidQueryError(
"Cannot use a filter whilst using `with_id`")
return self.filter(pk=object_id).first()
def in_bulk(self, object_ids):
"""Retrieve a set of documents by their ids.
:param object_ids: a list or tuple of ``ObjectId``\ s
:rtype: dict of ObjectIds as keys and collection-specific
Document subclasses as values.
.. versionadded:: 0.3
"""
doc_map = {}
docs = self._collection.find({'_id': {'$in': object_ids}},
**self._cursor_args)
if self._scalar:
for doc in docs:
doc_map[doc['_id']] = self._get_scalar(
self._document._from_son(doc))
else:
for doc in docs:
doc_map[doc['_id']] = self._document._from_son(doc)
return doc_map
def __next__(self):
"""Wrap the result in a :class:`~mongoengine.Document` object.
"""
try:
if self._limit == 0:
raise StopIteration
if self._scalar:
return self._get_scalar(self._document._from_son(
next(self._cursor)))
return self._document._from_son(next(self._cursor))
except StopIteration as e:
self.rewind()
raise e
def rewind(self):
"""Rewind the cursor to its unevaluated state.
.. versionadded:: 0.3
"""
self._cursor.rewind()
def count(self):
"""Count the selected elements in the query.
"""
if self._limit == 0:
return 0
return self._cursor.count(with_limit_and_skip=True)
def __len__(self):
return self.count()
def map_reduce(self, map_f, reduce_f, output, finalize_f=None, limit=None,
scope=None):
"""Perform a map/reduce query using the current query spec
and ordering. While ``map_reduce`` respects ``QuerySet`` chaining,
it must be the last call made, as it does not return a malleable
``QuerySet``.
See the :meth:`~mongoengine.tests.QuerySetTest.test_map_reduce`
and :meth:`~mongoengine.tests.QuerySetTest.test_map_advanced`
tests in ``tests.queryset.QuerySetTest`` for usage examples.
:param map_f: map function, as :class:`~bson.code.Code` or string
:param reduce_f: reduce function, as
:class:`~bson.code.Code` or string
:param output: output collection name, if set to 'inline' will try to
use
:class:`~pymongo.collection.Collection.inline_map_reduce`
:param finalize_f: finalize function, an optional function that
performs any post-reduction processing.
:param scope: values to insert into map/reduce global scope. Optional.
:param limit: number of objects from current query to provide
to map/reduce method
Returns an iterator yielding
:class:`~mongoengine.document.MapReduceDocument`.
.. note::
Map/Reduce changed in server version **>= 1.7.4**. The PyMongo
:meth:`~pymongo.collection.Collection.map_reduce` helper requires
PyMongo version **>= 1.11**.
.. versionchanged:: 0.5
- removed ``keep_temp`` keyword argument, which was only relevant
for MongoDB server versions older than 1.7.4
.. versionadded:: 0.3
"""
from .document import MapReduceDocument
if not hasattr(self._collection, "map_reduce"):
raise NotImplementedError("Requires MongoDB >= 1.7.1")
map_f_scope = {}
if isinstance(map_f, Code):
map_f_scope = map_f.scope
map_f = six.text_type(map_f)
map_f = Code(self._sub_js_fields(map_f), map_f_scope)
reduce_f_scope = {}
if isinstance(reduce_f, Code):
reduce_f_scope = reduce_f.scope
reduce_f = six.text_type(reduce_f)
reduce_f_code = self._sub_js_fields(reduce_f)
reduce_f = Code(reduce_f_code, reduce_f_scope)
mr_args = {'query': self._query}
if finalize_f:
finalize_f_scope = {}
if isinstance(finalize_f, Code):
finalize_f_scope = finalize_f.scope
finalize_f = six.text_type(finalize_f)
finalize_f_code = self._sub_js_fields(finalize_f)
finalize_f = Code(finalize_f_code, finalize_f_scope)
mr_args['finalize'] = finalize_f
if scope:
mr_args['scope'] = scope
if limit:
mr_args['limit'] = limit
if output == 'inline' and not self._ordering:
map_reduce_function = 'inline_map_reduce'
else:
map_reduce_function = 'map_reduce'
mr_args['out'] = output
results = getattr(self._collection,
map_reduce_function)(map_f, reduce_f, **mr_args)
if map_reduce_function == 'map_reduce':
results = results.find()
if self._ordering:
results = results.sort(self._ordering)
for doc in results:
yield MapReduceDocument(self._document, self._collection,
doc['_id'], doc['value'])
def limit(self, n):
"""Limit the number of returned documents to `n`. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[:5]``).
:param n: the maximum number of objects to return
"""
if self._cursor_obj is not None:
if n == 0:
self._cursor_obj.limit(1)
else:
self._cursor_obj.limit(n)
self._limit = n
# Return self to allow chaining
return self
def skip(self, n):
"""Skip `n` documents before returning the results. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[5:]``).
:param n: the number of objects to skip before returning results
"""
if self._cursor_obj is not None and n:
self._cursor_obj.skip(n)
self._skip = n
return self
def hint(self, index=None):
"""Added 'hint' support, telling Mongo the proper index to use for the
query.
Judicious use of hints can greatly improve query performance. When doing
a query on multiple fields (at least one of which is indexed) pass the
indexed field as a hint to the query.
Hinting will not do anything if the corresponding index does not exist.
The last hint applied to this cursor takes precedence over all others.
.. versionadded:: 0.5
"""
if self._cursor_obj is not None:
self._cursor_obj.hint(index)
self._hint = index
return self
def batch_size(self, size):
"""Limit the number of documents returned in a single batch (each batch
requires a round trip to the server).
See http://api.mongodb.com/python/current/api/pymongo/cursor.html#pymongo.cursor.Cursor.batch_size # noqa
for details.
:param size: desired size of each batch.
"""
if self._cursor_obj is not None:
self._cursor_obj.batch_size(size)
self._batch_size = size
return self
def timeout(self, yes_timeout):
"""Enable or disable the server-side cursor timeout for this queryset;
stored internally as the inverse ``no_cursor_timeout`` flag passed to ``find()``."""
self._no_cursor_timeout = not yes_timeout
return self
def __getitem__(self, key):
"""Support skip and limit using getitem and slicing syntax.
"""
# Slice provided
if isinstance(key, slice):
try:
self._cursor_obj = self._cursor[key]
self._skip, self._limit = key.start, key.stop
if key.start and key.stop:
self._limit = key.stop - key.start
except IndexError as err:
# PyMongo raises an error if key.start == key.stop, catch it,
# bin it, kill it.
start = key.start or 0
if start >= 0 and key.stop >= 0 and key.step is None:
if start == key.stop:
self.limit(0)
self._skip, self._limit = key.start, key.stop - start
return self
raise err
# Allow further QuerySet modifications to be performed
return self
# Integer index provided
elif isinstance(key, int):
if self._scalar:
return self._get_scalar(self._document._from_son(
self._cursor[key]))
return self._document._from_son(self._cursor[key])
raise AttributeError
def distinct(self, field):
"""Return a list of distinct values for a given field.
:param field: the field to select distinct values from
.. versionadded:: 0.4
        .. versionchanged:: 0.5 - Fixed handling of references
"""
from .dereference import DeReference
return DeReference()(self._cursor.distinct(field), 1)
def only(self, *fields):
"""Load only a subset of this document's fields. ::
post = BlogPost.objects(...).only("title", "author.name")
:param fields: fields to include
.. versionadded:: 0.3
.. versionchanged:: 0.5 - Added subfield support
"""
fields = dict([(f, QueryFieldList.ONLY) for f in fields])
return self.fields(**fields)
def exclude(self, *fields):
"""Opposite to .only(), exclude some document's fields. ::
post = BlogPost.objects(...).exclude("comments")
:param fields: fields to exclude
.. versionadded:: 0.5
"""
fields = dict([(f, QueryFieldList.EXCLUDE) for f in fields])
return self.fields(**fields)
def fields(self, **kwargs):
"""Manipulate how you load this document's fields. Used by `.only()`
and `.exclude()` to manipulate which fields to retrieve. Fields also
allows for a greater level of control for example:
Retrieving a Subrange of Array Elements:
You can use the $slice operator to retrieve a subrange of elements in
an array ::
            post = BlogPost.objects(...).fields(slice__comments=5)  # first 5 comments
:param kwargs: A dictionary identifying what to include
.. versionadded:: 0.5
"""
# Check for an operator and transform to mongo-style if there is
operators = ["slice"]
cleaned_fields = []
for key, value in kwargs.items():
parts = key.split('__')
op = None
if parts[0] in operators:
op = parts.pop(0)
value = {'$' + op: value}
key = '.'.join(parts)
cleaned_fields.append((key, value))
fields = sorted(cleaned_fields, key=operator.itemgetter(1))
for value, group in itertools.groupby(fields, lambda x: x[1]):
fields = [field for field, value in group]
fields = self._fields_to_dbfields(fields)
self._loaded_fields += QueryFieldList(fields, value=value)
return self
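        # Illustrative sketch (document shape assumed for illustration):
        # fields(slice__comments=5) is rewritten above into the projection
        # {'comments': {'$slice': 5}} before being merged into
        # self._loaded_fields.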
def all_fields(self):
"""Include all fields. Reset all previously calls of .only() and .exclude(). ::
post = BlogPost.objects(...).exclude("comments") \
.only("title").all_fields()
.. versionadded:: 0.5
"""
self._loaded_fields = QueryFieldList(
always_include=self._loaded_fields.always_include)
return self
def _fields_to_dbfields(self, fields):
"""Translate fields paths to its db equivalents"""
ret = []
for field in fields:
field = ".".join(
f.db_field for f in
QuerySet._lookup_field(self._document, field.split('.')))
ret.append(field)
return ret
def order_by(self, *keys):
"""Order the :class:`~mongoengine.queryset.QuerySet` by the keys. The
order may be specified by prepending each of the keys by a + or a -.
Ascending order is assumed.
:param keys: fields to order the query results by; keys may be
prefixed with **+** or **-** to determine the ordering direction
"""
key_list = []
for key in keys:
if not key:
continue
direction = pymongo.ASCENDING
if key[0] == '-':
direction = pymongo.DESCENDING
if key[0] in ('-', '+'):
key = key[1:]
key = key.replace('__', '.')
try:
key = QuerySet._translate_field_name(self._document, key)
except:
pass
key_list.append((key, direction))
self._ordering = key_list
return self
def explain(self, format=False):
"""Return an explain plan record for the
:class:`~mongoengine.queryset.QuerySet`\ 's cursor.
:param format: format the plan before returning it
"""
plan = self._cursor.explain()
if format:
plan = pprint.pformat(plan)
return plan
def read_preference(self, read_preference):
"""Specify the read preference when querying.
:param read_preference: the ReadPreference to use
"""
self._read_preference = read_preference
return self
def delete(self, w=1, _from_doc_delete=False):
"""Delete the documents matched by the query.
"""
queryset = self.clone()
# This is taken from actual MongoEngine, url
# https://github.com/MongoEngine/mongoengine/pull/105
has_delete_signal = (
signals.pre_delete.has_receivers_for(self._document) or
signals.post_delete.has_receivers_for(self._document))
if has_delete_signal and not _from_doc_delete:
for d in queryset:
d.delete()
return
# Check for DENY rules before actually deleting/nullifying any other
# references
for rule_entry in queryset._document._meta['delete_rules']:
document_cls, field_name = rule_entry
rule = queryset._document._meta['delete_rules'][rule_entry]
if rule == DENY \
and document_cls.objects(**{field_name + '__in': self}).count() > 0: # noqa
msg = \
u'Could not delete document (at least %s.%s refers to it)' \
% (document_cls.__name__, field_name)
raise OperationError(msg)
for rule_entry in queryset._document._meta['delete_rules']:
document_cls, field_name = rule_entry
rule = queryset._document._meta['delete_rules'][rule_entry]
if rule == CASCADE:
document_cls.objects(**{field_name + '__in': self}).delete(w=w)
elif rule == NULLIFY:
document_cls.objects(**{field_name + '__in': self}).update(
w=w,
**{'unset__%s' % field_name: 1})
self._collection.remove(self._query, w=w)
@classmethod
def _transform_update(cls, _doc_cls=None, **update):
"""Transform an update spec from Django-style format to Mongo format.
"""
operators = ['set', 'unset', 'inc', 'dec', 'pop', 'push', 'push_all',
'pull', 'pull_all', 'add_to_set']
mongo_update = {}
for key, value in update.items():
if key == "__raw__":
mongo_update.update(value)
continue
parts = key.split('__')
# Check for an operator and transform to mongo-style if there is
op = None
if parts[0] in operators:
op = parts.pop(0)
# Convert Pythonic names to Mongo equivalents
if op in ('push_all', 'pull_all'):
op = op.replace('_all', 'All')
elif op == 'dec':
# Support decrement by flipping a positive value's sign
# and using 'inc'
op = 'inc'
if value > 0:
value = -value
elif op == 'add_to_set':
op = op.replace('_to_set', 'ToSet')
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = []
cleaned_fields = []
for field in fields:
append_field = True
if isinstance(field, str):
# Convert the S operator to $
if field == 'S':
field = '$'
parts.append(field)
append_field = False
else:
parts.append(field.db_field)
if append_field:
cleaned_fields.append(field)
# Convert value to proper value
field = cleaned_fields[-1]
if op in (None, 'set', 'push', 'pull', 'addToSet'):
if field.required or value is not None:
value = field.prepare_query_value(op, value)
elif op in ('pushAll', 'pullAll'):
value = [field.prepare_query_value(op, v) for v in value]
key = '.'.join(parts)
if not op:
raise InvalidQueryError(
"Updates must supply an operation eg: set__FIELD=value")
if op == 'pushAll':
op = 'push' # convert to non-deprecated keyword.
if not isinstance(value, (set, tuple, list)):
value = [value]
value = {key: {'$each': value}}
else:
value = {key: value}
key = '$' + op
if key not in mongo_update:
mongo_update[key] = value
elif key in mongo_update and isinstance(mongo_update[key], dict):
mongo_update[key].update(value)
return mongo_update
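        # Illustrative sketch (field names assumed for illustration):
        #     QuerySet._transform_update(BlogPost, inc__view_count=1,
        #                                set__title='foo')
        #     -> {'$inc': {'view_count': 1}, '$set': {'title': 'foo'}}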
def update(self, w=1, upsert=False, multi=True, write_options=None,
**update):
"""Perform an atomic update on the fields matched by the query.
        :param upsert: insert a new document if no existing document matches
            the query
:param write_options: extra keyword arguments for
:meth:`~pymongo.collection.Collection.update`
.. versionadded:: 0.2
"""
if not update:
raise OperationError("No update parameters, would remove data")
if not write_options:
write_options = {}
update = QuerySet._transform_update(self._document, **update)
query = self._query
try:
ret = self._collection.update(query, update, multi=multi,
upsert=upsert, w=w,
**write_options)
if ret is not None and 'n' in ret:
return ret['n']
except pymongo.errors.OperationFailure as err:
if six.text_type(err) == u'multi not coded yet':
message = u'update() method requires MongoDB 1.1.3+'
raise OperationError(message)
raise OperationError(u'Update failed (%s)' % six.text_type(err))
def update_one(self, w=1, upsert=False, write_options=None, **update):
"""Perform an atomic update on first field matched by the query.
:param upsert: Any existing document with that "_id" is overwritten.
:param write_options: extra keyword arguments for
:meth:`~pymongo.collection.Collection.update`
:param update: Django-style update keyword arguments
.. versionadded:: 0.2
"""
if not update:
raise OperationError("No update parameters, would remove data")
if not write_options:
write_options = {}
update = QuerySet._transform_update(self._document, **update)
query = self._query
try:
# Explicitly provide 'multi=False' to newer versions of PyMongo
# as the default may change to 'True'
ret = self._collection.update(
query, update, multi=False, upsert=upsert, w=w, **write_options)
if ret is not None and 'n' in ret:
return ret['n']
except pymongo.errors.OperationFailure as e:
raise OperationError(u'Update failed [%s]' % six.text_type(e))
def __iter__(self):
self.rewind()
return self
def _get_scalar(self, doc):
def lookup(obj, name):
chunks = name.split('__')
for chunk in chunks:
obj = getattr(obj, chunk)
return obj
data = [lookup(doc, n) for n in self._scalar]
if len(data) == 1:
return data[0]
return tuple(data)
def scalar(self, *fields):
"""Instead of returning Document instances, return either a specific
value or a tuple of values in order.
        This affects all results and can be unset by calling ``scalar``
without arguments. Calls ``only`` automatically.
:param fields: One or more fields to return instead of a Document.
"""
self._scalar = list(fields)
if fields:
self.only(*fields)
else:
self.all_fields()
return self
def values_list(self, *fields):
"""An alias for scalar"""
return self.scalar(*fields)
def _sub_js_fields(self, code):
"""When fields are specified with [~fieldname] syntax, where
*fieldname* is the Python name of a field, *fieldname* will be
substituted for the MongoDB name of the field (specified using the
:attr:`name` keyword argument in a field's constructor).
"""
def field_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return u'["%s"]' % fields[-1].db_field
def field_path_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return ".".join([f.db_field for f in fields])
code = re.sub(r'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]', field_sub, code)
code = re.sub(r'\{\{\s*~([A-z_][A-z_0-9.]+?)\s*\}\}',
field_path_sub, code)
return code
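        # Illustrative sketch (assuming a field declared as
        # name = StringField(db_field='n')):
        #     '[~name]'   becomes '["n"]'
        #     '{{~name}}' becomes 'n'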
def exec_js(self, code, *fields, **options):
"""Execute a Javascript function on the server. A list of fields may be
provided, which will be translated to their correct names and supplied
as the arguments to the function. A few extra variables are added to
the function's scope: ``collection``, which is the name of the
collection in use; ``query``, which is an object representing the
current query; and ``options``, which is an object containing any
options specified as keyword arguments.
As fields in MongoEngine may use different names in the database (set
using the :attr:`db_field` keyword argument to a :class:`Field`
constructor), a mechanism exists for replacing MongoEngine field names
with the database field names in Javascript code. When accessing a
field, use square-bracket notation, and prefix the MongoEngine field
name with a tilde (~).
:param code: a string of Javascript code to execute
:param fields: fields that you will be using in your function, which
will be passed in to your function as arguments
:param options: options that you want available to the function
(accessed in Javascript through the ``options`` object)
"""
code = self._sub_js_fields(code)
fields = [QuerySet._translate_field_name(self._document, f)
for f in fields]
collection = self._document._get_collection_name()
scope = {
'collection': collection,
'options': options or {},
}
query = self._query
if self._where_clause:
query['$where'] = self._where_clause
scope['query'] = query
code = Code(code, scope=scope)
db = self._document._get_db()
return db.eval(code, *fields)
def where(self, where_clause):
"""Filter ``QuerySet`` results with a ``$where`` clause (a Javascript
expression). Performs automatic field name substitution like
        :meth:`mongoengine.queryset.QuerySet.exec_js`.
.. note:: When using this mode of query, the database will call your
function, or evaluate your predicate clause, for each object
in the collection.
.. versionadded:: 0.5
"""
where_clause = self._sub_js_fields(where_clause)
self._where_clause = where_clause
return self
def sum(self, field):
"""Sum over the values of the specified field.
:param field: the field to sum over; use dot-notation to refer to
embedded document fields
        .. versionchanged:: 0.5 - updated to map_reduce as db.eval doesn't work
with sharding.
"""
map_func = Code("""
function() {
emit(1, this[field] || 0);
}
""", scope={'field': field})
reduce_func = Code("""
function(key, values) {
var sum = 0;
for (var i in values) {
sum += values[i];
}
return sum;
}
""")
for result in self.map_reduce(map_func, reduce_func, output='inline'):
return result.value
else:
return 0
def average(self, field):
"""Average over the values of the specified field.
:param field: the field to average over; use dot-notation to refer to
embedded document fields
        .. versionchanged:: 0.5 - updated to map_reduce as db.eval doesn't work
with sharding.
"""
map_func = Code("""
function() {
if (this.hasOwnProperty(field))
emit(1, {t: this[field] || 0, c: 1});
}
""", scope={'field': field})
reduce_func = Code("""
function(key, values) {
var out = {t: 0, c: 0};
for (var i in values) {
var value = values[i];
out.t += value.t;
out.c += value.c;
}
return out;
}
""")
finalize_func = Code("""
function(key, value) {
return value.t / value.c;
}
""")
for result in self.map_reduce(map_func,
reduce_func,
finalize_f=finalize_func,
output='inline'):
return result.value
else:
return 0
def item_frequencies(self, field, normalize=False, map_reduce=True):
"""Returns a dictionary of all items present in a field across
the whole queried set of documents, and their corresponding frequency.
This is useful for generating tag clouds, or searching documents.
.. note::
            Can only do direct simple mappings and cannot map across
            :class:`~mongoengine.ReferenceField` or
            :class:`~mongoengine.GenericReferenceField`; for more complex
            counting, a manual map reduce call is required.
If the field is a :class:`~mongoengine.ListField`, the items within
each list will be counted individually.
:param field: the field to use
:param normalize: normalize the results so they add to 1.0
:param map_reduce: Use map_reduce over exec_js
.. versionchanged:: 0.5 defaults to map_reduce and can handle embedded
document lookups
"""
if map_reduce:
return self._item_frequencies_map_reduce(field, normalize=normalize)
return self._item_frequencies_exec_js(field, normalize=normalize)
def _item_frequencies_map_reduce(self, field, normalize=False):
map_func = """
function() {
path = '{{~%(field)s}}'.split('.');
field = this;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
field.forEach(function(item) {
emit(item, 1);
});
} else if (field) {
emit(field, 1);
} else {
emit(null, 1);
}
}
""" % dict(field=field)
reduce_func = """
function(key, values) {
var total = 0;
var valuesSize = values.length;
for (var i=0; i < valuesSize; i++) {
total += parseInt(values[i], 10);
}
return total;
}
"""
values = self.map_reduce(map_func, reduce_func, 'inline')
frequencies = {}
for f in values:
key = f.key
if isinstance(key, float):
if int(key) == key:
key = int(key)
key = str(key)
frequencies[key] = f.value
if normalize:
count = sum(frequencies.values())
frequencies = {k: v / count for k, v in frequencies.items()}
return frequencies
def _item_frequencies_exec_js(self, field, normalize=False):
"""Uses exec_js to execute"""
freq_func = """
function(path) {
path = path.split('.');
if (options.normalize) {
var total = 0.0;
db[collection].find(query).forEach(function(doc) {
field = doc;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
total += field.length;
} else {
total++;
}
});
}
var frequencies = {};
var inc = 1.0;
if (options.normalize) {
inc /= total;
}
db[collection].find(query).forEach(function(doc) {
field = doc;
for (p in path) {
if (field)
field = field[path[p]];
else
break;
}
if (field && field.constructor == Array) {
field.forEach(function(item) {
frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
});
} else {
var item = field;
frequencies[item] = inc + (isNaN(frequencies[item]) ? 0: frequencies[item]);
}
});
return frequencies;
}
""" # noqa
data = self.exec_js(freq_func, field, normalize=normalize)
if 'undefined' in data:
data[None] = data['undefined']
            del data['undefined']
return data
def __repr__(self):
limit = REPR_OUTPUT_SIZE + 1
start = (0 if self._skip is None else self._skip)
if self._limit is None:
stop = start + limit
if self._limit is not None:
if self._limit > limit:
stop = start + limit
else:
stop = start + self._limit
try:
data = list(self[start:stop])
except pymongo.errors.InvalidOperation:
return ".. queryset mid-iteration .."
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def select_related(self, max_depth=1):
"""Handles dereferencing of :class:`~bson.dbref.DBRef` objects to
a maximum depth in order to cut down the number queries to mongodb.
.. versionadded:: 0.5
"""
from .dereference import DeReference
# Make select related work the same for querysets
max_depth += 1
return DeReference()(self, max_depth=max_depth)
class QuerySetManager(object):
get_queryset = None
def __init__(self, queryset_func=None):
if queryset_func:
self.get_queryset = queryset_func
self._collections = {}
def __get__(self, instance, owner):
"""Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
if instance is not None:
            # Document object being used rather than a document class
return self
# owner is the document that contains the QuerySetManager
queryset_class = owner._meta['queryset_class'] or QuerySet
queryset = queryset_class(owner, owner._get_collection())
if self.get_queryset:
if six.get_function_code(self.get_queryset).co_argcount == 1:
queryset = self.get_queryset(queryset)
else:
queryset = self.get_queryset(owner, queryset)
return queryset
def queryset_manager(func):
"""Decorator that allows you to define custom QuerySet managers on
:class:`~mongoengine.Document` classes. The manager must be a function that
accepts a :class:`~mongoengine.Document` class as its first argument, and a
    :class:`~mongoengine.queryset.QuerySet` as its second argument. The
    function should return a :class:`~mongoengine.queryset.QuerySet`, probably
    the same one that was passed in, but modified in some way.
"""
if six.get_function_code(func).co_argcount == 1:
import warnings
msg = 'Methods decorated with queryset_manager should take 2 arguments'
warnings.warn(msg, DeprecationWarning)
return QuerySetManager(func)
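# Illustrative sketch (document and manager names assumed for illustration):
#     class BlogPost(Document):
#         published = BooleanField()
#
#         @queryset_manager
#         def live_posts(doc_cls, queryset):
#             return queryset.filter(published=True)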
| {
"repo_name": "conversocial/mongoengine",
"path": "mongoengine/queryset.py",
"copies": "1",
"size": "65638",
"license": "mit",
"hash": 3737054200342665000,
"line_mean": 35.3847006652,
"line_max": 114,
"alpha_frac": 0.5389103873,
"autogenerated": false,
"ratio": 4.571209694268403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5610120081568404,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import pytz
import six
from datetime import datetime, timedelta
from sentry.testutils.cases import OutcomesSnubaTest
from sentry.tsdb.base import TSDBModel
from sentry.tsdb.snuba import SnubaTSDB
from sentry.utils.dates import to_timestamp
from sentry.utils.outcomes import Outcome
def floor_to_hour_epoch(value):
value = value.replace(minute=0, second=0, microsecond=0)
return int(to_timestamp(value))
def floor_to_10s_epoch(value):
seconds = value.second
floored_second = 10 * (seconds // 10)
value = value.replace(second=floored_second, microsecond=0)
return int(to_timestamp(value))
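# Illustrative sketch (timestamp chosen for illustration):
#     floor_to_10s_epoch(datetime(2020, 1, 1, 0, 0, 17, tzinfo=pytz.utc))
# returns the same epoch as 2020-01-01 00:00:10 UTC, i.e. seconds are floored
# to the nearest 10-second bucket.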
class SnubaTSDBTest(OutcomesSnubaTest):
def setUp(self):
super(SnubaTSDBTest, self).setUp()
self.db = SnubaTSDB()
# Set up the times
self.now = datetime.now(pytz.utc)
self.start_time = self.now - timedelta(days=7)
self.one_day_later = self.start_time + timedelta(days=1)
self.day_before_start_time = self.start_time - timedelta(days=1)
def test_organization_outcomes(self):
other_organization = self.create_organization()
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
self.organization.id, self.project.id, outcome.value, self.start_time, 1, 3
)
self.store_outcomes(
self.organization.id, self.project.id, outcome.value, self.one_day_later, 1, 4
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
other_organization.id, self.project.id, outcome.value, self.one_day_later, 1, 5
)
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.day_before_start_time,
1,
6,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.organization_total_received, 3600, floor_to_hour_epoch, 3 * 3, 4 * 3),
(TSDBModel.organization_total_rejected, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.organization_total_blacklisted, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.organization_total_received, 10, floor_to_10s_epoch, 3 * 3, 4 * 3),
(TSDBModel.organization_total_rejected, 10, floor_to_10s_epoch, 3, 4),
(TSDBModel.organization_total_blacklisted, 10, floor_to_10s_epoch, 3, 4),
]:
# Query SnubaTSDB
response = self.db.get_range(
tsdb_model, [self.organization.id], self.start_time, self.now, granularity, None
)
# Assert that the response has values set for the times we expect, and nothing more
assert self.organization.id in response
response_dict = {k: v for (k, v) in response[self.organization.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[self.organization.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_project_outcomes(self):
other_project = self.create_project(organization=self.organization)
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
self.organization.id, self.project.id, outcome.value, self.start_time, 1, 3
)
self.store_outcomes(
self.organization.id, self.project.id, outcome.value, self.one_day_later, 1, 4
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
self.organization.id, other_project.id, outcome.value, self.one_day_later, 1, 5
)
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.day_before_start_time,
1,
6,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.project_total_received, 3600, floor_to_hour_epoch, 3 * 3, 4 * 3),
(TSDBModel.project_total_rejected, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.project_total_blacklisted, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.project_total_received, 10, floor_to_10s_epoch, 3 * 3, 4 * 3),
(TSDBModel.project_total_rejected, 10, floor_to_10s_epoch, 3, 4),
(TSDBModel.project_total_blacklisted, 10, floor_to_10s_epoch, 3, 4),
]:
response = self.db.get_range(
tsdb_model, [self.project.id], self.start_time, self.now, granularity, None
)
# Assert that the response has values set for the times we expect, and nothing more
assert self.project.id in response
response_dict = {k: v for (k, v) in response[self.project.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[self.project.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_key_outcomes(self):
project_key = self.create_project_key(project=self.project)
other_project = self.create_project(organization=self.organization)
other_project_key = self.create_project_key(project=other_project)
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.start_time,
project_key.id,
3,
)
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.one_day_later,
project_key.id,
4,
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.one_day_later,
other_project_key.id,
5,
)
self.store_outcomes(
self.organization.id,
self.project.id,
outcome.value,
self.day_before_start_time,
project_key.id,
6,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.key_total_received, 3600, floor_to_hour_epoch, 3 * 3, 4 * 3),
(TSDBModel.key_total_rejected, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.key_total_blacklisted, 3600, floor_to_hour_epoch, 3, 4),
(TSDBModel.key_total_received, 10, floor_to_10s_epoch, 3 * 3, 4 * 3),
(TSDBModel.key_total_rejected, 10, floor_to_10s_epoch, 3, 4),
(TSDBModel.key_total_blacklisted, 10, floor_to_10s_epoch, 3, 4),
]:
response = self.db.get_range(
                # with [project_key.id, six.text_type(project_key.id)], we are imitating the hack in
                # project_key_stats.py because that is what `get_range` will be called with.
tsdb_model,
[project_key.id, six.text_type(project_key.id)],
self.start_time,
self.now,
granularity,
None,
)
# Assert that the response has values set for the times we expect, and nothing more
assert project_key.id in response
response_dict = {k: v for (k, v) in response[project_key.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[project_key.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_all_tsdb_models_have_an_entry_in_model_query_settings(self):
# Ensure that the models we expect to be using Snuba are using Snuba
exceptions = [
TSDBModel.project_total_forwarded # this is not outcomes and will be moved separately
]
# does not include the internal TSDB model
models = [
model for model in list(TSDBModel) if 0 < model.value < 700 and model not in exceptions
]
for model in models:
assert model in SnubaTSDB.model_query_settings
def test_outcomes_have_a_10s_setting(self):
exceptions = [
TSDBModel.project_total_forwarded # this is not outcomes and will be moved separately
]
def is_an_outcome(model):
if model in exceptions:
return False
# 100 - 200: project outcomes
# 200 - 300: organization outcomes
# 500 - 600: key outcomes
# 600 - 700: filtered project based outcomes
return (
(100 <= model.value < 200)
or (200 <= model.value < 300)
or (500 <= model.value < 600)
or (600 <= model.value < 700)
)
models = [x for x in list(TSDBModel) if is_an_outcome(x)]
for model in models:
assert model in SnubaTSDB.lower_rollup_query_settings
| {
"repo_name": "beeftornado/sentry",
"path": "tests/sentry/tsdb/test_snuba.py",
"copies": "1",
"size": "9972",
"license": "bsd-3-clause",
"hash": -5009904867030452000,
"line_mean": 40.7238493724,
"line_max": 100,
"alpha_frac": 0.5768150822,
"autogenerated": false,
"ratio": 3.8133843212237095,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48901994034237095,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import re
from changes.config import db
from changes.db.utils import get_or_create
from changes.jobs.sync_job_step import sync_job_step
from changes.models import JobPhase
from .builder import JenkinsBuilder
BASE_XPATH = '/freeStyleProject/build[action/cause/upstreamProject="{upstream_job}" and action/cause/upstreamBuild="{build_no}"]/number'
DOWNSTREAM_XML_RE = re.compile(r'<number>(\d+)</number>')
class JenkinsFactoryBuilder(JenkinsBuilder):
def __init__(self, *args, **kwargs):
self.downstream_job_names = kwargs.pop('downstream_job_names', ())
super(JenkinsFactoryBuilder, self).__init__(*args, **kwargs)
def _get_downstream_jobs(self, step, downstream_job_name):
xpath = BASE_XPATH.format(
upstream_job=step.data['job_name'],
build_no=step.data['build_no']
)
response = self._get_raw_response('/job/{job_name}/api/xml/'.format(
job_name=downstream_job_name,
), params={
'depth': 1,
'xpath': xpath,
'wrapper': 'a',
})
if not response:
return []
return map(int, DOWNSTREAM_XML_RE.findall(response))
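        # Illustrative sketch (build numbers assumed for illustration): the
        # wrapped XML response looks roughly like
        #     <a><number>12</number><number>13</number></a>
        # which DOWNSTREAM_XML_RE reduces to [12, 13].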
def sync_step(self, step):
if step.data.get('job_name') != self.job_name:
return super(JenkinsFactoryBuilder, self).sync_step(step)
job = step.job
# for any downstream jobs, pull their results using xpath magic
for downstream_job_name in self.downstream_job_names:
downstream_build_nos = self._get_downstream_jobs(step, downstream_job_name)
if not downstream_build_nos:
continue
phase, created = get_or_create(JobPhase, where={
'job': job,
'label': downstream_job_name,
}, defaults={
'project_id': job.project_id,
})
db.session.commit()
for build_no in downstream_build_nos:
# XXX(dcramer): ideally we would grab this with the first query
                # but because we don't want to rely on an XML parser, we're doing
# a second http request for build details
downstream_step = self._create_job_step(
phase, downstream_job_name, build_no)
db.session.commit()
sync_job_step.delay_if_needed(
step_id=downstream_step.id.hex,
task_id=downstream_step.id.hex,
parent_task_id=job.id.hex,
)
return super(JenkinsFactoryBuilder, self).sync_step(step)
| {
"repo_name": "alex/changes",
"path": "changes/backends/jenkins/factory_builder.py",
"copies": "1",
"size": "2659",
"license": "apache-2.0",
"hash": 7501269454299409000,
"line_mean": 34.9324324324,
"line_max": 136,
"alpha_frac": 0.5885671305,
"autogenerated": false,
"ratio": 3.8817518248175182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4970318955317518,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import six
import itertools
from functools import reduce, partial
from operator import or_
from django.db.models import Q
from sentry.models import Release, Project, ProjectStatus, EventUser
from sentry.utils.dates import to_timestamp
from sentry.utils.geo import geo_by_addr as _geo_by_addr
HEALTH_ID_KEY = "_health_id"
def make_health_id(lookup, value):
# Convert a lookup and value into
# a string that can be used back in a request query.
return "%s:%s" % (lookup.name, lookup.encoder(value))
def serialize_releases(organization, item_list, user, lookup):
return {
(r.version,): {
HEALTH_ID_KEY: make_health_id(lookup, [r.version]),
"value": {"id": r.id, "version": r.version},
}
for r in Release.objects.filter(
organization=organization, version__in={i[0] for i in item_list}
)
}
def geo_by_addr(ip):
try:
geo = _geo_by_addr(ip)
except Exception:
geo = None
if not geo:
return
rv = {}
for k in "country_code", "city", "region":
d = geo.get(k)
if isinstance(d, six.binary_type):
d = d.decode("ISO-8859-1")
rv[k] = d
return rv
def serialize_eventusers(organization, item_list, user, lookup):
if not item_list:
return {}
# We have no reliable way to map the tag value format
# back into real EventUser rows. EventUser is only unique
# per-project, and this is an organization aggregate.
# This means a single value maps to multiple rows.
filters = reduce(
or_,
[Q(hash=EventUser.hash_from_tag(tag), project_id=project) for tag, project in item_list],
)
eu_by_key = {(eu.tag_value, eu.project_id): eu for eu in EventUser.objects.filter(filters)}
projects = serialize_projects(organization, {i[1] for i in item_list}, user)
rv = {}
for tag, project in item_list:
eu = eu_by_key.get((tag, project))
if eu is None:
attr, value = tag.split(":", 1)
eu = EventUser(project_id=project, **{EventUser.attr_from_keyword(attr): value})
rv[(tag, project)] = {
HEALTH_ID_KEY: make_health_id(lookup, [eu.tag_value, eu.project_id]),
"value": {
"id": six.text_type(eu.id) if eu.id else None,
"project": projects.get(eu.project_id),
"hash": eu.hash,
"tagValue": eu.tag_value,
"identifier": eu.ident,
"username": eu.username,
"email": eu.email,
"ipAddress": eu.ip_address,
"dateCreated": eu.date_added,
"label": eu.get_label(),
"name": eu.get_display_name(),
"geo": geo_by_addr(eu.ip_address),
},
}
return rv
def encoder_eventuser(value):
# EventUser needs to be encoded as a
# project_id, value tuple.
tag_value, project_id = value
return "%d:%s" % (project_id, tag_value)
def serialize_projects(organization, item_list, user):
return {
id: {"id": id, "slug": slug}
for id, slug in Project.objects.filter(
id__in=item_list, organization=organization, status=ProjectStatus.VISIBLE
).values_list("id", "slug")
}
def serialize_noop(organization, item_list, user, lookup):
return {i: {HEALTH_ID_KEY: make_health_id(lookup, [i[0]]), "value": i[0]} for i in item_list}
def encoder_noop(row):
if not row:
return None
return row[0]
def value_from_row(row, tagkey):
return tuple(row[k] for k in tagkey)
def zerofill(data, start, end, rollup):
rv = []
end = int(to_timestamp(end))
rollup_start = (int(to_timestamp(start)) // rollup) * rollup
rollup_end = (end // rollup) * rollup
# Fudge the end value when we're only getting a single window.
    # This ensures that we get both values for a single large window that
# straddles two buckets. An example of this is a 1d window that starts
# mid day.
if rollup_end - rollup_start == rollup:
rollup_end += 1
i = 0
for key in six.moves.xrange(rollup_start, rollup_end, rollup):
try:
while data[i][0] < key:
rv.append(data[i])
i += 1
if data[i][0] == key:
rv.append(data[i])
i += 1
continue
except IndexError:
pass
rv.append((key, []))
# Add any remaining rows that are not aligned to the rollup and are lower than the
# end date.
if i < len(data):
rv.extend(row for row in data[i:] if row[0] < rollup_end)
return rv
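# Illustrative sketch (epoch values chosen for illustration): with a 10s
# rollup and a window whose floored epochs are 0 and 30, input data of
# [(10, [row])] is padded to [(0, []), (10, [row]), (20, [])].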
class SnubaLookup(object):
"""
A SnubaLookup consists of all of the attributes needed to facilitate making
a query for a column in Snuba. This covers which columns are selected, the extra conditions
that need to be applied, how values are serialized in/out of Snuba, etc.
"""
__slots__ = (
"name",
"tagkey",
"columns",
"selected_columns",
"conditions",
"serializer",
"encoder",
"filter_key",
)
__registry = {}
def __init__(
self,
name,
tagkey=None,
extra=None,
selected_columns=None,
conditions=None,
serializer=serialize_noop,
encoder=encoder_noop,
filter_key=None,
):
cls = type(self)
assert name not in cls.__registry
self.name = name
self.tagkey = tagkey or name
self.columns = [self.tagkey] + list(extra or [])
self.serializer = partial(serializer, lookup=self)
self.encoder = encoder
self.conditions = conditions or [[self.tagkey, "IS NOT NULL", None]]
self.selected_columns = selected_columns or []
self.filter_key = filter_key or self.tagkey
cls.__registry[name] = self
@classmethod
def get(cls, name):
return cls.__registry[name]
SnubaLookup(
"user",
"tags[sentry:user]",
["project_id"],
serializer=serialize_eventusers,
encoder=encoder_eventuser,
# User is a complex query and can't be treated as a single value.
# And EventUser is a tuple of project_id and the tag value. So we need
# to make sure we always keep them together and query them as a single unit.
filter_key=("concat", (("toString", ("project_id",)), "':'", "tags[sentry:user]")),
)
SnubaLookup("release", "tags[sentry:release]", serializer=serialize_releases)
# error.type is special in that in ClickHouse, it's an array. But we need
# to make sure that we don't do any queries across a NULL value or an empty array
# so we must filter them out explicitly. We also are choosing to explicitly take the
# first element of the exception_stacks array as the "primary" error type for the event.
# This is slightly inaccurate due to the fact that a single event may have
# multiple errors.
SnubaLookup(
"error.type",
"error_type",
selected_columns=[
("ifNull", ("arrayElement", ("exception_stacks.type", 1), "''"), "error_type")
],
conditions=[[("notEmpty", ("exception_stacks.type",)), "=", 1], [("error_type", "!=", "")]],
)
# Similar to error.type, we need to also guard against NULL types, but for this case,
# the NULL type is actually significant for us, which means "unknown". So we want
# to also retain and capture this.
SnubaLookup(
"error.handled",
"error_handled",
selected_columns=[("arrayElement", ("exception_stacks.mechanism_handled", 1), "error_handled")],
conditions=[[("notEmpty", ("exception_stacks.mechanism_handled",)), "=", 1]],
)
# Simple tags don't need any special treatment
for _tag in ("transaction", "os", "os.name", "browser", "browser.name", "device", "device.family"):
SnubaLookup(_tag, "tags[%s]" % _tag)
class BaseSnubaSerializer(object):
def __init__(self, organization, lookup, user):
self.organization = organization
self.lookup = lookup
self.user = user
def get_attrs(self, item_list):
if self.lookup is None:
return item_list
return self.lookup.serializer(self.organization, item_list, self.user)
class SnubaResultSerializer(BaseSnubaSerializer):
"""
Serializer for the top values Snuba results.
"""
def serialize(self, result):
counts_by_value = {
value_from_row(r, self.lookup.columns): r["count"] for r in result.previous["data"]
}
projects = serialize_projects(
self.organization,
{p for r in result.current["data"] for p in r.get("top_projects", [])},
self.user,
)
attrs = self.get_attrs(
[value_from_row(r, self.lookup.columns) for r in result.current["data"]]
)
data = []
for r in result.current["data"]:
value = value_from_row(r, self.lookup.columns)
row = {
"count": r["count"],
"lastCount": counts_by_value.get(value, 0),
self.lookup.name: attrs.get(value),
}
if "top_projects" in r:
row["topProjects"] = [projects[p] for p in r["top_projects"]]
if "total_projects" in r:
row["totalProjects"] = r["total_projects"]
data.append(row)
return {
"data": data,
"totals": {
"count": result.current["totals"]["count"],
"lastCount": result.previous["totals"]["count"],
},
}
class SnubaTSResultSerializer(BaseSnubaSerializer):
"""
Serializer for time-series Snuba data.
"""
def serialize(self, result, column="count", order=None):
data = [
(key, list(group))
for key, group in itertools.groupby(result.data["data"], key=lambda r: r["time"])
]
if self.lookup:
attrs = self.get_attrs(
[value_from_row(r, self.lookup.columns) for _, v in data for r in v]
)
rv = []
for k, v in data:
row = []
for r in v:
item = {"count": r.get(column, 0)}
if self.lookup:
value = value_from_row(r, self.lookup.columns)
item[self.lookup.name] = (attrs.get(value),)
row.append(item)
rv.append((k, row))
res = {"data": zerofill(rv, result.start, result.end, result.rollup)}
if result.data.get("totals"):
res["totals"] = {"count": result.data["totals"][column]}
        # If order is passed, let that overwrite what's in data since it's the order for multi-axis
if order is not None:
res["order"] = order
elif "order" in result.data:
res["order"] = result.data["order"]
return res
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/api/serializers/snuba.py",
"copies": "1",
"size": "10945",
"license": "bsd-3-clause",
"hash": 2720376784241565700,
"line_mean": 31.2861356932,
"line_max": 100,
"alpha_frac": 0.5806304249,
"autogenerated": false,
"ratio": 3.7227891156462585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4803419540546258,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import sys
from collections import OrderedDict
import numpy as np
import theano
from theano import tensor
from . import matrixtools, nptools, theanotools
from .debug import DEBUG
from .recurrent import Recurrent
from . import tasktools
configs_required = ['Nin', 'Nout']
configs_default = {
'alpha': 1,
'N': 50,
'p0': 1,
'rho': 1.5,
'f_out': 'softmax',
'L2_r': 0,
'Win': 1,
'Win_mask': None,
'Wout': 0,
'bout': 0,
'x0': 0.5,
'L1_Wrec': 0,
'L2_Wrec': 0,
'fix': [],
'ei': None
}
def random_sign(rng, size):
return 2*rng.randint(2, size=size) - 1
class GRU(Recurrent):
"""
Modified Gated Recurrent Units.
"""
def get_dim(self, name):
if name == 'Win':
return (self.Nin, 3*self.N)
if name == 'bin':
return 3*self.N
if name == 'Wrec_gates':
return (self.N, 2*self.N)
if name == 'Wrec':
return (self.N, self.N)
if name == 'Wout':
return (self.N, self.Nout)
if name == 'bout':
return self.Nout
if name == 'x0':
return self.N
raise ValueError(name)
def __init__(self, config, params=None, masks=None, seed=1, name=''):
super(GRU, self).__init__('gru', name)
#=================================================================================
# Config
#=================================================================================
self.config = {}
# Required
for k in configs_required:
if k not in config:
print("[ {} ] Error: {} is required.".format(self.name, k))
sys.exit()
self.config[k] = config[k]
# Defaults available
for k in configs_default:
if k in config:
self.config[k] = config[k]
else:
self.config[k] = configs_default[k]
#=================================================================================
# Activations
#=================================================================================
# Hidden
self.f_hidden = theanotools.relu
self.firing_rate = nptools.relu
# Output
if self.config['f_out'] == 'softmax':
self.f_out = theanotools.softmax
self.f_log_out = theanotools.log_softmax
elif self.config['f_out'] == 'linear':
self.f_out = (lambda x: x)
self.f_log_out = tensor.log
else:
raise ValueError(self.config['f_out'])
#=================================================================================
# Network shape
#=================================================================================
self.Nin = self.config['Nin']
self.N = self.config['N']
self.Nout = self.config['Nout']
#=================================================================================
# Initialize parameters
#=================================================================================
#self.config['ei'], EXC, INH = tasktools.generate_ei(self.N)
# Masks
'''
if self.config['ei'] is not None:
inh, = np.where(self.config['ei'] < 0)
for k in ['Wrec_gates', 'Wrec']:#, 'Wout']:
self.masks[k] = np.ones(self.get_dim(k))
self.masks[k][inh] *= -1
#self.masks[k] = theanotools.shared(self.masks[k])
'''
if params is None:
#-----------------------------------------------------------------------------
# Random number generator
#-----------------------------------------------------------------------------
rng = nptools.get_rng(seed, __name__)
#-----------------------------------------------------------------------------
# Connection masks
#-----------------------------------------------------------------------------
masks = {}
# Input masks
if self.config['Win_mask'] is not None:
print("[ {} ] Setting mask for Win.".format(self.name))
masks['Win'] = self.config['Win_mask']
if self.config['p0'] < 1:
# Recurrent in-degree
K = int(self.config['p0']*self.N)
idx = np.arange(self.N)
# Wrec
M = np.zeros(self.get_dim('Wrec'))
for j in xrange(M.shape[1]):
M[rng.permutation(idx)[:K],j] = 1
masks['Wrec'] = M
# Wrec (gates)
M = np.zeros(self.get_dim('Wrec_gates'))
for j in xrange(M.shape[1]):
M[rng.permutation(idx)[:K],j] = 1
masks['Wrec_gates'] = M
#-----------------------------------------------------------------------------
            # Network parameters
#-----------------------------------------------------------------------------
params = OrderedDict()
if self.config['ei'] is None:
# Input weights
params['Win'] = self.config['Win']*rng.normal(size=self.get_dim('Win'))
#k = 4
#params['Win'] = self.config['Win']*rng.gamma(k, 1/k, size=self.get_dim('Win'))
#params['Win'] *= random_sign(rng, self.get_dim('Win'))
# Input biases
params['bin'] = np.zeros(self.get_dim('bin'))
# Recurrent weights
k = 4
params['Wrec_gates'] = rng.gamma(k, 1/k, self.get_dim('Wrec_gates'))
params['Wrec'] = rng.gamma(k, 1/k, self.get_dim('Wrec'))
params['Wrec_gates'] *= random_sign(rng, self.get_dim('Wrec_gates'))
params['Wrec'] *= random_sign(rng, self.get_dim('Wrec'))
#params['Wrec_gates'] = rng.normal(size=self.get_dim('Wrec_gates'))
#params['Wrec'] = rng.normal(size=self.get_dim('Wrec'))
# Output weights
if self.config['Wout'] > 0:
print("[ {} ] Initialize Wout to random normal.".format(self.name))
params['Wout'] = self.config['Wout']*rng.normal(size=self.get_dim('Wout'))
else:
print("[ {} ] Initialize Wout to zeros.".format(self.name))
params['Wout'] = np.zeros(self.get_dim('Wout'))
# Output biases
params['bout'] = self.config['bout']*np.ones(self.get_dim('bout'))
# Initial condition
params['x0'] = self.config['x0']*np.ones(self.get_dim('x0'))
else:
raise NotImplementedError
'''
params['Win'] = rng.normal(size=self.get_dim('Win'))
params['bin'] = np.zeros(self.get_dim('bin'))
#params['Wrec_gates'] = rng.normal(size=self.get_dim('Wrec_gates'))
#params['Wrec'] = rng.normal(size=self.get_dim('Wrec'))
params['Wout'] = np.zeros(self.get_dim('Wout'))
params['bout'] = np.zeros(self.get_dim('bout'))
params['x0'] = 0.2*np.ones(self.get_dim('x0'))
#params['Wout'] = 0.1*np.ones(self.get_dim('Wout'))
exc, = np.where(self.config['ei'] > 0)
inh, = np.where(self.config['ei'] < 0)
k = 2
theta = 0.1/k
params['Wrec_gates'] = rng.gamma(k, theta, size=self.get_dim('Wrec_gates'))
params['Wrec'] = rng.gamma(k, theta, size=self.get_dim('Wrec'))
for i in xrange(params['Wrec_gates'].shape[1]):
totE = np.sum(params['Wrec_gates'][exc,i])
totI = np.sum(params['Wrec_gates'][inh,i])
params['Wrec_gates'][inh,i] *= totE/totI
for i in xrange(params['Wrec'].shape[1]):
totE = np.sum(params['Wrec'][exc,i])
totI = np.sum(params['Wrec'][inh,i])
params['Wrec'][inh,i] *= totE/totI
'''
# Desired spectral radius
rho = self.config['rho']
Wrec_gates = params['Wrec_gates'].copy()
if 'Wrec_gates' in masks:
Wrec_gates *= masks['Wrec_gates']
Wrec = params['Wrec'].copy()
if 'Wrec' in masks:
Wrec *= masks['Wrec']
rho0 = matrixtools.spectral_radius(Wrec_gates[:,:self.N])
params['Wrec_gates'][:,:self.N] *= rho/rho0
rho0 = matrixtools.spectral_radius(Wrec_gates[:,self.N:])
params['Wrec_gates'][:,self.N:] *= rho/rho0
rho0 = matrixtools.spectral_radius(Wrec)
params['Wrec'] *= rho/rho0
#=================================================================================
# Display spectral radii
#=================================================================================
"""
Wrec_gates = params['Wrec_gates'].copy()
if 'Wrec_gates' in masks:
Wrec_gates *= masks['Wrec_gates']
Wrec = params['Wrec'].copy()
if 'Wrec' in masks:
Wrec *= masks['Wrec']
rho0 = matrixtools.spectral_radius(Wrec_gates[:,:self.N])
print("rho = {}".format(rho0))
#params['Wrec_gates'][:,:self.N] *= rho/rho0
rho0 = matrixtools.spectral_radius(Wrec_gates[:,self.N:])
print("rho = {}".format(rho0))
#params['Wrec_gates'][:,self.N:] *= rho/rho0
rho0 = matrixtools.spectral_radius(Wrec)
print("rho = {}".format(rho0))
#params['Wrec'] *= rho/rho0
"""
#=================================================================================
# Give to Theano
#=================================================================================
# Share
for k, v in params.items():
self.params[k] = theanotools.shared(v, k)
for k, v in masks.items():
self.masks[k] = theanotools.shared(v)
# Fixed parameters
if DEBUG and self.config['fix']:
print("[ {} ] Fixed parameters: ".format(self.name) + ', '.join(self.config['fix']))
# Trainable parameters
self.trainables = [self.params[k]
for k in self.params if k not in self.config['fix']]
#=================================================================================
# Setup
#=================================================================================
# Leak
self.alpha = self.config['alpha']
print("[ {} ] alpha = {}".format(self.name, self.alpha))
#=================================================================================
# Define a step
#=================================================================================
def step(u, q, x_tm1, alpha, Win, bin, Wrec_gates, Wrec):
inputs_t = u.dot(Win) + bin
state_inputs = inputs_t[:,:self.N]
gate_inputs = inputs_t[:,self.N:]
r_tm1 = self.f_hidden(x_tm1)
gate_values = tensor.nnet.sigmoid(r_tm1.dot(Wrec_gates) + gate_inputs)
update_values = gate_values[:,:self.N]
g = gate_values[:,self.N:]
x_t = ((1 - alpha*update_values)*x_tm1
+ alpha*update_values*((g*r_tm1).dot(Wrec) + state_inputs + q))
return x_t
self.step = step
self.step_params = [self.alpha]
self.step_params += [self.get(k)
for k in ['Win', 'bin', 'Wrec_gates', 'Wrec']]
def get_regs(self, x0_, x, M):
"""
Regularization terms.
"""
regs = 0
#=================================================================================
# L1 recurrent weights
#=================================================================================
L1_Wrec = self.config['L1_Wrec']
if L1_Wrec > 0:
print("L1_Wrec = {}".format(L1_Wrec))
W = self.get('Wrec')
reg = tensor.sum(abs(W))
size = tensor.prod(W.shape)
#W = self.get('Wrec_gates')
#reg += tensor.sum(abs(W))
#size += tensor.prod(W.shape)
regs += L1_Wrec * reg/size
#=================================================================================
# L2 recurrent weights
#=================================================================================
L2_Wrec = self.config['L2_Wrec']
if L2_Wrec > 0:
print("L2_Wrec = {}".format(L2_Wrec))
W = self.get('Wrec')
reg = tensor.sum(tensor.sqr(W))
size = tensor.prod(W.shape)
W = self.get('Wrec_gates')
reg += tensor.sum(tensor.sqr(W))
size += tensor.prod(W.shape)
regs += L2_Wrec * reg/size
#=================================================================================
# Firing rates
#=================================================================================
L2_r = self.config['L2_r']
if L2_r > 0:
# Repeat (T, B) -> (T, B, N)
M_ = (tensor.tile(M.T, (x.shape[-1], 1, 1))).T
# Combine t=0 with t>0
x_all = tensor.concatenate(
[x0_.reshape((1, x0_.shape[0], x0_.shape[1])), x],
axis=0
)
# Firing rate
r = self.f_hidden(x_all)
# Regularization
regs += L2_r * tensor.sum(tensor.sqr(r)*M_)/tensor.sum(M_)
#=================================================================================
return regs
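# Illustrative sketch (config values assumed for illustration):
#     rnn = GRU({'Nin': 3, 'Nout': 2, 'N': 100, 'rho': 1.2}, seed=42)
# builds a 100-unit network whose recurrent weight blocks are rescaled to a
# spectral radius of 1.2 during initialization.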
| {
"repo_name": "frsong/pyrl",
"path": "pyrl/gru.py",
"copies": "1",
"size": "14283",
"license": "mit",
"hash": -20953905864797276,
"line_mean": 35.6230769231,
"line_max": 96,
"alpha_frac": 0.3829027515,
"autogenerated": false,
"ratio": 4.0669134396355355,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49498161911355354,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import tensorflow as tf
from keras.layers import Conv2D
from keras.initializers import RandomNormal
from deform_conv import tf_batch_map_offsets
class ConvOffset2D(Conv2D):
"""ConvOffset2D
    Convolutional layer responsible for learning the 2D offsets and outputting
    the deformed feature map using bilinear interpolation.
    Note that this layer does not perform convolution on the deformed feature
    map. See get_deform_cnn in cnn.py for usage.
"""
def __init__(self, filters, init_normal_stddev=0.01, **kwargs):
"""Init
Parameters
----------
        filters : int
            Number of channels of the input feature map
        init_normal_stddev : float
            Standard deviation of the random normal kernel initializer
        **kwargs:
            Passed to the superclass. See the Conv2D layer in Keras
"""
self.filters = filters
super(ConvOffset2D, self).__init__(
self.filters * 2, (3, 3), padding='same', use_bias=False,
kernel_initializer=RandomNormal(0, init_normal_stddev),
**kwargs
)
def call(self, x):
"""Return the deformed featured map"""
x_shape = x.get_shape()
offsets = super(ConvOffset2D, self).call(x)
# offsets: (b*c, h, w, 2)
offsets = self._to_bc_h_w_2(offsets, x_shape)
# x: (b*c, h, w)
x = self._to_bc_h_w(x, x_shape)
# X_offset: (b*c, h, w)
x_offset = tf_batch_map_offsets(x, offsets)
# x_offset: (b, h, w, c)
x_offset = self._to_b_h_w_c(x_offset, x_shape)
return x_offset
def compute_output_shape(self, input_shape):
"""Output shape is the same as input shape
Because this layer does only the deformation part
"""
return input_shape
@staticmethod
def _to_bc_h_w_2(x, x_shape):
"""(b, h, w, 2c) -> (b*c, h, w, 2)"""
x = tf.transpose(x, [0, 3, 1, 2])
x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2]), 2))
return x
@staticmethod
def _to_bc_h_w(x, x_shape):
"""(b, h, w, c) -> (b*c, h, w)"""
x = tf.transpose(x, [0, 3, 1, 2])
x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2])))
return x
@staticmethod
def _to_b_h_w_c(x, x_shape):
"""(b*c, h, w) -> (b, h, w, c)"""
x = tf.reshape(
x, (-1, int(x_shape[3]), int(x_shape[1]), int(x_shape[2]))
)
x = tf.transpose(x, [0, 2, 3, 1])
return x
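    # Illustrative sketch (shapes assumed for illustration): for an input of
    # shape (b, h, w, c) the offset convolution above yields (b, h, w, 2c),
    # which _to_bc_h_w_2 reshapes to (b*c, h, w, 2) so every channel gets its
    # own (y, x) offset grid before tf_batch_map_offsets samples it.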
| {
"repo_name": "andrewv587/pycharm-project",
"path": "deform-conv-dir/layers.py",
"copies": "1",
"size": "2533",
"license": "apache-2.0",
"hash": -2990394093258062300,
"line_mean": 28.8,
"line_max": 78,
"alpha_frac": 0.5479668377,
"autogenerated": false,
"ratio": 3.234993614303959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42829604520039594,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import textwrap
from pprint import PrettyPrinter
from _plotly_utils.utils import *
from _plotly_utils.data_utils import *
# Pretty printing
def _list_repr_elided(v, threshold=200, edgeitems=3, indent=0, width=80):
"""
    Return a string representation of a list, elided if it has more than
    ``threshold`` elements
Parameters
----------
v : list
Input list
threshold :
Maximum number of elements to display
Returns
-------
str
"""
if isinstance(v, list):
open_char, close_char = "[", "]"
elif isinstance(v, tuple):
open_char, close_char = "(", ")"
else:
raise ValueError("Invalid value of type: %s" % type(v))
if len(v) <= threshold:
disp_v = v
else:
disp_v = list(v[:edgeitems]) + ["..."] + list(v[-edgeitems:])
v_str = open_char + ", ".join([str(e) for e in disp_v]) + close_char
v_wrapped = "\n".join(
textwrap.wrap(
v_str,
width=width,
initial_indent=" " * (indent + 1),
subsequent_indent=" " * (indent + 1),
)
).strip()
return v_wrapped
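# Illustrative sketch (values chosen for illustration):
#     _list_repr_elided(list(range(10)), threshold=5, edgeitems=2)
#     -> '[0, 1, ..., 8, 9]'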
class ElidedWrapper(object):
"""
Helper class that wraps values of certain types and produces a custom
__repr__() that may be elided and is suitable for use during pretty
printing
"""
def __init__(self, v, threshold, indent):
self.v = v
self.indent = indent
self.threshold = threshold
@staticmethod
def is_wrappable(v):
numpy = get_module("numpy")
if isinstance(v, (list, tuple)) and len(v) > 0 and not isinstance(v[0], dict):
return True
elif numpy and isinstance(v, numpy.ndarray):
return True
elif isinstance(v, str):
return True
else:
return False
def __repr__(self):
numpy = get_module("numpy")
if isinstance(self.v, (list, tuple)):
# Handle lists/tuples
res = _list_repr_elided(
self.v, threshold=self.threshold, indent=self.indent
)
return res
elif numpy and isinstance(self.v, numpy.ndarray):
# Handle numpy arrays
# Get original print opts
orig_opts = numpy.get_printoptions()
            # Set threshold to self.threshold
numpy.set_printoptions(
**dict(orig_opts, threshold=self.threshold, edgeitems=3, linewidth=80)
)
res = self.v.__repr__()
# Add indent to all but the first line
res_lines = res.split("\n")
res = ("\n" + " " * self.indent).join(res_lines)
# Restore print opts
numpy.set_printoptions(**orig_opts)
return res
elif isinstance(self.v, str):
# Handle strings
if len(self.v) > 80:
return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"
else:
return self.v.__repr__()
else:
return self.v.__repr__()
class ElidedPrettyPrinter(PrettyPrinter):
"""
PrettyPrinter subclass that elides long lists/arrays/strings
"""
def __init__(self, *args, **kwargs):
self.threshold = kwargs.pop("threshold", 200)
PrettyPrinter.__init__(self, *args, **kwargs)
def _format(self, val, stream, indent, allowance, context, level):
if ElidedWrapper.is_wrappable(val):
elided_val = ElidedWrapper(val, self.threshold, indent)
return self._format(elided_val, stream, indent, allowance, context, level)
else:
return PrettyPrinter._format(
self, val, stream, indent, allowance, context, level
)
def node_generator(node, path=()):
"""
General, node-yielding generator.
Yields (node, path) tuples when it finds values that are dict
instances.
A path is a sequence of hashable values that can be used as either keys to
a mapping (dict) or indices to a sequence (list). A path is always wrt to
some object. Given an object, a path explains how to get from the top level
of that object to a nested value in the object.
:param (dict) node: Part of a dict to be traversed.
:param (tuple[str]) path: Defines the path of the current node.
:return: (Generator)
Example:
>>> for node, path in node_generator({'a': {'b': 5}}):
... print(node, path)
{'a': {'b': 5}} ()
{'b': 5} ('a',)
"""
if not isinstance(node, dict):
return # in case it's called with a non-dict node at top level
yield node, path
for key, val in node.items():
if isinstance(val, dict):
for item in node_generator(val, path + (key,)):
yield item
def get_by_path(obj, path):
"""
Iteratively get on obj for each key in path.
:param (list|dict) obj: The top-level object.
:param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
:return: (*)
Example:
>>> figure = {'data': [{'x': [5]}]}
>>> path = ('data', 0, 'x')
>>> get_by_path(figure, path)
[5]
"""
for key in path:
obj = obj[key]
return obj
def decode_unicode(coll):
if isinstance(coll, list):
for no, entry in enumerate(coll):
if isinstance(entry, (dict, list)):
coll[no] = decode_unicode(entry)
else:
if isinstance(entry, str):
try:
coll[no] = str(entry)
except UnicodeEncodeError:
pass
elif isinstance(coll, dict):
keys, vals = list(coll.keys()), list(coll.values())
for key, val in zip(keys, vals):
if isinstance(val, (dict, list)):
coll[key] = decode_unicode(val)
elif isinstance(val, str):
try:
coll[key] = str(val)
except UnicodeEncodeError:
pass
coll[str(key)] = coll.pop(key)
return coll
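# Editor's illustrative sketch, not part of the original module: ElidedPrettyPrinter
# drops in for pprint.PrettyPrinter but shortens long sequences through ElidedWrapper,
# so large figure data does not flood the terminal.
#
#     pp = ElidedPrettyPrinter(threshold=10)
#     pp.pprint({'x': list(range(1000))})   # the list is printed elided, not in full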
| {
"repo_name": "plotly/plotly.py",
"path": "packages/python/plotly/plotly/utils.py",
"copies": "1",
"size": "6190",
"license": "mit",
"hash": 6601925392878992000,
"line_mean": 28.6172248804,
"line_max": 86,
"alpha_frac": 0.5431340872,
"autogenerated": false,
"ratio": 4.011665586519767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00028660781836825394,
"num_lines": 209
} |
from __future__ import absolute_import, division
import textwrap
from pprint import PrettyPrinter
from _plotly_utils.utils import *
# Pretty printing
def _list_repr_elided(v, threshold=200, edgeitems=3, indent=0, width=80):
"""
    Return a string representation of a list, elided if it has more than
    `threshold` elements
Parameters
----------
v : list
Input list
threshold :
Maximum number of elements to display
Returns
-------
str
"""
if isinstance(v, list):
open_char, close_char = "[", "]"
elif isinstance(v, tuple):
open_char, close_char = "(", ")"
else:
raise ValueError("Invalid value of type: %s" % type(v))
if len(v) <= threshold:
disp_v = v
else:
disp_v = list(v[:edgeitems]) + ["..."] + list(v[-edgeitems:])
v_str = open_char + ", ".join([str(e) for e in disp_v]) + close_char
v_wrapped = "\n".join(
textwrap.wrap(
v_str,
width=width,
initial_indent=" " * (indent + 1),
subsequent_indent=" " * (indent + 1),
)
).strip()
return v_wrapped
class ElidedWrapper(object):
"""
Helper class that wraps values of certain types and produces a custom
__repr__() that may be elided and is suitable for use during pretty
printing
"""
def __init__(self, v, threshold, indent):
self.v = v
self.indent = indent
self.threshold = threshold
@staticmethod
def is_wrappable(v):
numpy = get_module("numpy")
if isinstance(v, (list, tuple)) and len(v) > 0 and not isinstance(v[0], dict):
return True
elif numpy and isinstance(v, numpy.ndarray):
return True
elif isinstance(v, str):
return True
else:
return False
def __repr__(self):
numpy = get_module("numpy")
if isinstance(self.v, (list, tuple)):
# Handle lists/tuples
res = _list_repr_elided(
self.v, threshold=self.threshold, indent=self.indent
)
return res
elif numpy and isinstance(self.v, numpy.ndarray):
# Handle numpy arrays
# Get original print opts
orig_opts = numpy.get_printoptions()
            # Temporarily set numpy's print threshold to self.threshold
numpy.set_printoptions(
**dict(orig_opts, threshold=self.threshold, edgeitems=3, linewidth=80)
)
res = self.v.__repr__()
# Add indent to all but the first line
res_lines = res.split("\n")
res = ("\n" + " " * self.indent).join(res_lines)
# Restore print opts
numpy.set_printoptions(**orig_opts)
return res
elif isinstance(self.v, str):
# Handle strings
if len(self.v) > 80:
return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"
else:
return self.v.__repr__()
else:
return self.v.__repr__()
class ElidedPrettyPrinter(PrettyPrinter):
"""
PrettyPrinter subclass that elides long lists/arrays/strings
"""
def __init__(self, *args, **kwargs):
self.threshold = kwargs.pop("threshold", 200)
PrettyPrinter.__init__(self, *args, **kwargs)
def _format(self, val, stream, indent, allowance, context, level):
if ElidedWrapper.is_wrappable(val):
elided_val = ElidedWrapper(val, self.threshold, indent)
return self._format(elided_val, stream, indent, allowance, context, level)
else:
return PrettyPrinter._format(
self, val, stream, indent, allowance, context, level
)
def node_generator(node, path=()):
"""
General, node-yielding generator.
Yields (node, path) tuples when it finds values that are dict
instances.
A path is a sequence of hashable values that can be used as either keys to
    a mapping (dict) or indices to a sequence (list). A path is always relative to
some object. Given an object, a path explains how to get from the top level
of that object to a nested value in the object.
:param (dict) node: Part of a dict to be traversed.
:param (tuple[str]) path: Defines the path of the current node.
:return: (Generator)
Example:
>>> for node, path in node_generator({'a': {'b': 5}}):
... print(node, path)
{'a': {'b': 5}} ()
{'b': 5} ('a',)
"""
if not isinstance(node, dict):
return # in case it's called with a non-dict node at top level
yield node, path
for key, val in node.items():
if isinstance(val, dict):
for item in node_generator(val, path + (key,)):
yield item
def get_by_path(obj, path):
"""
Iteratively get on obj for each key in path.
:param (list|dict) obj: The top-level object.
:param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
:return: (*)
Example:
>>> figure = {'data': [{'x': [5]}]}
>>> path = ('data', 0, 'x')
>>> get_by_path(figure, path)
[5]
"""
for key in path:
obj = obj[key]
return obj
def decode_unicode(coll):
if isinstance(coll, list):
for no, entry in enumerate(coll):
if isinstance(entry, (dict, list)):
coll[no] = decode_unicode(entry)
else:
if isinstance(entry, str):
try:
coll[no] = str(entry)
except UnicodeEncodeError:
pass
elif isinstance(coll, dict):
keys, vals = list(coll.keys()), list(coll.values())
for key, val in zip(keys, vals):
if isinstance(val, (dict, list)):
coll[key] = decode_unicode(val)
elif isinstance(val, str):
try:
coll[key] = str(val)
except UnicodeEncodeError:
pass
coll[str(key)] = coll.pop(key)
return coll
| {
"repo_name": "plotly/python-api",
"path": "packages/python/plotly/plotly/utils.py",
"copies": "1",
"size": "6152",
"license": "mit",
"hash": 1368567271607791400,
"line_mean": 28.4354066986,
"line_max": 86,
"alpha_frac": 0.5416124837,
"autogenerated": false,
"ratio": 4.0209150326797385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5062527516379738,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import textwrap
def chunked(iterator, chunk_size):
"""
Given an iterator, chunk it up into ~chunk_size, but be aware of newline
termination as an intended goal.
"""
result = ''
for chunk in iterator:
result += chunk
while len(result) >= chunk_size:
newline_pos = result.rfind('\n', 0, chunk_size)
if newline_pos == -1:
newline_pos = chunk_size
else:
newline_pos += 1
yield result[:newline_pos]
result = result[newline_pos:]
if result:
yield result
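# Editor's illustrative sketch (sample data made up, not part of the original module):
# chunked() prefers to split at the last newline at or before chunk_size, so lines are
# kept intact whenever a newline is available.
#
#     list(chunked(iter(['a\nbb\nccc\n']), chunk_size=5))
#     # -> ['a\nbb\n', 'ccc\n']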
def nl2br(value):
return value.replace('\n', '<br>\n')
def break_long_lines(text, *args, **kwargs):
"""
Wraps the single paragraph in text (a string) so every line is at most
width characters long. Short lines in text will not be touched.
"""
result = []
for line in text.split('\n'):
result.append(textwrap.fill(line, *args, **kwargs))
return '\n'.join(result)
| {
"repo_name": "bowlofstew/changes",
"path": "changes/utils/text.py",
"copies": "3",
"size": "1050",
"license": "apache-2.0",
"hash": -985502997929510100,
"line_mean": 26.6315789474,
"line_max": 76,
"alpha_frac": 0.5838095238,
"autogenerated": false,
"ratio": 3.9622641509433962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046073674743396,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
# Someone else has the lock.
raise AlreadyLocked
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
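# Editor's usage sketch (the path is hypothetical, not part of the original module):
# the directory itself is the lock, and the unique file created inside it records
# which host/thread/pid holds it.
#
#     lock = MkdirLockFile('/tmp/somefile')
#     lock.acquire(timeout=5)
#     try:
#         pass  # critical section
#     finally:
#         lock.release()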
| {
"repo_name": "alivesay/squire",
"path": "lib/lockfile-0.9.1/lockfile/mkdirlockfile.py",
"copies": "18",
"size": "2701",
"license": "unlicense",
"hash": -1346094643958306000,
"line_mean": 33.1898734177,
"line_max": 76,
"alpha_frac": 0.4853757867,
"autogenerated": false,
"ratio": 4.427868852459016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import time
import os
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
def __init__(self, path, threaded=True):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked
if not self.i_am_locking():
raise NotMyLock((self._who_is_locking(), self.unique_name))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
| {
"repo_name": "sorenh/cc",
"path": "vendor/lockfile/lockfile/sqlitelockfile.py",
"copies": "3",
"size": "4984",
"license": "apache-2.0",
"hash": -1206400192914358500,
"line_mean": 34.0985915493,
"line_max": 72,
"alpha_frac": 0.4691011236,
"autogenerated": false,
"ratio": 4.710775047258979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00212036105522021,
"num_lines": 142
} |
from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
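# Editor's usage sketch (the path is hypothetical, not part of the original module):
# lock state lives in a shared throw-away SQLite database (SQLiteLockFile.testdb),
# so acquiring and releasing are just inserts and deletes in the `locks` table.
#
#     lock = SQLiteLockFile('/tmp/somefile', timeout=10)
#     lock.acquire()
#     assert lock.i_am_locking()
#     lock.release()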
| {
"repo_name": "GiovanniConserva/TestDeploy",
"path": "venv/Lib/site-packages/pip/_vendor/lockfile/sqlitelockfile.py",
"copies": "536",
"size": "5506",
"license": "bsd-3-clause",
"hash": 7542428149643833000,
"line_mean": 34.2948717949,
"line_max": 75,
"alpha_frac": 0.4734834726,
"autogenerated": false,
"ratio": 4.693947144075021,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import torch
from torch.autograd import Variable
import numpy as np
from scipy.ndimage.interpolation import map_coordinates as sp_map_coordinates
def th_flatten(a):
"""Flatten tensor"""
return a.contiguous().view(a.nelement())
def th_repeat(a, repeats, axis=0):
"""Torch version of np.repeat for 1D"""
assert len(a.size()) == 1
return th_flatten(torch.transpose(a.repeat(repeats, 1), 0, 1))
def np_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.shape) == 2
a = np.expand_dims(a, 0)
a = np.tile(a, [repeats, 1, 1])
return a
def th_gather_2d(input, coords):
inds = coords[:, 0]*input.size(1) + coords[:, 1]
x = torch.index_select(th_flatten(input), 0, inds)
return x.view(coords.size(0))
def th_map_coordinates(input, coords, order=1):
"""Tensorflow verion of scipy.ndimage.map_coordinates
Note that coords is transposed and only 2D is supported
Parameters
----------
input : tf.Tensor. shape = (s, s)
coords : tf.Tensor. shape = (n_points, 2)
"""
assert order == 1
input_size = input.size(0)
coords = torch.clamp(coords, 0, input_size - 1)
coords_lt = coords.floor().long()
coords_rb = coords.ceil().long()
coords_lb = torch.stack([coords_lt[:, 0], coords_rb[:, 1]], 1)
coords_rt = torch.stack([coords_rb[:, 0], coords_lt[:, 1]], 1)
vals_lt = th_gather_2d(input, coords_lt.detach())
vals_rb = th_gather_2d(input, coords_rb.detach())
vals_lb = th_gather_2d(input, coords_lb.detach())
vals_rt = th_gather_2d(input, coords_rt.detach())
coords_offset_lt = coords - coords_lt.type(coords.data.type())
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
def sp_batch_map_coordinates(inputs, coords):
"""Reference implementation for batch_map_coordinates"""
# coords = coords.clip(0, inputs.shape[1] - 1)
assert (coords.shape[2] == 2)
    height = coords[:, :, 0].clip(0, inputs.shape[1] - 1)
    width = coords[:, :, 1].clip(0, inputs.shape[2] - 1)
    # Reassemble the clipped coordinates; the original discarded this result,
    # which left `coords` unclipped below.
    coords = np.concatenate(
        (np.expand_dims(height, axis=2), np.expand_dims(width, axis=2)), 2)
mapped_vals = np.array([
sp_map_coordinates(input, coord.T, mode='nearest', order=1)
for input, coord in zip(inputs, coords)
])
return mapped_vals
def th_batch_map_coordinates(input, coords, order=1):
"""Batch version of th_map_coordinates
Only supports 2D feature maps
Parameters
----------
    input : torch.Tensor. shape = (b, s, s)
    coords : torch.Tensor. shape = (b, n_points, 2)
    Returns
    -------
    torch.Tensor. shape = (b, n_points)
"""
batch_size = input.size(0)
input_height = input.size(1)
input_width = input.size(2)
n_coords = coords.size(1)
# coords = torch.clamp(coords, 0, input_size - 1)
coords = torch.cat((torch.clamp(coords.narrow(2, 0, 1), 0, input_height - 1), torch.clamp(coords.narrow(2, 1, 1), 0, input_width - 1)), 2)
assert (coords.size(1) == n_coords)
coords_lt = coords.floor().long()
coords_rb = coords.ceil().long()
coords_lb = torch.stack([coords_lt[..., 0], coords_rb[..., 1]], 2)
coords_rt = torch.stack([coords_rb[..., 0], coords_lt[..., 1]], 2)
idx = th_repeat(torch.arange(0, batch_size), n_coords).long()
idx = Variable(idx, requires_grad=False)
if input.is_cuda:
idx = idx.cuda()
def _get_vals_by_coords(input, coords):
indices = torch.stack([
idx, th_flatten(coords[..., 0]), th_flatten(coords[..., 1])
], 1)
inds = indices[:, 0]*input.size(1)*input.size(2)+ indices[:, 1]*input.size(2) + indices[:, 2]
vals = th_flatten(input).index_select(0, inds)
vals = vals.view(batch_size, n_coords)
return vals
vals_lt = _get_vals_by_coords(input, coords_lt.detach())
vals_rb = _get_vals_by_coords(input, coords_rb.detach())
vals_lb = _get_vals_by_coords(input, coords_lb.detach())
vals_rt = _get_vals_by_coords(input, coords_rt.detach())
coords_offset_lt = coords - coords_lt.type(coords.data.type())
vals_t = coords_offset_lt[..., 0]*(vals_rt - vals_lt) + vals_lt
vals_b = coords_offset_lt[..., 0]*(vals_rb - vals_lb) + vals_lb
mapped_vals = coords_offset_lt[..., 1]* (vals_b - vals_t) + vals_t
return mapped_vals
def sp_batch_map_offsets(input, offsets):
"""Reference implementation for tf_batch_map_offsets"""
batch_size = input.shape[0]
input_height = input.shape[1]
input_width = input.shape[2]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_height, :input_width], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
# coords = coords.clip(0, input_size - 1)
mapped_vals = sp_batch_map_coordinates(input, coords)
return mapped_vals
def th_generate_grid(batch_size, input_height, input_width, dtype, cuda):
grid = np.meshgrid(
range(input_height), range(input_width), indexing='ij'
)
grid = np.stack(grid, axis=-1)
grid = grid.reshape(-1, 2)
grid = np_repeat_2d(grid, batch_size)
grid = torch.from_numpy(grid).type(dtype)
if cuda:
grid = grid.cuda()
return Variable(grid, requires_grad=False)
def th_batch_map_offsets(input, offsets, grid=None, order=1):
"""Batch map offsets into input
Parameters
    ----------
    input : torch.Tensor. shape = (b, s, s)
    offsets: torch.Tensor. shape = (b, s, s, 2)
    Returns
    -------
    torch.Tensor. shape = (b, s*s)
"""
batch_size = input.size(0)
input_height = input.size(1)
input_width = input.size(2)
offsets = offsets.view(batch_size, -1, 2)
if grid is None:
grid = th_generate_grid(batch_size, input_height, input_width, offsets.data.type(), offsets.data.is_cuda)
coords = offsets + grid
mapped_vals = th_batch_map_coordinates(input, coords)
return mapped_vals
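# Editor's shape sketch (random tensors, not part of the original module): offsets
# are per-position (dy, dx) displacements added to a base grid before bilinear
# sampling; note the sampled values come back flattened per batch element.
#
#     feat = torch.rand(4, 16, 16)               # (b, s, s)
#     offsets = torch.rand(4, 16, 16, 2) - 0.5   # (b, s, s, 2)
#     out = th_batch_map_offsets(feat, offsets)  # (b, s*s)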
| {
"repo_name": "oeway/pytorch-deform-conv",
"path": "torch_deform_conv/deform_conv.py",
"copies": "1",
"size": "6144",
"license": "mit",
"hash": 4894246253913568000,
"line_mean": 31.5079365079,
"line_max": 142,
"alpha_frac": 0.6153971354,
"autogenerated": false,
"ratio": 3.1077389984825494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9219616835947402,
"avg_score": 0.0007038595870294291,
"num_lines": 189
} |
from __future__ import absolute_import, division
import torch
import torch.nn as nn
import numpy as np
from torch_deform_conv.deform_conv import th_batch_map_offsets, th_generate_grid
class ConvOffset2D(nn.Conv2d):
"""ConvOffset2D
    Convolutional layer responsible for learning the 2D offsets and outputting
    the deformed feature map using bilinear interpolation
Note that this layer does not perform convolution on the deformed feature
map. See get_deform_cnn in cnn.py for usage
"""
def __init__(self, filters, init_normal_stddev=0.01, **kwargs):
"""Init
Parameters
----------
filters : int
Number of channel of the input feature map
init_normal_stddev : float
Normal kernel initialization
**kwargs:
            Passed to superclass. See Conv2d layer in pytorch
"""
self.filters = filters
self._grid_param = None
super(ConvOffset2D, self).__init__(self.filters, self.filters*2, 3, padding=1, bias=False, **kwargs)
self.weight.data.copy_(self._init_weights(self.weight, init_normal_stddev))
def forward(self, x):
"""Return the deformed featured map"""
x_shape = x.size()
offsets = super(ConvOffset2D, self).forward(x)
# offsets: (b*c, h, w, 2)
offsets = self._to_bc_h_w_2(offsets, x_shape)
# x: (b*c, h, w)
x = self._to_bc_h_w(x, x_shape)
# X_offset: (b*c, h, w)
x_offset = th_batch_map_offsets(x, offsets, grid=self._get_grid(self,x))
# x_offset: (b, h, w, c)
x_offset = self._to_b_c_h_w(x_offset, x_shape)
return x_offset
@staticmethod
def _get_grid(self, x):
batch_size, input_height, input_width = x.size(0), x.size(1), x.size(2)
dtype, cuda = x.data.type(), x.data.is_cuda
if self._grid_param == (batch_size, input_height, input_width, dtype, cuda):
return self._grid
self._grid_param = (batch_size, input_height, input_width, dtype, cuda)
self._grid = th_generate_grid(batch_size, input_height, input_width, dtype, cuda)
return self._grid
@staticmethod
def _init_weights(weights, std):
fan_out = weights.size(0)
fan_in = weights.size(1) * weights.size(2) * weights.size(3)
w = np.random.normal(0.0, std, (fan_out, fan_in))
return torch.from_numpy(w.reshape(weights.size()))
@staticmethod
def _to_bc_h_w_2(x, x_shape):
"""(b, 2c, h, w) -> (b*c, h, w, 2)"""
x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]), 2)
return x
@staticmethod
def _to_bc_h_w(x, x_shape):
"""(b, c, h, w) -> (b*c, h, w)"""
x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))
return x
@staticmethod
def _to_b_c_h_w(x, x_shape):
"""(b*c, h, w) -> (b, c, h, w)"""
x = x.contiguous().view(-1, int(x_shape[1]), int(x_shape[2]), int(x_shape[3]))
return x
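# Editor's usage sketch (random input, not part of the original module): ConvOffset2D
# preserves the input's shape; it only resamples the feature map at learned offsets,
# and the following regular convolution does the actual filtering.
#
#     layer = ConvOffset2D(32)
#     x = torch.rand(2, 32, 28, 28)   # (b, c, h, w)
#     y = layer(x)                    # same shape: (2, 32, 28, 28)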
| {
"repo_name": "oeway/pytorch-deform-conv",
"path": "torch_deform_conv/layers.py",
"copies": "1",
"size": "3001",
"license": "mit",
"hash": -613533520039269500,
"line_mean": 33.1022727273,
"line_max": 108,
"alpha_frac": 0.5788070643,
"autogenerated": false,
"ratio": 3.155625657202944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42344327215029437,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch_deform_conv.layers import ConvOffset2D
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
# conv11
self.conv11 = nn.Conv2d(1, 32, 3, padding=1)
self.bn11 = nn.BatchNorm2d(32)
# conv12
self.conv12 = nn.Conv2d(32, 64, 3, padding=1, stride=2)
self.bn12 = nn.BatchNorm2d(64)
# conv21
self.conv21 = nn.Conv2d(64, 128, 3, padding= 1)
self.bn21 = nn.BatchNorm2d(128)
# conv22
self.conv22 = nn.Conv2d(128, 128, 3, padding=1, stride=2)
self.bn22 = nn.BatchNorm2d(128)
# out
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv11(x))
x = self.bn11(x)
x = F.relu(self.conv12(x))
x = self.bn12(x)
x = F.relu(self.conv21(x))
x = self.bn21(x)
x = F.relu(self.conv22(x))
x = self.bn22(x)
x = F.avg_pool2d(x, kernel_size=[x.size(2), x.size(3)])
        x = self.fc(x.view(x.size()[:2]))
x = F.softmax(x)
return x
class DeformConvNet(nn.Module):
def __init__(self):
super(DeformConvNet, self).__init__()
# conv11
self.conv11 = nn.Conv2d(1, 32, 3, padding=1)
self.bn11 = nn.BatchNorm2d(32)
# conv12
self.offset12 = ConvOffset2D(32)
self.conv12 = nn.Conv2d(32, 64, 3, padding=1, stride=2)
self.bn12 = nn.BatchNorm2d(64)
# conv21
self.offset21 = ConvOffset2D(64)
self.conv21 = nn.Conv2d(64, 128, 3, padding= 1)
self.bn21 = nn.BatchNorm2d(128)
# conv22
self.offset22 = ConvOffset2D(128)
self.conv22 = nn.Conv2d(128, 128, 3, padding=1, stride=2)
self.bn22 = nn.BatchNorm2d(128)
# out
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv11(x))
x = self.bn11(x)
x = self.offset12(x)
x = F.relu(self.conv12(x))
x = self.bn12(x)
x = self.offset21(x)
x = F.relu(self.conv21(x))
x = self.bn21(x)
x = self.offset22(x)
x = F.relu(self.conv22(x))
x = self.bn22(x)
x = F.avg_pool2d(x, kernel_size=[x.size(2), x.size(3)])
x = self.fc(x.view(x.size()[:2]))
x = F.softmax(x)
return x
def freeze(self, module_classes):
'''
freeze modules for finetuning
'''
for k, m in self._modules.items():
if any([type(m) == mc for mc in module_classes]):
for param in m.parameters():
param.requires_grad = False
def unfreeze(self, module_classes):
'''
unfreeze modules
'''
for k, m in self._modules.items():
if any([isinstance(m, mc) for mc in module_classes]):
for param in m.parameters():
param.requires_grad = True
def parameters(self):
return filter(lambda p: p.requires_grad, super(DeformConvNet, self).parameters())
def get_cnn():
return ConvNet()
def get_deform_cnn(trainable=True, freeze_filter=[nn.Conv2d, nn.Linear]):
model = DeformConvNet()
if not trainable:
model.freeze(freeze_filter)
return model
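# Editor's finetuning sketch (optimizer and learning rate are placeholders): with
# trainable=False the plain Conv2d/Linear weights are frozen, and the overridden
# parameters() only yields the offset branches, so only those keep training.
#
#     model = get_deform_cnn(trainable=False)
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)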
| {
"repo_name": "oeway/pytorch-deform-conv",
"path": "torch_deform_conv/cnn.py",
"copies": "1",
"size": "3415",
"license": "mit",
"hash": -3918284970811787000,
"line_mean": 26.5403225806,
"line_max": 89,
"alpha_frac": 0.5373352855,
"autogenerated": false,
"ratio": 3.1215722120658134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9127636187687742,
"avg_score": 0.006254261975614188,
"num_lines": 124
} |
from __future__ import absolute_import, division
import uuid
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, String, Text, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index, UniqueConstraint
from changes.config import db
from changes.db.types.guid import GUID
class FileCoverage(db.Model):
"""
Unique to file/job/project. Contains a data-blob-string, where each
character is either
    - U Uncovered
- C Covered
- N No Info
filled in when file coverage artifacts are collected (updated with
additional lines for each new artifact in a job)
"""
__tablename__ = 'filecoverage'
__table_args__ = (
Index('idx_filecoverage_job_id', 'job_id'),
Index('idx_filecoverage_project_id', 'project_id'),
Index('idx_filecoverage_step_id', 'step_id'),
UniqueConstraint('job_id', 'filename', name='unq_job_filname'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
step_id = Column(GUID, ForeignKey('jobstep.id', ondelete="CASCADE"))
job_id = Column(GUID, ForeignKey('job.id', ondelete="CASCADE"), nullable=False)
project_id = Column(GUID, ForeignKey('project.id', ondelete="CASCADE"), nullable=False)
filename = Column(String(256), nullable=False, primary_key=True)
data = Column(Text)
date_created = Column(DateTime, default=datetime.utcnow)
lines_covered = Column(Integer)
lines_uncovered = Column(Integer)
diff_lines_covered = Column(Integer)
diff_lines_uncovered = Column(Integer)
step = relationship('JobStep')
job = relationship('Job')
project = relationship('Project')
def __init__(self, **kwargs):
super(FileCoverage, self).__init__(**kwargs)
if not self.id:
self.id = uuid.uuid4()
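# Editor's sketch (the coverage string below is made up): `data` holds one character
# per source line, so simple counts recover the per-file statistics stored in
# lines_covered / lines_uncovered.
#
#     data = 'NNCCUCU'
#     lines_covered = data.count('C')    # 3
#     lines_uncovered = data.count('U')  # 2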
| {
"repo_name": "dropbox/changes",
"path": "changes/models/filecoverage.py",
"copies": "1",
"size": "1845",
"license": "apache-2.0",
"hash": 8695273067251792000,
"line_mean": 33.1666666667,
"line_max": 91,
"alpha_frac": 0.6818428184,
"autogenerated": false,
"ratio": 3.9006342494714588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004217467985583927,
"num_lines": 54
} |
from __future__ import absolute_import, division
import uuid
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Text, Integer
from changes.config import db
from changes.constants import Result, ResultSource, Status
from changes.db.types.enum import Enum
from changes.db.types.guid import GUID
class BazelTarget(db.Model):
__tablename__ = 'bazeltarget'
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
step_id = Column(GUID, ForeignKey('jobstep.id', ondelete="CASCADE"))
job_id = Column(GUID, ForeignKey('job.id', ondelete="CASCADE"), nullable=False)
name = Column(Text, nullable=False)
status = Column(Enum(Status), nullable=False, default=Status.unknown)
result = Column(Enum(Result), default=Result.unknown, nullable=False)
result_source = Column(Enum(ResultSource), default=ResultSource.from_self)
duration = Column(Integer, default=0)
date_created = Column(DateTime, default=datetime.utcnow, nullable=False)
def __init__(self, **kwargs):
super(BazelTarget, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
if self.result is None:
self.result = Result.unknown
if self.status is None:
self.status = Status.unknown
if self.date_created is None:
self.date_created = datetime.utcnow()
| {
"repo_name": "dropbox/changes",
"path": "changes/models/bazeltarget.py",
"copies": "1",
"size": "1396",
"license": "apache-2.0",
"hash": 4154827816043156000,
"line_mean": 38.8857142857,
"line_max": 83,
"alpha_frac": 0.6948424069,
"autogenerated": false,
"ratio": 3.8885793871866294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083421794086629,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
import uuid
from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint
from changes.config import db
from changes.constants import Result
from changes.db.types.enum import Enum
from changes.db.types.guid import GUID
class RevisionResult(db.Model):
__tablename__ = 'revisionresult'
__table_args__ = (
UniqueConstraint('project_id', 'revision_sha', name='unq_project_revision_pair'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
build_id = Column(GUID, ForeignKey('build.id'))
revision_sha = Column(String(40), nullable=False)
project_id = Column(GUID, ForeignKey('project.id', ondelete='CASCADE'), nullable=False)
result = Column(Enum(Result), nullable=False, default=Result.unknown)
build = relationship('Build')
project = relationship('Project')
def __init__(self, **kwargs):
super(RevisionResult, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
if self.result is None:
self.result = Result.unknown
| {
"repo_name": "dropbox/changes",
"path": "changes/models/revisionresult.py",
"copies": "1",
"size": "1184",
"license": "apache-2.0",
"hash": 2488367511353378000,
"line_mean": 32.8285714286,
"line_max": 91,
"alpha_frac": 0.6959459459,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51959459459,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
from keras.layers import Input, Conv2D, Activation, GlobalAvgPool2D, Dense, BatchNormalization
from layers import ConvOffset2D
def get_cnn():
inputs = l = Input((28, 28, 1), name='input')
# conv11
l = Conv2D(32, (3, 3), padding='same', name='conv11')(l)
l = Activation('relu', name='conv11_relu')(l)
l = BatchNormalization(name='conv11_bn')(l)
# conv12
l = Conv2D(64, (3, 3), padding='same', strides=(2, 2), name='conv12')(l)
l = Activation('relu', name='conv12_relu')(l)
l = BatchNormalization(name='conv12_bn')(l)
# conv21
l = Conv2D(128, (3, 3), padding='same', name='conv21')(l)
l = Activation('relu', name='conv21_relu')(l)
l = BatchNormalization(name='conv21_bn')(l)
# conv22
l = Conv2D(128, (3, 3), padding='same', strides=(2, 2), name='conv22')(l)
l = Activation('relu', name='conv22_relu')(l)
l = BatchNormalization(name='conv22_bn')(l)
# out
l = GlobalAvgPool2D(name='avg_pool')(l)
l = Dense(10, name='fc1')(l)
outputs = l = Activation('softmax', name='out')(l)
return inputs, outputs
def get_deform_cnn(trainable):
inputs = l = Input((28, 28, 1), name='input')
# conv11
l = Conv2D(32, (3, 3), padding='same', name='conv11', trainable=trainable)(l)
l = Activation('relu', name='conv11_relu')(l)
l = BatchNormalization(name='conv11_bn')(l)
# conv12
l_offset = ConvOffset2D(32, name='conv12_offset')(l)
l = Conv2D(64, (3, 3), padding='same', strides=(2, 2), name='conv12', trainable=trainable)(l_offset)
l = Activation('relu', name='conv12_relu')(l)
l = BatchNormalization(name='conv12_bn')(l)
# conv21
l_offset = ConvOffset2D(64, name='conv21_offset')(l)
l = Conv2D(128, (3, 3), padding='same', name='conv21', trainable=trainable)(l_offset)
l = Activation('relu', name='conv21_relu')(l)
l = BatchNormalization(name='conv21_bn')(l)
# conv22
l_offset = ConvOffset2D(128, name='conv22_offset')(l)
l = Conv2D(128, (3, 3), padding='same', strides=(2, 2), name='conv22', trainable=trainable)(l_offset)
l = Activation('relu', name='conv22_relu')(l)
l = BatchNormalization(name='conv22_bn')(l)
# out
l = GlobalAvgPool2D(name='avg_pool')(l)
l = Dense(10, name='fc1', trainable=trainable)(l)
outputs = l = Activation('softmax', name='out')(l)
return inputs, outputs
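# Editor's sketch (optimizer/loss are placeholders, Keras import path may vary by
# version): the (inputs, outputs) pair returned above is wired into a trainable
# Model in the usual way.
#
#     from keras.models import Model
#     inputs, outputs = get_deform_cnn(trainable=True)
#     model = Model(inputs=inputs, outputs=outputs)
#     model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])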
| {
"repo_name": "andrewv587/pycharm-project",
"path": "deform-conv-dir/cnn.py",
"copies": "1",
"size": "2420",
"license": "apache-2.0",
"hash": 4663909647229439000,
"line_mean": 33.5714285714,
"line_max": 105,
"alpha_frac": 0.6198347107,
"autogenerated": false,
"ratio": 3.025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9098356801879643,
"avg_score": 0.009295581764071349,
"num_lines": 70
} |
from __future__ import absolute_import, division
import collections
import importlib
import math
import uuid
class Undefined(object):
def __repr__(self):
return "Undefined"
def __bool__(self):
return False
def __nonzero__(self):
return False
Undefined = Undefined()
def import_object(module_name, object_path=None):
if not object_path:
if ':' not in module_name:
raise ValueError("cannot import object %r" % module_name)
module_name, object_path = module_name.split(':')
mod = importlib.import_module(module_name)
obj = mod
for objname in object_path.split('.'):
obj = getattr(obj, objname)
return obj
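# Editor's usage sketch (os.path chosen only for illustration): the target can be
# given as a single "module:attr.path" string or as two separate arguments.
#
#     import_object('os.path:join')      # -> the os.path.join function
#     import_object('os.path', 'join')   # same result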
def make_id():
return uuid.uuid4().hex
_sqrt2 = math.sqrt(2)
class Accumulator(object):
def __init__(self):
self.n = 0
self.sum = 0
self.square_sum = 0
self._mean = None
self._stddev = None
def add(self, value):
self.n += 1
self.sum += value
self.square_sum += value * value
self._mean = None
self._stddev = None
def remove(self, value):
self.n -= 1
self.sum -= value
self.square_sum -= value * value
self._mean = None
self._stddev = None
@property
def mean(self):
if not self.n:
return 0.
if self._mean is None:
self._mean = self.sum / self.n
return self._mean
@property
def stddev(self):
if not self.n:
return 0.
if self._stddev is None:
mean = self.mean
self._stddev = math.sqrt(self.square_sum / self.n - mean * mean)
return self._stddev
@property
def stats(self):
return {'mean': self.mean, 'stddev': self.stddev, 'n': self.n}
class SampleWindow(Accumulator):
def __init__(self, n=100, factor=1):
super(SampleWindow, self).__init__()
self.size = n
self.factor = factor
self.values = collections.deque([])
self.total = Accumulator()
def __len__(self):
return len(self.values)
def is_full(self):
return len(self.values) == self.size
def add(self, value):
value = value * self.factor
super(SampleWindow, self).add(value)
self.total.add(value)
if self.is_full():
self.remove(self.values.popleft())
self.values.append(value)
def p(self, value):
"""
        returns the two-sided tail probability of drawing a sample at least as far
        from the mean as `value`, given a normal distribution with mean and
        standard deviation derived from this window.
"""
if self.stddev == 0:
return 1. if value == self.mean else 0.
return 1 - math.erf(abs(value * self.factor - self.mean) / (self.stddev * _sqrt2))
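# Editor's worked example (numbers are illustrative): the window keeps running
# mean/stddev over at most `n` recent samples, and p() is the two-sided normal
# tail probability of a value under those statistics.
#
#     w = SampleWindow(n=100)
#     for v in [10, 12, 11, 13, 9, 10, 12]:
#         w.add(v)
#     w.stats   # {'mean': 11.0, 'stddev': ~1.31, 'n': 7}
#     w.p(11)   # 1.0  (exactly at the mean)
#     w.p(20)   # ~0.0 (far outside the window)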
| {
"repo_name": "kpanic/lymph",
"path": "iris/utils/__init__.py",
"copies": "2",
"size": "2825",
"license": "apache-2.0",
"hash": -5229553133795462000,
"line_mean": 23.5652173913,
"line_max": 90,
"alpha_frac": 0.5663716814,
"autogenerated": false,
"ratio": 3.912742382271468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5479114063671467,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
"""
Unit tests for M2Crypto.EVP.
Copyright (c) 2004-2007 Open Source Applications Foundation
Author: Heikki Toivonen
"""
import base64
import hashlib
import io
import logging
from binascii import a2b_hex, hexlify, unhexlify
from M2Crypto import BIO, EVP, RSA, Rand, m2, util
from tests import unittest
from tests.fips import fips_mode
log = logging.getLogger('test_EVP')
class EVPTestCase(unittest.TestCase):
def _gen_callback(self, *args):
pass
def _pass_callback(self, *args):
return b'foobar'
def _assign_rsa(self):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa, capture=0) # capture=1 should cause crash
return rsa
def test_assign(self):
rsa = self._assign_rsa()
rsa.check_key()
def test_pem(self):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
result_w_callback = pkey.as_pem(callback=self._pass_callback)
result_wo_callback = pkey.as_pem(cipher=None)
self.assertNotEqual(result_w_callback, result_wo_callback)
with self.assertRaises(ValueError):
pkey.as_pem(cipher='noXX$$%%suchcipher',
callback=self._pass_callback)
def test_as_der(self):
"""
Test DER encoding the PKey instance after assigning
a RSA key to it.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
der_blob = pkey.as_der()
# A quick but not thorough sanity check
self.assertEqual(len(der_blob), 160)
def test_get_digestbyname(self):
with self.assertRaises(EVP.EVPError):
m2.get_digestbyname('sha513')
self.assertNotEqual(m2.get_digestbyname('sha1'), None)
def test_MessageDigest(self): # noqa
with self.assertRaises(ValueError):
EVP.MessageDigest('sha513')
md = EVP.MessageDigest('sha1')
self.assertEqual(md.update(b'Hello'), 1)
self.assertEqual(util.octx_to_num(md.final()),
1415821221623963719413415453263690387336440359920)
# temporarily remove sha1 from m2
old_sha1 = m2.sha1
del m2.sha1
# now run the same test again, relying on EVP.MessageDigest() to call
# get_digestbyname() under the hood
md = EVP.MessageDigest('sha1')
self.assertEqual(md.update(b'Hello'), 1)
self.assertEqual(util.octx_to_num(md.final()),
1415821221623963719413415453263690387336440359920)
# put sha1 back in place
m2.sha1 = old_sha1
def test_as_der_capture_key(self):
"""
Test DER encoding the PKey instance after assigning
a RSA key to it. Have the PKey instance capture the RSA key.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa, 1)
der_blob = pkey.as_der()
# A quick but not thorough sanity check
self.assertEqual(len(der_blob), 160)
def test_size(self):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
size = pkey.size()
self.assertEqual(size, 128)
def test_hmac(self):
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data')),
92800611269186718152770431077867383126636491933,
util.octx_to_num(EVP.hmac(b'key', b'data')))
if not fips_mode: # Disabled algorithms
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='md5')),
209168838103121722341657216703105225176,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='md5')))
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='ripemd160')),
1176807136224664126629105846386432860355826868536,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='ripemd160')))
if m2.OPENSSL_VERSION_NUMBER >= 0x90800F:
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha224')),
2660082265842109788381286338540662430962855478412025487066970872635,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha224')))
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha256')),
36273358097036101702192658888336808701031275731906771612800928188662823394256,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha256')))
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha384')),
30471069101236165765942696708481556386452105164815350204559050657318908408184002707969468421951222432574647369766282,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha384')))
self.assertEqual(util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha512')),
3160730054100700080556942280820129108466291087966635156623014063982211353635774277148932854680195471287740489442390820077884317620321797003323909388868696,
util.octx_to_num(EVP.hmac(b'key', b'data',
algo='sha512')))
with self.assertRaises(ValueError):
EVP.hmac(b'key', b'data', algo='sha513')
def test_get_rsa(self):
"""
Testing retrieving the RSA key from the PKey instance.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
self.assertIsInstance(rsa, RSA.RSA)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
rsa2 = pkey.get_rsa()
self.assertIsInstance(rsa2, RSA.RSA_pub)
self.assertEqual(rsa.e, rsa2.e)
self.assertEqual(rsa.n, rsa2.n)
# FIXME
# hanging call is
# m2.rsa_write_key(self.rsa, bio._ptr(), ciph, callback)s
# from RSA.py/save_key_bio
pem = rsa.as_pem(callback=self._pass_callback)
pem2 = rsa2.as_pem()
assert pem
assert pem2
self.assertNotEqual(pem, pem2)
message = b'This is the message string'
digest = hashlib.sha1(message).digest()
self.assertEqual(rsa.sign(digest), rsa2.sign(digest))
rsa3 = RSA.gen_key(1024, 3, callback=self._gen_callback)
self.assertNotEqual(rsa.sign(digest), rsa3.sign(digest))
def test_load_key_string_pubkey(self):
"""
Testing creating a PKey instance from PEM string.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
self.assertIsInstance(rsa, RSA.RSA)
rsa_pem = BIO.MemoryBuffer()
rsa.save_pub_key_bio(rsa_pem)
pkey = EVP.load_key_string_pubkey(rsa_pem.read())
rsa2 = pkey.get_rsa()
self.assertIsInstance(rsa2, RSA.RSA_pub)
self.assertEqual(rsa.e, rsa2.e)
self.assertEqual(rsa.n, rsa2.n)
pem = rsa.as_pem(callback=self._pass_callback)
pem2 = rsa2.as_pem()
assert pem
assert pem2
self.assertNotEqual(pem, pem2)
def test_get_rsa_fail(self):
"""
Testing trying to retrieve the RSA key from the PKey instance
when it is not holding a RSA Key. Should raise a ValueError.
"""
pkey = EVP.PKey()
with self.assertRaises(ValueError):
pkey.get_rsa()
def test_get_modulus(self):
pkey = EVP.PKey()
with self.assertRaises(ValueError):
pkey.get_modulus()
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey.assign_rsa(rsa)
mod = pkey.get_modulus()
self.assertGreater(len(mod), 0, mod)
self.assertEqual(len(mod.strip(b'0123456789ABCDEF')), 0)
def test_verify_final(self):
from M2Crypto import X509
pkey = EVP.load_key('tests/signer_key.pem')
pkey.sign_init()
pkey.sign_update(b'test message')
sig = pkey.sign_final()
# OK
x509 = X509.load_cert('tests/signer.pem')
pubkey = x509.get_pubkey()
pubkey.verify_init()
pubkey.verify_update(b'test message')
self.assertEqual(pubkey.verify_final(sig), 1)
# wrong cert
x509 = X509.load_cert('tests/x509.pem')
pubkey = x509.get_pubkey()
pubkey.verify_init()
pubkey.verify_update(b'test message')
self.assertEqual(pubkey.verify_final(sig), 0)
# wrong message
x509 = X509.load_cert('tests/signer.pem')
pubkey = x509.get_pubkey()
pubkey.verify_init()
pubkey.verify_update(b'test message not')
self.assertEqual(pubkey.verify_final(sig), 0)
def test_load_bad(self):
with self.assertRaises(BIO.BIOError):
EVP.load_key('thisdoesnotexist-dfgh56789')
with self.assertRaises(EVP.EVPError):
EVP.load_key('tests/signer.pem') # not a key
with self.assertRaises(EVP.EVPError):
EVP.load_key_bio(BIO.MemoryBuffer(b'no a key'))
def test_pad(self):
self.assertEqual(util.pkcs5_pad('Hello World'),
'Hello World\x05\x05\x05\x05\x05')
self.assertEqual(util.pkcs7_pad('Hello World', 15),
'Hello World\x04\x04\x04\x04')
with self.assertRaises(ValueError):
util.pkcs7_pad('Hello', 256)
def test_pkey_verify_crash(self):
SIGN_PRIVATE = EVP.load_key('tests/rsa.priv.pem')
SIGN_PUBLIC = RSA.load_pub_key('tests/rsa.pub.pem')
def sign(data):
SIGN_PRIVATE.sign_init()
SIGN_PRIVATE.sign_update(data)
signed_data = SIGN_PRIVATE.sign_final()
return base64.b64encode(signed_data)
def verify(response):
signature = base64.b64decode(response['sign'])
data = response['data']
verify_evp = EVP.PKey()
# capture parameter on the following line is required by
# the documentation
verify_evp.assign_rsa(SIGN_PUBLIC, capture=False)
verify_evp.verify_init()
verify_evp.verify_update(data)
# m2.verify_final(self.ctx, sign, self.pkey)
fin_res = verify_evp.verify_final(signature)
return fin_res == 1
data = b"test message"
signature = sign(data)
res = {"data": data, "sign": signature}
self.assertTrue(verify(res)) # works fine
self.assertTrue(verify(res)) # segmentation fault in *verify_final*
class CipherTestCase(unittest.TestCase):
def cipher_filter(self, cipher, inf, outf):
while 1:
buf = inf.read()
if not buf:
break
outf.write(cipher.update(buf))
outf.write(cipher.final())
return outf.getvalue()
def try_algo(self, algo):
enc = 1
dec = 0
otxt = b'against stupidity the gods themselves contend in vain'
k = EVP.Cipher(algo, b'goethe', b'12345678', enc,
1, 'sha1', b'saltsalt', 5)
pbuf = io.BytesIO(otxt)
cbuf = io.BytesIO()
ctxt = self.cipher_filter(k, pbuf, cbuf)
pbuf.close()
cbuf.close()
j = EVP.Cipher(algo, b'goethe', b'12345678', dec,
1, 'sha1', b'saltsalt', 5)
pbuf = io.BytesIO()
cbuf = io.BytesIO(ctxt)
ptxt = self.cipher_filter(j, cbuf, pbuf)
pbuf.close()
cbuf.close()
self.assertEqual(otxt, ptxt, '%s algorithm cipher test failed' % algo)
def test_ciphers(self):
ciphers = [
'des_ede_ecb', 'des_ede_cbc', 'des_ede_cfb', 'des_ede_ofb',
'des_ede3_ecb', 'des_ede3_cbc', 'des_ede3_cfb', 'des_ede3_ofb',
'aes_128_ecb', 'aes_128_cbc', 'aes_128_cfb', 'aes_128_ofb',
'aes_128_ctr', 'aes_192_ecb', 'aes_192_cbc', 'aes_192_cfb',
'aes_192_ofb', 'aes_192_ctr', 'aes_256_ecb', 'aes_256_cbc',
'aes_256_cfb', 'aes_256_ofb', 'aes_256_ctr']
nonfips_ciphers = ['bf_ecb', 'bf_cbc', 'bf_cfb', 'bf_ofb',
# 'idea_ecb', 'idea_cbc', 'idea_cfb', 'idea_ofb',
'cast5_ecb', 'cast5_cbc', 'cast5_cfb', 'cast5_ofb',
# 'rc5_ecb', 'rc5_cbc', 'rc5_cfb', 'rc5_ofb',
'des_ecb', 'des_cbc', 'des_cfb', 'des_ofb',
'rc4', 'rc2_40_cbc']
if not fips_mode: # Disabled algorithms
ciphers += nonfips_ciphers
for i in ciphers:
self.try_algo(i)
# idea might not be compiled in
ciphers = ['idea_ecb', 'idea_cbc', 'idea_cfb', 'idea_ofb']
try:
for i in ciphers:
self.try_algo(i)
except ValueError as e:
if str(e) != "('unknown cipher', 'idea_ecb')":
raise
# rc5 might not be compiled in
ciphers = ['rc5_ecb', 'rc5_cbc', 'rc5_cfb', 'rc5_ofb']
try:
for i in ciphers:
self.try_algo(i)
except ValueError as e:
if str(e) != "('unknown cipher', 'rc5_ecb')":
raise
with self.assertRaises(ValueError):
self.try_algo('nosuchalgo4567')
def test_AES(self): # noqa
enc = 1
dec = 0
tests = [
# test vectors from rfc 3602
# Case #1: Encrypting 16 bytes (1 block) using AES-CBC with
# 128-bit key
{
'KEY': '06a9214036b8a15b512e03d534120006',
'IV': '3dafba429d9eb430b422da802c9fac41',
'PT': b'Single block msg',
'CT': b'e353779c1079aeb82708942dbe77181a',
},
# Case #2: Encrypting 32 bytes (2 blocks) using AES-CBC with
# 128-bit key
{
'KEY': 'c286696d887c9aa0611bbb3e2025a45a',
'IV': '562e17996d093d28ddb3ba695a2e6f58',
'PT': unhexlify(b'000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f'),
'CT': b'd296cd94c2cccf8a3a863028b5e1dc0a7586602d253cfff91b8266bea6d61ab1',
},
# Case #3: Encrypting 48 bytes (3 blocks) using AES-CBC with
# 128-bit key
{
'KEY': '6c3ea0477630ce21a2ce334aa746c2cd',
'IV': 'c782dc4c098c66cbd9cd27d825682c81',
'PT': b'This is a 48-byte message (exactly 3 AES blocks)',
'CT': b'd0a02b3836451753d493665d33f0e8862dea54cdb293abc7506939276772f8d5021c19216bad525c8579695d83ba2684',
},
]
# Test with padding
for test in tests:
# encrypt
k = EVP.Cipher(alg='aes_128_cbc', key=unhexlify(test['KEY']),
iv=unhexlify(test['IV']), op=enc)
pbuf = io.BytesIO(test['PT'])
cbuf = io.BytesIO()
ciphertext = hexlify(self.cipher_filter(k, pbuf, cbuf))
cipherpadding = ciphertext[len(test['PT']) * 2:]
# Remove the padding from the end
ciphertext = ciphertext[:len(test['PT']) * 2]
pbuf.close()
cbuf.close()
self.assertEqual(ciphertext, test['CT'])
# decrypt
j = EVP.Cipher(alg='aes_128_cbc', key=unhexlify(test['KEY']),
iv=unhexlify(test['IV']), op=dec)
pbuf = io.BytesIO()
cbuf = io.BytesIO(unhexlify(test['CT'] + cipherpadding))
plaintext = self.cipher_filter(j, cbuf, pbuf)
pbuf.close()
cbuf.close()
self.assertEqual(plaintext, test['PT'])
# Test without padding
for test in tests:
# encrypt
k = EVP.Cipher(alg='aes_128_cbc', key=unhexlify(test['KEY']),
iv=unhexlify(test['IV']), op=enc, padding=False)
pbuf = io.BytesIO(test['PT'])
cbuf = io.BytesIO()
ciphertext = hexlify(self.cipher_filter(k, pbuf, cbuf))
pbuf.close()
cbuf.close()
self.assertEqual(ciphertext, test['CT'])
# decrypt
j = EVP.Cipher(alg='aes_128_cbc', key=unhexlify(test['KEY']),
iv=unhexlify(test['IV']), op=dec, padding=False)
pbuf = io.BytesIO()
cbuf = io.BytesIO(unhexlify(test['CT']))
plaintext = self.cipher_filter(j, cbuf, pbuf)
pbuf.close()
cbuf.close()
self.assertEqual(plaintext, test['PT'])
def test_AES_ctr(self): # noqa
# In CTR mode, encrypt and decrypt are actually the same
# operation because you encrypt the nonce value, then use the
# output of that to XOR the plaintext. So we set operation=0,
# even though this setting is ignored by OpenSSL.
op = 0
nonce = unhexlify('4a45a048a1e9f7c1bd17f2908222b964') # CTR nonce value, 16 bytes
key = unhexlify('8410ad66fe53a09addc0d041ae00bc6d70e8038ec17019f27e52eecd3846757e')
plaintext_value = b'This is three blocks of text with unicode char \x03'
ciphertext_values = {
'128': unhexlify('6098fb2e49b3f7ed34f841f43f825d84cf4834021511594b931c85f04662544bdb4f38232e9d87fda6280ab1ef450e27'), # noqa
'192': unhexlify('2299b1c5363824cb92b5851dedc73f49f30b23fb23f288492e840c951ce703292a5c6de6fc7f0625c403648f8ca4a582'), # noqa
'256': unhexlify('713e34bcd2c59affc9185a716c3c6aef5c9bf7b9914337dd96e9d7436344bcb9c35175afb54adb78aab322829ce9cb4a'), # noqa
}
for key_size in [128, 192, 256]:
alg = 'aes_%s_ctr' % str(key_size)
# Our key for this test is 256 bits in length (32 bytes).
            # We will trim it to the appropriate length for testing AES-128
# and AES-192 as well (so 16 and 24 bytes, respectively).
key_truncated = key[0:(key_size // 8)]
# Test encrypt operations
cipher = EVP.Cipher(alg=alg, key=key_truncated, iv=nonce, op=op)
ciphertext = cipher.update(plaintext_value)
ciphertext = ciphertext + cipher.final()
self.assertEqual(ciphertext, ciphertext_values[str(key_size)])
# Test decrypt operations
cipher = EVP.Cipher(alg=alg, key=key_truncated, iv=nonce, op=op)
plaintext = cipher.update(ciphertext_values[str(key_size)])
plaintext = plaintext + cipher.final()
# XXX not quite sure this is the actual intention
# but for now let's be happy to find the same content even if with
# a different type - XXX
self.assertEqual(plaintext, plaintext_value)
def test_raises(self):
def _cipherFilter(cipher, inf, outf): # noqa
while 1:
buf = inf.read()
if not buf:
break
outf.write(cipher.update(buf))
outf.write(cipher.final())
return outf.getvalue()
def decrypt(ciphertext, key, iv, alg='aes_256_cbc'):
cipher = EVP.Cipher(alg=alg, key=key, iv=iv, op=0)
pbuf = io.BytesIO()
cbuf = io.BytesIO(ciphertext)
plaintext = _cipherFilter(cipher, cbuf, pbuf)
pbuf.close()
cbuf.close()
return plaintext
with self.assertRaises(EVP.EVPError):
decrypt(
unhexlify('941d3647a642fab26d9f99a195098b91252c652d07235b9db35758c401627711724637648e45cad0f1121751a1240a4134998cfdf3c4a95c72de2a2444de3f9e40d881d7f205630b0d8ce142fdaebd8d7fbab2aea3dc47f5f29a0e9b55aae59222671d8e2877e1fb5cd8ef1c427027e0'),
unhexlify('5f2cc54067f779f74d3cf1f78c735aec404c8c3a4aaaa02eb1946f595ea4cddb'),
unhexlify('0001efa4bd154ee415b9413a421cedf04359fff945a30e7c115465b1c780a85b65c0e45c'))
with self.assertRaises(EVP.EVPError):
decrypt(
unhexlify('a78a510416c1a6f1b48077cc9eeb4287dcf8c5d3179ef80136c18876d774570d'),
unhexlify('5cd148eeaf680d4ff933aed83009cad4110162f53ef89fd44fad09611b0524d4'),
unhexlify(''))
class PBKDF2TestCase(unittest.TestCase):
def test_rfc3211_test_vectors(self):
password = b'password'
salt = unhexlify('1234567878563412')
iter = 5
keylen = 8
ret = EVP.pbkdf2(password, salt, iter, keylen)
self.assertEqual(ret, unhexlify(b'd1daa78615f287e6'))
password = b'All n-entities must communicate with other n-entities' + \
b' via n-1 entiteeheehees'
salt = unhexlify('1234567878563412')
iter = 500
keylen = 16
ret = EVP.pbkdf2(password, salt, iter, keylen)
self.assertEqual(ret, unhexlify(b'6a8970bf68c92caea84a8df285108586'))
class HMACTestCase(unittest.TestCase):
data1 = [b'', b'More text test vectors to stuff up EBCDIC machines :-)',
a2b_hex("b760e92d6662d351eb3801057695ac0346295356")]
data2 = [a2b_hex(b'0b' * 16), b"Hi There",
a2b_hex("675b0b3a1b4ddf4e124872da6c2f632bfed957e9")]
data3 = [b'Jefe', b"what do ya want for nothing?",
a2b_hex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")]
data4 = [a2b_hex(b'aa' * 16), a2b_hex(b'dd' * 50),
a2b_hex("d730594d167e35d5956fd8003d0db3d3f46dc7bb")]
data = [data1, data2, data3, data4]
def test_simple(self):
algo = 'sha1'
for d in self.data:
h = EVP.HMAC(d[0], algo)
h.update(d[1])
ret = h.final()
self.assertEqual(ret, d[2])
with self.assertRaises(ValueError):
EVP.HMAC(d[0], algo='nosuchalgo')
def make_chain_HMAC(self, key, start, input, algo='sha1'): # noqa
chain = []
hmac = EVP.HMAC(key, algo)
hmac.update(repr(start))
digest = hmac.final()
chain.append((digest, start))
for i in input:
hmac.reset(digest)
hmac.update(repr(i))
digest = hmac.final()
chain.append((digest, i))
return chain
def make_chain_hmac(self, key, start, input, algo='sha1'):
chain = []
digest = EVP.hmac(key, start, algo)
chain.append((digest, start))
for i in input:
digest = EVP.hmac(digest, i, algo)
chain.append((digest, i))
return chain
def verify_chain_hmac(self, key, start, chain, algo='sha1'):
digest = EVP.hmac(key, start, algo)
c = chain[0]
if c[0] != digest or c[1] != start:
return 0
for d, v in chain[1:]:
digest = EVP.hmac(digest, v, algo)
if digest != d:
return 0
return 1
def verify_chain_HMAC(self, key, start, chain, algo='sha1'): # noqa
hmac = EVP.HMAC(key, algo)
hmac.update(start)
digest = hmac.final()
c = chain[0]
if c[0] != digest or c[1] != start:
return 0
for d, v in chain[1:]:
hmac.reset(digest)
hmac.update(v)
digest = hmac.final()
if digest != d:
return 0
return 1
def test_complicated(self):
make_chain = self.make_chain_hmac
verify_chain = self.verify_chain_hmac
key = b'numero uno'
start = b'zeroth item'
input = [b'first item', b'go go go', b'fly fly fly']
chain = make_chain(key, start, input)
self.assertEqual(verify_chain(b'some key', start, chain), 0)
self.assertEqual(verify_chain(key, start, chain), 1)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EVPTestCase))
suite.addTest(unittest.makeSuite(CipherTestCase))
suite.addTest(unittest.makeSuite(PBKDF2TestCase))
suite.addTest(unittest.makeSuite(HMACTestCase))
return suite
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
| {
"repo_name": "Edzvu/Edzvu.github.io",
"path": "M2Crypto-0.35.2/tests/test_evp.py",
"copies": "1",
"size": "24835",
"license": "mit",
"hash": -8210561619617866000,
"line_mean": 38.1102362205,
"line_max": 254,
"alpha_frac": 0.5654922488,
"autogenerated": false,
"ratio": 3.388130968622101,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44536232174221013,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
import kafka.vendor.selectors34 as selectors
import socket
import time
import pytest
from kafka.client_async import KafkaClient, IdleConnectionManager
from kafka.cluster import ClusterMetadata
from kafka.conn import ConnectionStates
import kafka.errors as Errors
from kafka.future import Future
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.produce import ProduceRequest
from kafka.structs import BrokerMetadata
@pytest.fixture
def cli(mocker, conn):
client = KafkaClient(api_version=(0, 9))
mocker.patch.object(client, '_selector')
client.poll(future=client.cluster.request_update())
return client
def test_bootstrap(mocker, conn):
conn.state = ConnectionStates.CONNECTED
cli = KafkaClient(api_version=(0, 9))
mocker.patch.object(cli, '_selector')
future = cli.cluster.request_update()
cli.poll(future=future)
assert future.succeeded()
args, kwargs = conn.call_args
assert args == ('localhost', 9092, socket.AF_UNSPEC)
kwargs.pop('state_change_callback')
kwargs.pop('node_id')
assert kwargs == cli.config
conn.send.assert_called_once_with(MetadataRequest[0]([]), blocking=False)
assert cli._bootstrap_fails == 0
assert cli.cluster.brokers() == set([BrokerMetadata(0, 'foo', 12, None),
BrokerMetadata(1, 'bar', 34, None)])
def test_can_connect(cli, conn):
# Node is not in broker metadata - can't connect
assert not cli._can_connect(2)
# Node is in broker metadata but not in _conns
assert 0 not in cli._conns
assert cli._can_connect(0)
# Node is connected, can't reconnect
assert cli._maybe_connect(0) is True
assert not cli._can_connect(0)
# Node is disconnected, can connect
cli._conns[0].state = ConnectionStates.DISCONNECTED
assert cli._can_connect(0)
# Node is disconnected, but blacked out
conn.blacked_out.return_value = True
assert not cli._can_connect(0)
def test_maybe_connect(cli, conn):
try:
# Node not in metadata, raises AssertionError
cli._maybe_connect(2)
except AssertionError:
pass
else:
assert False, 'Exception not raised'
# New node_id creates a conn object
assert 0 not in cli._conns
conn.state = ConnectionStates.DISCONNECTED
conn.connect.side_effect = lambda: conn._set_conn_state(ConnectionStates.CONNECTING)
assert cli._maybe_connect(0) is False
assert cli._conns[0] is conn
def test_conn_state_change(mocker, cli, conn):
sel = cli._selector
node_id = 0
cli._conns[node_id] = conn
conn.state = ConnectionStates.CONNECTING
sock = conn._sock
cli._conn_state_change(node_id, sock, conn)
assert node_id in cli._connecting
sel.register.assert_called_with(sock, selectors.EVENT_WRITE, conn)
conn.state = ConnectionStates.CONNECTED
cli._conn_state_change(node_id, sock, conn)
assert node_id not in cli._connecting
sel.modify.assert_called_with(sock, selectors.EVENT_READ, conn)
# Failure to connect should trigger metadata update
assert cli.cluster._need_update is False
conn.state = ConnectionStates.DISCONNECTED
cli._conn_state_change(node_id, sock, conn)
assert node_id not in cli._connecting
assert cli.cluster._need_update is True
sel.unregister.assert_called_with(sock)
conn.state = ConnectionStates.CONNECTING
cli._conn_state_change(node_id, sock, conn)
assert node_id in cli._connecting
conn.state = ConnectionStates.DISCONNECTED
cli._conn_state_change(node_id, sock, conn)
assert node_id not in cli._connecting
def test_ready(mocker, cli, conn):
maybe_connect = mocker.patch.object(cli, 'maybe_connect')
node_id = 1
cli.ready(node_id)
maybe_connect.assert_called_with(node_id)
def test_is_ready(mocker, cli, conn):
cli._maybe_connect(0)
cli._maybe_connect(1)
# metadata refresh blocks ready nodes
assert cli.is_ready(0)
assert cli.is_ready(1)
cli._metadata_refresh_in_progress = True
assert not cli.is_ready(0)
assert not cli.is_ready(1)
# requesting metadata update also blocks ready nodes
cli._metadata_refresh_in_progress = False
assert cli.is_ready(0)
assert cli.is_ready(1)
cli.cluster.request_update()
cli.cluster.config['retry_backoff_ms'] = 0
assert not cli._metadata_refresh_in_progress
assert not cli.is_ready(0)
assert not cli.is_ready(1)
cli.cluster._need_update = False
# if connection can't send more, not ready
assert cli.is_ready(0)
conn.can_send_more.return_value = False
assert not cli.is_ready(0)
conn.can_send_more.return_value = True
# disconnected nodes, not ready
assert cli.is_ready(0)
conn.state = ConnectionStates.DISCONNECTED
assert not cli.is_ready(0)
def test_close(mocker, cli, conn):
mocker.patch.object(cli, '_selector')
call_count = conn.close.call_count
# Unknown node - silent
cli.close(2)
call_count += 0
assert conn.close.call_count == call_count
# Single node close
cli._maybe_connect(0)
assert conn.close.call_count == call_count
cli.close(0)
call_count += 1
assert conn.close.call_count == call_count
# All node close
cli._maybe_connect(1)
cli.close()
# +2 close: node 1, node bootstrap (node 0 already closed)
call_count += 2
assert conn.close.call_count == call_count
def test_is_disconnected(cli, conn):
# False if not connected yet
conn.state = ConnectionStates.DISCONNECTED
assert not cli.is_disconnected(0)
cli._maybe_connect(0)
assert cli.is_disconnected(0)
conn.state = ConnectionStates.CONNECTING
assert not cli.is_disconnected(0)
conn.state = ConnectionStates.CONNECTED
assert not cli.is_disconnected(0)
def test_send(cli, conn):
# Send to unknown node => raises AssertionError
try:
cli.send(2, None)
assert False, 'Exception not raised'
except AssertionError:
pass
# Send to disconnected node => NodeNotReady
conn.state = ConnectionStates.DISCONNECTED
f = cli.send(0, None)
assert f.failed()
assert isinstance(f.exception, Errors.NodeNotReadyError)
conn.state = ConnectionStates.CONNECTED
cli._maybe_connect(0)
# ProduceRequest w/ 0 required_acks -> no response
request = ProduceRequest[0](0, 0, [])
assert request.expect_response() is False
ret = cli.send(0, request)
assert conn.send.called_with(request)
assert isinstance(ret, Future)
request = MetadataRequest[0]([])
cli.send(0, request)
assert conn.send.called_with(request)
def test_poll(mocker):
metadata = mocker.patch.object(KafkaClient, '_maybe_refresh_metadata')
_poll = mocker.patch.object(KafkaClient, '_poll')
ifrs = mocker.patch.object(KafkaClient, 'in_flight_request_count')
ifrs.return_value = 1
cli = KafkaClient(api_version=(0, 9))
# metadata timeout wins
metadata.return_value = 1000
cli.poll()
_poll.assert_called_with(1.0)
# user timeout wins
cli.poll(250)
_poll.assert_called_with(0.25)
# default is request_timeout_ms
metadata.return_value = 1000000
cli.poll()
_poll.assert_called_with(cli.config['request_timeout_ms'] / 1000.0)
# If no in-flight-requests, drop timeout to retry_backoff_ms
ifrs.return_value = 0
cli.poll()
_poll.assert_called_with(cli.config['retry_backoff_ms'] / 1000.0)
def test__poll():
pass
def test_in_flight_request_count():
pass
def test_least_loaded_node():
pass
def test_set_topics(mocker):
request_update = mocker.patch.object(ClusterMetadata, 'request_update')
request_update.side_effect = lambda: Future()
cli = KafkaClient(api_version=(0, 10))
# replace 'empty' with 'non empty'
request_update.reset_mock()
fut = cli.set_topics(['t1', 't2'])
assert not fut.is_done
request_update.assert_called_with()
# replace 'non empty' with 'same'
request_update.reset_mock()
fut = cli.set_topics(['t1', 't2'])
assert fut.is_done
assert fut.value == set(['t1', 't2'])
request_update.assert_not_called()
# replace 'non empty' with 'empty'
request_update.reset_mock()
fut = cli.set_topics([])
assert fut.is_done
assert fut.value == set()
request_update.assert_not_called()
@pytest.fixture
def client(mocker):
_poll = mocker.patch.object(KafkaClient, '_poll')
cli = KafkaClient(request_timeout_ms=9999999,
reconnect_backoff_ms=2222,
connections_max_idle_ms=float('inf'),
api_version=(0, 9))
ttl = mocker.patch.object(cli.cluster, 'ttl')
ttl.return_value = 0
return cli
def test_maybe_refresh_metadata_ttl(mocker, client):
client.cluster.ttl.return_value = 1234
mocker.patch.object(KafkaClient, 'in_flight_request_count', return_value=1)
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(1.234)
def test_maybe_refresh_metadata_backoff(mocker, client):
mocker.patch.object(KafkaClient, 'in_flight_request_count', return_value=1)
now = time.time()
t = mocker.patch('time.time')
t.return_value = now
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(2.222) # reconnect backoff
def test_maybe_refresh_metadata_in_progress(mocker, client):
client._metadata_refresh_in_progress = True
mocker.patch.object(KafkaClient, 'in_flight_request_count', return_value=1)
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(9999.999) # request_timeout_ms
def test_maybe_refresh_metadata_update(mocker, client):
mocker.patch.object(client, 'least_loaded_node', return_value='foobar')
mocker.patch.object(client, '_can_send_request', return_value=True)
mocker.patch.object(KafkaClient, 'in_flight_request_count', return_value=1)
send = mocker.patch.object(client, 'send')
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(9999.999) # request_timeout_ms
assert client._metadata_refresh_in_progress
request = MetadataRequest[0]([])
send.assert_called_once_with('foobar', request, wakeup=False)
def test_maybe_refresh_metadata_cant_send(mocker, client):
mocker.patch.object(client, 'least_loaded_node', return_value='foobar')
mocker.patch.object(client, '_can_connect', return_value=True)
mocker.patch.object(client, '_maybe_connect', return_value=True)
mocker.patch.object(client, 'maybe_connect', return_value=True)
mocker.patch.object(KafkaClient, 'in_flight_request_count', return_value=1)
now = time.time()
t = mocker.patch('time.time')
t.return_value = now
# first poll attempts connection
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(2.222) # reconnect backoff
client.maybe_connect.assert_called_once_with('foobar', wakeup=False)
# poll while connecting should not attempt a new connection
client._connecting.add('foobar')
client._can_connect.reset_mock()
client.poll(timeout_ms=12345678)
client._poll.assert_called_with(2.222) # connection timeout (reconnect timeout)
assert not client._can_connect.called
assert not client._metadata_refresh_in_progress
def test_schedule():
pass
def test_unschedule():
pass
def test_idle_connection_manager(mocker):
t = mocker.patch.object(time, 'time')
t.return_value = 0
idle = IdleConnectionManager(100)
assert idle.next_check_ms() == float('inf')
idle.update('foo')
assert not idle.is_expired('foo')
assert idle.poll_expired_connection() is None
assert idle.next_check_ms() == 100
t.return_value = 90 / 1000
assert not idle.is_expired('foo')
assert idle.poll_expired_connection() is None
assert idle.next_check_ms() == 10
t.return_value = 100 / 1000
assert idle.is_expired('foo')
assert idle.next_check_ms() == 0
conn_id, conn_ts = idle.poll_expired_connection()
assert conn_id == 'foo'
assert conn_ts == 0
idle.remove('foo')
assert idle.next_check_ms() == float('inf')
| {
"repo_name": "scrapinghub/kafka-python",
"path": "test/test_client_async.py",
"copies": "7",
"size": "12342",
"license": "apache-2.0",
"hash": 4248268982813794000,
"line_mean": 29.1760391198,
"line_max": 88,
"alpha_frac": 0.6846540269,
"autogenerated": false,
"ratio": 3.5082433200682206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.769289734696822,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
import astropy.units as u
import os
import numpy as np
from srttools.io import mkdir_p, locations, read_data_fitszilla, \
get_chan_columns, classify_chan_columns
from srttools.utils import scantype, force_move_file, minmax, median_diff
from srttools.fit import detrend_spectroscopic_data
import warnings
from astropy import log
def default_scan_info_table():
return Table(names=['scan_id', 'start', 'stop',
'ra_min', 'ra_max', 'ra_d',
'dec_min', 'dec_max', 'dec_d',
'az_min', 'az_max', 'az_d',
'el_min', 'el_max', 'el_d',
'glon_min', 'glon_max', 'glon_d',
'glat_min', 'glat_max', 'glat_d',
'is_skydip', 'kind', 'direction'],
dtype=[int, float, float,
float, float, float, float, float, float,
float, float, float, float, float, float,
float, float, float, float, float, float,
bool, 'S10', 'S5'])
def get_subscan_info(subscan):
info = default_scan_info_table()
scan_id = subscan.meta['SubScanID']
start, stop = minmax(subscan['time'])
ramin, ramax = minmax(subscan['ra'])
decmin, decmax = minmax(subscan['dec'])
azmin, azmax = minmax(subscan['az'])
elmin, elmax = minmax(subscan['el'])
is_skydip = subscan.meta['is_skydip']
d_ra = median_diff(subscan['ra'])
d_dec = median_diff(subscan['dec'])
d_az = median_diff(subscan['az'])
d_el = median_diff(subscan['el'])
ravar = (ramax - ramin) * np.cos(np.mean((decmin, decmax)))
decvar = decmax - decmin
azvar = (azmax - azmin) * np.cos(np.mean((elmin, elmax)))
elvar = elmax - elmin
tot_eq = np.sqrt(ravar ** 2 + decvar ** 2)
tot_hor = np.sqrt(elvar ** 2 + azvar ** 2)
ravar /= tot_eq
decvar /= tot_hor
directions = np.array(['ra', 'dec', 'az', 'el'])
allvars = np.array([ravar, decvar, azvar, elvar])
if tot_eq > 2 * tot_hor:
kind = 'point'
direction = ''
else:
kind = 'line'
direction = directions[np.argmax(allvars)]
info.add_row([scan_id, start, stop,
ramin, ramax, d_ra, decmin, decmax, d_dec,
azmin, azmax, d_az, elmin, elmax, d_el,
0, 0, 0, 0, 0, 0, is_skydip, kind, direction])
return info
def format_direction(direction):
"""
Examples
--------
>>> format_direction('ra')
'ra'
>>> format_direction('el')
'alat'
>>> format_direction('az')
'alon'
"""
lowerdir = direction.lower()
if lowerdir == 'el':
return 'alat'
elif lowerdir == 'az':
return 'alon'
return direction
def get_observing_strategy_from_subscan_info(info):
"""Get observing strategy from subscan information."""
kinds = info['kind']
skydips = info['is_skydip']
lines = info[kinds == 'line']
points = info[kinds == 'point']
ctype = 'RA/DEC'
durations = (info['stop'] - info['start']) * 86400
xspc, yspc = (0, 0)
zigzag = False
stype = 'MAP'
direction = 'Unkn'
length = 0
if np.all(skydips):
stype = 'SKYDIP'
mode = 'OTF'
geom = 'LINE'
direction = 'ALAT'
elif len(lines) > len(points):
mode = 'OTF'
ra_lines = lines[lines['direction'] == 'ra']
dec_lines = lines[lines['direction'] == 'dec']
az_lines = lines[lines['direction'] == 'az']
el_lines = lines[lines['direction'] == 'el']
directions = np.array(['ra', 'dec', 'az', 'el'])
nsub = np.array([len(lines[lines['direction'] == d])
for d in directions])
direction = directions[np.argmax(nsub)]
if direction in ['ra', 'dec']:
lon_lines, dlon, lat_lines, dlat = ra_lines, 'ra', dec_lines, 'dec'
elif direction in ['az', 'el']:
lon_lines, dlon, lat_lines, dlat = az_lines, 'az', el_lines, 'el'
else:
raise ValueError('Unknown scan direction')
ctype = format_direction(dlon) + '/' + format_direction(dlat)
sample_dist_lon = lon_lines[dlon + '_d']
sample_dist_lat = lon_lines[dlat + '_d']
if len(lon_lines) == len(lat_lines):
geom = 'CROSS'
zigzag = True
length = \
np.median(lon_lines[dlon + '_max'] - lon_lines[dlon + '_min'])
elif len(lon_lines) > len(lat_lines):
geom = 'LINE'
# if we see an inversion of direction, set zigzag to True
zigzag = np.any(sample_dist_lon[:-1] * sample_dist_lon[1:] < 0)
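            # Worked example (illustrative numbers only): for consecutive
            # sample distances [0.01, 0.01, -0.01] the element-wise products
            # are [1e-4, -1e-4]; the negative entry marks a reversal of the
            # scanning direction, so zigzag becomes True.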
length = \
np.median(lon_lines[dlon + '_max'] - lon_lines[dlon + '_min'])
direction = format_direction(dlon)
xspc = 0
yspc = median_diff(info[dlat + '_min'], sorting=True)
else:
geom = 'LINE'
zigzag = np.any(sample_dist_lat[:-1] * sample_dist_lat[1:] < 0)
length = \
np.median(lat_lines[dlat + '_max'] - lat_lines[dlat + '_min'])
direction = format_direction(dlat)
yspc = 0
xspc = median_diff(info[dlon + '_min'], sorting=True)
else:
mode = 'RASTER'
geom = 'SINGLE'
results = type('results', (), {})()
results.mode = mode
results.geom = geom
results.sep = (xspc, yspc)
results.zigzag = zigzag
results.length = length
results.type = stype
results.ctype = ctype
results.stype = stype
results.scanvel = length / np.median(durations)
results.direction = direction
results.nobs = len(info['scan_id'])
results.scantime = np.median(durations)
return results
def _copy_hdu_and_adapt_length(hdu, length):
data = hdu.data
columns = []
for col in data.columns:
newvals = [data[col.name][0]] * length
newcol = fits.Column(name=col.name, array=newvals,
format=col.format)
columns.append(newcol)
newhdu = fits.BinTableHDU.from_columns(columns)
newhdu.header = hdu.header
return newhdu
keywords_to_reset = [
'11CD2F', '11CD2I', '11CD2J', '11CD2R', '11CD2S',
'1CRPX2F', '1CRPX2I', '1CRPX2J', '1CRPX2R', '1CRPX2S', '1CRVL2F',
'1CRVL2I', '1CRVL2J', '1CRVL2R', '1CRVL2S', '1CTYP2F', '1CTYP2I',
'1CTYP2J', '1CTYP2R', '1CTYP2S', '1CUNI2F', '1CUNI2I', '1CUNI2J',
'1CUNI2R', '1CUNI2S', '1SOBS2F', '1SOBS2I', '1SOBS2J', '1SOBS2R',
'1SOBS2S', '1SPEC2F', '1SPEC2I', '1SPEC2J', '1SPEC2R', '1SPEC2S',
'1VSOU2R', 'AN', 'ANRX', 'AW', 'AWRX', 'BANDWID', 'BLATOBJ', 'BLONGOBJ',
'CA', 'CARX', 'DEWCABIN', 'DEWRTMOD', 'DEWUSER', 'DEWZERO', 'DISTANCE',
'ECCENTR', 'FDELTACA', 'FDELTAIA', 'FDELTAIE', 'FDELTAX', 'FDELTAXT',
'FDELTAY', 'FDELTAYT', 'FDELTAZ', 'FDELTAZT', 'FDTYPCOD', 'FEBEBAND',
'FEBEFEED', 'FEGAIN', 'FREQRES', 'FRTHRWHI', 'FRTHRWLO', 'GRPID1',
'GRPLC1', 'HACA', 'HACA2', 'HACA2RX', 'HACA3', 'HACA3RX', 'HACARX',
'HASA', 'HASA2', 'HASA2RX', 'HASARX', 'HECA2', 'HECA2RX', 'HECA3',
'HECA3RX', 'HECE', 'HECE2', 'HECE2RX', 'HECE6', 'HECE6RX', 'HECERX',
'HESA', 'HESA2', 'HESA2RX', 'HESA3', 'HESA3RX', 'HESA4', 'HESA4RX',
'HESA5', 'HESA5RX', 'HESARX', 'HESE', 'HESERX', 'HSCA', 'HSCA2',
'HSCA2RX', 'HSCA5', 'HSCA5RX', 'HSCARX', 'HSSA3', 'HSSA3RX', 'IA', 'IARX',
'IE', 'IERX', 'INCLINAT', 'LATOBJ', 'LONGASC', 'LONGOBJ', 'LONGSTRN',
'NFEBE', 'NOPTREFL', 'NPAE', 'NPAERX', 'NPHASES', 'NRX', 'NRXRX', 'NRY',
'NRYRX', 'NUSEBAND', 'OMEGA', 'OPTPATH', 'ORBEPOCH', 'ORBEQNOX', 'PATLAT',
'PATLONG', 'PDELTACA', 'PDELTAIA', 'PDELTAIE', 'PERIDATE', 'PERIDIST',
'REFOFFX', 'REFOFFY', 'REF_ONLN', 'REF_POL', 'RESTFREQ', 'SBSEP',
'SCANLEN', 'SCANLINE', 'SCANNUM', 'SCANPAR1', 'SCANPAR2', 'SCANROT',
'SCANRPTS', 'SCANSKEW', 'SCANTIME', 'SCANXSPC', 'SCANXVEL', 'SCANYSPC',
'SIDEBAND', 'SIG_ONLN', 'SIG_POL', 'SKYFREQ', 'SWTCHMOD', 'TBLANK',
'TRANSITI', 'TSYNC', 'WCSNM2F', 'WCSNM2I', 'WCSNM2J', 'WCSNM2R',
'WCSNM2S', 'WOBTHROW', 'WOBUSED']
def pack_data(scan, polar_dict, detrend=False):
"""Pack data into MBFITS-ready format
Examples
--------
>>> scan = {'Feed0_LCP': np.arange(4), 'Feed0_RCP': np.arange(4, 8)}
>>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP'}
>>> res = pack_data(scan, polar)
>>> np.allclose(res, [[0, 4], [1, 5], [2, 6], [3, 7]])
True
>>> scan = {'Feed0_LCP': np.arange(2), 'Feed0_RCP': np.arange(2, 4),
... 'Feed0_Q': np.arange(4, 6), 'Feed0_U': np.arange(6, 8)}
>>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP', 'Q': 'Feed0_Q',
... 'U': 'Feed0_U'}
>>> res = pack_data(scan, polar)
>>> np.allclose(res, [[0, 2, 4, 6], [1, 3, 5, 7]])
True
>>> scan = {'Feed0_LCP': np.ones((2, 4)), 'Feed0_RCP': np.zeros((2, 4))}
>>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP'}
>>> res = pack_data(scan, polar)
>>> np.allclose(res, [[[ 1., 1., 1., 1.], [ 0., 0., 0., 0.]],
... [[ 1., 1., 1., 1.], [ 0., 0., 0., 0.]]])
True
"""
polar_list = list(polar_dict.keys())
if 'LCP' in polar_list:
data = [scan[polar_dict['LCP']], scan[polar_dict['RCP']]]
try:
data.append(scan[polar_dict['Q']])
data.append(scan[polar_dict['U']])
except KeyError:
pass
else: # pragma: no cover
raise ValueError('Polarization kind not implemented yet')
if detrend:
new_data = []
for d in data:
detr, _ = detrend_spectroscopic_data(0, d, 'als')
new_data.append(detr)
data = new_data
return np.stack(data, axis=1)
def reset_all_keywords(header):
"""Set a specific list of keywords to zero or empty string.
Examples
--------
>>> from astropy.io.fits import Header
>>> h = Header({'SCANNUM': 5, 'OPTPATH': 'dafafa', 'a': 'blabla'})
>>> h2 = reset_all_keywords(h)
>>> h2['SCANNUM']
0
>>> h2['OPTPATH']
''
>>> # This is not in the list of keywords to eliminate
>>> h2['a']
'blabla'
"""
import six
for key in keywords_to_reset:
if key in header:
if isinstance(header[key], six.string_types):
header[key] = ''
else:
header[key] = type(header[key])(0)
return header
class MBFITS_creator():
def __init__(self, dirname, test=False):
self.dirname = dirname
self.test = test
mkdir_p(dirname)
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir, '..', 'data')
self.template_dir = os.path.join(datadir, 'mbfits_template')
self.FEBE = {}
self.GROUPING = 'GROUPING.fits'
with fits.open(os.path.join(self.template_dir,
'GROUPING.fits'),
memmap=False) as grouping_template:
grouping_template[1].data = grouping_template[1].data[:1]
grouping_template.writeto(
os.path.join(self.dirname, self.GROUPING), overwrite=True)
self.SCAN = 'SCAN.fits'
with fits.open(os.path.join(self.template_dir,
'SCAN.fits'),
memmap=False) as scan_template:
scan_template[1].data['FEBE'][0] = 'EMPTY'
scan_template.writeto(os.path.join(self.dirname, self.SCAN),
overwrite=True)
self.date_obs = Time.now()
self.scan_info = default_scan_info_table()
self.nfeeds = None
self.ra = 0
self.dec = 0
self.site = None
self.lst = 1e32
def fill_in_summary(self, summaryfile):
log.info('Loading {}'.format(summaryfile))
with fits.open(summaryfile, memmap=False) as hdul:
header = hdul[0].header
hdudict = dict(header.items())
self.ra = np.degrees(hdudict['RightAscension'])
self.dec = np.degrees(hdudict['Declination'])
self.restfreq = None
if 'RESTFREQ1' in hdudict:
                self.restfreq = hdudict['RESTFREQ1']
try:
self.date_obs = Time(hdudict['DATE-OBS'])
except KeyError:
self.date_obs = Time(hdudict['DATE'])
try:
self.obsid = int(hdudict['OBSID'])
except (KeyError, ValueError):
self.obsid = 9999
with fits.open(os.path.join(self.dirname, self.GROUPING),
memmap=False) as grouphdul:
groupheader = grouphdul[0].header
groupdict = dict(groupheader.items())
for key in hdudict.keys():
if key in groupdict:
groupheader[key] = hdudict[key]
groupheader['RA'] = self.ra
groupheader['DEC'] = self.dec
groupheader['DATE-OBS'] = self.date_obs.value
groupheader['MJD-OBS'] = self.date_obs.mjd
groupheader['SCANNUM'] = self.obsid
grouphdul.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits', os.path.join(self.dirname, self.GROUPING))
with fits.open(os.path.join(self.dirname, self.SCAN),
memmap=False) as scanhdul:
scanheader = reset_all_keywords(scanhdul[1].header)
scandict = dict(scanheader.items())
for key in hdudict.keys():
if key[:5] in ['NAXIS', 'PGCOU', 'GCOUN']:
continue
if key in scandict:
scanheader[key] = hdudict[key]
# Todo: update with correct keywords
scanheader['DATE-OBS'] = self.date_obs.value
scanheader['MJD'] = self.date_obs.mjd
scanheader['SCANNUM'] = self.obsid
scanhdul.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits', os.path.join(self.dirname, self.SCAN))
def add_subscan(self, scanfile, detrend=False):
log.info('Loading {}'.format(scanfile))
subscan = read_data_fitszilla(scanfile)
subscan_info = get_subscan_info(subscan)
self.scan_info.add_row(subscan_info[0])
time = Time(subscan['time'] * u.day, scale='utc', format='mjd')
if self.date_obs.mjd > time[0].mjd:
self.date_obs = time[0]
if self.site is None:
self.site = subscan.meta['site']
chans = get_chan_columns(subscan)
combinations = classify_chan_columns(chans)
if self.nfeeds is None:
self.nfeeds = len(combinations.keys())
for feed in combinations:
felabel = subscan.meta['receiver'] + '{}'.format(feed)
febe = felabel + '-' + subscan.meta['backend']
datapar = os.path.join(self.template_dir, '1',
'FLASH460L-XFFTS-DATAPAR.fits')
with fits.open(datapar, memmap=False) as subs_par_template:
n = len(subscan)
# ------------- Update DATAPAR --------------
subs_par_template[1] = \
_copy_hdu_and_adapt_length(subs_par_template[1], n)
newtable = Table(subs_par_template[1].data)
newtable['MJD'] = subscan['time']
newtable['LST'][:] = \
time.sidereal_time('apparent',
locations[subscan.meta['site']].lon
).value
if newtable['LST'][0] < self.lst:
self.lst = newtable['LST'][0]
newtable['INTEGTIM'][:] = \
subscan['Feed0_LCP'].meta['sample_rate']
newtable['RA'] = subscan['ra'].to(u.deg)
newtable['DEC'] = subscan['dec'].to(u.deg)
newtable['AZIMUTH'] = subscan['az'].to(u.deg)
newtable['ELEVATIO'] = subscan['el'].to(u.deg)
_, direction = scantype(subscan['ra'], subscan['dec'],
el=subscan['el'], az=subscan['az'])
direction_cut = \
direction.replace('<', '').replace('>', '').lower()
if direction_cut in ['ra', 'dec']:
baslon = subscan['ra'].to(u.deg)
baslat = subscan['dec'].to(u.deg)
yoff = baslat.value - self.dec
# GLS projection
xoff = \
(baslon.value - self.ra)
newtable['LONGOFF'] = xoff * np.cos(np.radians(self.dec))
newtable['LATOFF'] = yoff
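                    # Illustrative number only: at self.dec = 60 deg, a 1 deg
                    # offset in RA corresponds to a LONGOFF of about 0.5 deg,
                    # since cos(60 deg) = 0.5.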
elif direction_cut in ['el', 'az']:
warnings.warn('AltAz projection not implemented properly')
baslon, baslat = \
subscan['az'].to(u.deg), subscan['el'].to(u.deg)
newtable['LONGOFF'] = 0 * u.deg
newtable['LATOFF'] = 0 * u.deg
else:
raise ValueError('Unknown coordinates')
newtable['CBASLONG'] = baslon
newtable['CBASLAT'] = baslat
newtable['BASLONG'] = baslon
newtable['BASLAT'] = baslat
newhdu = fits.table_to_hdu(newtable)
subs_par_template[1].data = newhdu.data
subs_par_template[1].header['DATE-OBS'] = \
time[0].fits.replace('(UTC)', '')
subs_par_template[1].header['LST'] = newtable['LST'][0]
subs_par_template[1].header['FEBE'] = febe
subs_par_template[1].header['SCANDIR'] = \
format_direction(direction_cut).upper()
subs_par_template[1].header['SCANNUM'] = self.obsid
outdir = str(subscan.meta['SubScanID'])
mkdir_p(os.path.join(self.dirname, outdir))
new_datapar = os.path.join(outdir,
febe + '-DATAPAR.fits')
subs_par_template.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits',
os.path.join(self.dirname, new_datapar))
arraydata = os.path.join(self.template_dir, '1',
'FLASH460L-XFFTS-ARRAYDATA-1.fits')
new_arraydata_rows = []
bands = list(combinations[feed].keys())
for baseband in combinations[feed]:
nbands = np.max(bands)
ch = list(combinations[feed][baseband].values())[0]
packed_data = pack_data(subscan, combinations[feed][baseband],
detrend=detrend)
# ------------- Update ARRAYDATA -------------
with fits.open(arraydata, memmap=False) as subs_template:
subs_template[1] = \
_copy_hdu_and_adapt_length(subs_template[1], n)
new_header = \
reset_all_keywords(subs_template[1].header)
new_header['SCANNUM'] = self.obsid
new_header['SUBSNUM'] = subscan.meta['SubScanID']
new_header['DATE-OBS'] = self.date_obs.fits
new_header['FEBE'] = febe
new_header['BASEBAND'] = baseband
new_header['NUSEBAND'] = nbands
new_header['CHANNELS'] = subscan.meta['channels']
new_header['SKYFREQ'] = \
subscan[ch].meta['frequency'].to('Hz').value
if self.restfreq is not None:
new_header['RESTFREQ'] = self.restfreq
else:
new_header['RESTFREQ'] = new_header['SKYFREQ']
bandwidth = subscan[ch].meta['bandwidth'].to('Hz').value
new_header['BANDWID'] = bandwidth
new_header['FREQRES'] = bandwidth / new_header['CHANNELS']
# Todo: check sideband
new_header['SIDEBAND'] = 'USB'
# Todo: check all these strange keywords. These are
# probably NOT the rest frequencies!
new_header['1CRVL2F'] = new_header['RESTFREQ']
new_header['1CRVL2S'] = new_header['RESTFREQ']
for i in ['1CRPX2S', '1CRPX2R', '1CRPX2F', '1CRPX2J']:
new_header[i] = (new_header['CHANNELS'] + 1) // 2
subs_template[1].header = new_header
newtable = Table(subs_template[1].data)
newtable['MJD'] = subscan['time']
newtable['DATA'] = packed_data
newhdu = fits.table_to_hdu(newtable)
subs_template[1].data = newhdu.data
subname = febe + '-ARRAYDATA-{}.fits'.format(baseband)
new_sub = \
os.path.join(outdir, subname)
subs_template.writeto('tmp.fits', overwrite=True)
new_arraydata_rows.append([2, new_sub, 'URL',
'ARRAYDATA-MBFITS',
subscan.meta['SubScanID'], febe,
baseband])
force_move_file('tmp.fits',
os.path.join(self.dirname, new_sub))
# Finally, update GROUPING file
with fits.open(os.path.join(self.dirname,
self.GROUPING),
memmap=False) as grouping:
newtable = Table(grouping[1].data)
if febe not in self.FEBE:
nfebe = len(list(self.FEBE.keys()))
new_febe = self.add_febe(febe, combinations, feed,
subscan[ch].meta,
bands=bands)
grouping[0].header['FEBE{}'.format(nfebe)] = febe
grouping[0].header['FREQ{}'.format(nfebe)] = \
subscan[ch].meta['frequency'].to('Hz').value
grouping[0].header['BWID{}'.format(nfebe)] = \
subscan[ch].meta['bandwidth'].to('Hz').value
grouping[0].header['LINE{}'.format(nfebe)] = ''
newtable.add_row([2, new_febe, 'URL', 'FEBEPAR-MBFITS',
-999, febe, -999])
self.FEBE[febe] = new_febe
newtable.add_row([2, new_datapar, 'URL', 'DATAPAR-MBFITS',
-999, febe, -999])
for row in new_arraydata_rows:
newtable.add_row(row)
new_hdu = fits.table_to_hdu(newtable)
grouping[1].data = new_hdu.data
grouping[0].header['INSTRUME'] = subscan[ch].meta['backend']
grouping[0].header['TELESCOP'] = self.site
grouping.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits',
os.path.join(self.dirname, self.GROUPING))
if self.test:
break
def add_febe(self, febe, feed_info, feed, meta, bands=None):
if bands is None:
bands = [1]
polar = 'N'
polar_code = polar[0]
febe_name = febe + '-FEBEPAR.fits'
with fits.open(
os.path.join(self.template_dir,
'FLASH460L-XFFTS-FEBEPAR.fits'),
memmap=False) as febe_template:
febe_template[1].header = \
reset_all_keywords(febe_template[1].header)
febedata = Table(febe_template[1].data)
# FEBEFEED stores the total number of feeds for the receiver in
# use. A receiver outputting two polarisations counts as two
# feeds. For an array, count the total no. of pixels, even if
# not all in use.
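            # Illustrative example (hypothetical receiver): a 7-pixel array
            # with two polarisations per pixel would give FEBEFEED = 14,
            # which is what the ``self.nfeeds * 2`` assignment below encodes.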
febedata['USEBAND'] = np.array([bands])
febedata['NUSEFEED'] = np.array([[2]])
febedata['USEFEED'] = \
np.array([[feed * 2 + 1, feed * 2 + 2,
feed * 2 + 1, feed * 2 + 2]])
febedata['BESECTS'] = np.array([[0]])
febedata['FEEDTYPE'] = np.array([[1, 2, 3, 4]])
febedata['POLTY'][:] = np.array([polar_code])
febedata['POLA'][:] = np.array([[0., 0.]])
new_hdu = fits.table_to_hdu(febedata)
febe_template[1].data = new_hdu.data
# TODO: fill in the information given in the subscan[ch]
new_febe = os.path.join(self.dirname, febe_name)
febe_template[1].header['DATE-OBS'] = self.date_obs.fits
febe_template[1].header['FEBE'] = febe
febe_template[1].header['FEBEFEED'] = self.nfeeds * 2
febe_template[1].header['NUSEBAND'] = max(bands)
febe_template[1].header['NPHASES'] = 1
febe_template[1].header['SWTCHMOD'] = 'NONE'
febe_template[1].header['SCANNUM'] = self.obsid
if 'Q' in feed_info[feed][bands[0]].keys():
febe_template[1].header['FDTYPCOD'] = '1:L, 2:R, 3:Q, 4:U'
else:
febe_template[1].header['FDTYPCOD'] = '1:L, 2:R'
febe_template.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits', new_febe)
with fits.open(os.path.join(self.dirname, self.SCAN),
memmap=False) as scan:
newtable = Table(scan[1].data)
if newtable['FEBE'][0].strip() == 'EMPTY':
newtable['FEBE'][0] = febe
else:
newtable.add_row([febe])
new_hdu = fits.table_to_hdu(newtable)
scan[1].data = new_hdu.data
scanheader = scan[1].header
scanheader['SITELONG'] = np.degrees(meta['SiteLongitude'])
scanheader['SITELAT'] = np.degrees(meta['SiteLatitude'])
scanheader['SITEELEV'] = meta['SiteHeight']
diameter = 64. if meta['site'].lower().strip() == 'srt' else 32.
scanheader['DIAMETER'] = diameter
scanheader['PROJID'] = meta['Project_Name']
scan.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits', os.path.join(self.dirname, self.SCAN))
return febe_name
def update_scan_info(self):
info = \
get_observing_strategy_from_subscan_info(self.scan_info)
with fits.open(os.path.join(self.dirname, self.SCAN),
memmap=False) as scanhdul:
scanheader = scanhdul[1].header
# Todo: update with correct keywords
scanheader['CTYPE'] = info.ctype
scanheader['CTYPE1'] = 'RA---GLS'
scanheader['CTYPE2'] = 'DEC--GLS'
scanheader['CRVAL1'] = self.ra
scanheader['CRVAL2'] = self.dec
scanheader['BLONGOBJ'] = self.ra
scanheader['BLATOBJ'] = self.dec
scanheader['LONGOBJ'] = self.ra if not info.ctype[0] == 'A' else 0
scanheader['LATOBJ'] = self.dec if not info.ctype[0] == 'A' else 0
scanheader['EQUINOX'] = 2000.
scanheader['GRPLC1'] = 'GROUPING.fits'
scanheader['LST'] = self.lst
scanheader['LATPOLE'] = 90.
scanheader['LONPOLE'] = 0.
scanheader['PATLONG'] = 0
scanheader['MOVEFRAM'] = False
if info.ctype == 'ALON/ALAT':
scanheader['WCSNAME'] = 'Absolute horizontal'
scanheader['SCANTYPE'] = info.stype.upper()
scanheader['SCANDIR'] = info.direction.upper()
scanheader['SCANXVEL'] = info.scanvel
scanheader['SCANTIME'] = info.scantime
scanheader['SCANMODE'] = info.mode.upper()
scanheader['SCANGEOM'] = info.geom.upper()
scanheader['SCANLINE'] = 1
scanheader['SCANLEN'] = np.degrees(info.length)
scanheader['SCANYSPC'] = np.degrees(info.sep[1])
scanheader['SCANXSPC'] = np.degrees(info.sep[0])
scanheader['SCANPAR1'] = -999
scanheader['SCANPAR2'] = -999
scanheader['ZIGZAG'] = info.zigzag
scanheader['PHASE1'] = 'sig'
scanheader['PHASE2'] = 'sig'
scanheader['NOBS'] = info.nobs
scanheader['NSUBS'] = info.nobs
scanheader['WOBCYCLE'] = 0.
scanheader['WOBDIR'] = 'NONE'
scanheader['WOBMODE'] = 'NONE'
scanheader['WOBPATT'] = 'NONE'
scanhdul.writeto('tmp.fits', overwrite=True)
force_move_file('tmp.fits', os.path.join(self.dirname, self.SCAN))
def wrap_up_file(self):
import copy
prihdu = fits.PrimaryHDU()
with fits.open(os.path.join(self.dirname, self.GROUPING),
memmap=False) as grouhdl:
prihdu.header = copy.deepcopy(grouhdl[0].header)
file_list = list(zip(grouhdl[1].data['MEMBER_LOCATION'],
grouhdl[1].data['EXTNAME'],
grouhdl[1].data['FEBE']))
hdulists = {}
for febe in self.FEBE.keys():
hdulists[febe] = fits.HDUList([prihdu])
with fits.open(os.path.join(self.dirname, self.SCAN),
memmap=False) as scanhdul:
scanhdul[1].data['FEBE'] = [febe]
newhdu = type(scanhdul[1])()
newhdu.data = scanhdul[1].data
newhdu.header = scanhdul[1].header
hdulists[febe].append(newhdu)
for fname, ext, febe in file_list:
if febe == '':
continue
with fits.open(os.path.join(self.dirname, fname),
memmap=False) as hl:
newhdu = type(hl[ext])()
newhdu.data = hl[ext].data
newhdu.header = hl[ext].header
hdulists[febe].append(newhdu)
fnames = {}
for febe, hdulist in hdulists.items():
fname = self.dirname + '.' + febe + '.fits'
hdulist.writeto(fname, overwrite=True)
hdulist.close()
fnames[febe] = fname
return fnames
| {
"repo_name": "matteobachetti/srt-single-dish-tools",
"path": "srttools/converters/mbfits.py",
"copies": "1",
"size": "30651",
"license": "bsd-3-clause",
"hash": 6018597750660430000,
"line_mean": 39.5436507937,
"line_max": 79,
"alpha_frac": 0.5084662817,
"autogenerated": false,
"ratio": 3.42545820295038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.443392448465038,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
from astropy.io import fits
from astropy.time import Time
from astropy.io.fits.column import _parse_tdim
import astropy.units as u
import astropy.constants as c
import os
import numpy as np
from srttools.io import mkdir_p, locations, read_data_fitszilla, \
get_chan_columns, classify_chan_columns, interpret_chan_name
import glob
model_primary_header = """
SIMPLE = T
BITPIX = 8
NAXIS = 0
EXTEND = T
BLOCKED = T
ORIGIN = 'SRT'
CREATOR = ' '
END
"""
model_header = """
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 280 / width of table in bytes
NAXIS2 = 32 / number of rows in table
PCOUNT = 2621440 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 39 / number of fields in each row
EXTNAME = 'SINGLE DISH' / name of this binary table extension
NMATRIX = 1 / Number of DATA arrays
OBSERVER= '' / Observer name(s)
PROJID = '' / Project name
TELESCOP= 'SRT ' / Telescope name
OBSGEO-X= 4865182.7660 / [m] Antenna ITRF X-coordinate
OBSGEO-Y= 791922.6890 / [m] Antenna ITRF Y-coordinate
OBSGEO-Z= 4035137.1740 / [m] Antenna ITRF Z-coordinate
TTYPE1 = 'SCAN ' / label for field
TFORM1 = '1I ' / format of field
TTYPE2 = 'CYCLE ' / label for field
TFORM2 = '1J ' / format of field
TTYPE3 = 'DATE-OBS' / label for field
TFORM3 = '10A ' / format of field
TTYPE4 = 'TIME ' / label for field
TFORM4 = '1D ' / format of field
TUNIT4 = 's ' / units of field
TTYPE5 = 'EXPOSURE' / label for field
TFORM5 = '1E ' / format of field
TUNIT5 = 's ' / units of field
TTYPE6 = 'OBJECT ' / label for field
TFORM6 = '16A ' / format of field
TTYPE7 = 'OBJ-RA ' / label for field
TFORM7 = '1D ' / format of field
TUNIT7 = 'deg ' / units of field
TTYPE8 = 'OBJ-DEC ' / label for field
TFORM8 = '1D ' / format of field
TUNIT8 = 'deg ' / units of field
TTYPE9 = 'RESTFRQ ' / label for field
TFORM9 = '1D ' / format of field
TUNIT9 = 'Hz ' / units of field
TTYPE10 = 'OBSMODE ' / label for field
TFORM10 = '16A ' / format of field
TTYPE11 = 'BEAM ' / label for field
TFORM11 = '1I ' / format of field
TTYPE12 = 'IF ' / label for field
TFORM12 = '1I ' / format of field
TTYPE13 = 'FREQRES ' / label for field
TFORM13 = '1D ' / format of field
TUNIT13 = 'Hz ' / units of field
TTYPE14 = 'BANDWID ' / label for field
TFORM14 = '1D ' / format of field
TUNIT14 = 'Hz ' / units of field
CTYPE1 = 'FREQ ' / DATA array axis 1: frequency in Hz.
TTYPE15 = 'CRPIX1 ' / label for field
TFORM15 = '1E ' / format of field
TTYPE16 = 'CRVAL1 ' / label for field
TFORM16 = '1D ' / format of field
TUNIT16 = 'Hz ' / units of field
TTYPE17 = 'CDELT1 ' / label for field
TFORM17 = '1D ' / format of field
TUNIT17 = 'Hz ' / units of field
CTYPE2 = 'STOKES ' / DATA array axis 2: polarization code
CRPIX2 = 1.0E+00 / Polarization code reference pixel
CRVAL2 = -5.0E+00 / Polarization code at reference pixel (XX)
CDELT2 = -1.0E+00 / Polarization code axis increment
CTYPE3 = 'RA ' / DATA array axis 3 (degenerate): RA (mid-int)
CRPIX3 = 1.0E+00 / RA reference pixel
TTYPE18 = 'CRVAL3 ' / label for field
TFORM18 = '1D ' / format of field
TUNIT18 = 'deg ' / units of field
CDELT3 = -1.0E+00 / RA axis increment
CTYPE4 = 'DEC ' / DATA array axis 4 (degenerate): Dec (mid-int)
CRPIX4 = 1.0E+00 / Dec reference pixel
TTYPE19 = 'CRVAL4 ' / label for field
TFORM19 = '1D ' / format of field
TUNIT19 = 'deg ' / units of field
CDELT4 = 1.0E+00 / Dec axis increment
TTYPE20 = 'SCANRATE' / label for field
TFORM20 = '2E ' / format of field
TUNIT20 = 'deg/s ' / units of field
SPECSYS = 'LSRK ' / Doppler reference frame (transformed)
SSYSOBS = 'TOPOCENT' / Doppler reference frame of observation
EQUINOX = 2.0E+03 / Equinox of equatorial coordinates
RADESYS = 'FK5 ' / Equatorial coordinate frame
TTYPE21 = 'TSYS ' / label for field
TFORM21 = '2E ' / format of field
TUNIT21 = 'K ' / units of field
TTYPE22 = 'CALFCTR ' / label for field
TFORM22 = '2E ' / format of field
TTYPE23 = 'DATA ' / label for field
TFORM23 = '1PE(16384)' / format of field
TTYPE24 = 'TDIM23 ' / label for field
TFORM24 = '16A ' / format of field
TUNIT24 = 'K ' / units of field
TTYPE25 = 'FLAGGED ' / label for field
TFORM25 = '1PB(16384)' / format of field
TTYPE26 = 'TDIM25 ' / label for field
TFORM26 = '16A ' / format of field
TTYPE27 = 'TCAL ' / label for field
TFORM27 = '2E ' / format of field
TUNIT27 = 'Jy ' / units of field
TTYPE28 = 'TCALTIME' / label for field
TFORM28 = '16A ' / format of field
TTYPE29 = 'AZIMUTH ' / label for field
TFORM29 = '1E ' / format of field
TUNIT29 = 'deg ' / units of field
TTYPE30 = 'ELEVATIO' / label for field
TFORM30 = '1E ' / format of field
TUNIT30 = 'deg ' / units of field
TTYPE31 = 'PARANGLE' / label for field
TFORM31 = '1E ' / format of field
TUNIT31 = 'deg ' / units of field
TTYPE32 = 'FOCUSAXI' / label for field
TFORM32 = '1E ' / format of field
TUNIT32 = 'm ' / units of field
TTYPE33 = 'FOCUSTAN' / label for field
TFORM33 = '1E ' / format of field
TUNIT33 = 'm ' / units of field
TTYPE34 = 'FOCUSROT' / label for field
TFORM34 = '1E ' / format of field
TUNIT34 = 'deg ' / units of field
TTYPE35 = 'TAMBIENT' / label for field
TFORM35 = '1E ' / format of field
TUNIT35 = 'C ' / units of field
TTYPE36 = 'PRESSURE' / label for field
TFORM36 = '1E ' / format of field
TUNIT36 = 'Pa ' / units of field
TTYPE37 = 'HUMIDITY' / label for field
TFORM37 = '1E ' / format of field
TUNIT37 = '% ' / units of field
TTYPE38 = 'WINDSPEE' / label for field
TFORM38 = '1E ' / format of field
TUNIT38 = 'm/s ' / units of field
TTYPE39 = 'WINDDIRE' / label for field
TFORM39 = '1E ' / format of field
TUNIT39 = 'deg ' / units of field
"""
# LIST_TTYPE = ["SCAN", "CYCLE", "DATE-OBS", "TIME",
# "EXPOSURE", "OBJECT", "OBJ_RA", "OBJ_DEC",
# "RESTFREQ", "OBSMODE", "BEAM", "_IF",
# "FREQRES", "BANDWID", "CRPIX1", "CRVAL1",
# "CDELT1", "CRVAL3", "CRVAL4", "SCANRATE",
# "TSYS", "CALFCTR", # "DATA", "FLAGGED",
# "XCALFCTR", "TCAL", "TCALTIME", "AZIMUTH",
# "ELEVATIO", "PARANGLE", "FOCUSAXI", "FOCUSTAN",
# "FOCUSROT", "TAMBIENT", "PRESSURE", "HUMIDITY",
# "WINDSPEE", "WINDDIRE"]
#
# LIST_TFORM = ["I", "I", "10A", "D",
# "E", "16A", "D", "D",
# "D", "16A", "I", "I",
# "D", "D", "E ", "D",
# "D", "D", "D", "2E",
# "2E", "2E", # tformat, tformat2,
# "2E", "2E", "16A", "E",
# "E", "E", "E", "E",
# "E ", "E", "E", "E",
# "E", "E"]
#
# LIST_TUNIT = [""] * len(LIST_TFORM)
def get_data_description_from_model_header(data_format=None):
header = fits.Header.fromstring(model_header, sep='\n')
num = []
list_ttype = []
list_tform = []
list_tunit = []
list_tdim = []
headerdict = dict(header)
for k in header:
if not k.startswith('TTYPE'):
continue
n = k.replace('TTYPE', '')
unit = ""
if 'TUNIT' + n in headerdict:
unit = header['TUNIT' + n]
tform = header['TFORM' + n]
tdim = "0"
if header[k] in ['DATA', 'FLAGGED'] and data_format is not None:
tform = "{}D".format(np.product(data_format))
tdim = str(data_format)
num.append(int(n))
list_ttype.append(header[k])
list_tform.append(tform)
list_tunit.append(unit)
list_tdim.append(tdim)
num = np.asarray(num)
list_ttype = np.asarray(list_ttype)
list_tform = np.asarray(list_tform)
list_tunit = np.asarray(list_tunit)
list_tdim = np.asarray(list_tdim)
order = np.argsort(num)
list_ttype = list_ttype[order]
list_tform = list_tform[order]
list_tunit = list_tunit[order]
list_tdim = list_tdim[order]
return list_ttype, list_tform, list_tunit, list_tdim
def _get_empty_array(length, dim):
"""
Examples
--------
>>> np.all(_get_empty_array(10, "0")[1].flatten() == np.zeros(10))
True
>>> np.all(_get_empty_array(10, "(2,2)")[1].flatten() == np.zeros(40))
True
"""
dim = _parse_tdim(dim)
if dim == ():
return dim, np.zeros(length)
return (dim[1], dim[0]), np.zeros((length, dim[1], dim[0]))
def get_model_HDUlist(data_format, length=1, **kwargs):
"""Produce a model CLASS-compatible HDUlist."""
cols = []
list_ttype, list_tform, list_tunit, list_tdim = \
get_data_description_from_model_header(data_format)
for ttype, tform, tunit, dim in \
zip(list_ttype, list_tform, list_tunit, list_tdim):
newdim, array = _get_empty_array(length, dim)
if newdim != ():
newcol = fits.Column(name=ttype, format=tform, unit=tunit,
dim=newdim, array=array)
else:
newcol = fits.Column(name=ttype, format=tform, unit=tunit,
array=array)
cols.append(newcol)
coldefs = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(
coldefs, header=fits.Header.fromstring(model_header, sep='\n'),
name='SINGLE DISH', **kwargs)
primary_hdu = fits.PrimaryHDU(
header=fits.Header.fromstring(model_primary_header, sep='\n'))
return fits.HDUList([primary_hdu, hdu])
class SDFITS_creator():
"""SDFITS converter"""
def __init__(self, dirname, scandir=None, average=True, use_calon=False,
test=False):
"""Initialization.
Initialization is easy. If scandir is given, the conversion is
done right away.
Parameters
----------
dirname : str
Output directory for products
Other Parameters
----------------
scandir : str
            Input data directory (i.e., the directory containing a set
of subscans plus a summary.fits file)
average : bool, default True
Average all spectra of a given configuration?
use_calon : bool, default False
If False, only the OFF + CAL is used for the calibration. If True,
Also the ON + CAL is used and the calibration constant is averaged
with that obtained through OFF + CAL.
test : bool
Only use for unit tests
"""
self.dirname = dirname
self.test = test
mkdir_p(dirname)
self.summary = {}
self.tables = {}
self.average = average
if scandir is not None:
self.get_scan(scandir, average=average)
self.write_tables_to_disk()
def fill_in_summary(self, summaryfile):
"""Fill in the information contained in the summary.fits file."""
with fits.open(summaryfile) as hdul:
self.summary.update(hdul[0].header)
def get_scan(self, scandir, average=False):
"""Treat the data and produce the output, uncalibrated files.
Fills in the `self.tables` attribute with a dictionary of HDU lists
        containing a primary header and a 'SINGLE DISH' extension in
        SDFITS-compatible FITS format
Parameters
----------
scandir : str
            Input data directory (i.e., the directory containing a set
of subscans plus a summary.fits file)
Other Parameters
----------------
average : bool, default True
Average all spectra of a given configuration?
Returns
-------
tables
"""
scandir = scandir.rstrip('/')
fname = os.path.join(scandir, 'summary.fits')
self.fill_in_summary(fname)
for fname in sorted(glob.glob(os.path.join(scandir, '*.fits'))):
if 'summary' in fname:
continue
subscan = read_data_fitszilla(fname)
location = locations[subscan.meta['site']]
times = Time(subscan['time'] * u.day, format='mjd', scale='utc',
location=location)
date_col = [t.strftime('%d/%m/%y') for t in times.to_datetime()]
            # Different from the CLASS converter: here we take seconds from
            # the first day (when data span multiple days)
ut_col = (times.mjd - np.floor(times.mjd[0])) * 86400
allcolumns = get_chan_columns(subscan)
channels = \
[subscan[ch].meta['channels'] for ch in allcolumns]
if not len(set(channels)) == 1:
raise ValueError("Only files with the same number of spectral "
"bins in each channel are supported. Please "
"report")
classif = classify_chan_columns(allcolumns)
feeds = list(classif.keys())
for f in feeds:
azimuth = subscan['az'][:, f].to(u.deg).value
elevation = subscan['el'][:, f].to(u.deg).value
crval3 = subscan['ra'][:, f].to(u.deg).value
crval4 = subscan['dec'][:, f].to(u.deg).value
columns_allbase = [a for a in allcolumns
if a.startswith('Feed{}'.format(f))]
basebands = \
[interpret_chan_name(ch)[2] for ch in columns_allbase]
for baseband in basebands:
if baseband is None:
baseband = 0
columns = columns_allbase
else:
columns = [ch for ch in columns_allbase if
ch.endswith('{}'.format(baseband))]
ncol = len(columns)
data_matrix = \
np.stack(zip(*[subscan[ch] for ch in columns]))
shape = data_matrix[0].shape
array = subscan[columns[0]]
newhdu = \
get_model_HDUlist(data_format=shape,
length=len(array))
data = newhdu[1].data
nbin = subscan.meta['channels']
bandwidth = array.meta['bandwidth']
restfreq_label = 'RESTFREQ{}'.format(baseband + 1)
if restfreq_label not in self.summary:
restfreq_label = 'RESTFREQ1'
restfreq = self.summary[restfreq_label] * u.MHz
#
# data['RESTFREQ'] = restfreq.to(u.Hz).value
data['EXPOSURE'] = \
array.meta['integration_time'].value
data['TIME'] = ut_col
data['TSYS'] = 1
df = (bandwidth / nbin).to('Hz')
data['CDELT1'] = df
deltav = - df / restfreq * c.c
data['FREQRES'] = df.to('Hz').value
data['TDIM23'] = str(data_matrix[0].shape)
data['TDIM25'] = str(data_matrix[0].shape)
data['DATA'] = data_matrix
data['OBJECT'] = subscan.meta['SOURCE']
data['AZIMUTH'] = azimuth
data['ELEVATIO'] = elevation
data['CRPIX1'] = nbin // 2 + 1
data['CRVAL1'] = \
array.meta['frequency'] + array.meta['bandwidth'] / 2
data['CRVAL3'] = crval3
data['CRVAL4'] = crval4
data['PARANGLE'] = subscan['par_angle']
data['FOCUSROT'] = subscan['derot_angle']
data['CRVAL4'] = crval4
weather = subscan['weather']
data["HUMIDITY"] = weather[:, 0]
data["TAMBIENT"] = weather[:, 1]
data["PRESSURE"] = weather[:, 2]
data["BEAM"] = f
data['DATE-OBS'] = date_col[0]
data['OBJ-RA'] = subscan['ra'][:, f].to(u.deg).value
data['OBJ-DEC'] = subscan['dec'][:, f].to(u.deg).value
data['RESTFRQ'] = restfreq.to(u.Hz).value
data['BANDWID'] = array.meta['bandwidth'].to(u.Hz).value
data['OBSMODE'] = subscan.meta['SubScanType']
header = newhdu[1].header
header['TELESCOP'] = subscan.meta['site']
header['OBSERVER'] = subscan.meta['OBSERVER']
header['OBSGEO-X'] = \
locations[subscan.meta['site']].x.to('m').value
header['OBSGEO-Y'] = \
locations[subscan.meta['site']].y.to('m').value
header['OBSGEO-Z'] = \
locations[subscan.meta['site']].z.to('m').value
header['CTYPE1'] = "FREQ"
header['CRVAL'] = 0
header['CRVAL3'] = \
np.mean(subscan['ra'][:, f].to(u.deg).value)
header['CRVAL4'] = \
np.mean(subscan['dec'][:, f].to(u.deg).value)
header['LINE'] = subscan.meta['SOURCE']
header['DATE-OBS'] = date_col[0]
header['DATE-RED'] = \
Time.now().to_datetime().strftime('%d/%m/%y')
header['LINE'] = \
"FEED{}-{:3.3f}-MHz".format(f,
bandwidth.to('MHz').value)
header['CDELT1'] = df.to('Hz').value
header['CDELT3'] = \
subscan.meta["ra_offset"].to(u.deg).value
header['CDELT4'] = \
subscan.meta["dec_offset"].to(u.deg).value
header['RESTFREQ'] = restfreq.to(u.Hz).value
header['MAXIS1'] = channels[0]
filekey = \
os.path.basename(scandir) + \
'_all_feed{}_bband{}'.format(f, baseband)
if filekey in list(self.tables.keys()):
hdul = self.tables[filekey]
nrows1, nrows2 = len(hdul[1].data), len(data)
nrows = nrows1 + nrows2
newhdu = fits.BinTableHDU.from_columns(hdul[1].columns,
nrows=nrows)
for col in hdul[1].columns:
name = col.name
newhdu.data[name][:nrows1] = hdul[1].data[name]
newhdu.data[name][nrows1:] = data[name]
hdul[1].data = newhdu.data
else:
self.tables[filekey] = newhdu
return self.tables
def write_tables_to_disk(self):
"""Write all HDU lists produced until now in separate FITS files."""
for (filekey, table) in self.tables.items():
outfile = os.path.join(self.dirname, '{}.fits'.format(filekey))
table.writeto(outfile, overwrite=True)
| {
"repo_name": "matteobachetti/srt-single-dish-tools",
"path": "srttools/converters/sdfits.py",
"copies": "1",
"size": "21136",
"license": "bsd-3-clause",
"hash": 2668685654415365000,
"line_mean": 40.9365079365,
"line_max": 79,
"alpha_frac": 0.4872728993,
"autogenerated": false,
"ratio": 3.6260078915765996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9613139067520591,
"avg_score": 0.000028344671201814057,
"num_lines": 504
} |
from __future__ import (absolute_import, division,
print_function)
from astropy.io import fits
import numpy as np
from scipy.optimize import curve_fit
from .utils import HAS_MPL
from astropy import log
if HAS_MPL:
import matplotlib.pyplot as plt
def exptau(airmass, tatm, tau, t0):
"""Function to fit to the T vs airmass data."""
bx = np.exp(-tau * airmass)
return tatm * (1 - bx) + t0
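# Illustrative numbers (not from any real scan): with tatm = 280 K,
# tau = 0.05 and t0 = 30 K, exptau gives about 43.7 K at airmass 1 (zenith)
# and about 56.6 K at airmass 2 (elevation 30 deg). Fitting this growth of
# system temperature with airmass is what yields the opacity below.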
def calculate_opacity(file, plot=True, tatm=None, tau0=None, t0=None):
"""Calculate opacity from a skydip scan.
Atmosphere temperature is fixed, from Buffa et al.'s calculations.
Parameters
----------
file : str
File name of the skydip scan in Fits format
plot : bool
Plot diagnostics about the fit
Other parameters
----------------
tatm : float
Atmospheric temperature (fixed in the fit). The default value is
        calculated from an empirical formula.
tau0 : float
Initial opacity in the fit. The default value is
np.log(2 / (1 + np.sqrt(1 - 4 * (t30 - t90) / tatm))), where
t30 and t90 are the Tsys values calculated at 30 and 90 degrees of
elevation respectively.
t0 : float
Initial value for Tsys in the fit.
Returns
-------
opacities : dict
Dictionary containing the opacities calculated for each channel, plus
the time in the middle of the observation.
"""
with fits.open(file) as hdulist:
data = hdulist['DATA TABLE'].data
tempdata = hdulist['ANTENNA TEMP TABLE'].data
rfdata = hdulist['RF INPUTS'].data
time = np.mean(data['Time'])
freq = (rfdata['frequency'] + rfdata['bandwidth'] / 2)[0]
elevation = data['el']
airmass = 1 / np.sin(elevation)
if tatm is None:
airtemp = np.median(data['weather'][:, 1])
tatm = 0.683 * (airtemp + 273.15) + 78
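        # Quick arithmetic check (illustrative): an ambient temperature of
        # 20 C gives tatm ~= 0.683 * 293.15 + 78 ~= 278 K.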
el30 = np.argmin(np.abs(elevation - np.radians(30)))
el90 = np.argmin(np.abs(elevation - np.radians(90)))
results = {'time': time}
for ch in ['Ch0', 'Ch1']:
temp = tempdata[ch]
if plot and HAS_MPL:
fig = plt.figure(ch)
plt.scatter(airmass, temp, c='k')
if tau0 is None:
t90 = temp[el90]
t30 = temp[el30]
tau0 = np.log(2 / (1 + np.sqrt(1 - 4 * (t30 - t90) / tatm)))
if t0 is None:
t0 = freq / 1e3
init_par = [tatm, tau0, t0]
epsilon = 1.e-5
par, _ = curve_fit(exptau, airmass, temp, p0=init_par,
maxfev=10000000,
bounds=([tatm - epsilon, -np.inf, -np.inf],
[tatm + epsilon, np.inf, np.inf]))
log.info('The opacity for channel {} is {}'.format(ch, par[1]))
if plot and HAS_MPL:
plt.plot(airmass, exptau(airmass, *par), color='r', zorder=10)
plt.xlabel('Airmass')
plt.ylabel('T (K)')
plt.title('T_atm: {:.2f}; tau: {:.4f}; t0: {:.2f}'.format(*par))
plt.savefig(file.replace('.fits', '_fit_{}.png'.format(ch)))
plt.close(fig)
results[ch] = par[1]
return results
def main_opacity(args=None):
import argparse
description = ('Calculate opacity from a skydip scan and plot the fit '
'results')
parser = argparse.ArgumentParser(description=description)
parser.add_argument("files", nargs='+',
help="File to inspect",
default=None, type=str)
parser.add_argument("--tatm", type=float, default=None,
help='Atmospheric temperature')
parser.add_argument("--tau0", type=float, default=None,
help='Initial value for tau (to be fit)')
parser.add_argument("--t0", type=float, default=None,
help='Initial value for Tsys (to be fitted)')
args = parser.parse_args(args)
for f in args.files:
_ = calculate_opacity(f, tatm=args.tatm, tau0=args.tau0, t0=args.t0)
| {
"repo_name": "matteobachetti/srt-single-dish-tools",
"path": "srttools/opacity.py",
"copies": "1",
"size": "4070",
"license": "bsd-3-clause",
"hash": 5056985700740327000,
"line_mean": 30.796875,
"line_max": 77,
"alpha_frac": 0.5638820639,
"autogenerated": false,
"ratio": 3.563922942206655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46278050061066556,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
from flask import request
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from flask.views import MethodView
from conf.appconfig import MIME_HEALTH_V1, SCHEMA_HEALTH_V1, MIME_JSON, \
HEALTH_OK, BOOLEAN_TRUE_VALUES
from deployer.services.health import get_health
from deployer.views import hypermedia
from deployer.views.util import build_response
class HealthApi(MethodView):
"""
Health API
"""
@hypermedia.produces(
{
MIME_HEALTH_V1: SCHEMA_HEALTH_V1,
MIME_JSON: SCHEMA_HEALTH_V1
}, default=MIME_HEALTH_V1)
def get(self, **kwargs):
"""
Health endpoint for Orchestrator
        :return: Flask JSON response containing the health status.
"""
check_celery = request.args.get('celery', 'true').lower() in \
BOOLEAN_TRUE_VALUES
health = get_health(check_celery)
failed_checks = [
health_status['status'] for health_status in health.values()
if health_status['status'] != HEALTH_OK
]
http_status = 200 if not failed_checks else 500
return build_response(health, status=http_status)
def register(app, **kwargs):
"""
Registers HealthApi ('/health')
Only GET operation is available.
:param app: Flask application
:return: None
"""
app.add_url_rule('/health', view_func=HealthApi.as_view('health'),
methods=['GET'])
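# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical check of the '/health' endpoint using Flask's test
# client. The bare Flask() app below is an assumption; the real application
# object is created elsewhere in the project.
def _example_health_check():
    from flask import Flask
    app = Flask(__name__)
    register(app)
    with app.test_client() as client:
        resp = client.get('/health?celery=false')
        # 200 when all checks pass, 500 when any check reports a failure
        return resp.status_code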
| {
"repo_name": "totem/cluster-deployer",
"path": "deployer/views/health.py",
"copies": "1",
"size": "1645",
"license": "mit",
"hash": 3685962042694551000,
"line_mean": 29.462962963,
"line_max": 73,
"alpha_frac": 0.623100304,
"autogenerated": false,
"ratio": 3.7133182844243793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9836418588424379,
"avg_score": 0,
"num_lines": 54
} |
from __future__ import (absolute_import, division,
print_function)
import math
import numpy as np
import matplotlib
from matplotlib.figure import Figure
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.collections import BrokenBarHCollection
import matplotlib.ticker as mticker
from matplotlib.colors import LogNorm
from enum import Enum
from mpl_toolkits.axes_grid1 import ImageGrid
from atom.api import Atom, Str, observe, Typed, Int, List, Dict, Float, Bool
from skbeam.core.fitting.xrf_model import (K_TRANSITIONS, L_TRANSITIONS, M_TRANSITIONS)
from skbeam.fluorescence import XrfElement as Element
from ..core.xrf_utils import get_eline_parameters
import logging
logger = logging.getLogger(__name__)
def get_color_name():
# usually line plot will not go beyond 10
first_ten = ['indigo', 'maroon', 'green', 'darkblue', 'darkgoldenrod', 'blue',
'darkcyan', 'sandybrown', 'black', 'darkolivegreen']
    # Avoid red colors, as they conflict with the emission lines' color.
nonred_list = [v for v in matplotlib.colors.cnames.keys()
if 'pink' not in v and 'fire' not in v and
'sage' not in v and 'tomato' not in v and 'red' not in v]
return first_ten + nonred_list + list(matplotlib.colors.cnames.keys())
class PlotTypes(Enum):
LINLOG = 0
LINEAR = 1
class EnergyRangePresets(Enum):
SELECTED_RANGE = 0
FULL_SPECTRUM = 1
class MapTypes(Enum):
LINEAR = 0
LOG = 1
class MapAxesUnits(Enum):
PIXELS = 0
POSITIONS = 1
class LinePlotModel(Atom):
"""
This class performs all the required line plots.
Attributes
----------
data : array
Experimental data
    _fig : class object
Figure object from matplotlib
_ax : class object
Axis object from matplotlib
_canvas : class object
Canvas object from matplotlib
element_id : int
Index of element
parameters : `atom.List`
A list of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information
elist : list
Emission energy and intensity for given element
plot_opt : int
Linear or log plot
total_y : dict
Results for k lines
total_l : dict
Results for l and m lines
prefit_x : array
X axis with limited range
plot_title : str
Title for plotting
fit_x : array
x value for fitting
fit_y : array
fitted data
plot_type_names : list
linear or log plot
max_v : float
max value of data array
incident_energy : float
        in keV
param_model : Typed(object)
Reference to ParamModel object
"""
# data = Typed(object) # Typed(np.ndarray)
exp_data_label = Str('experiment')
number_pts_to_show = Int(3000) # The number of spectrum point to show
# -------------------------------------------------------------
# Preview plot (raw experimental spectra)
_fig_preview = Typed(Figure)
_ax_preview = Typed(Axes)
_lines_preview = List()
_bahr_preview = Typed(BrokenBarHCollection)
plot_type_preview = Typed(PlotTypes)
energy_range_preview = Typed(EnergyRangePresets)
min_v_preview = Float()
max_v_preview = Float()
min_e_preview = Float()
max_e_preview = Float()
# -----------------------------------------------------------
# Preview of Total Count Maps
_fig_maps = Typed(Figure)
map_type_preview = Typed(MapTypes)
map_axes_units_preview = Typed(MapAxesUnits)
map_scatter_plot = Bool(False)
map_preview_color_scheme = Str('viridis')
map_preview_range_low = Float(-1)
map_preview_range_high = Float(-1)
# ------------------------------------------------------------
_fig = Typed(Figure)
_ax = Typed(Axes)
_canvas = Typed(object)
plot_fit_x_min = Float(0) # The variables are used to store x_min and x_max for the current plot
plot_fit_x_max = Float(0)
element_id = Int(0)
elist = List()
scale_opt = Int(0)
# total_y = Dict()
# total_l = Dict()
# total_m = Dict()
# total_pileup = Dict()
prefit_x = Typed(object)
plot_title = Str()
# fit_x = Typed(np.ndarray)
# fit_y = Typed(np.ndarray)
# residual = Typed(np.ndarray)
plot_type_names = List()
max_v = Float()
incident_energy = Float(12.0)
energy_range_names = List()
energy_range_fitting = Str()
eline_obj = List()
plot_exp_opt = Bool(False)
plot_exp_obj = Typed(Line2D)
show_exp_opt = Bool(False) # Flag: show spectrum preview
# Reference to artist responsible for displaying the selected range of energies on the plot
plot_energy_barh = Typed(BrokenBarHCollection)
t_bar = Typed(object)
plot_exp_list = List()
auto_fit_obj = List()
show_autofit_opt = Bool()
plot_fit_obj = List() # Typed(Line2D)
show_fit_opt = Bool(False)
# fit_all = Typed(object)
plot_style = Dict()
roi_plot_dict = Dict()
roi_dict = Typed(object) # OrderedDict()
log_range = List()
linear_range = List()
plot_escape_line = Int(0)
emission_line_window = Bool(True)
det_materials = Int(0)
escape_e = Float(1.73998)
limit_cut = Int()
# prefix_name_roi = Str()
# element_for_roi = Str()
# element_list_roi = List()
# roi_dict = Typed(object) #OrderedDict()
# img_dict = Dict()
# roi_result = Dict()
# Reference to ParamModel object
param_model = Typed(object)
# Reference to FileIOModel object
io_model = Typed(object)
# Location of the vertical (mouse-selected) marker on the plot.
    # The value is in keV. A negative value means that no marker is placed.
vertical_marker_kev = Float(-1)
# Reference to the respective Matplotlib artist
line_vertical_marker = Typed(object)
vertical_marker_is_visible = Bool(False)
report_marker_state = Typed(object)
def __init__(self, *, param_model, io_model):
# Reference to ParamModel object
self.param_model = param_model
self.io_model = io_model
# self.data = None
self._fig = plt.figure()
self._ax = self._fig.add_subplot(111)
try:
self._ax.set_axis_bgcolor('lightgrey')
except AttributeError:
self._ax.set_facecolor('lightgrey')
self._ax.set_xlabel('Energy (keV)')
self._ax.set_ylabel('Spectrum (Counts)')
self._ax.grid(which="both")
self._ax.set_yscale('log')
self.plot_type_names = ['LinLog', 'Linear']
self.energy_range_names = ["selected", "full"]
self.energy_range_fitting = "selected"
self._ax.autoscale_view(tight=True)
self._ax.legend(loc=2)
self._color_config()
self._fig.tight_layout(pad=0.5)
self.max_v = 1.0
        # When we calculate the max value, data below 500 (about 0.5 keV) can be ignored.
        # The last points of the data are also huge and should be cut off.
self.limit_cut = 100
# self._ax.margins(x=0.0, y=0.10)
# --------------------------------------------------------------
# Spectrum preview figure
self._fig_preview = Figure()
self.plot_type_preview = PlotTypes.LINLOG
self.energy_range_preview = EnergyRangePresets.SELECTED_RANGE
# --------------------------------------------------------------
# Preview of Total Count Maps
self._fig_maps = Figure()
self.map_type_preview = MapTypes.LINEAR
self.map_axes_units_preview = MapAxesUnits.PIXELS
def _color_config(self):
self.plot_style = {
'experiment': {'color': 'blue', 'linestyle': '',
'marker': '.', 'label': self.exp_data_label},
'background': {'color': 'indigo', 'marker': '+',
'markersize': 1, 'label': 'background'},
'emission_line': {'color': 'black', 'linewidth': 2},
'roi_line': {'color': 'red', 'linewidth': 2},
'k_line': {'color': 'green', 'label': 'k lines'},
'l_line': {'color': 'magenta', 'label': 'l lines'},
'm_line': {'color': 'brown', 'label': 'm lines'},
'compton': {'color': 'darkcyan', 'linewidth': 1.5, 'label': 'compton'},
'elastic': {'color': 'purple', 'label': 'elastic'},
'escape': {'color': 'darkblue', 'label': 'escape'},
'pileup': {'color': 'darkgoldenrod', 'label': 'pileup'},
'userpeak': {'color': 'orange', 'label': 'userpeak'},
# 'auto_fit': {'color': 'black', 'label': 'auto fitted', 'linewidth': 2.5},
'fit': {'color': 'red', 'label': 'fit', 'linewidth': 2.5},
'residual': {'color': 'black', 'label': 'residual', 'linewidth': 2.0}
}
def plot_exp_data_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.plot_exp_opt = False # exp data for fitting
self.show_exp_opt = False # all exp data from different channels
self.show_fit_opt = False
# Reset currently selected element_id (mostly to reset GUI elements)
self.element_id = 0
def init_mouse_event(self):
"""Set up callback for mouse button-press event"""
# Reference to the toolbar
self.t_bar = self._fig.canvas.toolbar
# Set callback for Button Press event
self._fig.canvas.mpl_connect("button_press_event", self.canvas_onpress)
def _update_canvas(self):
# It may be sufficient to initialize the event only once, but at this point
# it seems to be the most reliable option. May be changed in the future.
self.init_mouse_event()
self.plot_vertical_marker()
self._ax.legend(loc=2)
try:
self._ax.legend(framealpha=0.2).set_draggable(True)
except AttributeError:
self._ax.legend(framealpha=0.2)
self._fig.tight_layout(pad=0.5)
# self._ax.margins(x=0.0, y=0.10)
# when we click the home button on matplotlib gui,
# relim will remember the previously defined x range
self._ax.relim(visible_only=True)
self._fig.canvas.draw()
def _update_ylimit(self):
# manually define y limit, from experience
self.log_range = [self.max_v*1e-5, self.max_v*2]
# self.linear_range = [-0.3*self.max_v, self.max_v*1.2]
self.linear_range = [0, self.max_v*1.2]
def exp_label_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.exp_data_label = change['value']
self.plot_style['experiment']['label'] = change['value']
# @observe('exp_data_label')
# def _change_exp_label(self, change):
# if change['type'] == 'create':
# return
# self.plot_style['experiment']['label'] = change['value']
@observe('parameters')
def _update_energy(self, change):
if 'coherent_sct_energy' not in self.param_model.param_new:
return
self.incident_energy = self.param_model.param_new['coherent_sct_energy']['value']
def set_energy_range_fitting(self, energy_range_name):
if energy_range_name not in self.energy_range_names:
raise ValueError(f"Unknown energy range name {energy_range_name}. "
f"Allowed names: {self.energy_range_names}")
self.energy_range_fitting = energy_range_name
self.plot_experiment()
def set_incident_energy(self, change):
"""
The observer function that changes the value of incident energy
and upper bound for fitted energy range. Should not be called directly.
Parameters
----------
change : dict
``change["value"]`` is the new value of incident energy
"""
self.change_incident_energy(change["value"])
def change_incident_energy(self, energy_new):
"""
        The function that changes the value of the incident energy
        and the upper bound of the fitted energy range.
Parameters
----------
        energy_new : float
New value of incident energy
"""
margin = 0.8 # Value by which the upper bound of the range used for fitting
# exceeds the incident energy. Selected for convenience, but
# is subject to change. This is the place to change it to take effect
# throughout the program.
# Limit the number of decimal points for better visual presentation
energy_new = round(energy_new, ndigits=6)
# Change the value twice to ensure that all observer functions are called
self.incident_energy = energy_new + 1.0 # Arbitrary number different from 'energy_new'
self.incident_energy = energy_new
if 'coherent_sct_energy' in self.param_model.param_new:
self.param_model.param_new['coherent_sct_energy']['value'] = energy_new
# Change the value twice to ensure that all observer functions are called
self.param_model.energy_bound_high_buf = energy_new + 1.8 # Arbitrary number
upper_bound = energy_new + margin
# Limit the number of decimal points for better visual presentation
upper_bound = round(upper_bound, ndigits=5)
self.param_model.energy_bound_high_buf = upper_bound
@observe('scale_opt')
def _new_opt(self, change):
self.log_linear_plot()
self._update_canvas()
def energy_bound_high_update(self, change):
"""Observer function for 'param_model.energy_bound_high_buf'"""
if self.io_model.data is None:
return
self.exp_data_update({"value": self.io_model.data})
self.plot_selected_energy_range_original(e_high=change["value"])
self.plot_vertical_marker(e_high=change["value"])
self._update_canvas()
def energy_bound_low_update(self, change):
"""Observer function for 'param_model.energy_bound_low_buf'"""
if self.io_model.data is None:
return
self.exp_data_update({"value": self.io_model.data})
self.plot_selected_energy_range_original(e_low=change["value"])
self.plot_vertical_marker(e_low=change["value"])
self._update_canvas()
def log_linear_plot(self):
if self.plot_type_names[self.scale_opt] == 'LinLog':
self._ax.set_yscale('log')
# self._ax.margins(x=0.0, y=0.5)
# self._ax.autoscale_view(tight=True)
# self._ax.relim(visible_only=True)
self._ax.set_ylim(self.log_range)
else:
self._ax.set_yscale('linear')
# self._ax.margins(x=0.0, y=0.10)
# self._ax.autoscale_view(tight=True)
# self._ax.relim(visible_only=True)
self._ax.set_ylim(self.linear_range)
def exp_data_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
        change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
# TODO: This function does not change the data. Instead it is expected to
        # perform a number of operations when the data is changed.
# self.data = change['value']
if self.io_model.data is None:
return
e_range = self.energy_range_fitting
e_range_full, e_range_selected = "full", "selected"
        if not set([e_range_full, e_range_selected]).issubset(self.energy_range_names):
raise ValueError(f"Some names for energy range {(e_range_full, e_range_selected)} are not supported. "
"Please report the error to the development team.")
if e_range not in (e_range_full, e_range_selected):
logger.error(f"Spectrum preview: Unknown option for the energy range: {e_range}\n"
"Please report the error to the development team.")
# This is not a critical error, so we still can proceed
e_range = e_range_full
if not self.param_model.param_new:
return
# The number of points in the displayed dataset
n_dset_points = len(self.io_model.data)
if e_range == e_range_selected:
n_range_low, n_range_high = self.selected_range_indices(n_indexes=n_dset_points)
else:
n_range_low, n_range_high = 0, n_dset_points
n_low = int(np.clip(n_range_low, a_min=0, a_max=n_dset_points - 1))
n_high = int(np.clip(n_range_high, a_min=1, a_max=n_dset_points))
        # Find the maximum value (skip the first and last 'limit_cut' points of the dataset)
n1, n2 = max(self.limit_cut, n_low), min(n_dset_points-self.limit_cut, n_high)
if n2 <= n1: # This is just a precaution: it is expected that n_dset_points >> 2 * limit_cut
n1, n2 = n_low, n_high
self.max_v = float(np.max(self.io_model.data[n1: n2]))
try:
self.plot_exp_obj.remove()
logger.debug('Previous experimental data is removed.')
except AttributeError:
logger.debug('No need to remove experimental data.')
data_arr = self.io_model.data
x_v = (self.param_model.param_new['e_offset']['value'] +
np.arange(n_low, n_high) *
self.param_model.param_new['e_linear']['value'] +
np.arange(n_low, n_high)**2 *
self.param_model.param_new['e_quadratic']['value'])
data_arr = data_arr[n_low: n_high]
self.plot_exp_obj, = self._ax.plot(x_v, data_arr,
linestyle=self.plot_style['experiment']['linestyle'],
color=self.plot_style['experiment']['color'],
marker=self.plot_style['experiment']['marker'],
label=self.plot_style['experiment']['label'])
# Rescale the plot along x-axis if needed
x_min, x_max = x_v[0], x_v[-1]
if (x_min != self.plot_fit_x_min) or (x_max != self.plot_fit_x_max):
self.plot_fit_x_min = x_min
self.plot_fit_x_max = x_max
self._ax.set_xlim(x_min, x_max)
self._update_ylimit()
self.log_linear_plot()
self._set_eline_select_controls()
self.plot_selected_energy_range_original()
# _show_hide_exp_plot is called to show or hide current plot based
# on the state of _show_exp_opt flag
self._show_hide_exp_plot(self.show_exp_opt or self.plot_exp_opt)
def _show_hide_exp_plot(self, plot_show):
if self.io_model.data is None:
return
try:
if plot_show:
self.plot_exp_obj.set_visible(True)
lab = self.plot_exp_obj.get_label()
self.plot_exp_obj.set_label(lab.strip('_'))
else:
self.plot_exp_obj.set_visible(False)
lab = self.plot_exp_obj.get_label()
self.plot_exp_obj.set_label('_' + lab)
self._update_canvas()
except Exception:
pass
@observe('plot_exp_opt')
def _new_exp_plot_opt(self, change):
if self.io_model.data is None:
return
if change['type'] != 'create':
if change['value']:
self.plot_experiment()
# _show_hide_exp_plot is already called inside 'plot_experiment()',
# but visibility flag was not used correctly. So we need to
# call it again.
self._show_hide_exp_plot(change['value'])
self._set_eline_select_controls()
# @observe('show_exp_opt')
# def _update_exp(self, change):
# if change['type'] != 'create':
# if change['value']:
# if len(self.plot_exp_list):
# for v in self.plot_exp_list:
# v.set_visible(True)
# lab = v.get_label()
# if lab != '_nolegend_':
# v.set_label(lab.strip('_'))
# else:
# if len(self.plot_exp_list):
# for v in self.plot_exp_list:
# v.set_visible(False)
# lab = v.get_label()
# if lab != '_nolegend_':
# v.set_label('_' + lab)
# self._update_canvas()
@observe('show_fit_opt')
def _update_fit(self, change):
if change['type'] != 'create':
if change['value']:
for v in self.plot_fit_obj:
v.set_visible(True)
lab = v.get_label()
if lab != '_nolegend_':
v.set_label(lab.strip('_'))
else:
for v in self.plot_fit_obj:
v.set_visible(False)
lab = v.get_label()
if lab != '_nolegend_':
v.set_label('_' + lab)
self._update_canvas()
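    # Note: Matplotlib omits artists whose label starts with an underscore from
    # the legend, which is why the code above toggles a leading '_' on each
    # label to hide/show legend entries together with the lines themselves.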
def plot_experiment(self):
"""
        Plot raw experimental data for fitting.
"""
# Do nothing if no data is loaded
if self.io_model.data is None:
return
data_arr = np.asarray(self.io_model.data)
self.exp_data_update({'value': data_arr})
def plot_vertical_marker(self, *, e_low=None, e_high=None):
# It doesn't seem necessary to force the marker inside the selected range.
# It may be used for purposes that require to set it outside the range
# self._vertical_marker_set_inside_range(e_low=e_low, e_high=e_high)
x_v = (self.vertical_marker_kev, self.vertical_marker_kev)
y_v = (-1e30, 1e30) # This will cover the range of possible values of accumulated counts
if self.line_vertical_marker:
self._ax.lines.remove(self.line_vertical_marker)
self.line_vertical_marker = None
if self.vertical_marker_is_visible:
self.line_vertical_marker, = self._ax.plot(x_v, y_v, color="blue")
def set_plot_vertical_marker(self, marker_position=None, mouse_clicked=False):
"""
The function is called when setting the position of the marker interactively
If the parameter `marker_position` is `None`, then don't set or change the value.
Just make the marker visible.
"""
# Ignore the new value if it is outside the range of selected energies.
# If 'marker_position' is None, then show the marker at its current location.
        # Completely ignore clicks if 'marker_position' is outside the range (but still
        # display the marker if 'mouse_clicked' is False).
marker_in_range = True
if marker_position is not None:
e_low = self.param_model.param_new['non_fitting_values']['energy_bound_low']['value']
e_high = self.param_model.param_new['non_fitting_values']['energy_bound_high']['value']
if e_low <= marker_position <= e_high or not mouse_clicked:
# If the function was called to display marker (e.g. for existing peak) outside
# the selected range, then show it. If button was clicked, then ignore it.
self.vertical_marker_kev = marker_position
else:
marker_in_range = False
if marker_in_range:
# Make the marker visible
self.vertical_marker_is_visible = True
# Compute peak intensity. The displayed value will change only for user defined peak,
# since it is moved to the position of the marker.
self.compute_manual_peak_intensity()
# Update the location of the marker and the canvas
self.plot_vertical_marker()
self._update_canvas()
if mouse_clicked:
try:
self.report_marker_state(True) # This is an externally set callback function
except Exception:
pass
def hide_plot_vertical_marker(self, mouse_clicked=False):
"""Hide vertical marker"""
self.vertical_marker_is_visible = False
self.plot_vertical_marker()
self._update_canvas()
if mouse_clicked:
try:
self.report_marker_state(False) # This is an externally set callback function
except Exception:
pass
def plot_selected_energy_range_original(self, *, e_low=None, e_high=None):
"""
Plot the range of energies selected for processing. The range may be optionally
        provided as arguments. Any range values that are not provided are read from the
        globally accessible dictionary of parameters. The values passed as arguments
are mainly used if the function is called during interactive update of the
range, when the order of update is undetermined and the parameter dictionary
may be updated after the function is called.
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new['non_fitting_values']['energy_bound_low']['value']
if e_high is None:
e_high = self.param_model.param_new['non_fitting_values']['energy_bound_high']['value']
n_x = 4096 # Set to the maximum possible number of points
# Generate the values for 'energy' axis
x_v = (self.param_model.param_new['e_offset']['value'] +
np.arange(n_x) *
self.param_model.param_new['e_linear']['value'] +
np.arange(n_x) ** 2 *
self.param_model.param_new['e_quadratic']['value'])
ss = (x_v < e_high) & (x_v > e_low)
y_min, y_max = -1e30, 1e30 # Select the max and min values for plotted rectangles
# Remove the plot if it exists
if self.plot_energy_barh in self._ax.collections:
self._ax.collections.remove(self.plot_energy_barh)
        # Create the new plot (based on new parameters if necessary)
self.plot_energy_barh = BrokenBarHCollection.span_where(
x_v, ymin=y_min, ymax=y_max, where=ss, facecolor='white', edgecolor='yellow', alpha=1)
self._ax.add_collection(self.plot_energy_barh)
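    # --- Illustrative sketch (not part of the original class) ---
    # The quadratic energy calibration used throughout this class maps channel
    # number n to energy in keV: E(n) = e_offset + e_linear*n + e_quadratic*n**2.
    # The hypothetical helper below only makes that mapping explicit.
    @staticmethod
    def _example_energy_axis(e_offset, e_linear, e_quadratic, n_points=4096):
        n = np.arange(n_points)
        return e_offset + e_linear * n + e_quadratic * n ** 2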
def plot_multi_exp_data(self):
while(len(self.plot_exp_list)):
self.plot_exp_list.pop().remove()
color_n = get_color_name()
self.max_v = 1.0
m = 0
for (k, v) in self.io_model.data_sets.items():
if v.selected_for_preview:
data_arr = np.asarray(v.data)
# Truncate the array (1D spectrum)
data_arr = data_arr[0: self.number_pts_to_show]
self.max_v = np.max([self.max_v,
np.max(data_arr[self.limit_cut:-self.limit_cut])])
x_v = (self.param_model.param_new['e_offset']['value'] +
np.arange(len(data_arr)) *
self.param_model.param_new['e_linear']['value'] +
np.arange(len(data_arr))**2 *
self.param_model.param_new['e_quadratic']['value'])
plot_exp_obj, = self._ax.plot(x_v, data_arr,
color=color_n[m],
label=v.filename.split('.')[0],
linestyle=self.plot_style['experiment']['linestyle'],
marker=self.plot_style['experiment']['marker'])
self.plot_exp_list.append(plot_exp_obj)
m += 1
self.plot_selected_energy_range_original()
self._update_ylimit()
self.log_linear_plot()
self._update_canvas()
def plot_emission_line(self):
"""
Plot emission line and escape peaks associated with given lines.
The value of self.max_v is needed in this function in order to plot
the relative height of each emission line.
"""
while(len(self.eline_obj)):
self.eline_obj.pop().remove()
escape_e = self.escape_e
if len(self.elist):
for i in range(len(self.elist)):
eline, = self._ax.plot([self.elist[i][0], self.elist[i][0]],
[0, self.elist[i][1]*self.max_v],
color=self.plot_style['emission_line']['color'],
linewidth=self.plot_style['emission_line']['linewidth'])
self.eline_obj.append(eline)
if self.plot_escape_line and self.elist[i][0] > escape_e:
eline, = self._ax.plot([self.elist[i][0]-escape_e,
self.elist[i][0]-escape_e],
[0, self.elist[i][1]*self.max_v],
color=self.plot_style['escape']['color'],
linewidth=self.plot_style['emission_line']['linewidth'])
self.eline_obj.append(eline)
def _set_eline_select_controls(self, *, element_id=None, data="use_self_data"):
if element_id is None:
element_id = self.element_id
if data == "use_self_data":
data = self.io_model.data
def is_line_in_selected_list(self, n_id):
"""
Checks if the line with ID ``n_id`` is in the list of
selected element lines.
Used to enable/disable 'Add Line' and 'Remove Line' buttons.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to ``self.element_id``)
Returns True if the element line
is in the list of selected lines. False otherwise.
"""
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
if self.param_model.EC.is_element_in_list(ename):
return True
else:
return False
def is_element_line_id_valid(self, n_id):
"""
Checks if ID (``n_id``) of the element emission line is valid,
i.e. the name of the line may be obtained by using the ID.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns True if the element line is valid
"""
# There may be a more efficient way to check 'n_id',
# but we want to use the same function as we use
        # to retrieve the line name
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
else:
return True
def get_element_line_name_by_id(self, n_id):
"""
Retrieves the name of the element emission line from its ID
(the number in the list). The lines are numbered starting with 1.
If the ID is invalid, the function returns None.
Parameters
----------
n_id : int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns the line name (str). If the name can not be retrieved, then
the function returns None.
"""
if n_id < 1:
# Elements are numbered starting with 1. Element #0 does not exist.
# (Element #0 means that no element is selected)
return None
# This is the fixed list of element emission line names.
# The element with ID==1 is found in total_list[0]
total_list = self.param_model.get_user_peak_list()
try:
ename = total_list[n_id-1]
except Exception:
ename = None
return ename
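    # Example (hypothetical list): if get_user_peak_list() returned
    # ['Ca_K', 'Fe_K', ...], then get_element_line_name_by_id(1) -> 'Ca_K',
    # get_element_line_name_by_id(2) -> 'Fe_K', and an ID of 0 -> None.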
def _vertical_marker_set_inside_range(self, *, e_low=None, e_high=None):
"""
Don't move the marker if it is inside range. If it is outside range,
then set the marker to the center of the range
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new['non_fitting_values']['energy_bound_low']['value']
if e_high is None:
e_high = self.param_model.param_new['non_fitting_values']['energy_bound_high']['value']
# By default, place the marker in the middle of the range if its original position
# is outside the range
if (self.vertical_marker_kev > e_high) or (self.vertical_marker_kev < e_low):
self.vertical_marker_kev = (e_low + e_high) / 2.0
def _fill_elist(self):
_elist = []
incident_energy = self.incident_energy
k_len = len(K_TRANSITIONS)
l_len = len(L_TRANSITIONS)
m_len = len(M_TRANSITIONS)
ename = self.get_element_line_name_by_id(self.element_id)
if ename is not None:
_elist = []
if ename.lower().startswith("userpeak"):
# Make sure that the marker is in the selected range of energies
self._vertical_marker_set_inside_range()
# The tuple structure: (center_energy, ratio)
_elist.append((self.vertical_marker_kev, 1.0))
elif '_K' in ename:
e = Element(ename[:-2])
if e.cs(incident_energy)['ka1'] != 0:
for i in range(k_len):
_elist.append((e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1]
/ e.cs(incident_energy).all[0][1]))
elif '_L' in ename:
e = Element(ename[:-2])
if e.cs(incident_energy)['la1'] != 0:
for i in range(k_len, k_len+l_len):
_elist.append((e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1]
/ e.cs(incident_energy).all[k_len][1]))
else:
e = Element(ename[:-2])
if e.cs(incident_energy)['ma1'] != 0:
for i in range(k_len+l_len, k_len+l_len+m_len):
_elist.append((e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1]
/ e.cs(incident_energy).all[k_len+l_len][1]))
return _elist
def _get_pileup_lines(self, eline):
"""
Returns the energy (center) of pileup peak. And the energies of two components.
Parameters
----------
eline: str
Name of the pileup peak, e.g. V_Ka1-Co_Ka1
Returns
-------
list(float)
Energy in keV of pileup peak and two components
"""
try:
element_line1, element_line2 = eline.split('-')
e1_cen = get_eline_parameters(element_line1, self.incident_energy)["energy"]
e2_cen = get_eline_parameters(element_line2, self.incident_energy)["energy"]
en = [e1_cen + e2_cen, e1_cen, e2_cen]
except Exception:
en = []
return en
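    # Example (hypothetical values): for eline "V_Ka1-Co_Ka1", with component
    # centers of roughly 4.95 keV and 6.93 keV, the returned list would be
    # approximately [11.88, 4.95, 6.93].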
def _fill_elist_pileup(self, eline=None):
if eline is None:
eline = self.param_model.e_name
elist = []
energies = self._get_pileup_lines(eline)
if energies:
elist = list(zip(energies, [1, 0.2, 0.2]))
return elist
def _fill_elist_userpeak(self):
"""
Fill the list of 'emission lines' for user defined peak. There is only ONE
'emission line', with position determined by the location of the marker.
If the marker is not currently visible, then don't put any emission lines in the list.
The list is used during adding user-defined peaks.
"""
elist = []
energy, marker_visible = self.get_suggested_new_manual_peak_energy()
if marker_visible:
elist.append((energy, 1))
return elist
def _reset_eline_plot(self):
while len(self.eline_obj):
self.eline_obj.pop().remove()
self.elist = []
self._fig.canvas.draw()
@observe('element_id')
def set_element(self, change):
self._set_eline_select_controls(element_id=change['value'])
self.compute_manual_peak_intensity(n_id=change['value'])
if change['value'] == 0:
self._reset_eline_plot()
return
self.plot_current_eline()
def plot_current_eline(self, eline=None):
"""
        Plots emission lines for the selected peak based on ``self.element_id`` and the provided ``eline``.
"""
if eline is None:
eline = self.param_model.e_name
incident_energy = self.incident_energy
# Name of the emission line (if emission line is selected)
ename = self.get_element_line_name_by_id(self.element_id)
# Check if pileup peak is selected
is_pileup = self.param_model.get_eline_name_category(eline) == "pileup"
if (ename is not None) or is_pileup:
logger.debug('Plot emission line for element: '
'{} with incident energy {}'.format(self.element_id,
incident_energy))
if ename is not None:
self.elist = self._fill_elist()
elif is_pileup:
self.elist = self._fill_elist_pileup(eline)
else:
self.elist = [] # Just in case
self.plot_emission_line()
self._update_canvas()
# Do it the second time, since the 'self.elist' has changed
self.compute_manual_peak_intensity(n_id=self.element_id)
else:
self._reset_eline_plot()
logger.debug(f"Selected emission line with ID #{self.element_id} is not in the list.")
@observe('det_materials')
def _update_det_materials(self, change):
if change['value'] == 0:
self.escape_e = 1.73998
else:
self.escape_e = 9.88640
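    # Note: the escape energies above correspond to the K-alpha emission of the
    # detector material (about 1.740 keV for Si and 9.886 keV for Ge), i.e. the
    # energy carried away when a detector fluorescence photon escapes.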
def change_escape_peak_settings(self, plot_escape_line, det_material):
self.plot_escape_line = plot_escape_line
self.det_materials = det_material
# Now update the displayed emission line
self.plot_emission_line()
self._update_canvas()
def plot_roi_bound(self):
"""
        Plot the ROI with its low, center and high values.
"""
for k, v in self.roi_plot_dict.items():
for data in v:
data.remove()
self.roi_plot_dict.clear()
if len(self.roi_dict):
# self._ax.hold(True)
for k, v in self.roi_dict.items():
temp_list = []
for linev in np.array([v.left_val, v.line_val, v.right_val])/1000.:
lineplot, = self._ax.plot([linev, linev],
[0, 1*self.max_v],
color=self.plot_style['roi_line']['color'],
linewidth=self.plot_style['roi_line']['linewidth'])
if v.show_plot:
lineplot.set_visible(True)
else:
lineplot.set_visible(False)
temp_list.append(lineplot)
self.roi_plot_dict.update({k: temp_list})
self._update_canvas()
@observe('roi_dict')
def show_roi_bound(self, change):
logger.debug('roi dict changed {}'.format(change['value']))
self.plot_roi_bound()
if len(self.roi_dict):
for k, v in self.roi_dict.items():
if v.show_plot:
for ln in self.roi_plot_dict[k]:
ln.set_visible(True)
else:
for ln in self.roi_plot_dict[k]:
ln.set_visible(False)
self._update_canvas()
def get_suggested_new_manual_peak_energy(self):
"""
Returns energy pointed by the vertical marker in keV and the status of the marker.
Returns
-------
float
Energy of the manual peak center in keV. The energy is determined
by vertical marker on the screen.
bool
True if the vertical marker is visible, otherwise False.
"""
energy = self.vertical_marker_kev
marker_visible = self.vertical_marker_is_visible
return energy, marker_visible
def _compute_intensity(self, elist):
# Some default value
intensity = 1000.0
if self.io_model.data is not None and self.param_model.param_new is not None \
and self.param_model.prefit_x is not None \
and self.param_model.total_y is not None \
and len(self.io_model.data) > 1 and len(self.param_model.prefit_x) > 1:
# Range of energies in fitting results
e_fit_min = self.param_model.prefit_x[0]
e_fit_max = self.param_model.prefit_x[-1]
de_fit = (e_fit_max - e_fit_min) / (len(self.param_model.prefit_x) - 1)
e_raw_min = self.param_model.param_new['e_offset']['value']
e_raw_max = self.param_model.param_new['e_offset']['value'] + \
(len(self.io_model.data) - 1) * self.param_model.param_new['e_linear']['value'] + \
(len(self.io_model.data) - 1) ** 2 * self.param_model.param_new['e_quadratic']['value']
de_raw = (e_raw_max - e_raw_min) / (len(self.io_model.data) - 1)
# Note: the above algorithm for finding 'de_raw' is far from perfect but will
# work for now. As a result 'de_fit' and
            # 'de_raw' == self.param_model.param_new['e_linear']['value'].
            # So the quadratic coefficient is ignored. This is OK, since currently
# quadratic coefficient is always ZERO. When the program is rewritten,
# the complete algorithm should be revised.
# Find the line with maximum energy. It must come first in the list,
# but let's check just to make sure
max_line_energy, max_line_intensity = 0, 0
if elist:
for e, i in elist:
# e - line peak energy
# i - peak intensity relative to maximum peak
if e >= e_fit_min and e <= e_fit_max and e > e_raw_min and e < e_raw_max:
if max_line_intensity < i:
max_line_energy, max_line_intensity = e, i
# Find the index of peak maximum in the 'fitted' data array
n = (max_line_energy - e_fit_min) / de_fit
n = np.clip(n, 0, len(self.param_model.total_y) - 1)
n_fit = int(round(n))
# Find the index of peak maximum in the 'raw' data array
n = (max_line_energy - e_raw_min) / de_raw
n = np.clip(n, 0, len(self.io_model.data) - 1)
n_raw = int(round(n))
# Intensity of the fitted data at the peak
in_fit = self.param_model.total_y[n_fit]
# Intensity of the raw data at the peak
in_raw = self.io_model.data[n_raw]
# The estimated peak intensity is the difference:
intensity = in_raw - in_fit
# The following step is questionable. We assign some reasonably small number.
# The desired value can always be manually entered
if intensity < 0.0:
intensity = abs(in_raw / 100)
return intensity
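    # --- Illustrative sketch (not part of the original class) ---
    # The estimate above reduces to: take the raw spectrum and the fitted total
    # spectrum at the channel nearest to the strongest selected line and use
    # their difference, falling back to a small positive value if it is negative.
    # The hypothetical helper below condenses that final step.
    @staticmethod
    def _example_peak_intensity(raw_at_peak, fit_at_peak):
        intensity = raw_at_peak - fit_at_peak
        return intensity if intensity >= 0.0 else abs(raw_at_peak / 100)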
def compute_manual_peak_intensity(self, n_id=None):
if n_id is None:
n_id = self.element_id
# Check if the emission line is in the list of supported emission lines (e.g. Ca_K)
if not self.is_element_line_id_valid(n_id):
# This is not a supported emission line (n_id==0)
# This means we are probably dealing with user defined peak.
if self.is_line_in_selected_list(n_id):
# Display intensity if the peak is in the list.
name = self.get_element_line_name_by_id(n_id)
intensity = self.param_model.EC.element_dict[name].maxv
else:
elist = self._fill_elist_userpeak()
intensity = self._compute_intensity(elist)
else:
if self.is_line_in_selected_list(n_id):
# Display intensity if the peak is in the list.
name = self.get_element_line_name_by_id(n_id)
intensity = self.param_model.EC.element_dict[name].maxv
else:
# This is a new peak
elist = self._fill_elist()
intensity = self._compute_intensity(elist)
# Round the intensity for nicer printing
self.param_model.add_element_intensity = round(intensity, 2)
def plot_fit(self, fit_x, fit_y, fit_all, residual=None):
"""
Parameters
----------
fit_x : array
energy axis
fit_y : array
fitted spectrum
fit_all : dict
dict of individual line
residual : array
residual between fit and exp
"""
if fit_x is None or fit_y is None:
return
while(len(self.plot_fit_obj)):
self.plot_fit_obj.pop().remove()
ln, = self._ax.plot(fit_x, fit_y,
color=self.plot_style['fit']['color'],
label=self.plot_style['fit']['label'],
linewidth=self.plot_style['fit']['linewidth'])
self.plot_fit_obj.append(ln)
if residual is not None:
# shiftv = 1.5 # move residual down by some amount
ln, = self._ax.plot(fit_x,
residual - 0.15*self.max_v, # shiftv*(np.max(np.abs(self.residual))),
label=self.plot_style['residual']['label'],
color=self.plot_style['residual']['color'])
self.plot_fit_obj.append(ln)
k_num = 0
l_num = 0
m_num = 0
p_num = 0
for k, v in fit_all.items():
if k == 'background':
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['background']['color'],
# marker=self.plot_style['background']['marker'],
# markersize=self.plot_style['background']['markersize'],
label=self.plot_style['background']['label'])
self.plot_fit_obj.append(ln)
elif k == 'compton':
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['compton']['color'],
linewidth=self.plot_style['compton']['linewidth'],
label=self.plot_style['compton']['label'])
self.plot_fit_obj.append(ln)
elif k == 'elastic':
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['elastic']['color'],
label=self.plot_style['elastic']['label'])
self.plot_fit_obj.append(ln)
elif k == 'escape':
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['escape']['color'],
label=self.plot_style['escape']['label'])
self.plot_fit_obj.append(ln)
elif 'user' in k.lower():
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['userpeak']['color'],
label=self.plot_style['userpeak']['label'])
self.plot_fit_obj.append(ln)
elif '-' in k: # Si_K-Si_K
if p_num == 0:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['pileup']['color'],
label=self.plot_style['pileup']['label'])
else:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['pileup']['color'],
label='_nolegend_')
self.plot_fit_obj.append(ln)
p_num += 1
elif ('_K' in k.upper()) and (len(k) <= 4):
if k_num == 0:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['k_line']['color'],
label=self.plot_style['k_line']['label'])
else:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['k_line']['color'],
label='_nolegend_')
self.plot_fit_obj.append(ln)
k_num += 1
elif ('_L' in k.upper()) and (len(k) <= 4):
if l_num == 0:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['l_line']['color'],
label=self.plot_style['l_line']['label'])
else:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['l_line']['color'],
label='_nolegend_')
self.plot_fit_obj.append(ln)
l_num += 1
elif ('_M' in k.upper()) and (len(k) <= 4):
if m_num == 0:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['m_line']['color'],
label=self.plot_style['m_line']['label'])
else:
ln, = self._ax.plot(fit_x, v,
color=self.plot_style['m_line']['color'],
label='_nolegend_')
self.plot_fit_obj.append(ln)
m_num += 1
else:
pass
# self._update_canvas()
def canvas_onpress(self, event):
"""Callback, mouse button pressed"""
if (self.t_bar.mode == ""):
if event.inaxes == self._ax:
if event.button == 1:
xd = event.xdata
self.set_plot_vertical_marker(marker_position=xd, mouse_clicked=True)
else:
self.hide_plot_vertical_marker(mouse_clicked=True)
# ===========================================================
# Functions for plotting spectrum preview
def selected_range_indices(self, *, e_low=None, e_high=None,
n_indexes=None, margin=2.0):
"""
The function computes the range of indices based on the selected energy range
and parameters for the energy axis.
Parameters
----------
e_low, e_high: float or None
Energy values (in keV) that set the selected range
n_indexes: int
Total number of indexes in the energy array (typically 4096)
margin: float
The displayed energy range is extended by the value of `margin` in both directions.
Returns
-------
n_low, n_high: int
The range of indices of the energy array (n_low..n_high-1) that cover the selected energy range
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new['non_fitting_values']['energy_bound_low']['value']
if e_high is None:
e_high = self.param_model.param_new['non_fitting_values']['energy_bound_high']['value']
# Protection for the case if e_high < e_low
e_high = e_high if e_high > e_low else e_low
# Extend the range (by the value of 'margin')
e_low, e_high = e_low - margin, e_high + margin
# The following calculations ignore quadratic term, which is expected to be small
c0 = self.param_model.param_new['e_offset']['value']
c1 = self.param_model.param_new['e_linear']['value']
        # If more precision is needed, then implement a more complicated algorithm using
# the quadratic term: c2 = self.param_model.param_new['e_quadratic']['value']
n_low = int(np.clip(int((e_low - c0) / c1), a_min=0, a_max=n_indexes - 1))
n_high = int(np.clip(int((e_high - c0) / c1) + 1, a_min=1, a_max=n_indexes))
return n_low, n_high
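    # --- Illustrative sketch (not part of the original class) ---
    # 'selected_range_indices' essentially inverts the (approximately linear)
    # calibration, n ~ (E - e_offset) / e_linear, and clips the result to the
    # valid index range. The hypothetical helper below shows the same
    # computation for a single energy value.
    @staticmethod
    def _example_energy_to_index(e_kev, e_offset, e_linear, n_indexes):
        n = int((e_kev - e_offset) / e_linear)
        return int(np.clip(n, 0, n_indexes - 1))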
def _datasets_max_size(self, *, only_displayed=True):
"""
Return maximum size of the longest available dataset. The datasets that contain
no data are ignored.
Parameters
----------
only_displayed: bool
Limit search to the datasets that are going to be displayed
"""
max_size = 0
for dset in self.io_model.data_sets.values():
if not only_displayed or dset.selected_for_preview:
# Raw data shape: (n_rows, n_columns, n_energy_bins)
max_size = max(max_size, dset.get_raw_data_shape()[2])
return max_size
def plot_selected_energy_range(self, *, axes, barh_existing, e_low=None, e_high=None, n_points=4096):
"""
Plot the range of energies selected for processing. The range may be optionally
        provided as arguments. Any range values that are not provided are read from the
        globally accessible dictionary of parameters. The values passed as arguments
are mainly used if the function is called during interactive update of the
range, when the order of update is undetermined and the parameter dictionary
may be updated after the function is called.
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new['non_fitting_values']['energy_bound_low']['value']
if e_high is None:
e_high = self.param_model.param_new['non_fitting_values']['energy_bound_high']['value']
# Model coefficients for the energy axis
c0 = self.param_model.param_new['e_offset']['value']
c1 = self.param_model.param_new['e_linear']['value']
c2 = self.param_model.param_new['e_quadratic']['value']
# Generate the values for 'energy' axis
x_v = (c0 + np.arange(n_points) * c1 + np.arange(n_points) ** 2 * c2)
ss = (x_v < e_high + c1) & (x_v > e_low - c1)
# Trim both arrays to minimize the number of points
x_v = x_v[ss]
ss = ss[ss]
ss[0] = False
ss[-1] = False
# Negative values will work for semilog plot as well
y_min, y_max = -1e30, 1e30 # Select the max and min values for plotted rectangles
# Remove the plot if it exists
if barh_existing in axes.collections:
axes.collections.remove(barh_existing)
        # Create the new plot (based on new parameters if necessary)
barh_new = BrokenBarHCollection.span_where(
x_v, ymin=y_min, ymax=y_max, where=ss, facecolor='white', edgecolor='yellow', alpha=1)
axes.add_collection(barh_new)
return barh_new
def prepare_preview_spectrum_plot(self):
if self._ax_preview:
self._ax_preview.clear()
else:
self._ax_preview = self._fig_preview.add_subplot(111)
self._ax_preview.set_facecolor('lightgrey')
self._ax_preview.grid(which="both")
self._fig_preview.set_visible(False)
def _show_preview_spectrum_plot(self):
# Completely redraw the plot each time the function is called
self.prepare_preview_spectrum_plot()
# Remove all lines from the plot
while len(self._lines_preview):
self._lines_preview.pop().remove()
# The list of color names
color_names = get_color_name()
e_range = self.energy_range_preview
e_range_supported = (EnergyRangePresets.SELECTED_RANGE, EnergyRangePresets.FULL_SPECTRUM)
if e_range not in e_range_supported:
logger.error(f"Spectrum preview: Unknown option for the energy range: {e_range}\n"
"Please report the error to the development team.")
# This is not a critical error, so we still can proceed
e_range = EnergyRangePresets.FULL_SPECTRUM
p_type = self.plot_type_preview
p_type_supported = (PlotTypes.LINLOG, PlotTypes.LINEAR)
if p_type not in p_type_supported:
logger.error(f"Spectrum preview: Unknown option for the plot type: {p_type}\n"
"Please report the error to the development team.")
p_type = PlotTypes.LINEAR
# Maximum number of points in the displayed dataset
n_dset_points = self._datasets_max_size()
if e_range == EnergyRangePresets.SELECTED_RANGE:
n_range_low, n_range_high = self.selected_range_indices(n_indexes=n_dset_points)
else:
n_range_low, n_range_high = 0, n_dset_points
        # All available datasets; only the selected ones will be plotted
dset_names = list(self.io_model.data_sets.keys())
if p_type == PlotTypes.LINLOG:
top_margin_coef = 2.0
# Minimum for semilog plots may need to be computed, but 1.0 is good
self.min_v_preview = 1.0
self._ax_preview.set_yscale("log")
else:
top_margin_coef = 1.05
self.min_v_preview = 0.0 # Minimum will always be 0 for linear plots
self.max_v_preview = 1.0
self.min_e_preview = 1000.0 # Start with some large number
self.max_e_preview = 0.1 # Start with some small number
for n_line, dset_name in enumerate(dset_names):
dset = self.io_model.data_sets[dset_name]
# Select color (even if the dataset is not displayed). This is done in order
# to ensure that each dataset is assigned the unique color.
color = color_names[n_line % len(color_names)]
if dset.selected_for_preview:
data_arr = np.asarray(dset.get_total_spectrum())
if data_arr is None: # Just a precaution, it shouldn't happen
logger.error("Spectrum review: attempting to print empty dataset.")
continue
# The assumption is that some datasets may have different length (which is
# currently not the case). So we have to take it into account when using
# maximum dataset length. This is essentially a safety precaution.
n_low = int(np.clip(n_range_low, a_min=0, a_max=data_arr.size - 1))
n_high = int(np.clip(n_range_high, a_min=1, a_max=data_arr.size))
# From now on we work with the trimmed data array
x_v = (self.param_model.param_new['e_offset']['value'] +
np.arange(n_low, n_high) *
self.param_model.param_new['e_linear']['value'] +
np.arange(n_low, n_high) ** 2 *
self.param_model.param_new['e_quadratic']['value'])
data_arr = data_arr[n_low: n_high]
self.max_v_preview = np.max(
[self.max_v_preview,
np.max(data_arr[self.limit_cut:-self.limit_cut])])
self.max_e_preview = np.max([self.max_e_preview, x_v[-1]])
self.min_e_preview = np.min([self.min_e_preview, x_v[0]])
line, = self._ax_preview.plot(x_v, data_arr,
color=color,
label=dset.filename.split('.')[0],
linestyle=self.plot_style['experiment']['linestyle'],
marker=self.plot_style['experiment']['marker'])
self._lines_preview.append(line)
self._ax_preview.set_xlim(self.min_e_preview, self.max_e_preview)
self._ax_preview.set_ylim(self.min_v_preview, self.max_v_preview * top_margin_coef)
self._ax_preview.legend()
self._ax_preview.set_xlabel("Energy (keV)")
self._ax_preview.set_ylabel("Total Spectrum (Counts)")
self._fig_preview.set_visible(True)
# Reset navigation toolbar (specifically clear ZOOM history, since it becomes invalid
# when the new data is loaded, i.e. zooming out may not show the whole plot)
tb = self._fig_preview.canvas.toolbar
tb.update()
self._bahr_preview = self.plot_selected_energy_range(
axes=self._ax_preview, barh_existing=self._bahr_preview)
def _hide_preview_spectrum_plot(self,):
self._fig_preview.set_visible(False)
def update_preview_spectrum_plot(self, *, hide=False):
"""
Update spectrum preview plot based on available/selected dataset and `hide` flag.
Parameters
----------
hide: bool
`False` - plot data if datasets are available and at least one dataset is selected,
otherwise hide the plot, `True` - hide the plot in any case
"""
# Find out if any data is selected
show_plot = False
if self.io_model.data_sets:
show_plot = any([_.selected_for_preview for _ in self.io_model.data_sets.values()])
logger.debug(f"LinePlotModel.update_preview_spectrum_plot(): show_plot={show_plot} hide={hide}")
if show_plot and not hide:
logger.debug("LinePlotModel.update_preview_spectrum_plot(): plotting existing datasets")
self._show_preview_spectrum_plot()
else:
logger.debug("LinePlotModel.update_preview_spectrum_plot(): hiding plots")
self._hide_preview_spectrum_plot()
self._fig_preview.canvas.draw()
# ===========================================================================================
# Plotting the preview of Total Count Maps
def clear_map_preview_range(self):
self.set_map_preview_range(low=-1, high=-1)
def set_map_preview_range(self, *, low, high):
self.map_preview_range_low = low
self.map_preview_range_high = high
def get_selected_datasets(self):
"""Returns the datasets selected for preview"""
return {k: v for (k, v) in self.io_model.data_sets.items() if v.selected_for_preview}
def _compute_map_preview_range(self, img_dict, key_list):
range_min, range_max = None, None
for key in key_list:
data = img_dict[key]
v_min, v_max = np.min(data), np.max(data)
if range_min is None or range_max is None:
range_min, range_max = v_min, v_max
else:
range_min, range_max = min(range_min, v_min), max(range_max, v_max)
return range_min, range_max
def _show_total_count_map_preview(self):
self._fig_maps.set_visible(True)
self._fig_maps.clf()
selected_dsets = self.get_selected_datasets()
data_for_plotting = {k: v.get_total_count() for (k, v) in selected_dsets.items()}
# Check if positions data is available. Positions data may be unavailable
        # (not recorded in the HDF5 file) if the experiment has not been completed.
# While the data from the completed part of experiment may still be used,
# plotting vs. x-y or scatter plot may not be displayed.
positions_data_available = False
if 'positions' in self.io_model.img_dict.keys():
data_for_plotting["positions"] = self.io_model.img_dict["positions"]
positions_data_available = True
        # Create local copies of self.map_axes_units_preview and self.map_scatter_plot
pixel_or_pos_local = self.map_axes_units_preview
scatter_show_local = self.map_scatter_plot
# Disable plotting vs x-y coordinates if 'positions' data is not available
if not positions_data_available:
if pixel_or_pos_local:
pixel_or_pos_local = MapAxesUnits.PIXELS # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Plotting vs. x-y coordinates is disabled")
if scatter_show_local:
scatter_show_local = False # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Scatter plot is disabled.")
# low_lim = 1e-4 # define the low limit for log image
plot_interp = 'Nearest'
grey_use = self.map_preview_color_scheme
ncol = int(np.ceil(np.sqrt(len(selected_dsets))))
try:
nrow = int(np.ceil(len(selected_dsets)/float(ncol)))
except ZeroDivisionError:
ncol = 1
nrow = 1
a_pad_v = 0.8
a_pad_h = 0.5
n_displayed_axes = ncol * nrow # Total number of axes in the grid
grid = ImageGrid(self._fig_maps, 111,
nrows_ncols=(nrow, ncol),
axes_pad=(a_pad_v, a_pad_h),
cbar_location='right',
cbar_mode='each',
cbar_size='7%',
cbar_pad='2%',
share_all=True)
def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
"""
Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
always equal and fit the maximum of the ranges for x and y values:
max(abs(x_max-x_min), abs(y_max-y_min))
The ranges are set so that the data is always centered in the middle of the ranges
Parameters
----------
x_min, x_max, y_min, y_max : float
lower and upper boundaries of the x and y values
Returns
-------
x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
lower and upper boundaries of the x- and y-axes ranges
"""
x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
if x_range > y_range:
y_center = (y_max + y_min) / 2
y_axis_max = y_center + x_range / 2
y_axis_min = y_center - x_range / 2
else:
x_center = (x_max + x_min) / 2
x_axis_max = x_center + y_range / 2
x_axis_min = x_center - y_range / 2
return x_axis_min, x_axis_max, y_axis_min, y_axis_max
def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):
"""
Adjust the range for plotted data along one axis (x or y). The adjusted range is
            applied to the 'extent' argument of imshow(). The adjusted range is always greater
            than 'axis_range * min_ratio'. Such a transformation has no physical meaning
            and is performed for aesthetic reasons: stretching the image presentation of
a scan with only a few lines (1-3) greatly improves visibility of data.
Parameters
----------
c_min, c_max : float
boundaries of the data range (along x or y axis)
c_axis_range : float
range presented along the same axis
Returns
-------
            c_min, c_max : float
adjusted boundaries of the data range
"""
c_range = c_max - c_min
if c_range < c_axis_range * min_ratio:
c_center = (c_max + c_min) / 2
c_new_range = c_axis_range * min_ratio
c_min = c_center - c_new_range / 2
c_max = c_center + c_new_range / 2
return c_min, c_max
# Hide the axes that are unused (they are unsightly)
for i in range(len(selected_dsets), n_displayed_axes):
grid[i].set_visible(False)
grid.cbar_axes[i].set_visible(False)
for i, (k, v) in enumerate(selected_dsets.items()):
data_arr = data_for_plotting[k]
if pixel_or_pos_local == MapAxesUnits.POSITIONS or scatter_show_local:
# xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
# min(self.y_pos), max(self.y_pos)
x_pos_2D = data_for_plotting['positions']['x_pos']
y_pos_2D = data_for_plotting['positions']['y_pos']
xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
# Adjust the direction of each axis depending on the direction in which encoder values changed
# during the experiment. Data is plotted starting from the upper-right corner of the plot
if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min
else:
yd, xd = data_arr.shape
xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
if (yd <= math.floor(xd / 100)) and (xd >= 200):
yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
if (xd <= math.floor(yd / 100)) and (yd >= 200):
xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
# Compute range for data values
low_limit = self.map_preview_range_low
high_limit = self.map_preview_range_high
# If limit is not set, then compute the limit based on the selected datasets.
# It is assumed that at least one dataset is selected.
if low_limit == -1 and high_limit == -1:
low_limit, high_limit = \
self._compute_map_preview_range(data_for_plotting, selected_dsets.keys())
if low_limit is None or high_limit is None:
low_limit, high_limit = 0, 1
# Set some minimum range for the colorbar (otherwise it will have white fill)
if math.isclose(low_limit, high_limit, abs_tol=2e-20):
if abs(low_limit) < 1e-20: # The value is zero
dv = 1e-20
else:
dv = math.fabs(low_limit * 0.01)
high_limit += dv
low_limit -= dv
if self.map_type_preview == MapTypes.LINEAR:
if not scatter_show_local:
im = grid[i].imshow(data_arr,
cmap=grey_use,
interpolation=plot_interp,
extent=(xd_min, xd_max, yd_max, yd_min),
origin='upper',
clim=(low_limit, high_limit))
grid[i].set_ylim(yd_axis_max, yd_axis_min)
else:
xx = self.io_model.img_dict['positions']['x_pos']
yy = self.io_model.img_dict['positions']['y_pos']
# The following condition prevents crash if different file is loaded while
# the scatter plot is open (PyXRF specific issue)
if data_arr.shape == xx.shape and data_arr.shape == yy.shape:
im = grid[i].scatter(xx, yy, c=data_arr,
marker='s', s=500,
alpha=1.0, # Originally: alpha=0.8
cmap=grey_use,
vmin=low_limit, vmax=high_limit,
linewidths=1, linewidth=0)
grid[i].set_ylim(yd_axis_max, yd_axis_min)
grid[i].set_xlim(xd_axis_min, xd_axis_max)
grid_title = k
# Display only the channel name (e.g. 'sum', 'det1' etc.)
grid_title = grid_title.split("_")[-1]
grid[i].text(0, 1.01, grid_title, ha='left', va='bottom', transform=grid[i].axes.transAxes)
grid.cbar_axes[i].colorbar(im)
im.colorbar.formatter = im.colorbar.ax.yaxis.get_major_formatter()
# im.colorbar.ax.get_xaxis().set_ticks([])
# im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
grid.cbar_axes[i].ticklabel_format(style='sci', scilimits=(-3, 4), axis='both')
else:
# maxz = np.max(data_arr)
# # Set some reasonable minimum range for the colorbar
# # Zeros or negative numbers will be shown in white
# if maxz <= 1e-30:
# maxz = 1
if not scatter_show_local:
im = grid[i].imshow(data_arr,
# norm=LogNorm(vmin=low_lim*maxz,
# vmax=maxz, clip=True),
norm=LogNorm(vmin=low_limit,
vmax=high_limit, clip=True),
cmap=grey_use,
interpolation=plot_interp,
extent=(xd_min, xd_max, yd_max, yd_min),
origin='upper',
# clim=(low_lim*maxz, maxz))
clim=(low_limit, high_limit))
grid[i].set_ylim(yd_axis_max, yd_axis_min)
else:
im = grid[i].scatter(self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'],
# norm=LogNorm(vmin=low_lim*maxz,
# vmax=maxz, clip=True),
norm=LogNorm(vmin=low_limit,
vmax=high_limit, clip=True),
c=data_arr, marker='s', s=500, alpha=1.0, # Originally: alpha=0.8
cmap=grey_use,
linewidths=1, linewidth=0)
grid[i].set_ylim(yd_axis_min, yd_axis_max)
grid[i].set_xlim(xd_axis_min, xd_axis_max)
grid_title = k
# Display only the channel name (e.g. 'sum', 'det1' etc.)
grid_title = grid_title.split("_")[-1]
grid[i].text(0, 1.01, grid_title, ha='left', va='bottom', transform=grid[i].axes.transAxes)
grid.cbar_axes[i].colorbar(im)
im.colorbar.formatter = im.colorbar.ax.yaxis.get_major_formatter()
im.colorbar.ax.get_xaxis().set_ticks([])
im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
im.colorbar.ax.yaxis.set_minor_formatter(mticker.LogFormatter())
grid[i].get_xaxis().set_major_locator(mticker.MaxNLocator(nbins="auto"))
grid[i].get_yaxis().set_major_locator(mticker.MaxNLocator(nbins="auto"))
grid[i].get_xaxis().get_major_formatter().set_useOffset(False)
grid[i].get_yaxis().get_major_formatter().set_useOffset(False)
self._fig_maps.canvas.draw_idle()
def _hide_total_count_map_preview(self):
self._fig_maps.set_visible(False)
def update_total_count_map_preview(self, *, hide=False, new_plot=False):
"""
Update total count map preview based on available/selected dataset and `hide` flag.
Parameters
----------
hide: bool
`False` - plot the data if datasets are available and at least one dataset is selected,
otherwise hide the plot; `True` - hide the plot in any case
new_plot: bool
`True` - plotting new data that was just loaded, reset the plot settings
"""
if new_plot and not hide:
# Clear the displayed data range. The range will be computed based on the available data.
self.clear_map_preview_range()
# Find out if any data is selected
show_plot = False
if self.io_model.data_sets:
show_plot = any([_.selected_for_preview for _ in self.io_model.data_sets.values()])
logger.debug(f"LinePlotModel.update_total_count_map_preview(): show_plot={show_plot} hide={hide}")
if show_plot and not hide:
logger.debug("LinePlotModel.update_total_count_map_preview(): plotting existing datasets")
self._show_total_count_map_preview()
else:
logger.debug("LinePlotModel.update_total_count_map_preview(): hiding plots")
self._hide_total_count_map_preview()
self._fig_maps.canvas.draw()
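# Illustrative sketch (hypothetical helper, not part of the original PyXRF module):
# re-derives, with concrete numbers, what the nested helpers `_compute_equal_axes_ranges`
# and `_adjust_data_range_using_min_ratio` defined above do: the axes are forced to span
# equal ranges (so the map keeps its aspect ratio) and a nearly degenerate data range is
# widened to at least `min_ratio` of the axis range so 1-3 line scans remain visible.
def _example_equal_axes_and_min_ratio():
    # Equal-axes range: a 10 x 2 field of view becomes 10 x 10, centered on the data.
    x_min, x_max, y_min, y_max = 0.0, 10.0, 4.0, 6.0
    x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
    if x_range > y_range:
        y_center = (y_max + y_min) / 2
        y_min, y_max = y_center - x_range / 2, y_center + x_range / 2
    else:
        x_center = (x_max + x_min) / 2
        x_min, x_max = x_center - y_range / 2, x_center + y_range / 2
    assert (x_max - x_min) == (y_max - y_min) == 10.0
    # Min-ratio adjustment: a nearly zero data range is stretched to 1% of the axis range.
    c_min, c_max, c_axis_range, min_ratio = 5.0, 5.001, 10.0, 0.01
    if (c_max - c_min) < c_axis_range * min_ratio:
        c_center = (c_max + c_min) / 2
        c_min = c_center - c_axis_range * min_ratio / 2
        c_max = c_center + c_axis_range * min_ratio / 2
    assert abs((c_max - c_min) - c_axis_range * min_ratio) < 1e-9
    return (x_min, x_max, y_min, y_max), (c_min, c_max)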
| {
"repo_name": "NSLS-II-HXN/PyXRF",
"path": "pyxrf/model/lineplot.py",
"copies": "1",
"size": "79123",
"license": "bsd-3-clause",
"hash": 2505826788844610000,
"line_mean": 40.4690775681,
"line_max": 114,
"alpha_frac": 0.5377197528,
"autogenerated": false,
"ratio": 3.8706095294002543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4908329282200254,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
import numpy as np
try:
import matplotlib.pyplot as plt
HAS_MPL = True
except ImportError:
HAS_MPL = False
from .fit import mad
def mask_zeros(image, expo=None, npix_tol=None):
"""Mask the lines containing zeros in the image.
Parameters
----------
image : 2d array
Input image
expo : 2d array, optional
Exposure map used to locate the lines containing zeros; defaults to ``image``
npix_tol : int
Number of tolerated pixels with value 0
Returns
-------
masked_image : 2d array
The masked image
mask : 2d array
The boolean mask to obtain masked_image from image
Examples
--------
>>> import numpy as np
>>> img = [[0, 1, 1], [0, 1, 1], [1, 1, 1]]
>>> masked_image, mask = mask_zeros(img, expo=img, npix_tol=1)
>>> np.all(masked_image == [[1, 1], [1, 1], [1, 1]])
True
>>> np.all(mask == [[False, True, True], [False, True, True],
... [False, True, True]])
True
>>> masked_image, mask = mask_zeros(img, npix_tol=2)
>>> np.all(masked_image == img)
True
>>> img = [[0, 0, 0], [1, 1, 1], [1, 1, 1]]
>>> masked_image, mask = mask_zeros(img, npix_tol=1)
>>> np.all(masked_image == [[1, 1, 1], [1, 1, 1]])
True
"""
image = np.asarray(image)
mask = np.ones(image.shape, dtype=bool)
if npix_tol is None:
return image, mask
if expo is None:
expo = image
expo = np.asarray(expo)
good_hor = 0
for i in range(expo.shape[0]):
line = expo[i, :]
if len(line[line == 0]) > npix_tol:
mask[i, :] = False
else:
good_hor += 1
good_ver = 0
for i in range(expo.shape[1]):
line = expo[:, i]
if len(line[line == 0]) > npix_tol:
mask[:, i] = False
else:
good_ver += 1
masked_image = image[mask].reshape((good_hor, good_ver))
return masked_image, mask
def clip_and_smooth(img, clip_sigma=3, smooth_window=10, direction=0):
"""
Examples
--------
>>> img = np.zeros((2,2))
>>> np.all(clip_and_smooth(img, smooth_window=(5, 5)) == img)
True
>>> img = np.array([[0, 0], [1, 1]])
>>> np.all(clip_and_smooth(img, direction=0) == img)
True
>>> img = np.array([[0, 1], [0, 1]])
>>> np.all(clip_and_smooth(img, direction=1) == img)
True
>>> img = np.array([[1, 1.], [8., 1]])
>>> np.allclose(clip_and_smooth(img, clip_sigma=1, smooth_window=0),
... [[1, 1], [3.0310889132455352, 1]])
True
"""
from scipy.ndimage import gaussian_filter, gaussian_filter1d
import collections
if img.shape[0] * img.shape[1] > 100:
rms = mad(img.flatten())
else:
rms = np.std(img.flatten())
median = np.median(img)
bad = img - median > clip_sigma * rms
img[bad] = clip_sigma * rms
bad = median - img > clip_sigma * rms
img[bad] = - clip_sigma * rms
if smooth_window == 0:
pass
elif isinstance(smooth_window, collections.Iterable):
img = gaussian_filter(img, np.array(smooth_window) / 5)
else:
img = gaussian_filter1d(img, smooth_window / 5,
axis=np.logical_not(direction))
return img
def basket_weaving(img_hor, img_ver, clip_sigma=3, niter_max=10,
expo_hor=None, expo_ver=None, window_shape='hanning'):
"""Basket-Weaving algorithm from Mueller et al. 1707.05573v6."""
it = 1
if expo_hor is None:
expo_hor = np.ones_like(img_hor)
if expo_ver is None:
expo_ver = np.ones_like(img_ver)
img_hor = np.copy(img_hor)
img_ver = np.copy(img_ver)
width = np.max(img_hor.shape)
while it <= niter_max:
window = width // 2 - 4 * it
if window < 4:
break
diff = img_hor - img_ver
diff = clip_and_smooth(diff, clip_sigma=clip_sigma,
smooth_window=(0., window))
img_hor = img_hor - diff
diff = img_ver - img_hor
diff = clip_and_smooth(diff, clip_sigma=clip_sigma,
smooth_window=(window, 0.))
img_ver = img_ver - diff
it += 1
img_final = img_ver * expo_ver + img_hor * expo_hor
expo = expo_hor + expo_ver
good = expo > 0
img_final[good] = img_final[good] / expo[good]
return img_final
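# Illustrative usage sketch (synthetic data, not part of the original srttools module):
# basket_weaving() expects two images of the same region, one scanned horizontally and
# one vertically, each contaminated by stripes along its own scanning direction. Below
# such a pair is built from a common "true" map plus orthogonal stripe offsets, and the
# combined, destriped image is returned together with its residual scatter.
def _example_basket_weaving():
    rng = np.random.RandomState(0)
    true_map = 10.0 + rng.normal(0.0, 0.1, (64, 64))
    img_hor = true_map + rng.normal(0.0, 1.0, (64, 1))  # one offset per horizontal scan line
    img_ver = true_map + rng.normal(0.0, 1.0, (1, 64))  # one offset per vertical scan line
    cleaned = basket_weaving(img_hor, img_ver, clip_sigma=3, niter_max=10)
    residual_std = np.std(cleaned - true_map)
    return cleaned, residual_std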
def destripe_wrapper(image_hor, image_ver, alg='basket-weaving',
niter=10, expo_hor=None, expo_ver=None,
npix_tol=None, clip_sigma=3, label="img"):
if expo_hor is None or expo_ver is None:
image_mean = (image_hor + image_ver) / 2
expo_hor = expo_ver = np.ones_like(image_mean)
masked_image, mask = mask_zeros(image_mean, npix_tol=npix_tol)
else:
image_mean = \
(image_hor*expo_hor + image_ver*expo_ver) / (expo_hor + expo_ver)
masked_image, mask = mask_zeros(image_mean, expo_hor + expo_ver,
npix_tol=npix_tol)
if HAS_MPL:
fig = plt.figure()
plt.imshow(image_hor[mask].reshape(masked_image.shape))
plt.savefig(label + '_hor.png')
plt.imshow(image_ver[mask].reshape(masked_image.shape))
plt.savefig(label + '_ver.png')
diff_img = image_ver[mask] - image_hor[mask]
plt.imshow(diff_img.reshape(masked_image.shape))
plt.savefig(label + '_diff.png')
plt.close(fig)
fig = plt.figure()
plt.imshow(expo_hor[mask].reshape(masked_image.shape))
plt.savefig(label + '_expoh.png')
plt.imshow(expo_ver[mask].reshape(masked_image.shape))
plt.savefig(label + '_expov.png')
plt.imshow(image_mean[mask].reshape(masked_image.shape))
plt.savefig(label + '_initial.png')
plt.close(fig)
image_mean[mask] = \
basket_weaving(image_hor[mask].reshape(masked_image.shape),
image_ver[mask].reshape(masked_image.shape),
niter_max=niter,
expo_hor=expo_hor[mask].reshape(masked_image.shape),
expo_ver=expo_ver[mask].reshape(masked_image.shape),
clip_sigma=clip_sigma
).flatten()
if HAS_MPL:
plt.imshow(image_mean[mask].reshape(masked_image.shape))
plt.savefig(label + '_destr.png')
if alg == 'basket-weaving':
return image_mean
| {
"repo_name": "matteobachetti/srt-single-dish-tools",
"path": "srttools/destripe.py",
"copies": "1",
"size": "6447",
"license": "bsd-3-clause",
"hash": 4728438608578571000,
"line_mean": 30.7586206897,
"line_max": 77,
"alpha_frac": 0.542888165,
"autogenerated": false,
"ratio": 3.2527749747729566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9295663139772956,
"avg_score": 0,
"num_lines": 203
} |
from __future__ import (absolute_import, division,
print_function)
import xml.etree.ElementTree as ET
from astropy.table import Table
from astropy.time import Time
import os
import numpy as np
def load_acs_log_file(fname, full=False):
with open(fname) as fd:
doc = fd.read()
# fix missing tags
doc = "<!DOCTYPE xml>\n<html>\n" + doc + "\n</html>"
tree = ET.fromstring(doc)
is_cal_on = False
table = Table(names=['kind', 'Time', 'file', 'CAL', 'text'],
dtype=['<U10', 'O', '<U200', bool, '<U200'])
for line_el in tree.iter():
line = dict(line_el.attrib)
if line == {}:
continue
if 'TimeStamp' in line:
time = Time(line['TimeStamp'])
else:
continue
text = line_el.text
if text is None:
continue
if 'calOff' in text:
is_cal_on = False
elif 'calOn' in text:
is_cal_on = True
file = ''
if '.fits' in line_el.text:
file = os.path.basename(text.replace('FILE_OPENED', '').strip())
text = ''
else:
if not full:
continue
table.add_row([line_el.tag, time, file, is_cal_on, text])
return table
def main_parse_acs_logs(args=None):
import argparse
description = ('Read ACS logs and return useful information')
parser = argparse.ArgumentParser(description=description)
parser.add_argument("files", nargs='*',
help="Single files to preprocess",
default=None, type=str)
parser.add_argument("--to-csv", action='store_true', default=False,
help='Save a CSV file with the results')
parser.add_argument("--list-calon", action='store_true', default=False,
help='List files with calibration mark on')
args = parser.parse_args(args)
for fname in args.files:
# Get the full table only when relevant
full = args.to_csv
table = load_acs_log_file(fname, full=full)
if args.list_calon:
print("\n\nList of files with the calibration mark on:\n")
good = (table['CAL'] == True) & (table['file'] != '')
if np.any(good):
for fname in table['file'][good]:
print(fname)
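# Illustrative sketch (the element tag and log content below are hypothetical, inferred
# from the parsing code above): load_acs_log_file() expects XML-like log entries with a
# 'TimeStamp' attribute; 'calOn'/'calOff' entries toggle the calibration flag and
# 'FILE_OPENED <name>.fits' entries record the opened file.
def _example_parse_acs_log(fname="acs_log_example.xml"):
    entries = (
        '<Info TimeStamp="2020-01-01T00:00:00.000">calOn</Info>\n'
        '<Info TimeStamp="2020-01-01T00:00:10.000">FILE_OPENED /data/scan_0001.fits</Info>\n'
        '<Info TimeStamp="2020-01-01T00:01:00.000">calOff</Info>\n'
    )
    with open(fname, "w") as fd:
        fd.write(entries)
    table = load_acs_log_file(fname)
    # With full=False only the FILE_OPENED entry produces a row, with the CAL flag on.
    return table['file'][0], bool(table['CAL'][0])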
| {
"repo_name": "matteobachetti/srt-single-dish-tools",
"path": "srttools/parse_acs_logs.py",
"copies": "1",
"size": "2390",
"license": "bsd-3-clause",
"hash": 890626363340643500,
"line_mean": 30.8666666667,
"line_max": 76,
"alpha_frac": 0.540167364,
"autogenerated": false,
"ratio": 3.950413223140496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49905805871404957,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
import collections
import logging
import math
import numpy as np
import os
import six
import warnings
from astropy.io import fits
from astropy.table import Table
import stingray.utils as utils
from .utils import order_list_of_arrays, is_string
from .utils import assign_value_if_none
from .gti import _get_gti_from_extension, load_gtis
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
_H5PY_INSTALLED = True
try:
import h5py
except:
_H5PY_INSTALLED = False
def get_file_extension(fname):
"""Get the extension from the file name."""
return os.path.splitext(fname)[1]
def high_precision_keyword_read(hdr, keyword):
"""Read FITS header keywords, also if split in two.
In the case where the keyword is split in two, like
MJDREF = MJDREFI + MJDREFF
in some missions, this function returns the summed value. Otherwise, the
content of the single keyword is returned.
Parameters
----------
hdr : dict_like
The FITS header structure, or a dictionary
keyword : str
The key to read in the header
Returns
-------
value : long double
The value of the key, or None if something went wrong
"""
try:
value = np.longdouble(hdr[keyword])
return value
except:
pass
try:
if len(keyword) == 8:
keyword = keyword[:7]
value = np.longdouble(hdr[keyword + 'I'])
value += np.longdouble(hdr[keyword + 'F'])
return value
except:
return None
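# Illustrative sketch (hypothetical header values): a plain dict stands in for a FITS
# header. 'MJDREF' is absent as a single keyword, so the summed 'MJDREFI' + 'MJDREFF'
# pair is returned at long-double precision.
def _example_high_precision_keyword():
    hdr = {'MJDREFI': 55197, 'MJDREFF': 0.00076601852}
    mjdref = high_precision_keyword_read(hdr, 'MJDREF')
    assert np.isclose(float(mjdref), 55197.00076601852)
    return mjdref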
def _get_additional_data(lctable, additional_columns):
additional_data = {}
if additional_columns is not None:
for a in additional_columns:
try:
additional_data[a] = np.array(lctable.field(a))
except: # pragma: no cover
if a == 'PI':
logging.warning('Column PI not found. Trying with PHA')
additional_data[a] = np.array(lctable.field('PHA'))
else:
raise Exception('Column ' + a + ' not found')
return additional_data
def load_events_and_gtis(fits_file, additional_columns=None,
gtistring='GTI,STDGTI',
gti_file=None, hduname='EVENTS', column='TIME'):
"""Load event lists and GTIs from one or more files.
Loads event list from HDU EVENTS of file fits_file, with Good Time
intervals. Optionally, returns additional columns of data from the same
HDU of the events.
Parameters
----------
fits_file : str
gtistring : str, optional
Comma-separated list of accepted GTI extension names (default 'GTI,STDGTI')
gti_file : str, optional
External file containing the GTIs; if None, GTIs are read from `fits_file`
hduname : str, optional
Name of the HDU containing the event list (default 'EVENTS')
column : str, optional
Name of the column containing the event times (default 'TIME')
additional_columns: list of str, optional
A list of keys corresponding to the additional columns to extract from
the event HDU (ex.: ['PI', 'X'])
Returns
-------
ev_list : array-like
gtis: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
additional_data: dict
A dictionary, where each key is the one specified in additional_columns.
The data are an array with the values of the specified column in the
fits file.
t_start : float
t_stop : float
"""
gtistring = assign_value_if_none(gtistring, 'GTI,STDGTI')
lchdulist = fits.open(fits_file)
# Load data table
try:
lctable = lchdulist[hduname].data
except: # pragma: no cover
logging.warning('HDU %s not found. Trying first extension' % hduname)
lctable = lchdulist[1].data
# Read event list
ev_list = np.array(lctable.field(column), dtype=np.longdouble)
# Read TIMEZERO keyword and apply it to events
try:
timezero = np.longdouble(lchdulist[1].header['TIMEZERO'])
except: # pragma: no cover
logging.warning("No TIMEZERO in file")
timezero = np.longdouble(0.)
ev_list += timezero
# Read TSTART, TSTOP from header
try:
t_start = np.longdouble(lchdulist[1].header['TSTART'])
t_stop = np.longdouble(lchdulist[1].header['TSTOP'])
except: # pragma: no cover
logging.warning("Tstart and Tstop error. using defaults")
t_start = ev_list[0]
t_stop = ev_list[-1]
# Read and handle GTI extension
accepted_gtistrings = gtistring.split(',')
if gti_file is None:
# Select first GTI with accepted name
try:
gti_list = \
_get_gti_from_extension(
lchdulist, accepted_gtistrings=accepted_gtistrings)
except: # pragma: no cover
warnings.warn("No extensions found with a valid name. "
"Please check the `accepted_gtistrings` values.")
gti_list = np.array([[t_start, t_stop]],
dtype=np.longdouble)
else:
gti_list = load_gtis(gti_file, gtistring)
additional_data = _get_additional_data(lctable, additional_columns)
lchdulist.close()
# Sort event list
order = np.argsort(ev_list)
ev_list = ev_list[order]
additional_data = order_list_of_arrays(additional_data, order)
returns = _empty()
returns.ev_list = ev_list
returns.gti_list = gti_list
returns.additional_data = additional_data
returns.t_start = t_start
returns.t_stop = t_stop
return returns
class _empty():
def __init__(self):
pass
def mkdir_p(path): # pragma: no cover
"""Safe mkdir function.
Parameters
----------
path : str
Name of the directory/ies to create
Notes
-----
Found at
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
import os
import errno
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def read_header_key(fits_file, key, hdu=1):
"""Read the header key key from HDU hdu of the file fits_file.
Parameters
----------
fits_file: str
key: str
The keyword to be read
Other Parameters
----------------
hdu : int
"""
hdulist = fits.open(fits_file)
try:
value = hdulist[hdu].header[key]
except: # pragma: no cover
value = ''
hdulist.close()
return value
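# Illustrative sketch (hypothetical file and keyword): write a minimal FITS file and
# read a keyword back from its first extension; missing keywords give an empty string.
def _example_read_header_key(filename="header_example.fits"):
    events = fits.BinTableHDU(name='EVENTS')
    events.header['TELESCOP'] = 'SRT'
    fits.HDUList([fits.PrimaryHDU(), events]).writeto(filename, overwrite=True)
    assert read_header_key(filename, 'TELESCOP') == 'SRT'  # hdu=1 by default
    assert read_header_key(filename, 'MISSING') == ''
    return filename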
def ref_mjd(fits_file, hdu=1):
"""Read MJDREF from the FITS header or, if it is split in two, MJDREFI + MJDREFF.
Parameters
----------
fits_file : str
Returns
-------
mjdref : numpy.longdouble
the reference MJD
Other Parameters
----------------
hdu : int
"""
import collections
if isinstance(fits_file, collections.Iterable) and\
not is_string(fits_file): # pragma: no cover
fits_file = fits_file[0]
logging.info("opening %s" % fits_file)
hdulist = fits.open(fits_file)
ref_mjd_val = high_precision_keyword_read(hdulist[hdu].header, "MJDREF")
hdulist.close()
return ref_mjd_val
def common_name(str1, str2, default='common'):
"""Strip two strings of the letters not in common.
Filenames must be of same length and only differ by a few letters.
Parameters
----------
str1 : str
str2 : str
Returns
-------
common_str : str
A string containing the parts of the two names in common
Other Parameters
----------------
default : str
The string to return if common_str is empty
"""
if not len(str1) == len(str2):
return default
common_str = ''
# Extract the MP root of the name (in case they're event files)
for i, letter in enumerate(str1):
if str2[i] == letter:
common_str += letter
# Remove leading and trailing underscores and dashes
common_str = common_str.rstrip('_').rstrip('-')
common_str = common_str.lstrip('_').lstrip('-')
if common_str == '':
common_str = default
logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str))
return common_str
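# Illustrative sketch: file names of equal length that differ only in the scan number
# reduce to their common stem, while names of different length fall back to the default.
def _example_common_name():
    assert common_name('blah_scan1_evt', 'blah_scan2_evt') == 'blah_scan_evt'
    assert common_name('short', 'longer_name') == 'common'
    return True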
def split_numbers(number):
"""
Split high precision number(s) into doubles.
TODO: Consider the option of using a third number to specify shift.
Parameters
----------
number: long double
The input high precision number which is to be split
Returns
-------
number_I: double
First part of high precision number
number_F: double
Second part of high precision number
"""
if isinstance(number, collections.Iterable):
mods = [math.modf(n) for n in number]
number_F = [f for f,_ in mods]
number_I = [i for _,i in mods]
else:
number_F, number_I = math.modf(number)
return np.double(number_I), np.double(number_F)
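# Illustrative sketch: split_numbers() separates the integer and fractional parts so
# that each piece can be stored as an ordinary double (see _save_hdf5_object and
# _save_fits_object below); the readers recombine the two parts at long-double precision.
def _example_split_numbers():
    number_i, number_f = split_numbers(56000.25)
    assert (number_i, number_f) == (56000.0, 0.25)
    numbers_i, numbers_f = split_numbers([1.5, -2.75])
    assert np.allclose(numbers_i, [1.0, -2.0]) and np.allclose(numbers_f, [0.5, -0.75])
    return number_i, number_f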
def _save_pickle_object(object, filename):
"""
Save a class object in pickle format.
Parameters
----------
object: class instance
A class object whose attributes are saved in a
dictionary format
filename: str
Name of the file in which object is saved
"""
with open(filename, "wb" ) as f:
pickle.dump(object, f)
def _retrieve_pickle_object(filename):
"""
Retrieves a pickled class object.
Parameters
----------
filename: str
Name of the file in which object is saved
Returns
-------
data: class object
"""
with open(filename, "rb" ) as f:
return pickle.load(f)
def _save_hdf5_object(object, filename):
"""
Save a class object in hdf5 format.
Parameters
----------
object: class instance
A class object whose attributes are saved in a
dictionary format
filename: str
Name of the file in which object is saved
"""
items = vars(object)
attrs = [name for name in items if items[name] is not None]
with h5py.File(filename, 'w') as hf:
for attr in attrs:
data = items[attr]
# If data is a single number, store as an attribute.
if _isattribute(data):
if isinstance(data, np.longdouble):
data_I, data_F= split_numbers(data)
names = [attr+'_I', attr+'_F']
hf.attrs[names[0]] = data_I
hf.attrs[names[1]] = data_F
else:
hf.attrs[attr] = data
# If data is an array or list, create a dataset.
else:
try:
if isinstance(data[0], np.longdouble):
data_I, data_F= split_numbers(data)
names = [attr+'_I', attr+'_F']
hf.create_dataset(names[0], data=data_I)
hf.create_dataset(names[1], data=data_F)
else:
hf.create_dataset(attr, data=data)
except IndexError:
# To account for numpy arrays of type 'None' (0-d)
pass
def _retrieve_hdf5_object(filename):
"""
Retrieves an hdf5 format class object.
Parameters
----------
filename: str
The name of file with which object was saved
Returns
-------
data: dictionary
Loads the data from an hdf5 object file and returns
in dictionary format.
"""
with h5py.File(filename, 'r') as hf:
dset_keys = hf.keys()
attr_keys = hf.attrs.keys()
data = {}
dset_copy = list(dset_keys)[:]
for key in dset_keys:
# Make sure key hasn't been removed
if key in dset_copy:
# Longdouble case
if key[-2:] in ['_I', '_F']:
m_key = key[:-2]
# Add integer and float parts
data[m_key] = np.longdouble(hf[m_key+'_I'].value)
data[m_key] += np.longdouble(hf[m_key+'_F'].value)
# Remove integer and float parts from attributes
dset_copy.remove(m_key+'_I')
dset_copy.remove(m_key+'_F')
else:
data[key] = hf[key].value
attr_copy = list(attr_keys)[:]
for key in attr_keys:
# Make sure key hasn't been removed
if key in attr_copy:
# Longdouble case
if key[-2:] in ['_I', '_F']:
m_key = key[:-2]
# Add integer and float parts
data[m_key] = np.longdouble(hf.attrs[m_key+'_I'])
data[m_key] += np.longdouble(hf.attrs[m_key+'_F'])
# Remove integer and float parts from attributes
attr_copy.remove(m_key+'_I')
attr_copy.remove(m_key+'_F')
else:
data[key] = hf.attrs[key]
return data
def _save_ascii_object(object, filename, fmt="%.18e", **kwargs):
"""
Save an array to a text file.
Parameters
----------
object : numpy.ndarray
An array with the data to be saved
filename : str
The file name to save to
fmt : str or sequence of strs, optional
Use for formatting of columns. See `numpy.savetxt` documentation
for details.
Other Parameters
----------------
kwargs : any keyword argument taken by `numpy.savetxt`
"""
try:
np.savetxt(filename, object, fmt=fmt, **kwargs)
except TypeError:
raise Exception("Formatting of columns not recognized! Use 'fmt' option to "
"format columns including strings or mixed types!")
def _retrieve_ascii_object(filename, **kwargs):
"""
Helper function to retrieve ascii objects from file.
Uses astropy.Table for reading and storing the data.
Parameters
----------
filename : str
The name of the file with the data to be retrieved.
Additional Keyword Parameters
-----------------------------
usecols : {int | iterable}
The indices of the columns in the file to be returned.
By default, all columns will be returned
skiprows : int
The number of rows at the beginning to skip
By default, no rows will be skipped.
names : iterable
A list of column names to be attached to the columns.
By default, no column names are added, unless they are specified
in the file header and can be read by astropy.Table.read
automatically.
Returns
-------
data : astropy.Table object
An astropy.Table object with the data from the file
Example
-------
"""
if not isinstance(filename, six.string_types):
raise TypeError("filename must be string!")
if 'usecols' in list(kwargs.keys()):
if np.size(kwargs['usecols']) != 2:
raise ValueError("Need to define two columns")
usecols = kwargs["usecols"]
else:
usecols = None
if 'skiprows' in list(kwargs.keys()):
assert isinstance(kwargs["skiprows"], int)
skiprows = kwargs["skiprows"]
else:
skiprows = 0
if "names" in list(kwargs.keys()):
names = kwargs["names"]
else:
names = None
data = Table.read(filename, data_start=skiprows,
names=names, format="ascii")
if usecols is None:
return data
else:
colnames = np.array(data.colnames)
cols = colnames[usecols]
return data[cols]
def _save_fits_object(object, filename, **kwargs):
"""
Save a class object in fits format.
Parameters
----------
object: class instance
A class object whose attributes would be saved in a dictionary format.
filename: str
The file name to save to
Additional Keyword Parameters
-----------------------------
tnames: str iterable
The names of HDU tables. For instance, in case of eventlist,
tnames could be ['EVENTS', 'GTI']
colsassign: dictionary iterable
This indicates the tables to which specific columns should be
assigned. If this is None or if a column is not listed, it will
be assigned to the first table.
For example, [{'gti':'GTI'}] indicates that gti values should be
stored in GTI table.
"""
tables = []
if 'colsassign' in list(kwargs.keys()):
colsassign = kwargs['colsassign']
iscolsassigned = True
else:
iscolsassigned = False
if 'tnames' in list(kwargs.keys()):
tables = kwargs['tnames']
else:
tables = ['MAIN']
items = vars(object)
attrs = [name for name in items if items[name] is not None]
cols = []
hdrs = []
for t in tables:
cols.append([])
hdrs.append(fits.Header())
for attr in attrs:
data = items[attr]
# Get the index of table to which column belongs
if iscolsassigned and attr in colsassign.keys():
index = tables.index(colsassign[attr])
else:
index = 0
# If data is a single number, store as metadata
if _isattribute(data):
if isinstance(data, np.longdouble):
# Longdouble case. Split and save integer and float parts
data_I, data_F = split_numbers(data)
names = [attr+'_I', attr+'_F']
hdrs[index][names[0]] = data_I
hdrs[index][names[1]] = data_F
else:
# Normal case. Save as it is
hdrs[index][attr] = data
# If data is an array or list, insert as table column
else:
try:
if isinstance(data[0], np.longdouble):
# Longdouble case. Split and save integer and float parts
data_I, data_F= split_numbers(data)
names = [attr+'_I', attr+'_F']
cols[index].append(fits.Column(name=names[0],format='D', array=data_I))
cols[index].append(fits.Column(name=names[1],format='D', array=data_F))
else:
# Normal case. Save as it is
cols[index].append(fits.Column(name=attr,format=_lookup_format(data[0]),
array=data))
except IndexError:
# To account for numpy arrays of type 'None' (0-d)
pass
tbhdu = fits.HDUList()
# Create binary tables
for i in range(0, len(tables)):
if cols[i] != []:
tbhdu.append(fits.BinTableHDU.from_columns(cols[i], header=hdrs[i], name=tables[i]))
tbhdu.writeto(filename)
def _retrieve_fits_object(filename, **kwargs):
"""
Retrieves a fits format class object.
Parameters
----------
filename: str
The name of file with which object was saved
Additional Keyword Parameters
-----------------------------
cols: str iterable
The names of columns to extract from fits tables.
Returns
-------
data: dictionary
Loads the data from a fits object file and returns
in dictionary format.
"""
data = {}
if 'cols' in list(kwargs.keys()):
cols = [col.upper() for col in kwargs['cols']]
else:
cols = []
with fits.open(filename) as hdulist:
fits_cols = []
# Get columns from all tables
for i in range(1,len(hdulist)):
fits_cols.append([h.upper() for h in hdulist[i].data.names])
for c in cols:
for i in range(0, len(fits_cols)):
# .upper() is used because `fits` stores values in upper case
hdr_keys = [h.upper() for h in hdulist[i+1].header.keys()]
# Longdouble case. Check for columns
if c+'_I' in fits_cols[i] or c+'_F' in fits_cols[i]:
if c not in data.keys():
data[c] = np.longdouble(hdulist[i+1].data[c+'_I'])
data[c] += np.longdouble(hdulist[i+1].data[c+'_F'])
# Longdouble case. Check for header keys
if c+'_I' in hdr_keys or c+'_F' in hdr_keys:
if c not in data.keys():
data[c] = np.longdouble(hdulist[i+1].header[c+'_I'])
data[c] += np.longdouble(hdulist[i+1].header[c+'_F'])
# Normal case. Check for columns
elif c in fits_cols[i]:
data[c] = hdulist[i+1].data[c]
# Normal case. Check for header keys
elif c in hdr_keys:
data[c] = hdulist[i+1].header[c]
return data
def _lookup_format(var):
"""
Looks up relevant format in fits.
"""
lookup = {"<type 'int'>":"J", "<type 'float'>":"E",
"<type 'numpy.int64'>": "K", "<type 'numpy.float64'>":"D",
"<type 'numpy.float128'>":"D", "<type 'str'>":"30A",
"<type 'bool'>": "L"}
form = type(var)
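# Note: these lookup keys follow the Python 2 repr of types ("<type 'int'>"). On
# Python 3, str(type(var)) yields "<class 'int'>", so the lookup misses and the
# 'D' (double) fallback below is used.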
try:
return lookup[str(form)]
except KeyError:
# If an entry is not contained in lookup dictionary
return "D"
def _isattribute(data):
"""
Check if data is a single number or an array.
"""
if isinstance(data, np.ndarray) or isinstance(data, list):
return False
else:
return True
def write(input_, filename, format_='pickle', **kwargs):
"""
Write a class instance to file in the specified format. For parameters
depending on `format_`, see the individual writer functions.
Parameters
----------
input_: a class instance
The object to be stored.
filename: str
The name of the file to be created.
format_: str
The format in which to store file. Formats supported
are pickle, hdf5, ascii or fits.
"""
if format_ == 'pickle':
_save_pickle_object(input_, filename)
elif format_ == 'hdf5':
if _H5PY_INSTALLED:
_save_hdf5_object(input_, filename)
else:
utils.simon('h5py not installed, using pickle instead '
'to save object.')
_save_pickle_object(input_, filename.split('.')[0]+
'.pickle')
elif format_ == 'ascii':
_save_ascii_object(input_, filename, **kwargs)
elif format_ == 'fits':
_save_fits_object(input_, filename, **kwargs)
else:
utils.simon('Format not understood.')
def read(filename, format_='pickle', **kwargs):
"""
Return a pickled class instance.
Parameters
----------
filename: str
The name of the file to be retrieved.
format_: str
The format used to store file. Supported formats are
pickle, hdf5, ascii or fits.
Returns
-------
If format_ is 'pickle', a class object is returned.
If format_ is 'ascii', astropy.table object is returned.
If format_ is 'hdf5' or 'fits', a dictionary object is returned.
"""
if format_ == 'pickle':
return _retrieve_pickle_object(filename)
elif format_ == 'hdf5':
if _H5PY_INSTALLED:
return _retrieve_hdf5_object(filename)
else:
utils.simon('h5py not installed, cannot read an '
'hdf5 object.')
elif format_ == 'ascii':
return _retrieve_ascii_object(filename, **kwargs)
elif format_ == 'fits':
return _retrieve_fits_object(filename, **kwargs)
else:
utils.simon('Format not understood.')
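# Illustrative sketch (hypothetical container class and file name): round-trip a simple
# object through write()/read(). The pickle backend restores the full instance; the
# hdf5/fits backends would instead return a dictionary of the object's attributes.
class _ExampleContainer(object):
    def __init__(self):
        self.time = np.arange(5)
        self.counts = np.ones(5)
        self.mjdref = np.longdouble(55197.00076601852)

def _example_write_read_roundtrip(filename="io_example.pickle"):
    obj = _ExampleContainer()
    write(obj, filename, format_='pickle')
    restored = read(filename, format_='pickle')
    assert np.all(restored.counts == obj.counts)
    return restored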
def savefig(filename, **kwargs):
"""
Save a figure plotted by Matplotlib.
Note : This function is supposed to be used after the ``plot``
function. Otherwise it will save a blank image with no plot.
Parameters
----------
filename : str
The name of the image file. Extension must be specified in the
file name. For example filename with `.png` extension will give a
rasterized image while `.pdf` extension will give a vectorized
output.
kwargs : keyword arguments
Keyword arguments to be passed to ``savefig`` function of
``matplotlib.pyplot``. For example use `bbox_inches='tight'` to
remove the undesirable whitespace around the image.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for savefig()")
if not plt.fignum_exists(1):
utils.simon("use ``plot`` function to plot the image first and "
"then use ``savefig`` to save the figure.")
plt.savefig(filename, **kwargs)
| {
"repo_name": "pabell/stingray",
"path": "stingray/io.py",
"copies": "1",
"size": "24667",
"license": "mit",
"hash": -8732638213825773000,
"line_mean": 27.4509803922,
"line_max": 96,
"alpha_frac": 0.5620059188,
"autogenerated": false,
"ratio": 4.045096753033781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5107102671833781,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function)
import numpy as np
from collections import OrderedDict
import os
import re
from atom.api import (Atom, Str, observe, List, Int, Bool, Typed)
from skbeam.fluorescence import XrfElement as Element
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
from .fileio import save_fitdata_to_hdf
from .fit_spectrum import get_energy_bin_range
from ..core.map_processing import compute_selected_rois, TerminalProgressBar
import logging
logger = logging.getLogger(__name__)
class ROISettings(Atom):
"""
This class defines basic data structure for roi calculation.
Attributes
----------
prefix : str
prefix name
line_val : int
emission energy of the primary line, in eV
left_val : int
left boundary, in eV
right_val : int
right boundary, in eV
default_left : int
default_right : int
step : int
minimum step used when changing a value
show_plot : bool
option to plot
"""
prefix = Str()
line_val = Int()
left_val = Int()
right_val = Int()
default_left = Int()
default_right = Int()
step = Int(1)
show_plot = Bool(False)
@observe('left_val')
def _value_update(self, change):
if change['type'] == 'create':
return
logger.debug('left value is changed {}'.format(change))
@observe('show_plot')
def _plot_opt(self, change):
if change['type'] == 'create':
return
logger.debug('show plot is changed {}'.format(change))
class ROIModel(Atom):
"""
Control roi calculation according to given inputs.
Parameters
----------
parameters : Dict
parameter values used for fitting
data_dict : Dict
dict of 3D data
element_for_roi : str
inputs given by users
element_list_roi : list
list of elements after parsing
roi_dict : dict
dict of ROISettings object
enable_roi_computation : Bool
enables/disables the GUI element that starts ROI computation.
At least one element must be selected and all entries in the element
list must be valid before ROIs may be computed
result_folder : Str
directory which contains HDF5 file, in which results of processing are saved
hdf_path : Str
full path to the HDF5 file, in which results are saved
hdf_name : Str
name of the HDF file, in which results are saved
data_title : str
The title of the selected dataset (from ``fileio`` module)
data_title_base : str
The title changed for internal use (suffix is removed)
data_title_adjusted : str
The title changed for internal use (suffix 'sum' is removed if it exists)
suffix_name_roi : str
The suffix may have values 'sum', 'det1', 'det2' etc.
"""
# Reference to ParamModel object
param_model = Typed(object)
# Reference to FileIOModel object
io_model = Typed(object)
element_for_roi = Str()
element_list_roi = List()
roi_dict = OrderedDict()
enable_roi_computation = Bool(False)
subtract_background = Bool(False)
result_folder = Str()
hdf_path = Str()
hdf_name = Str()
data_title = Str()
data_title_base = Str()
data_title_adjusted = Str()
suffix_name_roi = Str()
def filename_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.hdf_name = change['value']
# output to .h5 file
self.hdf_path = os.path.join(self.result_folder, self.hdf_name)
def result_folder_changed(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.result_folder = change['value']
def data_title_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
change : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.data_title = change['value']
# It is assumed, that ``self.data_title`` was created in the ``fileio`` module
# and has dataset label attached to the end of it.
# The labels are ``sum``, ``det1``, ``det2`` etc. depending on the number
# of detector channels.
self.suffix_name_roi = self.data_title.split('_')[-1]
self.data_title_base = '_'.join(self.data_title.split("_")[:-1])
if self.suffix_name_roi == "sum":
# If suffix is 'sum', then remove the suffix
self.data_title_adjusted = self.data_title_base
else:
# Else keep the original title
self.data_title_adjusted = self.data_title
def __init__(self, *, param_model, io_model):
# Initialize with an empty string (no elements selected)
self.param_model = param_model
self.io_model = io_model
self.element_for_roi = ""
self.enable_roi_computation = False
@observe('element_for_roi')
def _update_element(self, change):
"""
Get element information as a string and parse it as a list.
This element information refers to the elements used for ROI setup.
"""
self.element_for_roi = self.element_for_roi.strip(' ')
# Remove leading and trailing ','
self.element_for_roi = self.element_for_roi.strip(',')
# Remove leading and trailing '.'
self.element_for_roi = self.element_for_roi.strip('.')
try:
if len(self.element_for_roi) == 0:
logger.debug('No elements entered.')
self.remove_all_roi()
self.element_list_roi = []
self.enable_roi_computation = False
return
elif ',' in self.element_for_roi:
element_list = [v.strip(' ') for v in self.element_for_roi.split(',')]
else:
element_list = [v for v in self.element_for_roi.split(' ')]
# with self.suppress_notifications():
# self.element_list_roi = element_list
logger.debug('Current elements for ROI sum are: {}'.format(element_list))
self.update_roi(element_list)
self.element_list_roi = element_list
self.enable_roi_computation = True
except Exception as ex:
logger.warning(f"Incorrect specification of element lines for ROI computation: {ex}")
self.enable_roi_computation = False
def select_elements_from_list(self, element_list):
self.element_for_roi = ', '.join(element_list)
def use_all_elements(self):
self.element_for_roi = ', '.join(K_LINE+L_LINE) # +M_LINE)
def clear_selected_elements(self):
self.element_for_roi = ""
def remove_all_roi(self):
self.roi_dict.clear()
def update_roi(self, element_list, std_ratio=4):
"""
Update elements without touching old ones.
Parameters
----------
element_list : list
list of elements for roi
std_ratio : float, optional
Define the range of roi for given element.
Notes
-----
The unit of energy is eV in this function. The reason is that
the SpinBox widget in Enaml only accepts integer input. To be updated.
"""
eline_list = K_LINE + L_LINE + M_LINE
for v in element_list:
if v in self.roi_dict:
continue
if v not in eline_list:
raise ValueError(f"Emission line {v} is unknown")
if '_K' in v:
temp = v.split('_')[0]
e = Element(temp)
val = int(e.emission_line['ka1']*1000)
elif '_L' in v:
temp = v.split('_')[0]
e = Element(temp)
val = int(e.emission_line['la1']*1000)
elif '_M' in v:
temp = v.split('_')[0]
e = Element(temp)
val = int(e.emission_line['ma1']*1000)
delta_v = int(self.get_sigma(val/1000)*1000)
roi = ROISettings(prefix=self.suffix_name_roi,
line_val=val,
left_val=val-delta_v*std_ratio,
right_val=val+delta_v*std_ratio,
default_left=val-delta_v*std_ratio,
default_right=val+delta_v*std_ratio,
step=1,
show_plot=False)
self.roi_dict.update({v: roi})
# remove old items not included in element_list
for k in self.roi_dict.copy().keys():
if k not in element_list:
del self.roi_dict[k]
def get_sigma(self, energy, epsilon=2.96):
"""
Calculate the std at given energy.
"""
temp_val = 2 * np.sqrt(2 * np.log(2))
return np.sqrt((self.param_model.param_new['fwhm_offset']['value']/temp_val)**2 +
energy*epsilon*self.param_model.param_new['fwhm_fanoprime']['value'])
def get_roi_sum(self):
"""
Save roi sum into a dict.
Returns
-------
dict
nested dict as output
"""
roi_result = {}
datav = self.io_model.data_sets[self.data_title].raw_data
logger.info(f"Computing ROIs for dataset {self.data_title} ...")
snip_param = {
"e_offset": self.param_model.param_new["e_offset"]["value"],
"e_linear": self.param_model.param_new["e_linear"]["value"],
"e_quadratic": self.param_model.param_new["e_quadratic"]["value"],
"b_width": self.param_model.param_new["non_fitting_values"]["background_width"]
}
n_bin_low, n_bin_high = get_energy_bin_range(
num_energy_bins=datav.shape[2],
low_e=self.param_model.param_new['non_fitting_values']['energy_bound_low']['value'],
high_e=self.param_model.param_new['non_fitting_values']['energy_bound_high']['value'],
e_offset=self.param_model.param_new['e_offset']['value'],
e_linear=self.param_model.param_new['e_linear']['value'])
# Prepare the 'roi_dict' parameter for computations
roi_dict = {_: (self.roi_dict[_].left_val/1000.0, self.roi_dict[_].right_val/1000.0)
for _ in self.roi_dict.keys()}
roi_dict_computed = compute_selected_rois(
data=datav,
data_sel_indices=(n_bin_low, n_bin_high),
roi_dict=roi_dict,
snip_param=snip_param,
use_snip=self.subtract_background,
chunk_pixels=5000,
n_chunks_min=4,
progress_bar=TerminalProgressBar("Computing ROIs: "),
client=None)
# Save ROI data to HDF5 file
self.saveROImap_to_hdf(roi_dict_computed)
# Add scalers to the ROI dataset, so that they can be selected from Image Wizard.
# We don't want to save scalers to the file, since they are already in the file.
# So we add scalers after data is saved.
scaler_key = f"{self.data_title_base}_scaler"
if scaler_key in self.io_model.img_dict:
roi_dict_computed.update(self.io_model.img_dict[scaler_key])
roi_result[f"{self.data_title_adjusted}_roi"] = roi_dict_computed
logger.info("ROI is computed.")
return roi_result
def saveROImap_to_hdf(self, data_dict_roi):
# Generate the path to computed ROIs in the HDF5 file
det_name = "detsum" # Assume that ROIs are computed using the sum of channels
# Search for channel name in the data title. Channels are named
# det1, det2, ... , i.e. 'det' followed by integer number.
# The channel name is always located at the end of the ``data_title``.
# If the channel name is found, then build the path using this name.
srch = re.search("det\d+$", self.data_title) # noqa: W605
if srch:
det_name = srch.group(0)
inner_path = f"xrfmap/{det_name}"
try:
save_fitdata_to_hdf(self.hdf_path, data_dict_roi, datapath=inner_path,
data_saveas='xrf_roi', dataname_saveas='xrf_roi_name')
except Exception as ex:
logger.error(f"Failed to save ROI data to file '{self.hdf_path}'\n"
f" Exception: {ex}")
else:
logger.info(f"ROI data was successfully saved to file '{self.hdf_name}'")
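# Illustrative sketch (hypothetical fitting parameters): how the ROISettings boundaries
# produced by ROIModel.update_roi() come about. For a K line the ROI is centered on the
# ka1 emission energy (stored in eV as an integer) and extends std_ratio peak widths to
# each side, with the width given by the same expression as ROIModel.get_sigma().
def _example_roi_boundaries(eline="Ca_K", std_ratio=4,
                            fwhm_offset=0.1, fwhm_fanoprime=0.0001, epsilon=2.96):
    e = Element(eline.split('_')[0])
    center_ev = int(e.emission_line['ka1'] * 1000)
    sigma_kev = np.sqrt((fwhm_offset / (2 * np.sqrt(2 * np.log(2)))) ** 2
                        + (center_ev / 1000) * epsilon * fwhm_fanoprime)
    delta_ev = int(sigma_kev * 1000)
    return center_ev - delta_ev * std_ratio, center_ev + delta_ev * std_ratio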
| {
"repo_name": "NSLS-II-HXN/PyXRF",
"path": "pyxrf/model/roi_model.py",
"copies": "1",
"size": "13120",
"license": "bsd-3-clause",
"hash": 8678859499701920000,
"line_mean": 33.801061008,
"line_max": 98,
"alpha_frac": 0.5728658537,
"autogenerated": false,
"ratio": 3.9269679736605805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9997001027667196,
"avg_score": 0.0005665599386769705,
"num_lines": 377
} |
from __future__ import (absolute_import, division,
print_function)
import numpy as np
import json
from collections import OrderedDict
import copy
import math
from atom.api import (Atom, Str, observe, Typed,
Int, Dict, List, Float, Bool)
from skbeam.fluorescence import XrfElement as Element
from skbeam.core.fitting.xrf_model import (ParamController,
compute_escape_peak, trim,
construct_linear_model,
linear_spectrum_fitting)
from skbeam.core.fitting.xrf_model import (K_LINE, L_LINE, M_LINE)
from ..core.map_processing import snip_method_numba
from ..core.xrf_utils import check_if_eline_supported, get_eline_parameters, get_element_atomic_number
from ..core.utils import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
import logging
logger = logging.getLogger(__name__)
bound_options = ['none', 'lohi', 'fixed', 'lo', 'hi']
fit_strategy_list = ['fit_with_tail', 'free_more',
'e_calibration', 'linear',
'adjust_element1', 'adjust_element2', 'adjust_element3']
autofit_param = ['e_offset', 'e_linear', 'fwhm_offset', 'fwhm_fanoprime']
class PreFitStatus(Atom):
"""
Data structure for pre fit analysis.
Attributes
----------
z : str
z number of element
energy : str
emission energy of the line
area : float
area of the peak
spectrum : array
spectrum of given element
status : bool
True as plot is visible
stat_copy : bool
copy of status
maxv : float
max value of a spectrum
norm : float
norm value with respect to the strongest peak
lbd_stat : bool
define plotting status under a threshold value
"""
z = Str()
energy = Str()
area = Float()
spectrum = Typed(np.ndarray)
status = Bool(False)
stat_copy = Bool(False)
maxv = Float()
norm = Float()
lbd_stat = Bool(False)
class ElementController(object):
"""
This class provides basic operations to rank elements, show elements,
calculate normalized intensities, etc.
"""
def __init__(self):
self.element_dict = OrderedDict()
def delete_item(self, k):
try:
del self.element_dict[k]
self.update_norm()
logger.debug('Item {} is deleted.'.format(k))
except KeyError:
pass
def order(self, option='z'):
"""
Order dict in different ways.
"""
if option == 'z':
self.element_dict = OrderedDict(sorted(
self.element_dict.items(), key=lambda t: t[1].z))
elif option == 'energy':
self.element_dict = OrderedDict(sorted(
self.element_dict.items(), key=lambda t: t[1].energy))
elif option == 'name':
self.element_dict = OrderedDict(sorted(
self.element_dict.items(), key=lambda t: t[0]))
elif option == 'maxv':
self.element_dict = OrderedDict(sorted(
self.element_dict.items(), key=lambda t: t[1].maxv, reverse=True))
def add_to_dict(self, dictv):
"""
This function adds elements to the dictionary, updating entries that already exist.
"""
self.element_dict.update(dictv)
logger.debug('Item {} is added.'.format(list(dictv.keys())))
self.update_norm()
def update_norm(self, threshv=0.0):
"""
Calculate the normalized intensity for each element peak.
Parameters
----------
threshv : float
No value is shown when smaller than the threshold value
"""
# Do nothing if no elements are selected
if not self.element_dict:
return
max_dict = np.max([v.maxv for v in self.element_dict.values()])
for v in self.element_dict.values():
v.norm = v.maxv/max_dict*100
v.lbd_stat = bool(v.norm > threshv)
# also delete smaller values
# there is some bugs in plotting when values < 0.0
self.delete_peaks_below_threshold(threshv=threshv)
def delete_all(self):
self.element_dict.clear()
def is_element_in_list(self, element_line_name):
"""
Check if element 'k' is in the list of selected elements
"""
if element_line_name in self.element_dict.keys():
return True
else:
return False
def get_element_list(self):
current_elements = [v for v
in self.element_dict.keys()
if (v.lower() != v)]
# logger.info('Current Elements for '
# 'fitting are {}'.format(current_elements))
return current_elements
def update_peak_ratio(self):
"""
If 'maxv' is modified, then the values of 'area' and 'spectrum' are adjusted accordingly:
(1) maximum of spectrum is set equal to 'maxv'; (2) 'area' is scaled proportionally.
It is important that only 'maxv' is changed before this function is called.
"""
for v in self.element_dict.values():
max_spectrum = np.max(v.spectrum)
if not math.isclose(max_spectrum, 0.0, abs_tol=1e-20):
factor = v.maxv / max_spectrum
else:
factor = 0.0
v.spectrum *= factor
v.area *= factor
self.update_norm()
def turn_on_all(self, option=True):
"""
Set plotting status on for all lines.
"""
if option is True:
_plot = option
else:
_plot = False
for v in self.element_dict.values():
v.status = _plot
def delete_peaks_below_threshold(self, threshv=0.1):
"""
Delete elements smaller than threshold value. Non element
peaks are not included.
"""
remove_list = []
non_element = ['compton', 'elastic', 'background']
for k, v in self.element_dict.items():
if math.isnan(v.norm) or (v.norm >= threshv) or (k in non_element):
continue
# We don't want to delete userpeaks or pileup peaks (they are always added manually).
if ("-" in k) or (k.lower().startswith("userpeak")):
continue
remove_list.append(k)
for name in remove_list:
del self.element_dict[name]
return remove_list
def delete_unselected_items(self):
remove_list = []
non_element = ['compton', 'elastic', 'background']
for k, v in self.element_dict.items():
if k in non_element:
continue
if v.status is False:
remove_list.append(k)
for name in remove_list:
del self.element_dict[name]
return remove_list
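# Illustrative sketch (synthetic spectra, not from a real fit): how ElementController
# ranks and normalizes a small set of PreFitStatus entries. 'compton' stays in the
# dictionary but is excluded from get_element_list() because its name is lower case.
def _example_element_controller():
    ec = ElementController()
    x = np.arange(100, dtype=float)
    for name, peak_max in [("Ca_K", 1000.0), ("Fe_K", 250.0), ("compton", 50.0)]:
        spectrum = peak_max * np.exp(-0.5 * ((x - 50.0) / 5.0) ** 2)
        ps = PreFitStatus(z="", energy="", area=float(spectrum.sum()),
                          spectrum=spectrum, maxv=peak_max,
                          norm=-1, status=True, lbd_stat=False)
        ec.add_to_dict({name: ps})
    ec.order(option='maxv')
    # update_norm() (called inside add_to_dict) gives the strongest line norm == 100.
    return ec.get_element_list(), {k: v.norm for k, v in ec.element_dict.items()}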
class ParamModel(Atom):
"""
The module used for maintain the set of fitting parameters.
Attributes
----------
parameters : `atom.Dict`
A list of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information.
data : array
1D array of spectrum
prefit_x : array
X axis with range defined by low and high limits.
param_d : dict
Parameters can be transferred into this dictionary.
param_new : dict
More information are saved, such as element position and width.
total_y : dict
Results from K lines
total_y_l : dict
Results from L lines
total_y_m : dict
Results from M lines
e_list : str
All elements used for fitting.
file_path : str
The path where file is saved.
element_list : list
The list of element lines selected for fitting
n_selected_elines_for_fitting : Int
The number of element lines selected for fitting
n_selected_pure_elines_for_fitting : Int
The number of element lines selected for fitting
excluding pileup peaks and user defined peaks.
Only 'pure' lines like Ca_K, K_K etc.
"""
# Reference to FileIOModel object
io_model = Typed(object)
default_parameters = Dict()
# data = Typed(np.ndarray)
prefit_x = Typed(object)
result_dict_names = List()
param_new = Dict()
total_y = Typed(object)
# total_l = Dict()
# total_m = Dict()
# total_pileup = Dict()
e_name = Str() # Element line name selected in combo box
add_element_intensity = Float(1000.0)
element_list = List()
# data_sets = Typed(OrderedDict)
EC = Typed(object)
x0 = Typed(np.ndarray)
y0 = Typed(np.ndarray)
max_area_dig = Int(2)
auto_fit_all = Dict()
bound_val = Float(1.0)
energy_bound_high_buf = Float(0.0)
energy_bound_low_buf = Float(0.0)
n_selected_elines_for_fitting = Int(0)
n_selected_pure_elines_for_fitting = Int(0)
parameters_changed_cb = List()
def __init__(self, *, default_parameters, io_model):
try:
self.io_model = io_model
self.default_parameters = default_parameters
self.param_new = copy.deepcopy(default_parameters)
# TODO: do we set 'element_list' as a list of keys of 'EC.element_dict'
self.element_list = get_element_list(self.param_new)
except ValueError:
logger.info('No default parameter files are chosen.')
self.EC = ElementController()
# The following line is part of the fix for automated updating of the energy bound
# in 'Automatic Element Finding' dialog box
self.energy_bound_high_buf = self.param_new['non_fitting_values']['energy_bound_high']['value']
self.energy_bound_low_buf = self.param_new['non_fitting_values']['energy_bound_low']['value']
def add_parameters_changed_cb(self, cb):
"""
Add a callback to the list of callback functions that are called after the parameters are updated.
"""
self.parameters_changed_cb.append(cb)
def remove_parameters_changed_cb(self, cb):
"""
Remove reference from the list of callback functions.
"""
self.parameters_changed_cb = [_ for _ in self.parameters_changed_cb if _ != cb]
def parameters_changed(self):
"""
Run callback functions in the list. This method is expected to be called after the parameters
are update to initiate necessary updates in the GUI.
"""
for cb in self.parameters_changed_cb:
cb()
def default_param_update(self, default_parameters):
"""
Replace the reference to the dictionary of default parameters.
Parameters
----------
default_parameters : dict
Reference to complete and valid dictionary of default parameters.
"""
self.default_parameters = default_parameters
# The following function is part of the fix for automated updating of the energy bound
# in 'Automatic Element Finding' dialog box
@observe('energy_bound_high_buf')
def _update_energy_bound_high_buf(self, change):
self.param_new['non_fitting_values']['energy_bound_high']['value'] = change['value']
self.define_range()
@observe('energy_bound_low_buf')
def _update_energy_bound_high_low(self, change):
self.param_new['non_fitting_values']['energy_bound_low']['value'] = change['value']
self.define_range()
def get_new_param_from_file(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
path to save the file
"""
with open(param_path, 'r') as json_data:
self.param_new = json.load(json_data)
self.create_spectrum_from_param_dict(reset=True)
logger.info('Elements read from file are: {}'.format(self.element_list))
def update_new_param(self, param, reset=True):
"""
Update the parameters based on the dictionary of parameters. Set ``reset=False``
if selection status of elemental lines should be kept.
Parameters
----------
param : dict
new dictionary of parameters
reset : boolean
reset (``True``) or clear (``False``) selection status of the element lines.
"""
self.param_new = param
self.create_spectrum_from_param_dict(reset=reset)
@observe('bound_val')
def _update_bound(self, change):
if change['type'] != 'create':
logger.info(f"Peaks with values lower than the threshold {self.bound_val} will be removed from the list.")
def define_range(self):
"""
Cut the x range according to the values defined in param_dict.
"""
if self.io_model.data is None:
return
lowv = self.param_new['non_fitting_values']['energy_bound_low']['value']
highv = self.param_new['non_fitting_values']['energy_bound_high']['value']
self.x0, self.y0 = define_range(self.io_model.data, lowv, highv,
self.param_new['e_offset']['value'],
self.param_new['e_linear']['value'])
def create_spectrum_from_param_dict(self, reset=True):
"""
Create spectrum profile based on the current set of parameters.
(``self.param_new`` -> ``self.EC`` and ``self.element_list``).
Typical use: update self.param_new, then call this function.
Set ``reset=False`` to keep selection status of the elemental lines.
Parameters
----------
reset : boolean
clear or keep status of the elemental lines (in ``self.EC``).
"""
param_dict = self.param_new
self.element_list = get_element_list(param_dict)
self.define_range()
self.prefit_x, pre_dict, area_dict = calculate_profile(self.x0,
self.y0,
param_dict,
self.element_list)
# add escape peak
if param_dict['non_fitting_values']['escape_ratio'] > 0:
pre_dict['escape'] = trim_escape_peak(self.io_model.data,
param_dict, len(self.y0))
temp_dict = OrderedDict()
for e in pre_dict.keys():
if e in ['background', 'escape']:
spectrum = pre_dict[e]
# summed spectrum here is not correct,
# as the interval is assumed as 1, not energy interval
# however area of background and escape is not used elsewhere, not important
area = np.sum(spectrum)
ps = PreFitStatus(z=get_Z(e), energy=get_energy(e),
area=float(area), spectrum=spectrum,
maxv=float(np.around(np.max(spectrum), self.max_area_dig)),
norm=-1, status=True, lbd_stat=False)
temp_dict[e] = ps
elif '-' in e: # pileup peaks
energy = self.get_pileup_peak_energy(e)
energy = f"{energy:.4f}"
spectrum = pre_dict[e]
area = area_dict[e]
ps = PreFitStatus(z=get_Z(e), energy=str(energy),
area=area, spectrum=spectrum,
maxv=np.around(np.max(spectrum), self.max_area_dig),
norm=-1, status=True, lbd_stat=False)
temp_dict[e] = ps
else:
ename = e.split('_')[0]
for k, v in param_dict.items():
energy = get_energy(e) # For all peaks except Userpeaks
if ename in k and 'area' in k:
spectrum = pre_dict[e]
area = area_dict[e]
elif ename == 'compton' and k == 'compton_amplitude':
spectrum = pre_dict[e]
area = area_dict[e]
elif ename == 'elastic' and k == 'coherent_sct_amplitude':
spectrum = pre_dict[e]
area = area_dict[e]
elif self.get_eline_name_category(ename) == "userpeak":
key = ename + "_delta_center"
energy = param_dict[key]["value"] + 5.0
energy = f"{energy:.4f}"
else:
continue
ps = PreFitStatus(z=get_Z(ename), energy=energy,
area=area, spectrum=spectrum,
maxv=np.around(np.max(spectrum), self.max_area_dig),
norm=-1, status=True, lbd_stat=False)
temp_dict[e] = ps
# Copy element status
if not reset:
element_status = {_: self.EC.element_dict[_].status for _ in self.EC.element_dict}
self.EC.delete_all()
self.EC.add_to_dict(temp_dict)
if not reset:
for key in self.EC.element_dict.keys():
if key in element_status:
self.EC.element_dict[key].status = element_status[key]
self.result_dict_names = list(self.EC.element_dict.keys())
def get_selected_eline_energy_fwhm(self, eline):
"""
Returns values of energy and fwhm for the peak 'eline' from the dictionary `self.param_new`.
The emission line must exist in the dictionary. Primarily intended for use
with user-defined peaks.
Parameters
----------
eline: str
emission line (e.g. Ca_K) or peak name (e.g. Userpeak2, V_Ka1-Co_Ka1)
"""
if eline not in self.EC.element_dict:
raise ValueError(f"Emission line '{eline}' is not in the list of selected lines.")
keys = self._generate_param_keys(eline)
if not keys["key_dcenter"] or not keys["key_dsigma"]:
raise ValueError(f"Failed to generate keys for the emission line '{eline}'.")
energy = self.param_new[keys["key_dcenter"]]["value"] + 5.0
dsigma = self.param_new[keys["key_dsigma"]]["value"]
fwhm = gaussian_sigma_to_fwhm(dsigma) + self._compute_fwhm_base(energy)
return energy, fwhm
def get_pileup_peak_energy(self, eline):
"""
Returns the energy (center) of pileup peak. Returns None if there is an error.
Parameters
----------
eline: str
Name of the pileup peak, e.g. V_Ka1-Co_Ka1
Returns
-------
float or None
Energy in keV or None
"""
incident_energy = self.param_new["coherent_sct_energy"]["value"]
try:
element_line1, element_line2 = eline.split('-')
e1_cen = get_eline_parameters(element_line1, incident_energy)["energy"]
e2_cen = get_eline_parameters(element_line2, incident_energy)["energy"]
en = e1_cen + e2_cen
except Exception:
en = None
return en
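    # Note: a pileup peak is registered when two photons reach the detector within its
    # resolving time and are counted as a single event, so its apparent energy is the sum
    # of the two component line energies. For example (approximate tabulated values),
    # V_Ka1 (~4.95 keV) + Co_Ka1 (~6.93 keV) produces a pileup peak near 11.88 keV.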
def add_peak_manual(self, userpeak_center=2.5):
"""
Manually add an emission line (or peak).
Parameters
----------
userpeak_center: float
Center of the user defined peak. Ignored if emission line other
than 'userpeak' is added
"""
self._manual_input(userpeak_center=userpeak_center)
self.update_name_list()
self.data_for_plot()
def remove_peak_manual(self):
"""
        Manually remove an emission line (or peak). The name of the emission line (peak)
        to be deleted must be written to `self.e_name` before calling the function.
"""
if self.e_name not in self.EC.element_dict:
msg = f"Line '{self.e_name}' is not in the list of selected lines,\n" \
f"therefore it can not be deleted from the list."
raise RuntimeError(msg)
# Update parameter list
self._remove_parameters_for_eline(self.e_name)
# Update EC
self.EC.delete_item(self.e_name)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def remove_elements_below_threshold(self, threshv=None):
if threshv is None:
threshv = self.bound_val
deleted_elements = self.EC.delete_peaks_below_threshold(threshv=threshv)
for eline in deleted_elements:
self._remove_parameters_for_eline(eline)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def remove_elements_unselected(self):
deleted_elements = self.EC.delete_unselected_items()
for eline in deleted_elements:
self._remove_parameters_for_eline(eline)
self.EC.update_peak_ratio()
self.update_name_list()
self.data_for_plot()
def _remove_parameters_for_eline(self, eline):
"""Remove entries for `eline` from the dictionary `self.param_new`"""
if self.get_eline_name_category(eline) == "pileup":
key_prefix = "pileup_" + self.e_name.replace("-", "_")
else:
key_prefix = eline
        # Comparing keys in lowercase is sufficient and more reliable.
key_prefix = key_prefix.lower()
keys_to_delete = [_ for _ in self.param_new.keys()
if _.lower().startswith(key_prefix)]
for key in keys_to_delete:
del self.param_new[key]
# Add name to the name list
_remove_element_from_list(eline, self.param_new)
def _manual_input(self, userpeak_center=2.5):
"""
Manually add an emission line (or peak).
Parameters
----------
userpeak_center: float
Center of the user defined peak. Ignored if emission line other
than 'userpeak' is added
"""
if self.e_name in self.EC.element_dict:
msg = f"Line '{self.e_name}' is in the list of selected lines. \n" \
f"Duplicate entries are not allowed."
raise RuntimeError(msg)
default_area = 1e2
        # Add the new data entry to the parameter dictionary. This operation is necessary for 'userpeak'
        # lines, because they need to be placed at a specific position (by setting the 'delta_center'
        # parameter), while regular element lines are placed at their default positions.
d_energy = userpeak_center - 5.0
# PC.params will contain a deepcopy of 'self.param_new' with the new line added
PC = ParamController(self.param_new, [self.e_name])
if self.get_eline_name_category(self.e_name) == "userpeak":
energy = userpeak_center
# Default values for 'delta_center'
dc = copy.deepcopy(PC.params[f"{self.e_name}_delta_center"])
# Modify the default values in the dictionary of parameters
PC.params[f"{self.e_name}_delta_center"]["value"] = d_energy
PC.params[f"{self.e_name}_delta_center"]["min"] = d_energy - (dc["value"] - dc["min"])
PC.params[f"{self.e_name}_delta_center"]["max"] = d_energy + (dc["max"] - dc["value"])
elif self.get_eline_name_category(self.e_name) == "pileup":
energy = self.get_pileup_peak_energy(self.e_name)
else:
energy = get_energy(self.e_name)
param_tmp = PC.params
param_tmp = create_full_dict(param_tmp, fit_strategy_list)
# Add name to the name list
_add_element_to_list(self.e_name, param_tmp)
# 'self.param_new' is used to provide 'hint' values for the model, but all active
# emission lines in 'elemental_lines' will be included in the model.
# The model will contain lines in 'elemental_lines', Compton and elastic
x, data_out, area_dict = calculate_profile(self.x0,
self.y0,
param_tmp,
elemental_lines=[self.e_name],
default_area=default_area)
# Check if element profile was calculated successfully.
# Calculation may fail if the selected line is not activated.
        # The calculation is performed using the ``xraylib`` library, so there is no
        # control over it.
if self.e_name not in data_out:
raise Exception(f"Failed to add the emission line '{self.e_name}': line is not activated.")
# If model was generated successfully (the emission line was successfully added), then
# make temporary parameters permanent
self.param_new = param_tmp
ratio_v = self.add_element_intensity / np.max(data_out[self.e_name])
ps = PreFitStatus(z=get_Z(self.e_name),
energy=energy if isinstance(energy, str) else f"{energy:.4f}",
area=area_dict[self.e_name]*ratio_v,
spectrum=data_out[self.e_name]*ratio_v,
maxv=self.add_element_intensity,
norm=-1,
status=True, # for plotting
lbd_stat=False)
self.EC.add_to_dict({self.e_name: ps})
self.EC.update_peak_ratio()
def _generate_param_keys(self, eline):
"""
        Returns the prefixes of the keys from the `param_new` dictionary based on the emission line name.
        If `eline` is an actual emission line (like Ca_K), then `key_dcenter` and `key_dsigma`
        point to the 'a1' line (Ca_ka1). The function has to be extended if access to specific lines is
        required. It is primarily intended for use with user-defined peaks.
"""
category = self.get_eline_name_category(eline)
if category == "pileup":
eline = eline.replace("-", "_")
key_area = "pileup_" + eline + "_area"
key_dcenter = "pileup_" + eline + "delta_center"
key_dsigma = "pileup_" + eline + "delta_sigma"
elif category == "eline":
eline = eline[:-1] + eline[-1].lower()
key_area = eline + "a1_area"
key_dcenter = eline + "a1_delta_center"
key_dsigma = eline + "a1_delta_sigma"
elif category == "userpeak":
key_area = eline + "_area"
key_dcenter = eline + "_delta_center"
key_dsigma = eline + "_delta_sigma"
elif eline == "compton":
key_area = eline + "_amplitude"
key_dcenter, key_dsigma = "", ""
else:
# No key exists (for "background", "escape", "elastic")
key_area, key_dcenter, key_dsigma = "", "", ""
return {"key_area": key_area, "key_dcenter": key_dcenter, "key_dsigma": key_dsigma}
def modify_peak_height(self, maxv_new):
"""
Modify the height of the emission line.
Parameters
----------
        maxv_new: float
New maximum value for the emission line `self.e_name`
"""
ignored_peaks = {"escape"}
if self.e_name in ignored_peaks:
msg = f"Height of the '{self.e_name}' peak can not be changed."
raise RuntimeError(msg)
if self.e_name not in self.EC.element_dict:
msg = f"Attempt to modify maximum value for the emission line '{self.e_name},'\n" \
f"which is not currently selected."
raise RuntimeError(msg)
key = self._generate_param_keys(self.e_name)["key_area"]
maxv_current = self.EC.element_dict[self.e_name].maxv
coef = maxv_new / maxv_current if maxv_current > 0 else 0
# Only 'maxv' needs to be updated.
self.EC.element_dict[self.e_name].maxv = maxv_new
# The following function updates 'spectrum', 'area' and 'norm'.
self.EC.update_peak_ratio()
# Some of the parameters are represented only in EC, not in 'self.param_new'.
# (particularly "background" and "elastic")
if key:
self.param_new[key]["value"] *= coef
def _compute_fwhm_base(self, energy):
        # Computes the base FWHM value (via 'sigma') from the default parameters and the peak
        # energy (for Userpeaks); it does not include user corrections to the FWHM.
        # If both the peak center (energy) and the FWHM are updated, the energy needs to be set
        # first, since it is used in the computation of ``fwhm_base``.
sigma = gaussian_fwhm_to_sigma(self.param_new["fwhm_offset"]["value"])
sigma_sqr = energy + 5.0 # center
sigma_sqr *= self.param_new["non_fitting_values"]["epsilon"] # epsilon
sigma_sqr *= self.param_new["fwhm_fanoprime"]["value"] # fanoprime
sigma_sqr += sigma * sigma # We have computed the expression under sqrt
sigma_total = np.sqrt(sigma_sqr)
return gaussian_sigma_to_fwhm(sigma_total)
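    # The computation above follows the standard XRF detector resolution model:
    #     sigma_total^2 = sigma_offset^2 + (energy + 5.0) * epsilon * fanoprime
    # where sigma_offset is derived from the 'fwhm_offset' parameter, 'energy + 5.0' is treated
    # as the peak center (5 keV is the reference energy used for user-defined peaks), epsilon and
    # fanoprime come from the parameter dictionary, and FWHM = 2*sqrt(2*ln 2)*sigma (~2.3548*sigma).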
def _update_userpeak_energy(self, eline, energy_new, fwhm_new):
"""
        Set the new center of the user-defined peak to 'energy_new'.
"""
# According to the accepted peak model, as energy of the peak center grows,
# the peak becomes wider. The most user friendly solution is to automatically
# increase FWHM as the peak moves along the energy axis to the right and
# decrease otherwise. So generally, the user should first place the peak
# center at the desired energy, and then adjust FWHM.
# We change energy, so we will have to change FWHM as well
# so before updating energy we will save the difference between
# the default (base) FWHM and the displayed FWHM
name_userpeak_dcenter = eline + "_delta_center"
old_energy = self.param_new[name_userpeak_dcenter]["value"]
# This difference represents the required change in fwhm
fwhm_difference = fwhm_new - self._compute_fwhm_base(old_energy)
# Now we change energy.
denergy = energy_new - 5.0
v_center = self.param_new[name_userpeak_dcenter]["value"]
v_max = self.param_new[name_userpeak_dcenter]["max"]
v_min = self.param_new[name_userpeak_dcenter]["min"]
# Keep the possible range for value change the same
self.param_new[name_userpeak_dcenter]["value"] = denergy
self.param_new[name_userpeak_dcenter]["max"] = denergy + v_max - v_center
self.param_new[name_userpeak_dcenter]["min"] = denergy - (v_center - v_min)
# The base value is updated now (since the energy has changed)
fwhm_base = self._compute_fwhm_base(energy_new)
fwhm = fwhm_difference + fwhm_base
return fwhm
def _update_userpeak_fwhm(self, eline, energy_new, fwhm_new):
name_userpeak_dsigma = eline + "_delta_sigma"
fwhm_base = self._compute_fwhm_base(energy_new)
dfwhm = fwhm_new - fwhm_base
dsigma = gaussian_fwhm_to_sigma(dfwhm)
v_center = self.param_new[name_userpeak_dsigma]["value"]
v_max = self.param_new[name_userpeak_dsigma]["max"]
v_min = self.param_new[name_userpeak_dsigma]["min"]
# Keep the possible range for value change the same
self.param_new[name_userpeak_dsigma]["value"] = dsigma
self.param_new[name_userpeak_dsigma]["max"] = dsigma + v_max - v_center
self.param_new[name_userpeak_dsigma]["min"] = dsigma - (v_center - v_min)
def _update_userpeak_energy_fwhm(self, eline, fwhm_new, energy_new):
"""
Update energy and fwhm of the user-defined peak 'eline'. The 'delta_center'
and 'delta_sigma' parameters in the `self.param_new` dictionary are updated.
`area` should be updated after call to this function. This function also
doesn't change entries in the `EC` dictionary.
"""
        # Ensure that the values are greater than some small positive value so that
        # there are no computational problems.
# Energy resolution for the existing beamlines is 0.01 keV, so 0.001 is small
# enough both for center energy and FWHM.
energy_small_value = 0.001
energy_new = max(energy_new, energy_small_value)
fwhm_new = max(fwhm_new, energy_small_value)
fwhm_new = self._update_userpeak_energy(eline, energy_new, fwhm_new)
self._update_userpeak_fwhm(eline, energy_new, fwhm_new)
def modify_userpeak_params(self, maxv_new, fwhm_new, energy_new):
if self.get_eline_name_category(self.e_name) != "userpeak":
msg = f"Hight and width can be modified only for a user defined peak.\n" \
f"The function was called for '{self.e_name}' peak"
raise RuntimeError(msg)
if self.e_name not in self.EC.element_dict:
msg = f"Attempt to modify maximum value for the emission line '{self.e_name},'\n" \
f"which is not currently selected."
raise RuntimeError(msg)
# Some checks of the input values
if maxv_new <= 0.0:
raise ValueError("Peak height must be a positive number greater than 0.001.")
if energy_new <= 0.0:
raise ValueError("User peak energy must be a positive number greater than 0.001.")
if fwhm_new <= 0:
raise ValueError("User peak FWHM must be a positive number.")
# Make sure that the energy of the user peak is within the selected fitting range
energy_bound_high = \
self.param_new["non_fitting_values"]["energy_bound_high"]["value"]
energy_bound_low = \
self.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if energy_new > energy_bound_high or energy_new < energy_bound_low:
raise ValueError("User peak energy is outside the selected range.")
# This updates 'delta_center' and 'delta_sigma' entries of the 'self.param_new' dictionary
self._update_userpeak_energy_fwhm(self.e_name, fwhm_new, energy_new)
default_area = 1e2
key = self._generate_param_keys(self.e_name)["key_area"]
# Set area to default area, change it later once the area is computed
self.param_new[key]["value"] = default_area
# 'self.param_new' is used to provide 'hint' values for the model, but all active
# emission lines in 'elemental_lines' will be included in the model.
# The model will contain lines in 'elemental_lines', Compton and elastic
x, data_out, area_dict = calculate_profile(self.x0,
self.y0,
self.param_new,
elemental_lines=[self.e_name],
default_area=default_area)
ratio_v = maxv_new / np.max(data_out[self.e_name])
area = area_dict[self.e_name] * ratio_v
self.param_new[key]["value"] = area
ps = PreFitStatus(z=get_Z(self.e_name),
energy=f"{energy_new:.4f}",
area=area,
spectrum=data_out[self.e_name]*ratio_v,
maxv=maxv_new,
norm=-1,
status=True, # for plotting
lbd_stat=False)
self.EC.element_dict[self.e_name] = ps
logger.debug(f"The parameters of the user defined peak. The new values:\n"
f" Energy: {energy_new} keV, FWHM: {fwhm_new}, Maximum: {maxv_new}\n")
def generate_pileup_peak_name(self, name1, name2):
"""
Returns name for the pileup peak. The element line with the lowest
energy is placed first in the name.
"""
incident_energy = self.param_new["coherent_sct_energy"]["value"]
e1 = get_eline_parameters(name1, incident_energy)["energy"]
e2 = get_eline_parameters(name2, incident_energy)["energy"]
if e1 > e2:
name1, name2 = name2, name1
return name1 + '-' + name2
def update_name_list(self):
"""
When result_dict_names change, the looper in enaml will update.
"""
# need to clean list first, in order to refresh the list in GUI
self.result_dict_names = []
self.result_dict_names = list(self.EC.element_dict.keys())
self.element_list = get_element_list(self.param_new)
peak_list = self.get_user_peak_list()
# Create the list of selected emission lines such as Ca_K, K_K, etc.
# No pileup or user peaks
pure_peak_list = [n for n in self.result_dict_names if n in peak_list]
self.n_selected_elines_for_fitting = len(self.result_dict_names)
self.n_selected_pure_elines_for_fitting = len(pure_peak_list)
logger.info(f"The update list of emission lines: {self.result_dict_names}")
def get_eline_name_category(self, eline_name):
"""
Returns the category to which `eline_name` belongs: `eline`, `userpeak`,
`pileup` or `other`.
Parameters
----------
eline_name: str
Name to be analyzed
Returns
-------
str
category: one of `("eline", "userpeak", "pileup" or "other")`
"""
if check_if_eline_supported(eline_name):
return "eline"
elif eline_name.lower().startswith("userpeak"):
return "userpeak"
elif "-" in eline_name: # This is specific to currently accepted naming convention
return "pileup"
else:
return "other"
def _sort_eline_list(self, element_list):
"""
Sort the list of elements
"""
names_elines, names_userpeaks, names_pileup_peaks, names_other = [], [], [], []
for name in element_list:
if self.get_eline_name_category(name) == "eline":
try:
z = get_element_atomic_number(name.split('_')[0])
except Exception:
z = 0
names_elines.append([name, z])
elif self.get_eline_name_category(name) == "userpeak":
names_userpeaks.append(name)
elif self.get_eline_name_category(name) == "pileup":
names_pileup_peaks.append(name)
else:
names_other.append(name)
names_elines.sort(key=lambda v: int(v[1])) # Sort by Z (atomic number)
names_elines = [_[0] for _ in names_elines] # Get rid of Z
names_userpeaks.sort()
names_pileup_peaks.sort()
names_other.sort()
return names_elines + names_userpeaks + names_pileup_peaks + names_other
def get_sorted_result_dict_names(self):
"""
The function returns the list of selected emission lines. The emission lines are
sorted in the following order: emission line names (sorted in the order of growing
atomic number Z), userpeaks (in alphabetic order), pileup peaks (in alphabetic order),
other peaks (in alphabetic order).
Returns
-------
list(str)
            the list of emission line names
"""
return self._sort_eline_list(self.result_dict_names)
def get_sorted_element_list(self):
"""
Returns sorted ``element_list``.
"""
return self._sort_eline_list(self.element_list)
def read_param_from_file(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
            path to the file with saved parameters
"""
with open(param_path, 'r') as json_data:
param = json.load(json_data)
self.update_new_param(param, reset=True)
def find_peak(self, *, threshv=0.1, elemental_lines=None):
"""
Run automatic peak finding, and save results as dict of object.
Parameters
----------
threshv: float
The value will not be shown on GUI if it is smaller than the threshold.
elemental_lines: list(str)
The list of elemental lines to find. If ``None``, then all supported
lines (K, L and M) are searched.
"""
        self.define_range()  # in case the energy calibration changes
self.prefit_x, out_dict, area_dict = linear_spectrum_fitting(self.x0,
self.y0,
self.param_new,
elemental_lines=elemental_lines)
logger.info('Energy range: {}, {}'.format(
self.param_new['non_fitting_values']['energy_bound_low']['value'],
self.param_new['non_fitting_values']['energy_bound_high']['value']))
prefit_dict = OrderedDict()
for k, v in out_dict.items():
ps = PreFitStatus(z=get_Z(k),
energy=get_energy(k),
area=area_dict[k],
spectrum=v,
maxv=np.around(np.max(v), self.max_area_dig),
norm=-1,
lbd_stat=False)
prefit_dict.update({k: ps})
logger.info('Automatic Peak Finding found elements as : {}'.format(
list(prefit_dict.keys())))
self.EC.delete_all()
self.EC.add_to_dict(prefit_dict)
self.create_full_param()
def create_full_param(self):
"""
        Update the current ``self.param_new`` with elements from ``self.EC`` (delete elements that
        are not in ``self.EC`` and update the existing elements).
"""
self.define_range()
        # We set 'self.element_list' from 'EC' (because we want to set the elements of
        # 'self.param_new' from 'EC.element_dict').
self.element_list = self.EC.get_element_list()
self.param_new = update_param_from_element(self.param_new, self.element_list)
element_temp = [e for e in self.element_list if len(e) <= 4]
pileup_temp = [e for e in self.element_list if '-' in e]
userpeak_temp = [e for e in self.element_list if 'user' in e.lower()]
# update area values in param_new according to results saved in ElementController
if len(self.EC.element_dict):
for k, v in self.param_new.items():
if 'area' in k:
if 'pileup' in k:
name_cut = k[7:-5] # remove pileup_ and _area
for p in pileup_temp:
if name_cut == p.replace('-', '_'):
v['value'] = self.EC.element_dict[p].area
elif 'user' in k.lower():
for p in userpeak_temp:
if p in k:
v['value'] = self.EC.element_dict[p].area
else:
for e in element_temp:
k_name, k_line, _ = k.split('_')
e_name, e_line = e.split('_')
if k_name == e_name and e_line.lower() == k_line[0]: # attention: S_k and As_k
v['value'] = self.EC.element_dict[e].area
if 'compton' in self.EC.element_dict:
self.param_new['compton_amplitude']['value'] = self.EC.element_dict['compton'].area
if 'coherent_sct_amplitude' in self.EC.element_dict:
self.param_new['coherent_sct_amplitude']['value'] = self.EC.element_dict['elastic'].area
if 'escape' in self.EC.element_dict:
self.param_new['non_fitting_values']['escape_ratio'] = (self.EC.element_dict['escape'].maxv
/ np.max(self.y0))
else:
self.param_new['non_fitting_values']['escape_ratio'] = 0.0
def data_for_plot(self):
"""
Save data in terms of K, L, M lines for plot.
"""
self.total_y = None
self.auto_fit_all = {}
for k, v in self.EC.element_dict.items():
if v.status is True:
self.auto_fit_all[k] = v.spectrum
if self.total_y is None:
self.total_y = np.array(v.spectrum) # need to copy an array
else:
self.total_y += v.spectrum
# for k, v in new_dict.items():
# if '-' in k: # pileup
# self.total_pileup[k] = self.EC.element_dict[k].spectrum
# elif 'K' in k:
# self.total_y[k] = self.EC.element_dict[k].spectrum
# elif 'L' in k:
# self.total_l[k] = self.EC.element_dict[k].spectrum
# elif 'M' in k:
# self.total_m[k] = self.EC.element_dict[k].spectrum
# else:
# self.total_y[k] = self.EC.element_dict[k].spectrum
def get_user_peak_list(self):
"""
Returns the list of element emission peaks
"""
return K_LINE + L_LINE + M_LINE
def get_selected_emission_line_data(self):
"""
Assembles the full emission line data for processing.
Returns
-------
list(dict)
Each dictionary includes the following data: "name" (e.g. Ca_ka1 etc.),
"area" (estimated peak area based on current fitting results), "ratio"
(ratio such as Ca_ka2/Ca_ka1)
"""
# Full list of supported emission lines (such as Ca_K)
supported_elines = self.get_user_peak_list()
# Parameter keys start with full emission line name (eg. Ca_ka1)
param_keys = list(self.param_new.keys())
incident_energy = self.param_new["coherent_sct_energy"]["value"]
full_line_list = []
for eline in self.EC.element_dict.keys():
if eline not in supported_elines:
continue
area = self.EC.element_dict[eline].area
lines = [_ for _ in param_keys if _.lower().startswith(eline.lower())]
lines = set(['_'.join(_.split('_')[:2]) for _ in lines])
for ln in lines:
eline_info = get_eline_parameters(ln, incident_energy)
data = {"name": ln, "area": area,
"ratio": eline_info["ratio"],
"energy": eline_info["energy"]}
full_line_list.append(data)
return full_line_list
def guess_pileup_peak_components(self, energy, tolerance=0.05):
"""
Provides a guess on components of pileup peak based on the set of selected emission lines,
and selected energy.
Parameters
----------
energy: float
Approximate (selected) energy of pileup peak location
tolerance: float
Allowed deviation of the sum of component energies from the selected energy, keV
Returns
-------
tuple(str, str, float)
Component emission lines (such as Ca_ka1, K_ka1 etc) and the energy of
the resulting pileup peak.
"""
line_data = self.get_selected_emission_line_data()
energy_min, energy_max = energy - tolerance, energy + tolerance
# Not very efficient algorithm, which tries all combinations of lines
pileup_components, areas = [], []
for n1, line1 in enumerate(line_data):
for n2 in range(n1, len(line_data)):
line2 = line_data[n2]
if energy_min < line1["energy"] + line2["energy"] < energy_max:
if line1 == line2:
area = line1["area"] * line1["ratio"]
else:
area = line1["area"] * line1["ratio"] + line2["area"] * line2["ratio"]
pileup_components.append((line1["name"], line2["name"],
line1["energy"] + line2["energy"]))
areas.append(area)
if len(areas):
# Find index with maximum area
n = areas.index(max(areas))
return pileup_components[n]
else:
return None
def save_as(file_path, data):
"""
Save full param dict into a file.
"""
with open(file_path, 'w') as outfile:
json.dump(data, outfile,
sort_keys=True, indent=4)
def define_range(data, low, high, a0, a1):
"""
    Cut the x range according to the values defined in param_dict.
Parameters
----------
data : array
raw spectrum
low : float
low bound in KeV
high : float
high bound in KeV
a0 : float
offset term of energy calibration
a1 : float
linear term of energy calibration
Returns
-------
x : array
trimmed channel number
y : array
trimmed spectrum according to x
"""
x = np.arange(data.size)
# ratio to transfer energy value back to channel value
# approx_ratio = 100
low_new = int(np.around((low - a0)/a1))
high_new = int(np.around((high - a0)/a1))
x0, y0 = trim(x, data, low_new, high_new)
return x0, y0
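# Illustrative sketch (not part of the original module; the numbers are hypothetical):
# the energy bounds are converted to channel numbers through the linear part of the
# calibration, E ~= a0 + a1 * channel, i.e. channel ~= (E - a0) / a1; the quadratic
# term is ignored here.
def _example_define_range():
    data = np.random.rand(4096)  # synthetic spectrum with 4096 channels
    # With a0 = 0.0 and a1 = 0.01 keV/channel the 1..10 keV window maps to
    # approximately channels 100..1000.
    x0, y0 = define_range(data, 1.0, 10.0, 0.0, 0.01)
    return x0, y0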
def calculate_profile(x, y, param, elemental_lines,
default_area=1e5):
"""
Calculate the spectrum profile based on given parameters. Use function
construct_linear_model from xrf_model.
Parameters
----------
x : array
channel array
y : array
spectrum intensity
param : dict
parameters
elemental_lines : list
such as Si_K, Pt_M
default_area : float
default value for the gaussian area of each element
Returns
-------
x : array
trimmed energy range
temp_d : dict
dict of array
area_dict : dict
dict of area for elements and other peaks
"""
# Need to use deepcopy here to avoid unexpected change on parameter dict
fitting_parameters = copy.deepcopy(param)
total_list, matv, area_dict = construct_linear_model(x,
fitting_parameters,
elemental_lines,
default_area=default_area)
temp_d = {k: v for (k, v) in zip(total_list, matv.transpose())}
# add background
bg = snip_method_numba(y,
fitting_parameters['e_offset']['value'],
fitting_parameters['e_linear']['value'],
fitting_parameters['e_quadratic']['value'],
width=fitting_parameters['non_fitting_values']['background_width'])
temp_d['background'] = bg
x_energy = (fitting_parameters['e_offset']['value']
+ fitting_parameters['e_linear']['value'] * x
+ fitting_parameters['e_quadratic']['value'] * x**2)
return x_energy, temp_d, area_dict
def trim_escape_peak(data, param_dict, y_size):
"""
Calculate escape peak within required range.
Parameters
----------
data : array
raw spectrum
param_dict : dict
parameters for fitting
y_size : int
the size of trimmed spectrum
Returns
-------
array :
trimmed escape peak spectrum
"""
ratio = param_dict['non_fitting_values']['escape_ratio']
xe, ye = compute_escape_peak(data, ratio, param_dict)
lowv = param_dict['non_fitting_values']['energy_bound_low']['value']
highv = param_dict['non_fitting_values']['energy_bound_high']['value']
xe, es_peak = trim(xe, ye, lowv, highv)
logger.info('Escape peak is considered with ratio {}'.format(ratio))
# align to the same length
if y_size > es_peak.size:
temp = es_peak
es_peak = np.zeros(y_size)
es_peak[:temp.size] = temp
else:
es_peak = es_peak[:y_size]
return es_peak
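# Note: for a Si-based detector (the typical case here) the escape peak of a line at
# energy E appears near E - 1.74 keV (the Si Ka energy), and 'escape_ratio' controls
# its relative amplitude. The escape spectrum is computed by 'compute_escape_peak' and
# then trimmed to the selected energy range above.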
def create_full_dict(param, name_list,
fixed_list=['adjust_element2', 'adjust_element3']):
"""
Create full param dict so each item has the same nested dict.
This is for GUI purpose only.
    Parameters
----------
param : dict
all parameters including element
name_list : list
strategy names
Returns
-------
    dict : updated parameter dictionary (an entry added for each strategy name)
"""
param_new = copy.deepcopy(param)
for n in name_list:
for k, v in param_new.items():
if k == 'non_fitting_values':
continue
if n not in v:
# enforce newly created parameter to be fixed
# for strategy in fixed_list
if n in fixed_list:
v.update({n: 'fixed'})
else:
v.update({n: v['bound_type']})
return param_new
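# Illustrative sketch (toy dictionary, not part of the original module): shows how
# create_full_dict() adds an entry for every fit strategy to each parameter, marking
# the strategies from 'fixed_list' as 'fixed'.
def _example_create_full_dict():
    toy_param = {
        'non_fitting_values': {},
        'Ca_ka1_area': {'value': 100.0, 'bound_type': 'none'},
    }
    full = create_full_dict(toy_param, ['linear', 'free_more', 'adjust_element2'])
    # full['Ca_ka1_area'] now also contains
    #     {'linear': 'none', 'free_more': 'none', 'adjust_element2': 'fixed'}
    return full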
def strip_line(ename):
return ename.split('_')[0]
def get_Z(ename):
"""
Return element's Z number.
Parameters
----------
ename : str
element name
Returns
-------
int or None
element Z number
"""
non_element = ['compton', 'elastic', 'background', 'escape']
if (ename.lower() in non_element) or '-' in ename or 'user' in ename.lower():
return '-'
else:
e = Element(strip_line(ename))
return str(e.Z)
def get_energy(ename):
"""
Return energy value by given elemental name. Need to consider non-elemental case.
"""
non_element = ['compton', 'elastic', 'background', 'escape']
if (ename.lower() in non_element) or 'user' in ename.lower():
return '-'
else:
e = Element(strip_line(ename))
ename = ename.lower()
if '_k' in ename:
energy = e.emission_line['ka1']
elif '_l' in ename:
energy = e.emission_line['la1']
elif '_m' in ename:
energy = e.emission_line['ma1']
return str(np.around(energy, 4))
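# Example usage (not part of the original module): get_Z("Ca_K") returns '20' and
# get_Z("compton") returns '-'; get_energy("Ca_K") returns the tabulated Ca Ka1 energy
# as a string (approximately 3.69 keV), while get_energy("escape") returns '-'.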
def get_element_list(param):
""" Extract elements from parameter class object """
element_list = param['non_fitting_values']['element_list']
element_list = [e.strip(' ') for e in element_list.split(',')]
# Unfortunately, "".split(",") returns [""] instead of [], but we need [] !!!
if element_list == [""]:
element_list = []
return element_list
def _set_element_list(element_list, param):
element_list = ", ".join(element_list)
param['non_fitting_values']['element_list'] = element_list
def _add_element_to_list(eline, param):
""" Add element to list in the parameter class object """
elist = get_element_list(param)
elist_lower = [_.lower() for _ in elist]
if eline.lower() not in elist_lower:
elist.append(eline)
_set_element_list(elist, param)
def _remove_element_from_list(eline, param):
""" Add element to list in the parameter class object """
elist = get_element_list(param)
elist_lower = [_.lower() for _ in elist]
try:
index = elist_lower.index(eline.lower())
elist.pop(index)
_set_element_list(elist, param)
except ValueError:
pass
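# Illustrative sketch (toy dictionary, not part of the original module): the element
# list is stored as a single comma-separated string inside 'non_fitting_values', and
# the helpers above keep it in sync (comparison is case-insensitive).
def _example_element_list_helpers():
    toy_param = {'non_fitting_values': {'element_list': 'Ca_K, Fe_K'}}
    _add_element_to_list('Userpeak1', toy_param)   # -> 'Ca_K, Fe_K, Userpeak1'
    _remove_element_from_list('fe_k', toy_param)   # -> 'Ca_K, Userpeak1'
    return get_element_list(toy_param)             # -> ['Ca_K', 'Userpeak1']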
def param_dict_cleaner(parameter, element_list):
"""
    Make sure param only contains elements from element_list.
Parameters
----------
parameter : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict :
new param dict containing given elements
"""
param = copy.deepcopy(parameter)
param_new = {}
elines_list = [e for e in element_list if len(e) <= 4]
elines_lower = [e.lower() for e in elines_list]
pileup_list = [e for e in element_list if '-' in e]
userpeak_list = [e for e in element_list if 'user' in e.lower()]
new_element_set = set()
for k, v in param.items():
if k == 'non_fitting_values' or k == k.lower():
param_new.update({k: v})
elif 'pileup' in k:
for p in pileup_list:
if p.replace('-', '_') in k:
param_new.update({k: v})
new_element_set.add(p)
elif 'user' in k.lower():
for p in userpeak_list:
if p in k:
param_new.update({k: v})
new_element_set.add(p)
elif k[:3].lower() in elines_lower:
index = elines_lower.index(k[:3].lower())
param_new.update({k: v})
new_element_set.add(elines_list[index])
elif k[:4].lower() in elines_lower:
index = elines_lower.index(k[:4].lower())
param_new.update({k: v})
new_element_set.add(elines_list[index])
new_element_list = list(new_element_set)
_set_element_list(new_element_list, param_new)
return param_new
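# Illustrative sketch (toy dictionary, not part of the original module): only the keys
# that belong to the elements in 'element_list', plus all-lowercase global parameters
# and 'non_fitting_values', survive the cleaning.
def _example_param_dict_cleaner():
    toy_param = {
        'non_fitting_values': {'element_list': ''},
        'coherent_sct_energy': {'value': 12.0},
        'Ca_ka1_area': {'value': 100.0},
        'Fe_ka1_area': {'value': 50.0},
    }
    cleaned = param_dict_cleaner(toy_param, ['Ca_K'])
    # 'Fe_ka1_area' is dropped; 'Ca_ka1_area', 'coherent_sct_energy' and
    # 'non_fitting_values' are kept, and the element list becomes 'Ca_K'.
    return cleaned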
def update_param_from_element(param, element_list):
"""
Clean up or extend param according to new element list.
Parameters
----------
param : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict
"""
param_new = copy.deepcopy(param)
for eline in element_list:
_add_element_to_list(eline, param_new)
# first remove some items not included in element_list
param_new = param_dict_cleaner(param_new,
element_list)
# second add some elements to a full parameter dict
# create full parameter list including elements
PC = ParamController(param_new, element_list)
# parameter values not updated based on param_new, so redo it
param_temp = PC.params
# enforce adjust_element area to be fixed,
# while bound_type in xrf_model is defined as none for area
# for k, v in param_temp.items():
# if '_area' in k:
# v['bound_type'] = 'fixed'
for k, v in param_temp.items():
if k == 'non_fitting_values':
continue
if k in param_new:
param_temp[k] = param_new[k]
# for k1 in v.keys():
# v[k1] = param_new[k][k1]
param_new = param_temp
# to create full param dict, for GUI only
param_new = create_full_dict(param_new, fit_strategy_list)
return param_new
| {
"repo_name": "NSLS-II-HXN/PyXRF",
"path": "pyxrf/model/parameters.py",
"copies": "1",
"size": "58600",
"license": "bsd-3-clause",
"hash": 6706441899772911000,
"line_mean": 36.6123234917,
"line_max": 112,
"alpha_frac": 0.5575767918,
"autogenerated": false,
"ratio": 3.92025689055392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9973406452595317,
"avg_score": 0.0008854459517206488,
"num_lines": 1558
} |
from __future__ import (absolute_import, division,
print_function)
import numpy as np
import math
from functools import partial
from matplotlib.figure import Figure, Axes
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes
from atom.api import Atom, Str, Typed, Int, List, Dict, Bool
from ..core.utils import normalize_data_by_scaler, grid_interpolate
from ..core.xrf_utils import check_if_eline_supported
from .draw_image import DrawImageAdvanced
import logging
logger = logging.getLogger(__name__)
np.seterr(divide='ignore', invalid='ignore') # turn off warning on invalid division
class DrawImageRGB(Atom):
"""
This class draws RGB image.
Attributes
----------
fig : object
matplotlib Figure
ax : Axes
The `Axes` object of matplotlib
    ax_r : Axes
        The `Axes` object to add the red-channel artist to
    ax_g : Axes
        The `Axes` object to add the green-channel artist to
    ax_b : Axes
        The `Axes` object to add the blue-channel artist to
file_name : str
stat_dict : dict
determine which image to show
img_dict : dict
multiple data sets to plot, such as fit data, or roi data
img_dict_keys : list
data_opt : int
index to show which data is chosen to plot
dict_to_plot : dict
selected data dict to plot, i.e., fitting data or roi is selected
map_keys : list
keys of dict_to_plot
color_opt : str
orange or gray plot
scaler_norm_dict : dict
scaler normalization data, from img_dict
scaler_items : list
keys of scaler_norm_dict
scaler_name_index : int
index to select on GUI level
scaler_data : None or numpy
selected scaler data
pixel_or_pos : int
index to choose plot with pixel (== 0) or with positions (== 1)
grid_interpolate: bool
choose to interpolate 2D image in terms of x,y or not
plot_all : Bool
to control plot all of the data or not
"""
# Reference to FileIOMOdel
io_model = Typed(object)
fig = Typed(Figure)
ax = Typed(Axes)
ax_r = Typed(Axes)
ax_g = Typed(Axes)
ax_b = Typed(Axes)
data_opt = Int(0)
img_title = Str()
# plot_opt = Int(0)
# plot_item = Str()
dict_to_plot = Dict()
map_keys = List()
scaler_norm_dict = Dict()
scaler_items = List()
scaler_name_index = Int()
scaler_data = Typed(object)
pixel_or_pos = Int(0)
grid_interpolate = Bool(False)
plot_all = Bool(False)
limit_dict = Dict()
range_dict = Dict()
# 'stat_dict' is legacy from 'DrawImageAdvanced' class. It is not used here,
# but it may be repurposed in the future if multicolor map presentation is developed
stat_dict = Dict()
# Contains dictionary {"red": <key>, "green": <key>, "blue": <key>}, key is the key
# from the dictionary 'self.dict_to_plot' or None.
rgb_keys = List(str) # The list of keys in 'rgb_dict'
rgb_dict = Dict()
# Reference used to access some fields
img_model_adv = Typed(DrawImageAdvanced)
    # Variable that indicates whether quantitative normalization should be applied to the data.
# Associated with 'Quantitative' checkbox
quantitative_normalization = Bool(False)
rgb_name_list = List() # List of names for RGB channels printed on the plot
rgb_limit = Dict()
name_not_scalable = List()
def __init__(self, *, io_model, img_model_adv):
self.io_model = io_model
self.img_model_adv = img_model_adv
self.fig = plt.figure(figsize=(3, 2))
self.rgb_name_list = ['R', 'G', 'B']
# Do not apply scaler norm on following data
self.name_not_scalable = ['r2_adjust', 'r_factor', 'alive', 'dead', 'elapsed_time',
'scaler_alive', 'i0_time', 'time', 'time_diff', 'dwell_time']
self.rgb_keys = ["red", "green", "blue"]
self._init_rgb_dict()
def img_dict_updated(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
changed : bool
True - 'io_model.img_dict` was updated, False - ignore
"""
if change['value']:
self.select_dataset(self.io_model.img_dict_default_selected_item)
self.init_plot_status()
def init_plot_status(self):
# init of pos values
self.set_pixel_or_pos(0)
# init of scaler for normalization
self.scaler_name_index = 0
scaler_groups = [v for v in list(self.io_model.img_dict.keys()) if 'scaler' in v]
if len(scaler_groups) > 0:
# self.scaler_group_name = scaler_groups[0]
self.scaler_norm_dict = self.io_model.img_dict[scaler_groups[0]]
# for GUI purpose only
self.scaler_items = []
self.scaler_items = list(self.scaler_norm_dict.keys())
self.scaler_items.sort()
self.scaler_data = None
logger.debug('The following groups are included for RGB image display: {}'.
format(self.io_model.img_dict_keys))
self.show_image()
def select_dataset(self, dataset_index):
"""
        Select a dataset. Meaning of the index: 0 - no dataset is selected,
        1, 2, ... - the dataset with index 0, 1, ... is selected.
Parameters
----------
dataset_index: int
index of the selected dataset
"""
self.data_opt = dataset_index
try:
if self.data_opt == 0:
self.dict_to_plot = {}
self.map_keys.clear()
self.init_limits_and_stat()
self.img_title = ''
elif self.data_opt > 0:
plot_item = self._get_current_plot_item()
self.img_title = str(plot_item)
self.dict_to_plot = self.io_model.img_dict[plot_item]
# for GUI purpose only
self.set_map_keys()
self.init_limits_and_stat()
# Select the first 3 entries for RGB display
for n in range(min(len(self.rgb_keys), len(self.map_keys))):
self.rgb_dict[self.rgb_keys[n]] = self.map_keys[n]
except IndexError:
pass
# Redraw image
self.show_image()
def set_map_keys(self):
"""
Create sorted list of map keys. The list starts with sorted sequence of emission lines,
followed by the sorted list of scalers and other maps.
"""
self.map_keys.clear()
# The key to use with 'img_dict', the name of the current dataset.
plot_item = self._get_current_plot_item()
keys_unsorted = list(self.io_model.img_dict[plot_item].keys())
if len(keys_unsorted) != len(set(keys_unsorted)):
logger.warning("DrawImageAdvanced:set_map_keys(): repeated keys "
f"in the dictionary 'img_dict': {keys_unsorted}")
keys_elines, keys_scalers = [], []
for key in keys_unsorted:
if check_if_eline_supported(key): # Check if 'key' is an emission line (such as "Ca_K")
keys_elines.append(key)
else:
keys_scalers.append(key)
keys_elines.sort()
keys_scalers.sort()
self.map_keys = keys_elines + keys_scalers
def get_selected_scaler_name(self):
if self.scaler_name_index == 0:
return None
else:
return self.scaler_items[self.scaler_name_index - 1]
def set_scaler_index(self, scaler_index):
self.scaler_name_index = scaler_index
if self.scaler_name_index == 0:
self.scaler_data = None
else:
try:
scaler_name = self.scaler_items[self.scaler_name_index-1]
except IndexError:
scaler_name = None
if scaler_name:
self.scaler_data = self.scaler_norm_dict[scaler_name]
logger.info('Use scaler data to normalize, '
'and the shape of scaler data is {}, '
'with (low, high) as ({}, {})'.format(self.scaler_data.shape,
np.min(self.scaler_data),
np.max(self.scaler_data)))
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def _get_current_plot_item(self):
"""Get the key for the current plot item (use in dictionary 'img_dict')"""
return self.io_model.img_dict_keys[self.data_opt - 1]
def set_pixel_or_pos(self, pixel_or_pos):
self.pixel_or_pos = pixel_or_pos
self.show_image()
def set_grid_interpolate(self, grid_interpolate):
self.grid_interpolate = grid_interpolate
self.show_image()
def enable_quantitative_normalization(self, enable):
"""
Enable/Disable quantitative normalization.
Parameters
----------
enable: bool
Enable quantitative normalization if True, disable if False.
"""
self.quantitative_normalization = bool(enable)
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def set_low_high_value(self):
"""Set default low and high values based on normalization for each image.
"""
# do not apply scaler norm on not scalable data
self.range_dict.clear()
for data_name in self.dict_to_plot.keys():
if self.quantitative_normalization:
# Quantitative normalization
data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[data_name],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=data_name,
name_not_scalable=self.name_not_scalable)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(data_in=self.dict_to_plot[data_name],
scaler=self.scaler_data,
data_name=data_name,
name_not_scalable=self.name_not_scalable)
lowv, highv = np.min(data_arr), np.max(data_arr)
# Create some 'artificially' small range in case the array is constant
if lowv == highv:
lowv -= 0.005
highv += 0.005
self.range_dict[data_name] = {'low': lowv, 'low_default': lowv,
'high': highv, 'high_default': highv}
def reset_low_high(self, name):
"""Reset low and high value to default based on normalization.
"""
self.range_dict[name]['low'] = self.range_dict[name]['low_default']
self.range_dict[name]['high'] = self.range_dict[name]['high_default']
self.limit_dict[name]['low'] = 0.0
self.limit_dict[name]['high'] = 100.0
self.show_image()
def _init_rgb_dict(self):
self.rgb_dict = {_: None for _ in self.rgb_keys}
def init_limits_and_stat(self):
"""
Set plotting status for all the 2D images.
Note: 'self.map_keys' must be updated before calling this function!
"""
self.stat_dict.clear()
self.stat_dict = {k: False for k in self.map_keys}
self._init_rgb_dict()
self.limit_dict.clear()
self.limit_dict = {k: {'low': 0.0, 'high': 100.0} for k in self.map_keys}
self.set_low_high_value()
def preprocess_data(self):
"""
Normalize data or prepare for linear/log plot.
"""
selected_data = []
selected_name = []
quant_norm_applied = []
rgb_color_to_keys = self.get_rgb_items_for_plot()
for data_key in rgb_color_to_keys.values():
if data_key in self.dict_to_plot:
selected_name.append(data_key)
if self.scaler_data is not None:
if np.count_nonzero(self.scaler_data) == 0:
logger.warning('scaler is zero - scaling was not applied')
elif len(self.scaler_data[self.scaler_data == 0]) > 0:
logger.warning('scaler data has zero values')
for i, k in enumerate(selected_name):
q_norm_applied = False
if self.quantitative_normalization:
# Quantitative normalization
data_arr, q_norm_applied = \
self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[k],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=k,
name_not_scalable=self.name_not_scalable)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(data_in=self.dict_to_plot[k],
scaler=self.scaler_data,
data_name=k,
name_not_scalable=self.name_not_scalable)
selected_data.append(data_arr)
quant_norm_applied.append(q_norm_applied)
return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied
def show_image(self):
# Don't plot the image if dictionary is empty (causes a lot of issues)
if not self.io_model.img_dict:
return
self.fig.clf()
self.ax = self.fig.add_subplot(111)
self.ax_r, self.ax_g, self.ax_b = make_rgb_axes(self.ax, pad=0.02)
        # Check if positions data is available. Positions data may be unavailable
        # (not recorded in the HDF5 file) if the experiment has not been completed.
        # While the data from the completed part of the experiment may still be used,
        # plotting vs. x-y coordinates or scatter plots may not be displayed.
positions_data_available = False
if 'positions' in self.io_model.img_dict.keys():
positions_data_available = True
# Create local copy of self.pixel_or_pos and self.grid_interpolate
pixel_or_pos_local = self.pixel_or_pos
grid_interpolate_local = self.grid_interpolate
# Disable plotting vs x-y coordinates if 'positions' data is not available
if not positions_data_available:
if pixel_or_pos_local:
pixel_or_pos_local = 0 # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Plotting vs. x-y coordinates is disabled")
if grid_interpolate_local:
grid_interpolate_local = False # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Interpolation is disabled.")
selected_data, selected_names, rgb_color_to_keys, quant_norm_applied = self.preprocess_data()
selected_data = np.asarray(selected_data)
# Hide unused axes
if rgb_color_to_keys["red"] is None:
self.ax_r.set_visible(False)
if rgb_color_to_keys["green"] is None:
self.ax_g.set_visible(False)
if rgb_color_to_keys["blue"] is None:
self.ax_b.set_visible(False)
if selected_data.ndim != 3:
# There is no data to display. Hide the last axis and exit
self.ax.set_visible(False)
return
def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
"""
Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
always equal and fit the maximum of the ranges for x and y values:
max(abs(x_max-x_min), abs(y_max-y_min))
The ranges are set so that the data is always centered in the middle of the ranges
Parameters
----------
x_min, x_max, y_min, y_max : float
lower and upper boundaries of the x and y values
Returns
-------
x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
lower and upper boundaries of the x- and y-axes ranges
"""
x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
if x_range > y_range:
y_center = (y_max + y_min) / 2
y_axis_max = y_center + x_range / 2
y_axis_min = y_center - x_range / 2
else:
x_center = (x_max + x_min) / 2
x_axis_max = x_center + y_range / 2
x_axis_min = x_center - y_range / 2
return x_axis_min, x_axis_max, y_axis_min, y_axis_max
def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):
"""
Adjust the range for plotted data along one axis (x or y). The adjusted range is
applied to the 'extent' attribute of imshow(). The adjusted range is always greater
            than 'axis_range * min_ratio'. Such a transformation has no physical meaning
            and is performed for aesthetic reasons: stretching the image presentation of
            a scan with only a few lines (1-3) greatly improves the visibility of the data.
Parameters
----------
c_min, c_max : float
boundaries of the data range (along x or y axis)
c_axis_range : float
range presented along the same axis
Returns
-------
            c_min, c_max : float
adjusted boundaries of the data range
"""
c_range = c_max - c_min
if c_range < c_axis_range * min_ratio:
c_center = (c_max + c_min) / 2
c_new_range = c_axis_range * min_ratio
c_min = c_center - c_new_range / 2
c_max = c_center + c_new_range / 2
return c_min, c_max
if pixel_or_pos_local:
# xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
# min(self.y_pos), max(self.y_pos)
x_pos_2D = self.io_model.img_dict['positions']['x_pos']
y_pos_2D = self.io_model.img_dict['positions']['y_pos']
xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
# Adjust the direction of each axis depending on the direction in which encoder values changed
# during the experiment. Data is plotted starting from the upper-right corner of the plot
if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min
else:
if selected_data.ndim == 3:
# Set equal ranges for the axes data
yd, xd = selected_data.shape[1], selected_data.shape[2]
xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
# Select minimum range for data
if (yd <= math.floor(xd / 100)) and (xd >= 200):
yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
if (xd <= math.floor(yd / 100)) and (yd >= 200):
xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
name_r, data_r, limits_r = "", None, {"low": 0, "high": 100.0}
name_g, data_g, limits_g = "", None, {"low": 0, "high": 100.0}
name_b, data_b, limits_b = "", None, {"low": 0, "high": 100.0}
for color, name in rgb_color_to_keys.items():
if name:
try:
ind = selected_names.index(name)
name_label = name
if quant_norm_applied[ind]:
name_label += " - Q" # Add suffix to name if quantitative normalization was applied
if color == "red":
name_r, data_r = name_label, selected_data[ind]
limits_r = self.limit_dict[name]
elif color == "green":
name_g, data_g = name_label, selected_data[ind]
limits_g = self.limit_dict[name]
elif color == "blue":
name_b, data_b = name_label, selected_data[ind]
limits_b = self.limit_dict[name]
except ValueError:
pass
def _norm_data(data):
"""
Normalize data between (0, 1).
Parameters
----------
data : 2D array
"""
if data is None:
return data
data_min = np.min(data)
c_norm = np.max(data) - data_min
return (data - data_min) / c_norm if (c_norm != 0) else (data - data_min)
def _stretch_range(data_in, v_low, v_high):
            # 'data_in' is already normalized, so that the values are in the range 0..1
# v_low, v_high are in the range 0..100
if data_in is None:
return data_in
if (v_low <= 0) and (v_high >= 100):
return data_in
if v_high - v_low < 1: # This should not happen in practice, but check just in case
v_high = v_low + 1
v_low, v_high = v_low / 100.0, v_high / 100.0
c = 1.0 / (v_high - v_low)
data_out = (data_in - v_low) * c
return np.clip(data_out, 0, 1.0)
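            # Example: with v_low = 20 and v_high = 80, a normalized value of 0.5 maps to
            # (0.5 - 0.2) / (0.8 - 0.2) = 0.5, while values below 0.2 or above 0.8 are
            # clipped to 0 and 1 respectively.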
# Interpolate non-uniformly spaced data to uniform grid
if grid_interpolate_local:
data_r, _, _ = grid_interpolate(data_r,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
data_g, _, _ = grid_interpolate(data_g,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
data_b, _, _ = grid_interpolate(data_b,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
# The dictionaries 'rgb_view_data' and 'pos_limits' are used for monitoring
# the map values at current cursor positions.
rgb_view_data = {_: None for _ in self.rgb_keys}
if data_r is not None:
rgb_view_data["red"] = data_r
if data_g is not None:
rgb_view_data["green"] = data_g
if data_b is not None:
rgb_view_data["blue"] = data_b
pos_limits = {"x_low": xd_min, "x_high": xd_max,
"y_low": yd_min, "y_high": yd_max}
# Normalize data
data_r_norm = _norm_data(data_r)
data_g_norm = _norm_data(data_g)
data_b_norm = _norm_data(data_b)
data_r_norm = _stretch_range(data_r_norm, limits_r['low'], limits_r['high'])
data_g_norm = _stretch_range(data_g_norm, limits_g['low'], limits_g['high'])
data_b_norm = _stretch_range(data_b_norm, limits_b['low'], limits_b['high'])
R, G, B, RGB = make_cube(data_r_norm, data_g_norm, data_b_norm)
red_patch = mpatches.Patch(color='red', label=name_r)
green_patch = mpatches.Patch(color='green', label=name_g)
blue_patch = mpatches.Patch(color='blue', label=name_b)
def format_coord_func(x, y, *, pixel_or_pos, rgb_color_to_keys,
rgb_view_data, pos_limits, colors=None):
x0, y0 = pos_limits["x_low"], pos_limits["y_low"]
if colors is None:
colors = list(rgb_color_to_keys.keys())
s = ""
for n, color in enumerate(self.rgb_keys):
if (color not in colors) or (rgb_color_to_keys[color] is None) \
or (rgb_view_data[color] is None):
continue
map = rgb_view_data[color]
ny, nx = map.shape
dy = (pos_limits["y_high"] - y0) / ny if ny else 0
dx = (pos_limits["x_high"] - x0) / nx if nx else 0
cy = 1 / dy if dy else 1
cx = 1 / dx if dx else 1
x_pixel = math.floor((x - x0) * cx)
y_pixel = math.floor((y - y0) * cy)
if (0 <= x_pixel < nx) and (0 <= y_pixel < ny):
# The following line is extremely useful for debugging the feature. Keep it.
# s += f" <b>{rgb_color_to_keys[color]}</b>: {x_pixel} {y_pixel}"
s += f" <b>{rgb_color_to_keys[color]}</b>: {map[y_pixel, x_pixel]:.5g}"
s = " - " + s if s else s # Add dash if something is to be printed
if pixel_or_pos:
# Spatial coordinates (double)
s_coord = f"({x:.5g}, {y:.5g})"
else:
# Pixel coordinates (int)
s_coord = f"({int(x)}, {int(y)})"
return s_coord + s
format_coord = partial(format_coord_func,
pixel_or_pos=pixel_or_pos_local,
rgb_color_to_keys=rgb_color_to_keys,
rgb_view_data=rgb_view_data,
pos_limits=pos_limits)
def format_cursor_data(data):
return "" # Print nothing
kwargs = dict(origin="upper", interpolation="nearest", extent=(xd_min, xd_max, yd_max, yd_min))
if RGB is not None:
img = self.ax.imshow(RGB, **kwargs)
self.ax.format_coord = format_coord
img.format_cursor_data = format_cursor_data
self.ax.set_xlim(xd_axis_min, xd_axis_max)
self.ax.set_ylim(yd_axis_max, yd_axis_min)
if R is not None:
img = self.ax_r.imshow(R, **kwargs)
self.ax_r.set_xlim(xd_axis_min, xd_axis_max)
self.ax_r.set_ylim(yd_axis_max, yd_axis_min)
format_coord_r = partial(format_coord, colors=["red"])
self.ax_r.format_coord = format_coord_r
img.format_cursor_data = format_cursor_data
if G is not None:
img = self.ax_g.imshow(G, **kwargs)
self.ax_g.set_xlim(xd_axis_min, xd_axis_max)
self.ax_g.set_ylim(yd_axis_max, yd_axis_min)
format_coord_g = partial(format_coord, colors=["green"])
self.ax_g.format_coord = format_coord_g
img.format_cursor_data = format_cursor_data
if B is not None:
img = self.ax_b.imshow(B, **kwargs)
self.ax_b.set_xlim(xd_axis_min, xd_axis_max)
self.ax_b.set_ylim(yd_axis_max, yd_axis_min)
format_coord_b = partial(format_coord, colors=["blue"])
self.ax_b.format_coord = format_coord_b
img.format_cursor_data = format_cursor_data
self.ax.xaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
self.ax.yaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
plt.setp(self.ax_r.get_xticklabels(), visible=False)
plt.setp(self.ax_r.get_yticklabels(), visible=False)
plt.setp(self.ax_g.get_xticklabels(), visible=False)
plt.setp(self.ax_g.get_yticklabels(), visible=False)
plt.setp(self.ax_b.get_xticklabels(), visible=False)
plt.setp(self.ax_b.get_yticklabels(), visible=False)
# self.ax_r.set_xticklabels([])
# self.ax_r.set_yticklabels([])
# sb_x = 38
# sb_y = 46
# sb_length = 10
# sb_height = 1
# ax.add_patch(mpatches.Rectangle(( sb_x, sb_y), sb_length, sb_height, color='white'))
# ax.text(sb_x + sb_length /2, sb_y - 1*sb_height, '100 nm', color='w', ha='center',
# va='bottom', backgroundcolor='black', fontsize=18)
self.ax_r.legend(loc="upper left", bbox_to_anchor=(1.1, 0), frameon=False,
handles=[red_patch, green_patch, blue_patch], mode="expand")
# self.fig.tight_layout(pad=4.0, w_pad=0.8, h_pad=0.8)
# self.fig.tight_layout()
# self.fig.canvas.draw_idle()
# self.fig.suptitle(self.img_title, fontsize=20)
self.fig.canvas.draw_idle()
def get_selected_items_for_plot(self):
"""Collect the selected items for plotting.
"""
# We want the dictionary to be sorted the same way as 'map_keys'
sdict = self.stat_dict
selected_keys = [_ for _ in self.map_keys if (_ in sdict) and (sdict[_] is True)]
return selected_keys
def get_rgb_items_for_plot(self):
# Verify integrity of the dictionary
if len(self.rgb_dict) != 3:
raise ValueError("DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' has "
f"{len(self.rgb_dict)} elements. Expected number of elements: "
f"{len(self.rgb_keys)}.")
for key in self.rgb_keys:
if key not in self.rgb_dict:
raise ValueError("DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' is "
f"incomplete or contains incorrect set of keys: {list(self.rgb_dict.keys())}. "
f"Expected keys: {self.rgb_keys}: ")
return self.rgb_dict
def make_cube(r, g, b):
"""
Create 3D array for rgb image.
Parameters
----------
r : 2D array
g : 2D array
b : 2D array
"""
if r is None and g is None and b is None:
logger.error("'make_cube': 'r', 'g' and 'b' input arrays are all None")
        R = G = B = RGB = None
else:
for arr in [r, g, b]:
if arr is not None:
ny, nx = arr.shape
break
R = np.zeros([ny, nx, 3])
R[:, :, 0] = r
G = np.zeros_like(R)
G[:, :, 1] = g
B = np.zeros_like(R)
B[:, :, 2] = b
RGB = R + G + B
return R, G, B, RGB
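# Illustrative sketch (synthetic arrays, not part of the original module): make_cube()
# places each channel into its own (ny, nx, 3) cube and RGB is their sum, suitable for
# passing directly to matplotlib's imshow(). The input arrays are assumed to be already
# normalized to the 0..1 range.
def _example_make_cube():
    r = np.linspace(0, 1, 6).reshape(2, 3)   # red channel
    g = np.zeros((2, 3))                     # green channel switched off
    b = np.full((2, 3), 0.5)                 # constant blue channel
    R, G, B, RGB = make_cube(r, g, b)
    # R, G, B and RGB all have shape (2, 3, 3)
    return RGB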
| {
"repo_name": "NSLS-II-HXN/PyXRF",
"path": "pyxrf/model/draw_image_rgb.py",
"copies": "1",
"size": "31402",
"license": "bsd-3-clause",
"hash": -2354020982847958500,
"line_mean": 39.3106546855,
"line_max": 112,
"alpha_frac": 0.538054901,
"autogenerated": false,
"ratio": 3.6492736780941315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46873285790941316,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
exec(open("ground.py").read())
#
import matplotlib.pyplot as plt
import skimage
from skimage import data
# mine
import hamiltonian
import diffeo
import sde
from utility import *
#
def run(dict,canload=0):
import os.path
if 'fname' in dict:
filename=dict['fname']
else:
print("No filename given")
exit(1)
print("\n",filename,"============================================","\n")
plt.ion()
G=hamiltonian.GaussGreen(dict['ell'],0)
no_steps=dict['no_steps']
if isinstance(no_steps, list):
ODE=diffeo.MultiShoot(G,1)
else:
ODE=diffeo.Shoot(G) # use single shooting
#
ODE.set_no_steps(dict['no_steps'])
ODE.set_landmarks(dict['landmarks_n'])
ODE.solve()
# plot warp
plot_setup()
plt.axis('equal')
ODE.plot_warp()
plt.savefig(filename+'warp.pdf',bbox_inches='tight')
#
# load test image
#image = data.checkerboard()
#image = data.coffee()
image = mpl.image.imread('51h2011_1_0.jpg')
#
# apply warp to image
new_image=ODE.warp(image)
# plotting and save to png
plot_setup()
plt.close()
fig, (ax0, ax1) = plt.subplots(1, 2,
figsize=(8, 3),
sharex=True,
sharey=True,
subplot_kw={'adjustable':'box-forced'}
)
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
mpl.image.imsave('hand_alt.jpg',image,cmap=plt.cm.gray)
ax0.axis('off')
#
ax1.imshow(new_image, cmap=plt.cm.gray, interpolation='none')
mpl.image.imsave('hand_new.jpg',new_image,cmap=plt.cm.gray)
ax1.axis('off')
plt.show()
print("finished.")
####################################################################
if __name__ == "__main__":
# do this
plt.ion()
noise_var=0.00
#dict=exp1(noise_var)
dict=exp2(noise_var)
# dict=exp4(noise_var)
# dict=exp4(noise_var)
run(dict)
| {
"repo_name": "tonyshardlow/reg_sde",
"path": "run_hand.py",
"copies": "1",
"size": "2142",
"license": "mit",
"hash": 2205955652354660600,
"line_mean": 27.56,
"line_max": 76,
"alpha_frac": 0.5317460317,
"autogenerated": false,
"ratio": 3.3838862559241707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9344329461119723,
"avg_score": 0.014260565300889547,
"num_lines": 75
} |