text
stringlengths 0
1.05M
| meta
dict |
---|---|
from __future__ import absolute_import, division, print_function
from time import ctime
from ...external.qt.QtGui import QWidget, QTableWidgetItem
from ... import core
from ..qtutil import load_ui
class MessageWidget(QWidget, core.hub.HubListener):
""" This simple class displays all messages broadcast
by a hub. It is mainly intended for debugging """
def __init__(self):
QWidget.__init__(self)
self.ui = load_ui('messagewidget', self)
self.ui.messageTable.setColumnCount(3)
labels = ['Time', 'Message', 'Sender']
self.ui.messageTable.setHorizontalHeaderLabels(labels)
def register_to_hub(self, hub):
# catch all messages
hub.subscribe(self, core.message.Message,
handler=self.process_message,
filter=lambda x: True)
def process_message(self, message):
row = self.ui.messageTable.rowCount() * 0
self.ui.messageTable.insertRow(0)
tm = QTableWidgetItem(ctime().split()[3])
typ = str(type(message)).split("'")[-2].split('.')[-1]
mtyp = QTableWidgetItem(typ)
typ = str(type(message.sender)).split("'")[-2].split('.')[-1]
sender = QTableWidgetItem(typ)
self.ui.messageTable.setItem(row, 0, tm)
self.ui.messageTable.setItem(row, 1, mtyp)
self.ui.messageTable.setItem(row, 2, sender)
self.ui.messageTable.resizeColumnsToContents()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/message_widget.py",
"copies": "1",
"size": "1441",
"license": "bsd-3-clause",
"hash": -3913619630665721300,
"line_mean": 35.9487179487,
"line_max": 69,
"alpha_frac": 0.6370575989,
"autogenerated": false,
"ratio": 3.8021108179419527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9939168416841953,
"avg_score": 0,
"num_lines": 39
} |
from __future__ import absolute_import, division, print_function
from toolz import compose, identity
from datashape.predicates import isscalar
from ..expr import (Expr, ElemWise, Selection, Sort, Apply, Distinct, Join,
By, Label, Summary, by, ReLabel, Like, Reduction, Head)
from .python import (compute, rrowfunc, rowfunc, ElemWise, pair_assemble,
reduce_by_funcs, binops, like_regex_predicate)
from ..expr.broadcast import broadcast_collect
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from .core import compute, compute_up
__all__ = ['RDD', 'pyspark', 'SparkContext']
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
import py4j
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
try:
from pyspark.sql import DataFrame as SparkDataFrame
except ImportError:
SparkDataFrame = Dummy
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
SparkDataFrame = Dummy
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(Expr, RDD)
def optimize(expr, seq):
return broadcast_collect(expr)
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
func = rowfunc(t)
return rdd.map(func)
@dispatch(Selection, RDD)
def compute_up(t, rdd, **kwargs):
predicate = optimize(t.predicate, rdd)
predicate = rrowfunc(predicate, t._child)
return rdd.filter(predicate)
rdd_reductions = {
reductions.sum: RDD.sum,
reductions.min: RDD.min,
reductions.max: RDD.max,
reductions.count: RDD.count,
reductions.mean: RDD.mean,
reductions.var: RDD.variance,
reductions.std: RDD.stdev,
reductions.nunique: compose(RDD.count, RDD.distinct)}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
return rdd_reductions[type(t)](rdd)
def istruthy(x):
return not not x
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.take(t.n)
@dispatch(Apply, RDD)
def compute_up(t, rdd, **kwargs):
if t._splittable:
return rdd.mapPartitions(t.func)
else:
raise NotImplementedError("Can only apply splittable functions."
"To apply function to each partition add "
"splittable=True kwarg to call to apply. "
"t.apply(func, dshape, splittable=True)")
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
if isinstance(t.key, (str, unicode, tuple, list)):
key = rowfunc(t._child[t.key])
else:
key = optimize(t.key, rdd)
key = rrowfunc(key, t._child)
return (rdd.keyBy(key)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.distinct()
def jgetattr(data, attr, default=None):
"""Spark's API doesn't properly implement the ``getattr`` interface, so
we work around it.
"""
try:
return getattr(data, attr, default)
except py4j.protocol.Py4JJavaError:
return default
@compute_up.register(Join, SparkDataFrame, SparkDataFrame)
def spark_df_join(t, lhs, rhs, **kwargs):
# ship to rdd land, so we can reuse handling of combining records code
rdd = compute_up(t, lhs.rdd, rhs.rdd, **kwargs)
return lhs.sql_ctx.createDataFrame(rdd)
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
on_right = rowfunc(t.rhs[t.on_right])
lhs = lhs.keyBy(on_left)
rhs = rhs.keyBy(on_right)
how = t.how
if how == 'inner':
joiner = lhs.join
elif how == 'left':
joiner = lhs.leftOuterJoin
elif how == 'right':
joiner = lhs.rightOuterJoin
elif how == 'outer':
joiner = lhs.fullOuterJoin
else:
raise ValueError("Invalid join type %r, must be one of "
"{'inner', 'left', 'right', 'outer'}" % how)
rdd = joiner(rhs)
assemble = pair_assemble(t)
return rdd.map(lambda x: assemble(x[1]))
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
grouper = optimize(t.grouper, rdd)
apply = optimize(t.apply, rdd)
t = by(grouper, apply)
if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
(isinstance(t.apply, Summary) and
builtins.all(type(val) in binops for val in t.apply.values))):
grouper, binop, combiner, initial = reduce_by_funcs(t)
if isscalar(t.grouper.dshape.measure):
keyfunc = lambda x: (x,)
else:
keyfunc = identity
if isscalar(t.apply.dshape.measure):
valfunc = lambda x: (x,)
else:
valfunc = identity
unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
create = lambda v: binop(initial, v)
return (rdd.keyBy(grouper)
.combineByKey(create, binop, combiner)
.map(unpack))
else:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
rdd = rdd.cache()
return tuple(compute(value, {t._child: rdd}) for value in t.values)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
predicate = like_regex_predicate(t)
return rdd.filter(predicate)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compute/spark.py",
"copies": "1",
"size": "6081",
"license": "bsd-3-clause",
"hash": 4312741346220125700,
"line_mean": 26.6409090909,
"line_max": 78,
"alpha_frac": 0.6327906594,
"autogenerated": false,
"ratio": 3.420134983127109,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45529256425271086,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import compose, identity
from datashape.predicates import isscalar
from ..expr import (
Expr, ElemWise, SimpleSelection, Sort, Apply, Distinct, Join, By, Label,
Summary, by, ReLabel, Like, Reduction, Head
)
from .python import (compute, rrowfunc, rowfunc, pair_assemble,
reduce_by_funcs, binops, like_regex_predicate)
from ..expr.broadcast import broadcast_collect
from ..expr.optimize import simple_selections
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from .core import compute_up
import py4j
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
__all__ = ['RDD', 'pyspark', 'SparkContext']
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(Expr, RDD)
def optimize(expr, seq):
return simple_selections(broadcast_collect(expr))
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
func = rowfunc(t)
return rdd.map(func)
@dispatch(SimpleSelection, RDD)
def compute_up(t, rdd, **kwargs):
predicate = optimize(t.predicate, rdd)
predicate = rrowfunc(predicate, t._child)
return rdd.filter(predicate)
rdd_reductions = {
reductions.sum: RDD.sum,
reductions.min: RDD.min,
reductions.max: RDD.max,
reductions.count: RDD.count,
reductions.mean: RDD.mean,
reductions.var: RDD.variance,
reductions.std: RDD.stdev,
reductions.nunique: compose(RDD.count, RDD.distinct)
}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
return rdd_reductions[type(t)](rdd)
def istruthy(x):
return not not x
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.take(t.n)
@dispatch(Apply, RDD)
def compute_up(t, rdd, **kwargs):
if t._splittable:
return rdd.mapPartitions(t.func)
else:
raise NotImplementedError("Can only apply splittable functions."
"To apply function to each partition add "
"splittable=True kwarg to call to apply. "
"t.apply(func, dshape, splittable=True)")
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
if isinstance(t.key, (str, unicode, tuple, list)):
key = rowfunc(t._child[t.key])
else:
key = optimize(t.key, rdd)
key = rrowfunc(key, t._child)
return (rdd.keyBy(key)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
if t.on:
raise NotImplementedError(
'spark backend cannot specify what columns to distinct on'
)
return rdd.distinct()
def jgetattr(data, attr, default=None):
"""Spark's API doesn't properly implement the ``getattr`` interface, so
we work around it.
"""
try:
return getattr(data, attr, default)
except py4j.protocol.Py4JJavaError:
return default
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
on_right = rowfunc(t.rhs[t.on_right])
lhs = lhs.keyBy(on_left)
rhs = rhs.keyBy(on_right)
how = t.how
if how == 'inner':
joiner = lhs.join
elif how == 'left':
joiner = lhs.leftOuterJoin
elif how == 'right':
joiner = lhs.rightOuterJoin
elif how == 'outer':
joiner = lhs.fullOuterJoin
else:
raise ValueError("Invalid join type %r, must be one of "
"{'inner', 'left', 'right', 'outer'}" % how)
rdd = joiner(rhs)
assemble = pair_assemble(t)
return rdd.map(lambda x: assemble(x[1]))
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
grouper = optimize(t.grouper, rdd)
apply = optimize(t.apply, rdd)
t = by(grouper, apply)
if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
(isinstance(t.apply, Summary) and
builtins.all(type(val) in binops for val in t.apply.values))):
grouper, binop, combiner, initial = reduce_by_funcs(t)
if isscalar(t.grouper.dshape.measure):
keyfunc = lambda x: (x,)
else:
keyfunc = identity
if isscalar(t.apply.dshape.measure):
valfunc = lambda x: (x,)
else:
valfunc = identity
unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
create = lambda v: binop(initial, v)
return (rdd.keyBy(grouper)
.combineByKey(create, binop, combiner)
.map(unpack))
else:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
rdd = rdd.cache()
return tuple(compute(value, {t._child: rdd}) for value in t.values)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
predicate = like_regex_predicate(t)
return rdd.filter(predicate)
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/compute/spark.py",
"copies": "1",
"size": "5580",
"license": "bsd-3-clause",
"hash": 7889645674172198000,
"line_mean": 26.0873786408,
"line_max": 78,
"alpha_frac": 0.632078853,
"autogenerated": false,
"ratio": 3.387978142076503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4520056995076503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import curry, concat, first
from multipledispatch import MDNotImplementedError
from ..expr import (Selection, Head, Field, Projection, ReLabel, ElemWise,
Arithmetic, Broadcast, Symbol, Summary, Like, Sort, Apply,
Reduction, symbol, IsIn, Label, Distinct, By, Slice, Expr,
path)
from ..expr.optimize import lean_projection
from ..expr.split import split
from ..partition import partitions
from .core import compute
from .pmap import get_default_pmap
from collections import Iterator, Iterable
import datashape
import bcolz
import numpy as np
import pandas as pd
from ..dispatch import dispatch
from odo import into
__all__ = ['bcolz']
COMFORTABLE_MEMORY_SIZE = 1e9
@dispatch(Expr, (bcolz.ctable, bcolz.carray))
def optimize(expr, _):
return lean_projection(expr) # This is handled in pre_compute
@dispatch(Expr, (bcolz.ctable, bcolz.carray))
def pre_compute(expr, data, scope=None, **kwargs):
return data
@dispatch((bcolz.carray, bcolz.ctable))
def discover(data):
return datashape.from_numpy(data.shape, data.dtype)
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, (bcolz.ctable, bcolz.carray))
def compute_down(expr, data, **kwargs):
""" Cheap and simple computation in simple case
If we're given a head and the entire expression is cheap to do (e.g.
elemwises, selections, ...) then compute on data directly, without
parallelism"""
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return compute(expr, {leaf: into(Iterator, data)}, **kwargs)
else:
raise MDNotImplementedError()
@dispatch((Broadcast, Arithmetic, ReLabel, Summary, Like, Sort, Label, Head,
Selection, ElemWise, Apply, Reduction, Distinct, By, IsIn),
(bcolz.ctable, bcolz.carray))
def compute_up(expr, data, **kwargs):
""" This is only necessary because issubclass(bcolz.carray, Iterator)
So we have to explicitly avoid the streaming Python backend"""
raise NotImplementedError()
@dispatch(Field, bcolz.ctable)
def compute_up(expr, data, **kwargs):
return data[str(expr._name)]
@dispatch(Projection, bcolz.ctable)
def compute_up(expr, data, **kwargs):
return data[list(map(str, expr.fields))]
@dispatch(Slice, (bcolz.carray, bcolz.ctable))
def compute_up(expr, x, **kwargs):
return x[expr.index]
def compute_chunk(source, chunk, chunk_expr, data_index):
part = source[data_index]
return compute(chunk_expr, {chunk: part})
def get_chunksize(data):
if isinstance(data, bcolz.carray):
return data.chunklen
elif isinstance(data, bcolz.ctable):
return min(data[c].chunklen for c in data.names)
else:
raise TypeError("Don't know how to compute chunksize for type %r" %
type(data).__name__)
@dispatch(Expr, (bcolz.carray, bcolz.ctable))
def compute_down(expr, data, chunksize=None, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
if chunksize is None:
chunksize = max(2**16, get_chunksize(data))
# If the bottom expression is a projection or field then want to do
# compute_up first
children = set(e for e in expr._traverse()
if isinstance(e, Expr)
and any(i is expr._leaves()[0] for i in e._inputs))
if len(children) == 1 and isinstance(first(children), (Field, Projection)):
raise MDNotImplementedError()
chunk = symbol('chunk', chunksize * leaf.schema)
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr, chunk=chunk)
data_parts = partitions(data, chunksize=(chunksize,))
parts = list(map(curry(compute_chunk, data, chunk, chunk_expr),
data_parts))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], Iterable):
intermediate = list(concat(parts))
else:
raise TypeError("Don't know how to concatenate objects of type %r" %
type(parts[0]).__name__)
return compute(agg_expr, {agg: intermediate})
| {
"repo_name": "caseyclements/blaze",
"path": "blaze/compute/bcolz.py",
"copies": "7",
"size": "4311",
"license": "bsd-3-clause",
"hash": -8936558064895738000,
"line_mean": 30.2391304348,
"line_max": 79,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.5510708401976934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016852039096730705,
"num_lines": 138
} |
from __future__ import absolute_import, division, print_function
from toolz import curry
from .optimize import fuse, cull
import multiprocessing
from .async import get_async # TODO: get better get
from .context import _globals
from sys import version
if version < '3':
import copy_reg as copyreg
else:
import copyreg
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# type(set.union) is used as a proxy to <class 'method_descriptor'>
copyreg.pickle(type(set.union), _reduce_method_descriptor)
import pickle
import cloudpickle
def _dumps(x):
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
_loads = pickle.loads
def _process_get_id():
return multiprocessing.current_process().ident
def get(dsk, keys, num_workers=None, func_loads=None, func_dumps=None,
optimize_graph=True, **kwargs):
""" Multiprocessed get function appropriate for Bags
Parameters
----------
dsk : dict
dask graph
keys : object or list
Desired results from graph
num_workers : int
Number of worker processes (defaults to number of cores)
func_dumps : function
Function to use for function serialization
(defaults to cloudpickle.dumps)
func_loads : function
Function to use for function deserialization
(defaults to cloudpickle.loads)
optimize_graph : bool
If True [default], `fuse` is applied to the graph before computation.
"""
pool = _globals['pool']
if pool is None:
pool = multiprocessing.Pool(num_workers)
cleanup = True
else:
cleanup = False
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = pickle_apply_async(pool.apply_async,
func_dumps=func_dumps,
func_loads=func_loads)
# Optimize Dask
dsk2, dependencies = cull(dsk, keys)
if optimize_graph:
dsk3, dependencies = fuse(dsk2, keys, dependencies)
else:
dsk3 = dsk2
try:
# Run
result = get_async(apply_async, len(pool._pool), dsk3, keys,
queue=queue, get_id=_process_get_id, **kwargs)
finally:
if cleanup:
pool.close()
return result
def apply_func(sfunc, sargs, skwds, loads=None):
loads = loads or _globals.get('loads') or _loads
func = loads(sfunc)
args = loads(sargs)
kwds = loads(skwds)
return func(*args, **kwds)
@curry
def pickle_apply_async(apply_async, func, args=(), kwds={},
func_loads=None, func_dumps=None):
dumps = func_dumps or _globals.get('func_dumps') or _dumps
sfunc = dumps(func)
sargs = dumps(args)
skwds = dumps(kwds)
return apply_async(curry(apply_func, loads=func_loads),
args=[sfunc, sargs, skwds])
| {
"repo_name": "mikegraham/dask",
"path": "dask/multiprocessing.py",
"copies": "1",
"size": "2898",
"license": "bsd-3-clause",
"hash": 2271024215044405500,
"line_mean": 27.4117647059,
"line_max": 77,
"alpha_frac": 0.6273291925,
"autogenerated": false,
"ratio": 3.83841059602649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.994771792260251,
"avg_score": 0.0036043731847960024,
"num_lines": 102
} |
from __future__ import absolute_import, division, print_function
from toolz import curry, pipe, partial
from .optimize import fuse, cull
import multiprocessing
from .async import get_async # TODO: get better get
from .context import _globals
from sys import version
if version < '3':
import copy_reg as copyreg
else:
import copyreg
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# type(set.union) is used as a proxy to <class 'method_descriptor'>
copyreg.pickle(type(set.union), _reduce_method_descriptor)
import pickle
import cloudpickle
def _dumps(x):
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
_loads = pickle.loads
def _process_get_id():
return multiprocessing.current_process().ident
def get(dsk, keys, optimizations=[], num_workers=None,
func_loads=None, func_dumps=None, **kwargs):
""" Multiprocessed get function appropriate for Bags
Parameters
----------
dsk: dict
dask graph
keys: object or list
Desired results from graph
optimizations: list of functions
optimizations to perform on graph before execution
num_workers: int
Number of worker processes (defaults to number of cores)
func_dumps: function
Function to use for function serialization
(defaults to cloudpickle.dumps)
func_loads: function
Function to use for function deserialization
(defaults to cloudpickle.loads)
"""
pool = _globals['pool']
if pool is None:
pool = multiprocessing.Pool(num_workers)
cleanup = True
else:
cleanup = False
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = pickle_apply_async(pool.apply_async,
func_dumps=func_dumps,
func_loads=func_loads)
# Optimize Dask
dsk2 = fuse(dsk, keys)
dsk3 = pipe(dsk2, partial(cull, keys=keys), *optimizations)
try:
# Run
result = get_async(apply_async, len(pool._pool), dsk3, keys,
queue=queue, get_id=_process_get_id, **kwargs)
finally:
if cleanup:
pool.close()
return result
def apply_func(sfunc, sargs, skwds, loads=None):
loads = loads or _globals.get('loads') or _loads
func = loads(sfunc)
args = loads(sargs)
kwds = loads(skwds)
return func(*args, **kwds)
@curry
def pickle_apply_async(apply_async, func, args=(), kwds={},
func_loads=None, func_dumps=None):
dumps = func_dumps or _globals.get('func_dumps') or _dumps
sfunc = dumps(func)
sargs = dumps(args)
skwds = dumps(kwds)
return apply_async(curry(apply_func, loads=func_loads),
args=[sfunc, sargs, skwds])
| {
"repo_name": "vikhyat/dask",
"path": "dask/multiprocessing.py",
"copies": "1",
"size": "2835",
"license": "bsd-3-clause",
"hash": 568479967618670000,
"line_mean": 27.35,
"line_max": 73,
"alpha_frac": 0.6320987654,
"autogenerated": false,
"ratio": 3.857142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4989241622542857,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import curry, pipe, partial
from .optimize import fuse, cull
import multiprocessing
import dill
import pickle
from .async import get_async # TODO: get better get
from .context import _globals
def get(dsk, keys, optimizations=[fuse], num_workers=None,
func_loads=None, func_dumps=None):
""" Multiprocessed get function appropriate for Bags
Parameters
----------
dsk: dict
dask graph
keys: object or list
Desired results from graph
optimizations: list of functions
optimizations to perform on graph before execution
num_workers: int
Number of worker processes (defaults to number of cores)
func_dumps: function
Function to use for function serialization (defaults to dill.dumps)
func_loads: function
Function to use for function deserialization (defaults to dill.loads)
"""
pool = _globals['pool']
if pool is None:
pool = multiprocessing.Pool(num_workers)
cleanup = True
else:
cleanup = False
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = dill_apply_async(pool.apply_async,
func_dumps=func_dumps, func_loads=func_loads)
# Optimize Dask
dsk2 = pipe(dsk, partial(cull, keys=keys), *optimizations)
try:
# Run
result = get_async(apply_async, len(pool._pool), dsk2, keys,
queue=queue)
finally:
if cleanup:
pool.close()
return result
def apply_func(sfunc, sargs, skwds, loads=None):
loads = loads or _globals.get('loads') or dill.loads
func = loads(sfunc)
args = loads(sargs)
kwds = loads(skwds)
return func(*args, **kwds)
@curry
def dill_apply_async(apply_async, func, args=(), kwds={},
func_loads=None, func_dumps=None):
dumps = func_dumps or _globals.get('func_dumps') or dill.dumps
sfunc = dumps(func)
sargs = dumps(args)
skwds = dumps(kwds)
return apply_async(curry(apply_func, loads=func_loads),
args=[sfunc, sargs, skwds])
| {
"repo_name": "esc/dask",
"path": "dask/multiprocessing.py",
"copies": "3",
"size": "2184",
"license": "bsd-3-clause",
"hash": 3992673364523097600,
"line_mean": 28.9178082192,
"line_max": 80,
"alpha_frac": 0.6336996337,
"autogenerated": false,
"ratio": 3.8792184724689167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6012918106168916,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import curry, pipe, partial
from .optimize import fuse, cull
import multiprocessing
import psutil
import dill
import pickle
from .async import get_async # TODO: get better get
def get(dsk, keys, optimizations=[fuse]):
""" Multiprocessed get function appropriate for Bags """
pool = multiprocessing.Pool(psutil.cpu_count())
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = dill_apply_async(pool.apply_async)
# Optimize Dask
dsk2 = pipe(dsk, partial(cull, keys=keys), *optimizations)
try:
# Run
result = get_async(apply_async, psutil.cpu_count(), dsk2, keys,
queue=queue)
finally:
pool.close()
return result
def dill_apply_func(sfunc, sargs, skwds):
func = dill.loads(sfunc)
args = dill.loads(sargs)
kwds = dill.loads(skwds)
return func(*args, **kwds)
@curry
def dill_apply_async(apply_async, func, args=(), kwds={}):
sfunc = dill.dumps(func)
sargs = dill.dumps(args)
skwds = dill.dumps(kwds)
return apply_async(dill_apply_func, args=[sfunc, sargs, skwds])
| {
"repo_name": "PeterDSteinberg/dask",
"path": "dask/multiprocessing.py",
"copies": "1",
"size": "1188",
"license": "bsd-3-clause",
"hash": -8470237787221103000,
"line_mean": 26.6279069767,
"line_max": 71,
"alpha_frac": 0.664983165,
"autogenerated": false,
"ratio": 3.375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4539983165,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import isdistinct, frequencies, concat, unique, get
import datashape
from datashape import Option, Record, Unit, dshape, var
from datashape.predicates import isscalar, iscollection, isrecord
from .core import common_subexpression
from .expressions import Expr, ElemWise, label
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'distinct', 'merge',
'head', 'sort', 'Join', 'join', 'transform']
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
__slots__ = '_hash', '_child', '_key', 'ascending'
@property
def dshape(self):
return self._child.dshape
@property
def key(self):
if self._key is () or self._key is None:
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort collection
Parameters
----------
key: string, list of strings, Expr
Defines by what you want to sort. Either:
A single column string, ``t.sort('amount')``
A list of column strings, ``t.sort(['name', 'amount'])``
A Table Expression, ``t.sort(-t.amount)``
ascending: bool
Determines order of the sort
"""
if not isrecord(child.dshape.measure):
key = None
if isinstance(key, list):
key = tuple(key)
return Sort(child, key, ascending)
class Distinct(Expr):
"""
Removes duplicate rows from the table, so every row is distinct
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
"""
__slots__ = '_hash', '_child',
@property
def dshape(self):
return datashape.var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct(%s)' % self._child
def distinct(expr):
return Distinct(expr)
class Head(Expr):
""" First ``n`` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
"""
__slots__ = '_hash', '_child', 'n'
@property
def dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.head(%d)' % (self._child, self.n)
def head(child, n=10):
return Head(child, n)
head.__doc__ = Head.__doc__
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
if exprs:
return exprs[0]
if kwargs:
[(k, v)] = kwargs.items()
return v.label(k)
# Get common sub expression
exprs = exprs + tuple(label(v, k) for k, v in kwargs.items())
try:
child = common_subexpression(*exprs)
except:
raise ValueError("No common sub expression found for input expressions")
result = Merge(child, exprs)
if not isdistinct(result.fields):
raise ValueError("Repeated columns found: " + ', '.join(k for k, v in
frequencies(result.fields).items() if v > 1))
return result
def transform(t, replace=True, **kwargs):
""" Add named columns to table
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
"""
if replace and set(t.fields).intersection(set(kwargs)):
t = t[[c for c in t.fields if c not in kwargs]]
args = [t] + [v.label(k) for k, v in kwargs.items()]
return merge(*args)
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
names, values = [], []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
names.extend(schema.names)
values.extend(schema.types)
elif isinstance(schema, Unit):
names.append(c._name)
values.append(schema)
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % c.schema[0])
return dshape(Record(list(zip(names, values))))
class Merge(ElemWise):
""" Merge many fields together
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
"""
__slots__ = '_hash', '_child', 'children'
@property
def schema(self):
return schema_concat(self.children)
@property
def fields(self):
return list(concat(child.fields for child in self.children))
def _subterms(self):
yield self
for i in self.children:
for node in i._subterms():
yield node
def _get_field(self, key):
for child in self.children:
if key in child.fields:
if isscalar(child.dshape.measure):
return child
else:
return child[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*[self[c] for c in key])
def _leaves(self):
return list(unique(concat(i._leaves() for i in self.children)))
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs : Expr
rhs : Expr
on_left : string
on_right : string
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = '_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how'
__inputs__ = 'lhs', 'rhs'
@property
def on_left(self):
if isinstance(self._on_left, tuple):
return list(self._on_left)
else:
return self._on_left
@property
def on_right(self):
if isinstance(self._on_right, tuple):
return list(self._on_right)
else:
return self._on_right
@property
def schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
joined = [[name, dt] for name, dt in self.lhs.schema[0].parameters[0]
if name in self.on_left]
left = [[name, dt] for name, dt in
zip(self.lhs.fields, types_of_fields(self.lhs.fields, self.lhs))
if name not in self.on_left]
right = [[name, dt] for name, dt in
zip(self.rhs.fields, types_of_fields(self.rhs.fields, self.rhs))
if name not in self.on_right]
# Handle overlapping but non-joined case, e.g.
left_other = [name for name, dt in left if name not in self.on_left]
right_other = [name for name, dt in right if name not in self.on_right]
overlap = set.intersection(set(left_other), set(right_other))
left = [[name+'_left' if name in overlap else name, dt]
for name, dt in left]
right = [[name+'_right' if name in overlap else name, dt]
for name, dt in right]
if self.how in ('right', 'outer'):
left = [[name, option(dt)] for name, dt in left]
if self.how in ('left', 'outer'):
right = [[name, option(dt)] for name, dt in right]
return dshape(Record(joined + left + right))
@property
def dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields = fields[0]
assert fields == expr._name
return expr.dshape.measure
def join(lhs, rhs, on_left=None, on_right=None, how='inner'):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError("Can not Join. No shared columns between %s and %s"%
(lhs, rhs))
if types_of_fields(on_left, lhs) != types_of_fields(on_right, rhs):
raise TypeError("Schema's of joining columns do not match")
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how)
join.__doc__ = Join.__doc__
from .expressions import dshape_method_list
dshape_method_list.extend([
(iscollection, set([sort, head])),
(lambda ds: len(ds.shape) == 1, set([distinct])),
])
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/collections.py",
"copies": "1",
"size": "12355",
"license": "bsd-3-clause",
"hash": -289890167739737660,
"line_mean": 27.5334872979,
"line_max": 80,
"alpha_frac": 0.5549170376,
"autogenerated": false,
"ratio": 3.694677033492823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4749594071092823,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import memoize, first
from datashape import discover, var
from .utils import cls_name
class Chunks(object):
""" An Iterable of chunked data
Iterates over chunks of in-memory data. Contains an iterable or a function
that returns an iterator.
>>> c = Chunks([[1, 2, 3], [4, 5, 6]])
>>> next(iter(c))
[1, 2, 3]
For typed containers see the ``chunks`` function which generates
parametrized Chunks classes.
>>> c = chunks(list)([[1, 2, 3], [4, 5, 6]])
>>> next(iter(c))
[1, 2, 3]
>>> c.container.__name__
'list'
"""
def __init__(self, data):
self.data = data
def __iter__(self):
if callable(self.data):
return self.data()
else:
return iter(self.data)
def chunks(cls):
""" Parametrized Chunks Class """
return type('Chunks_' + cls_name(cls).replace('.', '_'), (Chunks,), {'container': cls})
chunks.__doc__ = Chunks.__doc__
chunks = memoize(chunks)
@discover.register(Chunks)
def discover_chunks(c, **kwargs):
return var * discover(first(c)).subshape[0]
| {
"repo_name": "mrocklin/into",
"path": "into/chunks.py",
"copies": "1",
"size": "1164",
"license": "bsd-3-clause",
"hash": -3962605725969169400,
"line_mean": 22.28,
"line_max": 91,
"alpha_frac": 0.5962199313,
"autogenerated": false,
"ratio": 3.4540059347181007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45502258660181005,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import memoize, merge
from functools import wraps
from .csv import CSV
import datashape
import sqlalchemy as sa
from datashape import discover, dshape
from datashape import coretypes as ct
from collections import namedtuple
from contextlib import contextmanager
from .ssh import SSH, _SSH
from .sql import metadata_of_engine, sa
from ..utils import tmpfile, sample
from ..append import append
from ..resource import resource
from ..directory import _Directory, Directory
from ..compatibility import unicode
try:
from pywebhdfs.webhdfs import PyWebHdfsClient
except ImportError:
pass
class _HDFS(object):
""" Parent class for data on Hadoop File System
Examples
--------
>>> HDFS(CSV)('/path/to/file.csv', host='54.91.255.255',
... port=14000, user='hdfs') # doctest: +SKIP
Alternatively use resource strings
>>> resource('hdfs://hdfs@54.91.255.255:/path/to/file.csv') # doctest: +SKIP
"""
def __init__(self, *args, **kwargs):
hdfs = kwargs.pop('hdfs', None)
host = kwargs.pop('host', None)
port = str(kwargs.pop('port', '14000'))
user = kwargs.pop('user', 'hdfs')
if not hdfs and (host and port and user):
hdfs = PyWebHdfsClient(host=host, port=port, user_name=user)
if hdfs is None:
raise ValueError("No HDFS credentials found.\n"
"Either supply a PyWebHdfsClient instance or keywords\n"
" host=, port=, user=")
self.hdfs = hdfs
self.subtype.__init__(self, *args, **kwargs)
@memoize
def HDFS(cls):
return type('HDFS(%s)' % cls.__name__, (_HDFS, cls), {'subtype': cls})
@sample.register(HDFS(CSV))
@contextmanager
def sample_hdfs_csv(data, length=10000):
sample = data.hdfs.read_file(data.path.lstrip('/'), length=length)
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write(sample)
yield fn
@discover.register(HDFS(CSV))
def discover_hdfs_csv(data, length=10000, **kwargs):
with sample(data, length=length) as fn:
result = discover(CSV(fn, encoding=data.encoding,
dialect=data.dialect,
has_header=data.has_header))
return result
@sample.register(HDFS(Directory(CSV)))
@contextmanager
def sample_hdfs_directory_csv(data, **kwargs):
files = data.hdfs.list_dir(data.path.lstrip('/'))
one_file = data.path + '/' + files['FileStatuses']['FileStatus'][0]['pathSuffix']
csv = HDFS(CSV)(one_file, hdfs=data.hdfs)
with sample(csv, **kwargs) as fn:
yield fn
@discover.register(HDFS(Directory(CSV)))
def discover_hdfs_directory(data, length=10000, **kwargs):
with sample(data, length=length) as fn:
o = data.container(fn, **data.kwargs)
result = discover(o)
return result
"""
Hive Tables
===========
Hive acts a bit differently from other databases that we interact with through
SQL. As a result we need to special case a lot of code.
Foremost, a database is just a directory on HDFS holding something like a CSV
file (or Avro, Parquet, etc..) As a result when we construct a Table we
actually have to know a lot about our CSV files (e.g. delimiter.)
This breaks the current into model a bit because we usually create things with
`resource` and then `append` on to them. Here we need to know both at the
same time. Enter a convenient hack, a token for a proxy table
"""
TableProxy = namedtuple('TableProxy', 'engine,name')
"""
resource('hive://...::tablename') now gives us one of these. The
subsequent call to `append` does the actual creation.
We're looking for better solutions. For the moment, this works.
"""
@resource.register('hive://.+::.+', priority=16)
def resource_hive_table(uri, **kwargs):
uri, table = uri.split('::')
engine = resource(uri)
metadata = metadata_of_engine(engine)
if table in metadata.tables:
return metadata.tables[table]
metadata.reflect(engine, views=False)
if table in metadata.tables:
return metadata.tables[table]
return TableProxy(engine, table)
hive_types = {
ct.int8: 'TINYINT',
ct.int16: 'SMALLINT',
ct.int32: 'INT',
ct.int64: 'BIGINT',
ct.float32: 'FLOAT',
ct.float64: 'DOUBLE',
ct.date_: 'DATE',
ct.datetime_: 'TIMESTAMP',
ct.string: 'STRING',
ct.bool_: 'BOOLEAN'}
def dshape_to_hive(ds):
""" Convert datashape measure to Hive dtype string
>>> dshape_to_hive('int16')
'SMALLINT'
>>> dshape_to_hive('?int32') # Ignore option types
'INT'
>>> dshape_to_hive('string[256]')
'VARCHAR(256)'
"""
if isinstance(ds, (str, unicode)):
ds = datashape.dshape(ds)
if isinstance(ds, ct.DataShape):
ds = ds.measure
if isinstance(ds, ct.Option):
ds = ds.ty
if isinstance(ds, ct.String):
if ds.fixlen:
return 'VARCHAR(%d)' % ds.fixlen
else:
return 'STRING'
if ds in hive_types:
return hive_types[ds]
raise NotImplementedError("No Hive dtype known for %s" % ds)
def create_hive_statement(tbl_name, dshape, path=None, table_type='',
db_name='default', **dialect):
""" Generic CREATE TABLE statement for hive
Parameters
----------
tbl_name : string
Specifies table name "mytable"
dshape : DataShape
Datashape of the desired table
path : string (optional)
Location of data
table_type : string (optional)
Table Modifier like EXTERNAL or LOCAL
db_name : string
Specifies database name. Defaults to "default"
**dialect : keyword arguments dict
CSV dialect with keys delimiter, has_header, etc.
Example
-------
>>> ds = dshape('var * {name: string, balance: int64, when: datetime}')
>>> print(create_hive_statement('accounts', ds, delimiter=',')) # doctest: +NORMALIZE_WHITESPACE
CREATE TABLE default.accounts (
name STRING,
balance BIGINT,
when TIMESTAMP
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE
>>> print(create_hive_statement('accounts', ds, delimiter=',',
... has_header=True, path='/data/accounts/', table_type='EXTERNAL')) # doctest: +NORMALIZE_WHITESPACE
CREATE EXTERNAL TABLE default.accounts (
name STRING,
balance BIGINT,
when TIMESTAMP
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE
LOCATION '/data/accounts/'
TBLPROPERTIES ("skip.header.line.count"="1")
"""
if db_name:
db_name = db_name + '.'
if not table_type:
table_type = ''
# Column names and types from datashape
ds = dshape or discover(data)
assert isinstance(ds.measure, ct.Record)
columns = [(name, dshape_to_hive(typ))
for name, typ in zip(ds.measure.names, ds.measure.types)]
column_text = ',\n '.join('%20s %s' % col for col in columns).lstrip()
statement = """
CREATE {table_type} TABLE {db_name}{tbl_name} (
{column_text}
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '{delimiter}'
STORED AS TEXTFILE
"""
if path:
statement = statement +"""
LOCATION '{path}'
"""
if dialect.get('has_header'):
statement = statement + """
TBLPROPERTIES ("skip.header.line.count"="1")"""
return statement.format(**merge(dialect, locals())).strip('\n')
"""
Load Data from HDFS or SSH into Hive
====================================
We push types like HDFS(CSV) and HDFS(Directory(CSV)) into Hive tables. This
requires that we bring a bit of the CSV file locally, inspect it (sniff for csv
dialect), generate the appropriate CREATE EXTERNAL TABLE command, and then
execute.
"""
@append.register(TableProxy, HDFS(Directory(CSV)))
def create_new_hive_table_from_csv(tbl, data, dshape=None, path=None, **kwargs):
"""
Create new Hive table from directory of CSV files on HDFS
Actually executes command.
"""
if isinstance(data, _SSH):
table_type = None
elif isinstance(data, _HDFS):
table_type = 'EXTERNAL'
else:
table_type = None
if not dshape:
dshape = discover(data)
if tbl.engine.dialect.name == 'hive':
statement = create_hive_statement(tbl.name, dshape,
path=data.path,
db_name = str(tbl.engine.url).split('/')[-1],
table_type=table_type,
**dialect_of(data))
else:
raise NotImplementedError("Don't know how to migrate directory of csvs"
" on HDFS to database of dialect %s" % tbl.engine.dialect.name)
with tbl.engine.connect() as conn:
conn.execute(statement)
metadata = metadata_of_engine(tbl.engine)
tbl2 = sa.Table(tbl.name, metadata, autoload=True,
autoload_with=tbl.engine)
return tbl2
@append.register(TableProxy, (SSH(CSV), SSH(Directory(CSV))))
def append_remote_csv_to_new_table(tbl, data, dshape=None, **kwargs):
if not dshape:
dshape = discover(data)
if tbl.engine.dialect.name == 'hive':
statement = create_hive_statement(tbl.name, dshape,
db_name = str(tbl.engine.url).split('/')[-1],
**dialect_of(data))
else:
raise NotImplementedError("Don't know how to migrate directory of csvs"
" on Local disk to database of dialect %s" % tbl.engine.dialect.name)
with tbl.engine.connect() as conn:
conn.execute(statement)
metadata = metadata_of_engine(tbl.engine)
tbl2 = sa.Table(tbl.name, metadata, autoload=True,
autoload_with=tbl.engine)
return append(tbl2, data, **kwargs)
@append.register(sa.Table, (SSH(CSV), SSH(Directory(CSV))))
def append_remote_csv_to_table(tbl, csv, **kwargs):
"""
Load Remote data into existing Hive table
"""
path = csv.path
if path[0] != '/':
path = '/home/%s/%s' % (csv.auth['username'], csv.path)
if tbl.bind.dialect.name == 'hive':
statement = ('LOAD DATA LOCAL INPATH "%(path)s" INTO TABLE %(tablename)s' %
{'path': path, 'tablename': tbl.name})
else:
raise NotImplementedError("Don't know how to migrate csvs on remote "
"disk to database of dialect %s" % tbl.engine.dialect.name)
with tbl.bind.connect() as conn:
conn.execute(statement)
return tbl
import csv
sniffer = csv.Sniffer()
def dialect_of(data, **kwargs):
""" CSV dialect of a CSV file stored in SSH, HDFS, or a Directory. """
keys = set(['delimiter', 'doublequote', 'escapechar', 'lineterminator',
'quotechar', 'quoting', 'skipinitialspace', 'strict', 'has_header'])
if isinstance(data, (HDFS(CSV), SSH(CSV))):
with sample(data) as fn:
d = dialect_of(CSV(fn, **data.dialect))
elif isinstance(data, (HDFS(Directory(CSV)), SSH(Directory(CSV)))):
with sample(data) as fn:
d = dialect_of(CSV(fn, **data.kwargs))
elif isinstance(data, Directory(CSV)):
d = dialect_of(next(data))
else:
assert isinstance(data, CSV)
# Get sample text
with open(data.path, 'r') as f:
text = f.read(1000)
result = dict()
d = sniffer.sniff(text)
d = dict((k, getattr(d, k)) for k in keys if hasattr(d, k))
if data.has_header is None:
d['has_header'] = sniffer.has_header(text)
d.update(data.dialect)
d.update(kwargs)
d = dict((k, v) for k, v in d.items() if k in keys)
return d
| {
"repo_name": "mrocklin/into",
"path": "into/backends/hdfs.py",
"copies": "1",
"size": "11885",
"license": "bsd-3-clause",
"hash": -3467632272508071400,
"line_mean": 29.9505208333,
"line_max": 114,
"alpha_frac": 0.6091712242,
"autogenerated": false,
"ratio": 3.700186799501868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4809358023701868,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import memoize, merge, partition_all
from multipledispatch import MDNotImplementedError
import re
import os
from .csv import CSV
from .json import JSON, JSONLines
from .text import TextFile
import pandas as pd
import uuid
import datashape
import sqlalchemy as sa
from datashape import discover
from datashape import coretypes as ct
from collections import namedtuple, Iterator
from contextlib import contextmanager
from .ssh import SSH
from .spark import SparkDataFrame, SchemaRDD
from .sql import metadata_of_engine, sa
from ..utils import tmpfile, sample, ignoring, raises
from ..temp import Temp
from ..append import append
from ..convert import convert
from ..chunks import chunks
from ..resource import resource
from ..directory import Directory
from ..compatibility import unicode
with ignoring(ImportError):
from pywebhdfs.webhdfs import PyWebHdfsClient
from pywebhdfs.errors import FileNotFound
class _HDFS(object):
""" Parent class for data on Hadoop File System
Examples
--------
>>> HDFS(CSV)('/path/to/file.csv', host='54.91.255.255',
... port=14000, user='hdfs') # doctest: +SKIP
Alternatively use resource strings
>>> resource('hdfs://hdfs@54.91.255.255:/path/to/file.csv') # doctest: +SKIP
"""
def __init__(self, *args, **kwargs):
hdfs = kwargs.get('hdfs', None)
host = kwargs.get('host', None)
user = (kwargs.get('user_name') or
kwargs.get('user') or
kwargs.get('username') or None)
port = str(kwargs.get('port', 14000))
if not hdfs and (host and port and user):
hdfs = PyWebHdfsClient(host=host, port=str(port),
user_name=user)
if hdfs is None:
raise ValueError("No HDFS credentials found.\n"
"Either supply a PyWebHdfsClient instance or keywords\n"
" host=, port=, user=")
self.hdfs = hdfs
self.subtype.__init__(self, *args, **kwargs)
@memoize
def HDFS(cls):
return type('HDFS(%s)' % cls.__name__, (_HDFS, cls), {'subtype': cls})
@sample.register(_HDFS)
@contextmanager
def sample_hdfs_csv(data, length=10000):
sample = data.hdfs.read_file(data.path.lstrip('/'), length=length)
with tmpfile(data.canonical_extension) as fn:
with open(fn, 'w') as f:
f.write(sample)
yield fn
@discover.register(HDFS(JSON))
@discover.register(HDFS(JSONLines))
@discover.register(HDFS(TextFile))
def discover_hdfs_file(data, **kwargs):
with sample(data) as fn:
result = discover(data.subtype(fn, **kwargs))
return result
@discover.register(HDFS(CSV))
def discover_hdfs_csv(data, length=10000, **kwargs):
with sample(data, length=length) as fn:
result = discover(CSV(fn, encoding=data.encoding,
dialect=data.dialect,
has_header=data.has_header))
return result
@sample.register(HDFS(Directory(CSV)))
@contextmanager
def sample_hdfs_directory_csv(data, **kwargs):
files = data.hdfs.list_dir(data.path.lstrip('/'))
one_file = data.path + '/' + files['FileStatuses']['FileStatus'][0]['pathSuffix']
csv = HDFS(CSV)(one_file, hdfs=data.hdfs)
with sample(csv, **kwargs) as fn:
yield fn
@discover.register(HDFS(Directory(CSV)))
def discover_hdfs_directory(data, length=10000, **kwargs):
with sample(data, length=length) as fn:
o = data.container(fn, **data.kwargs)
result = discover(o)
return result
"""
Hive Tables
===========
Hive acts a bit differently from other databases that we interact with through
SQL. As a result we need to special case a lot of code.
Foremost, a database is just a directory on HDFS holding something like a CSV
file (or Avro, Parquet, etc..) As a result when we construct a Table we
actually have to know a lot about our CSV files (e.g. delimiter.)
This breaks the current odo model a bit because we usually create things with
`resource` and then `append` on to them. Here we need to know both at the
same time. Enter a convenient hack, a token for a proxy table
"""
TableProxy = namedtuple('TableProxy', 'engine,name,stored_as')
"""
resource('hive://...::tablename') now gives us one of these. The
subsequent call to `append` does the actual creation.
We're looking for better solutions. For the moment, this works.
"""
@resource.register('hive://.+::.+', priority=16)
def resource_hive_table(uri, stored_as='TEXTFILE', external=True, dshape=None, **kwargs):
if dshape:
dshape = datashape.dshape(dshape)
uri, table = uri.split('::')
engine = resource(uri)
metadata = metadata_of_engine(engine)
# If table exists then return it
with ignoring(sa.exc.NoSuchTableError):
return sa.Table(table, metadata, autoload=True,
autoload_with=engine)
# Enough information to make an internal table
if dshape and (not external or external and kwargs.get('path')):
table_type = 'EXTERNAL' if external else ''
statement = create_hive_statement(table, dshape,
db_name=engine.url.database, stored_as=stored_as,
table_type=table_type, **kwargs)
with engine.connect() as conn:
conn.execute(statement)
return sa.Table(table, metadata, autoload=True,
autoload_with=engine)
else:
return TableProxy(engine, table, stored_as)
hive_types = {
ct.int8: 'TINYINT',
ct.int16: 'SMALLINT',
ct.int32: 'INT',
ct.int64: 'BIGINT',
ct.float32: 'FLOAT',
ct.float64: 'DOUBLE',
ct.date_: 'DATE',
ct.datetime_: 'TIMESTAMP',
ct.string: 'STRING',
ct.bool_: 'BOOLEAN'}
def dshape_to_hive(ds):
""" Convert datashape measure to Hive dtype string
>>> dshape_to_hive('var * {name: string, balance: int32}')
['name STRING', 'balance INT']
>>> dshape_to_hive('int16')
'SMALLINT'
>>> dshape_to_hive('?int32') # Ignore option types
'INT'
>>> dshape_to_hive('string[256]')
'VARCHAR(256)'
"""
if isinstance(ds, (str, unicode)):
ds = datashape.dshape(ds)
if isinstance(ds, ct.DataShape):
ds = ds.measure
if isinstance(ds, ct.Record):
columns = [(name, dshape_to_hive(typ))
for name, typ in zip(ds.measure.names, ds.measure.types)]
return ['%s %s' % col for col in columns]
if isinstance(ds, ct.Option):
ds = ds.ty
if isinstance(ds, ct.String):
if ds.fixlen:
return 'VARCHAR(%d)' % ds.fixlen
else:
return 'STRING'
if ds in hive_types:
return hive_types[ds]
raise NotImplementedError("No Hive dtype known for %s" % ds)
def create_hive_statement(tbl_name, dshape, path=None, table_type='',
db_name='default', stored_as='TEXTFILE', **kwargs):
""" Generic CREATE TABLE statement for hive
Parameters
----------
tbl_name : string
Specifies table name "mytable"
dshape : DataShape
Datashape of the desired table
path : string (optional)
Location of data
table_type : string (optional)
Table Modifier like EXTERNAL or LOCAL
db_name : string
Specifies database name. Defaults to "default"
stored_as: string
Target storage format like TEXTFILE or PARQUET
**kwargs: keyword arguments dict
CSV dialect with keys delimiter, has_header, etc.
Example
-------
>>> from datashape import dshape
>>> ds = dshape('var * {name: string, balance: int64, when: datetime}')
>>> print(create_hive_statement('accounts', ds, delimiter=',')) # doctest: +NORMALIZE_WHITESPACE
CREATE TABLE default.accounts (
name STRING,
balance BIGINT,
when TIMESTAMP
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE
>>> print(create_hive_statement('accounts', ds, delimiter=',',
... has_header=True, path='/data/accounts/', table_type='EXTERNAL')) # doctest: +NORMALIZE_WHITESPACE
CREATE EXTERNAL TABLE default.accounts (
name STRING,
balance BIGINT,
when TIMESTAMP
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE
LOCATION '/data/accounts/'
TBLPROPERTIES ("skip.header.line.count"="1")
>>> print(create_hive_statement('accounts', ds, stored_as='PARQUET')) # doctest: +NORMALIZE_WHITESPACE
CREATE TABLE default.accounts (
name STRING,
balance BIGINT,
when TIMESTAMP
)
STORED AS PARQUET
"""
if db_name:
db_name = db_name + '.'
if not table_type:
table_type = ''
# Column names and types from datashape
assert isinstance(dshape.measure, ct.Record)
columns = dshape_to_hive(dshape)
column_text = ',\n '.join('%20s %s' % tuple(col.split())
for col in columns).lstrip()
statement = """
CREATE {table_type} TABLE {db_name}{tbl_name} (
{column_text}
)
"""
if 'delimiter' in kwargs:
statement += """
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '{delimiter}'
"""
statement += """
STORED AS {stored_as}
"""
if path:
statement = statement +"""
LOCATION '{path}'
"""
if kwargs.get('has_header'):
statement = statement + """
TBLPROPERTIES ("skip.header.line.count"="1")"""
return statement.format(**merge(kwargs, locals())).strip('\n')
"""
Load Data from HDFS or SSH into Hive
====================================
We push types like HDFS(CSV) and HDFS(Directory(CSV)) into Hive tables. This
requires that we bring a bit of the CSV file locally, inspect it (sniff for csv
dialect), generate the appropriate CREATE EXTERNAL TABLE command, and then
execute.
"""
@append.register(TableProxy, HDFS(Directory(CSV)))
def create_new_hive_table_from_csv(tbl, data, dshape=None, path=None, **kwargs):
"""
Create new Hive table from directory of CSV files on HDFS
Actually executes command.
"""
table_type = 'EXTERNAL'
if not dshape:
dshape = discover(data)
if tbl.engine.dialect.name == 'hive':
statement = create_hive_statement(tbl.name, dshape,
path=data.path,
db_name = str(tbl.engine.url).split('/')[-1],
table_type=table_type,
**dialect_of(data))
else:
raise NotImplementedError("Don't know how to migrate directory of csvs"
" on HDFS to database of dialect %s" % tbl.engine.dialect.name)
with tbl.engine.connect() as conn:
conn.execute(statement)
metadata = metadata_of_engine(tbl.engine)
tbl2 = sa.Table(tbl.name, metadata, autoload=True,
autoload_with=tbl.engine)
return tbl2
@append.register(TableProxy, (HDFS(CSV), Temp(HDFS(CSV))))
def create_new_hive_table_from_csv_file(tbl, data, dshape=None, path=None, **kwargs):
raise ValueError(
"Can not create a new Hive table from a single CSV file on HDFS.\n"
"Instead try loading a complete directory or base your data outside of"
" HDFS")
@append.register(TableProxy, (SSH(CSV), SSH(Directory(CSV))))
def append_remote_csv_to_new_table(tbl, data, dshape=None, **kwargs):
if not dshape:
dshape = discover(data)
if tbl.engine.dialect.name == 'hive':
statement = create_hive_statement(tbl.name, dshape,
db_name = str(tbl.engine.url).split('/')[-1],
**dialect_of(data))
else:
raise NotImplementedError("Don't know how to migrate directory of csvs"
" on Local disk to database of dialect %s" % tbl.engine.dialect.name)
with tbl.engine.connect() as conn:
conn.execute(statement)
metadata = metadata_of_engine(tbl.engine)
tbl2 = sa.Table(tbl.name, metadata, autoload=True,
autoload_with=tbl.engine)
return append(tbl2, data, **kwargs)
@append.register(TableProxy, object)
def append_anything_to_tableproxy(tbl, data, **kwargs):
return append(tbl, convert(Temp(SSH(CSV)), data, **kwargs), **kwargs)
@append.register(sa.Table, (SSH(CSV), SSH(Directory(CSV))))
def append_remote_csv_to_table(tbl, csv, **kwargs):
"""
Load Remote data into existing Hive table
"""
path = csv.path
if path[0] != '/':
path = '/home/%s/%s' % (csv.auth['username'], csv.path)
if tbl.bind.dialect.name == 'hive':
statement =('LOAD DATA LOCAL INPATH "%(path)s" INTO TABLE %(tablename)s'
% {'path': path, 'tablename': tbl.name})
else:
raise NotImplementedError("Don't know how to migrate csvs on remote "
"disk to database of dialect %s" % tbl.engine.dialect.name)
with tbl.bind.connect() as conn:
conn.execute(statement)
return tbl
@append.register(sa.Table, (HDFS(CSV), HDFS(Directory(CSV)), Temp(HDFS(CSV))))
def append_hdfs_csv_to_table(tbl, csv, **kwargs):
"""
Load Remote data into existing Hive table
"""
if tbl.bind.dialect.name != 'hive':
raise NotImplementedError("Don't know how to migrate csvs on remote "
"disk to database of dialect %s" % tbl.engine.dialect.name)
statement =('LOAD DATA INPATH "%(path)s" INTO TABLE %(tablename)s'
% {'path': csv.path, 'tablename': tbl.name})
with tbl.bind.connect() as conn:
conn.execute(statement)
return tbl
@append.register(TextFile, HDFS(TextFile))
@append.register(JSONLines, HDFS(JSONLines))
@append.register(JSON, HDFS(JSON))
@append.register(CSV, HDFS(CSV))
def append_hdfs_file_to_local(target, source, **kwargs):
text = source.hdfs.read_file(source.path.lstrip('/'), **kwargs)
with open(target.path, 'w') as f:
f.write(text)
return target
@convert.register(Temp(TextFile), HDFS(TextFile))
@convert.register(Temp(JSONLines), HDFS(JSONLines))
@convert.register(Temp(JSON), HDFS(JSON))
@convert.register(Temp(CSV), HDFS(CSV))
def convert_hdfs_file_to_temp_local(source, **kwargs):
ext = os.path.splitext(source.path)[1].strip('.')
fn = '.%s.%s' % (str(uuid.uuid1()), ext)
tmp = Temp(source.subtype)(fn)
return append(tmp, source, **kwargs)
@append.register(HDFS(TextFile), TextFile)
@append.register(HDFS(JSONLines), JSONLines)
@append.register(HDFS(JSON), JSON)
@append.register(HDFS(CSV), (CSV, Temp(CSV)))
def append_local_file_to_hdfs(target, source, blocksize=100000, **kwargs):
if raises(FileNotFound,
lambda: target.hdfs.list_dir(target.path.lstrip('/'))):
target.hdfs.create_file(target.path.lstrip('/'), '')
with open(source.path, 'r') as f:
blocks = partition_all(blocksize, f)
for block in blocks:
target.hdfs.append_file(target.path.lstrip('/'), ''.join(block))
return target
import csv
sniffer = csv.Sniffer()
def dialect_of(data, **kwargs):
""" CSV dialect of a CSV file stored in SSH, HDFS, or a Directory. """
keys = set(['delimiter', 'doublequote', 'escapechar', 'lineterminator',
'quotechar', 'quoting', 'skipinitialspace', 'strict', 'has_header'])
if isinstance(data, (HDFS(CSV), SSH(CSV))):
with sample(data) as fn:
d = dialect_of(CSV(fn, **data.dialect))
elif isinstance(data, (HDFS(Directory(CSV)), SSH(Directory(CSV)))):
with sample(data) as fn:
d = dialect_of(CSV(fn, **data.kwargs))
elif isinstance(data, Directory(CSV)):
d = dialect_of(next(data))
else:
assert isinstance(data, CSV)
# Get sample text
with open(data.path, 'r') as f:
text = f.read()
result = dict()
d = sniffer.sniff(text)
d = dict((k, getattr(d, k)) for k in keys if hasattr(d, k))
if data.has_header is None:
d['has_header'] = sniffer.has_header(text)
else:
d['has_header'] = data.has_header
d.update(data.dialect)
d.update(kwargs)
d = dict((k, v) for k, v in d.items() if k in keys)
return d
types_by_extension = {'csv': CSV, 'json': JSONLines, 'txt': TextFile,
'log': TextFile}
hdfs_pattern = '(((?P<user>[a-zA-Z]\w*)@)?(?P<host>[\w.-]*)?(:(?P<port>\d+))?:)?(?P<path>[/\w.*-]+)'
@resource.register('hdfs://.*', priority=16)
def resource_hdfs(uri, **kwargs):
if 'hdfs://' in uri:
uri = uri[len('hdfs://'):]
d = re.match(hdfs_pattern, uri).groupdict()
d = dict((k, v) for k, v in d.items() if v is not None)
path = d.pop('path')
kwargs.update(d)
try:
subtype = types_by_extension[path.split('.')[-1]]
if '*' in path:
subtype = Directory(subtype)
path = path.rsplit('/', 1)[0] + '/'
except KeyError:
subtype = type(resource(path))
return HDFS(subtype)(path, **kwargs)
@append.register(HDFS(TextFile), (Iterator, object))
@append.register(HDFS(JSONLines), (Iterator, object, SparkDataFrame,
SchemaRDD))
@append.register(HDFS(JSON), (list, object))
@append.register(HDFS(CSV), (chunks(pd.DataFrame), pd.DataFrame, object))
def append_object_to_hdfs(target, source, **kwargs):
tmp = convert(Temp(target.subtype), source, **kwargs)
return append(target, tmp, **kwargs)
@append.register(HDFS(TextFile), SSH(TextFile))
@append.register(HDFS(JSONLines), SSH(JSONLines))
@append.register(HDFS(JSON), SSH(JSON))
@append.register(HDFS(CSV), SSH(CSV))
def append_remote_file_to_hdfs(target, source, **kwargs):
raise MDNotImplementedError()
@append.register(HDFS(TextFile), HDFS(TextFile))
@append.register(HDFS(JSONLines), HDFS(JSONLines))
@append.register(HDFS(JSON), HDFS(JSON))
@append.register(HDFS(CSV), HDFS(CSV))
def append_hdfs_file_to_hdfs_file(target, source, **kwargs):
raise MDNotImplementedError()
@append.register(SSH(TextFile), HDFS(TextFile))
@append.register(SSH(JSONLines), HDFS(JSONLines))
@append.register(SSH(JSON), HDFS(JSON))
@append.register(SSH(CSV), HDFS(CSV))
def append_hdfs_file_to_remote(target, source, **kwargs):
raise MDNotImplementedError()
| {
"repo_name": "ywang007/odo",
"path": "odo/backends/hdfs.py",
"copies": "1",
"size": "18483",
"license": "bsd-3-clause",
"hash": 4363464210117431000,
"line_mean": 31.4263157895,
"line_max": 114,
"alpha_frac": 0.6204079424,
"autogenerated": false,
"ratio": 3.6008182349503213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47212261773503217,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import merge
from multipledispatch import Dispatcher
from .convert import convert
from .append import append
from .resource import resource
from datashape import discover, var
from datashape.dispatch import namespace
from datashape.predicates import isdimension
from .compatibility import unicode
if 'into' not in namespace:
namespace['into'] = Dispatcher('into')
into = namespace['into']
@into.register(type, object)
def into_type(a, b, **kwargs):
try:
if 'dshape' not in kwargs:
kwargs['dshape'] = discover(b)
except NotImplementedError:
pass
return convert(a, b, **kwargs)
@into.register(object, object)
def into_object(a, b, **kwargs):
""" Push one dataset into another
Examples
--------
>>> # Convert things into new things
>>> L = into(list, (1, 2, 3))
>>> L
[1, 2, 3]
>>> # Add things onto existing things
>>> _ = into(L, (4, 5, 6))
>>> L
[1, 2, 3, 4, 5, 6]
>>> # Specify things with strings
>>> into('myfile.csv', [('Alice', 1), ('Bob', 2)]) # doctest: +SKIP
See Also
--------
into.convert.convert - Convert things into new things
into.append.append - Add things onto existing things
into.resource.resource - Specify things with strings
"""
if isinstance(b, (str, unicode)):
b = resource(b, **kwargs)
try:
if 'dshape' not in kwargs:
kwargs['dshape'] = discover(b)
except NotImplementedError:
pass
return append(a, b, **kwargs)
@into.register(str, object)
def into_string(uri, b, **kwargs):
ds = kwargs.pop('dshape', None)
if not ds:
ds = discover(b)
if isdimension(ds[0]):
resource_ds = 0 * ds.subshape[0]
else:
resource_ds = ds
a = resource(uri, dshape=resource_ds, expected_dshape=ds, **kwargs)
return into(a, b, dshape=ds, **kwargs)
@into.register((type, str), str)
def into_string_string(a, b, **kwargs):
r = resource(b, **kwargs)
return into(a, r, **kwargs)
@into.register(object)
def into_curried(o, **kwargs1):
def curried_into(other, **kwargs2):
return into(o, other, **merge(kwargs2, kwargs1))
return curried_into
| {
"repo_name": "mrocklin/into",
"path": "into/into.py",
"copies": "1",
"size": "2281",
"license": "bsd-3-clause",
"hash": 596511214330033300,
"line_mean": 24.3444444444,
"line_max": 72,
"alpha_frac": 0.6207803595,
"autogenerated": false,
"ratio": 3.514637904468413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46354182639684127,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from toolz import merge, partial
from ..base import tokenize
from .. import threaded
def _partial_fit(model, x, y, kwargs=None):
kwargs = kwargs or dict()
model.partial_fit(x, y, **kwargs)
return model
def fit(model, x, y, get=threaded.get, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
This method will be called on dask arrays in sequential order. Ideally
your rows are independent and identically distributed.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
assert x.ndim == 2
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, 'partial_fit')
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
name = 'fit-' + tokenize(model, x, y, kwargs)
dsk = {(name, -1): model}
dsk.update(dict(((name, i), (_partial_fit, (name, i - 1),
(x.name, i, 0),
(y.name, i), kwargs))
for i in range(nblocks)))
return get(merge(x.dask, y.dask, dsk), (name, nblocks - 1))
def _predict(model, x):
return model.predict(x)[:, None]
def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model: scikit learn classifier
x: dask Array
See docstring for ``da.learn.fit``
"""
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
return x.map_blocks(func, chunks=(x.chunks[0], (1,))).squeeze()
| {
"repo_name": "jeffery-do/Vizdoombot",
"path": "doom/lib/python3.5/site-packages/dask/array/learn.py",
"copies": "1",
"size": "3135",
"license": "mit",
"hash": -3054004758489405400,
"line_mean": 29.7352941176,
"line_max": 79,
"alpha_frac": 0.5901116427,
"autogenerated": false,
"ratio": 3.411316648531012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9501428291231012,
"avg_score": 0,
"num_lines": 102
} |
from __future__ import absolute_import, division, print_function
from toolz import (
isdistinct, frequencies, concat as tconcat, unique, get, first,
)
import datashape
from datashape import DataShape, Option, Record, Unit, dshape, var, Fixed, Var
from datashape.predicates import isscalar, iscollection, isrecord
from .core import common_subexpression
from .expressions import Expr, ElemWise, label
from .expressions import dshape_method_list
from ..compatibility import zip_longest
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'IsIn', 'distinct', 'merge',
'head', 'sort', 'Join', 'join', 'transform', 'Concat', 'concat']
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
__slots__ = '_hash', '_child', '_key', 'ascending'
@property
def dshape(self):
return self._child.dshape
@property
def key(self):
if self._key is () or self._key is None:
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort collection
Parameters
----------
key: string, list of strings, Expr
Defines by what you want to sort. Either:
A single column string, ``t.sort('amount')``
A list of column strings, ``t.sort(['name', 'amount'])``
A Table Expression, ``t.sort(-t.amount)``
ascending: bool
Determines order of the sort
"""
if not isrecord(child.dshape.measure):
key = None
if isinstance(key, list):
key = tuple(key)
return Sort(child, key, ascending)
class Distinct(Expr):
"""
Removes duplicate rows from the table, so every row is distinct
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
"""
__slots__ = '_hash', '_child',
@property
def dshape(self):
return datashape.var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct(%s)' % self._child
def distinct(expr):
return Distinct(expr)
class Head(Expr):
""" First ``n`` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
"""
__slots__ = '_hash', '_child', 'n'
@property
def dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.head(%d)' % (self._child, self.n)
def head(child, n=10):
return Head(child, n)
head.__doc__ = Head.__doc__
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
if exprs:
return exprs[0]
if kwargs:
[(k, v)] = kwargs.items()
return v.label(k)
# Get common sub expression
exprs += tuple(label(v, k) for k, v in sorted(kwargs.items(), key=first))
try:
child = common_subexpression(*exprs)
except Exception:
raise ValueError("No common subexpression found for input expressions")
result = Merge(child, exprs)
if not isdistinct(result.fields):
raise ValueError("Repeated columns found: " + ', '.join(k for k, v in
frequencies(result.fields).items() if v > 1))
return result
def transform(t, replace=True, **kwargs):
""" Add named columns to table
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
"""
if replace and set(t.fields).intersection(set(kwargs)):
t = t[[c for c in t.fields if c not in kwargs]]
args = [t] + [v.label(k) for k, v in sorted(kwargs.items(), key=first)]
return merge(*args)
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
names, values = [], []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
names.extend(schema.names)
values.extend(schema.types)
elif isinstance(schema, Unit):
names.append(c._name)
values.append(schema)
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % c.schema[0])
return dshape(Record(list(zip(names, values))))
class Merge(ElemWise):
""" Merge many fields together
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
"""
__slots__ = '_hash', '_child', 'children'
@property
def schema(self):
return schema_concat(self.children)
@property
def fields(self):
return list(tconcat(child.fields for child in self.children))
def _subterms(self):
yield self
for i in self.children:
for node in i._subterms():
yield node
def _get_field(self, key):
for child in self.children:
if key in child.fields:
if isscalar(child.dshape.measure):
return child
else:
return child[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*[self[c] for c in key])
def _leaves(self):
return list(unique(tconcat(i._leaves() for i in self.children)))
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs : Expr
rhs : Expr
on_left : string
on_right : string
suffixes: pair
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = (
'_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes',
)
__inputs__ = 'lhs', 'rhs'
@property
def on_left(self):
if isinstance(self._on_left, tuple):
return list(self._on_left)
else:
return self._on_left
@property
def on_right(self):
if isinstance(self._on_right, tuple):
return list(self._on_right)
else:
return self._on_right
@property
def schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
joined = [[name, dt] for name, dt in self.lhs.schema[0].parameters[0]
if name in self.on_left]
left = [[name, dt] for name, dt in
zip(self.lhs.fields, types_of_fields(
self.lhs.fields, self.lhs))
if name not in self.on_left]
right = [[name, dt] for name, dt in
zip(self.rhs.fields, types_of_fields(
self.rhs.fields, self.rhs))
if name not in self.on_right]
# Handle overlapping but non-joined case, e.g.
left_other = [name for name, dt in left if name not in self.on_left]
right_other = [name for name, dt in right if name not in self.on_right]
overlap = set.intersection(set(left_other), set(right_other))
left_suffix, right_suffix = self.suffixes
left = [[name + left_suffix if name in overlap else name, dt]
for name, dt in left]
right = [[name + right_suffix if name in overlap else name, dt]
for name, dt in right]
if self.how in ('right', 'outer'):
left = [[name, option(dt)] for name, dt in left]
if self.how in ('left', 'outer'):
right = [[name, option(dt)] for name, dt in right]
return dshape(Record(joined + left + right))
@property
def dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields = fields[0]
assert fields == expr._name
return expr.dshape.measure
def join(lhs, rhs, on_left=None, on_right=None,
how='inner', suffixes=('_left', '_right')):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError("Can not Join. No shared columns between %s and %s" %
(lhs, rhs))
if types_of_fields(on_left, lhs) != types_of_fields(on_right, rhs):
raise TypeError("Schema's of joining columns do not match")
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
join.__doc__ = Join.__doc__
class Concat(Expr):
""" Stack tables on common columns
Parameters
----------
lhs : Expr
rhs : Expr
axis : int, optional
The axis to concatenate on.
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', '5 * {name: string, id: int32}')
>>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
Vertically stack these tables.
>>> stacked = concat(names, more_names)
>>> stacked.dshape
dshape("12 * {name: string, id: int32}")
>>> mat_a = symbol('a', '3 * 5 * int32')
>>> mat_b = symbol('b', '3 * 5 * int32')
Vertically stack these matricies.
>>> vstacked = concat(mat_a, mat_b, axis=0)
>>> vstacked.dshape
dshape("6 * 5 * int32")
Horizontally stack these matricies.
>>> hstacked = concat(mat_a, mat_b, axis=1)
>>> hstacked.dshape
dshape("3 * 10 * int32")
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = '_hash', 'lhs', 'rhs', 'axis'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
axis = self.axis
ldshape = self.lhs.dshape
lshape = ldshape.shape
return DataShape(
*(lshape[:axis] + (
_shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
) + lshape[axis + 1:] + (ldshape.measure,))
)
def _shape_add(a, b):
if isinstance(a, Var) or isinstance(b, Var):
return var
return Fixed(a.val + b.val)
def concat(lhs, rhs, axis=0):
ldshape = lhs.dshape
rdshape = rhs.dshape
if ldshape.measure != rdshape.measure:
raise TypeError(
'Mismatched measures: {l} != {r}'.format(
l=ldshape.measure, r=rdshape.measure
),
)
lshape = ldshape.shape
rshape = rdshape.shape
for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
if n != axis and a != b:
raise TypeError(
'Shapes are not equal along axis {n}: {a} != {b}'.format(
n=n, a=a, b=b,
),
)
if axis < 0 or 0 < len(lshape) <= axis:
raise ValueError(
"Invalid axis '{a}', must be in range: [0, {n})".format(
a=axis, n=len(lshape)
),
)
return Concat(lhs, rhs, axis)
concat.__doc__ = Concat.__doc__
class IsIn(ElemWise):
"""Return a boolean expression indicating whether another expression
contains values that are members of a collection.
"""
__slots__ = '_hash', '_child', '_keys'
@property
def schema(self):
return datashape.bool_
def isin(child, keys):
if isinstance(keys, Expr):
raise TypeError('keys argument cannot be an expression, '
'it must be an iterable object such as a list, '
'tuple or set')
return IsIn(child, frozenset(keys))
isin.__doc__ = IsIn.__doc__
dshape_method_list.extend([
(iscollection, set([sort, head])),
(lambda ds: len(ds.shape) == 1, set([distinct])),
(lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin])),
])
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/collections.py",
"copies": "1",
"size": "15808",
"license": "bsd-3-clause",
"hash": -7713161890210125000,
"line_mean": 27.128113879,
"line_max": 109,
"alpha_frac": 0.549215587,
"autogenerated": false,
"ratio": 3.6882874475035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9736389418030111,
"avg_score": 0.00022272329467766508,
"num_lines": 562
} |
from __future__ import absolute_import, division, print_function
from tornado.concurrent import Future
from tornado import gen
from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str, to_basestring # noqa: E501
from tornado.httpclient import HTTPClientError
from tornado.httputil import format_timestamp
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import locale
from tornado.locks import Event
from tornado.log import app_log, gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipBefore35, exec_test, ignore_deprecation
from tornado.util import ObjectDict, unicode_type, timedelta_to_seconds, PY3
from tornado.web import (
Application, RequestHandler, StaticFileHandler, RedirectHandler as WebRedirectHandler,
HTTPError, MissingArgumentError, ErrorHandler, authenticated, asynchronous, url,
_create_signature_v1, create_signed_value, decode_signed_value, get_signature_key_version,
UIModule, Finish, stream_request_body, removeslash, addslash, GZipContentEncoding,
)
import binascii
import contextlib
import copy
import datetime
import email.utils
import gzip
from io import BytesIO
import itertools
import logging
import os
import re
import socket
if PY3:
import urllib.parse as urllib_parse # py3
else:
import urllib as urllib_parse # py2
wsgi_safe_tests = []
def relpath(*a):
return os.path.join(os.path.dirname(__file__), *a)
def wsgi_safe(cls):
wsgi_safe_tests.append(cls)
return cls
class WebTestCase(AsyncHTTPTestCase):
"""Base class for web tests that also supports WSGI mode.
Override get_handlers and get_app_kwargs instead of get_app.
Append to wsgi_safe to have it run in wsgi_test as well.
"""
def get_app(self):
self.app = Application(self.get_handlers(), **self.get_app_kwargs())
return self.app
def get_handlers(self):
raise NotImplementedError()
def get_app_kwargs(self):
return {}
class SimpleHandlerTestCase(WebTestCase):
"""Simplified base class for tests that work with a single handler class.
To use, define a nested class named ``Handler``.
"""
def get_handlers(self):
return [('/', self.Handler)]
class HelloHandler(RequestHandler):
def get(self):
self.write('hello')
class CookieTestRequestHandler(RequestHandler):
# stub out enough methods to make the secure_cookie functions work
def __init__(self, cookie_secret='0123456789', key_version=None):
# don't call super.__init__
self._cookies = {}
if key_version is None:
self.application = ObjectDict(settings=dict(cookie_secret=cookie_secret))
else:
self.application = ObjectDict(settings=dict(cookie_secret=cookie_secret,
key_version=key_version))
def get_cookie(self, name):
return self._cookies.get(name)
def set_cookie(self, name, value, expires_days=None):
self._cookies[name] = value
# See SignedValueTest below for more.
class SecureCookieV1Test(unittest.TestCase):
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'bar', version=1)
self.assertEqual(handler.get_secure_cookie('foo', min_version=1),
b'bar')
def test_cookie_tampering_future_timestamp(self):
handler = CookieTestRequestHandler()
# this string base64-encodes to '12345678'
handler.set_secure_cookie('foo', binascii.a2b_hex(b'd76df8e7aefc'),
version=1)
cookie = handler._cookies['foo']
match = re.match(br'12345678\|([0-9]+)\|([0-9a-f]+)', cookie)
self.assertTrue(match)
timestamp = match.group(1)
sig = match.group(2)
self.assertEqual(
_create_signature_v1(handler.application.settings["cookie_secret"],
'foo', '12345678', timestamp),
sig)
# shifting digits from payload to timestamp doesn't alter signature
# (this is not desirable behavior, just confirming that that's how it
# works)
self.assertEqual(
_create_signature_v1(handler.application.settings["cookie_secret"],
'foo', '1234', b'5678' + timestamp),
sig)
# tamper with the cookie
handler._cookies['foo'] = utf8('1234|5678%s|%s' % (
to_basestring(timestamp), to_basestring(sig)))
# it gets rejected
with ExpectLog(gen_log, "Cookie timestamp in future"):
self.assertTrue(
handler.get_secure_cookie('foo', min_version=1) is None)
def test_arbitrary_bytes(self):
# Secure cookies accept arbitrary data (which is base64 encoded).
# Note that normal cookies accept only a subset of ascii.
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'\xe9', version=1)
self.assertEqual(handler.get_secure_cookie('foo', min_version=1), b'\xe9')
# See SignedValueTest below for more.
class SecureCookieV2Test(unittest.TestCase):
KEY_VERSIONS = {
0: 'ajklasdf0ojaisdf',
1: 'aslkjasaolwkjsdf'
}
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'bar', version=2)
self.assertEqual(handler.get_secure_cookie('foo', min_version=2), b'bar')
def test_key_version_roundtrip(self):
handler = CookieTestRequestHandler(cookie_secret=self.KEY_VERSIONS,
key_version=0)
handler.set_secure_cookie('foo', b'bar')
self.assertEqual(handler.get_secure_cookie('foo'), b'bar')
def test_key_version_roundtrip_differing_version(self):
handler = CookieTestRequestHandler(cookie_secret=self.KEY_VERSIONS,
key_version=1)
handler.set_secure_cookie('foo', b'bar')
self.assertEqual(handler.get_secure_cookie('foo'), b'bar')
def test_key_version_increment_version(self):
handler = CookieTestRequestHandler(cookie_secret=self.KEY_VERSIONS,
key_version=0)
handler.set_secure_cookie('foo', b'bar')
new_handler = CookieTestRequestHandler(cookie_secret=self.KEY_VERSIONS,
key_version=1)
new_handler._cookies = handler._cookies
self.assertEqual(new_handler.get_secure_cookie('foo'), b'bar')
def test_key_version_invalidate_version(self):
handler = CookieTestRequestHandler(cookie_secret=self.KEY_VERSIONS,
key_version=0)
handler.set_secure_cookie('foo', b'bar')
new_key_versions = self.KEY_VERSIONS.copy()
new_key_versions.pop(0)
new_handler = CookieTestRequestHandler(cookie_secret=new_key_versions,
key_version=1)
new_handler._cookies = handler._cookies
self.assertEqual(new_handler.get_secure_cookie('foo'), None)
class FinalReturnTest(WebTestCase):
def get_handlers(self):
test = self
class FinishHandler(RequestHandler):
@gen.coroutine
def get(self):
test.final_return = self.finish()
yield test.final_return
class RenderHandler(RequestHandler):
def create_template_loader(self, path):
return DictLoader({'foo.html': 'hi'})
@gen.coroutine
def get(self):
test.final_return = self.render('foo.html')
return [("/finish", FinishHandler),
("/render", RenderHandler)]
def get_app_kwargs(self):
return dict(template_path='FinalReturnTest')
def test_finish_method_return_future(self):
response = self.fetch(self.get_url('/finish'))
self.assertEqual(response.code, 200)
self.assertIsInstance(self.final_return, Future)
self.assertTrue(self.final_return.done())
def test_render_method_return_future(self):
response = self.fetch(self.get_url('/render'))
self.assertEqual(response.code, 200)
self.assertIsInstance(self.final_return, Future)
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
def get(self):
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
self.set_cookie("unicode", u"qwer")
self.set_cookie("bytes", b"zxcv")
class GetCookieHandler(RequestHandler):
def get(self):
self.write(self.get_cookie("foo", "default"))
class SetCookieDomainHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
self.set_cookie("unicode_args", "blah", domain=u"foo.com",
path=u"/foo")
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
self.set_cookie("equals", "a=b")
self.set_cookie("semicolon", "a;b")
self.set_cookie("quote", 'a"b')
class SetCookieOverwriteHandler(RequestHandler):
def get(self):
self.set_cookie("a", "b", domain="example.com")
self.set_cookie("c", "d", domain="example.com")
# A second call with the same name clobbers the first.
# Attributes from the first call are not carried over.
self.set_cookie("a", "e")
class SetCookieMaxAgeHandler(RequestHandler):
def get(self):
self.set_cookie("foo", "bar", max_age=10)
class SetCookieExpiresDaysHandler(RequestHandler):
def get(self):
self.set_cookie("foo", "bar", expires_days=10)
class SetCookieFalsyFlags(RequestHandler):
def get(self):
self.set_cookie("a", "1", secure=True)
self.set_cookie("b", "1", secure=False)
self.set_cookie("c", "1", httponly=True)
self.set_cookie("d", "1", httponly=False)
return [("/set", SetCookieHandler),
("/get", GetCookieHandler),
("/set_domain", SetCookieDomainHandler),
("/special_char", SetCookieSpecialCharHandler),
("/set_overwrite", SetCookieOverwriteHandler),
("/set_max_age", SetCookieMaxAgeHandler),
("/set_expires_days", SetCookieExpiresDaysHandler),
("/set_falsy_flags", SetCookieFalsyFlags)
]
def test_set_cookie(self):
response = self.fetch("/set")
self.assertEqual(sorted(response.headers.get_list("Set-Cookie")),
["bytes=zxcv; Path=/",
"str=asdf; Path=/",
"unicode=qwer; Path=/",
])
def test_get_cookie(self):
response = self.fetch("/get", headers={"Cookie": "foo=bar"})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": "/=exception;"})
self.assertEqual(response.body, b"default")
def test_set_cookie_domain(self):
response = self.fetch("/set_domain")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["unicode_args=blah; Domain=foo.com; Path=/foo"])
def test_cookie_special_char(self):
response = self.fetch("/special_char")
headers = sorted(response.headers.get_list("Set-Cookie"))
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0], 'equals="a=b"; Path=/')
self.assertEqual(headers[1], 'quote="a\\"b"; Path=/')
# python 2.7 octal-escapes the semicolon; older versions leave it alone
self.assertTrue(headers[2] in ('semicolon="a;b"; Path=/',
'semicolon="a\\073b"; Path=/'),
headers[2])
data = [('foo=a=b', 'a=b'),
('foo="a=b"', 'a=b'),
('foo="a;b"', '"a'), # even quoted, ";" is a delimiter
('foo=a\\073b', 'a\\073b'), # escapes only decoded in quotes
('foo="a\\073b"', 'a;b'),
('foo="a\\"b"', 'a"b'),
]
for header, expected in data:
logging.debug("trying %r", header)
response = self.fetch("/get", headers={"Cookie": header})
self.assertEqual(response.body, utf8(expected))
def test_set_cookie_overwrite(self):
response = self.fetch("/set_overwrite")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(sorted(headers),
["a=e; Path=/", "c=d; Domain=example.com; Path=/"])
def test_set_cookie_max_age(self):
response = self.fetch("/set_max_age")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(sorted(headers),
["foo=bar; Max-Age=10; Path=/"])
def test_set_cookie_expires_days(self):
response = self.fetch("/set_expires_days")
header = response.headers.get("Set-Cookie")
match = re.match("foo=bar; expires=(?P<expires>.+); Path=/", header)
self.assertIsNotNone(match)
expires = datetime.datetime.utcnow() + datetime.timedelta(days=10)
header_expires = datetime.datetime(
*email.utils.parsedate(match.groupdict()["expires"])[:6])
self.assertTrue(abs(timedelta_to_seconds(expires - header_expires)) < 10)
def test_set_cookie_false_flags(self):
response = self.fetch("/set_falsy_flags")
headers = sorted(response.headers.get_list("Set-Cookie"))
# The secure and httponly headers are capitalized in py35 and
# lowercase in older versions.
self.assertEqual(headers[0].lower(), 'a=1; path=/; secure')
self.assertEqual(headers[1].lower(), 'b=1; path=/')
self.assertEqual(headers[2].lower(), 'c=1; httponly; path=/')
self.assertEqual(headers[3].lower(), 'd=1; path=/')
class AuthRedirectRequestHandler(RequestHandler):
def initialize(self, login_url):
self.login_url = login_url
def get_login_url(self):
return self.login_url
@authenticated
def get(self):
# we'll never actually get here because the test doesn't follow redirects
self.send_error(500)
class AuthRedirectTest(WebTestCase):
def get_handlers(self):
return [('/relative', AuthRedirectRequestHandler,
dict(login_url='/login')),
('/absolute', AuthRedirectRequestHandler,
dict(login_url='http://example.com/login'))]
def test_relative_auth_redirect(self):
response = self.fetch(self.get_url('/relative'),
follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/login?next=%2Frelative')
def test_absolute_auth_redirect(self):
response = self.fetch(self.get_url('/absolute'),
follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
'http://example.com/login\?next=http%3A%2F%2F127.0.0.1%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
def initialize(self, test):
self.test = test
@gen.coroutine
def get(self):
self.test.on_handler_waiting()
never_finish = Event()
yield never_finish.wait()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(WebTestCase):
def get_handlers(self):
return [('/', ConnectionCloseHandler, dict(test=self))]
def test_connection_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("127.0.0.1", self.get_http_port()))
self.stream = IOStream(s)
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
self.wait()
def on_handler_waiting(self):
logging.debug('handler waiting')
self.stream.close()
def on_connection_close(self):
logging.debug('connection closed')
self.stop()
class EchoHandler(RequestHandler):
def get(self, *path_args):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
if type(key) != str:
raise Exception("incorrect type for key: %r" % type(key))
for value in self.request.arguments[key]:
if type(value) != bytes:
raise Exception("incorrect type for value: %r" %
type(value))
for value in self.get_arguments(key):
if type(value) != unicode_type:
raise Exception("incorrect type for value: %r" %
type(value))
for arg in path_args:
if type(arg) != unicode_type:
raise Exception("incorrect type for path arg: %r" % type(arg))
self.write(dict(path=self.request.path,
path_args=path_args,
args=recursive_unicode(self.request.arguments)))
class RequestEncodingTest(WebTestCase):
def get_handlers(self):
return [("/group/(.*)", EchoHandler),
("/slashes/([^/]*)/([^/]*)", EchoHandler),
]
def fetch_json(self, path):
return json_decode(self.fetch(path).body)
def test_group_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(self.fetch_json('/group/%3F'),
dict(path='/group/%3F', path_args=['?'], args={}))
self.assertEqual(self.fetch_json('/group/%3F?%3F=%3F'),
dict(path='/group/%3F', path_args=['?'], args={'?': ['?']}))
def test_group_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
{u"path": u"/group/%C3%A9",
u"path_args": [u"\u00e9"],
u"args": {u"arg": [u"\u00e9"]}})
def test_slashes(self):
# Slashes may be escaped to appear as a single "directory" in the path,
# but they are then unescaped when passed to the get() method.
self.assertEqual(self.fetch_json('/slashes/foo/bar'),
dict(path="/slashes/foo/bar",
path_args=["foo", "bar"],
args={}))
self.assertEqual(self.fetch_json('/slashes/a%2Fb/c%2Fd'),
dict(path="/slashes/a%2Fb/c%2Fd",
path_args=["a/b", "c/d"],
args={}))
def test_error(self):
# Percent signs (encoded as %25) should not mess up printf-style
# messages in logs
with ExpectLog(gen_log, ".*Invalid unicode"):
self.fetch("/group/?arg=%25%e9")
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
self.check_type('status', self.get_status(), int)
# get_argument is an exception from the general rule of using
# type str for non-body data mainly for historical reasons.
self.check_type('argument', self.get_argument('foo'), unicode_type)
self.check_type('cookie_key', list(self.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.cookies.values())[0].value, str)
# Secure cookies return bytes because they can contain arbitrary
# data, but regular cookies are native strings.
if list(self.cookies.keys()) != ['asdf']:
raise Exception("unexpected values for cookie keys: %r" %
self.cookies.keys())
self.check_type('get_secure_cookie', self.get_secure_cookie('asdf'), bytes)
self.check_type('get_cookie', self.get_cookie('asdf'), str)
self.check_type('xsrf_token', self.xsrf_token, bytes)
self.check_type('xsrf_form_html', self.xsrf_form_html(), str)
self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'), str)
self.check_type('request_summary', self._request_summary(), str)
def get(self, path_component):
# path_component uses type unicode instead of str for consistency
# with get_argument()
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def post(self, path_component):
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class DecodeArgHandler(RequestHandler):
def decode_argument(self, value, name=None):
if type(value) != bytes:
raise Exception("unexpected type for value: %r" % type(value))
# use self.request.arguments directly to avoid recursion
if 'encoding' in self.request.arguments:
return value.decode(to_unicode(self.request.arguments['encoding'][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) == bytes:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) == unicode_type:
return ["unicode", s]
raise Exception("unknown type")
self.write({'path': describe(arg),
'query': describe(self.get_argument("foo")),
})
class LinkifyHandler(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
def get(self):
self.render("page.html", entries=[1, 2])
class OptionalPathHandler(RequestHandler):
def get(self, path):
self.write({"path": path})
class FlowControlHandler(RequestHandler):
# These writes are too small to demonstrate real flow control,
# but at least it shows that the callbacks get run.
with ignore_deprecation():
@asynchronous
def get(self):
self.write("1")
with ignore_deprecation():
self.flush(callback=self.step2)
def step2(self):
self.write("2")
with ignore_deprecation():
self.flush(callback=self.step3)
def step3(self):
self.write("3")
self.finish()
class MultiHeaderHandler(RequestHandler):
def get(self):
self.set_header("x-overwrite", "1")
self.set_header("X-Overwrite", 2)
self.add_header("x-multi", 3)
self.add_header("X-Multi", "4")
class RedirectHandler(RequestHandler):
def get(self):
if self.get_argument('permanent', None) is not None:
self.redirect('/', permanent=int(self.get_argument('permanent')))
elif self.get_argument('status', None) is not None:
self.redirect('/', status=int(self.get_argument('status')))
else:
raise Exception("didn't get permanent or status arguments")
class EmptyFlushCallbackHandler(RequestHandler):
@gen.coroutine
def get(self):
# Ensure that the flush callback is run whether or not there
# was any output. The gen.Task and direct yield forms are
# equivalent.
yield self.flush() # "empty" flush, but writes headers
yield self.flush() # empty flush
self.write("o")
yield self.flush() # flushes the "o"
yield self.flush() # empty flush
self.finish("k")
class HeaderInjectionHandler(RequestHandler):
def get(self):
try:
self.set_header("X-Foo", "foo\r\nX-Bar: baz")
raise Exception("Didn't get expected exception")
except ValueError as e:
if "Unsafe header value" in str(e):
self.finish(b"ok")
else:
raise
class GetArgumentHandler(RequestHandler):
def prepare(self):
if self.get_argument('source', None) == 'query':
method = self.get_query_argument
elif self.get_argument('source', None) == 'body':
method = self.get_body_argument
else:
method = self.get_argument
self.finish(method("foo", "default"))
class GetArgumentsHandler(RequestHandler):
def prepare(self):
self.finish(dict(default=self.get_arguments("foo"),
query=self.get_query_arguments("foo"),
body=self.get_body_arguments("foo")))
# This test is shared with wsgi_test.py
@wsgi_safe
class WSGISafeWebTest(WebTestCase):
COOKIE_SECRET = "WebTest.COOKIE_SECRET"
def get_app_kwargs(self):
loader = DictLoader({
"linkify.html": "{% module linkify(message) %}",
"page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
"entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }",
embedded_javascript="js_embed()",
css_files=["/base.css", "/foo.css"],
javascript_files="/common.js",
html_head="<meta>",
html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
})
return dict(template_loader=loader,
autoescape="xhtml_escape",
cookie_secret=self.COOKIE_SECRET)
def tearDown(self):
super(WSGISafeWebTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
urls = [
url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
url("/decode_arg/(.*)", DecodeArgHandler, name='decode_arg'),
url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
url("/linkify", LinkifyHandler),
url("/uimodule_resources", UIModuleResourceHandler),
url("/optional_path/(.+)?", OptionalPathHandler),
url("/multi_header", MultiHeaderHandler),
url("/redirect", RedirectHandler),
url("/web_redirect_permanent", WebRedirectHandler, {"url": "/web_redirect_newpath"}),
url("/web_redirect", WebRedirectHandler,
{"url": "/web_redirect_newpath", "permanent": False}),
url("//web_redirect_double_slash", WebRedirectHandler,
{"url": '/web_redirect_newpath'}),
url("/header_injection", HeaderInjectionHandler),
url("/get_argument", GetArgumentHandler),
url("/get_arguments", GetArgumentsHandler),
]
return urls
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
def test_types(self):
cookie_value = to_unicode(create_signed_value(self.COOKIE_SECRET,
"asdf", "qwer"))
response = self.fetch("/typecheck/asdf?foo=bar",
headers={"Cookie": "asdf=" + cookie_value})
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
headers={"Cookie": "asdf=" + cookie_value},
body="foo=bar")
def test_decode_argument(self):
# These urls all decode to the same thing
urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
"/decode_arg/%E9?foo=%E9&encoding=latin1",
"/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
]
for req_url in urls:
response = self.fetch(req_url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'unicode', u'\u00e9'],
u'query': [u'unicode', u'\u00e9'],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'bytes', u'c3a9'],
u'query': [u'bytes', u'c3a9'],
})
def test_decode_argument_invalid_unicode(self):
# test that invalid unicode in URLs causes 400, not 500
with ExpectLog(gen_log, ".*Invalid unicode.*"):
response = self.fetch("/typecheck/invalid%FF")
self.assertEqual(response.code, 400)
response = self.fetch("/typecheck/invalid?foo=%FF")
self.assertEqual(response.code, 400)
def test_decode_argument_plus(self):
# These urls are all equivalent.
urls = ["/decode_arg/1%20%2B%201?foo=1%20%2B%201&encoding=utf-8",
"/decode_arg/1%20+%201?foo=1+%2B+1&encoding=utf-8"]
for req_url in urls:
response = self.fetch(req_url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'unicode', u'1 + 1'],
u'query': [u'unicode', u'1 + 1'],
})
def test_reverse_url(self):
self.assertEqual(self.app.reverse_url('decode_arg', 'foo'),
'/decode_arg/foo')
self.assertEqual(self.app.reverse_url('decode_arg', 42),
'/decode_arg/42')
self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
'/decode_arg/%E9')
self.assertEqual(self.app.reverse_url('decode_arg', u'\u00e9'),
'/decode_arg/%C3%A9')
self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
'/decode_arg/1%20%2B%201')
def test_uimodule_unescaped(self):
response = self.fetch("/linkify")
self.assertEqual(response.body,
b"<a href=\"http://example.com\">http://example.com</a>")
def test_uimodule_resources(self):
response = self.fetch("/uimodule_resources")
self.assertEqual(response.body, b"""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>
<div class="entry">...</div>
<div class="entry">...</div>
<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>""") # noqa: E501
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
{u"path": u"foo"})
self.assertEqual(self.fetch_json("/optional_path/"),
{u"path": None})
def test_multi_header(self):
response = self.fetch("/multi_header")
self.assertEqual(response.headers["x-overwrite"], "2")
self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"])
def test_redirect(self):
response = self.fetch("/redirect?permanent=1", follow_redirects=False)
self.assertEqual(response.code, 301)
response = self.fetch("/redirect?permanent=0", follow_redirects=False)
self.assertEqual(response.code, 302)
response = self.fetch("/redirect?status=307", follow_redirects=False)
self.assertEqual(response.code, 307)
def test_web_redirect(self):
response = self.fetch("/web_redirect_permanent", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/web_redirect_newpath')
response = self.fetch("/web_redirect", follow_redirects=False)
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/web_redirect_newpath')
def test_web_redirect_double_slash(self):
response = self.fetch("//web_redirect_double_slash", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/web_redirect_newpath')
def test_header_injection(self):
response = self.fetch("/header_injection")
self.assertEqual(response.body, b"ok")
def test_get_argument(self):
response = self.fetch("/get_argument?foo=bar")
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?foo=")
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument")
self.assertEqual(response.body, b"default")
# Test merging of query and body arguments.
# In singular form, body arguments take precedence over query arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?foo=bar", method="POST", body=body)
self.assertEqual(response.body, b"hello")
# In plural methods they are merged.
response = self.fetch("/get_arguments?foo=bar",
method="POST", body=body)
self.assertEqual(json_decode(response.body),
dict(default=['bar', 'hello'],
query=['bar'],
body=['hello']))
def test_get_query_arguments(self):
# send as a post so we can ensure the separation between query
# string and body arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?source=query&foo=bar",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?source=query&foo=",
method="POST", body=body)
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument?source=query",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_get_body_arguments(self):
body = urllib_parse.urlencode(dict(foo="bar"))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
body = urllib_parse.urlencode(dict(foo=""))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"")
body = urllib_parse.urlencode(dict())
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_no_gzip(self):
response = self.fetch('/get_argument')
self.assertNotIn('Accept-Encoding', response.headers.get('Vary', ''))
self.assertNotIn('gzip', response.headers.get('Content-Encoding', ''))
class NonWSGIWebTests(WebTestCase):
def get_handlers(self):
return [("/flow_control", FlowControlHandler),
("/empty_flush", EmptyFlushCallbackHandler),
]
def test_flow_control(self):
self.assertEqual(self.fetch("/flow_control").body, b"123")
def test_empty_flush(self):
response = self.fetch("/empty_flush")
self.assertEqual(response.body, b"ok")
@wsgi_safe
class ErrorResponseTest(WebTestCase):
def get_handlers(self):
class DefaultHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
raise HTTPError(int(self.get_argument("status")))
1 / 0
class WriteErrorHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1 / 0
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exc_info" in kwargs:
self.write("Exception: %s" % kwargs["exc_info"][0].__name__)
else:
self.write("Status: %d" % status_code)
class FailedWriteErrorHandler(RequestHandler):
def get(self):
1 / 0
def write_error(self, status_code, **kwargs):
raise Exception("exception in write_error")
return [url("/default", DefaultHandler),
url("/write_error", WriteErrorHandler),
url("/failed_write_error", FailedWriteErrorHandler),
]
def test_default(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/default")
self.assertEqual(response.code, 500)
self.assertTrue(b"500: Internal Server Error" in response.body)
response = self.fetch("/default?status=503")
self.assertEqual(response.code, 503)
self.assertTrue(b"503: Service Unavailable" in response.body)
response = self.fetch("/default?status=435")
self.assertEqual(response.code, 435)
self.assertTrue(b"435: Unknown" in response.body)
def test_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"Exception: ZeroDivisionError", response.body)
response = self.fetch("/write_error?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b"Status: 503", response.body)
def test_failed_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/failed_write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"", response.body)
@wsgi_safe
class StaticFileTest(WebTestCase):
# The expected MD5 hash of robots.txt, used in tests that call
# StaticFileHandler.get_version
robots_txt_hash = b"f71d20196d4caf35b6a670db8c70b03d"
static_dir = os.path.join(os.path.dirname(__file__), 'static')
def get_handlers(self):
class StaticUrlHandler(RequestHandler):
def get(self, path):
with_v = int(self.get_argument('include_version', 1))
self.write(self.static_url(path, include_version=with_v))
class AbsoluteStaticUrlHandler(StaticUrlHandler):
include_host = True
class OverrideStaticUrlHandler(RequestHandler):
def get(self, path):
do_include = bool(self.get_argument("include_host"))
self.include_host = not do_include
regular_url = self.static_url(path)
override_url = self.static_url(path, include_host=do_include)
if override_url == regular_url:
return self.write(str(False))
protocol = self.request.protocol + "://"
protocol_length = len(protocol)
check_regular = regular_url.find(protocol, 0, protocol_length)
check_override = override_url.find(protocol, 0, protocol_length)
if do_include:
result = (check_override == 0 and check_regular == -1)
else:
result = (check_override == -1 and check_regular == 0)
self.write(str(result))
return [('/static_url/(.*)', StaticUrlHandler),
('/abs_static_url/(.*)', AbsoluteStaticUrlHandler),
('/override_static_url/(.*)', OverrideStaticUrlHandler),
('/root_static/(.*)', StaticFileHandler, dict(path='/'))]
def get_app_kwargs(self):
return dict(static_path=relpath('static'))
def test_static_files(self):
response = self.fetch('/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
response = self.fetch('/static/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
self.assertEqual(response.headers.get("Content-Type"), "text/plain")
def test_static_compressed_files(self):
response = self.fetch("/static/sample.xml.gz")
self.assertEqual(response.headers.get("Content-Type"),
"application/gzip")
response = self.fetch("/static/sample.xml.bz2")
self.assertEqual(response.headers.get("Content-Type"),
"application/octet-stream")
# make sure the uncompressed file still has the correct type
response = self.fetch("/static/sample.xml")
self.assertTrue(response.headers.get("Content-Type")
in set(("text/xml", "application/xml")))
def test_static_url(self):
response = self.fetch("/static_url/robots.txt")
self.assertEqual(response.body,
b"/static/robots.txt?v=" + self.robots_txt_hash)
def test_absolute_static_url(self):
response = self.fetch("/abs_static_url/robots.txt")
self.assertEqual(response.body, (
utf8(self.get_url("/")) +
b"static/robots.txt?v=" +
self.robots_txt_hash
))
def test_relative_version_exclusion(self):
response = self.fetch("/static_url/robots.txt?include_version=0")
self.assertEqual(response.body, b"/static/robots.txt")
def test_absolute_version_exclusion(self):
response = self.fetch("/abs_static_url/robots.txt?include_version=0")
self.assertEqual(response.body,
utf8(self.get_url("/") + "static/robots.txt"))
def test_include_host_override(self):
self._trigger_include_host_check(False)
self._trigger_include_host_check(True)
def _trigger_include_host_check(self, include_host):
path = "/override_static_url/robots.txt?include_host=%s"
response = self.fetch(path % int(include_host))
self.assertEqual(response.body, utf8(str(True)))
def get_and_head(self, *args, **kwargs):
"""Performs a GET and HEAD request and returns the GET response.
Fails if any ``Content-*`` headers returned by the two requests
differ.
"""
head_response = self.fetch(*args, method="HEAD", **kwargs)
get_response = self.fetch(*args, method="GET", **kwargs)
content_headers = set()
for h in itertools.chain(head_response.headers, get_response.headers):
if h.startswith('Content-'):
content_headers.add(h)
for h in content_headers:
self.assertEqual(head_response.headers.get(h),
get_response.headers.get(h),
"%s differs between GET (%s) and HEAD (%s)" %
(h, head_response.headers.get(h),
get_response.headers.get(h)))
return get_response
def test_static_304_if_modified_since(self):
response1 = self.get_and_head("/static/robots.txt")
response2 = self.get_and_head("/static/robots.txt", headers={
'If-Modified-Since': response1.headers['Last-Modified']})
self.assertEqual(response2.code, 304)
self.assertTrue('Content-Length' not in response2.headers)
self.assertTrue('Last-Modified' not in response2.headers)
def test_static_304_if_none_match(self):
response1 = self.get_and_head("/static/robots.txt")
response2 = self.get_and_head("/static/robots.txt", headers={
'If-None-Match': response1.headers['Etag']})
self.assertEqual(response2.code, 304)
def test_static_304_etag_modified_bug(self):
response1 = self.get_and_head("/static/robots.txt")
response2 = self.get_and_head("/static/robots.txt", headers={
'If-None-Match': '"MISMATCH"',
'If-Modified-Since': response1.headers['Last-Modified']})
self.assertEqual(response2.code, 200)
def test_static_if_modified_since_pre_epoch(self):
# On windows, the functions that work with time_t do not accept
# negative values, and at least one client (processing.js) seems
# to use if-modified-since 1/1/1960 as a cache-busting technique.
response = self.get_and_head("/static/robots.txt", headers={
'If-Modified-Since': 'Fri, 01 Jan 1960 00:00:00 GMT'})
self.assertEqual(response.code, 200)
def test_static_if_modified_since_time_zone(self):
# Instead of the value from Last-Modified, make requests with times
# chosen just before and after the known modification time
# of the file to ensure that the right time zone is being used
# when parsing If-Modified-Since.
stat = os.stat(relpath('static/robots.txt'))
response = self.get_and_head('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime - 1)})
self.assertEqual(response.code, 200)
response = self.get_and_head('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime + 1)})
self.assertEqual(response.code, 304)
def test_static_etag(self):
response = self.get_and_head('/static/robots.txt')
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
def test_static_with_range(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=0-9'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b"User-agent")
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
self.assertEqual(response.headers.get("Content-Length"), "10")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 0-9/26")
def test_static_with_range_full_file(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=0-'})
# Note: Chrome refuses to play audio if it gets an HTTP 206 in response
# to ``Range: bytes=0-`` :(
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_full_past_end(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=0-10000000'})
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_partial_past_end(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=1-10000000'})
self.assertEqual(response.code, 206)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()[1:]))
self.assertEqual(response.headers.get("Content-Length"), "25")
self.assertEqual(response.headers.get("Content-Range"), "bytes 1-25/26")
def test_static_with_range_end_edge(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=22-'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_with_range_neg_end(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=-4'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_invalid_range(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'asdf'})
self.assertEqual(response.code, 200)
def test_static_unsatisfiable_range_zero_suffix(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=-0'})
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
self.assertEqual(response.code, 416)
def test_static_unsatisfiable_range_invalid_start(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=26'})
self.assertEqual(response.code, 416)
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
def test_static_head(self):
response = self.fetch('/static/robots.txt', method='HEAD')
self.assertEqual(response.code, 200)
# No body was returned, but we did get the right content length.
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '26')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_head_range(self):
response = self.fetch('/static/robots.txt', method='HEAD',
headers={'Range': 'bytes=1-4'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '4')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_range_if_none_match(self):
response = self.get_and_head('/static/robots.txt', headers={
'Range': 'bytes=1-4',
'If-None-Match': b'"' + self.robots_txt_hash + b'"'})
self.assertEqual(response.code, 304)
self.assertEqual(response.body, b'')
self.assertTrue('Content-Length' not in response.headers)
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_404(self):
response = self.get_and_head('/static/blarg')
self.assertEqual(response.code, 404)
def test_path_traversal_protection(self):
# curl_httpclient processes ".." on the client side, so we
# must test this with simple_httpclient.
self.http_client.close()
self.http_client = SimpleAsyncHTTPClient()
with ExpectLog(gen_log, ".*not in root static directory"):
response = self.get_and_head('/static/../static_foo.txt')
# Attempted path traversal should result in 403, not 200
# (which means the check failed and the file was served)
# or 404 (which means that the file didn't exist and
# is probably a packaging error).
self.assertEqual(response.code, 403)
@unittest.skipIf(os.name != 'posix', 'non-posix OS')
def test_root_static_path(self):
# Sometimes people set the StaticFileHandler's path to '/'
# to disable Tornado's path validation (in conjunction with
# their own validation in get_absolute_path). Make sure
# that the stricter validation in 4.2.1 doesn't break them.
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'static/robots.txt')
response = self.get_and_head('/root_static' + urllib_parse.quote(path))
self.assertEqual(response.code, 200)
@wsgi_safe
class StaticDefaultFilenameTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return []
def test_static_default_filename(self):
response = self.fetch('/static/dir/', follow_redirects=False)
self.assertEqual(response.code, 200)
self.assertEqual(b'this is the index\n', response.body)
def test_static_default_redirect(self):
response = self.fetch('/static/dir', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertTrue(response.headers['Location'].endswith('/static/dir/'))
@wsgi_safe
class StaticFileWithPathTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return [("/foo/(.*)", StaticFileHandler, {
"path": relpath("templates/"),
})]
def test_serve(self):
response = self.fetch("/foo/utf8.html")
self.assertEqual(response.body, b"H\xc3\xa9llo\n")
@wsgi_safe
class CustomStaticFileTest(WebTestCase):
def get_handlers(self):
class MyStaticFileHandler(StaticFileHandler):
@classmethod
def make_static_url(cls, settings, path):
version_hash = cls.get_version(settings, path)
extension_index = path.rindex('.')
before_version = path[:extension_index]
after_version = path[(extension_index + 1):]
return '/static/%s.%s.%s' % (before_version, version_hash,
after_version)
def parse_url_path(self, url_path):
extension_index = url_path.rindex('.')
version_index = url_path.rindex('.', 0, extension_index)
return '%s%s' % (url_path[:version_index],
url_path[extension_index:])
@classmethod
def get_absolute_path(cls, settings, path):
return 'CustomStaticFileTest:' + path
def validate_absolute_path(self, root, absolute_path):
return absolute_path
@classmethod
def get_content(self, path, start=None, end=None):
assert start is None and end is None
if path == 'CustomStaticFileTest:foo.txt':
return b'bar'
raise Exception("unexpected path %r" % path)
def get_content_size(self):
if self.absolute_path == 'CustomStaticFileTest:foo.txt':
return 3
raise Exception("unexpected path %r" % self.absolute_path)
def get_modified_time(self):
return None
@classmethod
def get_version(cls, settings, path):
return "42"
class StaticUrlHandler(RequestHandler):
def get(self, path):
self.write(self.static_url(path))
self.static_handler_class = MyStaticFileHandler
return [("/static_url/(.*)", StaticUrlHandler)]
def get_app_kwargs(self):
return dict(static_path="dummy",
static_handler_class=self.static_handler_class)
def test_serve(self):
response = self.fetch("/static/foo.42.txt")
self.assertEqual(response.body, b"bar")
def test_static_url(self):
with ExpectLog(gen_log, "Could not open static file", required=False):
response = self.fetch("/static_url/foo.txt")
self.assertEqual(response.body, b"/static/foo.42.txt")
@wsgi_safe
class HostMatchingTest(WebTestCase):
class Handler(RequestHandler):
def initialize(self, reply):
self.reply = reply
def get(self):
self.write(self.reply)
def get_handlers(self):
return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})]
def test_host_matching(self):
self.app.add_handlers("www.example.com",
[("/foo", HostMatchingTest.Handler, {"reply": "[0]"})])
self.app.add_handlers(r"www\.example\.com",
[("/bar", HostMatchingTest.Handler, {"reply": "[1]"})])
self.app.add_handlers("www.example.com",
[("/baz", HostMatchingTest.Handler, {"reply": "[2]"})])
self.app.add_handlers("www.e.*e.com",
[("/baz", HostMatchingTest.Handler, {"reply": "[3]"})])
response = self.fetch("/foo")
self.assertEqual(response.body, b"wildcard")
response = self.fetch("/bar")
self.assertEqual(response.code, 404)
response = self.fetch("/baz")
self.assertEqual(response.code, 404)
response = self.fetch("/foo", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[0]")
response = self.fetch("/bar", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[1]")
response = self.fetch("/baz", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[2]")
response = self.fetch("/baz", headers={'Host': 'www.exe.com'})
self.assertEqual(response.body, b"[3]")
@wsgi_safe
class DefaultHostMatchingTest(WebTestCase):
def get_handlers(self):
return []
def get_app_kwargs(self):
return {'default_host': "www.example.com"}
def test_default_host_matching(self):
self.app.add_handlers("www.example.com",
[("/foo", HostMatchingTest.Handler, {"reply": "[0]"})])
self.app.add_handlers(r"www\.example\.com",
[("/bar", HostMatchingTest.Handler, {"reply": "[1]"})])
self.app.add_handlers("www.test.com",
[("/baz", HostMatchingTest.Handler, {"reply": "[2]"})])
response = self.fetch("/foo")
self.assertEqual(response.body, b"[0]")
response = self.fetch("/bar")
self.assertEqual(response.body, b"[1]")
response = self.fetch("/baz")
self.assertEqual(response.code, 404)
response = self.fetch("/foo", headers={"X-Real-Ip": "127.0.0.1"})
self.assertEqual(response.code, 404)
self.app.default_host = "www.test.com"
response = self.fetch("/baz")
self.assertEqual(response.body, b"[2]")
@wsgi_safe
class NamedURLSpecGroupsTest(WebTestCase):
def get_handlers(self):
class EchoHandler(RequestHandler):
def get(self, path):
self.write(path)
return [("/str/(?P<path>.*)", EchoHandler),
(u"/unicode/(?P<path>.*)", EchoHandler)]
def test_named_urlspec_groups(self):
response = self.fetch("/str/foo")
self.assertEqual(response.body, b"foo")
response = self.fetch("/unicode/bar")
self.assertEqual(response.body, b"bar")
@wsgi_safe
class ClearHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("h1", "foo")
self.set_header("h2", "bar")
self.clear_header("h1")
self.clear_header("nonexistent")
def test_clear_header(self):
response = self.fetch("/")
self.assertTrue("h1" not in response.headers)
self.assertEqual(response.headers["h2"], "bar")
class Header204Test(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_status(204)
self.finish()
def test_204_headers(self):
response = self.fetch('/')
self.assertEqual(response.code, 204)
self.assertNotIn("Content-Length", response.headers)
self.assertNotIn("Transfer-Encoding", response.headers)
@wsgi_safe
class Header304Test(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("Content-Language", "en_US")
self.write("hello")
def test_304_headers(self):
response1 = self.fetch('/')
self.assertEqual(response1.headers["Content-Length"], "5")
self.assertEqual(response1.headers["Content-Language"], "en_US")
response2 = self.fetch('/', headers={
'If-None-Match': response1.headers["Etag"]})
self.assertEqual(response2.code, 304)
self.assertTrue("Content-Length" not in response2.headers)
self.assertTrue("Content-Language" not in response2.headers)
# Not an entity header, but should not be added to 304s by chunking
self.assertTrue("Transfer-Encoding" not in response2.headers)
@wsgi_safe
class StatusReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
reason = self.request.arguments.get('reason', [])
self.set_status(int(self.get_argument('code')),
reason=reason[0] if reason else None)
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient()
def test_status(self):
response = self.fetch("/?code=304")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Not Modified")
response = self.fetch("/?code=304&reason=Foo")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Foo")
response = self.fetch("/?code=682&reason=Bar")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Bar")
response = self.fetch("/?code=682")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Unknown")
@wsgi_safe
class DateHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write("hello")
def test_date_header(self):
response = self.fetch('/')
header_date = datetime.datetime(
*email.utils.parsedate(response.headers['Date'])[:6])
self.assertTrue(header_date - datetime.datetime.utcnow() <
datetime.timedelta(seconds=2))
@wsgi_safe
class RaiseWithReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
raise HTTPError(682, reason="Foo")
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient()
def test_raise_with_reason(self):
response = self.fetch("/")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Foo")
self.assertIn(b'682: Foo', response.body)
def test_httperror_str(self):
self.assertEqual(str(HTTPError(682, reason="Foo")), "HTTP 682: Foo")
def test_httperror_str_from_httputil(self):
self.assertEqual(str(HTTPError(682)), "HTTP 682: Unknown")
@wsgi_safe
class ErrorHandlerXSRFTest(WebTestCase):
def get_handlers(self):
# note that if the handlers list is empty we get the default_host
# redirect fallback instead of a 404, so test with both an
# explicitly defined error handler and an implicit 404.
return [('/error', ErrorHandler, dict(status_code=417))]
def get_app_kwargs(self):
return dict(xsrf_cookies=True)
def test_error_xsrf(self):
response = self.fetch('/error', method='POST', body='')
self.assertEqual(response.code, 417)
def test_404_xsrf(self):
response = self.fetch('/404', method='POST', body='')
self.assertEqual(response.code, 404)
@wsgi_safe
class GzipTestCase(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
for v in self.get_arguments('vary'):
self.add_header('Vary', v)
# Must write at least MIN_LENGTH bytes to activate compression.
self.write('hello world' + ('!' * GZipContentEncoding.MIN_LENGTH))
def get_app_kwargs(self):
return dict(
gzip=True,
static_path=os.path.join(os.path.dirname(__file__), 'static'))
def assert_compressed(self, response):
# simple_httpclient renames the content-encoding header;
# curl_httpclient doesn't.
self.assertEqual(
response.headers.get(
'Content-Encoding',
response.headers.get('X-Consumed-Content-Encoding')),
'gzip')
def test_gzip(self):
response = self.fetch('/')
self.assert_compressed(response)
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_gzip_static(self):
# The streaming responses in StaticFileHandler have subtle
# interactions with the gzip output so test this case separately.
response = self.fetch('/robots.txt')
self.assert_compressed(response)
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_gzip_not_requested(self):
response = self.fetch('/', use_gzip=False)
self.assertNotIn('Content-Encoding', response.headers)
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_vary_already_present(self):
response = self.fetch('/?vary=Accept-Language')
self.assert_compressed(response)
self.assertEqual([s.strip() for s in response.headers['Vary'].split(',')],
['Accept-Language', 'Accept-Encoding'])
def test_vary_already_present_multiple(self):
# Regression test for https://github.com/tornadoweb/tornado/issues/1670
response = self.fetch('/?vary=Accept-Language&vary=Cookie')
self.assert_compressed(response)
self.assertEqual([s.strip() for s in response.headers['Vary'].split(',')],
['Accept-Language', 'Cookie', 'Accept-Encoding'])
@wsgi_safe
class PathArgsInPrepareTest(WebTestCase):
class Handler(RequestHandler):
def prepare(self):
self.write(dict(args=self.path_args, kwargs=self.path_kwargs))
def get(self, path):
assert path == 'foo'
self.finish()
def get_handlers(self):
return [('/pos/(.*)', self.Handler),
('/kw/(?P<path>.*)', self.Handler)]
def test_pos(self):
response = self.fetch('/pos/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': ['foo'], 'kwargs': {}})
def test_kw(self):
response = self.fetch('/kw/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': [], 'kwargs': {'path': 'foo'}})
@wsgi_safe
class ClearAllCookiesTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.clear_all_cookies()
self.write('ok')
def test_clear_all_cookies(self):
response = self.fetch('/', headers={'Cookie': 'foo=bar; baz=xyzzy'})
set_cookies = sorted(response.headers.get_list('Set-Cookie'))
# Python 3.5 sends 'baz="";'; older versions use 'baz=;'
self.assertTrue(set_cookies[0].startswith('baz=;') or
set_cookies[0].startswith('baz="";'))
self.assertTrue(set_cookies[1].startswith('foo=;') or
set_cookies[1].startswith('foo="";'))
class PermissionError(Exception):
pass
@wsgi_safe
class ExceptionHandlerTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
exc = self.get_argument('exc')
if exc == 'http':
raise HTTPError(410, "no longer here")
elif exc == 'zero':
1 / 0
elif exc == 'permission':
raise PermissionError('not allowed')
def write_error(self, status_code, **kwargs):
if 'exc_info' in kwargs:
typ, value, tb = kwargs['exc_info']
if isinstance(value, PermissionError):
self.set_status(403)
self.write('PermissionError')
return
RequestHandler.write_error(self, status_code, **kwargs)
def log_exception(self, typ, value, tb):
if isinstance(value, PermissionError):
app_log.warning('custom logging for PermissionError: %s',
value.args[0])
else:
RequestHandler.log_exception(self, typ, value, tb)
def test_http_error(self):
# HTTPErrors are logged as warnings with no stack trace.
# TODO: extend ExpectLog to test this more precisely
with ExpectLog(gen_log, '.*no longer here'):
response = self.fetch('/?exc=http')
self.assertEqual(response.code, 410)
def test_unknown_error(self):
# Unknown errors are logged as errors with a stack trace.
with ExpectLog(app_log, 'Uncaught exception'):
response = self.fetch('/?exc=zero')
self.assertEqual(response.code, 500)
def test_known_error(self):
# log_exception can override logging behavior, and write_error
# can override the response.
with ExpectLog(app_log,
'custom logging for PermissionError: not allowed'):
response = self.fetch('/?exc=permission')
self.assertEqual(response.code, 403)
@wsgi_safe
class BuggyLoggingTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
1 / 0
def log_exception(self, typ, value, tb):
1 / 0
def test_buggy_log_exception(self):
# Something gets logged even though the application's
# logger is broken.
with ExpectLog(app_log, '.*'):
self.fetch('/')
@wsgi_safe
class UIMethodUIModuleTest(SimpleHandlerTestCase):
"""Test that UI methods and modules are created correctly and
associated with the handler.
"""
class Handler(RequestHandler):
def get(self):
self.render('foo.html')
def value(self):
return self.get_argument("value")
def get_app_kwargs(self):
def my_ui_method(handler, x):
return "In my_ui_method(%s) with handler value %s." % (
x, handler.value())
class MyModule(UIModule):
def render(self, x):
return "In MyModule(%s) with handler value %s." % (
x, self.handler.value())
loader = DictLoader({
'foo.html': '{{ my_ui_method(42) }} {% module MyModule(123) %}',
})
return dict(template_loader=loader,
ui_methods={'my_ui_method': my_ui_method},
ui_modules={'MyModule': MyModule})
def tearDown(self):
super(UIMethodUIModuleTest, self).tearDown()
# TODO: fix template loader caching so this isn't necessary.
RequestHandler._template_loaders.clear()
def test_ui_method(self):
response = self.fetch('/?value=asdf')
self.assertEqual(response.body,
b'In my_ui_method(42) with handler value asdf. '
b'In MyModule(123) with handler value asdf.')
@wsgi_safe
class GetArgumentErrorTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
try:
self.get_argument('foo')
self.write({})
except MissingArgumentError as e:
self.write({'arg_name': e.arg_name,
'log_message': e.log_message})
def test_catch_error(self):
response = self.fetch('/')
self.assertEqual(json_decode(response.body),
{'arg_name': 'foo',
'log_message': 'Missing argument foo'})
class MultipleExceptionTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
exc_count = 0
with ignore_deprecation():
@asynchronous
def get(self):
IOLoop.current().add_callback(lambda: 1 / 0)
IOLoop.current().add_callback(lambda: 1 / 0)
def log_exception(self, typ, value, tb):
MultipleExceptionTest.Handler.exc_count += 1
def test_multi_exception(self):
with ignore_deprecation():
# This test verifies that multiple exceptions raised into the same
# ExceptionStackContext do not generate extraneous log entries
# due to "Cannot send error response after headers written".
# log_exception is called, but it does not proceed to send_error.
response = self.fetch('/')
self.assertEqual(response.code, 500)
response = self.fetch('/')
self.assertEqual(response.code, 500)
# Each of our two requests generated two exceptions, we should have
# seen at least three of them by now (the fourth may still be
# in the queue).
self.assertGreater(MultipleExceptionTest.Handler.exc_count, 2)
@wsgi_safe
class SetLazyPropertiesTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.current_user = 'Ben'
self.locale = locale.get('en_US')
def get_user_locale(self):
raise NotImplementedError()
def get_current_user(self):
raise NotImplementedError()
def get(self):
self.write('Hello %s (%s)' % (self.current_user, self.locale.code))
def test_set_properties(self):
# Ensure that current_user can be assigned to normally for apps
# that want to forgo the lazy get_current_user property
response = self.fetch('/')
self.assertEqual(response.body, b'Hello Ben (en_US)')
@wsgi_safe
class GetCurrentUserTest(WebTestCase):
def get_app_kwargs(self):
class WithoutUserModule(UIModule):
def render(self):
return ''
class WithUserModule(UIModule):
def render(self):
return str(self.current_user)
loader = DictLoader({
'without_user.html': '',
'with_user.html': '{{ current_user }}',
'without_user_module.html': '{% module WithoutUserModule() %}',
'with_user_module.html': '{% module WithUserModule() %}',
})
return dict(template_loader=loader,
ui_modules={'WithUserModule': WithUserModule,
'WithoutUserModule': WithoutUserModule})
def tearDown(self):
super(GetCurrentUserTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
class CurrentUserHandler(RequestHandler):
def prepare(self):
self.has_loaded_current_user = False
def get_current_user(self):
self.has_loaded_current_user = True
return ''
class WithoutUserHandler(CurrentUserHandler):
def get(self):
self.render_string('without_user.html')
self.finish(str(self.has_loaded_current_user))
class WithUserHandler(CurrentUserHandler):
def get(self):
self.render_string('with_user.html')
self.finish(str(self.has_loaded_current_user))
class CurrentUserModuleHandler(CurrentUserHandler):
def get_template_namespace(self):
# If RequestHandler.get_template_namespace is called, then
# get_current_user is evaluated. Until #820 is fixed, this
# is a small hack to circumvent the issue.
return self.ui
class WithoutUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('without_user_module.html')
self.finish(str(self.has_loaded_current_user))
class WithUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('with_user_module.html')
self.finish(str(self.has_loaded_current_user))
return [('/without_user', WithoutUserHandler),
('/with_user', WithUserHandler),
('/without_user_module', WithoutUserModuleHandler),
('/with_user_module', WithUserModuleHandler)]
@unittest.skip('needs fix')
def test_get_current_user_is_lazy(self):
# TODO: Make this test pass. See #820.
response = self.fetch('/without_user')
self.assertEqual(response.body, b'False')
def test_get_current_user_works(self):
response = self.fetch('/with_user')
self.assertEqual(response.body, b'True')
def test_get_current_user_from_ui_module_is_lazy(self):
response = self.fetch('/without_user_module')
self.assertEqual(response.body, b'False')
def test_get_current_user_from_ui_module_works(self):
response = self.fetch('/with_user_module')
self.assertEqual(response.body, b'True')
@wsgi_safe
class UnimplementedHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
pass
def test_unimplemented_standard_methods(self):
for method in ['HEAD', 'GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.code, 405)
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.code, 405)
class UnimplementedNonStandardMethodsTest(SimpleHandlerTestCase):
# wsgiref.validate complains about unknown methods in a way that makes
# this test not wsgi_safe.
class Handler(RequestHandler):
def other(self):
# Even though this method exists, it won't get called automatically
# because it is not in SUPPORTED_METHODS.
self.write('other')
def test_unimplemented_patch(self):
# PATCH is recently standardized; Tornado supports it by default
# but wsgiref.validate doesn't like it.
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.code, 405)
def test_unimplemented_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.code, 405)
@wsgi_safe
class AllHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def method(self):
self.write(self.request.method)
get = delete = options = post = put = method
def test_standard_methods(self):
response = self.fetch('/', method='HEAD')
self.assertEqual(response.body, b'')
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
class PatchMethodTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def patch(self):
self.write('patch')
def other(self):
self.write('other')
def test_patch(self):
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.body, b'patch')
def test_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'other')
@wsgi_safe
class FinishInPrepareTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.finish('done')
def get(self):
# It's difficult to assert for certain that a method did not
# or will not be called in an asynchronous context, but this
# will be logged noisily if it is reached.
raise Exception('should not reach this method')
def test_finish_in_prepare(self):
response = self.fetch('/')
self.assertEqual(response.body, b'done')
@wsgi_safe
class Default404Test(WebTestCase):
def get_handlers(self):
# If there are no handlers at all a default redirect handler gets added.
return [('/foo', RequestHandler)]
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body,
b'<html><title>404: Not Found</title>'
b'<body>404: Not Found</body></html>')
@wsgi_safe
class Custom404Test(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
class Custom404Handler(RequestHandler):
def get(self):
self.set_status(404)
self.write('custom 404 response')
return dict(default_handler_class=Custom404Handler)
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body, b'custom 404 response')
@wsgi_safe
class DefaultHandlerArgumentsTest(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
return dict(default_handler_class=ErrorHandler,
default_handler_args=dict(status_code=403))
def test_403(self):
response = self.fetch('/')
self.assertEqual(response.code, 403)
@wsgi_safe
class HandlerByNameTest(WebTestCase):
def get_handlers(self):
# All three are equivalent.
return [('/hello1', HelloHandler),
('/hello2', 'tornado.test.web_test.HelloHandler'),
url('/hello3', 'tornado.test.web_test.HelloHandler'),
]
def test_handler_by_name(self):
resp = self.fetch('/hello1')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello2')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello3')
self.assertEqual(resp.body, b'hello')
class StreamingRequestBodyTest(WebTestCase):
def get_handlers(self):
@stream_request_body
class StreamingBodyHandler(RequestHandler):
def initialize(self, test):
self.test = test
def prepare(self):
self.test.prepared.set_result(None)
def data_received(self, data):
self.test.data.set_result(data)
def get(self):
self.test.finished.set_result(None)
self.write({})
@stream_request_body
class EarlyReturnHandler(RequestHandler):
def prepare(self):
# If we finish the response in prepare, it won't continue to
# the (non-existent) data_received.
raise HTTPError(401)
@stream_request_body
class CloseDetectionHandler(RequestHandler):
def initialize(self, test):
self.test = test
def on_connection_close(self):
super(CloseDetectionHandler, self).on_connection_close()
self.test.close_future.set_result(None)
return [('/stream_body', StreamingBodyHandler, dict(test=self)),
('/early_return', EarlyReturnHandler),
('/close_detection', CloseDetectionHandler, dict(test=self))]
def connect(self, url, connection_close):
# Use a raw connection so we can control the sending of data.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("127.0.0.1", self.get_http_port()))
stream = IOStream(s)
stream.write(b"GET " + url + b" HTTP/1.1\r\n")
if connection_close:
stream.write(b"Connection: close\r\n")
stream.write(b"Transfer-Encoding: chunked\r\n\r\n")
return stream
@gen_test
def test_streaming_body(self):
self.prepared = Future()
self.data = Future()
self.finished = Future()
stream = self.connect(b"/stream_body", connection_close=True)
yield self.prepared
stream.write(b"4\r\nasdf\r\n")
# Ensure the first chunk is received before we send the second.
data = yield self.data
self.assertEqual(data, b"asdf")
self.data = Future()
stream.write(b"4\r\nqwer\r\n")
data = yield self.data
self.assertEquals(data, b"qwer")
stream.write(b"0\r\n\r\n")
yield self.finished
data = yield stream.read_until_close()
# This would ideally use an HTTP1Connection to read the response.
self.assertTrue(data.endswith(b"{}"))
stream.close()
@gen_test
def test_early_return(self):
stream = self.connect(b"/early_return", connection_close=False)
data = yield stream.read_until_close()
self.assertTrue(data.startswith(b"HTTP/1.1 401"))
@gen_test
def test_early_return_with_data(self):
stream = self.connect(b"/early_return", connection_close=False)
stream.write(b"4\r\nasdf\r\n")
data = yield stream.read_until_close()
self.assertTrue(data.startswith(b"HTTP/1.1 401"))
@gen_test
def test_close_during_upload(self):
self.close_future = Future()
stream = self.connect(b"/close_detection", connection_close=False)
stream.close()
yield self.close_future
# Each method in this handler returns a yieldable object and yields to the
# IOLoop so the future is not immediately ready. Ensure that the
# yieldables are respected and no method is called before the previous
# one has completed.
@stream_request_body
class BaseFlowControlHandler(RequestHandler):
def initialize(self, test):
self.test = test
self.method = None
self.methods = []
@contextlib.contextmanager
def in_method(self, method):
if self.method is not None:
self.test.fail("entered method %s while in %s" %
(method, self.method))
self.method = method
self.methods.append(method)
try:
yield
finally:
self.method = None
@gen.coroutine
def prepare(self):
# Note that asynchronous prepare() does not block data_received,
# so we don't use in_method here.
self.methods.append('prepare')
yield gen.moment
@gen.coroutine
def post(self):
with self.in_method('post'):
yield gen.moment
self.write(dict(methods=self.methods))
class BaseStreamingRequestFlowControlTest(object):
def get_httpserver_options(self):
# Use a small chunk size so flow control is relevant even though
# all the data arrives at once.
return dict(chunk_size=10, decompress_request=True)
def get_http_client(self):
# simple_httpclient only: curl doesn't support body_producer.
return SimpleAsyncHTTPClient()
# Test all the slightly different code paths for fixed, chunked, etc bodies.
def test_flow_control_fixed_body(self):
response = self.fetch('/', body='abcdefghijklmnopqrstuvwxyz',
method='POST')
response.rethrow()
self.assertEqual(json_decode(response.body),
dict(methods=['prepare', 'data_received',
'data_received', 'data_received',
'post']))
def test_flow_control_chunked_body(self):
chunks = [b'abcd', b'efgh', b'ijkl']
@gen.coroutine
def body_producer(write):
for i in chunks:
yield write(i)
response = self.fetch('/', body_producer=body_producer, method='POST')
response.rethrow()
self.assertEqual(json_decode(response.body),
dict(methods=['prepare', 'data_received',
'data_received', 'data_received',
'post']))
def test_flow_control_compressed_body(self):
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
gzip_file.write(b'abcdefghijklmnopqrstuvwxyz')
gzip_file.close()
compressed_body = bytesio.getvalue()
response = self.fetch('/', body=compressed_body, method='POST',
headers={'Content-Encoding': 'gzip'})
response.rethrow()
self.assertEqual(json_decode(response.body),
dict(methods=['prepare', 'data_received',
'data_received', 'data_received',
'post']))
class DecoratedStreamingRequestFlowControlTest(
BaseStreamingRequestFlowControlTest,
WebTestCase):
def get_handlers(self):
class DecoratedFlowControlHandler(BaseFlowControlHandler):
@gen.coroutine
def data_received(self, data):
with self.in_method('data_received'):
yield gen.moment
return [('/', DecoratedFlowControlHandler, dict(test=self))]
@skipBefore35
class NativeStreamingRequestFlowControlTest(
BaseStreamingRequestFlowControlTest,
WebTestCase):
def get_handlers(self):
class NativeFlowControlHandler(BaseFlowControlHandler):
data_received = exec_test(globals(), locals(), """
async def data_received(self, data):
with self.in_method('data_received'):
import asyncio
await asyncio.sleep(0)
""")["data_received"]
return [('/', NativeFlowControlHandler, dict(test=self))]
@wsgi_safe
class IncorrectContentLengthTest(SimpleHandlerTestCase):
def get_handlers(self):
test = self
self.server_error = None
# Manually set a content-length that doesn't match the actual content.
class TooHigh(RequestHandler):
def get(self):
self.set_header("Content-Length", "42")
try:
self.finish("ok")
except Exception as e:
test.server_error = e
raise
class TooLow(RequestHandler):
def get(self):
self.set_header("Content-Length", "2")
try:
self.finish("hello")
except Exception as e:
test.server_error = e
raise
return [('/high', TooHigh),
('/low', TooLow)]
def test_content_length_too_high(self):
# When the content-length is too high, the connection is simply
# closed without completing the response. An error is logged on
# the server.
with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"):
with ExpectLog(gen_log,
"(Cannot send error response after headers written"
"|Failed to flush partial response)"):
with self.assertRaises(HTTPClientError):
self.fetch("/high", raise_error=True)
self.assertEqual(str(self.server_error),
"Tried to write 40 bytes less than Content-Length")
def test_content_length_too_low(self):
# When the content-length is too low, the connection is closed
# without writing the last chunk, so the client never sees the request
# complete (which would be a framing error).
with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"):
with ExpectLog(gen_log,
"(Cannot send error response after headers written"
"|Failed to flush partial response)"):
with self.assertRaises(HTTPClientError):
self.fetch("/low", raise_error=True)
self.assertEqual(str(self.server_error),
"Tried to write more data than Content-Length")
class ClientCloseTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
if self.request.version.startswith('HTTP/1'):
# Simulate a connection closed by the client during
# request processing. The client will see an error, but the
# server should respond gracefully (without logging errors
# because we were unable to write out as many bytes as
# Content-Length said we would)
self.request.connection.stream.close()
self.write('hello')
else:
# TODO: add a HTTP2-compatible version of this test.
self.write('requires HTTP/1.x')
def test_client_close(self):
with self.assertRaises((HTTPClientError, unittest.SkipTest)):
response = self.fetch('/', raise_error=True)
if response.body == b'requires HTTP/1.x':
self.skipTest('requires HTTP/1.x')
self.assertEqual(response.code, 599)
class SignedValueTest(unittest.TestCase):
SECRET = "It's a secret to everybody"
SECRET_DICT = {0: "asdfbasdf", 1: "12312312", 2: "2342342"}
def past(self):
return self.present() - 86400 * 32
def present(self):
return 1300000000
def test_known_values(self):
signed_v1 = create_signed_value(SignedValueTest.SECRET, "key", "value",
version=1, clock=self.present)
self.assertEqual(
signed_v1,
b"dmFsdWU=|1300000000|31c934969f53e48164c50768b40cbd7e2daaaa4f")
signed_v2 = create_signed_value(SignedValueTest.SECRET, "key", "value",
version=2, clock=self.present)
self.assertEqual(
signed_v2,
b"2|1:0|10:1300000000|3:key|8:dmFsdWU=|"
b"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152")
signed_default = create_signed_value(SignedValueTest.SECRET,
"key", "value", clock=self.present)
self.assertEqual(signed_default, signed_v2)
decoded_v1 = decode_signed_value(SignedValueTest.SECRET, "key",
signed_v1, min_version=1,
clock=self.present)
self.assertEqual(decoded_v1, b"value")
decoded_v2 = decode_signed_value(SignedValueTest.SECRET, "key",
signed_v2, min_version=2,
clock=self.present)
self.assertEqual(decoded_v2, b"value")
def test_name_swap(self):
signed1 = create_signed_value(SignedValueTest.SECRET, "key1", "value",
clock=self.present)
signed2 = create_signed_value(SignedValueTest.SECRET, "key2", "value",
clock=self.present)
# Try decoding each string with the other's "name"
decoded1 = decode_signed_value(SignedValueTest.SECRET, "key2", signed1,
clock=self.present)
self.assertIs(decoded1, None)
decoded2 = decode_signed_value(SignedValueTest.SECRET, "key1", signed2,
clock=self.present)
self.assertIs(decoded2, None)
def test_expired(self):
signed = create_signed_value(SignedValueTest.SECRET, "key1", "value",
clock=self.past)
decoded_past = decode_signed_value(SignedValueTest.SECRET, "key1",
signed, clock=self.past)
self.assertEqual(decoded_past, b"value")
decoded_present = decode_signed_value(SignedValueTest.SECRET, "key1",
signed, clock=self.present)
self.assertIs(decoded_present, None)
def test_payload_tampering(self):
# These cookies are variants of the one in test_known_values.
sig = "3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152"
def validate(prefix):
return (b'value' ==
decode_signed_value(SignedValueTest.SECRET, "key",
prefix + sig, clock=self.present))
self.assertTrue(validate("2|1:0|10:1300000000|3:key|8:dmFsdWU=|"))
# Change key version
self.assertFalse(validate("2|1:1|10:1300000000|3:key|8:dmFsdWU=|"))
# length mismatch (field too short)
self.assertFalse(validate("2|1:0|10:130000000|3:key|8:dmFsdWU=|"))
# length mismatch (field too long)
self.assertFalse(validate("2|1:0|10:1300000000|3:keey|8:dmFsdWU=|"))
def test_signature_tampering(self):
prefix = "2|1:0|10:1300000000|3:key|8:dmFsdWU=|"
def validate(sig):
return (b'value' ==
decode_signed_value(SignedValueTest.SECRET, "key",
prefix + sig, clock=self.present))
self.assertTrue(validate(
"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152"))
# All zeros
self.assertFalse(validate("0" * 32))
# Change one character
self.assertFalse(validate(
"4d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152"))
# Change another character
self.assertFalse(validate(
"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e153"))
# Truncate
self.assertFalse(validate(
"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e15"))
# Lengthen
self.assertFalse(validate(
"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e1538"))
def test_non_ascii(self):
value = b"\xe9"
signed = create_signed_value(SignedValueTest.SECRET, "key", value,
clock=self.present)
decoded = decode_signed_value(SignedValueTest.SECRET, "key", signed,
clock=self.present)
self.assertEqual(value, decoded)
def test_key_versioning_read_write_default_key(self):
value = b"\xe9"
signed = create_signed_value(SignedValueTest.SECRET_DICT,
"key", value, clock=self.present,
key_version=0)
decoded = decode_signed_value(SignedValueTest.SECRET_DICT,
"key", signed, clock=self.present)
self.assertEqual(value, decoded)
def test_key_versioning_read_write_non_default_key(self):
value = b"\xe9"
signed = create_signed_value(SignedValueTest.SECRET_DICT,
"key", value, clock=self.present,
key_version=1)
decoded = decode_signed_value(SignedValueTest.SECRET_DICT,
"key", signed, clock=self.present)
self.assertEqual(value, decoded)
def test_key_versioning_invalid_key(self):
value = b"\xe9"
signed = create_signed_value(SignedValueTest.SECRET_DICT,
"key", value, clock=self.present,
key_version=0)
newkeys = SignedValueTest.SECRET_DICT.copy()
newkeys.pop(0)
decoded = decode_signed_value(newkeys,
"key", signed, clock=self.present)
self.assertEqual(None, decoded)
def test_key_version_retrieval(self):
value = b"\xe9"
signed = create_signed_value(SignedValueTest.SECRET_DICT,
"key", value, clock=self.present,
key_version=1)
key_version = get_signature_key_version(signed)
self.assertEqual(1, key_version)
@wsgi_safe
class XSRFTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
version = int(self.get_argument("version", "2"))
# This would be a bad idea in a real app, but in this test
# it's fine.
self.settings["xsrf_cookie_version"] = version
self.write(self.xsrf_token)
def post(self):
self.write("ok")
def get_app_kwargs(self):
return dict(xsrf_cookies=True)
def setUp(self):
super(XSRFTest, self).setUp()
self.xsrf_token = self.get_token()
def get_token(self, old_token=None, version=None):
if old_token is not None:
headers = self.cookie_headers(old_token)
else:
headers = None
response = self.fetch(
"/" if version is None else ("/?version=%d" % version),
headers=headers)
response.rethrow()
return native_str(response.body)
def cookie_headers(self, token=None):
if token is None:
token = self.xsrf_token
return {"Cookie": "_xsrf=" + token}
def test_xsrf_fail_no_token(self):
with ExpectLog(gen_log, ".*'_xsrf' argument missing"):
response = self.fetch("/", method="POST", body=b"")
self.assertEqual(response.code, 403)
def test_xsrf_fail_body_no_cookie(self):
with ExpectLog(gen_log, ".*XSRF cookie does not match POST"):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=self.xsrf_token)))
self.assertEqual(response.code, 403)
def test_xsrf_fail_argument_invalid_format(self):
with ExpectLog(gen_log, ".*'_xsrf' argument has invalid format"):
response = self.fetch(
"/", method="POST",
headers=self.cookie_headers(),
body=urllib_parse.urlencode(dict(_xsrf='3|')))
self.assertEqual(response.code, 403)
def test_xsrf_fail_cookie_invalid_format(self):
with ExpectLog(gen_log, ".*XSRF cookie does not match POST"):
response = self.fetch(
"/", method="POST",
headers=self.cookie_headers(token='3|'),
body=urllib_parse.urlencode(dict(_xsrf=self.xsrf_token)))
self.assertEqual(response.code, 403)
def test_xsrf_fail_cookie_no_body(self):
with ExpectLog(gen_log, ".*'_xsrf' argument missing"):
response = self.fetch(
"/", method="POST", body=b"",
headers=self.cookie_headers())
self.assertEqual(response.code, 403)
def test_xsrf_fail_malformed_header(self):
with ExpectLog(gen_log, ".*XSRF cookie does not match POST"):
response = self.fetch(
"/", method="POST", body=b"",
headers=dict({"X-Xsrftoken": b"null"}, **self.cookie_headers()))
self.assertEqual(response.code, 403)
def test_xsrf_success_short_token(self):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf='deadbeef')),
headers=self.cookie_headers(token='deadbeef'))
self.assertEqual(response.code, 200)
def test_xsrf_success_non_hex_token(self):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf='xoxo')),
headers=self.cookie_headers(token='xoxo'))
self.assertEqual(response.code, 200)
def test_xsrf_success_post_body(self):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=self.xsrf_token)),
headers=self.cookie_headers())
self.assertEqual(response.code, 200)
def test_xsrf_success_query_string(self):
response = self.fetch(
"/?" + urllib_parse.urlencode(dict(_xsrf=self.xsrf_token)),
method="POST", body=b"",
headers=self.cookie_headers())
self.assertEqual(response.code, 200)
def test_xsrf_success_header(self):
response = self.fetch("/", method="POST", body=b"",
headers=dict({"X-Xsrftoken": self.xsrf_token}, # type: ignore
**self.cookie_headers()))
self.assertEqual(response.code, 200)
def test_distinct_tokens(self):
# Every request gets a distinct token.
NUM_TOKENS = 10
tokens = set()
for i in range(NUM_TOKENS):
tokens.add(self.get_token())
self.assertEqual(len(tokens), NUM_TOKENS)
def test_cross_user(self):
token2 = self.get_token()
# Each token can be used to authenticate its own request.
for token in (self.xsrf_token, token2):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=token)),
headers=self.cookie_headers(token))
self.assertEqual(response.code, 200)
# Sending one in the cookie and the other in the body is not allowed.
for cookie_token, body_token in ((self.xsrf_token, token2),
(token2, self.xsrf_token)):
with ExpectLog(gen_log, '.*XSRF cookie does not match POST'):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=body_token)),
headers=self.cookie_headers(cookie_token))
self.assertEqual(response.code, 403)
def test_refresh_token(self):
token = self.xsrf_token
tokens_seen = set([token])
# A user's token is stable over time. Refreshing the page in one tab
# might update the cookie while an older tab still has the old cookie
# in its DOM. Simulate this scenario by passing a constant token
# in the body and re-querying for the token.
for i in range(5):
token = self.get_token(token)
# Tokens are encoded uniquely each time
tokens_seen.add(token)
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=self.xsrf_token)),
headers=self.cookie_headers(token))
self.assertEqual(response.code, 200)
self.assertEqual(len(tokens_seen), 6)
def test_versioning(self):
# Version 1 still produces distinct tokens per request.
self.assertNotEqual(self.get_token(version=1),
self.get_token(version=1))
# Refreshed v1 tokens are all identical.
v1_token = self.get_token(version=1)
for i in range(5):
self.assertEqual(self.get_token(v1_token, version=1), v1_token)
# Upgrade to a v2 version of the same token
v2_token = self.get_token(v1_token)
self.assertNotEqual(v1_token, v2_token)
# Each v1 token can map to many v2 tokens.
self.assertNotEqual(v2_token, self.get_token(v1_token))
# The tokens are cross-compatible.
for cookie_token, body_token in ((v1_token, v2_token),
(v2_token, v1_token)):
response = self.fetch(
"/", method="POST",
body=urllib_parse.urlencode(dict(_xsrf=body_token)),
headers=self.cookie_headers(cookie_token))
self.assertEqual(response.code, 200)
@wsgi_safe
class XSRFCookieKwargsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write(self.xsrf_token)
def get_app_kwargs(self):
return dict(xsrf_cookies=True,
xsrf_cookie_kwargs=dict(httponly=True))
def test_xsrf_httponly(self):
response = self.fetch("/")
self.assertIn('httponly;', response.headers['Set-Cookie'].lower())
@wsgi_safe
class FinishExceptionTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
if self.get_argument('finish_value', ''):
raise Finish('authentication required')
else:
self.write('authentication required')
raise Finish()
def test_finish_exception(self):
for u in ['/', '/?finish_value=1']:
response = self.fetch(u)
self.assertEqual(response.code, 401)
self.assertEqual('Basic realm="something"',
response.headers.get('WWW-Authenticate'))
self.assertEqual(b'authentication required', response.body)
@wsgi_safe
class DecoratorTest(WebTestCase):
def get_handlers(self):
class RemoveSlashHandler(RequestHandler):
@removeslash
def get(self):
pass
class AddSlashHandler(RequestHandler):
@addslash
def get(self):
pass
return [("/removeslash/", RemoveSlashHandler),
("/addslash", AddSlashHandler),
]
def test_removeslash(self):
response = self.fetch("/removeslash/", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], "/removeslash")
response = self.fetch("/removeslash/?foo=bar", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], "/removeslash?foo=bar")
def test_addslash(self):
response = self.fetch("/addslash", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], "/addslash/")
response = self.fetch("/addslash?foo=bar", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], "/addslash/?foo=bar")
@wsgi_safe
class CacheTest(WebTestCase):
def get_handlers(self):
class EtagHandler(RequestHandler):
def get(self, computed_etag):
self.write(computed_etag)
def compute_etag(self):
return self._write_buffer[0]
return [
('/etag/(.*)', EtagHandler)
]
def test_wildcard_etag(self):
computed_etag = '"xyzzy"'
etags = '*'
self._test_etag(computed_etag, etags, 304)
def test_strong_etag_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy"'
self._test_etag(computed_etag, etags, 304)
def test_multiple_strong_etag_match(self):
computed_etag = '"xyzzy1"'
etags = '"xyzzy1", "xyzzy2"'
self._test_etag(computed_etag, etags, 304)
def test_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1"'
self._test_etag(computed_etag, etags, 200)
def test_multiple_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1", "xyzzy2"'
self._test_etag(computed_etag, etags, 200)
def test_weak_etag_match(self):
computed_etag = '"xyzzy1"'
etags = 'W/"xyzzy1"'
self._test_etag(computed_etag, etags, 304)
def test_multiple_weak_etag_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self._test_etag(computed_etag, etags, 304)
def test_weak_etag_not_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1"'
self._test_etag(computed_etag, etags, 200)
def test_multiple_weak_etag_not_match(self):
computed_etag = '"xyzzy3"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self._test_etag(computed_etag, etags, 200)
def _test_etag(self, computed_etag, etags, status_code):
response = self.fetch(
'/etag/' + computed_etag,
headers={'If-None-Match': etags}
)
self.assertEqual(response.code, status_code)
@wsgi_safe
class RequestSummaryTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
# remote_ip is optional, although it's set by
# both HTTPServer and WSGIAdapter.
# Clobber it to make sure it doesn't break logging.
self.request.remote_ip = None
self.finish(self._request_summary())
def test_missing_remote_ip(self):
resp = self.fetch("/")
self.assertEqual(resp.body, b"GET / (None)")
class HTTPErrorTest(unittest.TestCase):
def test_copy(self):
e = HTTPError(403, reason="Go away")
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.status_code, e2.status_code)
self.assertEqual(e.reason, e2.reason)
class ApplicationTest(AsyncTestCase):
def test_listen(self):
app = Application([])
server = app.listen(0, address='127.0.0.1')
server.stop()
class URLSpecReverseTest(unittest.TestCase):
def test_reverse(self):
self.assertEqual('/favicon.ico', url(r'/favicon\.ico', None).reverse())
self.assertEqual('/favicon.ico', url(r'^/favicon\.ico$', None).reverse())
def test_non_reversible(self):
# URLSpecs are non-reversible if they include non-constant
# regex features outside capturing groups. Currently, this is
# only strictly enforced for backslash-escaped character
# classes.
paths = [
r'^/api/v\d+/foo/(\w+)$',
]
for path in paths:
# A URLSpec can still be created even if it cannot be reversed.
url_spec = url(path, None)
try:
result = url_spec.reverse()
self.fail("did not get expected exception when reversing %s. "
"result: %s" % (path, result))
except ValueError:
pass
def test_reverse_arguments(self):
self.assertEqual('/api/v1/foo/bar',
url(r'^/api/v1/foo/(\w+)$', None).reverse('bar'))
class RedirectHandlerTest(WebTestCase):
def get_handlers(self):
return [
('/src', WebRedirectHandler, {'url': '/dst'}),
('/src2', WebRedirectHandler, {'url': '/dst2?foo=bar'}),
(r'/(.*?)/(.*?)/(.*)', WebRedirectHandler, {'url': '/{1}/{0}/{2}'})]
def test_basic_redirect(self):
response = self.fetch('/src', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/dst')
def test_redirect_with_argument(self):
response = self.fetch('/src?foo=bar', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/dst?foo=bar')
def test_redirect_with_appending_argument(self):
response = self.fetch('/src2?foo2=bar2', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/dst2?foo=bar&foo2=bar2')
def test_redirect_pattern(self):
response = self.fetch('/a/b/c', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers['Location'], '/b/a/c')
| {
"repo_name": "hhru/tornado",
"path": "tornado/test/web_test.py",
"copies": "1",
"size": "117041",
"license": "apache-2.0",
"hash": -2416916136678318000,
"line_mean": 38.341512605,
"line_max": 125,
"alpha_frac": 0.5876914927,
"autogenerated": false,
"ratio": 4.05014187833068,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513783337103068,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from tornado.escape import _unicode
from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
from tornado.log import gen_log
from tornado import stack_context
from tornado.tcpclient import TCPClient
from tornado.util import PY3
import base64
import collections
import copy
import functools
import re
import socket
import sys
import time
from io import BytesIO
if PY3:
import urllib.parse as urlparse
else:
import urlparse
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine.
ssl = None
class HTTPTimeoutError(HTTPError):
"""Error raised by SimpleAsyncHTTPClient on timeout.
For historical reasons, this is a subclass of `.HTTPClientError`
which simulates a response code of 599.
.. versionadded:: 5.1
"""
def __init__(self, message):
super(HTTPTimeoutError, self).__init__(599, message=message)
def __str__(self):
return self.message
class HTTPStreamClosedError(HTTPError):
"""Error raised by SimpleAsyncHTTPClient when the underlying stream is closed.
When a more specific exception is available (such as `ConnectionResetError`),
it may be raised instead of this one.
For historical reasons, this is a subclass of `.HTTPClientError`
which simulates a response code of 599.
.. versionadded:: 5.1
"""
def __init__(self, message):
super(HTTPStreamClosedError, self).__init__(599, message=message)
def __str__(self):
return self.message
class SimpleAsyncHTTPClient(AsyncHTTPClient):
"""Non-blocking HTTP client with no external dependencies.
This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
Some features found in the curl-based AsyncHTTPClient are not yet
supported. In particular, proxies are not supported, connections
are not reused, and callers cannot select the network interface to be
used.
"""
def initialize(self, max_clients=10,
hostname_mapping=None, max_buffer_size=104857600,
resolver=None, defaults=None, max_header_size=None,
max_body_size=None):
"""Creates a AsyncHTTPClient.
Only a single AsyncHTTPClient instance exists per IOLoop
in order to provide limitations on the number of pending connections.
``force_instance=True`` may be used to suppress this behavior.
Note that because of this implicit reuse, unless ``force_instance``
is used, only the first call to the constructor actually uses
its arguments. It is recommended to use the ``configure`` method
instead of the constructor to ensure that arguments take effect.
``max_clients`` is the number of concurrent requests that can be
in progress; when this limit is reached additional requests will be
queued. Note that time spent waiting in this queue still counts
against the ``request_timeout``.
``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
It can be used to make local DNS changes when modifying system-wide
settings like ``/etc/hosts`` is not possible or desirable (e.g. in
unittests).
``max_buffer_size`` (default 100MB) is the number of bytes
that can be read into memory at once. ``max_body_size``
(defaults to ``max_buffer_size``) is the largest response body
that the client will accept. Without a
``streaming_callback``, the smaller of these two limits
applies; with a ``streaming_callback`` only ``max_body_size``
does.
.. versionchanged:: 4.2
Added the ``max_body_size`` argument.
"""
super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
self.max_clients = max_clients
self.queue = collections.deque()
self.active = {}
self.waiting = {}
self.max_buffer_size = max_buffer_size
self.max_header_size = max_header_size
self.max_body_size = max_body_size
# TCPClient could create a Resolver for us, but we have to do it
# ourselves to support hostname_mapping.
if resolver:
self.resolver = resolver
self.own_resolver = False
else:
self.resolver = Resolver()
self.own_resolver = True
if hostname_mapping is not None:
self.resolver = OverrideResolver(resolver=self.resolver,
mapping=hostname_mapping)
self.tcp_client = TCPClient(resolver=self.resolver)
def close(self):
super(SimpleAsyncHTTPClient, self).close()
if self.own_resolver:
self.resolver.close()
self.tcp_client.close()
def fetch_impl(self, request, callback):
key = object()
self.queue.append((key, request, callback))
if not len(self.active) < self.max_clients:
timeout_handle = self.io_loop.add_timeout(
self.io_loop.time() + min(request.connect_timeout,
request.request_timeout),
functools.partial(self._on_timeout, key, "in request queue"))
else:
timeout_handle = None
self.waiting[key] = (request, callback, timeout_handle)
self._process_queue()
if self.queue:
gen_log.debug("max_clients limit reached, request queued. "
"%d active, %d queued requests." % (
len(self.active), len(self.queue)))
def _process_queue(self):
with stack_context.NullContext():
while self.queue and len(self.active) < self.max_clients:
key, request, callback = self.queue.popleft()
if key not in self.waiting:
continue
self._remove_timeout(key)
self.active[key] = (request, callback)
release_callback = functools.partial(self._release_fetch, key)
self._handle_request(request, release_callback, callback)
def _connection_class(self):
return _HTTPConnection
def _handle_request(self, request, release_callback, final_callback):
self._connection_class()(
self, request, release_callback,
final_callback, self.max_buffer_size, self.tcp_client,
self.max_header_size, self.max_body_size)
def _release_fetch(self, key):
del self.active[key]
self._process_queue()
def _remove_timeout(self, key):
if key in self.waiting:
request, callback, timeout_handle = self.waiting[key]
if timeout_handle is not None:
self.io_loop.remove_timeout(timeout_handle)
del self.waiting[key]
def _on_timeout(self, key, info=None):
"""Timeout callback of request.
Construct a timeout HTTPResponse when a timeout occurs.
:arg object key: A simple object to mark the request.
:info string key: More detailed timeout information.
"""
request, callback, timeout_handle = self.waiting[key]
self.queue.remove((key, request, callback))
error_message = "Timeout {0}".format(info) if info else "Timeout"
timeout_response = HTTPResponse(
request, 599, error=HTTPTimeoutError(error_message),
request_time=self.io_loop.time() - request.start_time)
self.io_loop.add_callback(callback, timeout_response)
del self.waiting[key]
class _HTTPConnection(httputil.HTTPMessageDelegate):
_SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
def __init__(self, client, request, release_callback,
final_callback, max_buffer_size, tcp_client,
max_header_size, max_body_size):
self.io_loop = IOLoop.current()
self.start_time = self.io_loop.time()
self.start_wall_time = time.time()
self.client = client
self.request = request
self.release_callback = release_callback
self.final_callback = final_callback
self.max_buffer_size = max_buffer_size
self.tcp_client = tcp_client
self.max_header_size = max_header_size
self.max_body_size = max_body_size
self.code = None
self.headers = None
self.chunks = []
self._decompressor = None
# Timeout handle returned by IOLoop.add_timeout
self._timeout = None
self._sockaddr = None
IOLoop.current().add_callback(self.run)
@gen.coroutine
def run(self):
try:
self.parsed = urlparse.urlsplit(_unicode(self.request.url))
if self.parsed.scheme not in ("http", "https"):
raise ValueError("Unsupported url scheme: %s" %
self.request.url)
# urlsplit results have hostname and port results, but they
# didn't support ipv6 literals until python 2.7.
netloc = self.parsed.netloc
if "@" in netloc:
userpass, _, netloc = netloc.rpartition("@")
host, port = httputil.split_host_and_port(netloc)
if port is None:
port = 443 if self.parsed.scheme == "https" else 80
if re.match(r'^\[.*\]$', host):
# raw ipv6 addresses in urls are enclosed in brackets
host = host[1:-1]
self.parsed_hostname = host # save final host for _on_connect
if self.request.allow_ipv6 is False:
af = socket.AF_INET
else:
af = socket.AF_UNSPEC
ssl_options = self._get_ssl_options(self.parsed.scheme)
timeout = min(self.request.connect_timeout, self.request.request_timeout)
if timeout:
self._timeout = self.io_loop.add_timeout(
self.start_time + timeout,
stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
stream = yield self.tcp_client.connect(
host, port, af=af,
ssl_options=ssl_options,
max_buffer_size=self.max_buffer_size)
if self.final_callback is None:
# final_callback is cleared if we've hit our timeout.
stream.close()
return
self.stream = stream
self.stream.set_close_callback(self.on_connection_close)
self._remove_timeout()
if self.final_callback is None:
return
if self.request.request_timeout:
self._timeout = self.io_loop.add_timeout(
self.start_time + self.request.request_timeout,
stack_context.wrap(functools.partial(self._on_timeout, "during request")))
if (self.request.method not in self._SUPPORTED_METHODS and
not self.request.allow_nonstandard_methods):
raise KeyError("unknown method %s" % self.request.method)
for key in ('network_interface',
'proxy_host', 'proxy_port',
'proxy_username', 'proxy_password',
'proxy_auth_mode'):
if getattr(self.request, key, None):
raise NotImplementedError('%s not supported' % key)
if "Connection" not in self.request.headers:
self.request.headers["Connection"] = "close"
if "Host" not in self.request.headers:
if '@' in self.parsed.netloc:
self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
else:
self.request.headers["Host"] = self.parsed.netloc
username, password = None, None
if self.parsed.username is not None:
username, password = self.parsed.username, self.parsed.password
elif self.request.auth_username is not None:
username = self.request.auth_username
password = self.request.auth_password or ''
if username is not None:
if self.request.auth_mode not in (None, "basic"):
raise ValueError("unsupported auth_mode %s",
self.request.auth_mode)
self.request.headers["Authorization"] = (
b"Basic " + base64.b64encode(
httputil.encode_username_password(username, password)))
if self.request.user_agent:
self.request.headers["User-Agent"] = self.request.user_agent
if not self.request.allow_nonstandard_methods:
# Some HTTP methods nearly always have bodies while others
# almost never do. Fail in this case unless the user has
# opted out of sanity checks with allow_nonstandard_methods.
body_expected = self.request.method in ("POST", "PATCH", "PUT")
body_present = (self.request.body is not None or
self.request.body_producer is not None)
if ((body_expected and not body_present) or
(body_present and not body_expected)):
raise ValueError(
'Body must %sbe None for method %s (unless '
'allow_nonstandard_methods is true)' %
('not ' if body_expected else '', self.request.method))
if self.request.expect_100_continue:
self.request.headers["Expect"] = "100-continue"
if self.request.body is not None:
# When body_producer is used the caller is responsible for
# setting Content-Length (or else chunked encoding will be used).
self.request.headers["Content-Length"] = str(len(
self.request.body))
if (self.request.method == "POST" and
"Content-Type" not in self.request.headers):
self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
if self.request.decompress_response:
self.request.headers["Accept-Encoding"] = "gzip"
req_path = ((self.parsed.path or '/') +
(('?' + self.parsed.query) if self.parsed.query else ''))
self.connection = self._create_connection(stream)
start_line = httputil.RequestStartLine(self.request.method,
req_path, '')
self.connection.write_headers(start_line, self.request.headers)
if self.request.expect_100_continue:
yield self.connection.read_response(self)
else:
yield self._write_body(True)
except Exception:
if not self._handle_exception(*sys.exc_info()):
raise
def _get_ssl_options(self, scheme):
if scheme == "https":
if self.request.ssl_options is not None:
return self.request.ssl_options
# If we are using the defaults, don't construct a
# new SSLContext.
if (self.request.validate_cert and
self.request.ca_certs is None and
self.request.client_cert is None and
self.request.client_key is None):
return _client_ssl_defaults
ssl_ctx = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH,
cafile=self.request.ca_certs)
if not self.request.validate_cert:
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
if self.request.client_cert is not None:
ssl_ctx.load_cert_chain(self.request.client_cert,
self.request.client_key)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# See netutil.ssl_options_to_context
ssl_ctx.options |= ssl.OP_NO_COMPRESSION
return ssl_ctx
return None
def _on_timeout(self, info=None):
"""Timeout callback of _HTTPConnection instance.
Raise a `HTTPTimeoutError` when a timeout occurs.
:info string key: More detailed timeout information.
"""
self._timeout = None
error_message = "Timeout {0}".format(info) if info else "Timeout"
if self.final_callback is not None:
self._handle_exception(HTTPTimeoutError, HTTPTimeoutError(error_message),
None)
def _remove_timeout(self):
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _create_connection(self, stream):
stream.set_nodelay(True)
connection = HTTP1Connection(
stream, True,
HTTP1ConnectionParameters(
no_keep_alive=True,
max_header_size=self.max_header_size,
max_body_size=self.max_body_size,
decompress=self.request.decompress_response),
self._sockaddr)
return connection
@gen.coroutine
def _write_body(self, start_read):
if self.request.body is not None:
self.connection.write(self.request.body)
elif self.request.body_producer is not None:
fut = self.request.body_producer(self.connection.write)
if fut is not None:
yield fut
self.connection.finish()
if start_read:
try:
yield self.connection.read_response(self)
except StreamClosedError:
if not self._handle_exception(*sys.exc_info()):
raise
def _release(self):
if self.release_callback is not None:
release_callback = self.release_callback
self.release_callback = None
release_callback()
def _run_callback(self, response):
self._release()
if self.final_callback is not None:
final_callback = self.final_callback
self.final_callback = None
self.io_loop.add_callback(final_callback, response)
def _handle_exception(self, typ, value, tb):
if self.final_callback:
self._remove_timeout()
if isinstance(value, StreamClosedError):
if value.real_error is None:
value = HTTPStreamClosedError("Stream closed")
else:
value = value.real_error
self._run_callback(HTTPResponse(self.request, 599, error=value,
request_time=self.io_loop.time() - self.start_time,
start_time=self.start_wall_time,
))
if hasattr(self, "stream"):
# TODO: this may cause a StreamClosedError to be raised
# by the connection's Future. Should we cancel the
# connection more gracefully?
self.stream.close()
return True
else:
# If our callback has already been called, we are probably
# catching an exception that is not caused by us but rather
# some child of our callback. Rather than drop it on the floor,
# pass it along, unless it's just the stream being closed.
return isinstance(value, StreamClosedError)
def on_connection_close(self):
if self.final_callback is not None:
message = "Connection closed"
if self.stream.error:
raise self.stream.error
try:
raise HTTPStreamClosedError(message)
except HTTPStreamClosedError:
self._handle_exception(*sys.exc_info())
def headers_received(self, first_line, headers):
if self.request.expect_100_continue and first_line.code == 100:
self._write_body(False)
return
self.code = first_line.code
self.reason = first_line.reason
self.headers = headers
if self._should_follow_redirect():
return
if self.request.header_callback is not None:
# Reassemble the start line.
self.request.header_callback('%s %s %s\r\n' % first_line)
for k, v in self.headers.get_all():
self.request.header_callback("%s: %s\r\n" % (k, v))
self.request.header_callback('\r\n')
def _should_follow_redirect(self):
return (self.request.follow_redirects and
self.request.max_redirects > 0 and
self.code in (301, 302, 303, 307, 308))
def finish(self):
data = b''.join(self.chunks)
self._remove_timeout()
original_request = getattr(self.request, "original_request",
self.request)
if self._should_follow_redirect():
assert isinstance(self.request, _RequestProxy)
new_request = copy.copy(self.request.request)
new_request.url = urlparse.urljoin(self.request.url,
self.headers["Location"])
new_request.max_redirects = self.request.max_redirects - 1
del new_request.headers["Host"]
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
# Client SHOULD make a GET request after a 303.
# According to the spec, 302 should be followed by the same
# method as the original request, but in practice browsers
# treat 302 the same as 303, and many servers use 302 for
# compatibility with pre-HTTP/1.1 user agents which don't
# understand the 303 status.
if self.code in (302, 303):
new_request.method = "GET"
new_request.body = None
for h in ["Content-Length", "Content-Type",
"Content-Encoding", "Transfer-Encoding"]:
try:
del self.request.headers[h]
except KeyError:
pass
new_request.original_request = original_request
final_callback = self.final_callback
self.final_callback = None
self._release()
fut = self.client.fetch(new_request, raise_error=False)
fut.add_done_callback(lambda f: final_callback(f.result()))
self._on_end_request()
return
if self.request.streaming_callback:
buffer = BytesIO()
else:
buffer = BytesIO(data) # TODO: don't require one big string?
response = HTTPResponse(original_request,
self.code, reason=getattr(self, 'reason', None),
headers=self.headers,
request_time=self.io_loop.time() - self.start_time,
start_time=self.start_wall_time,
buffer=buffer,
effective_url=self.request.url)
self._run_callback(response)
self._on_end_request()
def _on_end_request(self):
self.stream.close()
def data_received(self, chunk):
if self._should_follow_redirect():
# We're going to follow a redirect so just discard the body.
return
if self.request.streaming_callback is not None:
self.request.streaming_callback(chunk)
else:
self.chunks.append(chunk)
if __name__ == "__main__":
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
main()
| {
"repo_name": "hhru/tornado",
"path": "tornado/simple_httpclient.py",
"copies": "2",
"size": "24586",
"license": "apache-2.0",
"hash": 2095159873029857000,
"line_mean": 42.4381625442,
"line_max": 98,
"alpha_frac": 0.5727649882,
"autogenerated": false,
"ratio": 4.566493313521545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6139258301721545,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from tornado import gen, ioloop
from tornado.log import app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient, HTTPTimeoutError
from tornado.test.util import unittest, skipBefore35, exec_test, ignore_deprecation
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.web import Application
import contextlib
import os
import platform
import traceback
import warnings
try:
import asyncio
except ImportError:
asyncio = None
@contextlib.contextmanager
def set_environ(name, value):
old_value = os.environ.get(name)
os.environ[name] = value
try:
yield
finally:
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
class AsyncTestCaseTest(AsyncTestCase):
def test_exception_in_callback(self):
with ignore_deprecation():
self.io_loop.add_callback(lambda: 1 / 0)
try:
self.wait()
self.fail("did not get expected exception")
except ZeroDivisionError:
pass
def test_wait_timeout(self):
time = self.io_loop.time
# Accept default 5-second timeout, no error
self.io_loop.add_timeout(time() + 0.01, self.stop)
self.wait()
# Timeout passed to wait()
self.io_loop.add_timeout(time() + 1, self.stop)
with self.assertRaises(self.failureException):
self.wait(timeout=0.01)
# Timeout set with environment variable
self.io_loop.add_timeout(time() + 1, self.stop)
with set_environ('ASYNC_TEST_TIMEOUT', '0.01'):
with self.assertRaises(self.failureException):
self.wait()
def test_subsequent_wait_calls(self):
"""
This test makes sure that a second call to wait()
clears the first timeout.
"""
self.io_loop.add_timeout(self.io_loop.time() + 0.00, self.stop)
self.wait(timeout=0.02)
self.io_loop.add_timeout(self.io_loop.time() + 0.03, self.stop)
self.wait(timeout=0.15)
def test_multiple_errors(self):
with ignore_deprecation():
def fail(message):
raise Exception(message)
self.io_loop.add_callback(lambda: fail("error one"))
self.io_loop.add_callback(lambda: fail("error two"))
# The first error gets raised; the second gets logged.
with ExpectLog(app_log, "multiple unhandled exceptions"):
with self.assertRaises(Exception) as cm:
self.wait()
self.assertEqual(str(cm.exception), "error one")
class AsyncHTTPTestCaseTest(AsyncHTTPTestCase):
@classmethod
def setUpClass(cls):
super(AsyncHTTPTestCaseTest, cls).setUpClass()
# An unused port is bound so we can make requests upon it without
# impacting a real local web server.
cls.external_sock, cls.external_port = bind_unused_port()
def get_app(self):
return Application()
def test_fetch_segment(self):
path = '/path'
response = self.fetch(path)
self.assertEqual(response.request.url, self.get_url(path))
@gen_test
def test_fetch_full_http_url(self):
path = 'http://localhost:%d/path' % self.external_port
with contextlib.closing(SimpleAsyncHTTPClient(force_instance=True)) as client:
with self.assertRaises(HTTPTimeoutError) as cm:
yield client.fetch(path, request_timeout=0.1, raise_error=True)
self.assertEqual(cm.exception.response.request.url, path)
@gen_test
def test_fetch_full_https_url(self):
path = 'https://localhost:%d/path' % self.external_port
with contextlib.closing(SimpleAsyncHTTPClient(force_instance=True)) as client:
with self.assertRaises(HTTPTimeoutError) as cm:
yield client.fetch(path, request_timeout=0.1, raise_error=True)
self.assertEqual(cm.exception.response.request.url, path)
@classmethod
def tearDownClass(cls):
cls.external_sock.close()
super(AsyncHTTPTestCaseTest, cls).tearDownClass()
class AsyncTestCaseWrapperTest(unittest.TestCase):
def test_undecorated_generator(self):
class Test(AsyncTestCase):
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
@skipBefore35
@unittest.skipIf(platform.python_implementation() == 'PyPy',
'pypy destructor warnings cannot be silenced')
def test_undecorated_coroutine(self):
namespace = exec_test(globals(), locals(), """
class Test(AsyncTestCase):
async def test_coro(self):
pass
""")
test_class = namespace['Test']
test = test_class('test_coro')
result = unittest.TestResult()
# Silence "RuntimeWarning: coroutine 'test_coro' was never awaited".
with warnings.catch_warnings():
warnings.simplefilter('ignore')
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
def test_undecorated_generator_with_skip(self):
class Test(AsyncTestCase):
@unittest.skip("don't run this")
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
def test_other_return(self):
class Test(AsyncTestCase):
def test_other_return(self):
return 42
test = Test('test_other_return')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("Return value from test method ignored", result.errors[0][1])
class SetUpTearDownTest(unittest.TestCase):
def test_set_up_tear_down(self):
"""
This test makes sure that AsyncTestCase calls super methods for
setUp and tearDown.
InheritBoth is a subclass of both AsyncTestCase and
SetUpTearDown, with the ordering so that the super of
AsyncTestCase will be SetUpTearDown.
"""
events = []
result = unittest.TestResult()
class SetUpTearDown(unittest.TestCase):
def setUp(self):
events.append('setUp')
def tearDown(self):
events.append('tearDown')
class InheritBoth(AsyncTestCase, SetUpTearDown):
def test(self):
events.append('test')
InheritBoth('test').run(result)
expected = ['setUp', 'test', 'tearDown']
self.assertEqual(expected, events)
class GenTest(AsyncTestCase):
def setUp(self):
super(GenTest, self).setUp()
self.finished = False
def tearDown(self):
self.assertTrue(self.finished)
super(GenTest, self).tearDown()
@gen_test
def test_sync(self):
self.finished = True
@gen_test
def test_async(self):
yield gen.moment
self.finished = True
def test_timeout(self):
# Set a short timeout and exceed it.
@gen_test(timeout=0.1)
def test(self):
yield gen.sleep(1)
# This can't use assertRaises because we need to inspect the
# exc_info triple (and not just the exception object)
try:
test(self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
# The stack trace should blame the add_timeout line, not just
# unrelated IOLoop/testing internals.
self.assertIn(
"gen.sleep(1)",
traceback.format_exc())
self.finished = True
def test_no_timeout(self):
# A test that does not exceed its timeout should succeed.
@gen_test(timeout=1)
def test(self):
yield gen.sleep(0.1)
test(self)
self.finished = True
def test_timeout_environment_variable(self):
@gen_test(timeout=0.5)
def test_long_timeout(self):
yield gen.sleep(0.25)
# Uses provided timeout of 0.5 seconds, doesn't time out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
test_long_timeout(self)
self.finished = True
def test_no_timeout_environment_variable(self):
@gen_test(timeout=0.01)
def test_short_timeout(self):
yield gen.sleep(1)
# Uses environment-variable timeout of 0.1, times out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
with self.assertRaises(ioloop.TimeoutError):
test_short_timeout(self)
self.finished = True
def test_with_method_args(self):
@gen_test
def test_with_args(self, *args):
self.assertEqual(args, ('test',))
yield gen.moment
test_with_args(self, 'test')
self.finished = True
def test_with_method_kwargs(self):
@gen_test
def test_with_kwargs(self, **kwargs):
self.assertDictEqual(kwargs, {'test': 'test'})
yield gen.moment
test_with_kwargs(self, test='test')
self.finished = True
@skipBefore35
def test_native_coroutine(self):
namespace = exec_test(globals(), locals(), """
@gen_test
async def test(self):
self.finished = True
""")
namespace['test'](self)
@skipBefore35
def test_native_coroutine_timeout(self):
# Set a short timeout and exceed it.
namespace = exec_test(globals(), locals(), """
@gen_test(timeout=0.1)
async def test(self):
await gen.sleep(1)
""")
try:
namespace['test'](self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
self.finished = True
@unittest.skipIf(asyncio is None, "asyncio module not present")
class GetNewIOLoopTest(AsyncTestCase):
def get_new_ioloop(self):
# Use the current loop instead of creating a new one here.
return ioloop.IOLoop.current()
def setUp(self):
# This simulates the effect of an asyncio test harness like
# pytest-asyncio.
self.orig_loop = asyncio.get_event_loop()
self.new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.new_loop)
super(GetNewIOLoopTest, self).setUp()
def tearDown(self):
super(GetNewIOLoopTest, self).tearDown()
# AsyncTestCase must not affect the existing asyncio loop.
self.assertFalse(asyncio.get_event_loop().is_closed())
asyncio.set_event_loop(self.orig_loop)
self.new_loop.close()
def test_loop(self):
self.assertIs(self.io_loop.asyncio_loop, self.new_loop)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "hhru/tornado",
"path": "tornado/test/testing_test.py",
"copies": "2",
"size": "11254",
"license": "apache-2.0",
"hash": 1011243570614542300,
"line_mean": 31.1542857143,
"line_max": 99,
"alpha_frac": 0.6103607606,
"autogenerated": false,
"ratio": 4.105800802626779,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5716161563226778,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from tornado import gen, ioloop
from tornado.log import app_log
from tornado.test.util import unittest, skipBefore35, exec_test
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.web import Application
import contextlib
import os
import platform
import traceback
import warnings
@contextlib.contextmanager
def set_environ(name, value):
old_value = os.environ.get(name)
os.environ[name] = value
try:
yield
finally:
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
class AsyncTestCaseTest(AsyncTestCase):
def test_exception_in_callback(self):
self.io_loop.add_callback(lambda: 1 / 0)
try:
self.wait()
self.fail("did not get expected exception")
except ZeroDivisionError:
pass
def test_wait_timeout(self):
time = self.io_loop.time
# Accept default 5-second timeout, no error
self.io_loop.add_timeout(time() + 0.01, self.stop)
self.wait()
# Timeout passed to wait()
self.io_loop.add_timeout(time() + 1, self.stop)
with self.assertRaises(self.failureException):
self.wait(timeout=0.01)
# Timeout set with environment variable
self.io_loop.add_timeout(time() + 1, self.stop)
with set_environ('ASYNC_TEST_TIMEOUT', '0.01'):
with self.assertRaises(self.failureException):
self.wait()
def test_subsequent_wait_calls(self):
"""
This test makes sure that a second call to wait()
clears the first timeout.
"""
self.io_loop.add_timeout(self.io_loop.time() + 0.00, self.stop)
self.wait(timeout=0.02)
self.io_loop.add_timeout(self.io_loop.time() + 0.03, self.stop)
self.wait(timeout=0.15)
def test_multiple_errors(self):
def fail(message):
raise Exception(message)
self.io_loop.add_callback(lambda: fail("error one"))
self.io_loop.add_callback(lambda: fail("error two"))
# The first error gets raised; the second gets logged.
with ExpectLog(app_log, "multiple unhandled exceptions"):
with self.assertRaises(Exception) as cm:
self.wait()
self.assertEqual(str(cm.exception), "error one")
class AsyncHTTPTestCaseTest(AsyncHTTPTestCase):
@classmethod
def setUpClass(cls):
super(AsyncHTTPTestCaseTest, cls).setUpClass()
# An unused port is bound so we can make requests upon it without
# impacting a real local web server.
cls.external_sock, cls.external_port = bind_unused_port()
def get_app(self):
return Application()
def test_fetch_segment(self):
path = '/path'
response = self.fetch(path)
self.assertEqual(response.request.url, self.get_url(path))
def test_fetch_full_http_url(self):
path = 'http://localhost:%d/path' % self.external_port
response = self.fetch(path, request_timeout=0.1)
self.assertEqual(response.request.url, path)
def test_fetch_full_https_url(self):
path = 'https://localhost:%d/path' % self.external_port
response = self.fetch(path, request_timeout=0.1)
self.assertEqual(response.request.url, path)
@classmethod
def tearDownClass(cls):
cls.external_sock.close()
super(AsyncHTTPTestCaseTest, cls).tearDownClass()
class AsyncTestCaseWrapperTest(unittest.TestCase):
def test_undecorated_generator(self):
class Test(AsyncTestCase):
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
@skipBefore35
@unittest.skipIf(platform.python_implementation() == 'PyPy',
'pypy destructor warnings cannot be silenced')
def test_undecorated_coroutine(self):
namespace = exec_test(globals(), locals(), """
class Test(AsyncTestCase):
async def test_coro(self):
pass
""")
test_class = namespace['Test']
test = test_class('test_coro')
result = unittest.TestResult()
# Silence "RuntimeWarning: coroutine 'test_coro' was never awaited".
with warnings.catch_warnings():
warnings.simplefilter('ignore')
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
def test_undecorated_generator_with_skip(self):
class Test(AsyncTestCase):
@unittest.skip("don't run this")
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
def test_other_return(self):
class Test(AsyncTestCase):
def test_other_return(self):
return 42
test = Test('test_other_return')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("Return value from test method ignored", result.errors[0][1])
class SetUpTearDownTest(unittest.TestCase):
def test_set_up_tear_down(self):
"""
This test makes sure that AsyncTestCase calls super methods for
setUp and tearDown.
InheritBoth is a subclass of both AsyncTestCase and
SetUpTearDown, with the ordering so that the super of
AsyncTestCase will be SetUpTearDown.
"""
events = []
result = unittest.TestResult()
class SetUpTearDown(unittest.TestCase):
def setUp(self):
events.append('setUp')
def tearDown(self):
events.append('tearDown')
class InheritBoth(AsyncTestCase, SetUpTearDown):
def test(self):
events.append('test')
InheritBoth('test').run(result)
expected = ['setUp', 'test', 'tearDown']
self.assertEqual(expected, events)
class GenTest(AsyncTestCase):
def setUp(self):
super(GenTest, self).setUp()
self.finished = False
def tearDown(self):
self.assertTrue(self.finished)
super(GenTest, self).tearDown()
@gen_test
def test_sync(self):
self.finished = True
@gen_test
def test_async(self):
yield gen.Task(self.io_loop.add_callback)
self.finished = True
def test_timeout(self):
# Set a short timeout and exceed it.
@gen_test(timeout=0.1)
def test(self):
yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
# This can't use assertRaises because we need to inspect the
# exc_info triple (and not just the exception object)
try:
test(self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
# The stack trace should blame the add_timeout line, not just
# unrelated IOLoop/testing internals.
self.assertIn(
"gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)",
traceback.format_exc())
self.finished = True
def test_no_timeout(self):
# A test that does not exceed its timeout should succeed.
@gen_test(timeout=1)
def test(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.1)
test(self)
self.finished = True
def test_timeout_environment_variable(self):
@gen_test(timeout=0.5)
def test_long_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.25)
# Uses provided timeout of 0.5 seconds, doesn't time out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
test_long_timeout(self)
self.finished = True
def test_no_timeout_environment_variable(self):
@gen_test(timeout=0.01)
def test_short_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 1)
# Uses environment-variable timeout of 0.1, times out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
with self.assertRaises(ioloop.TimeoutError):
test_short_timeout(self)
self.finished = True
def test_with_method_args(self):
@gen_test
def test_with_args(self, *args):
self.assertEqual(args, ('test',))
yield gen.Task(self.io_loop.add_callback)
test_with_args(self, 'test')
self.finished = True
def test_with_method_kwargs(self):
@gen_test
def test_with_kwargs(self, **kwargs):
self.assertDictEqual(kwargs, {'test': 'test'})
yield gen.Task(self.io_loop.add_callback)
test_with_kwargs(self, test='test')
self.finished = True
@skipBefore35
def test_native_coroutine(self):
namespace = exec_test(globals(), locals(), """
@gen_test
async def test(self):
self.finished = True
""")
namespace['test'](self)
@skipBefore35
def test_native_coroutine_timeout(self):
# Set a short timeout and exceed it.
namespace = exec_test(globals(), locals(), """
@gen_test(timeout=0.1)
async def test(self):
await gen.sleep(1)
""")
try:
namespace['test'](self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
self.finished = True
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "Lancher/tornado",
"path": "tornado/test/testing_test.py",
"copies": "1",
"size": "10031",
"license": "apache-2.0",
"hash": 8723894450234192000,
"line_mean": 30.9458598726,
"line_max": 99,
"alpha_frac": 0.6036287509,
"autogenerated": false,
"ratio": 4.007590890930883,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006976038823172582,
"num_lines": 314
} |
from __future__ import absolute_import, division, print_function
from tqdm import tqdm
import struct
import matplotlib.pyplot as plt
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db, get_images,LogNorm,Figure, RUN_GUI)
#from chxanalys.chx_generic_functions import (get_circular_average)
#from chxanalys.XPCS_SAXS import (get_circular_average)
from chxanalys.chx_libs import ( colors, markers, colors_, markers_)
import os
from chxanalys.chx_generic_functions import ( save_arrays )
from skbeam.core.utils import multi_tau_lags
from skbeam.core.roi import extract_label_indices
from collections import namedtuple
import logging
logger = logging.getLogger(__name__)
from chxanalys.chx_compress import (compress_eigerdata, read_compressed_eigerdata,init_compress_eigerdata,
Multifile,pass_FD,get_avg_imgc,mean_intensityc, get_each_frame_intensityc)
from modest_image import ModestImage, imshow
#from chxanalys.chx_compress import *
def get_time_edge_avg_img(FD, frame_edge,show_progress =True):
'''YG Dev Nov 14, 2017@CHX
Get averaged img by giving FD and frame edges
Parameters
----------
FD: Multifile class
compressed file
frame_edge: np.array, can be created by create_time_slice( Nimg, slice_num= 3,
slice_width= 1, edges = None )
e.g., np.array([[ 5, 6],
[2502, 2503],
[4999, 5000]])
Return:
array: (N of frame_edge, averaged image) , i.e., d[0] gives the first averaged image
'''
Nt = len( frame_edge )
d = np.zeros(Nt, dtype=object)
for i in range(Nt):
t1,t2 = frame_edge[i]
d[i] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, plot_ = False,show_progress=show_progress )
return d
def plot_imgs( imgs, image_name=None, *argv, **kwargs):
#NOT WORKing NOW....
N = len(imgs)
sx = np.ceil( np.sqrt(N) )
pass
def cal_waterfallc(FD, labeled_array, qindex=1,
bin_waterfall = False, waterfall_roi_size = None, save=False, *argv,**kwargs):
"""Compute the mean intensity for each ROI in the compressed file (FD)
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
qindex : int, qindex=1, give the first ring in SAXS geometry. NOTE: qindex=0 is non-photon pixels.
The ROI's to use.
bin_waterfall: if True, will bin the waterfall along y-axis
waterfall_roi_size: the size of waterfall roi, (x-size, y-size), if bin, will bin along y
save: save the waterfall
Returns
-------
waterfall : array
The mean intensity of each ROI for all `images`
Dimensions:
len(mean_intensity) == len(index)
len(mean_intensity[0]) == len(images)
index : list
The labels for each element of the `mean_intensity` list
"""
sampling =1
labeled_array_ = np.array( labeled_array == qindex, dtype= np.int64)
qind, pixelist = roi.extract_label_indices( labeled_array_ )
if labeled_array_.shape != ( FD.md['ncols'],FD.md['nrows']):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array_.shape[0], labeled_array_.shape[1]) )
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
watf = np.zeros( [ int( ( FD.end - FD.beg)/sampling ), len(qind)] )
#fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
#maxqind = max(qind)
norm = np.bincount( qind )[1:]
n= 0
#for i in tqdm(range( FD.beg , FD.end )):
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get waterfall for q index=%s'%qindex ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
watf[n][pxlist] = v[w]
n +=1
if bin_waterfall:
watf_ = watf.copy()
watf = np.zeros( [ watf_.shape[0], waterfall_roi_size[0] ])
for i in range(waterfall_roi_size[1] ):
watf += watf_[:, waterfall_roi_size[0]*i: waterfall_roi_size[0]*(i+1) ]
watf /= waterfall_roi_size[0]
if save:
path = kwargs['path']
uid = kwargs['uid']
np.save( path + '%s_waterfall'%uid, watf)
return watf
def plot_waterfallc(wat, qindex=1, aspect = None,vmax=None, vmin=None, interpolation = 'none',
save=False, return_fig=False, cmap='viridis',*argv,**kwargs):
'''plot waterfall for a giving compressed file
FD: class object, the compressed file handler
labeled_array: np.array, a ROI mask
qindex: the index number of q, will calculate where( labeled_array == qindex)
aspect: the aspect ratio of the plot
Return waterfall
Plot the waterfall
'''
#wat = cal_waterfallc( FD, labeled_array, qindex=qindex)
if RUN_GUI:
fig = Figure(figsize=(8,6))
ax = fig.add_subplot(111)
else:
fig, ax = plt.subplots(figsize=(8,6))
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fig, ax = plt.subplots(figsize=(8,6))
ax.set_ylabel('Pixel')
ax.set_xlabel('Frame')
ax.set_title('%s_Waterfall_Plot_@qind=%s'%(uid, qindex) )
if 'beg' in kwargs:
beg = kwargs['beg']
else:
beg=0
extent = [ beg, len(wat)+beg, 0, len( wat.T) ]
if vmax is None:
vmax=wat.max()
if vmin is None:
vmin = wat.min()
if aspect is None:
aspect = wat.shape[0]/wat.shape[1]
im = imshow(ax, wat.T, cmap=cmap, vmax=vmax,extent= extent,interpolation = interpolation )
#im = ax.imshow(wat.T, cmap='viridis', vmax=vmax,extent= extent,interpolation = interpolation )
fig.colorbar( im )
ax.set_aspect( aspect)
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "%s_waterfall"%uid + '.png'
plt.savefig( fp, dpi=fig.dpi)
#plt.show()
if return_fig:
return fig,ax, im
def get_waterfallc(FD, labeled_array, qindex=1, aspect = 1.0,
vmax=None, save=False, *argv,**kwargs):
'''plot waterfall for a giving compressed file
FD: class object, the compressed file handler
labeled_array: np.array, a ROI mask
qindex: the index number of q, will calculate where( labeled_array == qindex)
aspect: the aspect ratio of the plot
Return waterfall
Plot the waterfall
'''
wat = cal_waterfallc( FD, labeled_array, qindex=qindex)
fig, ax = plt.subplots(figsize=(8,6))
ax.set_ylabel('Pixel')
ax.set_xlabel('Frame')
ax.set_title('Waterfall_Plot_@qind=%s'%qindex)
im = ax.imshow(wat.T, cmap='viridis', vmax=vmax)
fig.colorbar( im )
ax.set_aspect( aspect)
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--Waterfall-"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
#plt.show()
return wat
def cal_each_ring_mean_intensityc( FD, ring_mask, sampling=1, timeperframe=None, multi_cor= False,
*argv,**kwargs):
"""
get time dependent mean intensity of each ring
"""
mean_int_sets, index_list = mean_intensityc(FD, ring_mask, sampling, index=None, multi_cor=multi_cor)
if timeperframe is None:
times = np.arange( FD.end - FD.beg ) + FD.beg # get the time for each frame
else:
times = ( FD.beg + np.arange( FD.end - FD.beg ) )*timeperframe
num_rings = len( np.unique( ring_mask)[1:] )
return times, mean_int_sets
def plot_each_ring_mean_intensityc( times, mean_int_sets, xlabel= 'Frame',save=False, *argv,**kwargs):
"""
Plot time dependent mean intensity of each ring
"""
num_rings = mean_int_sets.shape[1]
fig, ax = plt.subplots(figsize=(8, 8))
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
ax.set_title("%s--Mean intensity of each ROI"%uid)
for i in range(num_rings):
#print( markers[i], colors[i] )
ax.plot( times, mean_int_sets[:,i], label="ROI "+str(i+1),marker = markers[i], color=colors[i], ls='-')
ax.set_xlabel(xlabel)
ax.set_ylabel("Mean Intensity")
ax.legend(loc = 'best',fontsize='x-small', fancybox=True, framealpha=0.5)
if save:
path = kwargs['path']
fp = path + "%s_t_ROIs"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
save_arrays( np.hstack( [times.reshape(len(times),1), mean_int_sets]),
label= ['frame']+ ['ROI_%d'%i for i in range( num_rings ) ],
filename='%s_t_ROIs'%uid, path= path )
#plt.show()
def get_each_ring_mean_intensityc( FD, ring_mask, sampling=1, timeperframe=None, plot_ = False, save=False, *argv,**kwargs):
"""
get time dependent mean intensity of each ring
"""
mean_int_sets, index_list = mean_intensityc(FD, ring_mask, sampling, index=None)
if timeperframe is None:
times = np.arange( FD.end - FD.beg ) + FD.beg # get the time for each frame
else:
times = ( FD.beg + np.arange( FD.end - FD.beg ) )*timeperframe
num_rings = len( np.unique( ring_mask)[1:] )
if plot_:
fig, ax = plt.subplots(figsize=(8, 8))
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
ax.set_title("%s--Mean intensity of each ROI"%uid)
for i in range(num_rings):
ax.plot( times, mean_int_sets[:,i], label="ROI "+str(i+1),marker = 'o', ls='-')
if timeperframe is not None:
ax.set_xlabel("Time, sec")
else:
ax.set_xlabel("Frame")
ax.set_ylabel("Mean Intensity")
ax.legend(loc = 'best',fontsize='x-small')
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
#fp = path + "uid= %s--Mean intensity of each ring-"%uid + CurTime + '.png'
fp = path + "%s_Mean_intensity_of_each_ROI"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
save_arrays( np.hstack( [times.reshape(len(times),1), mean_int_sets]),
label= ['frame']+ ['ROI_%d'%i for i in range( num_rings ) ],
filename='%s_t_ROIs'%uid, path= path )
#plt.show()
return times, mean_int_sets
| {
"repo_name": "yugangzhang/chxanalys",
"path": "chxanalys/chx_compress_analysis.py",
"copies": "1",
"size": "11807",
"license": "bsd-3-clause",
"hash": -5319008908975442000,
"line_mean": 33.4227405248,
"line_max": 178,
"alpha_frac": 0.5580587787,
"autogenerated": false,
"ratio": 3.192806922660898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9039403252931271,
"avg_score": 0.04229248968592548,
"num_lines": 343
} |
from __future__ import absolute_import, division, print_function
from trakt.core.configuration import ConfigurationManager
from trakt.core.emitter import Emitter
from trakt.core.http import HttpClient
from trakt.interfaces import construct_map
from trakt.interfaces.base import InterfaceProxy
from trakt.mapper.core.base import Mapper
from trakt.version import __version__
class TraktClient(Emitter):
base_url = 'https://api.trakt.tv'
version = __version__
__interfaces = None
def __init__(self, adapter_kwargs=None):
# Set parameter defaults
if adapter_kwargs is None:
adapter_kwargs = {}
adapter_kwargs.setdefault('max_retries', 3)
# Construct
self.configuration = ConfigurationManager()
self.http = HttpClient(self, adapter_kwargs)
self.__interfaces = construct_map(self)
self._site_url = None
@property
def site_url(self):
if self._site_url is not None:
return self._site_url
url = self.base_url
schema_end = url.find('://') + 3
domain_start = url.find('.', schema_end) + 1
return url[0:schema_end] + url[domain_start:]
@site_url.setter
def site_url(self, value):
self._site_url = value
def construct(self, media, item, keys=None, **kwargs):
return Mapper.construct(self, media, item, keys, **kwargs)
@staticmethod
def get_ids(media, item, parent=None):
return Mapper.get_ids(media, item, parent)
def __getitem__(self, path):
parts = path.strip('/').split('/')
cur = self.__interfaces
parameters = []
while parts and type(cur) is dict:
key = parts.pop(0)
if key not in cur:
if '*' in cur:
if key != '*':
parameters.append(key)
cur = cur['*']
continue
return None
cur = cur[key]
if type(cur) is dict:
cur = cur.get(None)
if parts:
parameters.extend(parts)
if parameters:
return InterfaceProxy(cur, parameters)
return cur
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/client.py",
"copies": "2",
"size": "2203",
"license": "mit",
"hash": 8800547733175715000,
"line_mean": 24.6162790698,
"line_max": 66,
"alpha_frac": 0.578302315,
"autogenerated": false,
"ratio": 4.2122370936902485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5790539408690248,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.emitter import Emitter
from trakt.interfaces.base import Interface
from datetime import datetime, timedelta
from threading import Thread
import calendar
import logging
import requests
import time
log = logging.getLogger(__name__)
class DeviceOAuthInterface(Interface):
path = 'oauth/device'
def code(self, **kwargs):
client_id = self.client.configuration['client.id']
if not client_id:
raise ValueError('"client.id" configuration parameter is required')
response = self.http.post(
'code',
data={
'client_id': client_id
}
)
data = self.get_data(response, **kwargs)
if isinstance(data, requests.Response):
return data
if not data:
return None
return data
def poll(self, device_code, expires_in, interval, **kwargs):
"""Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller
"""
return DeviceOAuthPoller(self.client, device_code, expires_in, interval)
def token(self, device_code, **kwargs):
client_id = self.client.configuration['client.id']
client_secret = self.client.configuration['client.secret']
if not client_id:
raise ValueError('"client.id" and "client.secret" configuration parameters are required')
response = self.http.post(
'token',
data={
'client_id': client_id,
'client_secret': client_secret,
'code': device_code
}
)
data = self.get_data(response, **kwargs)
if isinstance(data, requests.Response):
return data
if not data:
return None
return data
class DeviceOAuthPoller(Interface, Emitter):
def __init__(self, client, device_code, expires_in, interval):
super(DeviceOAuthPoller, self).__init__(client)
self.device_code = device_code
self.expires_in = expires_in
self.interval = interval
# Calculate code expiry date/time
self.expires_at = datetime.utcnow() + timedelta(seconds=self.expires_in)
# Private attributes
self._abort = False
self._active = False
self._running = False
self._thread = None
@property
def active(self):
return self._active
def has_expired(self):
return datetime.utcnow() > self.expires_at
def start(self, daemon=None):
if self._active or self._thread:
raise Exception('Poller already started')
# Construct thread process wrapper
def wrapper():
try:
self._process()
except Exception as ex:
log.warning('Exception raised in DeviceOAuthPoller: %s', ex, exc_info=True)
finally:
self._active = False
self._running = False
if self._abort:
self.emit('aborted')
# Construct poller thread
self._thread = Thread(
target=wrapper,
name='%s:%s' % (DeviceOAuthPoller.__module__, DeviceOAuthPoller.__name__)
)
# Set `daemon` state
if daemon is not None:
self._thread.daemon = daemon
# Start polling
self._abort = False
self._active = True
self._running = True
self._thread.start()
def stop(self):
# Flag as thread abort
self._abort = True
# Flag thread to stop
self._running = False
def _process(self):
while self._running:
# Ensure code hasn't expired yet
if self.has_expired():
self.emit('expired')
break
# Trigger "poll" event, check if we should continue polling
if not self._should_poll():
self.stop()
break
# Poll for token
response = self.client['oauth/device'].token(self.device_code, parse=False)
if response:
# Parse authorization
data = self.get_data(response)
if 'created_at' not in data:
data['created_at'] = calendar.timegm(datetime.utcnow().utctimetuple())
# Authentication complete
self.emit('authenticated', data)
break
# Sleep for defined interval
time.sleep(self.interval)
def _poll_callback(self, state=True):
self._abort = not state
def _should_poll(self):
# Assume poller should abort if `callback` isn't fired
self._abort = True
# Trigger "poll" event
self.emit('poll', self._poll_callback)
# Continue polling if `abort` flag isn't set
return not self._abort
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/interfaces/oauth/device.py",
"copies": "2",
"size": "5234",
"license": "mit",
"hash": -7022844239277860000,
"line_mean": 26.5473684211,
"line_max": 101,
"alpha_frac": 0.5670615208,
"autogenerated": false,
"ratio": 4.694170403587444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6261231924387444,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.errors import log_request_error
from trakt.core.exceptions import RequestFailedError, ServerError, ClientError
from trakt.core.pagination import PaginationIterator
from trakt.helpers import setdefault
import functools
import logging
log = logging.getLogger(__name__)
def authenticated(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
if 'authenticated' not in kwargs:
kwargs['authenticated'] = True
return func(*args, **kwargs)
return wrap
def application(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
if args and isinstance(args[0], Interface):
interface = args[0]
setdefault(kwargs, {
'app_version': interface.client.configuration['app.version'],
'app_date': interface.client.configuration['app.date']
}, lambda key, value: value)
return func(*args, **kwargs)
return wrap
class Interface(object):
path = None
def __init__(self, client):
self.client = client
def __getitem__(self, name):
if hasattr(self, name):
return getattr(self, name)
raise ValueError('Unknown action "%s" on %s' % (name, self))
@property
def http(self):
if not self.client:
return None
return self.client.http.configure(self.path)
def get_data(self, response, exceptions=False, parse=True):
if response is None:
if exceptions:
raise RequestFailedError('No response available')
log.warning('Request failed (no response returned)')
return None
# Return response, if parsing is disabled or pagination is enabled
if not parse or isinstance(response, PaginationIterator):
return response
# Check status code, log any errors
error = False
if response.status_code < 200 or response.status_code >= 300:
log_request_error(log, response)
# Raise an exception (if enabled)
if exceptions:
if response.status_code >= 500:
raise ServerError(response)
else:
raise ClientError(response)
# Set error flag
error = True
# Return `None` if we encountered an error, return response data
if error:
return None
# Parse response, return data
content_type = response.headers.get('content-type')
if content_type and content_type.startswith('application/json'):
# Try parse json response
try:
data = response.json()
except Exception as e:
log.warning('unable to parse JSON response: %s', e)
return None
else:
log.debug('response returned content-type: %r, falling back to raw data', content_type)
# Fallback to raw content
data = response.content
return data
class InterfaceProxy(object):
def __init__(self, interface, args):
self.interface = interface
self.args = list(args)
def __getattr__(self, name):
value = getattr(self.interface, name)
if not callable(value):
return value
@functools.wraps(value)
def wrap(*args, **kwargs):
args = self.args + list(args)
return value(*args, **kwargs)
return wrap
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/base/__init__.py",
"copies": "2",
"size": "3543",
"license": "mit",
"hash": -3134134419622761000,
"line_mean": 26.6796875,
"line_max": 99,
"alpha_frac": 0.5921535422,
"autogenerated": false,
"ratio": 4.680317040951123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6272470583151123,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.errors import log_request_error
from trakt.core.exceptions import ServerError, ClientError, RequestFailedError
from trakt.core.helpers import try_convert
from six.moves.urllib.parse import urlsplit, urlunsplit, parse_qsl
import logging
log = logging.getLogger(__name__)
class PaginationIterator(object):
def __init__(self, client, request, exceptions=False):
self.client = client
self.request = request
self.exceptions = exceptions
self.per_page = None
self.total_items = None
self.total_pages = None
self._mapper = None
# Parse request url
scheme, netloc, path, query = urlsplit(self.request.url)[:4]
self.url = urlunsplit([scheme, netloc, path, '', ''])
self.query = dict(parse_qsl(query))
# Resolve pagination details
self.resolve()
def get(self, page):
request = self.request.copy()
# Build query parameters
query = self.query.copy()
query['page'] = page
query['limit'] = self.per_page
# Construct request
request.prepare_url(self.url, query)
# Send request
response = self._send(request)
if not response:
return None
# Parse response, return data
content_type = response.headers.get('content-type')
if content_type and content_type.startswith('application/json'):
# Try parse json response
try:
items = response.json()
except Exception as e:
log.warning('Unable to parse page: %s', e)
return None
else:
log.warning('Received a page with an invalid content type: %r', content_type)
return None
if self._mapper:
return self._mapper(items)
return items
def resolve(self):
request = self.request.copy()
request.prepare_method('HEAD')
# Send request
if not self._send(request):
log.warning('Unable to resolve pagination state')
# Reset state
self.per_page = None
self.total_items = None
self.total_pages = None
def with_mapper(self, mapper):
if self._mapper:
raise ValueError('Iterator has already been bound to a mapper')
# Update mapper
self._mapper = mapper
return self
def _send(self, request):
response = self.client.http.send(request)
if response is None:
if self.exceptions:
raise RequestFailedError('No response available')
log.warning('Request failed (no response returned)')
return None
if response.status_code < 200 or response.status_code >= 300:
log_request_error(log, response)
# Raise an exception (if enabled)
if self.exceptions:
if response.status_code >= 500:
raise ServerError(response)
else:
raise ClientError(response)
return None
# Update pagination state
self.per_page = try_convert(response.headers.get('x-pagination-limit'), int)
self.total_items = try_convert(response.headers.get('x-pagination-item-count'), int)
self.total_pages = try_convert(response.headers.get('x-pagination-page-count'), int)
return response
def __iter__(self):
if self.total_pages is None:
if self.exceptions:
raise ValueError("Pagination state hasn't been resolved")
log.warning("Pagination state hasn't been resolved")
return
# Retrieve current page number
current = int(self.query.get('page', 1))
# Fetch pages
while current <= self.total_pages:
items = self.get(current)
if not items:
log.warning('Unable to retrieve page #%d, pagination iterator cancelled', current)
break
for item in items:
yield item
current += 1
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/core/pagination.py",
"copies": "2",
"size": "4190",
"license": "mit",
"hash": 6038767379833236000,
"line_mean": 28.3006993007,
"line_max": 98,
"alpha_frac": 0.5835322196,
"autogenerated": false,
"ratio": 4.6452328159645235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000380994429640281,
"num_lines": 143
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import clean_username, dictfilter
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper import ListMapper, ListItemMapper
import requests
class UsersListInterface(Interface):
path = 'users/*/lists/*'
def get(self, username, id, **kwargs):
# Send request
response = self.http.get(
'/users/%s/lists/%s' % (clean_username(username), id),
)
# Parse response
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
if not item:
return None
# Map item to list object
return ListMapper.custom_list(
self.client, item,
username=username
)
def items(self, username, id, media=None, extended=None, page=None, per_page=None, **kwargs):
response = self.http.get(
'/users/%s/lists/%s/items' % (clean_username(username), id),
query={
'type': media,
'extended': extended,
'page': page,
'limit': per_page
},
**dictfilter(kwargs, get=[
'exceptions'
], pop=[
'authenticated',
'pagination',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(lambda items: ListItemMapper.process_many(self.client, items))
if isinstance(items, requests.Response):
return items
return ListItemMapper.process_many(self.client, items)
#
# Owner actions
#
@authenticated
def add(self, username, id, items, **kwargs):
# Send request
response = self.http.post(
'/users/%s/lists/%s/items' % (clean_username(username), id),
data=items,
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
# Parse response
return self.get_data(response, **kwargs)
@authenticated
def delete(self, username, id, **kwargs):
# Send request
response = self.http.delete(
'/users/%s/lists/%s' % (clean_username(username), id),
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
return 200 <= response.status_code < 300
@authenticated
def update(self, username, id, name=None, description=None, privacy=None, display_numbers=None,
allow_comments=None, return_type='object', **kwargs):
data = {
'name': name,
'description': description,
'privacy': privacy,
'allow_comments': allow_comments,
'display_numbers': display_numbers
}
# Remove attributes with `None` values
for key in list(data.keys()):
if data[key] is not None:
continue
del data[key]
# Send request
response = self.http.put(
'/users/%s/lists/%s' % (clean_username(username), id),
data=data,
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
# Parse response
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
if not item:
return None
if return_type == 'data':
return item
if return_type == 'object':
# Map item to list object
return ListMapper.custom_list(
self.client, item,
username=username
)
raise ValueError('Unsupported value for "return_type": %r', return_type)
@authenticated
def remove(self, username, id, items, **kwargs):
# Send request
response = self.http.post(
'/users/%s/lists/%s/items/remove' % (clean_username(username), id),
data=items,
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
# Parse response
return self.get_data(response, **kwargs)
#
# Actions
#
@authenticated
def like(self, username, id, **kwargs):
# Send request
response = self.http.post(
'/users/%s/lists/%s/like' % (clean_username(username), id),
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
return 200 <= response.status_code < 300
@authenticated
def unlike(self, username, id, **kwargs):
# Send request
response = self.http.delete(
'/users/%s/lists/%s/like' % (clean_username(username), id),
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
return 200 <= response.status_code < 300
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/users/lists/list_.py",
"copies": "2",
"size": "5294",
"license": "mit",
"hash": -6769575800529381000,
"line_mean": 26.8631578947,
"line_max": 99,
"alpha_frac": 0.5251227805,
"autogenerated": false,
"ratio": 4.497875955819881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022394610865788812,
"num_lines": 190
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import clean_username, dictfilter
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper import UserMapper
import requests
class UsersProfileInterface(Interface):
path = 'users/*'
def get(self, username, extended=None, **kwargs):
response = self.http.get(
'/users/%s' % (clean_username(username)),
query={
'extended': extended
},
**dictfilter(kwargs, get=[
'exceptions'
], pop=[
'authenticated',
'validate_token'
])
)
# Parse response
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
if type(item) is not dict:
return None
return UserMapper.user(self.client, item)
@authenticated
def follow(self, username, **kwargs):
response = self.http.post(
'/users/%s/follow' % (clean_username(username))
)
return 200 <= response.status_code < 300
@authenticated
def unfollow(self, username, **kwargs):
response = self.http.delete(
'/users/%s/follow' % (clean_username(username))
)
return 200 <= response.status_code < 300
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/users/profile.py",
"copies": "2",
"size": "1391",
"license": "mit",
"hash": 8598389061825952000,
"line_mean": 25.75,
"line_max": 64,
"alpha_frac": 0.5736879942,
"autogenerated": false,
"ratio": 4.487096774193549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import clean_username, dictfilter, to_iso8601_datetime
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper import SyncMapper
import requests
class UsersHistoryInterface(Interface):
path = 'users/*/history'
flags = {'is_watched': True}
def get(self, username, media=None, id=None, start_at=None, end_at=None, store=None,
extended=None, page=None, per_page=None, **kwargs):
if not media and id:
raise ValueError('The "id" parameter also requires the "media" parameter to be defined')
# Build parameters
params = []
if media:
params.append(media)
if id:
params.append(id)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
if start_at:
query['start_at'] = to_iso8601_datetime(start_at)
if end_at:
query['end_at'] = to_iso8601_datetime(end_at)
# Send request
response = self.http.get(
'/users/%s/history' % (clean_username(username)),
params=params,
query=query,
**dictfilter(kwargs, get=[
'exceptions'
], pop=[
'authenticated',
'pagination',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(lambda items: SyncMapper.process(
self.client, store, items,
media=media,
flat=True,
**self.flags
))
if isinstance(items, requests.Response):
return items
if type(items) is not list:
return None
return SyncMapper.process(
self.client, store, items,
media=media,
flat=True,
**self.flags
)
#
# Shortcut methods
#
@authenticated
def movies(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
username, 'movies',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def shows(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
username, 'shows',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def seasons(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
username, 'seasons',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def episodes(self, username, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
username, 'episodes',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/users/history.py",
"copies": "2",
"size": "3409",
"license": "mit",
"hash": 2359633299591964700,
"line_mean": 25.842519685,
"line_max": 100,
"alpha_frac": 0.523613963,
"autogenerated": false,
"ratio": 4.172582619339045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000510703312163355,
"num_lines": 127
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import clean_username
from trakt.interfaces.base import Interface
from trakt.mapper import ListMapper
import requests
# Import child interfaces
from trakt.interfaces.users.lists.list_ import UsersListInterface # noqa: I100
__all__ = (
'UsersListsInterface',
'UsersListInterface'
)
class UsersListsInterface(Interface):
path = 'users/*/lists'
def create(self, username, name, description=None, privacy='private', display_numbers=False,
allow_comments=True, sort_by='rank', sort_how='asc', **kwargs):
"""Create a new list.
:param username: Username (or :code:`me`)
:type username: :class:`~python:str`
:param name: Name
:type name: :class:`~python:str`
:param description: Description
:type description: :class:`~python:str`
:param privacy: Privacy (:code:`private`, :code:`friends`, or :code:`public`)
:type description: :class:`~python:str`
:param display_numbers: Flag indicating this list displays numbers
:type description: :class:`~python:bool`
:param allow_comments: Flag indicating this list allows comments
:type description: :class:`~python:bool`
:param sort_by: Sort By (:code:`rank`, :code:`added`, :code:`title`, :code:`released`,
:code:`runtime`, :code:`popularity`, :code:`percentage`, :code:`votes`,
:code:`my_rating`, :code:`random`, :code:`watched`, :code:`collected`)
:type sort_by: :class:`~python:str`
:param sort_how: Sort Direction (:code:`asc`, or :code:`desc`)
:type sort_how: :class:`~python:str`
:return: List
:rtype: trakt.objects.CustomList
"""
data = {
'name': name,
'description': description,
'privacy': privacy,
'allow_comments': allow_comments,
'display_numbers': display_numbers,
'sort_by': sort_by,
'sort_how': sort_how
}
# Remove attributes with `None` values
for key in list(data.keys()):
if data[key] is not None:
continue
del data[key]
# Send request
response = self.http.post(
'/users/%s/lists' % clean_username(username),
data=data
)
# Parse response
item = self.get_data(response, **kwargs)
if isinstance(item, requests.Response):
return item
if not item:
return None
# Map item to list object
return ListMapper.custom_list(
self.client, item,
username=username
)
def get(self, username, **kwargs):
"""Retrieve lists for user.
:param username: Username (or :code:`me`)
:type username: :class:`~python:str`
:return: List
:rtype: trakt.objects.CustomList
"""
if kwargs.get('parse') is False:
raise ValueError("Parse can't be disabled on this method")
# Send request
response = self.http.get(
'/users/%s/lists' % clean_username(username),
)
# Parse response
items = self.get_data(response, **kwargs)
if not items:
return
# Map items to list objects
for item in items:
yield ListMapper.custom_list(
self.client, item,
username=username
)
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/users/lists/__init__.py",
"copies": "2",
"size": "3559",
"license": "mit",
"hash": 6926271391074210000,
"line_mean": 28.4132231405,
"line_max": 96,
"alpha_frac": 0.5703849396,
"autogenerated": false,
"ratio": 4.2622754491017965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5832660388701797,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter, from_iso8601_datetime, to_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
LABELS = {
'last_progress_change': {
'watched': 'last_watched_at',
'collection': 'last_collected_at'
},
'episode_progress_change': {
'watched': 'last_watched_at',
'collection': 'collected_at'
}
}
class BaseProgress(object):
def __init__(self, aired=None, completed=None):
self.aired = aired
"""
:type: :class:`~python:int`
Number of aired episodes
"""
self.completed = completed
"""
:type: :class:`~python:int`
Number of completed episodes
"""
def to_dict(self):
return {
'aired': self.aired,
'completed': self.completed
}
def _update(self, info=None, **kwargs):
if not info:
return
update_attributes(self, info, [
'aired',
'completed'
])
def __repr__(self):
return '%d/%d episodes completed' % (self.completed, self.aired)
class Progress(BaseProgress):
progress_type = None
"""
:type: :class:`~python:str`
Progress Type (:code:`watched` or :code:`collection`)
"""
def __init__(self, client, aired=None, completed=None):
super(Progress, self).__init__(aired, completed)
self._client = client
self.last_progress_change = None
"""
:type: :class:`~python:datetime.datetime`
Last watched or collected date/time
"""
self.reset_at = None
"""
:type: :class:`~python:datetime.datetime`
Reset date/time (not applicable for collected progress)
"""
self.seasons = {}
"""
:type: :class:`~python:dict`
Season Progress, defined as :code:`{season_num: SeasonProgress}`
"""
self.hidden_seasons = None
"""
:type: :class:`~python:dict`
Hidden Seasons, defined as :code:`{season_num: Season}`
"""
self.next_episode = None
"""
:type: :class:`trakt.objects.episode.Episode`
Next Episode the user should watch or collect
"""
self.last_episode = None
"""
:type: :class:`trakt.objects.episode.Episode`
Last Episode the user watched or collected
"""
def to_dict(self):
"""Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict`
"""
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
dictfilter(season.to_dict(), pop=['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = dictfilter(self.next_episode.to_dict(), pop=['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = dictfilter(self.last_episode.to_dict(), pop=['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Progress, self)._update(info, **kwargs)
label = LABELS['last_progress_change'][self.progress_type]
if label in info:
self.last_progress_change = from_iso8601_datetime(info.get(label))
if 'reset_at' in info:
self.reset_at = from_iso8601_datetime(info.get('reset_at'))
if 'seasons' in info:
for season in info['seasons']:
season_progress = SeasonProgress._construct(season, progress_type=self.progress_type)
if season_progress:
self.seasons[season_progress.pk] = season_progress
if 'hidden_seasons' in info:
self.hidden_seasons = {}
for season in info['hidden_seasons']:
hidden_season = self._client.construct('season', season)
if hidden_season:
self.hidden_seasons[hidden_season.pk] = hidden_season
if 'next_episode' in info:
episode = self._client.construct('episode', info['next_episode'])
if episode:
self.next_episode = episode
if 'last_episode' in info:
episode = self._client.construct('episode', info['last_episode'])
if episode:
self.last_episode = episode
@classmethod
def _construct(cls, client, info=None, **kwargs):
if not info:
return
progress = cls(client)
progress._update(info, **kwargs)
return progress
class WatchedProgress(Progress):
progress_type = 'watched'
class CollectionProgress(Progress):
progress_type = 'collection'
class SeasonProgress(BaseProgress):
def __init__(self, pk=None, aired=None, completed=None):
super(SeasonProgress, self).__init__(aired, completed)
self.pk = pk
"""
:type: :class:`~python:int`
Season Number
"""
self.episodes = {}
"""
:type: :class:`~python:dict`
Episode Progress, defined as :code:`{episode_num: EpisodeProgress}`
"""
def to_dict(self):
result = super(SeasonProgress, self).to_dict()
result['number'] = self.pk
result['episodes'] = [
episode.to_dict()
for episode in self.episodes.values()
]
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(SeasonProgress, self)._update(info, **kwargs)
self.pk = info['number']
if 'episodes' in info:
for episode in info['episodes']:
episode_progress = EpisodeProgress._construct(episode, **kwargs)
if episode_progress:
self.episodes[episode_progress.pk] = episode_progress
@classmethod
def _construct(cls, info=None, **kwargs):
if not info:
return
season_progress = cls()
season_progress._update(info, **kwargs)
return season_progress
class EpisodeProgress(object):
def __init__(self, pk=None):
self.progress_type = None
self.pk = pk
"""
:type: :class:`~python:int`
Episode Number
"""
self.completed = None
"""
:type: :class:`~python:bool`
Whether or not the episode has been watched or collected
"""
self.progress_timestamp = None
"""
:type: :class:`~python:datetime.datetime`
Date/time episode was collected or last watched
"""
def to_dict(self):
result = {
'number': self.pk,
'completed': self.completed if self.completed is not None else 0
}
if self.progress_type:
label = LABELS['episode_progress_change'][self.progress_type]
else:
label = 'progress_timestamp'
result[label] = to_iso8601_datetime(self.progress_timestamp)
return result
def _update(self, info=None, **kwargs):
if not info:
return
self.pk = info['number']
if 'progress_type' in kwargs:
self.progress_type = kwargs['progress_type']
self.completed = info['completed']
if 'last_watched_at' in info:
self.progress_timestamp = from_iso8601_datetime(info.get('last_watched_at'))
elif 'collected_at' in info:
self.progress_timestamp = from_iso8601_datetime(info.get('collected_at'))
@classmethod
def _construct(cls, info=None, **kwargs):
if not info:
return
episode_progress = cls()
episode_progress._update(info, **kwargs)
return episode_progress
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/progress.py",
"copies": "2",
"size": "8497",
"license": "mit",
"hash": 7357554500729827000,
"line_mean": 25.225308642,
"line_max": 118,
"alpha_frac": 0.5577262563,
"autogenerated": false,
"ratio": 4.110788582486696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5668514838786696,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter
from trakt.core.pagination import PaginationIterator
from trakt.interfaces.base import Interface, authenticated
from trakt.interfaces.users.following import UsersFollowingInterface
from trakt.interfaces.users.friends import UsersFriendsInterface
from trakt.interfaces.users.history import UsersHistoryInterface
from trakt.interfaces.users.lists import UsersListInterface, UsersListsInterface
from trakt.interfaces.users.profile import UsersProfileInterface
from trakt.interfaces.users.ratings import UsersRatingsInterface
from trakt.interfaces.users.settings import UsersSettingsInterface
from trakt.interfaces.users.watched import UsersWatchedInterface
from trakt.interfaces.users.watchlist import UsersWatchlistInterface
from trakt.mapper import CommentMapper, ListMapper
import logging
import requests
log = logging.getLogger(__name__)
__all__ = (
'UsersInterface',
'UsersFollowingInterface',
'UsersFriendsInterface',
'UsersHistoryInterface',
'UsersListsInterface',
'UsersListInterface',
'UsersProfileInterface',
'UsersRatingsInterface',
'UsersSettingsInterface',
'UsersWatchedInterface',
'UsersWatchlistInterface'
)
class UsersInterface(Interface):
path = 'users'
@authenticated
def likes(self, type=None, page=None, per_page=None, **kwargs):
if type and type not in ['comments', 'lists']:
raise ValueError('Unknown type specified: %r' % type)
if kwargs.get('parse') is False:
raise ValueError("Parse can't be disabled on this method")
# Send request
response = self.http.get('likes', [type], query={
'page': page,
'limit': per_page
}, **dictfilter(kwargs, get=[
'exceptions'
], pop=[
'authenticated',
'pagination',
'validate_token'
]))
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, PaginationIterator):
return items.with_mapper(self._map_items)
if isinstance(items, requests.Response):
return items
return self._map_items(items)
def _map_items(self, items):
if items is None:
return None
# Map items to comment/list objects
return [
item for item in [self._map(item) for item in items]
if item
]
def _map(self, item):
item_type = item.get('type')
if item_type == 'comment':
return CommentMapper.comment(
self.client, item
)
if item_type == 'list':
return ListMapper.custom_list(
self.client, item
)
log.warning('Unknown item returned, type: %r', item_type)
return None
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/users/__init__.py",
"copies": "2",
"size": "2902",
"license": "mit",
"hash": 4965273064682331000,
"line_mean": 29.5473684211,
"line_max": 80,
"alpha_frac": 0.6595451413,
"autogenerated": false,
"ratio": 4.430534351145038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6090079492445037,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter
from trakt.interfaces.base import Interface, authenticated, application
class ScrobbleInterface(Interface):
path = 'scrobble'
@application
@authenticated
def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs)
@application
@authenticated
def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/interfaces/scrobble.py",
"copies": "2",
"size": "11832",
"license": "mit",
"hash": 6081555412406097000,
"line_mean": 25.3518930958,
"line_max": 116,
"alpha_frac": 0.4110040568,
"autogenerated": false,
"ratio": 5.00507614213198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003601024238429425,
"num_lines": 449
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import dictfilter
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper.summary import SummaryMapper
from datetime import datetime
import requests
class Base(Interface):
def new(self, media, **kwargs):
if media != 'shows':
raise ValueError("Media '%s' does not support the `new()` method" % (media,))
return self.get(media, 'new', **kwargs)
def premieres(self, media, **kwargs):
if media != 'shows':
raise ValueError("Media '%s' does not support the `premieres()` method" % (media,))
return self.get(media, 'premieres', **kwargs)
def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
"""Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video
"""
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**dictfilter(kwargs, pop=[
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items)
class AllCalendarsInterface(Base):
path = 'calendars/all/*'
def get(self, media, collection=None, start_date=None, days=None, **kwargs):
return super(AllCalendarsInterface, self).get(
'all', media, collection,
start_date=start_date,
days=days,
**kwargs
)
class MyCalendarsInterface(Base):
path = 'calendars/my/*'
@authenticated
def get(self, media, collection=None, start_date=None, days=None, **kwargs):
return super(MyCalendarsInterface, self).get(
'my', media, collection,
start_date=start_date,
days=days,
**kwargs
)
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/calendars.py",
"copies": "2",
"size": "5257",
"license": "mit",
"hash": 3092990022794795500,
"line_mean": 31.85625,
"line_max": 117,
"alpha_frac": 0.5700970135,
"autogenerated": false,
"ratio": 4.162311955661124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005984238069650288,
"num_lines": 160
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
class Video(Media):
def __init__(self, client, keys=None, index=None):
super(Video, self).__init__(client, keys, index)
self.action = None
"""
:type: :class:`~python:str`
Item action (e.g. history action: "checkin", "scrobble" or "watch")
"""
self.id = None
"""
:type: :class:`~python:long`
Item id (e.g. history id)
"""
self.collected_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was added to your collection (or `None`)
"""
self.paused_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was paused (or `None`)
"""
self.watched_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was watched (or `None`)
"""
self.progress = None
"""
:type: :class:`~python:float`
Playback progress for item (or `None`)
"""
# Flags
self.is_watched = None
"""
:type: :class:`~python:bool`
Flag indicating this item has been watched (or `None`)
"""
self.is_collected = None
"""
:type: :class:`~python:bool`
Flag indicating this item has been collected (or `None`)
"""
def _update(self, info=None, is_watched=None, is_collected=None, **kwargs):
if not info:
return
super(Video, self)._update(info, **kwargs)
update_attributes(self, info, [
'progress'
])
if 'action' in info:
self.action = info.get('action')
if 'id' in info:
self.id = info.get('id')
# Set timestamps
if 'collected_at' in info:
self.collected_at = from_iso8601_datetime(info.get('collected_at'))
if 'paused_at' in info:
self.paused_at = from_iso8601_datetime(info.get('paused_at'))
if 'watched_at' in info:
self.watched_at = from_iso8601_datetime(info.get('watched_at'))
# Set flags
if is_watched is not None:
self.is_watched = is_watched
if is_collected is not None:
self.is_collected = is_collected
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/video.py",
"copies": "2",
"size": "2549",
"license": "mit",
"hash": -539413071400909300,
"line_mean": 24.49,
"line_max": 79,
"alpha_frac": 0.5460965084,
"autogenerated": false,
"ratio": 3.939721792890263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5485818301290263,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
from trakt.objects.rating import Rating
class Media(object):
def __init__(self, client, keys=None, index=None):
self._client = client
self.keys = keys
"""
:type: :class:`~python:list` of :class:`~python:tuple`
Keys (for imdb, tvdb, etc..), defined as:
..code-block::
[
(<service>, <id>)
]
"""
self.index = index
"""
:type: :class:`~python:int`
Playlist item index
"""
self.images = None
"""
:type: :class:`~python:dict`
Images (or `None`), defined as:
.. code-block:: python
{
<type>: {
<size>: <url>
}
}
+------------------+----------------+---------------------------------------+
| Type | Size | Dimensions |
+==================+================+=======================================+
| :code:`banner` | :code:`full` | 1000x185 (movie/show), 758x140 (show) |
+------------------+----------------+---------------------------------------+
| :code:`clearart` | :code:`full` | 1000x562 |
+------------------+----------------+---------------------------------------+
| :code:`fanart` | :code:`full` | 1920x1080 (typical), 1280x720 |
+------------------+----------------+---------------------------------------+
| | :code:`medium` | 1280x720 |
+------------------+----------------+---------------------------------------+
| | :code:`thumb` | 853x480 |
+------------------+----------------+---------------------------------------+
| :code:`logo` | :code:`full` | 800x310 |
+------------------+----------------+---------------------------------------+
| :code:`poster` | :code:`full` | 1000x1500 |
+------------------+----------------+---------------------------------------+
| | :code:`medium` | 600x900 |
+------------------+----------------+---------------------------------------+
| | :code:`thumb` | 300x450 |
+------------------+----------------+---------------------------------------+
| :code:`thumb` | :code:`full` | 1000x562 (movie), 500x281 (show) |
+------------------+----------------+---------------------------------------+
"""
self.overview = None
"""
:type: :class:`~python:str`
Overview (or `None`)
"""
self.plays = None
"""
:type: :class:`~python:int`
Number of plays (or `None`)
"""
self.rating = None
"""
:type: :class:`~python:int`
Community rating (0 - 10) (or `None`)
"""
self.score = None
"""
:type: :class:`~python:float`
Search score (or `None`)
"""
# Flags
self.in_watchlist = None
"""
:type: :class:`~python:bool`
Flag indicating this item is in your watchlist (or `None`)
"""
#
# Timestamps
#
self.last_updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was last updated (or `None`)
"""
self.last_watched_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was last watched (or `None`)
"""
self.listed_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was added to the list (or `None`)
"""
self.reset_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was reset (or `None`)
"""
@property
def pk(self):
"""Retrieve the primary key (unique identifier for the item).
Provides the following identifiers (by media type):
- **movie:** imdb
- **show:** tvdb
- **season:** tvdb
- **episode:** tvdb
- **custom_list:** trakt
- **person:** tmdb
:return: :code:`(<service>, <value>)` or :code:`None` if no primary key is available
:rtype: :class:`~python:tuple`
"""
if not self.keys:
return None
return self.keys[0]
def get_key(self, service):
for k_service, k_value in self.keys:
if k_service == service:
return k_value
return None
def _update(self, info=None, in_watchlist=None, **kwargs):
if not info:
return
update_attributes(self, info, [
'plays',
# Extended Info
'overview',
# Search
'score'
])
if 'images' in info:
self.images = info['images']
# Set timestamps
if 'last_updated_at' in info:
self.last_updated_at = from_iso8601_datetime(info.get('last_updated_at'))
if 'last_watched_at' in info:
self.last_watched_at = from_iso8601_datetime(info.get('last_watched_at'))
if 'listed_at' in info:
self.listed_at = from_iso8601_datetime(info.get('listed_at'))
if 'reset_at' in info:
self.reset_at = from_iso8601_datetime(info.get('reset_at'))
# Set flags
if in_watchlist is not None:
self.in_watchlist = in_watchlist
self.rating = Rating._construct(self._client, info) or self.rating
def __getstate__(self):
state = self.__dict__
if hasattr(self, '_client'):
del state['_client']
return state
def __str__(self):
return self.__repr__()
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/media.py",
"copies": "2",
"size": "6300",
"license": "mit",
"hash": 5991390565998411000,
"line_mean": 28.5774647887,
"line_max": 92,
"alpha_frac": 0.3874603175,
"autogenerated": false,
"ratio": 4.6255506607929515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007601665674634621,
"num_lines": 213
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
class List(object):
def __init__(self, client, keys, user):
self._client = client
self.keys = keys
"""
:type: :class:`~python:list` of :class:`~python:tuple`
Keys (for trakt, imdb, tvdb, etc..), defined as:
..code-block::
[
(<service>, <id>)
]
"""
self.user = user
"""
:type: :class:`trakt.objects.User`
Author
"""
self.name = None
"""
:type: :class:`~python:str`
Name
"""
self.description = None
"""
:type: :class:`~python:str`
Description
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
self.likes = None
"""
:type: :class:`~python:int`
Number of likes
"""
self.allow_comments = None
"""
:type: :class:`~python:bool`
Flag indicating this list allows comments
"""
self.display_numbers = None
"""
:type: :class:`~python:bool`
Flag indicating this list displays numbers
"""
self.sort_by = None
"""
:type: :class:`~python:str`
Sort By
**Possible values:**
- :code:`rank`
- :code:`added`
- :code:`title`
- :code:`released`
- :code:`runtime`
- :code:`popularity`
- :code:`percentage`
- :code:`votes`
- :code:`my_rating`
- :code:`random`
- :code:`watched`
- :code:`collected`
"""
self.sort_how = None
"""
:type: :class:`~python:str`
Sort Direction
**Possible values:**
- :code:`asc`
- :code:`desc`
"""
self.created_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this list was created
"""
self.liked_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this list was liked
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this list was last updated
"""
self.comment_count = None
"""
:type: :class:`~python:int`
Number of comments
"""
self.item_count = None
"""
:type: :class:`~python:int`
Number of items
"""
@property
def id(self):
"""Retrieve the list identifier.
:rtype: :class:`~python:int`
"""
if self.pk is None:
return None
_, sid = self.pk
return sid
@property
def pk(self):
"""Retrieve the primary key (unique identifier for the list).
:return: :code:`("trakt", <id>)` or :code:`None` if no primary key is available
:rtype: :class:`~python:tuple`
"""
if not self.keys:
return None
return self.keys[0]
@property
def username(self):
"""Retrieve author username.
:rtype: :class:`~python:str`
"""
if not self.user:
return None
return self.user.username
@property
def like_count(self):
"""Retrieve the number of likes.
:rtype: :class:`~python:int`
"""
return self.likes
def _update(self, info=None):
if not info:
return
if 'created_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
if 'liked_at' in info:
self.liked_at = from_iso8601_datetime(info.get('liked_at'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
update_attributes(self, info, [
'name',
'description',
'privacy',
'likes',
'allow_comments',
'display_numbers',
'sort_by',
'sort_how',
'comment_count',
'item_count'
])
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.user.username, self.id, **kwargs)
#
# Actions
#
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.user.username, self.id, **kwargs)
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.user.username, self.id, **kwargs)
def __getstate__(self):
state = self.__dict__
if hasattr(self, '_client'):
del state['_client']
return state
def __repr__(self):
_, sid = self.pk
return '<List %r (%s)>' % (self.name, sid)
def __str__(self):
return self.__repr__()
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/list/base.py",
"copies": "2",
"size": "5889",
"license": "mit",
"hash": -1104939050643923100,
"line_mean": 20.4145454545,
"line_max": 92,
"alpha_frac": 0.4939718119,
"autogenerated": false,
"ratio": 4.182528409090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015990872997120607,
"num_lines": 275
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
class User(object):
def __init__(self, client, keys):
self._client = client
self.keys = keys
"""
:type: :class:`~python:list` of :class:`~python:tuple`
Keys (for trakt, imdb, tvdb, etc..), defined as:
..code-block::
[
(<service>, <id>)
]
"""
self.name = None
"""
:type: :class:`~python:str`
Name
"""
self.username = None
"""
:type: :class:`~python:str`
Username
"""
self.vip = None
"""
:type: :class:`~python:bool`
User has VIP
"""
self.vip_ep = None
"""
:type: :class:`~python:bool`
User has VIP Executive Producer Credit
"""
self.private = None
"""
:type: :class:`~python:bool`
User profile is private
"""
self.followed_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this user was followed
"""
self.friends_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this user was friended
"""
@property
def id(self):
"""Retrieve the user identifier.
:rtype: :class:`~python:int`
"""
if self.pk is None:
return None
_, sid = self.pk
return sid
@property
def pk(self):
"""Retrieve the primary key (unique identifier for the user).
:return: :code:`("trakt", <id>)` or :code:`None` if no primary key is available
:rtype: :class:`~python:tuple`
"""
if not self.keys:
return None
return self.keys[0]
def follow(self, **kwargs):
return self._client['users/*'].follow(
self.id,
**kwargs
)
def following(self, **kwargs):
return self._client['users/*/following'].get(
self.id,
**kwargs
)
def friends(self, **kwargs):
return self._client['users/*/friends'].get(
self.id,
**kwargs
)
def history(self, **kwargs):
return self._client['users/*/history'].get(
self.id,
**kwargs
)
def ratings(self, **kwargs):
return self._client['users/*/ratings'].get(
self.id,
**kwargs
)
def unfollow(self, **kwargs):
return self._client['users/*'].unfollow(
self.id,
**kwargs
)
def watchlist(self, **kwargs):
return self._client['users/*/watchlist'].get(
self.id,
**kwargs
)
def _update(self, info=None):
if not info:
return
if 'followed_at' in info:
self.followed_at = from_iso8601_datetime(info.get('followed_at'))
if 'friends_at' in info:
self.friends_at = from_iso8601_datetime(info.get('friends_at'))
update_attributes(self, info, [
'username',
'name',
'private',
'vip',
'vip_ep'
])
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
u = cls(client, keys, **kwargs)
u._update(info)
return u
def __repr__(self):
return '<User %r (%s)>' % (self.name, self.id)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/user.py",
"copies": "2",
"size": "3663",
"license": "mit",
"hash": -8339221587484293000,
"line_mean": 20.4210526316,
"line_max": 87,
"alpha_frac": 0.4851214851,
"autogenerated": false,
"ratio": 4.167235494880546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006645401382243488,
"num_lines": 171
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
class Rating(object):
def __init__(self, client, value=None, timestamp=None, votes=None):
self._client = client
self.value = value
"""
:type: :class:`~python:int`
Rating value (0 - 10)
"""
self.votes = votes
"""
:type: :class:`~python:int`
Number of votes
"""
self.timestamp = timestamp
"""
:type: :class:`~python:datetime.datetime`
Rating timestamp
"""
@classmethod
def _construct(cls, client, info):
if not info or 'rating' not in info:
return
r = cls(client)
r.value = info.get('rating')
r.votes = info.get('votes')
r.timestamp = from_iso8601_datetime(info.get('rated_at'))
return r
def __getstate__(self):
state = self.__dict__
if hasattr(self, '_client'):
del state['_client']
return state
def __eq__(self, other):
if not isinstance(other, Rating):
return NotImplemented
return self.value == other.value and self.timestamp == other.timestamp
def __repr__(self):
return '<Rating %s/10 voted by %s (%s) >' % (self.value, self.votes, self.timestamp)
def __str__(self):
return self.__repr__()
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/rating.py",
"copies": "2",
"size": "1426",
"license": "mit",
"hash": 4684152475789499000,
"line_mean": 22.7666666667,
"line_max": 92,
"alpha_frac": 0.5455820477,
"autogenerated": false,
"ratio": 4.039660056657223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001792114695340502,
"num_lines": 60
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime, to_iso8601_datetime, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
from six import iteritems
class Show(Media):
def __init__(self, client, keys, index=None):
super(Show, self).__init__(client, keys, index)
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.year = None
"""
:type: :class:`~python:int`
Year
"""
self.seasons = {}
"""
:type: :class:`~python:dict`
Seasons, defined as :code:`{season_num: Season}`
**Note:** this field might not be available with some methods
"""
self.watchers = None # trending
"""
:type: :class:`~python:int`
Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
and :code:`Trakt['shows'].trending()` methods)
"""
self.user_count = None # recommended
"""
:type: :class:`~python:int`
Number of user recommendations (returned by the :code:`Trakt['movies'].recommended()`
and :code:`Trakt['shows'].recommended()` methods)
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.airs = None
"""
:type: :class:`~python:dict`
Dictionary with day, time and timezone in which the show airs
"""
self.runtime = None
"""
:type: :class:`~python:int`
Duration (in minutes)
"""
self.certification = None
"""
:type: :class:`~python:str`
Content certification (e.g :code:`TV-MA`)
"""
self.network = None
"""
:type: :class:`~python:str`
Network in which the show is aired
"""
self.country = None
"""
:type: :class:`~python:str`
Country in which the show is aired
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.status = None
"""
:type: :class:`~python:str`
Value of :code:`returning series` (airing right now),
:code:`in production` (airing soon), :code:`planned` (in development),
:code:`canceled`, or :code:`ended`
"""
self.homepage = None
"""
:type: :class:`~python:str`
Homepage URL
"""
self.language = None
"""
:type: :class:`~python:str`
Language (for title, overview, etc..)
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
self.genres = None
"""
:type: :class:`~python:list`
Genres
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def episodes(self):
"""Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator
"""
for sk, season in iteritems(self.seasons):
# Yield each episode in season
for ek, episode in iteritems(season.episodes):
yield (sk, ek), episode
def to_identifier(self):
"""Return the show identifier which is compatible with requests that require show definitions.
:return: Show identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'ids': dict(self.keys),
'title': self.title,
'year': self.year
}
@deprecated('Show.to_info() has been moved to Show.to_dict()')
def to_info(self):
"""Dump show to a dictionary.
**Deprecated:** use the :code:`to_dict()` method instead.
"""
return self.to_dict()
def to_dict(self):
"""Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['votes'] = self.rating.votes
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Show, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Recommended
'user_count',
# Trending
'watchers',
# Extended Info
'airs',
'runtime',
'certification',
'network',
'country',
'status',
'homepage',
'language',
'available_translations',
'genres',
'aired_episodes'
])
# Ensure `year` attribute is an integer (fixes incorrect type returned by search)
if info.get('year'):
self.year = int(info['year'])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
show = cls(client, keys, index=index)
show._update(info, **kwargs)
return show
def __repr__(self):
return '<Show %r (%s)>' % (self.title, self.year)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/objects/show.py",
"copies": "2",
"size": "7238",
"license": "mit",
"hash": -3381719351902817300,
"line_mean": 23.7030716724,
"line_max": 102,
"alpha_frac": 0.5268029842,
"autogenerated": false,
"ratio": 4.2081395348837205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022424222453934666,
"num_lines": 293
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime, to_iso8601_datetime,\
from_iso8601_date, to_iso8601_date, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.video import Video
class Movie(Video):
def __init__(self, client, keys, index=None):
super(Movie, self).__init__(client, keys, index)
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.year = None
"""
:type: :class:`~python:int`
Year
"""
self.watchers = None # trending
"""
:type: :class:`~python:int`
Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
and :code:`Trakt['shows'].trending()` methods)
"""
self.user_count = None # recommended
"""
:type: :class:`~python:int`
Number of user recommendations (returned by the :code:`Trakt['movies'].recommended()`
and :code:`Trakt['shows'].recommended()` methods)
"""
self.tagline = None
"""
:type: :class:`~python:str`
Tagline
"""
self.released = None
"""
:type: :class:`~python:datetime.date`
Release date
"""
self.runtime = None
"""
:type: :class:`~python:int`
Duration (in minutes)
"""
self.certification = None
"""
:type: :class:`~python:str`
Content certification (e.g :code:`PG-13`)
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.homepage = None
"""
:type: :class:`~python:str`
Homepage URL
"""
self.trailer = None
"""
:type: :class:`~python:str`
Trailer URL
"""
self.language = None
"""
:type: :class:`~python:str`
Language (for title, overview, etc..)
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
self.genres = None
"""
:type: :class:`~python:list`
Genres
"""
def to_identifier(self):
"""Return the movie identifier which is compatible with requests that require movie definitions.
:return: Movie identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'ids': dict(self.keys),
'title': self.title,
'year': self.year
}
@deprecated('Movie.to_info() has been moved to Movie.to_dict()')
def to_info(self):
"""Dump movie to a dictionary.
**Deprecated:** use the :code:`to_dict()` method instead.
"""
return self.to_dict()
def to_dict(self):
"""Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at)
})
if self.rating:
result['rating'] = self.rating.value
result['votes'] = self.rating.votes
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.released:
result['released'] = to_iso8601_date(self.released)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.tagline:
result['tagline'] = self.tagline
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.homepage:
result['homepage'] = self.homepage
if self.trailer:
result['trailer'] = self.trailer
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Movie, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Recommended
'user_count',
# Trending
'watchers',
# Extended Info
'tagline',
'certification',
'homepage',
'trailer',
'language',
'available_translations',
'genres'
])
# Ensure `year` attribute is an integer (fixes incorrect type returned by search)
if info.get('year'):
self.year = int(info['year'])
# Extended Info
if info.get('runtime'):
self.runtime = info['runtime']
if 'released' in info:
self.released = from_iso8601_date(info.get('released'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info, index=None, **kwargs):
movie = cls(client, keys, index=index)
movie._update(info, **kwargs)
return movie
def __repr__(self):
return '<Movie %r (%s)>' % (self.title, self.year)
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/objects/movie.py",
"copies": "2",
"size": "6122",
"license": "mit",
"hash": -4094487682651731500,
"line_mean": 24.0901639344,
"line_max": 104,
"alpha_frac": 0.5310356093,
"autogenerated": false,
"ratio": 4.1816939890710385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5712729598371039,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import synchronized
from six.moves import xrange
from six.moves import _thread as thread
from threading import RLock
import logging
log = logging.getLogger(__name__)
class ListCollection(object):
def __init__(self, *lists):
self._lists = lists or []
self._lock = RLock()
@synchronized(lambda self: self._lock)
def append(self, value):
collection = self._lists[-1]
if type(collection) is not list:
raise ValueError()
collection.append(value)
@synchronized(lambda self: self._lock)
def find_list(self, index):
count = len(self)
if index >= count:
raise IndexError()
if index < 0:
index += count
pos = 0
for lst in self.lists():
l_len = len(lst)
if pos <= index < pos + l_len:
return lst, index - pos
else:
pos += l_len
return None, None
@synchronized(lambda self: self._lock)
def lists(self, resolve=True):
for collection in self._lists:
if resolve and callable(collection):
collection = collection()
yield collection
@synchronized(lambda self: self._lock)
def pop(self, index=None):
if index is None:
index = len(self) - 1
list, index = self.find_list(index)
if list is None:
raise IndexError()
return list.pop(index)
@synchronized(lambda self: self._lock)
def __eq__(self, other):
if len(self) != len(other):
return False
for x in xrange(len(self)):
if self[x] != other[x]:
return False
return True
@synchronized(lambda self: self._lock)
def __contains__(self, value):
for x in self:
if x == value:
return True
return False
def __getitem__(self, index):
list, index = self.find_list(index)
if list is None:
raise IndexError()
return list[index]
@synchronized(lambda self: self._lock)
def __iter__(self):
for lst in self.lists():
# Yield items from each list
for x in lst:
yield x
@synchronized(lambda self: self._lock)
def __len__(self):
return sum([len(lst) for lst in self.lists()])
def __setitem__(self, index, value):
list, index = self.find_list(index)
if list is None:
raise IndexError()
list[index] = value
def __repr__(self):
return '[%s]' % ', '.join(repr(x) for x in self)
__hash__ = None
class ContextCollection(object):
def __init__(self, base=None):
self.base = base or []
self._lock = RLock()
self._threads = {}
@synchronized(lambda self: self._lock)
def build(self, ident):
if ident not in self._threads:
self._threads[ident] = ListCollection(lambda: self.base, [])
return self._threads[ident]
@property
def current(self):
ident = thread.get_ident()
try:
return self._threads[ident]
except KeyError:
return self.build(ident)
def append(self, value):
self.current.append(value)
@synchronized(lambda self: self._lock)
def clear(self):
ident = thread.get_ident()
if ident not in self._threads:
return
del self._threads[ident]
def pop(self, index=None):
return self.current.pop(index)
def __getitem__(self, index):
return self.current[index]
def __len__(self):
return len(self.current)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/core/context_collection.py",
"copies": "2",
"size": "3772",
"license": "mit",
"hash": -6519713933198830000,
"line_mean": 22.1411042945,
"line_max": 72,
"alpha_frac": 0.5509013786,
"autogenerated": false,
"ratio": 4.167955801104973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 163
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import to_iso8601_datetime, from_iso8601_datetime, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
class Season(Media):
def __init__(self, client, keys=None, index=None):
super(Season, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.episodes = {}
"""
:type: :class:`~python:dict`
Episodes, defined as :code:`{episode_num: Episode}`
**Note:** this field might not be available with some methods
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.episode_count = None
"""
:type: :class:`~python:int`
Total episode count
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def to_identifier(self):
"""Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
}
@deprecated('Season.to_info() has been moved to Season.to_dict()')
def to_info(self):
"""Dump season to a dictionary.
**Deprecated:** use the :code:`to_dict()` method instead.
"""
return self.to_dict()
def to_dict(self):
"""Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['votes'] = self.rating.votes
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Season, self)._update(info, **kwargs)
update_attributes(self, info, [
# Extended Info
'episode_count',
'aired_episodes'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
season = cls(client, keys, index=index)
season._update(info, **kwargs)
return season
def __repr__(self):
if self.show:
return '<Season %r - S%02d>' % (self.show.title, self.pk)
return '<Season S%02d>' % self.pk
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/objects/season.py",
"copies": "2",
"size": "3527",
"license": "mit",
"hash": -7450151774140827000,
"line_mean": 25.5187969925,
"line_max": 106,
"alpha_frac": 0.5520272186,
"autogenerated": false,
"ratio": 4.06805074971165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003119287359555494,
"num_lines": 133
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import to_iso8601_datetime
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncHistoryInterface(Get, Add, Remove):
path = 'sync/history'
flags = {'is_watched': True}
def get(self, media=None, id=None, start_at=None, end_at=None, store=None,
extended=None, page=None, per_page=None, **kwargs):
if not media and id:
raise ValueError('The "id" parameter also requires the "media" parameter to be defined')
# Build parameters
params = []
if id:
params.append(id)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
if start_at:
query['start_at'] = to_iso8601_datetime(start_at)
if end_at:
query['end_at'] = to_iso8601_datetime(end_at)
# Request watched history
return super(SyncHistoryInterface, self).get(
media, store, params,
flat=True,
query=query,
**kwargs
)
#
# Shortcut methods
#
@authenticated
def movies(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
'movies',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def shows(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
'shows',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def seasons(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
'seasons',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
@authenticated
def episodes(self, id=None, start_at=None, end_at=None, store=None, **kwargs):
return self.get(
'episodes',
id=id,
start_at=start_at,
end_at=end_at,
store=store,
**kwargs
)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/interfaces/sync/history.py",
"copies": "2",
"size": "2373",
"license": "mit",
"hash": -7275314896081792000,
"line_mean": 25.0769230769,
"line_max": 100,
"alpha_frac": 0.5242309313,
"autogenerated": false,
"ratio": 3.871125611745514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5395356543045514,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import try_convert
from six.moves.urllib_parse import parse_qsl
import functools
import httmock
import itertools
import json
import math
import os
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
FIXTURES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'fixtures'))
def authenticated(func):
@functools.wraps(func)
def wrapper(url, request, *args, **kwargs):
if not is_authenticated(request):
return httmock.httmock.response(403)
return func(url, request, *args, **kwargs)
return wrapper
def is_authenticated(request):
# Ensure API Key has been provided
if request.headers.get('trakt-api-key') not in ['mock-client_id', 'mock']:
return False
# OAuth
if request.headers.get('Authorization') in ['Bearer mock-access_token', 'Bearer mock']:
return True
# xAuth
return (
request.headers.get('trakt-user-login') == 'mock' and request.headers.get('trakt-user-token') == 'mock'
)
def get_content(netloc, path, query=None):
components = path.strip('/').split('/') + list(itertools.chain.from_iterable([
('#' + key, value) for key, value in sorted(parse_qsl(query or ''))
]))
path = None
# Search for matching fixture
current = os.path.join(FIXTURES_DIR, netloc)
for component in components:
current = os.path.join(current, component)
if os.path.exists(current + '.json'):
path = current + '.json'
if not os.path.exists(current):
break
if not path:
return None
# Read fixture content
with open(path, 'r') as fp:
return fp.read()
def get_json(netloc, path, query=None):
content = get_content(netloc, path, query)
if content is None:
return None
return json.loads(content)
def get_fixture(netloc, path, query=None, request=None):
content = get_content(netloc, path, query)
if content is None:
return httmock.response(404, request=request)
return httmock.response(
200, content, {
'Content-Type': 'application/json'
},
request=request
)
def paginate(url, request, content_type='application/json'):
parameters = dict(parse_qsl(url.query))
page = try_convert(parameters.get('page'), int) or 1
limit = try_convert(parameters.get('limit'), int) or 10
# Retrieve items from fixture
items = get_json(url.netloc, url.path, url.query)
if items is None:
return httmock.response(404, request=request)
# Calculate page count and item offset
offset = (page - 1) * limit
page_count = int(math.ceil(float(len(items)) / limit))
if request.method == 'HEAD':
return httmock.response(
200, '', {
'Content-Type': content_type,
'X-Pagination-Page': page,
'X-Pagination-Limit': limit,
'X-Pagination-Page-Count': page_count,
'X-Pagination-Item-Count': len(items)
},
request=request
)
if request.method == 'GET':
return httmock.response(
200, json.dumps(items[offset:offset + limit]), {
'Content-Type': content_type,
'X-Pagination-Page': page,
'X-Pagination-Limit': limit,
'X-Pagination-Page-Count': page_count,
'X-Pagination-Item-Count': len(items)
},
request=request
)
return httmock.response(404, request=request)
@httmock.urlmatch(netloc='api.trakt.tv')
def fixtures(url, request):
return get_fixture(
url.netloc, url.path,
query=url.query,
request=request
)
@httmock.all_requests
def unknown(url, request):
return httmock.response(501, request=request)
@httmock.urlmatch(netloc='api.trakt.tv', method='GET', path=r'/calendars/all/\w+/\d{4}-\d{2}-\d{2}(/\d{1,2})?')
def calendars_all_period(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='GET', path=r'/calendars/my/\w+')
@authenticated
def calendars_my(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/oauth/token')
def oauth_token(url, request):
assert request.body
# Validate request body
data = json.loads(request.body)
assert data.get('client_id') == 'mock-client_id'
assert data.get('client_secret') == 'mock-client_secret'
assert data.get('grant_type') in ['authorization_code', 'refresh_token']
assert data.get('redirect_uri') == 'urn:ietf:wg:oauth:2.0:oob'
if data['grant_type'] == 'authorization_code':
assert data.get('code') == 'ABCD1234'
else:
assert data.get('refresh_token') == 'mock-refresh_token'
# Return mock token
return httmock.response(200, json.dumps({
'access_token': 'mock-access_token',
'token_type': 'bearer',
'expires_in': 7200,
'refresh_token': 'mock-refresh_token',
'scope': 'public'
}), {
'Content-Type': 'application/json'
})
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/oauth/device/code')
def oauth_device_code(url, request):
assert request.body
# Validate request body
data = json.loads(request.body)
assert data.get('client_id') == 'mock-client_id'
# Return mock device code
return httmock.response(200, json.dumps({
'device_code': 'mock-device_code',
'user_code': 'mock-user_code',
'verification_url': 'https://trakt.tv/activate',
'expires_in': 600,
'interval': 5
}), {
'Content-Type': 'application/json'
})
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/oauth/device/token')
def oauth_device_token(url, request):
assert request.body
# Validate request body
data = json.loads(request.body)
assert data.get('client_id') == 'mock-client_id'
assert data.get('client_secret') == 'mock-client_secret'
assert data.get('code') == 'mock-device_code'
# Return mock token
return httmock.response(200, json.dumps({
'access_token': 'mock-access_token',
'token_type': 'bearer',
'expires_in': 7200,
'refresh_token': 'mock-refresh_token',
'scope': 'public'
}), {
'Content-Type': 'application/json'
})
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/users/likes')
@authenticated
def likes(url, request, content_type='application/json'):
return paginate(url, request, content_type=content_type)
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/users/likes')
def likes_invalid_content_type(url, request):
return likes(url, request, content_type='text/plain')
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/users/likes')
@authenticated
def likes_invalid_json(url, request):
parameters = dict(parse_qsl(url.query))
page = try_convert(parameters.get('page'), int) or 1
# Return invalid response for page #2
if request.method == 'GET' and page == 2:
return httmock.response(
200, '<invalid-json-response>', {
'Content-Type': 'application/json'
},
request=request
)
# Return page
return likes(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/users/likes')
@authenticated
def likes_request_failure(url, request):
parameters = dict(parse_qsl(url.query))
page = try_convert(parameters.get('page'), int) or 1
# Return invalid response for page #2
if request.method == 'GET' and page == 2:
return httmock.response(400, request=request)
# Return page
return likes(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='GET', path=r'/users/[\w-]+/lists')
@authenticated
def lists(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path=r'/users/[\w-]+/lists')
@authenticated
def list_create(url, request):
data = json.loads(request.body)
assert data
assert data.get('name')
# Generate slug from list name
slug = data['name'].lower().replace(' ', '-')
# Return fixture
return get_fixture(
url.netloc, '%s/%s' % (url.path, slug),
query=url.query,
request=request
)
@httmock.urlmatch(netloc='api.trakt.tv', method='GET', path=r'/users/[\w-]+/lists/[\w-]+')
@authenticated
def list_get(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='DELETE', path=r'/users/[\w-]+/lists/[\w-]+')
@authenticated
def list_delete(url, request):
return httmock.response(204, request=request)
@httmock.urlmatch(netloc='api.trakt.tv', method='PUT', path=r'/users/[\w-]+/lists/[\w-]+')
@authenticated
def list_update(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path=r'/users/[\w-]+/lists/[\w-]+/like')
@authenticated
def list_like(url, request):
return httmock.response(204, request=request)
@httmock.urlmatch(netloc='api.trakt.tv', method='DELETE', path=r'/users/[\w-]+/lists/[\w-]+/like')
@authenticated
def list_unlike(url, request):
return httmock.response(204, request=request)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path=r'/users/[\w-]+/lists/[\w-]+/items')
@authenticated
def list_item_add(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path=r'/users/[\w-]+/lists/[\w-]+/items/remove')
@authenticated
def list_item_remove(url, request):
return fixtures(url, request)
@authenticated
def scrobble(url, request, action):
data = json.loads(request.body)
assert data
# Ensure provided identifier is correct
assert data.get('movie', {}).get('ids', {}).get('tmdb') == 76341
# Return response
return httmock.response(
200, {
'id': 9832,
'action': action,
'progress': data.get('progress'),
'sharing': {
'facebook': False,
'twitter': True,
'tumblr': False
},
'movie': {
'title': 'Mad Max: Fury Road',
'year': 2015,
'ids': {
'trakt': 56360,
'slug': 'mad-max-fury-road-2015',
'imdb': 'tt1392190',
'tmdb': 76341
}
}
}, {
'Content-Type': 'application/json'
},
request=request
)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/scrobble/start')
def scrobble_start(url, request):
return scrobble(
url, request,
action='start'
)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/scrobble/pause')
def scrobble_pause(url, request):
return scrobble(
url, request,
action='pause'
)
@httmock.urlmatch(netloc='api.trakt.tv', method='POST', path='/scrobble/stop')
def scrobble_stop(url, request):
return scrobble(
url, request,
action='stop'
)
@httmock.urlmatch(netloc='api.trakt.tv', method='GET', path=r'/sync/\w+')
@authenticated
def sync_get(url, request):
return fixtures(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/sync/history(/\w+)?')
@authenticated
def sync_history(url, request):
return paginate(url, request)
@httmock.urlmatch(netloc='api.trakt.tv', method='DELETE', path=r'/sync/playback/\d+')
@authenticated
def sync_playback_delete(url, request):
return httmock.response(204, request=request)
@httmock.urlmatch(netloc='api.trakt.tv', path=r'/sync/watchlist(/\w+)?')
@authenticated
def sync_watchlist(url, request):
return paginate(url, request)
| {
"repo_name": "fuzeman/trakt.py",
"path": "tests/core/mock.py",
"copies": "1",
"size": "11876",
"license": "mit",
"hash": -5045864339730944000,
"line_mean": 26.6186046512,
"line_max": 111,
"alpha_frac": 0.6193162681,
"autogenerated": false,
"ratio": 3.4765807962529274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45958970643529273,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt import Trakt
from trakt.objects import Episode
import logging
import os
logging.basicConfig(level=logging.DEBUG)
def print_lookup(id, service):
print("Trakt['search'].lookup(%r, %r)" % (id, service))
items = Trakt['search'].lookup(id, service, per_page=10)
item = items[0]
if type(item) is Episode and item.show:
sk, ek = item.pk
print('\t%s (%s) - S%02dE%02d %r' % (item.show.title, item.show.year, sk, ek, item.title))
else:
print('\t%s (%s)' % (item.title, item.year))
def print_query(query, media=None, year=None):
print("Trakt['search'].query(%r, %r, %r)" % (query, media, year))
items = Trakt['search'].query(query, media, year, pagination=True, per_page=10)
for item in items.get(1): # Retrieve first page
if type(item) is Episode and item.show:
sk, ek = item.pk
print('\t[%.2d%%] %s (%s) - S%02dE%02d %r' % (item.score, item.show.title, item.show.year,
sk, ek, item.title))
else:
print('\t[%.2d%%] %s (%s)' % (item.score, item.title, item.year))
if __name__ == '__main__':
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID')
)
# Lookup by id
print_lookup('tt0848228', 'imdb')
print_lookup('tt0903747', 'imdb')
print_lookup('tt0959621', 'imdb')
# Search by name
print_query('The Avengers', 'movie')
print_query('Breaking Bad', 'show')
print_query('Fly', 'episode')
| {
"repo_name": "fuzeman/trakt.py",
"path": "examples/search.py",
"copies": "1",
"size": "1610",
"license": "mit",
"hash": -5589272025454624000,
"line_mean": 29.3773584906,
"line_max": 102,
"alpha_frac": 0.5813664596,
"autogenerated": false,
"ratio": 3.132295719844358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4213662179444358,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt import Trakt
import logging
import os
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID')
)
# Login
username = os.environ.get('USERNAME')
token = os.environ.get('AUTH_TOKEN')
if token is None:
# Attempt authentication (retrieve new token)
token = Trakt['auth'].login(username, os.environ.get('PASSWORD'))
print('Using token: %r' % token)
with Trakt.configuration.auth(username, token):
print(Trakt['sync/collection'].movies())
with Trakt.configuration.http(retry=True):
print(Trakt['movies'].get('tron-legacy-2010')) # use only traktId, trakt slug or imdbId
print(Trakt['shows'].get(1390)) # use only traktId, trakt slug or imdbId
print(Trakt['shows'].seasons('tt0944947'))
print(Trakt['shows'].season('game-of-thrones', 1))
print(Trakt['shows'].episode('game-of-thrones', 1, 1))
| {
"repo_name": "fuzeman/trakt.py",
"path": "examples/media_center.py",
"copies": "1",
"size": "1103",
"license": "mit",
"hash": 3127401884226761000,
"line_mean": 28.0263157895,
"line_max": 100,
"alpha_frac": 0.6355394379,
"autogenerated": false,
"ratio": 3.664451827242525,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.47999912651425247,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncRatingsInterface(Get, Add, Remove):
path = 'sync/ratings'
@authenticated
def get(self, media=None, rating=None, store=None, extended=None, flat=False, page=None, per_page=None, **kwargs):
if media and not flat and page is not None:
raise ValueError('`page` parameter is only supported with `flat=True`')
# Build parameters
params = []
if rating is not None:
params.append(rating)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
# Request ratings
return super(SyncRatingsInterface, self).get(
media, store, params,
flat=flat or media is None,
query=query,
**kwargs
)
#
# Shortcut methods
#
@authenticated
def all(self, rating=None, store=None, **kwargs):
return self.get(
'all',
rating=rating,
store=store,
**kwargs
)
@authenticated
def movies(self, rating=None, store=None, **kwargs):
return self.get(
'movies',
rating=rating,
store=store,
**kwargs
)
@authenticated
def shows(self, rating=None, store=None, **kwargs):
return self.get(
'shows',
rating=rating,
store=store,
**kwargs
)
@authenticated
def seasons(self, rating=None, store=None, **kwargs):
return self.get(
'seasons',
rating=rating,
store=store,
**kwargs
)
@authenticated
def episodes(self, rating=None, store=None, **kwargs):
return self.get(
'episodes',
rating=rating,
store=store,
**kwargs
)
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/interfaces/sync/ratings.py",
"copies": "2",
"size": "2061",
"license": "mit",
"hash": -6678374722440508000,
"line_mean": 23.8313253012,
"line_max": 118,
"alpha_frac": 0.531780689,
"autogenerated": false,
"ratio": 4.375796178343949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5907576867343949,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncWatchlistInterface(Get, Add, Remove):
path = 'sync/watchlist'
flags = {'in_watchlist': True}
def get(self, media=None, sort=None, store=None, extended=None, flat=False,
page=None, per_page=None, **kwargs):
if media and not flat and page is not None:
raise ValueError('`page` parameter is only supported with `flat=True`')
# Build parameters
params = []
if sort:
params.append(sort)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
# Request watched history
return super(SyncWatchlistInterface, self).get(
media, store,
params=params,
query=query,
flat=flat or media is None,
**kwargs
)
#
# Shortcut methods
#
@authenticated
def movies(self, sort=None, store=None, **kwargs):
return self.get(
'movies',
sort=sort,
store=store,
**kwargs
)
@authenticated
def shows(self, sort=None, store=None, **kwargs):
return self.get(
'shows',
sort=sort,
store=store,
**kwargs
)
@authenticated
def seasons(self, sort=None, store=None, **kwargs):
return self.get(
'seasons',
sort=sort,
store=store,
**kwargs
)
@authenticated
def episodes(self, sort=None, store=None, **kwargs):
return self.get(
'episodes',
sort=sort,
store=store,
**kwargs
)
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/interfaces/sync/watchlist.py",
"copies": "2",
"size": "1880",
"license": "mit",
"hash": 3548079214164012500,
"line_mean": 23.4155844156,
"line_max": 83,
"alpha_frac": 0.5271276596,
"autogenerated": false,
"ratio": 4.331797235023042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5858924894623041,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.interfaces import auth
from trakt.interfaces import calendars
from trakt.interfaces import lists
from trakt.interfaces import movies
from trakt.interfaces import oauth
from trakt.interfaces import scrobble
from trakt.interfaces import search
from trakt.interfaces import shows
from trakt.interfaces import sync
from trakt.interfaces import users
INTERFACES = [
# /
auth.AuthInterface,
oauth.OAuthInterface,
oauth.DeviceOAuthInterface,
oauth.PinOAuthInterface,
scrobble.ScrobbleInterface,
search.SearchInterface,
# /calendars/
calendars.AllCalendarsInterface,
calendars.MyCalendarsInterface,
# /lists/
lists.ListsInterface,
# /sync/
sync.SyncInterface,
sync.SyncCollectionInterface,
sync.SyncHistoryInterface,
sync.SyncPlaybackInterface,
sync.SyncRatingsInterface,
sync.SyncWatchedInterface,
sync.SyncWatchlistInterface,
# /shows/
shows.ShowsInterface,
# /movies/
movies.MoviesInterface,
# /users/
users.UsersInterface,
users.UsersProfileInterface,
users.UsersSettingsInterface,
# /users/following
users.UsersFollowingInterface,
# /users/following
users.UsersFriendsInterface,
# /users/history
users.UsersHistoryInterface,
# /users/lists/
users.UsersListsInterface,
users.UsersListInterface,
# /users/ratings
users.UsersRatingsInterface,
# /users/watched
users.UsersWatchedInterface,
# /users/watchlist
users.UsersWatchlistInterface
]
def get_interfaces():
for interface in INTERFACES:
if not interface.path:
continue
path = interface.path.strip('/')
if path:
path = path.split('/')
else:
path = []
yield path, interface
def construct_map(client, d=None, interfaces=None):
if d is None:
d = {}
if interfaces is None:
interfaces = get_interfaces()
for path, interface in interfaces:
if len(path) == 0:
continue
key = path.pop(0)
if len(path) == 0:
d[key] = interface(client)
continue
value = d.get(key, {})
if type(value) is not dict:
value = {None: value}
construct_map(client, value, [(path, interface)])
d[key] = value
return d
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/interfaces/__init__.py",
"copies": "2",
"size": "2427",
"license": "mit",
"hash": 2743520479756268000,
"line_mean": 19.5677966102,
"line_max": 64,
"alpha_frac": 0.6588380717,
"autogenerated": false,
"ratio": 4.099662162162162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 118
} |
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
from trakt.mapper.user import UserMapper
class ListMapper(Mapper):
@classmethod
def custom_list(cls, client, item, username=None, **kwargs):
if 'list' in item:
i_list = item['list']
else:
i_list = item
# Retrieve item keys
pk, keys = cls.get_ids('custom_list', i_list)
if pk is None:
return None
# Retrieve user details
i_user = i_list.get('user') or {}
if username:
i_user.setdefault('username', username)
# Create list
custom_list = cls.construct(
client, 'custom_list', i_list, keys,
user=UserMapper.user(client, i_user),
**kwargs
)
# Update with root info
if 'list' in item:
custom_list._update(item)
return custom_list
@classmethod
def public_lists(cls, client, items, **kwargs):
if not items:
return None
return [
cls.public_list(client, item, **kwargs) for item in items
if item
]
@classmethod
def public_list(cls, client, item, **kwargs):
if 'list' in item:
i_list = item['list']
else:
i_list = item
# Retrieve item keys
pk, keys = cls.get_ids('public_list', i_list)
if pk is None:
return None
# Retrieve totals
comment_total = i_list.get('comment_count')
like_total = i_list.get('likes')
# Create list
public_list = cls.construct(
client, 'public_list', i_list, keys,
user=UserMapper.user(client, i_list['user']),
**kwargs
)
public_list._update({
'comment_total': comment_total,
'like_total': like_total
})
# Update with root info
if 'list' in item:
info = item.copy()
info['likes'] = info.pop('like_count')
public_list._update(info)
return public_list
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/mapper/list.py",
"copies": "2",
"size": "2142",
"license": "mit",
"hash": 6986715976120110000,
"line_mean": 23.9069767442,
"line_max": 69,
"alpha_frac": 0.5275443511,
"autogenerated": false,
"ratio": 4.103448275862069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5630992626962069,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
import logging
log = logging.getLogger(__name__)
class SearchMapper(Mapper):
@classmethod
def process(cls, client, item, media=None, **kwargs):
if media is None:
# Retrieve `media` from `item`
media = item.get('type')
if not media:
log.warning('Item %r has no "type" defined', media)
return None
# Find function for `media`
func = getattr(cls, media, None)
if not func:
log.warning('Unknown media type: %r', media)
return None
# Map item
return func(client, item, **kwargs)
@classmethod
def process_many(cls, client, items, **kwargs):
if not items:
return None
return [item for item in [cls.process(client, item, **kwargs) for item in items] if item]
@classmethod
def movie(cls, client, item, **kwargs):
if 'movie' in item:
i_movie = item['movie']
else:
i_movie = item
# Retrieve item keys
pk, keys = cls.get_ids('movie', i_movie)
if pk is None:
return None
# Create object
movie = cls.construct(client, 'movie', i_movie, keys, **kwargs)
if 'movie' in item:
movie._update(item)
return movie
@classmethod
def list(cls, client, item, **kwargs):
if 'list' in item:
i_list = item['list']
else:
i_list = item
# Retrieve item keys
pk, keys = cls.get_ids('custom_list', i_list)
if pk is None:
return None
# Create object
custom_list = cls.construct(client, 'custom_list', i_list, keys, **kwargs)
# Update with root info
if 'list' in item:
custom_list._update(item)
return custom_list
@classmethod
def officiallist(cls, client, item, **kwargs):
return None
@classmethod
def person(cls, client, item, **kwargs):
if 'person' in item:
i_person = item['person']
else:
i_person = item
# Retrieve item keys
pk, keys = cls.get_ids('person', i_person)
if pk is None:
return None
# Create object
person = cls.construct(client, 'person', i_person, keys, **kwargs)
# Update with root info
if 'person' in item:
person._update(item)
return person
@classmethod
def show(cls, client, item, **kwargs):
if 'show' in item:
i_show = item['show']
else:
i_show = item
# Retrieve item keys
pk, keys = cls.get_ids('show', i_show)
if pk is None:
return None
# Create object
show = cls.construct(client, 'show', i_show, keys, **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
return show
@classmethod
def episodes(cls, client, items, **kwargs):
return [cls.episode(client, item, **kwargs) for item in items]
@classmethod
def episode(cls, client, item, **kwargs):
if 'episode' in item:
i_episode = item['episode']
else:
i_episode = item
# Retrieve item keys
pk, keys = cls.get_ids('episode', i_episode)
if pk is None:
return None
# Create object
episode = cls.construct(client, 'episode', i_episode, keys, **kwargs)
if 'show' in item:
episode.show = cls.show(client, item['show'])
if 'season' in item:
episode.season = cls.season(client, item['season'])
# Update with root info
if 'episode' in item:
episode._update(item)
return episode
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/mapper/search.py",
"copies": "2",
"size": "3893",
"license": "mit",
"hash": 2731941647348405000,
"line_mean": 23.4842767296,
"line_max": 97,
"alpha_frac": 0.5384022605,
"autogenerated": false,
"ratio": 4.123940677966102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001399514113442604,
"num_lines": 159
} |
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
import logging
log = logging.getLogger(__name__)
class SyncMapper(Mapper):
@classmethod
def process(cls, client, store, items, media=None, flat=False, **kwargs):
if flat:
return cls.iterate_items(
client, store, items, cls.item,
media=media,
**kwargs
)
return cls.map_items(
client, store, items, cls.item,
media=media,
**kwargs
)
@classmethod
def item(cls, client, store, item, media=None, **kwargs):
i_type = item.get('type') or media
if not i_type:
raise ValueError('Unknown item type')
# Find item type function
if i_type.startswith('movie'):
func = cls.movie
elif i_type.startswith('show'):
func = cls.show
elif i_type.startswith('season'):
func = cls.season
elif i_type.startswith('episode'):
func = cls.episode
else:
raise ValueError('Unknown item type: %r' % i_type)
# Map item
return func(
client, store, item,
**kwargs
)
#
# Movie
#
@classmethod
def movies(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.movie, **kwargs)
@classmethod
def movie(cls, client, store, item, **kwargs):
movie = cls.map_item(client, store, item, 'movie', **kwargs)
# Update with root info
if 'movie' in item:
movie._update(item)
return movie
#
# Show
#
@classmethod
def shows(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.show, **kwargs)
@classmethod
def show(cls, client, store, item, append=False, **kwargs):
show = cls.map_item(
client, store, item, 'show',
append=append,
**kwargs
)
# Update with root info
if 'show' in item:
show._update(item)
# Process any episodes in the item
for i_season in item.get('seasons', []):
season_num = i_season.get('number')
season = cls.show_season(client, show, season_num, **kwargs)
for i_episode in i_season.get('episodes', []):
episode_num = i_episode.get('number')
cls.show_episode(client, season, episode_num, i_episode, **kwargs)
return show
@classmethod
def show_season(cls, client, show, season_num, item=None, **kwargs):
season = cls.map_item(client, show.seasons, item, 'season', key=season_num, parent=show, **kwargs)
season.show = show
# Update with root info
if item and 'season' in item:
season._update(item)
return season
@classmethod
def show_episode(cls, client, season, episode_num, item=None, **kwargs):
episode = cls.map_item(
client, season.episodes, item, 'episode',
key=episode_num,
parent=season,
**kwargs
)
episode.show = season.show
episode.season = season
# Update with root info
if item and 'episode' in item:
episode._update(item)
return episode
#
# Season
#
@classmethod
def seasons(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.season, **kwargs)
@classmethod
def season(cls, client, store, item, **kwargs):
i_season = item.get('season', {})
season_num = i_season.get('number')
# Build `show`
show = cls.show(client, store, item['show'])
if show is None:
# Unable to create show
return None
# Build `season`
season = cls.show_season(client, show, season_num, item, **kwargs)
return season
#
# Episode
#
@classmethod
def episodes(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.episode, **kwargs)
@classmethod
def episode(cls, client, store, item, append=False, **kwargs):
i_episode = item.get('episode', {})
season_num = i_episode.get('season')
episode_num = i_episode.get('number')
# Build `show`
show = cls.show(client, store, item['show'])
if show is None:
# Unable to create show
return None
# Build `season`
season = cls.show_season(
client, show, season_num,
**kwargs
)
# Build `episode`
episode = cls.show_episode(
client, season, episode_num, item,
append=append,
**kwargs
)
return episode
#
# Helpers
#
@classmethod
def map_items(cls, client, store, items, func, **kwargs):
if store is None:
store = {}
for item in items:
result = func(
client, store, item,
**kwargs
)
if result is None:
log.warning('Unable to map item: %s', item)
return store
@classmethod
def iterate_items(cls, client, store, items, func, media=None, **kwargs):
if store is None:
store = {}
if 'movies' not in store:
store['movies'] = {}
if 'shows' not in store:
store['shows'] = {}
if 'seasons' not in store:
store['seasons'] = {}
if 'episodes' not in store:
store['episodes'] = {}
for item in items:
i_type = item.get('type') or media
if not i_type:
raise ValueError('Unknown item type')
if i_type.startswith('movie'):
i_store = store['movies']
elif i_type.startswith('show'):
i_store = store['shows']
elif i_type.startswith('season'):
i_store = store['seasons']
elif i_type.startswith('episode'):
i_store = store['episodes']
else:
raise ValueError('Unknown item type: %r' % i_type)
# Map item
result = func(
client, i_store, item,
append=True,
media=media,
**kwargs
)
if result is None:
log.warning('Unable to map item: %s', item)
continue
# Yield item in iterator
yield result
@classmethod
def map_item(cls, client, store, item, media, key=None, parent=None, append=False, **kwargs):
if item and media in item:
i_data = item[media]
else:
i_data = item
# Retrieve item key
pk, keys = cls.get_ids(media, i_data, parent=parent)
if key is not None:
pk = key
if not keys:
keys = [pk]
if pk is None:
# Item has no keys
return None
if store is None or pk not in store or append:
# Construct item
obj = cls.construct(client, media, i_data, keys, **kwargs)
if store is None:
return obj
# Update store
if append:
if pk in store:
store[pk].append(obj)
else:
store[pk] = [obj]
else:
store[pk] = obj
return obj
else:
# Update existing item
store[pk]._update(i_data, **kwargs)
return store[pk]
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/mapper/sync.py",
"copies": "2",
"size": "7788",
"license": "mit",
"hash": -4535826640752833500,
"line_mean": 24.6184210526,
"line_max": 106,
"alpha_frac": 0.5095017976,
"autogenerated": false,
"ratio": 4.262725779967159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5772227577567159,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
class ListItemMapper(Mapper):
@classmethod
def process(cls, client, item, media=None, **kwargs):
if media is None:
# Retrieve `media` from `item`
media = item.get('type')
if not media:
return ValueError()
# Find function for `media`
func = getattr(cls, media, None)
if not func:
raise ValueError('Unknown media type: %r', media)
# Map item
return func(client, item, **kwargs)
@classmethod
def process_many(cls, client, items, **kwargs):
if not items:
return None
return [
item for item in [cls.process(client, item, index=x + 1, **kwargs) for x, item in enumerate(items)]
if item
]
@classmethod
def movie(cls, client, item, **kwargs):
if 'movie' in item:
i_movie = item['movie']
else:
i_movie = item
# Retrieve item keys
pk, keys = cls.get_ids('movie', i_movie)
if pk is None:
return None
# Create object
movie = cls.construct(client, 'movie', i_movie, keys, **kwargs)
if 'movie' in item:
movie._update(item)
return movie
@classmethod
def list(cls, client, item, **kwargs):
return None
@classmethod
def officiallist(cls, client, item, **kwargs):
return None
@classmethod
def person(cls, client, item, **kwargs):
if 'person' in item:
i_person = item['person']
else:
i_person = item
# Retrieve item keys
pk, keys = cls.get_ids('person', i_person)
if pk is None:
return None
# Create object
person = cls.construct(client, 'person', i_person, keys, **kwargs)
# Update with root info
if 'person' in item:
person._update(item)
return person
@classmethod
def show(cls, client, item, **kwargs):
if 'show' in item:
i_show = item['show']
else:
i_show = item
# Retrieve item keys
pk, keys = cls.get_ids('show', i_show)
if pk is None:
return None
# Create object
show = cls.construct(client, 'show', i_show, keys, **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
return show
@classmethod
def seasons(cls, client, items, **kwargs):
return [cls.season(client, item, **kwargs) for item in items]
@classmethod
def season(cls, client, item, **kwargs):
if 'season' in item:
i_season = item['season']
else:
i_season = item
# Retrieve item keys
pk, keys = cls.get_ids('season', i_season)
if pk is None:
return None
# Create object
season = cls.construct(client, 'season', i_season, keys, **kwargs)
if 'show' in item:
season.show = cls.show(client, item['show'])
return season
@classmethod
def episodes(cls, client, items, **kwargs):
return [cls.episode(client, item, **kwargs) for item in items]
@classmethod
def episode(cls, client, item, **kwargs):
if 'episode' in item:
i_episode = item['episode']
else:
i_episode = item
# Retrieve item keys
pk, keys = cls.get_ids('episode', i_episode)
if pk is None:
return None
# Create object
episode = cls.construct(client, 'episode', i_episode, keys, **kwargs)
if 'show' in item:
episode.show = cls.show(client, item['show'])
if 'season' in item:
episode.season = cls.season(client, item['season'])
# Update with root info
if 'episode' in item:
episode._update(item)
return episode
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/mapper/list_item.py",
"copies": "2",
"size": "4027",
"license": "mit",
"hash": 6711551216506225000,
"line_mean": 23.7055214724,
"line_max": 111,
"alpha_frac": 0.5383660293,
"autogenerated": false,
"ratio": 4.147270854788878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5685636884088878,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
class SummaryMapper(Mapper):
@classmethod
def movies(cls, client, items, **kwargs):
if not items:
return None
return [item for item in [cls.movie(client, item, **kwargs) for item in items] if item]
@classmethod
def movie(cls, client, item, **kwargs):
if not item:
return None
if 'movie' in item:
i_movie = item['movie']
else:
i_movie = item
# Retrieve item keys
pk, keys = cls.get_ids('movie', i_movie)
if pk is None:
return None
# Create object
movie = cls.construct(client, 'movie', i_movie, keys, **kwargs)
# Update with root info
if 'movie' in item:
movie._update(item)
return movie
@classmethod
def shows(cls, client, items, **kwargs):
if not items:
return None
return [item for item in [cls.show(client, item, **kwargs) for item in items] if item]
@classmethod
def show(cls, client, item, **kwargs):
if not item:
return None
if 'show' in item:
i_show = item['show']
else:
i_show = item
# Retrieve item keys
pk, keys = cls.get_ids('show', i_show)
if pk is None:
return None
# Create object
show = cls.construct(client, 'show', i_show, keys, **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
return show
@classmethod
def seasons(cls, client, items, **kwargs):
if not items:
return None
return [item for item in [cls.season(client, item, **kwargs) for item in items] if item]
@classmethod
def season(cls, client, item, **kwargs):
if not item:
return None
if 'season' in item:
i_season = item['season']
else:
i_season = item
# Retrieve item keys
pk, keys = cls.get_ids('season', i_season)
if pk is None:
return None
# Create object
season = cls.construct(client, 'season', i_season, keys, **kwargs)
# Update with root info
if 'season' in item:
season._update(item)
# Process any episodes in the item
for i_episode in item.get('episodes', []):
episode_num = i_episode.get('number')
cls.season_episode(client, season, episode_num, i_episode, **kwargs)
return season
@classmethod
def season_episode(cls, client, season, episode_num, item=None, **kwargs):
if not item:
return
# Construct episode
episode = cls.episode(client, item, **kwargs)
episode.show = season.show
episode.season = season
# Store episode in `season`
season.episodes[episode_num] = episode
@classmethod
def episodes(cls, client, items, **kwargs):
if not items:
return None
return [item for item in [cls.episode(client, item, **kwargs) for item in items] if item]
@classmethod
def episode(cls, client, item, parse_show=False, **kwargs):
if not item:
return None
if 'episode' in item:
i_episode = item['episode']
else:
i_episode = item
# Retrieve item keys
pk, keys = cls.get_ids('episode', i_episode)
if pk is None:
return None
# Create object
episode = cls.construct(client, 'episode', i_episode, keys, **kwargs)
if parse_show:
episode.show = cls.show(client, item)
# Update with root info
if 'episode' in item:
episode._update(item)
return episode
| {
"repo_name": "fuzeman/trakt.py",
"path": "trakt/mapper/summary.py",
"copies": "2",
"size": "3894",
"license": "mit",
"hash": 7564190859349394000,
"line_mean": 24.1225806452,
"line_max": 97,
"alpha_frac": 0.5485362096,
"autogenerated": false,
"ratio": 4.200647249190938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034710981581712616,
"num_lines": 155
} |
from __future__ import absolute_import, division, print_function
from trakt.objects import User, Movie, Show, Episode, Season, CustomList, Comment, Person, PublicList,\
WatchedProgress, CollectionProgress
IDENTIFIERS = {
'movie': [
'imdb',
'tmdb',
'slug',
'trakt'
],
'show': [
'tvdb',
'tmdb',
'imdb',
'tvrage',
'slug',
'trakt'
],
'season': [
'tvdb',
'tmdb',
'trakt'
],
'episode': [
'tvdb',
'tmdb',
'imdb',
'tvrage',
'trakt'
],
'custom_list': [
'trakt',
'slug'
],
'public_list': [
'trakt',
'slug'
],
'person': [
'tmdb',
'imdb',
'tvrage',
'slug',
'trakt'
],
'user': [
'slug'
]
}
class Mapper(object):
@staticmethod
def get_ids(media, item, parent=None):
if not item:
return None, []
ids = item.get('ids', {})
keys = []
for key in IDENTIFIERS.get(media, []):
value = ids.get(key)
if not value:
continue
keys.append((key, str(value)))
if media == 'season' and 'number' in item:
keys.insert(0, item.get('number'))
if media == 'episode':
# Special seasons are typically represented as Season '0'
# so using a simple 'or' condition to use parent will result
# in an attribute error if parent is None
season_no = item.get('season')
if season_no is None and parent is not None:
season_no = parent.pk
keys.insert(0, (
season_no,
item.get('number')
))
if media == 'comment':
keys.insert(0, ('trakt', item.get('id')))
if not len(keys):
return None, []
return keys[0], keys
@classmethod
def construct(cls, client, media, item, keys=None, **kwargs):
if keys is None:
_, keys = cls.get_ids(media, item)
if media == 'movie':
return Movie._construct(client, keys, item, **kwargs)
if media == 'show':
return Show._construct(client, keys, item, **kwargs)
if media == 'season':
return Season._construct(client, keys, item, **kwargs)
if media == 'episode':
return Episode._construct(client, keys, item, **kwargs)
if media == 'comment':
return Comment._construct(client, keys, item, **kwargs)
if media == 'custom_list':
return CustomList._construct(client, keys, item, **kwargs)
if media == 'public_list':
return PublicList._construct(client, keys, item, **kwargs)
if media == 'person':
return Person._construct(client, keys, item, **kwargs)
if media == 'watched_progress':
return WatchedProgress._construct(client, item, **kwargs)
if media == 'collection_progress':
return CollectionProgress._construct(client, item, **kwargs)
if media == 'user':
return User._construct(client, keys, item, **kwargs)
raise ValueError('Unknown media type provided')
| {
"repo_name": "Razzeee/script.module.trakt",
"path": "lib/trakt/mapper/core/base.py",
"copies": "2",
"size": "3319",
"license": "mit",
"hash": -1016653998788618200,
"line_mean": 23.0507246377,
"line_max": 103,
"alpha_frac": 0.5004519434,
"autogenerated": false,
"ratio": 4.133250311332503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5633702254732503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .tree import Tree
class TreeTraversal(object):
def __init__(self, tree):
if not isinstance(tree, Tree):
raise TypeError("Input is not a tree object: %s" %
type(tree))
self.tree = tree
self.stack = [tree]
def __iter__(self):
return self
def next(self):
raise NotImplementedError()
def __next__(self):
return self.next()
class PreOrderTraversal(TreeTraversal):
def next(self):
if not self.stack:
raise StopIteration()
result = self.stack.pop()
c = result.children
for i in range(len(c)):
self.stack.append(c[len(c) - i - 1])
return result
class PostOrderTraversal(TreeTraversal):
def __init__(self, tree):
TreeTraversal.__init__(self, tree)
self.popped = {}
def next(self):
if not self.stack:
raise StopIteration()
result = self.stack.pop()
c = result.children
if result in self.popped:
return result
self.popped[result] = 1
self.stack.append(result)
for i in range(len(c)):
self.stack.append(c[len(c) - i - 1])
return self.next()
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tree_traversal.py",
"copies": "1",
"size": "1312",
"license": "bsd-3-clause",
"hash": 4778426507562630000,
"line_mean": 22.0175438596,
"line_max": 64,
"alpha_frac": 0.5518292683,
"autogenerated": false,
"ratio": 4.01223241590214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506406168420214,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from twisted.python.components import proxyForInterface
from twisted.web.iweb import IResponse
from requests.cookies import cookiejar_from_dict
from treq.content import collect, content, json_content, text_content
class _Response(proxyForInterface(IResponse)):
"""
A wrapper for :class:`twisted.web.iweb.IResponse` which manages cookies and
adds a few convenience methods.
"""
def __init__(self, original, cookiejar):
self.original = original
self._cookiejar = cookiejar
def collect(self, collector):
"""
Incrementally collect the body of the response, per
:func:`treq.collect()`.
:param collector: A single argument callable that will be called
with chunks of body data as it is received.
:returns: A `Deferred` that fires when the entire body has been
received.
"""
return collect(self.original, collector)
def content(self):
"""
Read the entire body all at once, per :func:`treq.content()`.
:returns: A `Deferred` that fires with a `bytes` object when the entire
body has been received.
"""
return content(self.original)
def json(self):
"""
Collect the response body as JSON per :func:`treq.json_content()`.
:rtype: Deferred that fires with the decoded JSON when the entire body
has been read.
"""
return json_content(self.original)
def text(self, encoding='ISO-8859-1'):
"""
Read the entire body all at once as text, per
:func:`treq.text_content()`.
:rtype: A `Deferred` that fires with a unicode string when the entire
body has been received.
"""
return text_content(self.original, encoding)
def history(self):
"""
Get a list of all responses that (such as intermediate redirects),
that ultimately ended in the current response. The responses are
ordered chronologically.
:returns: A `list` of :class:`~treq.response._Response` objects
"""
if not hasattr(self, "previousResponse"):
raise NotImplementedError(
"Twisted < 13.1.0 does not support response history.")
response = self
history = []
while response.previousResponse is not None:
history.append(_Response(response.previousResponse,
self._cookiejar))
response = response.previousResponse
history.reverse()
return history
def cookies(self):
"""
Get a copy of this response's cookies.
:rtype: :class:`requests.cookies.RequestsCookieJar`
"""
jar = cookiejar_from_dict({})
if self._cookiejar is not None:
for cookie in self._cookiejar:
jar.set_cookie(cookie)
return jar
| {
"repo_name": "mithrandi/treq",
"path": "src/treq/response.py",
"copies": "1",
"size": "2990",
"license": "mit",
"hash": 2951767748140525000,
"line_mean": 29.824742268,
"line_max": 79,
"alpha_frac": 0.6130434783,
"autogenerated": false,
"ratio": 4.686520376175548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5799563854475549,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from twisted.python.components import proxyForInterface
from twisted.web.iweb import IResponse, UNKNOWN_LENGTH
from twisted.python import reflect
from requests.cookies import cookiejar_from_dict
from treq.content import collect, content, json_content, text_content
class _Response(proxyForInterface(IResponse)):
"""
A wrapper for :class:`twisted.web.iweb.IResponse` which manages cookies and
adds a few convenience methods.
"""
def __init__(self, original, cookiejar):
self.original = original
self._cookiejar = cookiejar
def __repr__(self):
"""
Generate a representation of the response which includes the HTTP
status code, Content-Type header, and body size, if available.
"""
if self.original.length == UNKNOWN_LENGTH:
size = 'unknown size'
else:
size = '{:,d} bytes'.format(self.original.length)
# Display non-ascii bits of the content-type header as backslash
# escapes.
content_type_bytes = b', '.join(
self.original.headers.getRawHeaders(b'content-type', ()))
content_type = repr(content_type_bytes).lstrip('b')[1:-1]
return "<{} {} '{:.40s}' {}>".format(
reflect.qual(self.__class__),
self.original.code,
content_type,
size,
)
def collect(self, collector):
"""
Incrementally collect the body of the response, per
:func:`treq.collect()`.
:param collector: A single argument callable that will be called
with chunks of body data as it is received.
:returns: A `Deferred` that fires when the entire body has been
received.
"""
return collect(self.original, collector)
def content(self):
"""
Read the entire body all at once, per :func:`treq.content()`.
:returns: A `Deferred` that fires with a `bytes` object when the entire
body has been received.
"""
return content(self.original)
def json(self, **kwargs):
"""
Collect the response body as JSON per :func:`treq.json_content()`.
:param kwargs: Any keyword arguments accepted by :py:func:`json.loads`
:rtype: Deferred that fires with the decoded JSON when the entire body
has been read.
"""
return json_content(self.original, **kwargs)
def text(self, encoding='ISO-8859-1'):
"""
Read the entire body all at once as text, per
:func:`treq.text_content()`.
:rtype: A `Deferred` that fires with a unicode string when the entire
body has been received.
"""
return text_content(self.original, encoding)
def history(self):
"""
Get a list of all responses that (such as intermediate redirects),
that ultimately ended in the current response. The responses are
ordered chronologically.
:returns: A `list` of :class:`~treq.response._Response` objects
"""
response = self
history = []
while response.previousResponse is not None:
history.append(_Response(response.previousResponse,
self._cookiejar))
response = response.previousResponse
history.reverse()
return history
def cookies(self):
"""
Get a copy of this response's cookies.
:rtype: :class:`requests.cookies.RequestsCookieJar`
"""
jar = cookiejar_from_dict({})
if self._cookiejar is not None:
for cookie in self._cookiejar:
jar.set_cookie(cookie)
return jar
| {
"repo_name": "pexip/os-python-treq",
"path": "src/treq/response.py",
"copies": "2",
"size": "3778",
"license": "mit",
"hash": 8652705691150468000,
"line_mean": 31.2905982906,
"line_max": 79,
"alpha_frac": 0.6032292218,
"autogenerated": false,
"ratio": 4.54632972322503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.614955894502503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from twisted.web.client import Agent
from treq.client import HTTPClient
from treq._utils import default_pool, default_reactor
def head(url, **kwargs):
"""
Make a ``HEAD`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).head(url, **kwargs)
def get(url, headers=None, **kwargs):
"""
Make a ``GET`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).get(url, headers=headers, **kwargs)
def post(url, data=None, **kwargs):
"""
Make a ``POST`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).post(url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""
Make a ``PUT`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).put(url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""
Make a ``PATCH`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).patch(url, data=data, **kwargs)
def delete(url, **kwargs):
"""
Make a ``DELETE`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).delete(url, **kwargs)
def request(method, url, **kwargs):
"""
Make an HTTP request.
:param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``. ``'PUT'``,
``'POST'``.
:param str url: http or https URL, which may include query arguments.
:param headers: Optional HTTP Headers to send with this request.
:type headers: Headers or None
:param params: Optional parameters to be append as the query string to
the URL, any query string parameters in the URL already will be
preserved.
:type params: dict w/ str or list/tuple of str values, list of 2-tuples, or
None.
:param data: Optional request body.
:type data: str, file-like, IBodyProducer, or None
:param reactor: Optional twisted reactor.
:param bool persistent: Use persistent HTTP connections. Default: ``True``
:param bool allow_redirects: Follow HTTP redirects. Default: ``True``
:param auth: HTTP Basic Authentication information.
:type auth: tuple of ``('username', 'password')``.
:param cookies: Cookies to send with this request. The HTTP kind, not the
tasty kind.
:type cookies: ``dict`` or ``cookielib.CookieJar``
:param int timeout: Request timeout seconds. If a response is not
received within this timeframe, a connection is aborted with
``CancelledError``.
:rtype: Deferred that fires with an IResponse provider.
"""
return _client(**kwargs).request(method, url, **kwargs)
#
# Private API
#
def _client(*args, **kwargs):
agent = kwargs.get('agent')
if agent is None:
reactor = default_reactor(kwargs.get('reactor'))
pool = default_pool(reactor,
kwargs.get('pool'),
kwargs.get('persistent'))
agent = Agent(reactor, pool=pool)
return HTTPClient(agent)
| {
"repo_name": "glyph/treq",
"path": "treq/api.py",
"copies": "1",
"size": "3067",
"license": "mit",
"hash": 594815240672210300,
"line_mean": 24.9915254237,
"line_max": 79,
"alpha_frac": 0.6224323443,
"autogenerated": false,
"ratio": 3.843358395989975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9965790740289975,
"avg_score": 0,
"num_lines": 118
} |
from __future__ import absolute_import, division, print_function
from urllib.parse import urlparse, parse_qs
from os.path import join, dirname
import shutil
import mimetypes
import unittest
import httmock
import tempfile
from ..cache import guess_url_file_extension, EsriRestDownloadTask
class TestCacheExtensionGuessing (unittest.TestCase):
def response_content(self, url, request):
''' Fake HTTP responses for use with HTTMock in tests.
'''
scheme, host, path, _, query, _ = urlparse(url.geturl())
tests_dirname = dirname(__file__)
if host == 'fake-cwd.local':
with open(tests_dirname + path, 'rb') as file:
type, _ = mimetypes.guess_type(file.name)
return httmock.response(200, file.read(), headers={'Content-Type': type})
elif (host, path) == ('www.ci.berkeley.ca.us', '/uploadedFiles/IT/GIS/Parcels.zip'):
with open(join(tests_dirname, 'data', 'us-ca-berkeley-excerpt.zip'), 'rb') as file:
return httmock.response(200, file.read(), headers={'Content-Type': 'application/octet-stream'})
elif (host, path) == ('data.sfgov.org', '/download/kvej-w5kb/ZIPPED%20SHAPEFILE'):
return httmock.response(302, '', headers={'Location': 'http://apps.sfgov.org/datafiles/view.php?file=sfgis/eas_addresses_with_units.zip'})
elif (host, path, query) == ('apps.sfgov.org', '/datafiles/view.php', 'file=sfgis/eas_addresses_with_units.zip'):
with open(join(tests_dirname, 'data', 'us-ca-san_francisco-excerpt.zip'), 'rb') as file:
return httmock.response(200, file.read(), headers={'Content-Type': 'application/download', 'Content-Disposition': 'attachment; filename=eas_addresses_with_units.zip;'})
elif (host, path, query) == ('dcatlas.dcgis.dc.gov', '/catalog/download.asp', 'downloadID=2182&downloadTYPE=ESRI'):
return httmock.response(200, b'FAKE'*99, headers={'Content-Type': 'application/x-zip-compressed'})
elif (host, path, query) == ('data.northcowichan.ca', '/DataBrowser/DownloadCsv', 'container=mncowichan&entitySet=PropertyReport&filter=NOFILTER'):
return httmock.response(200, b'FAKE,FAKE\n'*99, headers={'Content-Type': 'text/csv', 'Content-Disposition': 'attachment; filename=PropertyReport.csv'})
raise NotImplementedError(url.geturl())
def test_urls(self):
with httmock.HTTMock(self.response_content):
assert guess_url_file_extension('http://fake-cwd.local/conforms/lake-man-3740.csv') == '.csv'
assert guess_url_file_extension('http://fake-cwd.local/data/us-ca-carson-0.json') == '.json'
assert guess_url_file_extension('http://fake-cwd.local/data/us-ca-oakland-excerpt.zip') == '.zip'
assert guess_url_file_extension('http://www.ci.berkeley.ca.us/uploadedFiles/IT/GIS/Parcels.zip') == '.zip'
assert guess_url_file_extension('https://data.sfgov.org/download/kvej-w5kb/ZIPPED%20SHAPEFILE') == '.zip'
assert guess_url_file_extension('http://dcatlas.dcgis.dc.gov/catalog/download.asp?downloadID=2182&downloadTYPE=ESRI') == '.zip'
assert guess_url_file_extension('http://data.northcowichan.ca/DataBrowser/DownloadCsv?container=mncowichan&entitySet=PropertyReport&filter=NOFILTER') == '.csv', guess_url_file_extension('http://data.northcowichan.ca/DataBrowser/DownloadCsv?container=mncowichan&entitySet=PropertyReport&filter=NOFILTER')
class TestCacheEsriDownload (unittest.TestCase):
def setUp(self):
''' Prepare a clean temporary directory, and work there.
'''
self.workdir = tempfile.mkdtemp(prefix='testCache-')
def tearDown(self):
shutil.rmtree(self.workdir)
def response_content(self, url, request):
''' Fake HTTP responses for use with HTTMock in tests.
'''
scheme, host, path, _, query, _ = urlparse(url.geturl())
data_dirname = join(dirname(__file__), 'data')
local_path = False
if host == 'www.carsonproperty.info':
qs = parse_qs(query)
if path == '/ArcGIS/rest/services/basemap/MapServer/1/query':
body_data = parse_qs(request.body) if request.body else {}
if qs.get('returnIdsOnly') == ['true']:
local_path = join(data_dirname, 'us-ca-carson-ids-only.json')
elif qs.get('returnCountOnly') == ['true']:
local_path = join(data_dirname, 'us-ca-carson-count-only.json')
elif body_data.get('outSR') == ['4326']:
local_path = join(data_dirname, 'us-ca-carson-0.json')
elif path == '/ArcGIS/rest/services/basemap/MapServer/1':
if qs.get('f') == ['json']:
local_path = join(data_dirname, 'us-ca-carson-metadata.json')
if host == 'gis.cmpdd.org':
qs = parse_qs(query)
if path == '/arcgis/rest/services/Viewers/Madison/MapServer/13/query':
body_data = parse_qs(request.body) if request.body else {}
if qs.get('returnIdsOnly') == ['true']:
local_path = join(data_dirname, 'us-ms-madison-ids-only.json')
elif qs.get('returnCountOnly') == ['true']:
local_path = join(data_dirname, 'us-ms-madison-count-only.json')
elif qs.get('outStatistics'):
local_path = join(data_dirname, 'us-ms-madison-outStatistics.json')
elif body_data.get('outSR') == ['4326']:
local_path = join(data_dirname, 'us-ms-madison-0.json')
elif path == '/arcgis/rest/services/Viewers/Madison/MapServer/13':
if qs.get('f') == ['json']:
local_path = join(data_dirname, 'us-ms-madison-metadata.json')
if host == 'sampleserver6.arcgisonline.com':
qs = parse_qs(query)
if path == '/arcgis/rest/services/Recreation/FeatureServer/0/query':
body_data = parse_qs(request.body) if request.body else {}
if qs.get('returnCountOnly') == ['true']:
local_path = join(data_dirname, 'us-esri-test-count-only.json')
elif body_data.get('outSR') == ['4326']:
local_path = join(data_dirname, 'us-esri-test-0.json')
elif path == '/arcgis/rest/services/Recreation/FeatureServer/0':
if qs.get('f') == ['json']:
local_path = join(data_dirname, 'us-esri-test-metadata.json')
if local_path:
type, _ = mimetypes.guess_type(local_path)
with open(local_path, 'rb') as file:
return httmock.response(200, file.read(), headers={'Content-Type': type})
raise NotImplementedError(url.geturl())
def test_download_carson(self):
""" ESRI Caching Supports Object ID Enumeration """
with httmock.HTTMock(self.response_content):
task = EsriRestDownloadTask('us-ca-carson')
task.download(['http://www.carsonproperty.info/ArcGIS/rest/services/basemap/MapServer/1'], self.workdir)
def test_download_madison(self):
""" ESRI Caching Supports Statistics Pagination """
with httmock.HTTMock(self.response_content):
task = EsriRestDownloadTask('us-ms-madison')
task.download(['http://gis.cmpdd.org/arcgis/rest/services/Viewers/Madison/MapServer/13'], self.workdir)
def test_download_esri_sample(self):
""" ESRI Caching Supports Advanced Query Pagination """
with httmock.HTTMock(self.response_content):
task = EsriRestDownloadTask('us-esri-test')
task.download(['https://sampleserver6.arcgisonline.com/arcgis/rest/services/Recreation/FeatureServer/0'], self.workdir)
| {
"repo_name": "slibby/machine",
"path": "openaddr/tests/cache.py",
"copies": "1",
"size": "7932",
"license": "isc",
"hash": -8399634389998902000,
"line_mean": 52.5945945946,
"line_max": 315,
"alpha_frac": 0.6125819465,
"autogenerated": false,
"ratio": 3.592391304347826,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4704973250847826,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from weakref import WeakKeyDictionary
from toolz import curry, concat, first, memoize
from multipledispatch import MDNotImplementedError
from ..expr import (
Distinct,
ElemWise,
Expr,
Field,
Head,
Projection,
Slice,
Symbol,
path,
symbol,
)
from ..expr.optimize import lean_projection, simple_selections
from ..expr.split import split
from ..partition import partitions
from .core import compute
from .pmap import get_default_pmap
from collections import Iterator, Iterable
import datashape
import bcolz
import numpy as np
import pandas as pd
from ..dispatch import dispatch
from odo import into
__all__ = ['bcolz']
COMFORTABLE_MEMORY_SIZE = 1e9
@memoize(cache=WeakKeyDictionary())
def box(type_):
"""Create a non-iterable box type for an object.
Parameters
----------
type_ : type
The type to create a box for.
Returns
-------
box : type
A type to box values of type ``type_``.
"""
class c(object):
__slots__ = 'value',
def __init__(self, value):
if not isinstance(value, type_):
raise TypeError(
"values must be of type '%s' (received '%s')" % (
type_.__name__, type(value).__name__,
),
)
self.value = value
c.__name__ = 'box(%s)' % type_.__name__
return c
@dispatch(Expr, (box(bcolz.ctable), box(bcolz.carray)))
def optimize(expr, _):
return simple_selections(lean_projection(expr))
@dispatch(Expr, (bcolz.ctable, bcolz.carray))
def pre_compute(expr, data, scope=None, **kwargs):
# box the data so that we don't need to deal with ambiguity of ctable
# and carray being instances of the Iterator ABC.
return box(type(data))(data)
@dispatch(Expr, (box(bcolz.ctable), box(bcolz.carray)))
def post_compute(expr, data, **kwargs):
# Unbox the bcolz objects.
return data.value
@dispatch((box(bcolz.carray), box(bcolz.ctable)))
def discover(data):
val = data.value
return datashape.from_numpy(val.shape, val.dtype)
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, (box(bcolz.ctable), box(bcolz.carray)))
def compute_down(expr, data, **kwargs):
""" Cheap and simple computation in simple case
If we're given a head and the entire expression is cheap to do (e.g.
elemwises, selections, ...) then compute on data directly, without
parallelism"""
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
val = data.value
return compute(
expr,
{leaf: into(Iterator, val)},
return_type='native',
**kwargs
)
else:
raise MDNotImplementedError()
@dispatch(Field, box(bcolz.ctable))
def compute_up(expr, data, **kwargs):
return data.value[str(expr._name)]
@dispatch(Projection, box(bcolz.ctable))
def compute_up(expr, data, **kwargs):
return data.value[list(map(str, expr.fields))]
@dispatch(Slice, (box(bcolz.carray), box(bcolz.ctable)))
def compute_up(expr, data, **kwargs):
return data.value[expr.index]
def compute_chunk(source, chunk, chunk_expr, data_index):
part = source[data_index]
return compute(chunk_expr, {chunk: part}, return_type='native')
def get_chunksize(data):
if isinstance(data, bcolz.carray):
return data.chunklen
elif isinstance(data, bcolz.ctable):
return min(data[c].chunklen for c in data.names)
else:
raise TypeError("Don't know how to compute chunksize for type %r" %
type(data).__name__)
@dispatch(Expr, (box(bcolz.carray), box(bcolz.ctable)))
def compute_down(expr, data, chunksize=None, map=None, **kwargs):
data = data.value
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
if chunksize is None:
chunksize = max(2**16, get_chunksize(data))
# If the bottom expression is a projection or field then want to do
# compute_up first
children = {
e for e in expr._traverse()
if isinstance(e, Expr)
and any(i is expr._leaves()[0] for i in e._inputs)
}
if len(children) == 1 and isinstance(first(children), (Field, Projection)):
raise MDNotImplementedError()
chunk = symbol('chunk', chunksize * leaf.schema)
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr, chunk=chunk)
data_parts = partitions(data, chunksize=(chunksize,))
parts = list(map(curry(compute_chunk, data, chunk, chunk_expr),
data_parts))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], Iterable):
intermediate = list(concat(parts))
else:
raise TypeError("Don't know how to concatenate objects of type %r" %
type(parts[0]).__name__)
return compute(agg_expr, {agg: intermediate}, return_type='native')
def _asarray(a):
if isinstance(a, (bcolz.carray, bcolz.ctable)):
return a[:]
return np.array(list(a))
@compute_down.register(Expr, (box(bcolz.carray), box(bcolz.ctable)), Iterable)
@compute_down.register(Expr, Iterable, (box(bcolz.carray), box(bcolz.ctable)))
def bcolz_mixed(expr, a, b, **kwargs):
return compute(
expr,
dict(zip(expr._leaves(), map(_asarray, (a.value, b.value)))),
return_type='native',
)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/compute/bcolz.py",
"copies": "3",
"size": "5577",
"license": "bsd-3-clause",
"hash": 1218582729515177000,
"line_mean": 26.6089108911,
"line_max": 79,
"alpha_frac": 0.629908553,
"autogenerated": false,
"ratio": 3.50534255185418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00011512779184895233,
"num_lines": 202
} |
from __future__ import absolute_import, division, print_function
from workflows.recipe.recipe import Recipe
from workflows.recipe.wrapper import RecipeWrapper
def _wrap_subscription(transport_layer, subscription_call, channel, callback,
*args, **kwargs):
'''Internal method to create an intercepting function for incoming messages
to interpret recipes. This function is then used to subscribe to a channel
on the transport layer.
:param transport_layer: Reference to underlying transport object.
:param subscription_call: Reference to the subscribing function of the
transport layer.
:param channel: Channel name to subscribe to.
:param callback: Real function to be called when messages are received.
The callback will pass three arguments,
a RecipeWrapper object (details below), the header as
a dictionary structure, and the message.
:param allow_non_recipe_messages: Pass on incoming messages that do not
include recipe information. In this case the first
argument to the callback function will be 'None'.
:param log_extender: If the recipe contains useful contextual information
for log messages, such as a unique ID which can be used
to connect all messages originating from the same
recipe, then the information will be passed to this
function, which must be a context manager factory.
:return: Return value of call to subscription_call.
'''
allow_non_recipe_messages = kwargs.pop('allow_non_recipe_messages', False)
log_extender = kwargs.pop('log_extender', None)
def unwrap_recipe(header, message):
'''This is a helper function unpacking incoming messages when they are
in a recipe format. Other messages are passed through unmodified.
:param header: A dictionary of message headers. If the header contains
an entry 'workflows-recipe' then the message is parsed
and the embedded recipe information is passed on in a
RecipeWrapper object to the target function.
:param message: Incoming deserialized message object.
'''
if header.get('workflows-recipe') in (True, 'True', 'true', 1):
rw = RecipeWrapper(message=message, transport=transport_layer)
if log_extender and rw.environment and rw.environment.get('ID'):
with log_extender('recipe_ID', rw.environment['ID']):
return callback(rw, header, message.get('payload'))
return callback(rw, header, message.get('payload'))
if allow_non_recipe_messages:
return callback(None, header, message)
# self.log.warning('Discarding non-recipe message:\n' + \
# "First 1000 characters of header:\n%s\n" + \
# "First 1000 characters of message:\n%s",
# str(header)[:1000], str(message)[:1000])
transport_layer.nack(header)
return subscription_call(channel, unwrap_recipe, *args, **kwargs)
def wrap_subscribe(transport_layer, channel, callback, *args, **kwargs):
'''Listen to a queue on the transport layer, similar to the subscribe call in
transport/common_transport.py. Intercept all incoming messages and parse
for recipe information.
See common_transport.subscribe for possible additional keyword arguments.
:param transport_layer: Reference to underlying transport object.
:param channel: Queue name to subscribe to.
:param callback: Function to be called when messages are received.
The callback will pass three arguments,
a RecipeWrapper object (details below), the header as
a dictionary structure, and the message.
:return: A unique subscription ID
'''
return _wrap_subscription(transport_layer, transport_layer.subscribe,
channel, callback, *args, **kwargs)
def wrap_subscribe_broadcast(transport_layer, channel, callback, *args, **kwargs):
'''Listen to a topic on the transport layer, similar to the
subscribe_broadcast call in transport/common_transport.py. Intercept all
incoming messages and parse for recipe information.
See common_transport.subscribe_broadcast for possible arguments.
:param transport_layer: Reference to underlying transport object.
:param channel: Topic name to subscribe to.
:param callback: Function to be called when messages are received.
The callback will pass three arguments,
a RecipeWrapper object (details below), the header as
a dictionary structure, and the message.
:return: A unique subscription ID
'''
return _wrap_subscription(
transport_layer,
transport_layer.subscribe_broadcast,
channel,
callback,
*args, **kwargs
)
| {
"repo_name": "xia2/workflows",
"path": "workflows/recipe/__init__.py",
"copies": "1",
"size": "5071",
"license": "bsd-3-clause",
"hash": -4851574098059145000,
"line_mean": 51.8229166667,
"line_max": 82,
"alpha_frac": 0.6576612108,
"autogenerated": false,
"ratio": 4.8853564547206165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6043017665520617,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import#, division, print_function
from wsgiref import simple_server
import json
import sys, os
import datetime
import functools
import falcon
from server import (doctor, patient, appointment, obj, prescription, comment,
discharge, auth)
from server import rediscli
from server.utils import logger
def max_body(limit):
def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = ('The size of the request is too large. The body must not '
'exceed ' + str(limit) + ' bytes in length.')
raise falcon.HTTPRequestEntityTooLarge(
'Request body is too large', msg)
return hook
def authentication(req, required_roles, doctorid='', patientid=''):
"""
required_roles is a list of role's string
TODO: change back to the decorator way of authentication
may get params value (value of field expression) here by:
params['doctorid'], see docstring in falcon.hooks.before(action)
"""
token = req.get_header('token')
role_req = req.get_header('role')
logger.debug('role:{}, token:{}'.format(role_req, token))
if token is None or role_req not in required_roles:
description = ('Please provide an auth token '
'and a correspond role'
'as part of the request.')
raise falcon.HTTPUnauthorized('Auth token and Role required',
description,
href='http://pa2515-hms-server.readthedocs.org/en/latest/auth')
# for role in required_roles:
if role_req == 'admin':
token_true = rediscli.get_data('auth/admin')
# token_true = 'abc'
logger.debug('token in headers:{}, \n token in redis:{}'.format(
token, token_true
))
if token == token_true:
return True
elif role_req == 'doctor':
logger.debug('in doctor authentication doctorid:{}'.format(doctorid))
token_true = rediscli.get_data('auth/{}'.format(doctorid))
# logger.debug('patient_list:{}, type:{}'.format(patient_list, type(patient_list)))
logger.debug('token in headers:{}, \n token in redis:{}'.format(
token, token_true
))
if token == token_true:
return True
elif role_req == 'patient':
logger.debug('in patient authentication patientid:{}'.format(patientid))
token_true = rediscli.get_data('auth/{}'.format(patientid))
# logger.debug('patient_list:{}, type:{}'.format(patient_list, type(patient_list)))
logger.debug('token in headers:{}, \n token in redis:{}'.format(
token, token_true
))
if token == token_true:
return True
# elif role_req == 'patient':
# token = uuid.uuid4().hex
# rediscli.set_data('auth/{}'.format(username), token)
description = ('The provided auth token is not valid. '
'Please request a new token and try again.')
raise falcon.HTTPUnauthorized('Authentication required',
description,
href='http://pa2515-hms-server.readthedocs.org/en/latest/auth')
return False
# class authentication:
# """
# required_roles is a list of role's string
# """
# def __init__(self, required_roles=[]):
# self.required_roles = required_roles
# # def hook(req, resp, resource, params, required_roles, *args, **kwargs):
#
# def __call__(self, f):
# def hook(*args, **kwargs):
# req = args[1]
#
# token = req.get_header('token')
# if token is None:
# description = ('Please provide an auth token '
# 'as part of the request.')
#
# raise falcon.HTTPUnauthorized('Auth token required',
# description,
# href='http://pa2515-hms-server.readthedocs.org/en/latest/auth')
#
# for role in required_roles:
# if role == 'admin':
# token_true = rediscli.get_data('auth/admin')
# # token_true = 'abc'
# logger.debug('token in headers:{}, \n token in redis:{}'.format(
# token, token_true
# ))
# if token == token_true:
# return True
#
# elif role == 'doctor':
# logger.debug('in doctor authentication doctorid:{}'.format(doctorid))
# token_true = rediscli.get_data('auth/{}'.format(doctorid))
# logger.debug('patient_list:{}, type:{}'.format(patient_list, type(patient_list)))
# logger.debug('token in headers:{}, \n token in redis:{}'.format(
# token, token_true
# ))
# if token == token_true:
# return True
# # elif role == 'patient':
# # token = uuid.uuid4().hex
# # rediscli.set_data('auth/{}'.format(username), token)
#
# description = ('The provided auth token is not valid. '
# 'Please request a new token and try again.')
#
# raise falcon.HTTPUnauthorized('Authentication required',
# description,
# href='http://pa2515-hms-server.readthedocs.org/en/latest/auth')
# return hook
class RegDoctorListener:
@falcon.before(max_body(64*1024))
# @falcon.before(authentication(['admin']))
def on_post(self, req, resp):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains doctor's id, or other related info
{"doctorid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin'])
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, doctorid, password = doctor.register_doctor(post_data)
except Exception as ex:
logger.exception('error when register doctor, ', ex)
resp_dict['info'] = 'Error when register doctor {}'.format(
post_data['last_name'])
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('register ok, status positive')
resp_dict['info'] = 'Register doctor {} success'.format(
post_data['last_name'])
resp_dict['doctorid'] = doctorid
resp_dict['password'] = password
resp.status = falcon.HTTP_201
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to register doctor, ', ex)
resp_dict['errinfo'] = 'Error when register doctor {}'.format(
post_data['last_name'])
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
class DoctorListener:
# def __init__(self, doctorid):
# self.doctorid = doctorid
def on_get(self, req, resp, doctorid):
"""
Get info of a doctor in the system. The response data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains doctor's info
{"doctorid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin', 'doctor'], doctorid=doctorid)
resp_dict = {}
try:
"""
handle_request:
"""
status, doctorinfo = doctor.get_doctor(doctorid)
except Exception as ex:
logger.exception('error when get doctor, ', ex)
resp_dict['info'] = 'Error when get doctor {}'.format(
doctorid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
resp_dict['info'] = 'Get doctor {} success'.format(
doctorid)
resp_dict['doctorinfo'] = doctorinfo
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to get doctor')
resp_dict['info'] = 'Error when get doctor {}'.format(
doctorid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
@falcon.before(max_body(64*1024))
# @falcon.before(authentication(['admin', 'doctor']))
def on_put(self, req, resp, doctorid):
"""
Edit a doctor in the system. The PUT data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains doctor's id, or other related info
{"doctorid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin', 'doctor'], doctorid=doctorid)
resp_dict = {}
logger.debug('in doctor put')
try:
# have pre-processed by JSONTranslator, post_data is a dict
logger.debug('in doctor put, before got post_data')
post_data = req.context['doc']
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, doctorid = doctor.edit_doctor(doctorid, post_data)
except Exception as ex:
logger.exception('error when edit doctor, ', ex)
resp_dict['info'] = 'Error when edit doctor {}'.format(
doctorid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('Edit ok, status positive')
resp_dict['info'] = 'Edit doctor {} success'.format(
doctorid)
resp_dict['doctorid'] = doctorid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to Edit doctor, ', ex)
resp_dict['errinfo'] = 'Error when Edit doctor {}'.format(
doctorid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
def on_delete(self, req, resp):
pass
class DoctorListListener:
# def __init__(self, doctorid):
# self.doctorid = doctorid
def on_get(self, req, resp):
"""
Get info of a doctor in the system. The response data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains doctor's info
{"doctorid":'d001', "info":{"info1":''}}
"""
# authentication(req, ['admin', 'doctor', 'patient'])
resp_dict = {}
try:
"""
handle_request:
"""
status, doctor_list = doctor.get_doctors()
except Exception as ex:
logger.exception('error when get doctor, ', ex)
resp_dict['info'] = 'Error when get doctors'
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
resp_dict['info'] = 'Get doctors success'
resp_dict['doctor_list'] = doctor_list
# resp.status = status or falcon.HTTP_200
logger.debug(json.dumps(resp_dict))
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to get doctor')
resp_dict['info'] = 'Error when get doctors'
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
class RegPatientListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp):
"""
Register a patient in the system. The post data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin', 'doctor'])
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, patientid, password = patient.register_patient(post_data)
except Exception as ex:
logger.exception('error when register patient, ', ex)
resp_dict['info'] = 'Error when register patient {}'.format(
post_data['last_name'])
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('register ok, status positive')
resp_dict['info'] = 'Register patient {} success'.format(
post_data['last_name'])
resp_dict['patientid'] = patientid
resp_dict['password'] = password
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to register patient, ', ex)
resp_dict['errinfo'] = 'Error when register patient {}'.format(
post_data['last_name'])
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class PatientListener:
def on_get(self, req, resp, patientid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains patient's info
{"patientid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin', 'doctor', 'patient'], patientid=patientid)
resp_dict = {}
try:
"""
handle_request:
"""
status, patientinfo = patient.get_patient(patientid)
except Exception as ex:
logger.exception('error when get patient, ', ex)
resp_dict['info'] = 'Error when get patient {}'.format(
patientid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
# resp_dict['info'] = 'Get patient {} success'.format(
# patientid)
# resp_dict['patientinfo'] = patientinfo
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = patientinfo
else:
logger.exception('return error when try to get patient')
resp_dict['info'] = 'Error when get patient {}'.format(
patientid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
@falcon.before(max_body(64*1024))
def on_put(self, req, resp, patientid):
"""
Edit a patient in the system. The PUT data is in json format.
:param req.header.token: token
:param req.header.role: role
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
authentication(req, ['admin', 'doctor', 'patient'], patientid=patientid)
resp_dict = {}
logger.debug('in patient put')
try:
# have pre-processed by JSONTranslator, post_data is a dict
logger.debug('in patient put, before got post_data')
post_data = req.context['doc']
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, patientid = patient.edit_patient(patientid, post_data)
except Exception as ex:
logger.exception('error when edit patient, ', ex)
resp_dict['info'] = 'Error when edit patient {}'.format(
patientid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('Edit ok, status positive')
resp_dict['info'] = 'Edit patient {} success'.format(
patientid)
resp_dict['patientid'] = patientid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
else:
logger.exception('return error when try to Edit patient, ', ex)
resp_dict['errinfo'] = 'Error when Edit patient {}'.format(
patientid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
def on_delete(self, req, resp):
pass
class MakeAppointmentListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp):
"""
Make an appointment in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains the appointment's url, or other related info
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# authentication(req, ['admin', 'doctor', 'patient'],
# doctorid=post_data['doctorid'], patientid=post_data['patientid'])
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
status, appointment_url = appointment.make_appointment(post_data)
except Exception as ex:
logger.exception('error when Make an appointment, ', ex)
resp_dict['info'] = 'Error when Make an appointment {}'.format(
post_data['doctorid']+post_data['patientid']+post_data['datetimeslot'])
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('make appointment ok, status positive')
resp_dict['info'] = 'make appointment {} success'.format(
post_data['doctorid']+post_data['patientid']+post_data['datetimeslot'])
resp_dict['appointment_url'] = appointment_url
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(resp_dict,
sort_keys=True, indent=4)
else:
logger.error('return error when try to make appointment')
resp_dict['errinfo'] = 'Error when make appointment {}'.format(
post_data['doctorid']+post_data['patientid']+post_data['datetimeslot'])
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
class AppointmentListener:
def on_get(self, req, resp, doctorid, datetimeslot, patientid):
"""
Get info of a doctor in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"doctorid":'d001', "info":{"info1":''}}
"""
# authentication(req, ['admin', 'doctor', 'patient'],
# doctorid=doctorid, patientid=doctorid)
resp_dict = {}
try:
"""
handle_request:
"""
apmt_url = doctorid + '/' + datetimeslot + '/' + patientid
status, appointment_info = appointment.get_appointment(apmt_url)
except Exception as ex:
logger.exception('error when get appointment_info, ', ex)
resp_dict['info'] = 'Error when get appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
# resp_dict['info'] = 'Get appointment_info {} success'.format(
# apmt_url)
# resp_dict['appointment_info'] = appointment_info
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(appointment_info,
sort_keys=True, indent=4)
else:
logger.exception('return error when try to get appointment_info')
resp_dict['info'] = 'Error when get appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict, sort_keys=True,
indent=4)
def on_put(self, req, resp):
pass
def on_delete(self, req, resp, doctorid, datetimeslot, patientid):
"""
Delete info of a doctor in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"doctorid":'d001', "info":{"info1":''}}
"""
# authentication(req, ['admin', 'doctor', 'patient'],
# doctorid=doctorid, patientid=doctorid)
resp_dict = {}
try:
"""
handle_request:
"""
apmt_url = doctorid + '/' + datetimeslot + '/' + patientid
status, appointment_info = appointment.delete_appointment(doctorid, datetimeslot, patientid)
except Exception as ex:
logger.exception('error when delete appointment_info, ', ex)
resp_dict['info'] = 'Error when delete appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('delete ok, status positive')
# resp_dict['info'] = 'Get appointment_info {} success'.format(
# apmt_url)
# resp_dict['appointment_info'] = appointment_info
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(appointment_info,
sort_keys=True, indent=4)
else:
logger.exception('return error when try to delete appointment_info')
resp_dict['info'] = 'Error when delete appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict, sort_keys=True,
indent=4)
class AppointmentListListener:
def on_get(self, req, resp, doctorid, date):
"""
Get info of a doctor in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"doctorid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, appointment_info = appointment.check_appointment(doctorid, date)
except Exception as ex:
logger.exception('error when get appointment_info, ', ex)
resp_dict['info'] = 'Error when get appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
resp_dict['info'] = 'Get appointment_info {} success'.format(
apmt_url)
# resp_dict['appointment_info'] = appointment_info
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(appointment_info,
sort_keys=True, indent=4)
else:
logger.exception('return error when try to get appointment_info')
resp_dict['info'] = 'Error when get appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict, sort_keys=True,
indent=4)
def on_put(self, req, resp):
pass
def on_delete(self, req, resp):
pass
class AppointmentSinkAdapter(object):
def __call__(self, req, resp, doctor_date):
"""
:param req.header.username: the username, should be tenant:user when dev
:param req.header.password: password
:doctor_date the part in the request url /v1/disk/(?P<doctor_date>.+?), to
identify the resource to manipulate
:returns: a json contains correspond response info
GET: the temp_url of the file in a resp dict
PUT: the auth_token and storage_url in a resp dict for uploading file
DELETE: description of if the operation success or fail
"""
logger.debug('in sink req.method:%s doctor_date:%s' % (
req.method, doctor_date))
resp_dict = {}
try:
username = req.get_header('username') or 'un'
password = req.get_header('password') or 'pw'
req_dir = req.get_header('dir') or None
logger.debug('username:%s, password:%s' % (username, password))
except:
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
if req.method == 'GET':
try:
url_list = doctor_date.split('/')
if len(url_list) == 2:
# check_appointment
logger.debug(url_list)
status, schedule = appointment.check_appointment(url_list[0], url_list[1])
logger.debug('in sink schedule data:{}'.format(schedule))
# except UserNotExistException:
# logger.debug('in UserNotExistException')
# resp_dict['info'] = 'user:%s does not exist' % username
# resp.status = falcon.HTTP_404
# resp.body = json.dumps(resp_dict, encoding='utf-8')
except:
description = ('Unknown error, username and passwd ok!')
raise falcon.HTTPServiceUnavailable(
'Service Error',
description,
30)
else:
# resp_dict['info'] = 'doctor_date:%s ' % doctor_date
if status:
logger.debug('get ok, status positive')
resp.status = falcon.HTTP_200
resp.body = schedule
else:
logger.exception('return error when try to get appointment_info')
resp_dict['info'] = 'Error when get appointment_info {}'.format(
apmt_url)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict, sort_keys=True,
indent=4)
class PostObjListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp, patientid):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, obj_dict = obj.upload_obj(patientid, post_data)
except Exception as ex:
logger.exception('error when register patient, ', ex)
resp_dict['info'] = 'Error when register patient {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('register ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(obj_dict)
else:
logger.exception('return error when try to register patient, ', ex)
resp_dict['errinfo'] = 'Error when register patient {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class ObjectListener:
def on_get(self, req, resp, patientid, objid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"objid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, obj_dict = obj.get_obj(patientid, objid)
except Exception as ex:
logger.exception('error when get object, ', ex)
resp_dict['errinfo'] = 'Error when get patietn:{} object {}'.format(
patientid, objid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(obj_dict)
else:
logger.exception('return error when try to get object, ', ex)
resp_dict['errinfo'] = 'Error when get patietn:{} object {}'.format(
patientid, objid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
def on_delete(self, req, resp, patientid, objid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"objid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, obj_dict = obj.delete_obj(patientid, objid)
except Exception as ex:
logger.exception('error when delete object, ', ex)
resp_dict['errinfo'] = 'Error when delete patietn:{} object {}'.format(
patientid, objid)
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('delete ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_204
resp.body = json.dumps(obj_dict)
else:
logger.exception('return error when try to delete object, ', ex)
resp_dict['errinfo'] = 'Error when delete patietn:{} object {}'.format(
patientid, objid)
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class ObjectListListener:
def on_get(self, req, resp, patientid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, objs_dict_list = obj.get_objs(patientid)
except Exception as ex:
logger.exception('error when get objs, ', ex)
resp_dict['info'] = 'Error when get objs {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get objs ok, status positive')
# resp_dict['info'] = 'Register {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(objs_dict_list)
else:
logger.exception('return error when try to get objs, ', ex)
resp_dict['errinfo'] = 'Error when get objs {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class PostPrescriptionListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp, doctorid, patientid):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, prescription_dict = prescription.upload_prescription(
patientid, doctorid, post_data)
except Exception as ex:
logger.exception('error when post prescription, ', ex)
resp_dict['info'] = 'Error when post prescription {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('post prescription ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(prescription_dict)
else:
logger.exception('return error when try to post prescription, ', ex)
resp_dict['errinfo'] = 'Error when post prescription {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class PrescriptionListListener:
def on_get(self, req, resp, patientid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, prescription_list = prescription.get_prescriptions(
patientid)
except Exception as ex:
logger.exception('error when get prescriptions, ', ex)
resp_dict['info'] = 'Error when get prescriptions {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get prescriptions ok, status positive')
# resp_dict['info'] = 'Register {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(prescription_list)
else:
logger.exception('return error when try to get prescriptions, ', ex)
resp_dict['errinfo'] = 'Error when get prescriptions {}'
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class PostCommentListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp, doctorid, patientid):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, comment_dict = comment.upload_comment(
patientid, doctorid, post_data)
except Exception as ex:
logger.exception('error when post comment, ', ex)
resp_dict['info'] = 'Error when post comment {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('post comment ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(comment_dict)
else:
logger.exception('return error when try to post comment, ', ex)
resp_dict['errinfo'] = 'Error when post comment {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class CommentListListener:
def on_get(self, req, resp, patientid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, comment_list = comment.get_comments(patientid)
except Exception as ex:
logger.exception('error when get comments, ', ex)
resp_dict['info'] = 'Error when get comments {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get comments ok, status positive')
# resp_dict['info'] = 'Register {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(comment_list)
else:
logger.exception('return error when try to get comments, ', ex)
resp_dict['errinfo'] = 'Error when get comments'
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class PostDischargeListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp, doctorid, patientid):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, discharge_dict = discharge.upload_discharge(
patientid, doctorid, post_data)
except Exception as ex:
logger.exception('error when post discharge, ', ex)
resp_dict['info'] = 'Error when post discharge {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('post discharge ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_201
resp.body = json.dumps(discharge_dict)
else:
logger.exception('return error when try to post discharge, ', ex)
resp_dict['errinfo'] = 'Error when post discharge {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class DischargeListener:
@falcon.before(max_body(64*1024))
def on_put(self, req, resp, doctorid, patientid, indate):
"""
Register a doctor in the system. The post data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains patient's id, or other related info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, discharge_dict = discharge.update_discharge(
patientid, doctorid, indate, post_data)
except Exception as ex:
logger.exception('error when post discharge, ', ex)
resp_dict['info'] = 'Error when post discharge {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('put discharge ok, status positive')
# resp_dict['info'] = 'Register patient {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(discharge_dict)
else:
logger.exception('return error when try to post discharge, ', ex)
resp_dict['errinfo'] = 'Error when post discharge {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class DischargeListListener:
def on_get(self, req, resp, patientid):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"patientid":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
"""
handle_request:
"""
status, discharge_list = discharge.get_discharges(patientid)
except Exception as ex:
logger.exception('error when get discharges, ', ex)
resp_dict['info'] = 'Error when get discharges {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get discharges ok, status positive')
# resp_dict['info'] = 'Register {} success'.format(
# 'obj')
# resp_dict['objid'] = objid
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(discharge_list)
else:
logger.exception('return error when try to get discharges, ', ex)
resp_dict['errinfo'] = 'Error when get discharges'
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class AuthListener:
@falcon.before(max_body(64*1024))
def on_post(self, req, resp, role):
"""
Get info of a patient in the system. The response data is in json format.
:param req.header.username: username
:param req.header.password: password
:returns: a json contains doctor's info
{"role":'d001', "info":{"info1":''}}
"""
resp_dict = {}
try:
# username = req.get_header('username') or 'un'
# password = req.get_header('password') or 'pw'
# post_data = req.params.get('data')
# have pre-processed by JSONTranslator, post_data is a dict
post_data = req.context['doc']
logger.debug('type of post_data:{}'.format(type(post_data)))
if not ('password' in post_data.keys() and 'username' in post_data.keys()):
resp_dict['errinfo'] = 'Error, no password or username in post data'
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# logger.debug('username:%s, password:%s, data:%s'
# % (username, password, post_data))
# logger.debug('env:%s , \nstream:%s, \ncontext:, \ninput:' % (
# req.env, req.stream.read()))
except Exception as ex:
logger.error('error when try to get headers and data, ', ex)
raise falcon.HTTPBadRequest('bad req',
'when read from req, please check if the req is correct.')
try:
"""
handle_request:
"""
status, token = auth.authentication(role, post_data)
except Exception as ex:
logger.exception('error when get objs, ', ex)
resp_dict['info'] = 'Error when get objs {}'.format(
'obj')
resp.status = falcon.HTTP_500
resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)
else:
if status:
logger.debug('get objs ok, status positive')
# resp_dict['info'] = 'Register {} success'.format(
# 'obj')
resp_dict['token'] = token
# resp.status = status or falcon.HTTP_200
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
# resp.body = token
else:
logger.exception('return error when try to get objs, ', ex)
resp_dict['errinfo'] = 'Error when get objs {}'.format(
'obj')
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_dict)
# resp.body = json.dumps(resp_dict, sort_keys=True,
# indent=4)
class TestListener:
def on_get(self, req, resp):
"""
For test if server runs
"""
resp_dict = {}
resp_dict['info'] = 'Server runs successfully'
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_dict)
| {
"repo_name": "pa2515-group2/server",
"path": "server/apiv1.py",
"copies": "1",
"size": "55116",
"license": "mit",
"hash": 7196187356804249000,
"line_mean": 39.3484626647,
"line_max": 111,
"alpha_frac": 0.5246752304,
"autogenerated": false,
"ratio": 4.092063256366471,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.511673848676647,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# https://github.com/joschu/modular_rl/blob/master/modular_rl/distributions.py
import numpy as np
def categorical_sample(prob_nk):
"""
Sample from categorical distribution
Each row specifies class probabilities
"""
prob_nk = np.asarray(prob_nk)
assert prob_nk.ndim == 2
N = prob_nk.shape[0]
csprob_nk = np.cumsum(prob_nk, axis=1)
return np.argmax(csprob_nk > np.random.rand(N, 1), axis=1)
TINY = np.finfo(np.float32).tiny
def categorical_kl(p_nk, q_nk):
p_nk = np.asarray(p_nk, dtype=np.float32)
q_nk = np.asarray(q_nk, dtype=np.float32)
ratio_nk = p_nk / (q_nk + TINY) # so we don't get warnings
# next two lines give us zero when p_nk==q_nk==0 but inf when q_nk==0
ratio_nk[p_nk == 0] = 1
ratio_nk[(q_nk == 0) & (p_nk != 0)] = np.inf
return (p_nk * np.log(ratio_nk)).sum(axis=1)
def categorical_entropy(p_nk):
p_nk = np.asarray(p_nk, dtype=np.float32)
p_nk = p_nk.copy()
p_nk[p_nk == 0] = 1
return (-p_nk * np.log(p_nk)).sum(axis=1)
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/misc/distributions.py",
"copies": "1",
"size": "1090",
"license": "mit",
"hash": -5246486837221091000,
"line_mean": 29.2777777778,
"line_max": 78,
"alpha_frac": 0.628440367,
"autogenerated": false,
"ratio": 2.678132678132678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3806573045132678,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# huckle's imports
from . import package
from . import config
from . import hutils
from . import hclinav
import sys
# navigate through the command line sequence for a given cliname
def navigate(argv):
nav = hclinav.navigator(root=config.url, apiname=config.cliname)
if len(argv) == 1:
hclinav.traverse_execution(nav)
length = len(argv[1:])
for i, x in enumerate(argv[1:]):
nav = hclinav.traverse_argument(nav, x)
if i == length - 1:
hclinav.traverse_execution(nav)
# huckle's minimal set of commands
def cli():
if len(sys.argv) > 2:
if sys.argv[1] == "cli" and sys.argv[2] == "install":
if len(sys.argv) > 3:
hclinav.pull(sys.argv[3])
else:
huckle_help()
elif sys.argv[1] == "cli" and sys.argv[2] == "run":
if len(sys.argv) > 3:
config.parse_configuration(sys.argv[3])
navigate(sys.argv[3:])
else:
huckle_help()
elif sys.argv[1] == "cli" and sys.argv[2] == "ls":
config.list_clis()
elif sys.argv[1] == "cli" and sys.argv[2] == "rm":
if len(sys.argv) > 3:
config.remove_cli(sys.argv[3])
else:
huckle_help()
elif sys.argv[1] == "cli" and sys.argv[2] == "config":
if len(sys.argv) > 3:
config.config_list(sys.argv[3])
else:
huckle_help()
elif sys.argv[1] == "help":
hclinav.display_man_page(config.huckle_manpage_path)
sys.exit(0)
else:
huckle_help()
elif len(sys.argv) == 2:
if sys.argv[1] == "--version":
show_dependencies()
elif sys.argv[1] == "help":
hclinav.display_man_page(config.huckle_manpage_path)
sys.exit(0)
else:
huckle_help()
else:
huckle_help()
def huckle_help():
hutils.eprint("for help, use:\n")
hutils.eprint(" huckle help")
sys.exit(2)
# show huckle's version and the version of its dependencies
def show_dependencies():
dependencies = ""
for i, x in enumerate(package.dependencies):
dependencies += " "
dependencies += package.dependencies[i].rsplit('==', 1)[0] + "/"
dependencies += package.dependencies[i].rsplit('==', 1)[1]
print("huckle/" + package.__version__ + dependencies)
| {
"repo_name": "cometaj2/huckle",
"path": "huckle/huckle.py",
"copies": "1",
"size": "2589",
"license": "mit",
"hash": -1901537832825469400,
"line_mean": 25.4183673469,
"line_max": 72,
"alpha_frac": 0.5268443414,
"autogenerated": false,
"ratio": 3.4612299465240643,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4488074287924064,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""Implements blaze._elwise_eval function.
This function is meant to do OOC operations following a different
strategy than the canonical Blaze approach, and should be phased out as
soon as the canonical approach can do these sort of things efficiently.
"""
import sys, math
from dynd import nd, ndt
from .. import array, empty
from .eval import eval as blaze_eval, append
import datashape
import re
if sys.version_info >= (3, 0):
xrange = range
def dict_viewkeys(d):
return d.keys()
else:
def dict_viewkeys(d):
return d.iterkeys()
min_numexpr_version = '2.2' # the minimum version of Numexpr needed
numexpr_here = False
try:
import numexpr
except ImportError:
pass
else:
if numexpr.__version__ >= min_numexpr_version:
numexpr_here = True
if numexpr_here:
import numexpr
from numexpr.expressions import functions as numexpr_functions
class Defaults(object):
"""Class to taylor the setters and getters of default values."""
def __init__(self):
self.choices = {}
# Choices setup
self.choices['vm'] = ("numexpr", "python")
def check_choices(self, name, value):
if value not in self.choices[name]:
raise ValueError(
"value must be in: %s" % (self.choices[name],))
#
# Properties start here...
#
@property
def vm(self):
return self.__vm
@vm.setter
def vm(self, value):
self.check_choices('vm', value)
if value == "numexpr" and not numexpr_here:
raise ValueError(
"cannot use `numexpr` virtual machine "
"(minimum required version is probably not installed)")
self.__vm = value
# Default values start here...
defaults = Defaults()
if numexpr_here:
defaults.vm = "numexpr"
else:
defaults.vm = "python"
"""
The virtual machine to be used in computations (via `eval`). It can
be 'numexpr' or 'python'. Default is 'numexpr', if installed. If
not, then the default is 'python'.
"""
# Compute the product of a sequence
def prod(seq):
ret = 1
for i in seq:
ret *= int(i)
return ret
def _elwise_eval(expression, vm=None, user_dict={}, **kwargs):
"""
eval(expression, vm=None, user_dict=None, **kwargs)
Evaluate an `expression` and return the result.
Parameters
----------
expression : string
A string forming an expression, like '2*a+3*b'. The values for 'a' and
'b' are variable names to be taken from the calling function's frame.
These variables may be scalars or Blaze arrays.
vm : string
The virtual machine to be used in computations. It can be 'numexpr'
or 'python'. The default is to use 'numexpr' if it is installed.
user_dict : dict
An user-provided dictionary where the variables in expression
can be found by name.
kwargs : list of parameters or dictionary
Any parameter supported by the blaze.array constructor.
Useful for setting properties of the returned array object.
Returns
-------
out : array object
The outcome of the expression. You can tailor the
properties of this array by passing additional arguments
supported by blaze.array constructor in `kwargs`.
"""
if vm is None:
vm = defaults.vm
else:
defaults.vm = vm
# Get variables and column names participating in expression
depth = kwargs.pop('depth', 2)
vars = _getvars(expression, user_dict, depth, vm=vm)
# The next is a hack to try to prevent people of using axis=dim,
# where dim is > 0.
if ("axis" in expression and
re.findall("axis\s*=\s*[1-9]", expression)):
raise NotImplementedError(
"reductions in axis different than 0 are not supported yet")
# Gather info about sizes and lengths
rowsize, vlen = 0, 1
for name in dict_viewkeys(vars):
var = vars[name]
# Scalars
if not hasattr(var, "__len__"):
continue
if not hasattr(var, "dshape"):
try:
var = array(var)
except:
raise ValueError(
"sequence cannot be converted into a blaze array")
# From now on, we only have Blaze arrays
rowsize += var.dshape.measure.itemsize * prod(var.dshape.shape[1:])
# Check for length
if vlen > 1 and vlen != len(var):
raise ValueError("arrays must have the same length")
vlen = len(var)
if rowsize == 0 or vlen == 0:
# All scalars or zero-length objects
if vm == "python":
return eval(expression, vars)
else:
return numexpr.evaluate(expression, local_dict=vars)
return _eval_blocks(expression, vars, vlen, rowsize, vm, **kwargs)
def _getvars(expression, user_dict, depth, vm):
"""Get the variables in `expression`.
`depth` specifies the depth of the frame in order to reach local
or global variables.
"""
cexpr = compile(expression, '<string>', 'eval')
if vm == "python":
exprvars = [ var for var in cexpr.co_names
if var not in ['None', 'False', 'True'] ]
else:
# Check that var is not a numexpr function here. This is useful for
# detecting unbound variables in expressions. This is not necessary
# for the 'python' engine.
exprvars = [ var for var in cexpr.co_names
if var not in ['None', 'False', 'True']
and var not in numexpr_functions ]
# Get the local and global variable mappings of the user frame
user_locals, user_globals = {}, {}
user_frame = sys._getframe(depth)
user_locals = user_frame.f_locals
user_globals = user_frame.f_globals
# Look for the required variables
reqvars = {}
for var in exprvars:
# Get the value
if var in user_dict:
val = user_dict[var]
elif var in user_locals:
val = user_locals[var]
elif var in user_globals:
val = user_globals[var]
else:
if vm == "numexpr":
raise NameError("variable name ``%s`` not found" % var)
val = None
# Check the value
if (vm == "numexpr" and
hasattr(val, 'dshape') and
val.dshape.measure.name == 'uint64'):
raise NotImplementedError(
"variable ``%s`` refers to "
"a 64-bit unsigned integer object, that is "
"not yet supported in numexpr expressions; "
"rather, use the 'python' vm." % var )
if val is not None:
reqvars[var] = val
return reqvars
def _eval_blocks(expression, vars, vlen, rowsize, vm, **kwargs):
"""Perform the evaluation in blocks."""
# Compute the optimal block size (in elements)
# The next is based on experiments, but YMMV
if vm == "numexpr":
# If numexpr, make sure that operands fit in L3 chache
bsize = 2**20 # 1 MB is common for L3
else:
# If python, make sure that operands fit in L2 chache
bsize = 2**17 # 256 KB is common for L2
bsize //= rowsize
# Evaluation seems more efficient if block size is a power of 2
bsize = 2 ** (int(math.log(bsize, 2)))
if vlen < 100*1000:
bsize //= 8
elif vlen < 1000*1000:
bsize //= 4
elif vlen < 10*1000*1000:
bsize //= 2
# Protection against too large rowsizes
if bsize == 0:
bsize = 1
vars_ = {}
# Convert operands into Blaze arrays and get temporaries for vars
maxndims = 0
for name in dict_viewkeys(vars):
var = vars[name]
if not hasattr(var, "dshape"):
# Convert sequences into regular Blaze arrays
vars[name] = var = array(var)
if hasattr(var, "__len__"):
ndims = len(var.dshape.shape)
if ndims > maxndims:
maxndims = ndims
if len(var) > bsize:
# Variable is too large; get a container for a chunk
res_shape, res_dtype = datashape.to_numpy(var.dshape)
res_shape = list(res_shape)
res_shape[0] = bsize
dshape = datashape.from_numpy(res_shape, res_dtype)
vars_[name] = empty(dshape)
if 'ddesc' in kwargs and kwargs['ddesc'] is not None:
res_ddesc = True
else:
res_ddesc = False
for i in xrange(0, vlen, bsize):
# Correction for the block size
if i+bsize > vlen:
bsize = vlen - i
# Get buffers for vars
for name in dict_viewkeys(vars):
var = vars[name]
if hasattr(var, "__len__") and len(var) > bsize:
vars_[name] = var[i:i+bsize]
else:
if hasattr(var, "__getitem__"):
vars_[name] = var[:]
else:
vars_[name] = var
# Perform the evaluation for this block
# We need array evals
if vm == "python":
res_block = eval(expression, vars_)
dynd_block = blaze_eval(res_block).ddesc.dynd_arr()
else:
res_block = numexpr.evaluate(expression, local_dict=vars_)
# numexpr returns a numpy array, and we need dynd/blaze ones
dynd_block = nd.array(res_block)
res_block = array(res_block)
if i == 0:
scalar = False
dim_reduction = False
# Detection of reduction operations
if res_block.dshape.shape == ():
scalar = True
result = dynd_block
continue
elif len(res_block.dshape.shape) < maxndims:
dim_reduction = True
result = dynd_block
continue
block_shape, block_dtype = datashape.to_numpy(res_block.dshape)
out_shape = list(block_shape)
if res_ddesc:
out_shape[0] = 0
dshape = datashape.from_numpy(out_shape, block_dtype)
result = empty(dshape, **kwargs)
append(result, dynd_block)
else:
out_shape[0] = vlen
dshape = datashape.from_numpy(out_shape, block_dtype)
result = empty(dshape, **kwargs)
# The next is a workaround for bug #183
#result[:bsize] = res_block
result[:bsize] = dynd_block
else:
if scalar:
result += dynd_block
result = result.eval()
elif dim_reduction:
if len(res_block) < len(result):
result[:bsize] += dynd_block
else:
result += dynd_block
result = result.eval()
elif res_ddesc:
append(result, dynd_block)
else:
# The next is a workaround for bug #183
#result[i:i+bsize] = res_block
result[i:i+bsize] = dynd_block
# Scalars and dim reductions generate dynd array for workaround
# different issues in Blaze array operations (see #197)
if isinstance(result, nd.array):
if scalar:
return array(result)
else:
# If not an scalar pass the arguments (persistency, etc.)
return array(result, **kwargs)
return result
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/compute/elwise_eval.py",
"copies": "3",
"size": "11560",
"license": "bsd-3-clause",
"hash": -1912411008971910100,
"line_mean": 32.4104046243,
"line_max": 78,
"alpha_frac": 0.5683391003,
"autogenerated": false,
"ratio": 4.018074383037887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6086413483337888,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""Implements the blaze.eval function"""
from .air import compile, run
from .. import array
#------------------------------------------------------------------------
# Eval
#------------------------------------------------------------------------
def eval(arr, ddesc=None, caps={'efficient-write': True},
out=None, debug=False):
"""Evaluates a deferred blaze kernel tree
data descriptor into a concrete array.
If the array is already concrete, merely
returns it unchanged.
Parameters
----------
ddesc: DDesc instance, optional
A data descriptor for storing the result, if evaluating to a BLZ
output or (in the future) to a distributed array.
caps: { str : object }
Capabilities for evaluation and storage
TODO: elaborate on values
out: Array
Output array to store the result in, or None for a new array
strategy: str
Evaluation strategy.
Currently supported: 'py', 'jit'
"""
if arr.ddesc.capabilities.deferred:
result = eval_deferred(
arr, ddesc=ddesc, caps=caps, out=out, debug=debug)
elif arr.ddesc.capabilities.remote:
# Retrieve the data to local memory
# TODO: Caching should play a role here.
result = array(arr.ddesc.dynd_arr())
else:
# TODO: This isn't right if the data descriptor is different, requires
# a copy then.
result = arr
return result
def eval_deferred(arr, ddesc, caps, out, debug=False):
expr = arr.ddesc.expr
graph, ctx = expr
# collected 'params' from the expression
args = [ctx.terms[param] for param in ctx.params]
func, env = compile(expr, ddesc=ddesc)
result = run(func, env, ddesc=ddesc, caps=caps, out=out, debug=debug)
return result
#------------------------------------------------------------------------
# Append
#------------------------------------------------------------------------
def append(arr, values):
"""Append a list of values."""
# XXX If not efficient appends supported, this should raise
# a `PerformanceWarning`
if arr.ddesc.capabilities.appendable:
arr.ddesc.append(values)
else:
raise ValueError('Data source cannot be appended to')
| {
"repo_name": "FrancescAlted/blaze",
"path": "blaze/compute/eval.py",
"copies": "3",
"size": "2330",
"license": "bsd-3-clause",
"hash": -2235761076629800200,
"line_mean": 30.0666666667,
"line_max": 78,
"alpha_frac": 0.5673819742,
"autogenerated": false,
"ratio": 4.412878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014817272444858653,
"num_lines": 75
} |
from __future__ import absolute_import, division, print_function
# Implements the blaze.eval function
from .air import compile, run
from .. import array
#------------------------------------------------------------------------
# Eval
#------------------------------------------------------------------------
def eval(arr, storage=None, caps={'efficient-write': True}, out=None,
strategy=None, debug=False):
"""Evaluates a deferred blaze kernel tree
data descriptor into a concrete array.
If the array is already concrete, merely
returns it unchanged.
Parameters
----------
storage: blaze.Storage, optional
Where to store the result, if evaluating to a BLZ
output or (in the future) to a distributed array.
caps: { str : object }
Capabilities for evaluation and storage
TODO: elaborate on values
out: Array
Output array to store the result in, or None for a new array
strategy: str
Evaluation strategy.
Currently supported: 'py', 'jit'
"""
strategy = strategy or arr._data.strategy
if arr._data.capabilities.deferred:
result = eval_deferred(arr, storage, caps, out, strategy, debug=debug)
elif arr._data.capabilities.remote:
# Retrieve the data to local memory
# TODO: Caching should play a role here.
result = array(arr._data.dynd_arr())
else:
# TODO: This isn't right if the storage is different, requires
# a copy then.
result = arr
return result
def eval_deferred(arr, storage, caps, out, strategy, debug=False):
expr = arr._data.expr
graph, ctx = expr
# collected 'params' from the expression
args = [ctx.terms[param] for param in ctx.params]
func, env = compile(expr, strategy, debug=debug)
result = run(func, env, args,
storage=storage, caps=caps, out=out,
strategy=strategy, debug=debug)
return result
#------------------------------------------------------------------------
# Append
#------------------------------------------------------------------------
def append(arr, values):
"""Append a list of values."""
# XXX If not efficient appends supported, this should raise
# a `PerformanceWarning`
if hasattr(arr._data, 'append'):
arr._data.append(values)
else:
raise NotImplementedError('append is not implemented for this '
'object')
| {
"repo_name": "aburan28/blaze",
"path": "blaze/compute/eval.py",
"copies": "1",
"size": "2484",
"license": "bsd-3-clause",
"hash": 658342732723517600,
"line_mean": 31.2597402597,
"line_max": 78,
"alpha_frac": 0.5599838969,
"autogenerated": false,
"ratio": 4.634328358208955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5694312255108955,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Implements the blaze.eval function
from .air import prepare, interps
from .. import array
#------------------------------------------------------------------------
# Eval
#------------------------------------------------------------------------
def eval(arr, storage=None, caps={'efficient-write': True}, out=None,
strategy=None):
"""Evaluates a deferred blaze kernel tree
data descriptor into a concrete array.
If the array is already concrete, merely
returns it unchanged.
Parameters
----------
storage: blaze.Storage, optional
Where to store the result, if evaluating to a BLZ
output or (in the future) to a distributed array.
caps: { str : object }
Capabilities for evaluation and storage
TODO: elaborate on values
out: Array
Output array to store the result in, or None for a new array
strategy: str
Evaluation strategy.
Currently supported: 'py', 'jit'
"""
strategy = strategy or arr._data.strategy
if arr._data.capabilities.deferred:
result = eval_deferred(arr, storage, caps, out, strategy)
elif arr._data.capabilities.remote:
# Retrieve the data to local memory
# TODO: Caching should play a role here.
result = array(arr._data.dynd_arr())
else:
# TODO: This isn't right if the storage is different, requires
# a copy then.
result = arr
return result
def eval_deferred(arr, storage, caps, out, strategy):
expr = arr._data.expr
graph, ctx = expr
# Construct and transform AIR
func, env = prepare(expr, strategy)
# Find evaluator
interp = interps.lookup_interp(strategy)
# Interpreter-specific compilation/assembly
func, env = interp.compile(func, env)
# Run with collected 'params' from the expression
args = [ctx.terms[param] for param in ctx.params]
result = interp.interpret(func, env, args=args, storage=storage,
caps=caps, out=out, strategy=strategy)
return result
#------------------------------------------------------------------------
# Append
#------------------------------------------------------------------------
def append(arr, values):
"""Append a list of values."""
# XXX If not efficient appends supported, this should raise
# a `PerformanceWarning`
if hasattr(arr._data, 'append'):
arr._data.append(values)
else:
raise NotImplementedError('append is not implemented for this '
'object')
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/eval.py",
"copies": "1",
"size": "2639",
"license": "bsd-3-clause",
"hash": -7475637602937031000,
"line_mean": 30.4166666667,
"line_max": 73,
"alpha_frac": 0.5695339144,
"autogenerated": false,
"ratio": 4.654320987654321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5723854902054322,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import abc
import collections
import inspect
import pprint
import re
import struct
import six
# TODO: properly segregate the GSS in its own data structure
# rewrite this with the faster GSS modifications.
# SPPF
# Replace copying of stream data with indices
# Rewrite the closures as classes and *profile*.
# Typing
# Swierstra's paper
# Use exceptions for continuation control flow, e.g. handling parse
# failures.
# Success =
# Failure =
# Label = collections.namedtuple('Label', 'combinator start end')
# DEFAULT_FAILURE = Failure('This is the default failure for combinators that cannot generate their own failures. It should never be returned.', -1)
# Namedtuples test for structural equality which is bizarrely not what I want here.
# class Success(collections.namedtuple('Success', 'tree tail')):
# def copy(self):
# return Success(self.tree, self.tail)
# def __str__(self):
# return 'Success: ' + str(self.tree) + ", '" + str(self.tail) + "'"
# __repr__ = __str__
# class Failure(collections.namedtuple('Failure', 'message tail')):
# def copy(self):
# return Failure(self.message, self.tail)
# def __str__(self):
# return 'Failure: ' + self.message % self.tail
# __repr__ = __str__
def read_only(value):
return property(lambda g: value)
class Result(object):
pass
class Success(Result):
def __init__(self, value, stream, index):
self.value = value
self.stream = stream
self.index = index
def copy(self):
return Success(self.value, self.stream, self.index)
def __str__(self):
return 'Success: %r, %r' % (self.value, self.stream[self.index:])
__repr__ = __str__
class Failure(Result):
def __init__(self, message, stream, index):
self.message = message
self.stream = stream
self.index = index
def copy(self):
return Failure(self.message, self.stream, self.index)
def __str__(self):
return 'Failure: %s, %s' % (self.message, self.stream[self.index:])
# def __str__(self):
# return 'Failure: %s' % (self.message % self.stream[self.index:])
__repr__ = __str__
class GraphStructuredStack(object):
NodeLabel = read_only(collections.namedtuple('NodeLabel', 'nonterminal index'))
Node = read_only(collections.namedtuple('Node', 'edges U P'))
# EdgeLabel = read_only(collections.namedtuple('EdgeLabel', 'descriptor tree'))
# Edge = read_only(collections.namedtuple('Edge', 'to'))
GrammarSlot = read_only(collections.namedtuple('GrammarSlot', 'nonterminal slot'))
Descriptor = read_only(collections.namedtuple('Descriptor', 'grammar_slot node index'))
def __init__(self):
self.nodes = collections.defaultdict(lambda: self.Node({}, set(), set()))
self.dispatch_stack = [] #
def create(self, L: GrammarSlot, u: Node, index: int, tree):
v = self.nodes[self.NodeLabel(L.nonterminal, index)]
# if self.EdgeLabel(L, tree) not in v.edges:
if L not in v.edges:
# v.edges[EdgeLabel(L, tree)] = u
v.edges[L] = u
for k in v.P:
self.add(L, u, k)
return v
def pop(self, u, index):
if index not in u.P:
u.P.add(i)
for L in u.edges:
self.add(L, u.edges[L], i)
def add(self, L: GrammarSlot, node: Node, index: int):
d = self.Descriptor(L, node, index)
if d not in u.U:
self.dispatch_stack.append(d)
u.U.add(L)
def main_loop():
gss = GraphStructuredStack()
# Initialize stuff
while gss.dispatch_stack:
# c_i = index, c_u = gss_node, c_n = sppf_node
L, u, i, w = gss.dispatch_stack.pop() # GrammarSlot, GraphStructuredStack.node, int, SPPFnode
class Combinator(six.with_metaclass(abc.ABCMeta, object)):
# Since combinators pass the same input stream among themselves,
# to avoid generating runtime errors due to passing text to
# terminal combinators expecting binary data or binary data to
# combinators expecting text, all combinators will be assigned as
# either text or binary data. Higher-order combinators inherit
# their type from their sub-combinators while terminals have their
# data type explicitly defined. While some terminals are always
# binary (struct), others depend on how they're initialized (re,
# string.startswith, and possibly bits depending on which backend
# I use and how I set up the API). In Python 2, objects of type
# 'str' are treated as binary data and objects of type 'unicode'
# are treated as text. In Python 3, objects of type 'str' are
# treated as text and types 'bytes' and 'bytearray' are treated as
# binary data. Objects of type 'memoryview' are binary on both.
# This can lead to weird situations where the Strings combinator
# accepts binary data, but that's a consequence of Python's design
# and not something I can do anything about.
# Testing on 3.4 and 2.7 suggests that struct doesn't actually
# care what it's given, bytes, str, or unicode, but enforcing
# consistency on its input won't hurt. array accepts *only*
# native strings, so str/bytes on Python 2 and str/unicode on
# Python 3.
@abc.abstractmethod
def __init__(self, arg_type, *combinators, **kws):
# print(self, arg_type, combinators, kws)
if len(combinators) == 0:
raise TypeError('%s takes at least 1 argument.' % type(self).__name__)
if any(not isinstance(c, arg_type) for c in combinators):
raise TypeError('Arguments to %s must be %ss.' % (type(self).__name__, arg_type.__name__))
# Temporarily disabled until GSS segregation
# def __setattr__(self, key, value):
# raise AttributeError('%s object does not support item assignment.' % type(self).__name__)
# def __delattr__(self, key):
# raise AttributeError('%s object does not support item deletion.' % type(self).__name__)
def __add__(self, other):
""" + is the operator Python uses for concatenation."""
return Sequence(self, other)
def __radd__(self, other):
return Sequence(other, self)
def __or__(self, other):
return Alternation(self, other)
def __ror__(self, other):
return Alternation(other, self)
def __rshift__(self, act):
"""There's no standard syntax here so the >> operator seems as good
as any."""
return Action(self, act)
def parse(self, stream, index=0):
self.stack = []
# A map of a stream position to a map mapping combinator
# instances to sets of continuations. In the standard GLL
# algorithm, this seems to correspond to the GSS nodes, though
# note that Spiewak seems to have already transformed the GSS
# like Afroozeh and Izmaylova, with nodes recorded using only
# the nonterminal/combinator instance and the input position.
# Spiewak uses function identity to avoid needing to keep
# track of edge labels.
self.backlinks = {}
# A map of a stream position to sets of combinators. This
# corresponds to U.
self.done = {}
# A map of a stream position to a map mapping combinator instances
# to sets of successes. This corresponds to P.
self.popped = {}
# A map of Results to sets of functions.
self.saved = {}
successes = set()
failures = set()
def nonterminal_success(result, failure, index):
# if result.tail:
# failures.add(Failure('Unexpected trailing characters: "{}"'.format(str(result.tail))))
# else:
# Ugly hack
if isinstance(result, Result):
successes.add(result)
else:
successes.add(Success(result, stream, index))
def nonterminal_failure(result, index):
if isinstance(result, Result):
failures.add(result)
else:
failures.add(Failure(result, stream, index))
self._parse(self, nonterminal_success, nonterminal_failure, stream, index)
while self.stack:
# print('Main loop:', self)
combinator, index = self.stack.pop()
# These functions all correspond to the pop() method in
# the standard GLL algorithm.
def setup_popped(combinator=combinator, index=index):
# print('Popped:', pprint.pformat(self.popped), combinator, stream, sep='\n')
if index not in self.popped:
self.popped[index] = {}
if combinator not in self.popped[index]:
self.popped[index][combinator] = set()
# The saved set is not part of the original algorithm,
# Spiewak added it. He's using result identity here
# to check if something's been done, but there has to
# be a better way.
def setup_saved(result):
if result not in self.saved:
self.saved[result] = set()
def trampoline_success(tree, failure, current_index, past_index=index, combinator=combinator, setup_popped=setup_popped):
result = Success(tree, stream, current_index)
# print('Trampoline success: ', tree, pprint.pformat(self.backlinks), pprint.pformat(self.saved), sep='\n')
# print('Trampoline success:', pprint.pformat(self.popped), combinator, stream, sep='\n')
setup_popped()
self.popped[past_index][combinator].add(result)
setup_saved(result)
for success in self.backlinks[past_index][combinator]:
if success not in self.saved[result]:
self.saved[result].add(success)
# print(success, tree, failure, current_index)
success(tree, failure, current_index)
# def trampoline_failure(message, current_index, combinator=combinator, past_index=index, setup_popped=setup_popped):
# result = Failure(message, stream, current_index)
# setup_popped()
# setup_saved(result)
# for success in self.backlinks[past_index][combinator]:
# # print(success, failure)
# if success not in self.saved[result]:
# self.saved[result].add(success)
combinator._parse(self, trampoline_success, nonterminal_failure, stream, index)
if successes:
return list(successes)
else:
return list(failures)
def add(self, combinator, success, failure, index):
# print('Add:', self)
if index not in self.backlinks:
self.backlinks[index] = {}
if combinator not in self.backlinks[index]:
self.backlinks[index][combinator] = set()
if success not in self.backlinks[index][combinator]:
self.backlinks[index][combinator].add(success)
if index in self.popped and combinator in self.popped[index]:
for result in self.popped[index][combinator].copy():
success(result.value, failure, result.index)
else:
if index not in self.done:
self.done[index] = set()
if combinator not in self.done[index]:
self.stack.append((combinator, index))
self.done[index].add(combinator)
# def __str__(self):
# return '\n'.join(['Trampoline', 'Stack', pprint.pformat(self.stack), 'Backlinks', pprint.pformat(self.backlinks), 'Done', pprint.pformat(self.done), 'Popped', pprint.pformat(self.popped), 'Saved', pprint.pformat(self.saved)])
@abc.abstractmethod
def _parse(self, trampoline, success, failure, stream, index):
raise NotImplementedError
# def unparse(self, tree, stream, index=0):
# pass
# def _unparse(self, tree, stream, index):
# raise NotImplementedError
class Alternation(Combinator):
def __init__(self, *combinators, **kws):
super(Alternation, self).__init__(Combinator, *combinators, **kws)
vars(self)['combinators'] = frozenset(combinators)
def _parse(self, trampoline, success, failure, stream, index):
for combinator in self.combinators:
trampoline.add(combinator, success, failure, index)
def __or__(self, other):
if isinstance(other, Alternation):
return Alternation(*(self.combinators | other.combinators))
else:
return Alternation(*(self.combinators | frozenset([other])))
def __ror__(self, other):
if isinstance(other, Alternation):
return Alternation(*(other.combinators | self.combinators))
else:
return Alternation(*(frozenset([other]) | self.combinators))
# In the standard version of the GLL algorithm, the GSS stores the
# position within a nonterminal, what they call the grammar slot.
# Spiewak's version doesn't because it's forcing all nonterminals to
# be length 2 and using separate continuations for processing the
# first grammar slot and the second grammar slot. Allowing the use of
# arbitrary-length sequences will require modifying the GSS handling.
class Sequence(Combinator):
def __init__(self, left, right, **kws):
vars(self)['left'] = left
vars(self)['right'] = right
def _parse(self, trampoline, success, failure, stream, index):
def left_success(tree1, failure, index):
def right_success(tree2, failure, index):
success((tree1, tree2), failure, index)
self.right._parse(trampoline, right_success, failure, stream, index)
self.left._parse(trampoline, left_success, failure, stream, index)
# class Sequence(Combinator):
# def __init__(self, *combinators, **kws):
# super(Sequence, self).__init__(Combinator, *combinators, **kws)
# vars(self)['combinators'] = combinators
# def _parse(self, trampoline, success, failure, stream, index):
# trees = []
# # The clean way to do is with a separate index variable,
# # but Python 2.7 doesn't allow an inner function to alter
# # the variable of an outer one. The proper way of working
# # around this is probably using classes instead of
# # closures because classes have mutable state.
# combinators = iter(self.combinators)
# index = 0
# def sequence_continuation(tree, failure, stream, index):
# nonlocal index
# index += 1
# # print('Sequence continuation:', index, sequence_continuation, success, trees, failure, stream, index)
# trees.append(tree)
# try:
# combinator = next(combinators)
# except StopIteration:
# # print('Sequence continuation call:', success, trees, failure, stream, index)
# success(tuple(trees), failure, stream, index)
# return
# combinator._parse(trampoline, sequence_continuation, failure, stream, index)
# next(combinators)._parse(trampoline, sequence_continuation, failure, stream, index)
# def __add__(self, other):
# if isinstance(other, Sequence):
# return Sequence(*(self.combinators + other.combinators))
# else:
# return Sequence(*(self.combinators + (other,)))
# def __radd__(self, other):
# if isinstance(other, Sequence):
# return Sequence(*(other.combinators + self.combinators))
# else:
# return Sequence(*((other,) + self.combinators))
# def __mul__(self, other):
# type(self)(*(other * self.combinators))
# __rmul__ = __mul__
# class Lazy(Combinator):
# def __init__(self, name):
# if (six.PY3 and name.isidentifier()) or name.isalnum():
# vars(self)['name'] = name
# else:
# raise SyntaxError("Lazy initialized with a string that isn't a valid Python identifier: %s" % name)
# def combinator(self):
# try:
# return vars(self)['combinator']
# except KeyError:
# frames = inspect.getouterframes(inspect.currentframe())
# # print(frames)
# try:
# for frame in frames:
# combinator = frame[0].f_locals.get(self.name, None)
# if combinator:
# break
# else:
# combinator = frame[0].f_globals.get(self.name, None)
# if combinator:
# break
# else:
# raise NameError("Name '%s' isn't defined" % self.name)
# finally:
# del frames
# if isinstance(combinator, Combinator):
# vars(self)['combinator'] = combinator
# return combinator
# else:
# raise TypeError("'%s' refers to an object that is a %s instead of a combinator." % (self.name, type(combinator)))
# combinator = property(combinator)
# def _parse(self, trampoline, success, failure, stream, index):
# combinator = self.combinator
# self._parse = combinator._parse
# combinator._parse(trampoline, success, failure, stream, index)
class Lazy(Combinator):
def __init__(self, function):
vars(self)['function'] = function
def combinator(self):
try:
return vars(self)['combinator']
except KeyError:
return self.function()
combinator = property(combinator)
def _parse(self, trampoline, success, failure, stream, index):
combinator = self.combinator
self._parse = combinator._parse
combinator._parse(trampoline, success, failure, stream, index)
class Action(Combinator):
def __init__(self, combinator, action):
self.combinator = combinator
self.action = action
def _parse(self, trampoline, success, failure, stream, index):
def action_continuation(tree, failure, index):
# print('Action:', tree, self.action(tree))
success(self.action(tree), failure, index)
self.combinator._parse(trampoline, action_continuation, failure, stream, index)
class Terminal(Sequence):
"""This inherits from Sequence: note that there's no real distinction
between a sequence of terminals and a single terminal.
Parsers have a type of input they expect, and you can't mix
parsers that accept different kinds of input. Note this is
different from passing output to a subparser using Act/>>.
"""
def __init__(self, *combinators, **kws):
# TODO: should rework this to not hard-code Combinator
Combinator.__init__(self, Terminal, *combinators, **kws)
vars(self)['combinators'] = combinators
def parse(self, stream, index=0):
result = None
def terminal_success(tree, failure, index):
nonlocal result
result = Success(tree, stream, index)
def terminal_failure(message, index):
nonlocal result
result = Failure(message, stream, index)
self._parse(None, terminal_success, terminal_failure, stream, index)
return result
def _parse(self, trampoline, success, failure, stream, index):
trees = []
# The clean way to do is with a separate index variable,
# but Python 2.7 doesn't allow an inner function to alter
# the variable of an outer one. The proper way of working
# around this is probably using classes instead of
# closures because classes have mutable state.
combinators = iter(self.combinators)
def terminal_continuation(tree, failure, index):
# print('Sequence continuation:', index, sequence_continuation, success, trees, failure, stream)
trees.append(tree)
try:
combinator = next(combinators)
except StopIteration:
# print('Sequence continuation call:', success, trees, failure, stream)
success(tuple(trees), failure, index)
return
combinator._parse(trampoline, terminal_continuation, failure, stream, index)
next(combinators)._parse(trampoline, terminal_continuation, failure, stream, index)
def __add__(self, other):
if isinstance(other, Terminal):
if type(self) is Terminal and type(other) is Terminal:
return Terminal(*(self.combinators + other.combinators))
elif type(self) is Terminal:
return Terminal(*(self.combinators + (other,)))
elif type(other) is Terminal:
return Terminal(*((self,) + other.combinators))
else:
return Terminal(self, other)
else:
return NotImplemented
def __radd__(self, other):
if isinstance(other, Terminal):
if type(self) is Terminal and type(other) is Terminal:
return Terminal(*(other.combinators + self.combinators))
elif type(self) is Terminal:
return Terminal(*((other,) + self.combinators))
elif type(other) is Terminal:
return Terminal(*(other.combinators) + (self,))
else:
return Terminal(other, self)
else:
return NotImplemented
class Strings(Terminal):
def __init__(self, *strings, **kws):
vars(self)['strings_lengths'] = [(s, len(s)) for s in strings]
vars(self)['combinators'] = (self,)
def _parse(self, trampoline, success, failure, stream, index):
trees = []
for string, length in self.strings_lengths:
if (length > len(stream) - index):
return failure('Unexpected end of stream (expected %r)' % string, index)
else:
if stream.startswith(string, index):
trees.append(string)
index += length
else:
return failure('Expected %r got %%r' % string, index)
return success(tuple(trees), failure, index)
def __str__(self):
return 'Strings(%s)' % ', '.join(repr(s) for s, _ in self.strings_lengths)
__repr__ = __str__
class Cache(dict):
def __init__(self, factory, *args, **kws):
self.factory = factory
super(Cache, self).__init__(*args, **kws)
@classmethod
def fromkeys(cls, factory, seq, value=None):
instance = super(Cache, cls).fromkeys(seq, value)
instance.factory = factory
return instance
def __missing__(self, key):
return self.factory(key)
class Regex(Terminal):
regexes = read_only(Cache(re.compile))
def __init__(self, pattern, **kws):
vars(self)['regex'] = self.regexes[pattern]
vars(self)['combinators'] = (self,)
def _parse(self, trampoline, success, failure, stream, index):
match = self.regex.match(stream, index)
if match:
# This API is kind of ugly, a regex always needs at least
# one group to return a tree element correctly.
return success(match.groups(), failure, match.end())
else:
return failure("%r didn't match %%r" % self.regex.pattern, index)
def __str__(self):
return 'Regex(%r)' % self.regex.pattern
__repr__ = __str__
class Binary(Terminal):
structs = read_only(struct.Struct)
def __init__(self, format_string, **kws):
vars(self)['struct'] = self.structs[format_string]
vars(self)['combinators'] = (self,)
def _parse(self, trampoline, success, failure, stream, index):
try:
return success(self.struct.unpack_from(stream, index), index + self.struct.size)
except struct.error as error:
return failure(error.args[0] + 'at %r', index)
if __name__ == '__main__':
import cProfile
import platform
import sys
import time
import timeit
CPYTHON = True if platform.python_implementation() == 'CPython' else False
if six.PY3 and CPYTHON:
import tracemalloc
tracemalloc.start()
import trace_calls
trace_calls = trace_calls.TraceCalls(files=('continuation_gll_combinators.py',))
# sys.settrace(trace_calls)
# The implementation in Spiewak's paper doesn't seem to be
# complete because the only parser that will ever return
# "Unexpected trailing characters" is a non-terminal parser.
strings = Strings('ab')
print('Strings success,', strings.parse('ababab'))
print('Strings failure,', strings.parse('bcbcbc'))
terminal = Strings('a') + Strings('b')
print('Terminal success,', terminal.parse('ababab'))
terminal = Terminal(Strings('a'), Strings('b'))
print('Terminal success,', terminal.parse('ababab'))
sequence = Sequence(Strings('abc'), Strings('def')) + Strings('ghi') + Strings('jkl')
print('Sequence success,', sequence.parse('abcdefghijkl'))
# sequence = Strings('abc') + Strings('def') + Strings('ghi') + Strings('jkl')
# print('Sequence success,', sequence.parse('abcdefghijkl'))
# sequence = Sequence(Strings('abc'), Strings('def'), Strings('ghi'), Strings('jkl'))
# print('Sequence success,', sequence.parse('abcdefghijkl'))
alternation = Strings('ab') | Strings('bc')
print('Alternation success,', alternation.parse('bcbcbc'))
alternation = Alternation(Strings('ab'), Strings('bc'))
print('Alternation success,', alternation.parse('bcbcbc'))
sequence_alternation = Strings('a') + (Strings('b') | Strings('c'))
print('Sequence alternation success,', sequence_alternation.parse('abc'))
print('Sequence alternation success,', sequence_alternation.parse('acb'))
print('Sequence alternation failure,', sequence_alternation.parse('cba'))
sequence_alternation = Sequence(Strings('a'), Alternation(Strings('b'), Strings('c')))
print('Sequence alternation success,', sequence_alternation.parse('abc'))
print('Sequence alternation success,', sequence_alternation.parse('acb'))
print('Sequence alternation failure,', sequence_alternation.parse('cba'))
alpha = Regex('([a-zA-Z])')
hex_char = Regex('([a-fA-F0-9])')
alpha_or_hex = alpha | hex_char
print('Alpha success,', alpha_or_hex.parse('xyz'))
print('Alpha and hex success,', alpha_or_hex.parse('ABC'))
print('Hex success,', alpha_or_hex.parse('123'))
a = Strings('a')
l = Lazy(lambda: a)
print('Lazy,', l.parse('a'))
print('Lazy,', l.parse('a'))
ambiguous = (Lazy(lambda: ambiguous) + Lazy(lambda: ambiguous) + Lazy(lambda: ambiguous)) | (Lazy(lambda: ambiguous) + Lazy(lambda: ambiguous)) | Strings('a')
print('Highly ambiguous,', ambiguous.parse('aaa'))
# pprint.pprint(ambiguous.combinators)
# There's a major divergence from Koopman and Plasmeijer here
# because regexes behave like their deterministic combinators, but
# deterministic behavior at the word level here is probably more
# realistic for profiling.
word = Regex('([a-zA-Z]+)')
sentence = ((word + Strings('.')) >> (lambda t: t[0])) | ((word + Regex(r'[\s,]') + Lazy(lambda: sentence)) >> (lambda t: t[0][0] + t[1]))
print('Sentence success,', sentence.parse('The quick brown fox jumps over the lazy dog.'))
num = (Strings('0') | Strings('1')) >> (lambda t: int(t[0]))
print('Calculator,', num.parse('010101'))
print('Calculator,', num.parse('101010'))
expr = (num + Strings('+') + Lazy(lambda: expr)) >> (lambda t: t[0][0] + t[1]) | (num + Strings('-') + Lazy(lambda: expr)) >> (lambda t: t[0][0] - t[1]) | num
print('Calculator,', expr.parse('1+1'))
print('Calculator,', expr.parse('1+1+1'))
print('Calculator,', expr.parse('0+1-1+1+1'))
print('Calculator,', expr.parse('1+1+1+1+1'))
print('Calculator,', expr.parse('0-1-1-1-1'))
print('Calculator,', expr.parse('1-1-2'))
print('Calculator,', expr.parse('3'))
dictionary = [w.strip().replace("'", '') for w in open('/usr/share/dict/words', 'r').read().splitlines() if w.strip().isalpha()]
sample = ' '.join(dictionary[:10000]) + '.'
start = time.clock()
sentence.parse(sample)
end = time.clock()
print('Dictionary, %.4f seconds' % (end - start,))
print('Highly ambiguous')
def time_ambiguous(max_length):
for i in range(2, max_length):
print(i, timeit.timeit('ambiguous.parse("' + i * 'a' + '")', 'gc.enable(); from __main__ import ambiguous', number=1000), 'seconds')
cProfile.run('''
time_ambiguous(7)
''')
if six.PY3 and CPYTHON:
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
print("[ Top 10 ]")
for stat in top_stats[:10]:
print(stat)
print('Stack depth:', trace_calls.max_depth)
| {
"repo_name": "ceridwen/combinators",
"path": "continuation_gll_combinators.py",
"copies": "1",
"size": "29171",
"license": "mit",
"hash": -9190462845608499000,
"line_mean": 39.6847977685,
"line_max": 235,
"alpha_frac": 0.6078982551,
"autogenerated": false,
"ratio": 3.9176739188826217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5025572173982621,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import appr.pack as packager
DEFAULT_MEDIA_TYPE = 'kpm'
class BlobBase(object):
def __init__(self, package_name, blob, b64_encoded=True):
self.package = package_name
self.packager = packager.ApprPackage(blob, b64_encoded)
@classmethod
def get(cls, package_name, digest):
b64blob = cls._fetch_b64blob(package_name, digest)
return cls(package_name, b64blob)
def save(self, content_media_type):
raise NotImplementedError
@classmethod
def delete(cls, package_name, digest):
raise NotImplementedError
@classmethod
def _fetch_b64blob(cls, package_name, digest):
raise NotImplementedError
@property
def b64blob(self):
return self.packager.b64blob
@property
def blob(self):
return self.packager.blob
@property
def digest(self):
return self.packager.digest
@property
def size(self):
return self.packager.size
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/blob_base.py",
"copies": "2",
"size": "1029",
"license": "apache-2.0",
"hash": 923713067636571500,
"line_mean": 22.9302325581,
"line_max": 64,
"alpha_frac": 0.6627793975,
"autogenerated": false,
"ratio": 3.853932584269663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5516711981769662,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
from itertools import chain, islice
import multiprocessing
from streamcorpus_pipeline._clean_visible import cleanse, make_clean_visible
import sys
import zlib
import cbor
from gensim import corpora, models
import happybase
import dossier.models.features as features
from dossier.web import streaming_sample
import kvlayer
import yakonfig
def status(*args, **kwargs):
kwargs['end'] = ''
args = list(args)
args[0] = '\033[2K\r' + args[0]
print(*args, **kwargs)
sys.stdout.flush()
def batch_iter(n, iterable):
iterable = iter(iterable)
while True:
yield chain([next(iterable)], islice(iterable, n-1))
def unpack_noun_phrases(row):
body = cbor.loads(zlib.decompress(row['f:response.body']))
body = make_clean_visible(body.encode('utf-8')).decode('utf-8')
body = cleanse(body)
return features.noun_phrases(body)
class App(yakonfig.cmd.ArgParseCmd):
def __init__(self, *args, **kwargs):
yakonfig.cmd.ArgParseCmd.__init__(self, *args, **kwargs)
def args_tfidf(self, p):
p.add_argument('--host', default='localhost')
p.add_argument('--port', default=9090, type=int)
p.add_argument('--table-prefix', default='')
p.add_argument('--limit', default=100, type=int)
p.add_argument('--batch-size', default=1000, type=int)
p.add_argument('-p', '--processes',
default=multiprocessing.cpu_count(), type=int)
p.add_argument('ids', metavar='INPUT_ROW_KEY_SAMPLE_FILE',
help='A file containing row keys to use for a sample.')
p.add_argument('out', metavar='OUTPUT_TFIDF_MODEL_FILE',
help='The file path to write the tfidf model to.')
def do_tfidf(self, args):
conn = happybase.Connection(host=args.host, port=args.port,
table_prefix=args.table_prefix)
t = conn.table('artifact')
corpus = []
print('Extracting random sample...')
sample = streaming_sample(open(args.ids), args.limit)
print('Building corpus...')
batches = batch_iter(args.batch_size, (s.strip() for s in sample))
pool = multiprocessing.Pool(processes=args.processes)
for i, batch in enumerate(batches, 1):
rows = (row for _, row in t.rows(list(batch)))
for noun_phrases in pool.imap(unpack_noun_phrases, rows):
corpus.append(noun_phrases)
status('%d of %d batches done' % (i, args.limit / args.batch_size))
print('Computing model...')
dictionary = corpora.Dictionary(corpus)
bows = [dictionary.doc2bow(tokens) for tokens in corpus]
tfidf = models.TfidfModel(bows, id2word=dictionary)
tfidf.save(args.out)
def args_ids(self, p):
p.add_argument('--host', default='localhost')
p.add_argument('--port', default=9090, type=int)
p.add_argument('--table-prefix', default='')
p.add_argument('--limit', default=None, type=int)
def do_ids(self, args):
conn = happybase.Connection(host=args.host, port=args.port,
table_prefix=args.table_prefix)
t = conn.table('artifact')
hbase_filter = 'FirstKeyOnlyFilter() AND KeyOnlyFilter()'
ids = islice(enumerate(t.scan(filter=hbase_filter)), args.limit)
for i, (key, data) in ids:
print(key)
if i % 100000 == 0:
print('%d keys received' % i, file=sys.stderr)
def main():
p = argparse.ArgumentParser(
description='Specific utilities for working with the ad corpus.')
app = App()
app.add_arguments(p)
args = yakonfig.parse_args(p, [kvlayer, yakonfig])
app.main(args)
if __name__ == '__main__':
main()
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/run_ads.py",
"copies": "1",
"size": "3867",
"license": "mit",
"hash": -7160002083156033000,
"line_mean": 34.4770642202,
"line_max": 79,
"alpha_frac": 0.6144297905,
"autogenerated": false,
"ratio": 3.5938661710037176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9708295961503718,
"avg_score": 0,
"num_lines": 109
} |
from __future__ import absolute_import, division, print_function
import argparse
from itertools import imap
import multiprocessing
import sys
from gensim import models
from dossier.fc import FeatureCollectionChunk
from dossier.store import Store
from dossier.store.store import fc_dumps
from dossier.models.etl import Ads, Scrapy, add_sip_to_fc
import kvlayer
import yakonfig
HBASE_MAX_KEY_VALUE_SIZE = 10 * 1024 * 1024 - 100
def batch_progress(cids_and_fcs, add, limit=5, batch_size=100):
def status(*args, **kwargs):
kwargs['end'] = ''
args = list(args)
args[0] = '\033[2K\r' + args[0]
print(*args, **kwargs)
sys.stdout.flush()
total = 'all' if limit is None else str(limit)
status('0 of %s done' % total)
batch = []
last_cid = None
for i, (cid, fc) in enumerate(cids_and_fcs, 1):
if fc is None:
continue
# Since we can restart the scanner, we may end up regenerating
# FCs for the same key in the same batch. This results in
# undefined behavior in kvlayer.
if not any(cid == cid2 for cid2, _ in batch):
# HBase doesn't allow more than 10MB for any particular key/value.
# This is supposedly configurable, but it's unclear how to
# configure it with happybase.
if data_size(cid, fc) <= HBASE_MAX_KEY_VALUE_SIZE:
batch.append((cid, fc))
if len(batch) >= batch_size:
add(batch)
last_cid = batch[-1][0]
batch = []
if i % 100 == 0:
status('%d of %s done (last id: %r)'
% (i, 'all' if limit is None else str(limit), last_cid))
if len(batch) > 0:
add(batch)
status('done')
def data_size(cid, fc):
return len(cid) + len(fc_dumps(fc))
class App(yakonfig.cmd.ArgParseCmd):
def __init__(self, *args, **kwargs):
yakonfig.cmd.ArgParseCmd.__init__(self, *args, **kwargs)
self._store = None
self._chunk = None
self.tfidf = None
@property
def store(self):
if self._store is None:
feature_indexes = None
try:
conf = yakonfig.get_global_config('dossier.store')
feature_indexes = conf['feature_indexes']
except KeyError:
pass
self._store = Store(kvlayer.client(),
feature_indexes=feature_indexes)
return self._store
def done(self):
if self._chunk is not None:
self._chunk.flush()
def get_output_accumulator(self, output_path=None):
if output_path is not None:
self._chunk = FeatureCollectionChunk(path=output_path, mode='wb')
def add(cids_and_fcs):
if self.tfidf is not None:
for _, fc in cids_and_fcs:
add_sip_to_fc(fc, self.tfidf)
if output_path is not None:
for _, fc in cids_and_fcs:
self._chunk.add(fc)
else:
self.store.put(cids_and_fcs)
return add
def get_mapper(self, args):
cpus = getattr(args, 'processes', 1)
if cpus == 1:
return imap
else:
pool = multiprocessing.Pool(processes=cpus)
return pool.imap
def args_etl_ads(self, p):
p.add_argument('--host', default='localhost')
p.add_argument('--port', default=9090, type=int)
p.add_argument('--table-prefix', default='')
p.add_argument('--limit', default=None, type=int)
p.add_argument('--batch-size', default=1000, type=int)
p.add_argument('--start', default=None, type=str)
p.add_argument('--stop', default=None, type=str)
p.add_argument('-p', '--processes',
default=multiprocessing.cpu_count(), type=int)
p.add_argument('-o', '--output', default=None)
p.add_argument('--tfidf', default=None, type=str,
help='Path to TF-IDF background model. Can be '
'generated with the `dossier.etl tfidf` script.')
def do_etl_ads(self, args):
if args.tfidf is not None:
self.tfidf = models.TfidfModel.load(args.tfidf)
etl = Ads(args.host, args.port, table_prefix=args.table_prefix)
gen = etl.cids_and_fcs(self.get_mapper(args), args.start, args.stop,
limit=args.limit)
self.etl(args, etl, gen)
def args_etl_scrapy(self, p):
p.add_argument('-p', '--processes',
default=multiprocessing.cpu_count(), type=int)
p.add_argument('--batch-size', default=1000, type=int)
p.add_argument('--limit', default=None, type=int)
p.add_argument('-o', '--output', default=None)
p.add_argument('--url-prefix', default=None,
help='Override the URL prefix to use when fixing '
'relative URLs. When omitted, detect '
'automatically.')
p.add_argument('--tfidf', default=None, type=str,
help='Path to TF-IDF background model. Can be '
'generated with the `dossier.etl tfidf` script.')
p.add_argument('input',
help='Scrapy data. Only supports CSV format currently.')
def do_etl_scrapy(self, args):
if args.tfidf is not None:
self.tfidf = models.TfidfModel.load(args.tfidf)
url_prefix = args.url_prefix
if url_prefix is None:
url_prefix = Scrapy.detect_url_prefix(open(args.input))
if url_prefix is not None:
print('Auto-detected URL prefix:', url_prefix)
etl = Scrapy(open(args.input), url_prefix=url_prefix)
gen = etl.cids_and_fcs(self.get_mapper(args), limit=args.limit)
self.etl(args, etl, gen)
def etl(self, args, etl, gen):
add = self.get_output_accumulator(args.output)
try:
batch_progress(gen, add, limit=args.limit,
batch_size=args.batch_size)
finally:
self.done()
def main():
p = argparse.ArgumentParser(
description='Utilities for generating FCs from artifacts.')
app = App()
app.add_arguments(p)
args = yakonfig.parse_args(p, [kvlayer, yakonfig])
app.main(args)
if __name__ == '__main__':
main()
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/run.py",
"copies": "1",
"size": "6450",
"license": "mit",
"hash": 3356616756700338700,
"line_mean": 35.0335195531,
"line_max": 79,
"alpha_frac": 0.5604651163,
"autogenerated": false,
"ratio": 3.6689419795221845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47294070958221845,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
import base64
import os
import tempfile
import requests
from appr.commands.command_base import CommandBase, PackageSplit
from appr.formats.helm.manifest_chart import ManifestChart
from appr.formats.appr.manifest_jsonnet import ManifestJsonnet
from appr.formats.utils import detect_format
from appr.pack import pack_kub
from appr.utils import package_filename
class PushCmd(CommandBase):
name = 'push'
help_message = "push a package to the registry"
default_media_type = None
def __init__(self, options):
super(PushCmd, self).__init__(options)
self.registry_host = options.registry_host
self.force = options.force
self.manifest = None
self.media_type = options.media_type
if options.media_type is self.default_media_type:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.channel = options.channel
self.version = options.version
self.filter_files = True
self.metadata = None
self.prefix = None
self.manifest_name = None
self.package_name = options.package
self.package_parts = options.package_parts
self.pname = self.package_parts.get('package', None)
self.namespace = self.package_parts.get('namespace', None)
if self.namespace is None:
self.namespace = options.ns
self.version_parts = options.version_parts
if self.version == "default":
self.version = None
self.ssl_verify = options.cacert or not options.insecure
self.status = ''
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser, cls.default_media_type, required=False)
cls._add_packageversion_option(parser)
parser.add_argument("--ns", "--namespace", default=None, help=argparse.SUPPRESS)
parser.add_argument("-f", "--force", action='store_true', default=False, help="force push")
parser.add_argument("-c", "--channel", default=None, help="Set a channel")
parser.add_argument("--version-parts", default={}, help=argparse.SUPPRESS)
parser.add_argument("--package-parts", default={}, help=argparse.SUPPRESS)
parser.add_argument('package', nargs='?', default=None, action=PackageSplit,
help="repository dest")
def _push(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
filename = package_filename(self.package_name, self.version, self.media_type)
kubepath = os.path.join(tempfile.gettempdir(), filename + ".tar.gz")
pack_kub(kubepath, filter_files=self.filter_files, prefix=self.prefix)
kubefile = open(kubepath, 'rb')
body = {
"manifest_name": self.manifest_name,
"name": self.package_name,
"release": self.version,
"metadata": self.metadata,
"media_type": self.media_type,
"blob": base64.b64encode(kubefile.read())}
try:
client.push(self.package_name, body, self.force)
self.status = "package: %s (%s | %s) pushed\n" % (self.package_name, self.version,
self.media_type)
except requests.exceptions.RequestException as exc:
if not (self.channel and exc.response.status_code in [409, 400]):
raise
kubefile.close()
os.remove(kubepath)
if self.channel:
client.create_channel_release(self.package_name, self.channel, self.version)
self.status += ">>> Release '%s' added to '%s'" % (self.version, self.channel)
def _chart(self):
self.manifest = ManifestChart()
self.manifest_name = self.manifest.name
if self.pname is None:
self.pname = self.manifest.name
self.prefix = self.pname
self.filter_files = False
if self.namespace is None:
raise argparse.ArgumentTypeError("Missing option: --namespace")
self.package_name = "%s/%s" % (self.namespace, self.pname)
if self.version is None:
self.version = self.manifest.version
self.metadata = self.manifest.metadata()
def _all_formats(self):
self.filter_files = False
if self.version is None or self.version == "default":
raise argparse.ArgumentTypeError("Missing option: --version")
if self.package_name is None:
raise argparse.ArgumentTypeError("Missing option: --name")
self.namespace, self.pname = self.package_name.split("/")
def _kpm(self):
self.filter_files = False
self.manifest = ManifestJsonnet()
ns, name = self.manifest.package['name'].split("/")
if not self.namespace:
self.namespace = ns
if not self.pname:
self.pname = name
self.package_name = "%s/%s" % (self.namespace, self.pname)
if not self.version or self.version == "default":
self.version = self.manifest.package['version']
self.metadata = self.manifest.metadata()
def _init(self):
if self.media_type is None:
self.media_type = detect_format(".").media_type
if self.media_type in ["kpm", "kpm-compose"]:
self._kpm()
elif self.media_type in ['helm', 'chart']:
self._chart()
else:
self._all_formats()
def _call(self):
self._init()
self._push()
def _render_dict(self):
return {
"manifest_name": self.manifest_name,
"package": self.package_name,
"version": self.version,
"media_type": self.media_type,
"channel": self.channel}
def _render_console(self):
return self.status
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/commands/push.py",
"copies": "2",
"size": "5965",
"license": "apache-2.0",
"hash": 1692158277801392400,
"line_mean": 38.5033112583,
"line_max": 99,
"alpha_frac": 0.6127409891,
"autogenerated": false,
"ratio": 4.006044325050369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.561878531415037,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
import copy
import json
import os
import re
import subprocess
import requests
import yaml
from appr.client import ApprClient
from appr.utils import parse_package_name, parse_version, split_package_name
from appr.render_jsonnet import RenderJsonnet
def _set_package(parser, namespace, dest, package_parts):
parsed_version = parse_version(package_parts['version'])
setattr(namespace, "registry_host", package_parts['host'])
setattr(namespace, 'version', parsed_version['value'])
setattr(namespace, 'version_parts', parsed_version)
package = "%s/%s" % (package_parts['namespace'], package_parts['package'])
setattr(namespace, dest, package)
setattr(namespace, "package_parts", package_parts)
class PackageName(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
name = value[0]
package_parts = parse_package_name(name)
_set_package(parser, namespace, self.dest, package_parts)
except ValueError as exc:
raise parser.error(str(exc))
class RegistryHost(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
setattr(namespace, self.dest, value[0])
class PackageSplit(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
name = value
package_parts = split_package_name(name)
_set_package(parser, namespace, self.dest, package_parts)
class LoadVariables(argparse.Action):
def _parse_cmd(self, var):
r = {}
try:
return json.loads(var)
except:
for v in var.split(","):
sp = re.match("(.+?)=(.+)", v)
if sp is None:
raise ValueError("Malformed variable: %s" % v)
key, value = sp.group(1), sp.group(2)
r[key] = value
return r
def _load_from_file(self, filename, ext):
with open(filename, 'r') as f:
if ext in ['.yml', '.yaml']:
return yaml.load(f.read())
elif ext == '.json':
return json.loads(f.read())
elif ext in [".jsonnet", "libjsonnet"]:
r = RenderJsonnet()
return r.render_jsonnet(f.read())
else:
raise ValueError("File extension is not in [yaml, json, jsonnet]: %s" % filename)
def load_variables(self, var):
_, ext = os.path.splitext(var)
if ext not in ['.yaml', '.yml', '.json', '.jsonnet']:
return self._parse_cmd(var)
else:
return self._load_from_file(var, ext)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(argparse._ensure_value(namespace, self.dest, {}))
try:
items.update(self.load_variables(values))
except ValueError as exc:
raise parser.error(option_string + ": " + str(exc))
setattr(namespace, self.dest, items)
class CommandBase(object):
name = 'command-base'
help_message = 'describe the command'
RegistryClient = ApprClient
default_media_type = "-"
parse_unknown = False
output_default = 'text'
def __init__(self, args_options, unknown=None):
self.unknown = unknown
self.args_options = args_options
self.output = args_options.output
def render(self):
if self.output == 'none':
return
elif self.output == 'json':
self._render_json()
elif self.output == 'yaml':
self._render_yaml()
else:
print(self._render_console())
def render_error(self, payload):
if self.output == 'json':
self._render_json(payload)
elif self.output == 'yaml':
self._render_yaml(payload)
else:
raise argparse.ArgumentTypeError("\n" + yaml.safe_dump(
payload, default_flow_style=False, width=float("inf")))
@classmethod
def call(cls, options, unknown=None, render=True):
# @TODO(ant31): all methods should have the 'unknown' parameter
if cls.parse_unknown:
obj = cls(options, unknown)
else:
obj = cls(options)
obj.exec_cmd(render=render)
def exec_cmd(self, render=True):
try:
self._call()
except requests.exceptions.RequestException as exc:
payload = {"message": str(exc)}
if exc.response is not None:
content = None
try:
content = exc.response.json()
except ValueError:
content = exc.response.content
payload["response"] = content
self.render_error(payload)
exit(2)
except subprocess.CalledProcessError as exc:
payload = {"message": str(exc.output)}
self.render_error(payload)
exit(exc.returncode)
if render:
self.render()
@classmethod
def add_parser(cls, subparsers, env=None):
parser = subparsers.add_parser(cls.name, help=cls.help_message)
cls._add_output_option(parser)
cls._add_arguments(parser)
parser.set_defaults(func=cls.call, env=env, which_cmd=cls.name,
parse_unknown=cls.parse_unknown)
def _render_json(self, value=None):
if not value:
value = self._render_dict()
print(json.dumps(value, indent=2, separators=(',', ': ')))
def _render_dict(self):
raise NotImplementedError
def _render_console(self):
raise NotImplementedError
def _render_yaml(self, value=None):
if not value:
value = self._render_dict()
print(yaml.safe_dump(value, default_flow_style=False, width=float("inf")))
def _call(self):
raise NotImplementedError
@classmethod
def _add_arguments(cls, parser):
raise NotImplementedError
@classmethod
def _add_registryhost_option(cls, parser):
parser.add_argument("-H", "--registry-host", default=None, help=argparse.SUPPRESS)
parser.add_argument("-k", "--insecure", action="store_true", default=False,
help="turn off verification of the https certificate")
parser.add_argument("--cacert", default=None,
help="CA certificate to verify peer against (SSL)")
@classmethod
def _add_output_option(cls, parser):
parser.add_argument("--output", default=cls.output_default, choices=[
'text', 'none', 'json', 'yaml'], help="output format")
@classmethod
def _add_mediatype_option(cls, parser, default="-", required=False):
default = os.getenv("APPR_DEFAULT_MEDIA_TYPE", default)
if default is not None:
required = False
parser.add_argument(
"-t", "--media-type", default=default, required=required,
help='package format: [kpm, kpm-compose, helm, docker-compose, kubernetes, appr]')
@classmethod
def _add_packagename_option(cls, parser):
parser.add_argument('package', nargs=1, default=None, action=PackageName,
help="package-name")
@classmethod
def _add_packagesplit_option(cls, parser):
parser.add_argument('package', nargs="?", default=None, action=PackageSplit,
help="registry-host.com/namespace/name")
@classmethod
def _add_packageversion_option(cls, parser):
parser.add_argument("-v", "--version", help="package VERSION", default='default')
@classmethod
def _add_registryhost_arg(cls, parser):
parser.add_argument("registry_host", nargs=1, action=RegistryHost, help='registry API url')
parser.add_argument("-k", "--insecure", action="store_true", default=False,
help="turn off verification of the https certificate")
parser.add_argument("--cacert", nargs='?', default=None,
help="CA certificate to verify peer against (SSL)")
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/commands/command_base.py",
"copies": "2",
"size": "8196",
"license": "apache-2.0",
"hash": 1446535286699183400,
"line_mean": 34.7903930131,
"line_max": 99,
"alpha_frac": 0.5907759883,
"autogenerated": false,
"ratio": 4.126888217522659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5717664205822658,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
import getpass
from builtins import input
from appr.commands.command_base import CommandBase, PackageSplit
class LoginCmd(CommandBase):
name = 'login'
help_message = "login"
def __init__(self, options):
super(LoginCmd, self).__init__(options)
self.registry_host = options.registry_host
self.signup = options.signup
self.password = options.password
self.email = options.email
self.user = options.user
self.status = None
self.ssl_verify = options.cacert or not options.insecure
self.package_parts = options.package_parts
pname = self.package_parts.get('package', None)
namespace = self.package_parts.get('namespace', None)
self.package = None
if pname:
self.package = "%s/%s" % (namespace, pname)
elif namespace:
self.package = namespace
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
parser.add_argument('registry', nargs='?', default=None, action=PackageSplit,
help="registry url: quay.io[/namespace][/repo]\n" +
"If namespace and/or repo are passed, creds only requested for it")
parser.add_argument("-s", "--signup", action='store_true', default=False,
help="Create a new account and login")
parser.add_argument("-u", "--user", nargs="?", default=None, help="username")
parser.add_argument("-p", "--password", nargs="?", default=None, help="password")
parser.add_argument("-e", "--email", nargs="?", default=None, help="email for signup")
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
if self.user is not None:
self.user = self.user
else:
self.user = input("Username: ")
if self.password is not None:
p1 = self.password
else:
p1 = getpass.getpass()
if self.signup:
if self.password is not None:
p2 = p1
else:
p2 = getpass.getpass('Password confirmation: ')
if self.email is not None:
email = self.email
else:
email = input("Email: ")
if p1 != p2:
raise argparse.ArgumentError("password", message="Error: password mismatch")
client.signup(self.user, p1, p2, email)
self.status = "Registration complete"
else:
client.login(self.user, p1, scope=self.package)
self.status = "Login succeeded"
def _render_dict(self):
return {"user": self.user, "status": self.status, "host": self.registry_host, "scope": self.package}
def _render_console(self):
return " >>> %s" % self.status
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/login.py",
"copies": "2",
"size": "2963",
"license": "apache-2.0",
"hash": -3439766426872732700,
"line_mean": 36.9871794872,
"line_max": 108,
"alpha_frac": 0.5821802227,
"autogenerated": false,
"ratio": 4.190947666195191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012633735984835764,
"num_lines": 78
} |
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import time
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import transformers
from src.modeling_highway_bert import DeeBertForSequenceClassification
from src.modeling_highway_roberta import DeeRobertaForSequenceClassification
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"bert": (BertConfig, DeeBertForSequenceClassification, BertTokenizer),
"roberta": (RobertaConfig, DeeRobertaForSequenceClassification, RobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_wanted_result(result):
if "spearmanr" in result:
print_result = result["spearmanr"]
elif "f1" in result:
print_result = result["f1"]
elif "mcc" in result:
print_result = result["mcc"]
elif "acc" in result:
print_result = result["acc"]
else:
raise ValueError("Primary metric unclear in the results")
return print_result
def train(args, train_dataset, model, tokenizer, train_highway=False):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
if train_highway:
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" in n) and (not any(nd in n for nd in no_decay))
],
"weight_decay": args.weight_decay,
},
{
"params": [
p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay))
],
"weight_decay": 0.0,
},
]
else:
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" not in n) and (not any(nd in n for nd in no_decay))
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" not in n) and (any(nd in n for nd in no_decay))
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args) # Added here for reproductibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
) # XLM, DistilBERT and RoBERTa don't use segment_ids
inputs["train_highway"] = train_highway
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)}
st = time.time()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
) # XLM, DistilBERT and RoBERTa don't use segment_ids
if output_layer >= 0:
inputs["output_layer"] = output_layer
outputs = model(**inputs)
if eval_highway:
exit_layer_counter[outputs[-1]] += 1
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_time = time.time() - st
logger.info("Eval time: {}".format(eval_time))
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
if eval_highway:
logger.info("Exit layer counter: {}".format(exit_layer_counter))
actual_cost = sum([l * c for l, c in exit_layer_counter.items()])
full_cost = len(eval_dataloader) * model.num_layers
logger.info("Expected saving: {}".format(actual_cost / full_cost))
if args.early_exit_entropy >= 0:
save_fname = (
args.plot_data_dir
+ "/"
+ args.model_name_or_path[2:]
+ "/entropy_{}.npy".format(args.early_exit_entropy)
)
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
print_result = get_wanted_result(result)
np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost / full_cost, print_result]))
logger.info("Entropy={}\tResult={:.2f}".format(args.early_exit_entropy, 100 * print_result))
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if features[0].token_type_ids is None:
# For RoBERTa (a potential bug!)
all_token_type_ids = torch.tensor([[0] * args.max_seq_length for f in features], dtype=torch.long)
else:
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--plot_data_dir",
default="./plotting/",
type=str,
required=False,
help="The directory to store data for plotting figures.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.")
parser.add_argument(
"--eval_after_first_stage",
action="store_true",
help="Set this flag to evaluate after training only bert (not highway).",
)
parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.bert.init_highway_pooler()
elif args.model_type == "roberta":
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.roberta.init_highway_pooler()
else:
raise NotImplementedError()
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.eval_after_first_stage:
result = evaluate(args, model, tokenizer, prefix="")
print_result = get_wanted_result(result)
train(args, train_dataset, model, tokenizer, train_highway=True)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
elif args.model_type == "roberta":
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
else:
raise NotImplementedError()
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway)
print_result = get_wanted_result(result)
logger.info("Result: {}".format(print_result))
if args.eval_each_highway:
last_layer_results = print_result
each_layer_results = []
for i in range(model.num_layers):
logger.info("\n")
_result = evaluate(
args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway
)
if i + 1 < model.num_layers:
each_layer_results.append(get_wanted_result(_result))
each_layer_results.append(last_layer_results)
save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy"
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
np.save(save_fname, np.array(each_layer_results))
info_str = "Score of each layer:"
for i in range(model.num_layers):
info_str += " {:.2f}".format(100 * each_layer_results[i])
logger.info(info_str)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| {
"repo_name": "huggingface/pytorch-transformers",
"path": "examples/research_projects/deebert/run_glue_deebert.py",
"copies": "1",
"size": "31704",
"license": "apache-2.0",
"hash": 1212722272058749000,
"line_mean": 42.4301369863,
"line_max": 150,
"alpha_frac": 0.6069581125,
"autogenerated": false,
"ratio": 3.7973409989220266,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49042991114220263,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
import itertools
import json
import os.path as path
import sys
import time
import traceback
import bottle
import nfldb
from nfldb.types import _play_categories, _player_categories
import nflvid
import nflfan
try:
strtype = basestring
except NameError:
strtype = str
# I am cheating a bit here by using a single DB connection for all requests.
# The best way to do things is still a bit unclear to me, although I suspect
# it might involve writing a simple Bottle plugin. But then I need a connection
# pool... *sigh*... Hasn't somebody solved this already? Suggestions please!
#
# TODO: I think psycopg2 has a connection pool feature. Use it.
db = None
"""A global database connection."""
conf = None
"""The nflfan configuration."""
web_path = path.split(__file__)[0]
"""The absolute path of the directory containing web assets."""
builtins = {}
"""An environment of builtin functions passed to every template."""
@bottle.get('/', name='home')
def v_home():
phase, season, week = nfldb.current(db)
params = { 'season': season, 'phase': phase, 'week': week }
url = bottle.default_app().get_url('v_games', **params)
bottle.response.status = 302
bottle.response.set_header('Location', url)
return ''
@bottle.get('/seasons/<season:int>/phases/<phase>'
'/weeks/<week:int>/games',
name='v_games')
def v_games(season, phase, week):
phase = as_phase(phase)
q = nfldb.Query(db).sort([('finished', 'asc'), ('gsis_id', 'asc')])
games = q.game(season_year=season, season_type=phase, week=week).as_games()
return template('games', season=season, phase=phase, week=week,
games=games)
@bottle.get('/query', name='v_query')
def v_query():
params = bottle.request.params
args = {}
if 'game_season_year' in params:
args['season'] = params.get('game_season_year')
if 'game_season_type' in params:
args['phase'] = as_phase(params.get('game_season_type'))
if 'game_week' in params:
args['week'] = params.get('game_week')
phase, season, week = nfldb.current(db)
args.setdefault('season', season)
args.setdefault('phase', phase)
args.setdefault('week', week)
return template('query', **args)
@bottle.get('/seasons/<season:int>/phases/<phase>'
'/weeks/<week:int>/leagues',
name='v_leagues')
def v_leagues(season, phase, week):
lgs = valid_leagues(leagues(season=season, phase=phase), week)
return template('leagues', season=season, phase=phase, week=week,
leagues=lgs)
@bottle.get('/seasons/<season:int>/phases/<phase>'
'/weeks/<week:int>/matchups',
name='v_matchups')
def v_matchups(season, phase, week):
lgs = valid_leagues(leagues(season=season, phase=phase), week)
return template('matchups', season=season, phase=phase, week=week,
leagues=lgs)
@bottle.get('/favicon.ico')
def v_favicon():
bottle.abort(404, "No favicon")
@bottle.get('/robots.txt')
def v_robots():
return bottle.static_file('robots.txt', root=web_path)
@bottle.get('/vid/<gsis_id>/<play_id>')
def static_vid(gsis_id, play_id):
root = path.join(conf.get('footage_pbp_path', ''), gsis_id)
return bottle.static_file(play_id, root=root)
@bottle.get('/css/<name:path>')
def static_css(name):
return bottle.static_file(name, root=path.join(web_path, 'css'))
@bottle.get('/js/<name:path>')
def static_js(name):
return bottle.static_file(name, root=path.join(web_path, 'js'))
@bottle.get('/fonts/<name:path>')
def static_fonts(name):
return bottle.static_file(name, root=path.join(web_path, 'fonts'))
def rest(f):
def _(*args, **kwargs):
bottle.response.content_type = 'application/json'
try:
return json.dumps(f(*args, **kwargs), indent=2)
except Exception as e:
bottle.response.content_type = 'text/plain'
bottle.response.status = 500
traceback.print_exc()
return str(e)
return _
@bottle.get('/v1/current', name='v1_current')
@rest
def rest_current():
phase, season, week = nfldb.current(db)
return {'phase': str(phase), 'season': season, 'week': week}
@bottle.get('/v1/seasons', name='v1_seasons')
@rest
def rest_seasons():
return range(2009, 2015)
@bottle.get('/v1/seasons/<season:int>/phases', name='v1_phases')
@rest
def rest_phases(season):
return ['Preseason', 'Regular', 'Postseason']
@bottle.get('/v1/seasons/<season:int>/phases/<phase>/weeks', name='v1_weeks')
@bottle.get('/v1/leagues/<lg>/weeks', name='v1_league_weeks')
@rest
def rest_weeks(season=None, phase=None, lg=None):
if lg is not None:
lg = league(lg)
return rest_weeks(season=lg.season, phase=lg.phase)
phase = as_phase(phase)
if phase == nfldb.Enums.season_phase.Preseason:
return range(0, 5)
elif phase == nfldb.Enums.season_phase.Regular:
return range(1, 18)
elif phase == nfldb.Enums.season_phase.Postseason:
return range(1, 5)
else:
assert False, 'unreachable'
@bottle.get('/v1/seasons/<season:int>/phases/<phase>'
'/weeks/<week:int>/games',
name='v1_games')
@bottle.get('/v1/games/<gsis_id>', name='v1_game')
@rest
def rest_games(season=None, phase=None, week=None, gsis_id=None):
if gsis_id is None:
q = nfldb.Query(db)
q.game(season_year=season, season_type=as_phase(phase), week=week)
return map(as_rest_game, nfldb_sort(q).as_games())
else:
return as_rest_game(nfldb.Game.from_id(db, gsis_id))
@bottle.get('/v1/games/<gsis_id>/drives', name='v1_drives')
@bottle.get('/v1/games/<gsis_id>/drives/<drive_id>', name='v1_drive')
@rest
def rest_drives(gsis_id, drive_id=None):
if drive_id is None:
q = nfldb.Query(db)
q.game(gsis_id=gsis_id)
return map(as_rest_drive, q.sort(('drive_id', 'asc')).as_drives())
else:
return as_rest_drive(nfldb.Drive.from_id(db, gsis_id, drive_id))
@bottle.get('/v1/plays', name='v1_plays')
@bottle.get('/v1/seasons/<season:int>/phases/<phase>/weeks/<week:int>/plays',
name='v1_week_plays')
@bottle.get('/v1/games/<gsis_id>/plays', name='v1_game_plays')
@bottle.get('/v1/games/<gsis_id>/drives/<drive_id>/plays',
name='v1_drive_plays')
@bottle.get('/v1/games/<gsis_id>/drives/<drive_id>/plays/<play_id>',
name='v1_drive_play')
@rest
def rest_plays(season=None, phase=None, week=None,
gsis_id=None, drive_id=None, play_id=None):
if play_id is None:
params = bottle.request.query
if None not in (season, phase, week):
params['game_season_year'] = season
params['game_season_type'] = as_phase(phase)
params['game_week'] = week
if gsis_id is not None:
params['game_gsis_id'] = gsis_id
if drive_id is not None:
params['drive_drive_id'] = drive_id
q = nfldb_query(params=params)
return map(as_rest_play, q.as_plays(fill=True))
else:
return as_rest_play(nfldb.Play.from_id(db, gsis_id, drive_id, play_id))
@bottle.get('/v1/seasons/<season:int>/phases/<phase>/weeks/<week:int>/players',
name='v1_week_players')
@bottle.get('/v1/games/<gsis_id>/players', name='v1_game_players')
@rest
def rest_players(season=None, phase=None, week=None, gsis_id=None):
phase = as_phase(phase)
q = nfldb.Query(db)
if None not in (season, phase, week):
q.game(season_year=season, season_type=phase, week=week)
if gsis_id is not None:
q.game(gsis_id=gsis_id)
return map(as_rest_player, q.sort(('full_name', 'asc')).as_players())
@bottle.get('/v1/players', name='v1_players')
@bottle.get('/v1/players/<player_id>', name='v1_player')
@rest
def rest_player(player_id=None):
if player_id is None:
bottle.abort(400, 'Cannot list all players.')
return as_rest_player(nfldb.Player.from_id(db, player_id))
@bottle.get('/v1/seasons/<season:int>/phases/<phase>/leagues',
name='v1_season_leagues')
@rest
def rest_season_leagues(season, phase):
lgs = leagues(season=season, phase=phase)
return map(as_rest_league, lgs)
@bottle.get('/v1/leagues', name='v1_leagues')
@bottle.get('/v1/leagues/<lg>', name='v1_league')
@rest
def rest_leagues(lg=None):
if lg is None:
return map(as_rest_league, leagues())
else:
lg = league(lg)
return as_rest_league(lg)
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/me', name='v1_me')
@rest
def rest_me(lg, week):
lg = league(lg)
if len(lg.conf.get('me', '')) == 0:
bottle.abort(400, '"me" is not configured for league "%s".'
% lg.full_name)
me_owner = lg.me(lg.owners(week))
if me_owner is None:
bottle.abort(404, 'Could not find owner matching "me" value "%s".'
% lg.full_name)
return as_rest_owner(me_owner)
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/owners', name='v1_owners')
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/owners/<owner>', name='v1_owner')
@rest
def rest_owners(lg, week, owner=None):
if owner is None:
return map(as_rest_owner, league(lg).owners(week))
else:
owner = no_none(league(lg).owner(week, owner), 'owner', owner)
return as_rest_owner(owner)
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/matchups', name='v1_matchups')
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/matchups/<matchup>',
name='v1_matchup')
@rest
def rest_matchups(lg, week, matchup=None):
if matchup is None:
return map(as_rest_matchup, league(lg).matchups(week))
else:
matchup = no_none(league(lg).matchup(week, matchup),
'matchup', matchup)
return as_rest_matchup(matchup)
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/players/<player_id>',
name='v1_player_score_details')
@rest
def rest_player_score_details(lg, week, player_id):
lg = league(lg)
q = nfldb.Query(db)
q.game(season_year=lg.season, season_type=lg.phase, week=week)
pp = q.play_player(player_id=player_id).as_aggregate()
if not pp:
return bottle.abort(404,
"No stats found for player with identifier '%s'."
% player_id)
pp = pp[0]
q = nfldb.Query(db)
q.game(season_year=lg.season, season_type=lg.phase, week=week)
q.play_player(player_id=player_id, kicking_fga=1)
fgs = q.as_play_players()
details = nflfan.score_details(lg.scoring, pp, fgs=fgs)
def as_rest_details(cat):
count, points = details[cat]
return {'name': cat, 'count': count, 'points': points}
return map(as_rest_details, sorted(details))
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/rosters',
name='v1_rosters')
@bottle.get('/v1/leagues/<lg>/weeks/<week:int>/rosters/<roster>',
name='v1_roster')
@rest
def rest_rosters(lg, week, roster=None):
if bottle.request.query.get('scored', '0') == '1':
score = scored_roster
else:
score = lambda _, y: y
lg = league(lg)
if roster is None:
def scored_rest(r):
return as_rest_roster(score(lg, r))
return map(scored_rest, lg.rosters(week))
else:
roster = no_none(lg.roster(week, roster), 'roster', roster)
return as_rest_roster(score(lg, roster))
@bottle.get('/v1/fields', name='v1_fields')
@rest
def rest_fields():
fields = {
'game': nfldb.Game.sql_fields(),
'drive': nfldb.Drive.sql_fields(),
'play': nfldb.Play.sql_fields(),
'play_player': nfldb.PlayPlayer.sql_fields(),
'aggregate': nfldb.PlayPlayer.sql_fields(),
'player': nfldb.Player.sql_fields(),
'stats_play': _play_categories,
'stats_play_player': _player_categories,
}
for k in fields:
fields[k] = sorted(fields[k])
return fields
@bottle.get('/v1/query/<entity>', name='v1_query')
@rest
def rest_query(entity):
if entity not in _as_type_funs:
bottle.abort(404, "Unknown entity type '%s' (valid types: %s)"
% (entity, ', '.join(_as_type_funs.keys())))
return nfldb_query_exec(entity)
def nfldb_query_exec(as_type):
'''
Builds a complete `nfldb.Query` and executes it, returning the
results as objects indicated by `as_type`. `as_type` should be
a string with one of the following values: `game`, `drive`,
`play`, `play_player`, `player` or `aggregate`.
Sorting criteria is also included.
Lists of any value are supported.
Basically, this is the REST interface to `nfldb.Query`.
'''
assert as_type in _as_type_funs
tyfuns = _as_type_funs[as_type]
results = tyfuns['query'](nfldb_query())
if 'filler' in tyfuns:
tyfuns['filler'](db, results)
return map(tyfuns['rest'], results)
def nfldb_query(params=None):
if params is None:
params = bottle.request.query
q = nfldb.Query(db)
aggregate = False
skip = ['limit', 'sort', 'my_players', 'refresh', 'refresh_count']
for param in params:
if param in skip:
continue
if '_' not in param:
return bottle.abort(500, "Unknown query parameter: %s" % param)
if param.startswith('play_player_'):
# blech
entity = 'play_player'
field = param.split('_', 2)[2]
else:
entity, field = param.split('_', 1)
if entity not in _query_funs:
return bottle.abort(500, "Unknown query parameter: %s" % param)
val = params.getall(param)
if len(val) == 0:
continue
elif len(val) == 1:
val = val[0]
if entity == 'aggregate':
aggregate = True
_query_funs[entity](q, **{field: val})
q = nfldb_sort(q)
# If 'my_players' is set, then try to find the season/phase/week and
# restrict results to only those including players in our rosters.
# We do this by retrieving all games matching the query and then use
# the resulting games to pinpoint (year, phase, week) values to use
# to add to the query.
if not aggregate and params.get('my_players', '0') == '1':
# This is deeply regrettable, but we must relax the sort/limit
# restriction, otherwise we might not get all of the games
# for this query.
# This would be unforgivable, but the `games` table is the smallest
# of the bunch, so perhaps we can get away with it.
old_sorts, q._sort_exprs = q._sort_exprs, None
old_limit, q._limit = q._limit, None
games = q.as_games()
q._limit, q._sort_exprs = old_limit, old_sorts
# Collect all (year, phase, week) and dedup them.
# Put them back into a list of dicts for easier mingling.
keys = set((g.season_year, g.season_type, g.week) for g in games)
keys = [{'season_year': k[0], 'season_type': k[1], 'week': k[2]}
for k in keys]
# Now we need to get all the player ids for all the rosters in all
# the leagues corresponding to the (year, phase, week)'s gathered.
pids = set()
for k in keys:
for lg in leagues(season=k['season_year'], phase=k['season_type']):
try:
my_roster = lg.me(lg.rosters(k['week']))
except IOError:
continue
if my_roster is None:
continue
for rp in my_roster.players:
if not rp.bench and rp.player_id is not None:
pids.add(rp.player_id)
# Now add the restriction to the current query.
q.player(player_id=list(pids))
return q
def nfldb_sort(q):
'''
Given an `nfldb.Query` object, apply the necessary sorting and
limit criteria to it from the request parameters.
'''
params = bottle.request.query
limit = min(500, param_int('limit', 20))
sorts = [] # param to pass to nfldb.Query().sort(...)
for field in params.getall('sort'):
if len(field) == 0:
continue
if field[0] == '-':
sorts.append((field[1:], 'desc'))
else:
if field[0] == '+':
field = field[1:]
sorts.append((field, 'asc'))
return q.sort(sorts).limit(limit)
def as_rest_league(lg):
return {
'season': lg.season,
'phase': str(lg.phase),
'ident': lg.ident,
'prov_name': lg.prov_name,
'name': lg.name,
'scoring_schema': lg.scoring.name,
}
def as_rest_owner(o):
return { 'ident': o.ident, 'name': o.name }
def as_rest_matchup(m):
return {
'owner1': as_rest_owner(m.owner1),
'owner2': as_rest_owner(m.owner2),
}
def as_rest_roster(r):
players = sorted(map(as_rest_roster_player, r.players),
key=lambda rp: rp['bench'])
gsis_ids = [rp['gsis_id'] for rp in players if rp['gsis_id']]
if len(gsis_ids) > 0:
games = nfldb.Query(db).game(gsis_id=gsis_ids).as_games()
games = dict([(g.gsis_id, as_rest_game(g)) for g in games])
for rp in players:
if rp['gsis_id']:
rp['game'] = games[rp['gsis_id']]
return {
'owner': as_rest_owner(r.owner),
'players': players,
}
def as_rest_roster_player(p):
return {
'position': p.position,
'team': p.team,
'bench': p.bench,
'season': p.season,
'week': p.week,
'gsis_id': p.game.gsis_id if p.game is not None else None,
'points': p.points,
'player_id': p.player_id,
'player': as_rest_player(p.player) if p.player is not None else None,
'game': None,
}
def as_rest_game(g):
return {
'away_score': g.away_score,
'away_team': g.away_team,
'away_turnovers': g.away_turnovers,
'home_score': g.home_score,
'home_team': g.home_team,
'home_turnovers': g.home_turnovers,
'day_of_week': str(g.day_of_week),
'finished': g.finished,
'gamekey': g.gamekey,
'gsis_id': g.gsis_id,
'is_playing': g.is_playing,
'loser': g.loser,
'winner': g.winner,
'phase': str(g.season_type),
'season': g.season_year,
'start_time': g.start_time.strftime('%Y-%m-%dT%H:%M:%S%z'),
'week': g.week,
}
def as_rest_drive(d):
obj = {
'drive_id': d.drive_id,
'start_field': str(d.start_field),
'end_field': str(d.end_field),
'start_time': str(d.start_time),
'end_time': str(d.end_time),
'first_downs': d.first_downs,
'gsis_id': d.gsis_id,
'penalty_yards': d.penalty_yards,
'play_count': d.play_count,
'pos_team': d.pos_team,
'pos_time': str(d.pos_time),
'result': d.result,
'yards_gained': d.yards_gained,
'game': None,
}
if d._game is not None:
obj['game'] = as_rest_game(d._game)
return obj
def as_rest_play(p):
d = {
'description': p.description,
'down': p.down,
'play_id': p.play_id,
'drive_id': p.drive_id,
'gsis_id': p.gsis_id,
'note': p.note,
'points': p.points,
'pos_team': p.pos_team,
'time': str(p.time),
'yardline': str(p.yardline),
'yards_to_go': p.yards_to_go,
'players': [],
'drive': None,
'fields': [],
'video_url': watch_play_url(p),
}
for field in nfldb.stat_categories.iterkeys():
v = getattr(p, field)
if v != 0:
d[field] = v
d['fields'].append(field)
d['fields'].sort()
if p._play_players is not None:
d['players'] = [pp.player_id for pp in p._play_players]
if p._drive is not None:
d['drive'] = as_rest_drive(p._drive)
return d
def as_rest_play_player(p):
d = {
'player_id': p.player_id,
'play_id': p.play_id,
'drive_id': p.drive_id,
'gsis_id': p.gsis_id,
'points': p.points,
'scoring_team': p.scoring_team,
'team': p.team,
'play': None,
'player': None,
'fields': [],
}
for field, cat in nfldb.stat_categories.iteritems():
if cat.category_type != nfldb.Enums.category_scope.player:
continue
v = getattr(p, field)
if v != 0:
d[field] = v
d['fields'].append(field)
d['fields'].sort()
if p._play is not None:
d['play'] = as_rest_play(p._play)
if p._player is not None:
d['player'] = as_rest_player(p._player)
return d
def as_rest_player(p):
return {
'player_id': p.player_id,
'birthdate': p.birthdate,
'college': p.college,
'first_name': p.first_name,
'last_name': p.last_name,
'full_name': p.full_name,
'gsis_name': p.gsis_name,
'height': p.height,
'position': str(p.position),
'profile_id': p.profile_id,
'profile_url': p.profile_url,
'status': str(p.status),
'team': p.team,
'uniform_number': p.uniform_number,
'weight': p.weight,
'years_pro': p.years_pro,
}
def as_phase(phase):
if isinstance(phase, nfldb.Enums.season_phase):
return phase
try:
return nfldb.Enums.season_phase[phase]
except KeyError:
bottle.abort(404, "Unknown phase '%s'" % phase)
def no_none(v, thing, key):
if v is None:
bottle.abort(400, "Could not find %s with id %s" % (thing, key))
return v
def watch_play_url(p):
pbp_path = conf.get('footage_pbp_path', '')
if pbp_path.startswith('http'):
# Video footage just isn't reliably available before 2011, so
# forcefully shut it off.
# We only do this with HTTP since HTTP is typically used to access
# Neulion's network, which objectively does not have play-by-play
# footage before 2011.
if int(p.gsis_id[0:4]) < 2011:
return None
g = nfldb.Game.from_id(db, p.gsis_id)
vs = dict({k: getattr(p, k) for k in p.__slots__},
yyyy=p.gsis_id[0:4], mm=p.gsis_id[4:6], dd=p.gsis_id[6:8])
vs = dict(vs, **{k: getattr(g, k) for k in g.__slots__})
return pbp_path.format(**vs)
play_path = nflvid.footage_play(pbp_path, p.gsis_id, p.play_id)
if play_path is not None:
return '/vid/%s/%04d.mp4' % (p.gsis_id, p.play_id)
return None
def scored_roster(lg, roster):
return nflfan.score_roster(db, lg.scoring, roster, phase=lg.phase)
def league(name):
lgs = [lgs[name] for lgs in conf['leagues'].itervalues() if name in lgs]
if len(lgs) == 0:
bottle.abort(404, "League '%s' not found." % name)
if len(lgs) >= 2:
bottle.abort(400, "League identifier '%s' is not unique." % name)
return lgs[0]
def leagues(season=None, phase=None):
leagues = []
if phase is not None:
phase = as_phase(phase)
for lgs in conf['leagues'].values():
for lg in lgs.values():
if season and lg.season != season:
continue
if phase and lg.phase != phase:
continue
leagues.append(lg)
return sorted(leagues, key=lambda lg: (-lg.season, lg.name))
def valid_leagues(lgs, week):
valid = []
for lg in lgs:
try:
lg._load(week)
valid.append(lg)
except IOError:
pass
return valid
def param_int(name, default=None):
try:
return int(bottle.request.query.get(name, default))
except ValueError:
return default
def builtin(f):
builtins[f.__name__] = f
return f
def template(*args, **kwargs):
for name, f in builtins.items():
if name not in kwargs:
kwargs[name] = f
return bottle.template(*args, **kwargs)
@builtin
def grouped(n, iterable):
it = itertools.izip_longest(*([iter(iterable)] * n))
return ([x for x in xs if x is not None] for xs in it)
def exec_time(cb):
def _(*args, **kwargs):
start = time.time()
r = cb(*args, **kwargs)
end = time.time()
# A hack to make the response time available in the template.
if isinstance(r, strtype):
r = r.replace('$exec_time$', str(end - start))
# Show response time on all requests via header.
bottle.response.headers['X-Exec-Time'] = str(end - start)
return r
return _
# Maps strings to appropriate `nfldb.Query` and REST transformation functions.
def _fill_plays_and_players(db, pps):
nfldb.PlayPlayer.fill_plays(db, pps)
nfldb.PlayPlayer.fill_players(db, pps)
_as_type_funs = {
'game': {'query': nfldb.Query.as_games, 'rest': as_rest_game},
'drive': {'query': nfldb.Query.as_drives,
'rest': as_rest_drive,
'filler': nfldb.Drive.fill_games},
'play': {'query': lambda q: q.as_plays(fill=False),
'rest': as_rest_play,
'filler': nfldb.Play.fill_drives},
'play_player': {'query': nfldb.Query.as_play_players,
'rest': as_rest_play_player,
'filler': _fill_plays_and_players},
'player': {'query': nfldb.Query.as_players, 'rest': as_rest_player},
'aggregate': {'query': nfldb.Query.as_aggregate,
'rest': as_rest_play_player,
'filler': nfldb.PlayPlayer.fill_players},
}
_query_funs = {
'game': nfldb.Query.game,
'drive': nfldb.Query.drive,
'play': nfldb.Query.play,
'play_player': nfldb.Query.play_player,
'player': nfldb.Query.player,
'aggregate': nfldb.Query.aggregate,
}
_ent_types = {
'game': nfldb.Game,
'drive': nfldb.Drive,
'play': nfldb.Play,
'player': nfldb.Player,
'play_player': nfldb.PlayPlayer,
'aggregate': nfldb.PlayPlayer,
}
def main():
global db, conf
p = argparse.ArgumentParser(
description='Run the NFLfan web interface.')
p.add_argument('--config', metavar='DIR', default='',
help='Configuration directory.')
p.add_argument('--debug', action='store_true',
help='Enable Bottle\'s debug mode.')
p.add_argument('--reload', action='store_true',
help='Enable Bottle\'s reloading functionality.')
p.add_argument('--port', type=int, default=8080)
p.add_argument('--host', default='localhost')
p.add_argument('--server', default='wsgiref',
help='The web server to use. You only need to change this '
'if you\'re running a production server.')
p.add_argument('--available-servers', action='store_true',
help='Shows list of available web server names and quits.')
args = p.parse_args()
if args.available_servers:
for name in sorted(bottle.server_names):
try:
__import__(name)
print(name)
except:
pass
sys.exit(0)
bottle.TEMPLATE_PATH.insert(0, path.join(web_path, 'tpl'))
db = nfldb.connect()
conf = nflfan.load_config(providers=nflfan.builtin_providers,
file_path=args.config)
builtins['db'] = db
builtins['conf'] = conf
bottle.install(exec_time)
bottle.run(server=args.server, host=args.host, port=args.port,
debug=args.debug, reloader=args.reload)
| {
"repo_name": "codeaudit/nflfan",
"path": "nflfan/web/__init__.py",
"copies": "2",
"size": "27652",
"license": "unlicense",
"hash": 5245347059701777000,
"line_mean": 30.6022857143,
"line_max": 80,
"alpha_frac": 0.5840445537,
"autogenerated": false,
"ratio": 3.1462054841278873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9728400026676518,
"avg_score": 0.000370002230273934,
"num_lines": 875
} |
from __future__ import absolute_import, division, print_function
import argparse
import os
from locale import getpreferredencoding
from os.path import abspath
from conda.compat import PY3
from conda_build.index import update_index
def main():
p = argparse.ArgumentParser(
description="Update package index metadata files in given directories")
p.add_argument('dir',
help='Directory that contains an index to be updated.',
nargs='*',
default=[os.getcwd()])
p.add_argument('-f', "--force",
action="store_true",
help="force reading all files")
p.add_argument('-q', "--quiet",
action="store_true")
args = p.parse_args()
dir_paths = [abspath(path) for path in args.dir]
# Don't use byte strings in Python 2
if not PY3:
dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]
for path in dir_paths:
update_index(path, verbose=(not args.quiet), force=args.force)
if __name__ == '__main__':
main()
| {
"repo_name": "takluyver/conda-build",
"path": "conda_build/main_index.py",
"copies": "2",
"size": "1097",
"license": "bsd-3-clause",
"hash": 482965938170548000,
"line_mean": 25.756097561,
"line_max": 79,
"alpha_frac": 0.6089334549,
"autogenerated": false,
"ratio": 4.078066914498141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
from __future__ import absolute_import, division, print_function
import argparse
import os
import tempfile
import subprocess
from copy import copy
from appr.commands.command_base import CommandBase
from appr.commands.pull import PullCmd
from appr.plugins.helm import Helm
LOCAL_DIR = os.path.dirname(__file__)
def helm_description(cmd, examples):
return """
Fetch a Chart from the app-registry and execute `helm {cmd}`.
Helm's options can be passed on the command:
$ appr helm {cmd} [APPR_OPTS] -- [HELM_OPTS]
{examples}
""".format(cmd=cmd, examples=examples)
class HelmCmd(CommandBase):
name = 'helm'
help_message = 'Deploy with Helm on Kubernetes'
parse_unknown = True
plugin_subcommands = ['dep', 'install', 'upgrade']
def __init__(self, options):
super(HelmCmd, self).__init__(options)
self.status = {}
def exec_helm_cmd(self, cmd, options, helm_opts):
pull_cmd = PullCmd(options)
pull_cmd.exec_cmd(render=False)
helm_cli = Helm()
try:
output = helm_cli.action(cmd, pull_cmd.path, helm_opts)
except subprocess.CalledProcessError as exc:
payload = {"message": str(exc.output)}
self.render_error(payload)
exit(exc.returncode)
self.status = {'result': output}
self.render()
@classmethod
def _install(cls, options, unknown=None):
cmd = cls(options)
cmd.exec_helm_cmd('install', options, unknown)
@classmethod
def _upgrade(cls, options, unknown=None):
cmd = cls(options)
cmd.exec_helm_cmd('upgrade', options, unknown)
@classmethod
def _dep_pull(cls, options, unknown=None):
cmd = cls(options)
helm_cli = Helm()
cmd.status = {'result': helm_cli.build_dep(dest=options.dest, overwrite=options.overwrite)}
cmd.render()
@classmethod
def _init_args(cls, subcmd):
cls._add_registryhost_option(subcmd)
cls._add_packagename_option(subcmd)
cls._add_packageversion_option(subcmd)
subcmd.add_argument('-t', '--media-type', default='helm', help=argparse.SUPPRESS)
subcmd.add_argument('--dest', default=tempfile.gettempdir(),
help='directory used to extract resources')
subcmd.add_argument('--tarball', action='store_true', default=True, help=argparse.SUPPRESS)
@classmethod
def _init_dep_args(cls, subcmd):
subcmd.add_argument('--dest', default="appr_charts",
help='directory used to extract resources')
subcmd.add_argument('--overwrite', action='store_true', default=False,
help="auto-merge requirements.yaml with the appr dependencies")
@classmethod
def _add_arguments(cls, parser):
from appr.commands.cli import get_parser, all_commands
sub = parser.add_subparsers()
install_cmd = sub.add_parser(
'install', help="Fetch the Chart and execute `helm install`",
formatter_class=argparse.RawDescriptionHelpFormatter, description=helm_description(
"install",
"$ appr helm install quay.io/ant31/cookieapp -- --set imageTag=v0.4.5 --namespace demo"
), epilog="\nhelm options:\n See 'helm install --help'")
upgrade_cmd = sub.add_parser(
'upgrade', help="Fetch the Chart and execute `helm upgrade`",
formatter_class=argparse.RawDescriptionHelpFormatter, description=helm_description(
"upgrade",
"$ appr helm upgrade quay.io/ant31/cookieapp -- release-name --set foo=bar --set foo=newbar"
), epilog="\nhelm options:\n See 'helm upgrade --help'")
dep_pull_cmd = sub.add_parser(
'dep', help="Download Charts from the requirements.yaml using app-registry")
cls._init_dep_args(dep_pull_cmd)
cls._init_args(install_cmd)
cls._init_args(upgrade_cmd)
install_cmd.set_defaults(func=cls._install)
upgrade_cmd.set_defaults(func=cls._upgrade)
dep_pull_cmd.set_defaults(func=cls._dep_pull)
other_cmds = copy(all_commands())
other_cmds.pop("helm")
get_parser(other_cmds, parser, sub, {'APPR_DEFAULT_MEDIA_TYPE': 'helm'})
def _render_dict(self):
return self.status
def _render_console(self):
return self.status['result']
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/commands/helm.py",
"copies": "2",
"size": "4398",
"license": "apache-2.0",
"hash": 6593963398951817000,
"line_mean": 37.2434782609,
"line_max": 108,
"alpha_frac": 0.6293769895,
"autogenerated": false,
"ratio": 3.758974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000914655132803055,
"num_lines": 115
} |
from __future__ import absolute_import, division, print_function
import argparse
import os
from appr.commands.channel import ChannelCmd
from appr.commands.config import ConfigCmd
from appr.commands.delete_package import DeletePackageCmd
from appr.commands.helm import HelmCmd
from appr.commands.inspect import InspectCmd
from appr.commands.list_package import ListPackageCmd
from appr.commands.login import LoginCmd
from appr.commands.logout import LogoutCmd
from appr.commands.plugins import PluginsCmd
from appr.commands.pull import PullCmd
from appr.commands.push import PushCmd
from appr.commands.jsonnet import JsonnetCmd
from appr.commands.runserver import RunServerCmd
from appr.commands.show import ShowCmd
from appr.commands.version import VersionCmd
from appr.commands.deploy import DeployCmd
def all_commands():
return {
InspectCmd.name: InspectCmd,
PushCmd.name: PushCmd,
VersionCmd.name: VersionCmd,
PullCmd.name: PullCmd,
ShowCmd.name: ShowCmd,
LoginCmd.name: LoginCmd,
LogoutCmd.name: LogoutCmd,
ChannelCmd.name: ChannelCmd,
DeletePackageCmd.name: DeletePackageCmd,
PluginsCmd.name: PluginsCmd,
ConfigCmd.name: ConfigCmd,
DeployCmd.name: DeployCmd,
ListPackageCmd.name: ListPackageCmd,
HelmCmd.name: HelmCmd,
RunServerCmd.name: RunServerCmd,
JsonnetCmd.name: JsonnetCmd, }
def get_parser(commands, parser=None, subparsers=None, env=None):
if parser is None:
parser = argparse.ArgumentParser()
if subparsers is None:
subparsers = parser.add_subparsers(help='command help')
for command_class in commands.values():
command_class.add_parser(subparsers, env)
return parser
def set_cmd_env(env):
""" Allow commands to Set environment variables after being called """
if env is not None:
for key, value in env.items():
os.environ[key] = value
def cli():
try:
parser = get_parser(all_commands())
unknown = None
args, unknown = parser.parse_known_args()
set_cmd_env(args.env)
if args.parse_unknown:
args.func(args, unknown)
else:
args = parser.parse_args()
args.func(args)
except (argparse.ArgumentTypeError, argparse.ArgumentError) as exc:
if os.getenv("APPR_DEBUG", "false") == "true":
raise
else:
parser.error(exc.message)
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/cli.py",
"copies": "2",
"size": "2478",
"license": "apache-2.0",
"hash": -4438700487592596000,
"line_mean": 29.975,
"line_max": 74,
"alpha_frac": 0.6916868442,
"autogenerated": false,
"ratio": 3.8359133126934983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5527600156893498,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from PyAstronomy import pyasl
'''
Remove spikes (tellurics) from continuum-normalized apVisit APOGEE spectra.
Typically you will run this after apVisit2input.py, which finds the apVisit
spectra downloaded via the python apogee module for one target and
continuum-normalizes them. If you somehow have an apVisit-style FITS file
that is continuum-normalized already (or close), this can despike it too.
First, read_infiles reads in wavelengths and fluxes for a list of spectra;
Then, despike_spectra despikes the spectra with one of two techniques.
Usage
-----
python despike.py -d datapath -i filelist
datapath: /path/to/filelist/and/files/listed/in/filelist
filelist: Single-column text file with list of *continuum-normalized*
single-visit APOGEE files you want to despike.
The files in this list can either be two-column text files
(wave, flux) or FITS files in the apVisit format.
Result
------
The new despiked spectra are written to two-column (wavelength, flux) files
with similar names as the original, but they now end in '_despiked.txt'.
'''
def main():
'''
Parse arguments, despike a set of spectra, and write the results to files.
'''
# parse command line arguments with argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='filelist', required=True,
help='text file containing a list of spectra')
parser.add_argument('-d', dest='datapath', required=True,
help='path to where filelist and files in filelist live')
args = parser.parse_args()
filelist = args.filelist
datapath = args.datapath
if not os.path.exists(os.path.join(datapath, filelist)):
raise argparse.ArgumentTypeError("{0} does not exist".format(filelist))
infilelist, wavelist, speclist = read_infiles(datapath, filelist)
newwavelist, newspeclist = despike_spectra(wavelist, speclist)
# write out a set of two-column text files,
# each containing one element of newwavelist and one element of newspeclist
for file, newwave, newspec in zip(infilelist, newwavelist, newspeclist):
# create outfile based on infile name
outfile = os.path.splitext(file)[0] + '_despiked.txt'
with open(outfile, 'w') as f:
for wentry, sentry in zip(newwave, newspec):
print(wentry, sentry, file=f)
return
def read_infiles(datapath, filelist, isFits=False):
'''
Load a text file containing a list of continuum-normalized spectra
Parameters
----------
datapath: `str`
Path to the directory containing both filelist and the files therein
filelist: `str`
Name of a text file containing a list of continuum-normalized apVisit
spectra you want to despike. The files in this list can either be
two-column text files (wave, flux) or FITS files in the apVisit format.
isFits: `bool`
True if the files listed in filelist are FITS files, else False.
Returns
-------
infilelist: `list`
The full path to each spectrum file in filelist
wavelist: `list`
A list of lists containing wavelength values for each spectrum
speclist: `list`
A list of lists containing flux values for the same spectra
'''
print(datapath, filelist)
wavelist = []
speclist = []
with open(os.path.join(datapath, filelist)) as f1:
infilelist = [] # for use later to make outfiles
if isFits: # it's a FITS file
for line in f1:
infile = line.rstrip()
infile = os.path.join(datapath, infile)
infilelist.append(infile)
with fits.open(infile) as hdu:
# APOGEE! the data is in a funny place and backwards
wave = hdu[4].data.flatten()
wave = wave[::-1]
spec = hdu[1].data
spec = spec.flatten()
spec = spec[::-1]
spec = spec / np.median(spec) # put the continuum roughly near 1
wavelist.append(wave)
speclist.append(spec)
else: # it's a text file
for line in f1:
infile = line.rstrip()
infile = os.path.join(datapath, infile)
infilelist.append(infile)
wave, spec = np.loadtxt(infile, usecols=(0, 1), unpack=True)
wavelist.append(wave)
speclist.append(spec)
return infilelist, wavelist, speclist
def simpledespike(wave, spec, delwindow=6, stdfactorup=0.7, stdfactordown=3, plot=True):
'''
Implement a simple despiking routine based on the stdev of 1D fluxes
All outlier points are deleted from wave, spec to yield newwave, newspec.
Parameters
----------
wave: `list`
A 1D list of wavelength values for one spectrum
spec: `list`
A 1D list of flux values for the same spectrum
delwindow: `int`
Around each outlier (upward spike), adjacent points in a window of +/-
delwindow are also flagged as outliers
stdfactorup: `float`
Outliers (upward spikes) are identified as exceeding stdfactorup*sigma
above the continuum
stdfactordown: `float`
Additional outliers (downward spikes) are identified as exceeding
stdfactordown*sigma below the continuum
plot: `bool`
True = show an interactive plot of each spectrum as it is despiked
Returns
-------
newwave: `list`
A 1D list of wavelength values for one despiked spectrum
newspec: `list`
A 1D list of flux values for the same despiked spectrum
'''
pointstodelete = []
outliers = (np.where((spec > 1.0 + stdfactorup*np.std(spec)) |
(spec < 1.0 - stdfactordown*np.std(spec))))[0]
for point in outliers: # add +/- delwindow points around each outlier
pointstodelete.extend(range(point-delwindow, point+delwindow+1))
pointstodelete = [point for point in pointstodelete if point >= 0]
newwave, newspec = np.delete(wave, pointstodelete), np.delete(spec, pointstodelete)
if plot:
plt.plot(wave, spec)
plt.plot(newwave, newspec, color='r')
plt.xlabel('Wavelength ({\AA})')
plt.ylabel('Normalized flux')
plt.axhline(y=(1 + stdfactorup*np.std(spec)), ls=':', color='g')
plt.axhline(y=(1 - stdfactordown*np.std(spec)), ls=':', color='g')
plt.show()
return newwave, newspec
def generalizedESDdespike(wave, spec, maxOLs=1000, alpha=5000):
'''
Implement a complicated despiking routine from PyAstronomy
(this function is not tested)
'''
r = pyasl.pointDistGESD(spec, maxOLs, alpha)
# r[0] is number of outliers found, r[i] is indices of outliers
# maxOLs is max outliers that may be identified; increase alpha to find more
newwave, newspec = np.delete(wave, r[1]), np.delete(spec, r[1])
return newwave, newspec
def despike_spectra(wavelist, speclist, type='simple', plot=True):
'''
Use one of two techniques to remove spikes from a series of spectra.
Parameters
----------
wavelist: `list`
Input list of wavelength arrays
speclist: `list`
Input list of corresponding flux arrays (for 1D spectra)
type: `str`
type='simple' is recommended (see simpledespike)
type=<anything not 'simple'> will use generalizedESDdespike instead
plot: `bool`
True = show an interactive plot of each spectrum as it is despiked
Returns
-------
newwavelist: `list`
A list of lists containing wavelength values for each despiked spectrum
newspeclist: `list`
A list of lists containing flux values the same despiked spectra
'''
newwavelist = []
newspeclist = []
for wave, spec in zip(wavelist, speclist):
if type == 'simple':
delwindow = 6
stdfactorup = 0.7
stdfactordown = 3
newwave, newspec = simpledespike(wave, spec,
delwindow=delwindow,
stdfactorup=stdfactorup,
stdfactordown=stdfactordown,
plot=plot)
else:
newwave, newspec = generalizedESDdespike(wave, spec, maxOLs=1000, alpha=5000)
newwavelist.append(newwave)
newspeclist.append(newspec)
return newwavelist, newspeclist
if __name__ == '__main__':
main()
| {
"repo_name": "mrawls/apVisitproc",
"path": "apvisitproc/despike.py",
"copies": "1",
"size": "8761",
"license": "mit",
"hash": 2588504249382588000,
"line_mean": 36.9264069264,
"line_max": 89,
"alpha_frac": 0.634174181,
"autogenerated": false,
"ratio": 3.9410706252811516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005136207496003238,
"num_lines": 231
} |
from __future__ import absolute_import, division, print_function
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import apogee.tools.read as apread
from apogee.spec import continuum
'''
Create continuum-normalized text file spectra from a set of APOGEE visit spectra.
This program assumes you are planning to do a broadening function analysis on
the spectra to measure radial velocities, and thus refers to a single "model"
spectrum and a time series of "target" spectra.
Written by Meredith Rawls
Usage
------
$ python apVisit2input.py -d datadir -k KIC
This assumes the apVisit metadata file is datadir/KIC/KICVisitlist.txt, e.g.,
$ python apVisit2input.py -d data -k 1234567
Result
------
A list of the new text file spectra created is printed to the terminal.
Each spectrum's date of observation (HJD) and barycentric velocity (BCV) is
also printed out for easy reference.
'''
def main():
'''
Parse arguments, create new spectrum text files, and print useful info.
More specifically...
- Read datadir and KIC from command line arguments
- Load data for all the visits for the target specified
- Loop over all the visits, normalize the spectra, and save to file
- Print the names of the new files with their HJDs and BCVs
'''
# parse command line arguments with argparse
parser = argparse.ArgumentParser(description='Run with python apVisit2input.py -d datadir -k KIC')
parser.add_argument('-d', dest='datadir', required=True,
help='directory containing KIC subdirectories')
parser.add_argument('-k', dest='KIC', required=True,
help='KIC of target')
args = parser.parse_args()
datadir = args.datadir
KIC = args.KIC
locIDs, mjds, fiberIDs = load_allvisitinfo(datadir, KIC)
print('New spectrum file (despiked), HJD, and BCV:')
# loop over all visit spectra
for locID, mjd, fiberID in zip(locIDs, mjds, fiberIDs):
fitsfilepath, specfileout, wave, flux, fluxerr = load_apVisit(datadir, KIC, locID, mjd, fiberID)
specnorm, specnormerr = normalize_spec(wave, flux, fluxerr, plot=True)
with open(specfileout, 'w') as f:
for wavepoint, specpoint, specnormpoint in zip(wave, specnorm, specnormerr):
print(wavepoint, specpoint, specnormpoint, file=f)
HJD, BCV = make_BFinfile(fitsfilepath)
print(specfileout, HJD, BCV)
return
def load_allvisitinfo(datadir, KIC):
'''
Retrieve necessary metadata from an apVisit metadata file for a target
NOTE: the user must have an apVisit metadata file already (see README)
An example file of the correct format is in sample_Visitlist.txt
Parameters
----------
datadir: `str`
The directory which must contain 'KIC/KICVisitlist.txt'
KIC: `str`
The ID or name of the target. Suggested to be the 7-8 digit Kepler
identifier, but may be any string that is part of datadir
Returns
-------
locIDs: `list`
The location IDs for each spectrum, typically 4 digits
mjds: `list`
The date of observation in MJD of each spectrum, typically 5 digits
fiberIDs: `list`
The ID of the fiber used for each spectrum, typically 3 digits
'''
visitlist = os.path.join(datadir, KIC, KIC + 'Visitlist.txt')
if not os.path.exists(visitlist):
raise argparse.ArgumentTypeError("{0} does not exist".format(visitlist))
locIDs, mjds, fiberIDs = np.loadtxt(visitlist, usecols=(1, 2, 3),
unpack=True, delimiter=',')
return locIDs, mjds, fiberIDs
def load_apVisit(datadir, KIC, locID, mjd, fiberID):
'''
Returns original and new filenames plus raw data for one apVisit spectrum
NOTE: currently only works for data releases DR12 and DR13
Parameters
----------
datadir: `str`
The directory which must contain 'KIC/KICVisitlist.txt'
KIC: `str`
The ID or name of the target. Suggested to be the 7-8 digit Kepler
identifier, but may be any string that is part of datadir
locID: `float`
The location IDs for one spectrum, typically 4 digits
mjd: `float`
The date of observation in MJD of one spectrum, typically 5 digits
fiberID: `float`
The ID of the fiber used of one spectrum, typically 3 digits
Returns
-------
fitsfilepath: `str`
The path to the original apVisit file, as determined by the apogee module
specfileout: `str`
The path to the new spectrum text file that will be created
wave: `list`
A 1D list of wavelength values for one spectrum
flux: `list`
A 1D list of flux values for the same spectrum
fluxerr: `list`
A 1D list of flux error values for the same spectrum
'''
locID = str(int('{:04d}'.format(int(locID))))
mjd = str('{:05d}'.format(int(mjd)))
fiberID = str('{:03d}'.format(int(fiberID)))
specfileout = os.path.join(datadir, KIC, 'apVisitnorm-'+locID+'-'+mjd+'-'+fiberID+'.txt')
wave = apread.apVisit(int(locID), mjd, fiberID, ext=4, header=False)
flux = apread.apVisit(int(locID), mjd, fiberID, ext=1, header=False)
fluxerr = apread.apVisit(int(locID), mjd, fiberID, ext=2, header=False)
SDSS_PATH = os.environ.get('SDSS_LOCAL_SAS_MIRROR')
SDSS_VERSION = os.environ.get('RESULTS_VERS')
if SDSS_PATH is None:
raise RuntimeError('You haven\'t defined the environment variable SDSS_LOCAL_SAS_MIRROR')
if SDSS_VERSION == 'v603':
drnum = 'dr12'
rnum = 'r5'
elif SDSS_VERSION == 'l30e.2':
drnum = 'dr13'
rnum = 'r6'
else:
raise RuntimeError('You don\'t appear to be using DR12 or DR13, cannot proceed')
fitsfile = 'apVisit-' + rnum + '-' + locID + '-' + mjd + '-' + fiberID + '.fits'
fitsfilepath = os.path.join(SDSS_PATH, drnum, 'apogee', 'spectro', 'redux', rnum,
'apo25m', locID, mjd, fitsfile)
return fitsfilepath, specfileout, wave, flux, fluxerr
def normalize_spec(wave, flux, fluxerr, plot=True):
'''
Continuum normalize a single spectrum using the apogee module normalizer
Parameters
----------
wave: `list`
A 1D list of wavelength values for one spectrum
flux: `list`
A 1D list of flux values for the same spectrum
fluxerr: `list`
A 1D list of flux error values for the same spectrum
plot: `bool`
Choose whether to show an interactive plot of the continuum
normalization process for visual inspection
Returns
-------
specnorm: `list`
A 1D list of new normalized flux values for a spectrum (goes with wave)
specnormerr: `list`
A 1D list of normalized flux errors for the same spectrum
'''
contspec = continuum.fitApvisit(flux, fluxerr, wave)
specnorm = flux/contspec
specnormerr = fluxerr/contspec
if plot:
plt.plot(wave, flux)
plt.plot(wave, contspec, lw=2, color='r')
plt.show()
plt.plot(wave, specnorm)
plt.show()
return specnorm, specnormerr
def make_BFinfile(fitsfilepath):
'''
Return info about one spectrum that is useful for the broadening function
Parameters
----------
fitsfilepath: `str`
The path to an apVisit file, as determined by the apogee module
Returns
-------
HJD: `float`
Time of observation from the FITS header, in heliocentric julian days
BCV: `float`
Barycentric velocity from the FITS header, in km/s
'''
header = fits.open(fitsfilepath)[0].header
HJD = float('24'+str(header['HJD']))
BCV = header['BC']
return HJD, BCV
if __name__ == '__main__':
main()
| {
"repo_name": "mrawls/apVisitproc",
"path": "apvisitproc/apVisit2input.py",
"copies": "1",
"size": "7800",
"license": "mit",
"hash": 1459916428775731200,
"line_mean": 34.6164383562,
"line_max": 104,
"alpha_frac": 0.6564102564,
"autogenerated": false,
"ratio": 3.563270899954317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971688034875662,
"avg_score": 0.0005601615195393702,
"num_lines": 219
} |
from __future__ import absolute_import, division, print_function
import argparse
import subprocess
from distutils.spawn import find_executable
SUMATRAPDF_NOT_FOUND_MESSAGE = ( 'SumatraPDF executable not found. '
'You should add it to the PATH.' )
GVIM_NOT_FOUND_MESSAGE = ( 'Gvim executable not found. '
'You should add it to the PATH.' )
VIM_FOCUS_RETRIES_NUMBER = 5
class SumatraVim():
def __init__(self, pdf, servername = 'GVIM'):
self.pdf = pdf
self.sumatraPDF = find_executable('SumatraPDF')
if not self.sumatraPDF:
raise RuntimeError(SUMATRAPDF_NOT_FOUND_MESSAGE)
self.gvim = find_executable('gvim')
if not self.gvim:
raise RuntimeError(GVIM_NOT_FOUND_MESSAGE)
self.servername = servername
def Open(self):
return self.Execute()
def ForwardSearch(self, tex, line):
return self.Execute('-forward-search', tex, line)
def ForceFocusVim(self):
retries = VIM_FOCUS_RETRIES_NUMBER
while retries > 0:
self.FocusVim()
retries = retries - 1
def FocusVim(self):
gvim_cmd = [
self.gvim,
'--servername', self.servername,
'--remote-send', ':<C-E><C-U>call foreground() | echo<CR>'
]
subprocess.call(gvim_cmd)
def Execute(self, *args):
full_cmd = [self.sumatraPDF, self.pdf,
'-reuse-instance', '-inverse-search']
full_cmd.append(
self.gvim + ' --servername ' +
self.servername + ' --remote-send '
'":<C-E><C-U>:execute \'drop \' . escape(\'%f\', \' \') | '
':call foreground() | echo<CR>%lGzvzz"'
)
full_cmd.extend(list(args))
self.proc = subprocess.Popen(full_cmd)
def ParseArguments():
parser = argparse.ArgumentParser(
description = 'SumatraVim, a SumatraPDF wrapper to improve '
'interaction between Vim, LaTeX, and SumatraPDF.')
parser.add_argument('--servername', default='GVIM',
metavar='servername',
help='Vim server name (default: %(default)s)')
parser.add_argument('--forward-search', nargs=2,
metavar=('tex', 'line'),
help='Forward search')
parser.add_argument('pdf',
help='PDF file')
return parser.parse_args()
def CallSumatraVim(args):
sumatra_vim = SumatraVim(args.pdf, args.servername)
if args.forward_search:
sumatra_vim.ForwardSearch(args.forward_search[0],
args.forward_search[1])
sumatra_vim.ForceFocusVim()
return
sumatra_vim.Open()
sumatra_vim.ForceFocusVim()
def Main():
"""
SumatraVim, a SumatraPDF wrapper to improve
interaction between Vim, LaTeX, and SumatraPDF.
"""
args = ParseArguments()
CallSumatraVim(args)
| {
"repo_name": "micbou/sumatravim",
"path": "sumatravim/__main__.py",
"copies": "1",
"size": "3001",
"license": "mit",
"hash": -8434811927219500000,
"line_mean": 29.3131313131,
"line_max": 72,
"alpha_frac": 0.5698100633,
"autogenerated": false,
"ratio": 3.7418952618453867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9795728567885267,
"avg_score": 0.0031953514520240748,
"num_lines": 99
} |
from __future__ import absolute_import, division, print_function
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', required=True)
parser.add_argument('--snapshot_name', required=True)
parser.add_argument('--test_split', required=True)
parser.add_argument('--gpu_id', type=int, default=0)
args = parser.parse_args()
gpu_id = args.gpu_id # set GPU id to use
import os; os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
import numpy as np
import tensorflow as tf
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
import json
from models_shapes.nmn3_assembler import Assembler
from models_shapes.nmn3_model import NMN3ModelAtt
# Module parameters
H_im = 30
W_im = 30
num_choices = 2
embed_dim_txt = 300
embed_dim_nmn = 300
lstm_dim = 256
num_layers = 2
encoder_dropout = False
decoder_dropout = False
decoder_sampling = False
T_encoder = 15
T_decoder = 11
N = 256
exp_name = args.exp_name
snapshot_name = args.snapshot_name
snapshot_file = './exp_shapes/tfmodel/%s/%s' % (exp_name, snapshot_name)
# Data files
vocab_shape_file = './exp_shapes/data/vocabulary_shape.txt'
vocab_layout_file = './exp_shapes/data/vocabulary_layout.txt'
image_sets = args.test_split.split(':')
training_text_files = './exp_shapes/shapes_dataset/%s.query_str.txt'
training_image_files = './exp_shapes/shapes_dataset/%s.input.npy'
training_label_files = './exp_shapes/shapes_dataset/%s.output'
training_gt_layout_file = './exp_shapes/data/%s.query_layout_symbols.json'
image_mean_file = './exp_shapes/data/image_mean.npy'
save_dir = './exp_shapes/results/%s/%s.%s' % (exp_name, snapshot_name, '_'.join(image_sets))
save_file = save_dir + '.txt'
os.makedirs(save_dir, exist_ok=True)
# Load vocabulary
with open(vocab_shape_file) as f:
vocab_shape_list = [s.strip() for s in f.readlines()]
vocab_shape_dict = {vocab_shape_list[n]:n for n in range(len(vocab_shape_list))}
num_vocab_txt = len(vocab_shape_list)
assembler = Assembler(vocab_layout_file)
num_vocab_nmn = len(assembler.module_names)
# Load training data
training_questions = []
training_labels = []
training_images_list = []
gt_layout_list = []
for image_set in image_sets:
with open(training_text_files % image_set) as f:
training_questions += [l.strip() for l in f.readlines()]
with open(training_label_files % image_set) as f:
training_labels += [l.strip() == 'true' for l in f.readlines()]
training_images_list.append(np.load(training_image_files % image_set))
with open(training_gt_layout_file % image_set) as f:
gt_layout_list += json.load(f)
num_questions = len(training_questions)
training_images = np.concatenate(training_images_list)
# Shuffle the training data
# fix random seed for data repeatibility
np.random.seed(3)
shuffle_inds = np.random.permutation(num_questions)
training_questions = [training_questions[idx] for idx in shuffle_inds]
training_labels = [training_labels[idx] for idx in shuffle_inds]
training_images = training_images[shuffle_inds]
gt_layout_list = [gt_layout_list[idx] for idx in shuffle_inds]
# number of training batches
num_batches = np.ceil(num_questions / N)
# Turn the questions into vocabulary indices
text_seq_array = np.zeros((T_encoder, num_questions), np.int32)
seq_length_array = np.zeros(num_questions, np.int32)
gt_layout_array = np.zeros((T_decoder, num_questions), np.int32)
for n_q in range(num_questions):
tokens = training_questions[n_q].split()
seq_length_array[n_q] = len(tokens)
for t in range(len(tokens)):
text_seq_array[t, n_q] = vocab_shape_dict[tokens[t]]
gt_layout_array[:, n_q] = assembler.module_list2tokens(
gt_layout_list[n_q], T_decoder)
image_mean = np.load(image_mean_file)
image_array = (training_images - image_mean).astype(np.float32)
vqa_label_array = np.array(training_labels, np.int32)
# Network inputs
text_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_batch = tf.placeholder(tf.float32, [None, H_im, W_im, 3])
expr_validity_batch = tf.placeholder(tf.bool, [None])
# The model
nmn3_model = NMN3ModelAtt(image_batch, text_seq_batch,
seq_length_batch, T_decoder=T_decoder,
num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,
num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,
lstm_dim=lstm_dim,
num_layers=num_layers, EOS_idx=assembler.EOS_idx,
encoder_dropout=encoder_dropout,
decoder_dropout=decoder_dropout,
decoder_sampling=decoder_sampling,
num_choices=num_choices)
compiler = nmn3_model.compiler
scores = nmn3_model.scores
snapshot_saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
snapshot_saver.restore(sess, snapshot_file)
answer_correct = 0
layout_correct = 0
layout_valid = 0
for n_iter in range(int(num_batches)):
n_begin = int((n_iter % num_batches)*N)
n_end = int(min(n_begin+N, num_questions))
# set up input and output tensors
h = sess.partial_run_setup(
[nmn3_model.predicted_tokens, scores],
[text_seq_batch, seq_length_batch, image_batch,
compiler.loom_input_tensor, expr_validity_batch])
# Part 0 & 1: Run Convnet and generate module layout
tokens = sess.partial_run(h, nmn3_model.predicted_tokens,
feed_dict={text_seq_batch: text_seq_array[:, n_begin:n_end],
seq_length_batch: seq_length_array[n_begin:n_end],
image_batch: image_array[n_begin:n_end]})
# compute the accuracy of the predicted layout
gt_tokens = gt_layout_array[:, n_begin:n_end]
layout_correct += np.sum(np.all(np.logical_or(tokens == gt_tokens,
gt_tokens == assembler.EOS_idx),
axis=0))
# Assemble the layout tokens into network structure
expr_list, expr_validity_array = assembler.assemble(tokens)
layout_valid += np.sum(expr_validity_array)
labels = vqa_label_array[n_begin:n_end]
# Build TensorFlow Fold input for NMN
expr_feed = compiler.build_feed_dict(expr_list)
expr_feed[expr_validity_batch] = expr_validity_array
# Part 2: Run NMN and learning steps
scores_val = sess.partial_run(h, scores, feed_dict=expr_feed)
# compute accuracy
predictions = np.argmax(scores_val, axis=1)
answer_correct += np.sum(np.logical_and(expr_validity_array,
predictions == labels))
answer_accuracy = answer_correct / num_questions
layout_accuracy = layout_correct / num_questions
layout_validity = layout_valid / num_questions
print("answer accuracy =", answer_accuracy, "on", '_'.join(image_sets))
print("layout accuracy =", layout_accuracy, "on", '_'.join(image_sets))
print("layout validity =", layout_validity, "on", '_'.join(image_sets))
with open(save_file, 'w') as f:
print("answer accuracy =", answer_accuracy, "on", '_'.join(image_sets), file=f)
print("layout accuracy =", layout_accuracy, "on", '_'.join(image_sets), file=f)
print("layout validity =", layout_validity, "on", '_'.join(image_sets), file=f)
| {
"repo_name": "ronghanghu/n2nmn",
"path": "exp_shapes/eval_shapes.py",
"copies": "1",
"size": "7263",
"license": "bsd-2-clause",
"hash": -573394478290280900,
"line_mean": 36.828125,
"line_max": 92,
"alpha_frac": 0.6937904447,
"autogenerated": false,
"ratio": 3.134656883901597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4328447328601597,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0)
args = parser.parse_args()
gpu_id = args.gpu_id # set GPU id to use
import os; os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
import numpy as np
import tensorflow as tf
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
import json
from models_shapes.nmn3_assembler import Assembler
from models_shapes.nmn3_model import NMN3ModelAtt
# Module parameters
H_im = 30
W_im = 30
num_choices = 2
embed_dim_txt = 300
embed_dim_nmn = 300
lstm_dim = 256
num_layers = 2
encoder_dropout = True
decoder_dropout = True
decoder_sampling = True
T_encoder = 15
T_decoder = 11
N = 256
# Training parameters
weight_decay = 5e-4
max_grad_l2_norm = 10
max_iter = 40000
snapshot_interval = 10000
exp_name = "shapes_gt_layout"
snapshot_dir = './exp_shapes/tfmodel/%s/' % exp_name
# Log params
log_interval = 20
log_dir = './exp_shapes/tb/%s/' % exp_name
# Data files
vocab_shape_file = './exp_shapes/data/vocabulary_shape.txt'
vocab_layout_file = './exp_shapes/data/vocabulary_layout.txt'
image_sets = ['train.large', 'train.med', 'train.small', 'train.tiny']
training_text_files = './exp_shapes/shapes_dataset/%s.query_str.txt'
training_image_files = './exp_shapes/shapes_dataset/%s.input.npy'
training_label_files = './exp_shapes/shapes_dataset/%s.output'
training_gt_layout_file = './exp_shapes/data/%s.query_layout_symbols.json'
image_mean_file = './exp_shapes/data/image_mean.npy'
# Load vocabulary
with open(vocab_shape_file) as f:
vocab_shape_list = [s.strip() for s in f.readlines()]
vocab_shape_dict = {vocab_shape_list[n]:n for n in range(len(vocab_shape_list))}
num_vocab_txt = len(vocab_shape_list)
assembler = Assembler(vocab_layout_file)
num_vocab_nmn = len(assembler.module_names)
# Load training data
training_questions = []
training_labels = []
training_images_list = []
gt_layout_list = []
for image_set in image_sets:
with open(training_text_files % image_set) as f:
training_questions += [l.strip() for l in f.readlines()]
with open(training_label_files % image_set) as f:
training_labels += [l.strip() == 'true' for l in f.readlines()]
training_images_list.append(np.load(training_image_files % image_set))
with open(training_gt_layout_file % image_set) as f:
gt_layout_list += json.load(f)
num_questions = len(training_questions)
training_images = np.concatenate(training_images_list)
# Shuffle the training data
# fix random seed for data repeatibility
np.random.seed(3)
shuffle_inds = np.random.permutation(num_questions)
training_questions = [training_questions[idx] for idx in shuffle_inds]
training_labels = [training_labels[idx] for idx in shuffle_inds]
training_images = training_images[shuffle_inds]
gt_layout_list = [gt_layout_list[idx] for idx in shuffle_inds]
# number of training batches
num_batches = np.ceil(num_questions / N)
# Turn the questions into vocabulary indices
text_seq_array = np.zeros((T_encoder, num_questions), np.int32)
seq_length_array = np.zeros(num_questions, np.int32)
gt_layout_array = np.zeros((T_decoder, num_questions), np.int32)
for n_q in range(num_questions):
tokens = training_questions[n_q].split()
seq_length_array[n_q] = len(tokens)
for t in range(len(tokens)):
text_seq_array[t, n_q] = vocab_shape_dict[tokens[t]]
gt_layout_array[:, n_q] = assembler.module_list2tokens(
gt_layout_list[n_q], T_decoder)
image_mean = np.load(image_mean_file)
image_array = (training_images - image_mean).astype(np.float32)
vqa_label_array = np.array(training_labels, np.int32)
# Network inputs
text_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_batch = tf.placeholder(tf.float32, [None, H_im, W_im, 3])
expr_validity_batch = tf.placeholder(tf.bool, [None])
vqa_label_batch = tf.placeholder(tf.int32, [None])
use_gt_layout = tf.constant(True, dtype=tf.bool)
gt_layout_batch = tf.placeholder(tf.int32, [None, None])
# The model
nmn3_model = NMN3ModelAtt(image_batch, text_seq_batch,
seq_length_batch, T_decoder=T_decoder,
num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,
num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,
lstm_dim=lstm_dim,
num_layers=num_layers, EOS_idx=assembler.EOS_idx,
encoder_dropout=encoder_dropout,
decoder_dropout=decoder_dropout,
decoder_sampling=decoder_sampling,
num_choices=num_choices, use_gt_layout=use_gt_layout,
gt_layout_batch=gt_layout_batch)
compiler = nmn3_model.compiler
scores = nmn3_model.scores
log_seq_prob = nmn3_model.log_seq_prob
# Loss function
softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=scores, labels=vqa_label_batch)
# The final per-sample loss, which is vqa loss for valid expr
# and invalid_expr_loss for invalid expr
final_loss_per_sample = softmax_loss_per_sample # All exprs are valid
avg_sample_loss = tf.reduce_mean(final_loss_per_sample)
seq_likelihood_loss = tf.reduce_mean(-log_seq_prob)
total_training_loss = seq_likelihood_loss + avg_sample_loss
total_loss = total_training_loss + weight_decay * nmn3_model.l2_reg
# Train with Adam
solver = tf.train.AdamOptimizer()
gradients = solver.compute_gradients(total_loss)
# Clip gradient by L2 norm
# gradients = gradients_part1+gradients_part2
gradients = [(tf.clip_by_norm(g, max_grad_l2_norm), v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
with tf.control_dependencies([solver_op]):
train_step = tf.constant(0)
# Write summary to TensorBoard
os.makedirs(log_dir, exist_ok=True)
log_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
loss_ph = tf.placeholder(tf.float32, [])
entropy_ph = tf.placeholder(tf.float32, [])
accuracy_ph = tf.placeholder(tf.float32, [])
tf.summary.scalar("avg_sample_loss", loss_ph)
tf.summary.scalar("entropy", entropy_ph)
tf.summary.scalar("avg_accuracy", accuracy_ph)
log_step = tf.summary.merge_all()
os.makedirs(snapshot_dir, exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
sess.run(tf.global_variables_initializer())
avg_accuracy = 0
accuracy_decay = 0.99
for n_iter in range(max_iter):
n_begin = int((n_iter % num_batches)*N)
n_end = int(min(n_begin+N, num_questions))
# set up input and output tensors
h = sess.partial_run_setup(
[nmn3_model.predicted_tokens, nmn3_model.entropy_reg,
scores, avg_sample_loss, train_step],
[text_seq_batch, seq_length_batch, image_batch, gt_layout_batch,
compiler.loom_input_tensor, vqa_label_batch])
# Part 0 & 1: Run Convnet and generate module layout
tokens, entropy_reg_val = sess.partial_run(h,
(nmn3_model.predicted_tokens, nmn3_model.entropy_reg),
feed_dict={text_seq_batch: text_seq_array[:, n_begin:n_end],
seq_length_batch: seq_length_array[n_begin:n_end],
image_batch: image_array[n_begin:n_end],
gt_layout_batch: gt_layout_array[:, n_begin:n_end]})
# Assemble the layout tokens into network structure
expr_list, expr_validity_array = assembler.assemble(tokens)
# all expr should be valid (since they are ground-truth)
assert(np.all(expr_validity_array))
labels = vqa_label_array[n_begin:n_end]
# Build TensorFlow Fold input for NMN
expr_feed = compiler.build_feed_dict(expr_list)
expr_feed[vqa_label_batch] = labels
# Part 2: Run NMN and learning steps
scores_val, avg_sample_loss_val, _ = sess.partial_run(
h, (scores, avg_sample_loss, train_step), feed_dict=expr_feed)
# compute accuracy
predictions = np.argmax(scores_val, axis=1)
accuracy = np.mean(np.logical_and(expr_validity_array,
predictions == labels))
avg_accuracy += (1-accuracy_decay) * (accuracy-avg_accuracy)
# Add to TensorBoard summary
if n_iter % log_interval == 0 or (n_iter+1) == max_iter:
print("iter = %d\n\tloss = %f, accuracy (cur) = %f, "
"accuracy (avg) = %f, entropy = %f" %
(n_iter, avg_sample_loss_val, accuracy,
avg_accuracy, -entropy_reg_val))
summary = sess.run(log_step, {loss_ph: avg_sample_loss_val,
entropy_ph: -entropy_reg_val,
accuracy_ph: avg_accuracy})
log_writer.add_summary(summary, n_iter)
# Save snapshot
if (n_iter+1) % snapshot_interval == 0 or (n_iter+1) == max_iter:
snapshot_file = os.path.join(snapshot_dir, "%08d" % (n_iter+1))
snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)
print('snapshot saved to ' + snapshot_file)
| {
"repo_name": "ronghanghu/n2nmn",
"path": "exp_shapes/train_shapes_gt_layout.py",
"copies": "1",
"size": "9107",
"license": "bsd-2-clause",
"hash": -6894922248450542000,
"line_mean": 36.632231405,
"line_max": 80,
"alpha_frac": 0.693093225,
"autogenerated": false,
"ratio": 3.067362748400135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9248846783782794,
"avg_score": 0.002321837923468268,
"num_lines": 242
} |
from __future__ import absolute_import, division, print_function
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument(
'--pretrained_model',
default='./exp_clevr/tfmodel/clevr_gt_layout/00050000')
args = parser.parse_args()
gpu_id = args.gpu_id # set GPU id to use
import os; os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
import numpy as np
import tensorflow as tf
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
from models_clevr.nmn3_assembler import Assembler
from models_clevr.nmn3_model import NMN3Model
from util.clevr_train.data_reader import DataReader
# Module parameters
H_feat = 10
W_feat = 15
D_feat = 512
embed_dim_txt = 300
embed_dim_nmn = 300
lstm_dim = 512
num_layers = 2
encoder_dropout = False
decoder_dropout = False
decoder_sampling = True
T_encoder = 45
T_decoder = 10
N = 64
prune_filter_module = True
# Training parameters
invalid_expr_loss = 0.5 # loss value when the layout is invalid
lambda_entropy = 0.005
weight_decay = 5e-6
baseline_decay = 0.99
max_grad_l2_norm = 10
max_iter = 80000
snapshot_interval = 10000
exp_name = "clevr_rl_gt_layout"
pretrained_model = args.pretrained_model
snapshot_dir = './exp_clevr/tfmodel/%s/' % exp_name
# Log params
log_interval = 20
log_dir = './exp_clevr/tb/%s/' % exp_name
# Data files
vocab_question_file = './exp_clevr/data/vocabulary_clevr.txt'
vocab_layout_file = './exp_clevr/data/vocabulary_layout.txt'
vocab_answer_file = './exp_clevr/data/answers_clevr.txt'
imdb_file_trn = './exp_clevr/data/imdb/imdb_trn.npy'
imdb_file_tst = './exp_clevr/data/imdb/imdb_val.npy'
assembler = Assembler(vocab_layout_file)
data_reader_trn = DataReader(imdb_file_trn, shuffle=True, one_pass=False,
batch_size=N,
T_encoder=T_encoder,
T_decoder=T_decoder,
assembler=assembler,
vocab_question_file=vocab_question_file,
vocab_answer_file=vocab_answer_file,
prune_filter_module=prune_filter_module)
num_vocab_txt = data_reader_trn.batch_loader.vocab_dict.num_vocab
num_vocab_nmn = len(assembler.module_names)
num_choices = data_reader_trn.batch_loader.answer_dict.num_vocab
# Network inputs
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(tf.float32, [None, H_feat, W_feat, D_feat])
expr_validity_batch = tf.placeholder(tf.bool, [None])
answer_label_batch = tf.placeholder(tf.int32, [None])
# The model for training
nmn3_model_trn = NMN3Model(
image_feat_batch, input_seq_batch,
seq_length_batch, T_decoder=T_decoder,
num_vocab_txt=num_vocab_txt, embed_dim_txt=embed_dim_txt,
num_vocab_nmn=num_vocab_nmn, embed_dim_nmn=embed_dim_nmn,
lstm_dim=lstm_dim, num_layers=num_layers,
assembler=assembler,
encoder_dropout=encoder_dropout,
decoder_dropout=decoder_dropout,
decoder_sampling=decoder_sampling,
num_choices=num_choices)
finetune_lr = 1e-4 # 1/10 of the default 1e-3 for adam
compiler = nmn3_model_trn.compiler
scores = nmn3_model_trn.scores
log_seq_prob = nmn3_model_trn.log_seq_prob
# Loss function
softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=scores, labels=answer_label_batch)
# The final per-sample loss, which is vqa loss for valid expr
# and invalid_expr_loss for invalid expr
final_loss_per_sample = tf.where(expr_validity_batch,
softmax_loss_per_sample,
tf.ones_like(softmax_loss_per_sample) * invalid_expr_loss)
# Totoal training loss:
# loss = E[ (C - b) * \diff[log(p(x))] + \diff[C] ]
# (where C = -R is the cost/loss; b is baseline)
avg_sample_loss = tf.reduce_mean(final_loss_per_sample)
baseline = tf.Variable(invalid_expr_loss, trainable=False, dtype=tf.float32)
baseline_update_op = tf.assign_add(baseline,
(1-baseline_decay) * (avg_sample_loss-baseline))
policy_gradient_loss = tf.reduce_mean(
tf.stop_gradient(final_loss_per_sample-baseline)*log_seq_prob)
total_training_loss = policy_gradient_loss + avg_sample_loss
total_loss = tf.add_n([total_training_loss,
lambda_entropy * nmn3_model_trn.entropy_reg,
weight_decay * nmn3_model_trn.l2_reg])
# Train with Adam
solver = tf.train.AdamOptimizer(learning_rate=finetune_lr)
gradients = solver.compute_gradients(total_loss)
# Clip gradient by L2 norm
# gradients = gradients_part1+gradients_part2
gradients = [(tf.clip_by_norm(g, max_grad_l2_norm), v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
with tf.control_dependencies([solver_op, baseline_update_op]):
train_step = tf.constant(0)
# Write summary to TensorBoard
os.makedirs(log_dir, exist_ok=True)
log_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
loss_ph = tf.placeholder(tf.float32, [])
entropy_ph = tf.placeholder(tf.float32, [])
accuracy_ph = tf.placeholder(tf.float32, [])
baseline_ph = tf.placeholder(tf.float32, [])
validity_ph = tf.placeholder(tf.float32, [])
summary_trn = []
summary_trn.append(tf.summary.scalar("avg_sample_loss", loss_ph))
summary_trn.append(tf.summary.scalar("entropy", entropy_ph))
summary_trn.append(tf.summary.scalar("avg_accuracy", accuracy_ph))
# summary_trn.append(tf.summary.scalar("baseline", baseline_ph))
summary_trn.append(tf.summary.scalar("validity", validity_ph))
log_step_trn = tf.summary.merge(summary_trn)
tst_answer_accuracy_ph = tf.placeholder(tf.float32, [])
tst_layout_accuracy_ph = tf.placeholder(tf.float32, [])
tst_layout_validity_ph = tf.placeholder(tf.float32, [])
summary_tst = []
summary_tst.append(tf.summary.scalar("test_answer_accuracy", tst_answer_accuracy_ph))
summary_tst.append(tf.summary.scalar("test_layout_accuracy", tst_layout_accuracy_ph))
summary_tst.append(tf.summary.scalar("test_layout_validity", tst_layout_validity_ph))
log_step_tst = tf.summary.merge(summary_tst)
os.makedirs(snapshot_dir, exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
sess.run(tf.global_variables_initializer())
# Load previous model
snapshot_loader = tf.train.Saver([v for v in tf.global_variables() if v != baseline])
snapshot_loader.restore(sess, pretrained_model)
def run_training(max_iter, dataset_trn):
avg_accuracy = 0
accuracy_decay = 0.99
for n_iter, batch in enumerate(dataset_trn.batches()):
if n_iter >= max_iter:
break
# set up input and output tensors
h = sess.partial_run_setup(
[nmn3_model_trn.predicted_tokens, nmn3_model_trn.entropy_reg,
scores, avg_sample_loss, train_step],
[input_seq_batch, seq_length_batch, image_feat_batch,
compiler.loom_input_tensor, expr_validity_batch,
answer_label_batch])
# Part 0 & 1: Run Convnet and generate module layout
tokens, entropy_reg_val = sess.partial_run(h,
(nmn3_model_trn.predicted_tokens, nmn3_model_trn.entropy_reg),
feed_dict={input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch']})
# Assemble the layout tokens into network structure
expr_list, expr_validity_array = assembler.assemble(tokens)
# all exprs should be valid (as it's in the decoder)
assert(np.all(expr_validity_array))
labels = batch['answer_label_batch']
# Build TensorFlow Fold input for NMN
expr_feed = compiler.build_feed_dict(expr_list)
expr_feed[expr_validity_batch] = expr_validity_array
expr_feed[answer_label_batch] = labels
# Part 2: Run NMN and learning steps
scores_val, avg_sample_loss_val, _ = sess.partial_run(
h, (scores, avg_sample_loss, train_step), feed_dict=expr_feed)
# compute accuracy
predictions = np.argmax(scores_val, axis=1)
accuracy = np.mean(np.logical_and(expr_validity_array,
predictions == labels))
avg_accuracy += (1-accuracy_decay) * (accuracy-avg_accuracy)
validity = np.mean(expr_validity_array)
# Add to TensorBoard summary
if (n_iter+1) % log_interval == 0 or (n_iter+1) == max_iter:
print("iter = %d\n\tloss = %f, accuracy (cur) = %f, "
"accuracy (avg) = %f, entropy = %f, validity = %f" %
(n_iter+1, avg_sample_loss_val, accuracy,
avg_accuracy, -entropy_reg_val, validity))
summary = sess.run(log_step_trn, {
loss_ph: avg_sample_loss_val,
entropy_ph: -entropy_reg_val,
accuracy_ph: avg_accuracy,
# baseline_ph: sess.run(baseline),
validity_ph: validity})
log_writer.add_summary(summary, n_iter+1)
# Save snapshot
if (n_iter+1) % snapshot_interval == 0 or (n_iter+1) == max_iter:
snapshot_file = os.path.join(snapshot_dir, "%08d" % (n_iter+1))
snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)
print('snapshot saved to ' + snapshot_file)
run_training(max_iter, data_reader_trn)
| {
"repo_name": "ronghanghu/n2nmn",
"path": "exp_clevr/train_clevr_rl_gt_layout.py",
"copies": "1",
"size": "9647",
"license": "bsd-2-clause",
"hash": 6360182380939726000,
"line_mean": 38.6995884774,
"line_max": 85,
"alpha_frac": 0.6710894579,
"autogenerated": false,
"ratio": 3.168144499178982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4339233957078982,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ast
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import linecache
import sys
import six
import inspect
import textwrap
import tokenize
import py
cpy_compile = compile
class Source(object):
""" an immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get("deindent", True)
for part in parts:
if not part:
partlines = []
elif isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, six.string_types):
partlines = part.split("\n")
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
__hash__ = None
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start : key.stop]
return newsource
def __len__(self):
return len(self.lines)
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before="", after="", indent=" " * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [(indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=" " * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno):
""" return (start, end) tuple which spans the minimal
statement region which containing the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self):
"""return a new source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
from parser import suite as syntax_checker
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
# compile(source+'\n', "x", "exec")
syntax_checker(source + "\n")
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(
self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None
):
""" return compiled code object. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._getframe(1) # the caller
fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + "%s:%d>" % (fn, lineno)
else:
filename = base + "%r %s:%d>" % (filename, fn, lineno)
source = "\n".join(self.lines) + "\n"
try:
co = cpy_compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[: ex.lineno]
if ex.offset:
msglines.append(" " * ex.offset + "^")
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError("\n".join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename)
return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):
""" compile the given source to a raw code object,
and maintain an internal cache which allows later
retrieval of the source code for the code object
and any recursively created code objects.
"""
if isinstance(source, ast.AST):
# XXX should Source support having AST?
return cpy_compile(source, filename, mode, flags, dont_inherit)
_genframe = sys._getframe(1) # the caller
s = Source(source)
co = s.compile(filename, mode, flags, _genframe=_genframe)
return co
def getfslineno(obj):
""" Return source location (path, lineno) for the given object.
If the source cannot be determined return ("", -1)
"""
from .code import Code
try:
code = Code(obj)
except TypeError:
try:
fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
except TypeError:
return "", -1
fspath = fn and py.path.local(fn) or None
lineno = -1
if fspath:
try:
_, lineno = findsource(obj)
except IOError:
pass
else:
fspath = code.path
lineno = code.firstlineno
assert isinstance(lineno, int)
return fspath, lineno
#
# helper functions
#
def findsource(obj):
try:
sourcelines, lineno = inspect.findsource(obj)
except py.builtin._sysex:
raise
except: # noqa
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
return source, lineno
def getsource(obj, **kwargs):
from .code import getrawcode
obj = getrawcode(obj)
try:
strsrc = inspect.getsource(obj)
except IndentationError:
strsrc = '"Buggy python version consider upgrading, cannot get source"'
assert isinstance(strsrc, str)
return Source(strsrc, **kwargs)
def deindent(lines):
return textwrap.dedent("\n".join(lines)).splitlines()
def get_statement_startend2(lineno, node):
import ast
# flatten all statements and except handlers into one lineno-list
# AST's line numbers start indexing at 1
values = []
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
val = getattr(x, name, None)
if val:
# treat the finally/orelse part as its own statement
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
start = values[insert_index - 1]
if insert_index >= len(values):
end = None
else:
end = values[insert_index]
return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
if astnode is None:
content = str(source)
astnode = compile(content, "source", "exec", 1024) # 1024 for AST
start, end = get_statement_startend2(lineno, astnode)
# we need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
if end is None:
end = len(source.lines)
if end > start + 1:
# make sure we don't span differently indented code blocks
# by using the BlockFinder helper used which inspect.getsource() uses itself
block_finder = inspect.BlockFinder()
# if we start with an indented line, put blockfinder to "started" mode
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
for tok in tokenize.generate_tokens(lambda: next(it)):
block_finder.tokeneater(*tok)
except (inspect.EndOfBlock, IndentationError):
end = block_finder.last + start
except Exception:
pass
# the end might still point to a comment or empty line, correct it
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
end -= 1
else:
break
return astnode, start, end
| {
"repo_name": "davidszotten/pytest",
"path": "src/_pytest/_code/source.py",
"copies": "2",
"size": "10225",
"license": "mit",
"hash": -6604552591822531000,
"line_mean": 30.7546583851,
"line_max": 84,
"alpha_frac": 0.573594132,
"autogenerated": false,
"ratio": 4.221717588769612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5795311720769613,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ast
from itertools import repeat
from toolz import merge
from . import arithmetic
from . import math
from .expressions import Expr, symbol
__all__ = ['exprify']
def generate_methods(node_names, funcs, builder):
def wrapped(cls):
for node_name, func in zip(node_names, funcs):
setattr(cls, 'visit_%s' % node_name, builder(func))
return cls
return wrapped
arithmetic_ops = ['Eq', 'Ne', 'Lt', 'Gt', 'Le', 'Ge', 'BitAnd', 'BitOr',
'Invert', 'USub', 'Add', 'Mult', 'Div', 'FloorDiv', 'Pow', 'Mod',
'Sub']
@generate_methods(arithmetic_ops, arithmetic_ops,
builder=lambda func: lambda self, node: getattr(arithmetic, func))
class BlazeParser(ast.NodeVisitor):
def __init__(self, dtypes, scope):
self.dtypes = dtypes
self.scope = scope
def visit_Compare(self, node):
assert len(node.ops) == 1, 'chained comparisons not supported'
assert len(node.comparators) == 1, 'chained comparisons not supported'
return self.visit(node.ops[0])(self.visit(node.left),
self.visit(node.comparators[0]))
def visit_Num(self, node):
return node.n
def visit_Str(self, node):
return node.s
def visit_Name(self, node):
name = node.id
if name.startswith('__'):
raise ValueError("invalid name %r" % name)
try:
return self.scope[name]
except KeyError:
return symbol(name, self.dtypes[name])
def visit_BinOp(self, node):
return self.visit(node.op)(self.visit(node.left),
self.visit(node.right))
def visit_UnaryOp(self, node):
op = node.op
operand = node.operand
if isinstance(operand, ast.Num):
return -1 * isinstance(op, ast.USub) * operand.n
return self.visit(op)(self.visit(operand))
def visit_Call(self, node):
assert len(node.args) <= 1, 'only single argument functions allowed'
assert not node.keywords
assert node.starargs is None, 'starargs not allowed'
assert node.kwargs is None, 'kwargs not allowed'
return self.visit(node.func)(*map(self.visit, node.args))
def visit(self, node):
name = node.__class__.__name__
method = 'visit_' + name
visitor = getattr(self, method, None)
if visitor is None:
raise NotImplementedError('%s nodes are not implemented' % name)
return visitor(node)
# Operations like sin, cos, exp, isnan, floor, ceil, ...
math_operators = dict((k, v) for k, v in math.__dict__.items()
if isinstance(v, type) and issubclass(v, Expr))
safe_scope = {'__builtins__': {}, # Python 2
'builtins': {}} # Python 3
def exprify(expr, dtypes):
""" Transform string into scalar expression
>>> from blaze.expr import Expr
>>> expr = exprify('x + y', {'x': 'int64', 'y': 'real'})
>>> expr
x + y
>>> isinstance(expr, Expr)
True
>>> expr.lhs.dshape
dshape("int64")
"""
scope = merge(safe_scope, math_operators)
# use eval mode to raise a SyntaxError if any statements are passed in
parsed = ast.parse(expr, mode='eval')
overlapping_names = set(dtypes) & set(scope)
if overlapping_names:
raise ValueError('overlapping names %s' % overlapping_names)
parser = BlazeParser(dtypes, scope)
return parser.visit(parsed.body)
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/parser.py",
"copies": "4",
"size": "3550",
"license": "bsd-3-clause",
"hash": 9106870336922902000,
"line_mean": 31.2727272727,
"line_max": 84,
"alpha_frac": 0.5997183099,
"autogenerated": false,
"ratio": 3.7725823591923486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6372300669092349,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import ast
from toolz import merge
from . import arithmetic
from . import math
from .expressions import Expr, symbol
__all__ = ['exprify']
def generate_methods(node_names, funcs, builder):
def wrapped(cls):
for node_name, func in zip(node_names, funcs):
setattr(cls, 'visit_%s' % node_name, builder(func))
return cls
return wrapped
arithmetic_ops = ['Eq', 'Ne', 'Lt', 'Gt', 'Le', 'Ge', 'BitAnd', 'BitOr',
'Invert', 'USub', 'Add', 'Mult', 'Div', 'FloorDiv', 'Pow',
'Mod', 'Sub']
@generate_methods(arithmetic_ops, arithmetic_ops,
builder=lambda func: lambda self, node: getattr(arithmetic, func))
class BlazeParser(ast.NodeVisitor):
def __init__(self, dtypes, scope):
self.dtypes = dtypes
self.scope = scope
def visit_Compare(self, node):
assert len(node.ops) == 1, 'chained comparisons not supported'
assert len(node.comparators) == 1, 'chained comparisons not supported'
return self.visit(node.ops[0])(self.visit(node.left),
self.visit(node.comparators[0]))
def visit_Num(self, node):
return node.n
def visit_Str(self, node):
return node.s
def visit_Name(self, node):
name = node.id
if name.startswith('__'):
raise ValueError("invalid name %r" % name)
try:
return self.scope[name]
except KeyError:
return symbol(name, self.dtypes[name])
def visit_BinOp(self, node):
return self.visit(node.op)(self.visit(node.left),
self.visit(node.right))
def visit_UnaryOp(self, node):
op = node.op
operand = node.operand
if isinstance(operand, ast.Num):
return -1 * isinstance(op, ast.USub) * operand.n
return self.visit(op)(self.visit(operand))
def visit_Call(self, node):
assert len(node.args) <= 1, 'only single argument functions allowed'
assert not node.keywords
assert node.starargs is None, 'starargs not allowed'
assert node.kwargs is None, 'kwargs not allowed'
return self.visit(node.func)(*map(self.visit, node.args))
def visit(self, node):
name = node.__class__.__name__
method = 'visit_' + name
visitor = getattr(self, method, None)
if visitor is None:
raise NotImplementedError('%s nodes are not implemented' % name)
return visitor(node)
# Operations like sin, cos, exp, isnan, floor, ceil, ...
math_operators = dict((k, v) for k, v in math.__dict__.items()
if isinstance(v, type) and issubclass(v, Expr))
safe_scope = {'__builtins__': {}, # Python 2
'builtins': {}} # Python 3
def exprify(expr, dtypes):
""" Transform string into scalar expression
>>> from blaze.expr import Expr
>>> expr = exprify('x + y', {'x': 'int64', 'y': 'real'})
>>> expr
x + y
>>> isinstance(expr, Expr)
True
>>> expr.lhs.dshape
dshape("int64")
"""
scope = merge(safe_scope, math_operators)
# use eval mode to raise a SyntaxError if any statements are passed in
parsed = ast.parse(expr, mode='eval')
overlapping_names = set(dtypes) & set(scope)
if overlapping_names:
raise ValueError('overlapping names %s' % overlapping_names)
parser = BlazeParser(dtypes, scope)
return parser.visit(parsed.body)
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/expr/parser.py",
"copies": "10",
"size": "3548",
"license": "bsd-3-clause",
"hash": -5350385570511745000,
"line_mean": 31.2545454545,
"line_max": 84,
"alpha_frac": 0.5930101466,
"autogenerated": false,
"ratio": 3.7905982905982905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9383608437198291,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import asyncio
import logging
import re
import weakref
from copy import copy
from urllib.parse import urlparse
import aiohttp
import requests
from fsspec.asyn import AsyncFileSystem, sync, sync_wrapper
from fsspec.exceptions import FSTimeoutError
from fsspec.spec import AbstractBufferedFile
from fsspec.utils import DEFAULT_BLOCK_SIZE, tokenize
from ..caching import AllBytes
# https://stackoverflow.com/a/15926317/3821154
ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
logger = logging.getLogger("fsspec.http")
async def get_client(**kwargs):
return aiohttp.ClientSession(**kwargs)
class BlockSizeError(ValueError):
"""
Helper class for exceptions raised in this module.
"""
pass
class HTTPFileSystem(AsyncFileSystem):
"""
Simple File-System for fetching data via HTTP(S)
``ls()`` is implemented by loading the parent page and doing a regex
match on the result. If simple_link=True, anything of the form
"http(s)://server.com/stuff?thing=other"; otherwise only links within
HTML href tags will be used.
"""
sep = "/"
def __init__(
self,
simple_links=True,
block_size=None,
same_scheme=True,
size_policy=None,
cache_type="bytes",
cache_options=None,
asynchronous=False,
loop=None,
client_kwargs=None,
**storage_options,
):
"""
NB: if this is called async, you must await set_client
Parameters
----------
block_size: int
Blocks to read bytes; if 0, will default to raw requests file-like
objects instead of HTTPFile instances
simple_links: bool
If True, will consider both HTML <a> tags and anything that looks
like a URL; if False, will consider only the former.
same_scheme: True
When doing ls/glob, if this is True, only consider paths that have
http/https matching the input URLs.
size_policy: this argument is deprecated
client_kwargs: dict
Passed to aiohttp.ClientSession, see
https://docs.aiohttp.org/en/stable/client_reference.html
For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
storage_options: key-value
Any other parameters passed on to requests
cache_type, cache_options: defaults used in open
"""
super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
self.simple_links = simple_links
self.same_schema = same_scheme
self.cache_type = cache_type
self.cache_options = cache_options
self.client_kwargs = client_kwargs or {}
self.kwargs = storage_options
self._session = None
# Clean caching-related parameters from `storage_options`
# before propagating them as `request_options` through `self.kwargs`.
# TODO: Maybe rename `self.kwargs` to `self.request_options` to make
# it clearer.
request_options = copy(storage_options)
self.use_listings_cache = request_options.pop("use_listings_cache", False)
request_options.pop("listings_expiry_time", None)
request_options.pop("max_paths", None)
request_options.pop("skip_instance_cache", None)
self.kwargs = request_options
if not asynchronous:
sync(self.loop, self.set_session)
@staticmethod
def close_session(loop, session):
if loop is not None and loop.is_running():
try:
sync(loop, session.close, timeout=0.1)
return
except (TimeoutError, FSTimeoutError):
pass
if session._connector is not None:
# close after loop is dead
session._connector._close()
async def set_session(self):
if self._session is None:
self._session = await get_client(loop=self.loop, **self.client_kwargs)
if not self.asynchronous:
weakref.finalize(self, self.close_session, self.loop, self._session)
return self._session
@classmethod
def _strip_protocol(cls, path):
"""For HTTP, we always want to keep the full URL"""
return path
@classmethod
def _parent(cls, path):
# override, since _strip_protocol is different for URLs
par = super()._parent(path)
if len(par) > 7: # "http://..."
return par
return ""
async def _ls_real(self, url, detail=True, **kwargs):
# ignoring URL-encoded arguments
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(url)
session = await self.set_session()
async with session.get(url, **self.kwargs) as r:
self._raise_not_found_for_status(r, url)
text = await r.text()
if self.simple_links:
links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
else:
links = [u[2] for u in ex.findall(text)]
out = set()
parts = urlparse(url)
for l in links:
if isinstance(l, tuple):
l = l[1]
if l.startswith("/") and len(l) > 1:
# absolute URL on this server
l = parts.scheme + "://" + parts.netloc + l
if l.startswith("http"):
if self.same_schema and l.startswith(url.rstrip("/") + "/"):
out.add(l)
elif l.replace("https", "http").startswith(
url.replace("https", "http").rstrip("/") + "/"
):
# allowed to cross http <-> https
out.add(l)
else:
if l not in ["..", "../"]:
# Ignore FTP-like "parent"
out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
if not out and url.endswith("/"):
out = await self._ls_real(url.rstrip("/"), detail=False)
if detail:
return [
{
"name": u,
"size": None,
"type": "directory" if u.endswith("/") else "file",
}
for u in out
]
else:
return list(sorted(out))
return out
async def _ls(self, url, detail=True, **kwargs):
if self.use_listings_cache and url in self.dircache:
out = self.dircache[url]
else:
out = await self._ls_real(url, detail=detail, **kwargs)
self.dircache[url] = out
return out
ls = sync_wrapper(_ls)
def _raise_not_found_for_status(self, response, url):
"""
Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
"""
if response.status == 404:
raise FileNotFoundError(url)
response.raise_for_status()
async def _cat_file(self, url, start=None, end=None, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(url)
# TODO: extract into testable utility function?
if start is not None or end is not None:
headers = kw.pop("headers", {}).copy()
headers["Range"] = await self._process_limits(url, start, end)
kw["headers"] = headers
session = await self.set_session()
async with session.get(url, **kw) as r:
self._raise_not_found_for_status(r, url)
out = await r.read()
return out
async def _get_file(self, rpath, lpath, chunk_size=5 * 2 ** 20, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(rpath)
session = await self.set_session()
async with session.get(rpath, **self.kwargs) as r:
self._raise_not_found_for_status(r, rpath)
with open(lpath, "wb") as fd:
chunk = True
while chunk:
chunk = await r.content.read(chunk_size)
fd.write(chunk)
async def _exists(self, path, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
try:
logger.debug(path)
session = await self.set_session()
r = await session.get(path, **kw)
async with r:
return r.status < 400
except (requests.HTTPError, aiohttp.ClientError):
return False
async def _isfile(self, path, **kwargs):
return await self._exists(path, **kwargs)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=None, # XXX: This differs from the base class.
cache_type=None,
cache_options=None,
size=None,
**kwargs,
):
"""Make a file-like object
Parameters
----------
path: str
Full URL with protocol
mode: string
must be "rb"
block_size: int or None
Bytes to download in one request; use instance value if None. If
zero, will return a streaming Requests file-like instance.
kwargs: key-value
Any other parameters, passed to requests calls
"""
if mode != "rb":
raise NotImplementedError
block_size = block_size if block_size is not None else self.block_size
kw = self.kwargs.copy()
kw["asynchronous"] = self.asynchronous
kw.update(kwargs)
size = size or self.size(path)
session = sync(self.loop, self.set_session)
if block_size and size:
return HTTPFile(
self,
path,
session=session,
block_size=block_size,
mode=mode,
size=size,
cache_type=cache_type or self.cache_type,
cache_options=cache_options or self.cache_options,
loop=self.loop,
**kw,
)
else:
return HTTPStreamFile(
self, path, mode=mode, loop=self.loop, session=session, **kw
)
def ukey(self, url):
"""Unique identifier; assume HTTP files are static, unchanging"""
return tokenize(url, self.kwargs, self.protocol)
async def _info(self, url, **kwargs):
"""Get info of URL
Tries to access location via HEAD, and then GET methods, but does
not fetch the data.
It is possible that the server does not supply any size information, in
which case size will be given as None (and certain operations on the
corresponding file will not work).
"""
size = False
for policy in ["head", "get"]:
try:
session = await self.set_session()
size = await _file_size(
url, size_policy=policy, session=session, **self.kwargs
)
if size:
break
except Exception:
pass
else:
# get failed, so conclude URL does not exist
if size is False:
raise FileNotFoundError(url)
return {"name": url, "size": size or None, "type": "file"}
async def _glob(self, path, **kwargs):
"""
Find files by glob-matching.
This implementation is idntical to the one in AbstractFileSystem,
but "?" is not considered as a character for globbing, because it is
so common in URLs, often identifying the "query" part.
"""
import re
ends = path.endswith("/")
path = self._strip_protocol(path)
indstar = path.find("*") if path.find("*") >= 0 else len(path)
indbrace = path.find("[") if path.find("[") >= 0 else len(path)
ind = min(indstar, indbrace)
detail = kwargs.pop("detail", False)
if not has_magic(path):
root = path
depth = 1
if ends:
path += "/*"
elif await self._exists(path):
if not detail:
return [path]
else:
return {path: await self._info(path)}
else:
if not detail:
return [] # glob of non-existent returns empty
else:
return {}
elif "/" in path[:ind]:
ind2 = path[:ind].rindex("/")
root = path[: ind2 + 1]
depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
else:
root = ""
depth = None if "**" in path else path[ind + 1 :].count("/") + 1
allpaths = await self._find(
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
)
# Escape characters special to python regex, leaving our supported
# special characters in place.
# See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
# for shell globbing details.
pattern = (
"^"
+ (
path.replace("\\", r"\\")
.replace(".", r"\.")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.replace("{", r"\{")
.replace("}", r"\}")
.rstrip("/")
)
+ "$"
)
pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
pattern = re.sub("[*]", "[^/]*", pattern)
pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
out = {
p: allpaths[p]
for p in sorted(allpaths)
if pattern.match(p.replace("//", "/").rstrip("/"))
}
if detail:
return out
else:
return list(out)
async def _isdir(self, path):
# override, since all URLs are (also) files
return bool(await self._ls(path))
class HTTPFile(AbstractBufferedFile):
"""
A file-like object pointing to a remove HTTP(S) resource
Supports only reading, with read-ahead of a predermined block-size.
In the case that the server does not supply the filesize, only reading of
the complete file in one go is supported.
Parameters
----------
url: str
Full URL of the remote resource, including the protocol
session: requests.Session or None
All calls will be made within this session, to avoid restarting
connections where the server allows this
block_size: int or None
The amount of read-ahead to do, in bytes. Default is 5MB, or the value
configured for the FileSystem creating this file
size: None or int
If given, this is the size of the file in bytes, and we don't attempt
to call the server to find the value.
kwargs: all other key-values are passed to requests calls.
"""
def __init__(
self,
fs,
url,
session=None,
block_size=None,
mode="rb",
cache_type="bytes",
cache_options=None,
size=None,
loop=None,
asynchronous=False,
**kwargs,
):
if mode != "rb":
raise NotImplementedError("File mode not supported")
self.asynchronous = asynchronous
self.url = url
self.session = session
self.details = {"name": url, "size": size, "type": "file"}
super().__init__(
fs=fs,
path=url,
mode=mode,
block_size=block_size,
cache_type=cache_type,
cache_options=cache_options,
**kwargs,
)
self.loop = loop
def read(self, length=-1):
"""Read bytes from file
Parameters
----------
length: int
Read up to this many bytes. If negative, read all content to end of
file. If the server has not supplied the filesize, attempting to
read only part of the data will raise a ValueError.
"""
if (
(length < 0 and self.loc == 0) # explicit read all
# but not when the size is known and fits into a block anyways
and not (self.size is not None and self.size <= self.blocksize)
):
self._fetch_all()
if self.size is None:
if length < 0:
self._fetch_all()
else:
length = min(self.size - self.loc, length)
return super().read(length)
async def async_fetch_all(self):
"""Read whole file in one shot, without caching
This is only called when position is still at zero,
and read() is called without a byte-count.
"""
logger.debug(f"Fetch all for {self}")
if not isinstance(self.cache, AllBytes):
r = await self.session.get(self.url, **self.kwargs)
async with r:
r.raise_for_status()
out = await r.read()
self.cache = AllBytes(
size=len(out), fetcher=None, blocksize=None, data=out
)
self.size = len(out)
_fetch_all = sync_wrapper(async_fetch_all)
async def async_fetch_range(self, start, end):
"""Download a block of data
The expectation is that the server returns only the requested bytes,
with HTTP code 206. If this is not the case, we first check the headers,
and then stream the output - if the data size is bigger than we
requested, an exception is raised.
"""
logger.debug(f"Fetch range for {self}: {start}-{end}")
kwargs = self.kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
headers["Range"] = "bytes=%i-%i" % (start, end - 1)
logger.debug(self.url + " : " + headers["Range"])
r = await self.session.get(self.url, headers=headers, **kwargs)
async with r:
if r.status == 416:
# range request outside file
return b""
r.raise_for_status()
if r.status == 206:
# partial content, as expected
out = await r.read()
elif "Content-Length" in r.headers:
cl = int(r.headers["Content-Length"])
if cl <= end - start:
# data size OK
out = await r.read()
else:
raise BlockSizeError(
"Got more bytes so far (>%i) than requested (%i)"
% (cl, end - start)
)
else:
cl = 0
out = []
while True:
chunk = await r.content.read(2 ** 20)
# data size unknown, let's see if it goes too big
if chunk:
out.append(chunk)
cl += len(chunk)
if cl > end - start:
raise BlockSizeError(
"Got more bytes so far (>%i) than requested (%i)"
% (cl, end - start)
)
else:
break
out = b"".join(out)
return out
_fetch_range = sync_wrapper(async_fetch_range)
def close(self):
pass
def __reduce__(self):
return reopen, (
self.fs,
self.url,
self.mode,
self.blocksize,
self.cache.name,
self.size,
)
def reopen(fs, url, mode, blocksize, cache_type, size=None):
return fs.open(
url, mode=mode, block_size=blocksize, cache_type=cache_type, size=size
)
magic_check = re.compile("([*[])")
def has_magic(s):
match = magic_check.search(s)
return match is not None
class HTTPStreamFile(AbstractBufferedFile):
def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
self.asynchronous = kwargs.pop("asynchronous", False)
self.url = url
self.loop = loop
self.session = session
if mode != "rb":
raise ValueError
self.details = {"name": url, "size": None}
super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)
async def cor():
r = await self.session.get(url, **kwargs).__aenter__()
return r
self.r = sync(self.loop, cor)
def seek(self, *args, **kwargs):
raise ValueError("Cannot seek streaming HTTP file")
async def _read(self, num=-1):
out = await self.r.content.read(num)
self.loc += len(out)
return out
read = sync_wrapper(_read)
async def _close(self):
self.r.close()
def close(self):
asyncio.run_coroutine_threadsafe(self._close(), self.loop)
def __reduce__(self):
return reopen, (self.fs, self.url, self.mode, self.blocksize, self.cache.name)
async def get_range(session, url, start, end, file=None, **kwargs):
# explicit get a range when we know it must be safe
kwargs = kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
headers["Range"] = "bytes=%i-%i" % (start, end - 1)
r = await session.get(url, headers=headers, **kwargs)
r.raise_for_status()
async with r:
out = await r.read()
if file:
with open(file, "rb+") as f:
f.seek(start)
f.write(out)
else:
return out
async def _file_size(url, session=None, size_policy="head", **kwargs):
"""Call HEAD on the server to get file size
Default operation is to explicitly allow redirects and use encoding
'identity' (no compression) to get the true size of the target.
"""
logger.debug("Retrieve file size for %s" % url)
kwargs = kwargs.copy()
ar = kwargs.pop("allow_redirects", True)
head = kwargs.get("headers", {}).copy()
head["Accept-Encoding"] = "identity"
kwargs["headers"] = head
session = session or await get_client()
if size_policy == "head":
r = await session.head(url, allow_redirects=ar, **kwargs)
elif size_policy == "get":
r = await session.get(url, allow_redirects=ar, **kwargs)
else:
raise TypeError('size_policy must be "head" or "get", got %s' "" % size_policy)
async with r:
# TODO:
# recognise lack of 'Accept-Ranges', or 'Accept-Ranges': 'none' (not 'bytes')
# to mean streaming only, no random access => return None
if "Content-Length" in r.headers:
return int(r.headers["Content-Length"])
elif "Content-Range" in r.headers:
return int(r.headers["Content-Range"].split("/")[1])
r.close()
file_size = sync_wrapper(_file_size)
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/implementations/http.py",
"copies": "1",
"size": "23250",
"license": "bsd-3-clause",
"hash": -8679234490833686000,
"line_mean": 32.9912280702,
"line_max": 87,
"alpha_frac": 0.5342795699,
"autogenerated": false,
"ratio": 4.195236376759293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229515946659292,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import atexit
import base64
import os
import re
import sys
import subprocess
import multiprocessing
import tornado.web
import tornado.ioloop
import tornado.websocket
import tornado.gen
import zmq
import zmq.eventloop.ioloop
from zmq.eventloop.zmqstream import ZMQStream
from .tree import SceneTree, walk, find_node
def capture(pattern, s):
match = re.match(pattern, s)
if not match:
raise ValueError("Could not match {:s} with pattern {:s}".format(s, pattern))
else:
return match.groups()[0]
def match_zmq_url(line):
return capture(r"^zmq_url=(.*)$", line)
def match_web_url(line):
return capture(r"^web_url=(.*)$", line)
def start_zmq_server_as_subprocess(zmq_url=None, server_args=[]):
"""
Starts the ZMQ server as a subprocess, passing *args through popen.
Optional Keyword Arguments:
zmq_url
"""
# Need -u for unbuffered output: https://stackoverflow.com/a/25572491
args = [sys.executable, "-u", "-m", "meshcat.servers.zmqserver"]
if zmq_url is not None:
args.append("--zmq-url")
args.append(zmq_url)
if server_args:
args.append(*server_args)
# Note: Pass PYTHONPATH to be robust to workflows like Google Colab,
# where meshcat might have been added directly via sys.path.append.
# Copy existing environmental variables as some of them might be needed
# e.g. on Windows SYSTEMROOT and PATH
env = dict(os.environ)
env["PYTHONPATH"] = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Use start_new_session if it's available. Without it, in jupyter the server
# goes down when we cancel execution of any cell in the notebook.
server_proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
start_new_session=True)
line = ""
while "zmq_url" not in line:
line = server_proc.stdout.readline().strip().decode("utf-8")
if server_proc.poll() is not None:
outs, errs = server_proc.communicate()
print(outs.decode("utf-8"))
print(errs.decode("utf-8"))
raise RuntimeError("the meshcat server process exited prematurely with exit code " + str(server_proc.poll()))
zmq_url = match_zmq_url(line)
web_url = match_web_url(server_proc.stdout.readline().strip().decode("utf-8"))
def cleanup(server_proc):
server_proc.kill()
server_proc.wait()
atexit.register(cleanup, server_proc)
return server_proc, zmq_url, web_url
def _zmq_install_ioloop():
# For pyzmq<17, install ioloop instead of a tornado ioloop
# http://zeromq.github.com/pyzmq/eventloop.html
try:
pyzmq_major = int(zmq.__version__.split(".")[0])
except ValueError:
# Development version?
return
if pyzmq_major < 17:
zmq.eventloop.ioloop.install()
_zmq_install_ioloop()
VIEWER_ROOT = os.path.join(os.path.dirname(__file__), "..", "viewer", "dist")
VIEWER_HTML = "index.html"
DEFAULT_FILESERVER_PORT = 7000
MAX_ATTEMPTS = 1000
DEFAULT_ZMQ_METHOD = "tcp"
DEFAULT_ZMQ_PORT = 6000
MESHCAT_COMMANDS = ["set_transform", "set_object", "delete", "set_property", "set_animation"]
def find_available_port(func, default_port, max_attempts=MAX_ATTEMPTS, **kwargs):
for i in range(max_attempts):
port = default_port + i
try:
return func(port, **kwargs), port
except (OSError, zmq.error.ZMQError):
print("Port: {:d} in use, trying another...".format(port), file=sys.stderr)
except Exception as e:
print(type(e))
raise
else:
raise(Exception("Could not find an available port in the range: [{:d}, {:d})".format(default_port, max_attempts + default_port)))
class WebSocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, *args, **kwargs):
self.bridge = kwargs.pop("bridge")
super(WebSocketHandler, self).__init__(*args, **kwargs)
def open(self):
self.bridge.websocket_pool.add(self)
print("opened:", self, file=sys.stderr)
self.bridge.send_scene(self)
def on_message(self, message):
pass
def on_close(self):
self.bridge.websocket_pool.remove(self)
print("closed:", self, file=sys.stderr)
def create_command(data):
"""Encode the drawing command into a Javascript fetch() command for display."""
return """
fetch("data:application/octet-binary;base64,{}")
.then(res => res.arrayBuffer())
.then(buffer => viewer.handle_command_bytearray(new Uint8Array(buffer)));
""".format(base64.b64encode(data).decode("utf-8"))
class StaticFileHandlerNoCache(tornado.web.StaticFileHandler):
"""Ensures static files do not get cached.
Taken from: https://stackoverflow.com/a/18879658/7829525
"""
def set_extra_headers(self, path):
# Disable cache
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
class ZMQWebSocketBridge(object):
context = zmq.Context()
def __init__(self, zmq_url=None, host="127.0.0.1", port=None,
certfile=None, keyfile=None, ngrok_http_tunnel=False):
self.host = host
self.websocket_pool = set()
self.app = self.make_app()
self.ioloop = tornado.ioloop.IOLoop.current()
if zmq_url is None:
def f(port):
return self.setup_zmq("{:s}://{:s}:{:d}".format(DEFAULT_ZMQ_METHOD, self.host, port))
(self.zmq_socket, self.zmq_stream, self.zmq_url), _ = find_available_port(f, DEFAULT_ZMQ_PORT)
else:
self.zmq_socket, self.zmq_stream, self.zmq_url = self.setup_zmq(zmq_url)
protocol = "http:"
listen_kwargs = {}
if certfile is not None or keyfile is not None:
if certfile is None:
raise(Exception("You must supply a certfile if you supply a keyfile"))
if keyfile is None:
raise(Exception("You must supply a keyfile if you supply a certfile"))
listen_kwargs["ssl_options"] = { "certfile": certfile,
"keyfile": keyfile }
protocol = "https:"
if port is None:
_, self.fileserver_port = find_available_port(self.app.listen, DEFAULT_FILESERVER_PORT, **listen_kwargs)
else:
self.app.listen(port, **listen_kwargs)
self.fileserver_port = port
self.web_url = "{protocol}//{host}:{port}/static/".format(
protocol=protocol, host=self.host, port=self.fileserver_port)
# Note: The (significant) advantage of putting this in here is not only
# so that the workflow is convenient, but also so that the server
# administers the public web_url when clients ask for it.
if ngrok_http_tunnel:
if protocol == "https:":
# TODO(russt): Consider plumbing ngrok auth through here for
# someone who has paid for ngrok and wants to use https.
raise(Exception('The free version of ngrok does not support https'))
# Conditionally import pyngrok
try:
import pyngrok.conf
import pyngrok.ngrok
# Use start_new_session if it's available. Without it, in
# jupyter the server goes down when we cancel execution of any
# cell in the notebook.
config = pyngrok.conf.PyngrokConfig(start_new_session=True)
self.web_url = pyngrok.ngrok.connect(self.fileserver_port, "http", pyngrok_config=config)
# pyngrok >= 5.0.0 returns an NgrokTunnel object instead of the string.
if not isinstance(self.web_url, str):
self.web_url = self.web_url.public_url
self.web_url += "/static/"
print("\n") # ensure any pyngrok output is properly terminated.
def cleanup():
pyngrok.ngrok.kill()
atexit.register(cleanup)
except ImportError as e:
if "pyngrok" in e.__class__.__name__:
raise(Exception("You must install pyngrok (e.g. via `pip install pyngrok`)."))
self.tree = SceneTree()
def make_app(self):
return tornado.web.Application([
(r"/static/(.*)", StaticFileHandlerNoCache, {"path": VIEWER_ROOT, "default_filename": VIEWER_HTML}),
(r"/", WebSocketHandler, {"bridge": self})
])
def wait_for_websockets(self):
if len(self.websocket_pool) > 0:
self.zmq_socket.send(b"ok")
else:
self.ioloop.call_later(0.1, self.wait_for_websockets)
def handle_zmq(self, frames):
cmd = frames[0].decode("utf-8")
if cmd == "url":
self.zmq_socket.send(self.web_url.encode("utf-8"))
elif cmd == "wait":
self.ioloop.add_callback(self.wait_for_websockets)
elif cmd in MESHCAT_COMMANDS:
if len(frames) != 3:
self.zmq_socket.send(b"error: expected 3 frames")
return
path = list(filter(lambda x: len(x) > 0, frames[1].decode("utf-8").split("/")))
data = frames[2]
# Support caching of objects (note: even UUIDs have to match).
cache_hit = (cmd == "set_object" and
find_node(self.tree, path).object and
find_node(self.tree, path).object == data)
if not cache_hit:
self.forward_to_websockets(frames)
if cmd == "set_transform":
find_node(self.tree, path).transform = data
elif cmd == "set_object":
find_node(self.tree, path).object = data
find_node(self.tree, path).properties = []
elif cmd == "set_property":
find_node(self.tree, path).properties.append(data)
elif cmd == "set_animation":
find_node(self.tree, path).animation = data
elif cmd == "delete":
if len(path) > 0:
parent = find_node(self.tree, path[:-1])
child = path[-1]
if child in parent:
del parent[child]
else:
self.tree = SceneTree()
self.zmq_socket.send(b"ok")
elif cmd == "get_scene":
# when the server gets this command, return the tree
# as a series of msgpack-backed binary blobs
drawing_commands = ""
for node in walk(self.tree):
if node.object is not None:
drawing_commands += create_command(node.object)
for p in node.properties:
drawing_commands += create_command(p)
if node.transform is not None:
drawing_commands += create_command(node.transform)
if node.animation is not None:
drawing_commands += create_command(node.animation)
# now that we have the drawing commands, generate the full
# HTML that we want to generate, including the javascript assets
mainminjs_path = os.path.join(VIEWER_ROOT, "main.min.js")
mainminjs_src = ""
with open(mainminjs_path, "r") as f:
mainminjs_src = f.readlines()
mainminjs_src = "".join(mainminjs_src)
html = """
<!DOCTYPE html>
<html>
<head> <meta charset=utf-8> <title>MeshCat</title> </head>
<body>
<div id="meshcat-pane">
</div>
<script>
{mainminjs}
</script>
<script>
var viewer = new MeshCat.Viewer(document.getElementById("meshcat-pane"));
{commands}
</script>
<style>
body {{margin: 0; }}
#meshcat-pane {{
width: 100vw;
height: 100vh;
overflow: hidden;
}}
</style>
<script id="embedded-json"></script>
</body>
</html>
""".format(mainminjs=mainminjs_src, commands=drawing_commands)
self.zmq_socket.send(html.encode('utf-8'))
else:
self.zmq_socket.send(b"error: unrecognized comand")
def forward_to_websockets(self, frames):
cmd, path, data = frames
for websocket in self.websocket_pool:
websocket.write_message(data, binary=True)
def setup_zmq(self, url):
zmq_socket = self.context.socket(zmq.REP)
zmq_socket.bind(url)
zmq_stream = ZMQStream(zmq_socket)
zmq_stream.on_recv(self.handle_zmq)
return zmq_socket, zmq_stream, url
def send_scene(self, websocket):
for node in walk(self.tree):
if node.object is not None:
websocket.write_message(node.object, binary=True)
for p in node.properties:
websocket.write_message(p, binary=True)
if node.transform is not None:
websocket.write_message(node.transform, binary=True)
if node.animation is not None:
websocket.write_message(node.animation, binary=True)
def run(self):
self.ioloop.start()
def main():
import argparse
import sys
import webbrowser
import platform
import asyncio
# Fix asyncio configuration on Windows for Python 3.8 and above.
# Workaround for https://github.com/tornadoweb/tornado/issues/2608
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
parser = argparse.ArgumentParser(description="Serve the MeshCat HTML files and listen for ZeroMQ commands")
parser.add_argument('--zmq-url', '-z', type=str, nargs="?", default=None)
parser.add_argument('--open', '-o', action="store_true")
parser.add_argument('--certfile', type=str, default=None)
parser.add_argument('--keyfile', type=str, default=None)
parser.add_argument('--ngrok_http_tunnel', action="store_true", help="""
ngrok is a service for creating a public URL from your local machine, which
is very useful if you would like to make your meshcat server public.""")
results = parser.parse_args()
bridge = ZMQWebSocketBridge(zmq_url=results.zmq_url,
certfile=results.certfile,
keyfile=results.keyfile,
ngrok_http_tunnel=results.ngrok_http_tunnel)
print("zmq_url={:s}".format(bridge.zmq_url))
print("web_url={:s}".format(bridge.web_url))
if results.open:
webbrowser.open(bridge.web_url, new=2)
try:
bridge.run()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| {
"repo_name": "rdeits/meshcat-python",
"path": "src/meshcat/servers/zmqserver.py",
"copies": "1",
"size": "15334",
"license": "mit",
"hash": 5579737763111907000,
"line_mean": 37.9187817259,
"line_max": 137,
"alpha_frac": 0.5770836051,
"autogenerated": false,
"ratio": 3.973568281938326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015824062126119623,
"num_lines": 394
} |
from __future__ import absolute_import, division, print_function
import atexit
import logging
import os
import signal
import time
from collections import Counter
from mesos.interface import mesos_pb2
from mesos.native import MesosSchedulerDriver
from .binpack import bfd
from .interface import Scheduler
from .proxies import SchedulerProxy
from .proxies.messages import FrameworkInfo, TaskInfo, encode
from .utils import timeout
class Running(object):
def __init__(self, scheduler, name, user='', master=os.getenv('MESOS_MASTER'),
implicit_acknowledge=1, *args, **kwargs):
framework = FrameworkInfo(name=name, user=user, *args, **kwargs)
scheduler = SchedulerProxy(scheduler)
self.driver = MesosSchedulerDriver(scheduler, encode(framework),
master, implicit_acknowledge)
def shutdown(signal, frame):
self.stop()
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
atexit.register(self.stop)
def run(self):
return self.driver.run()
def start(self):
status = self.driver.start()
assert status == mesos_pb2.DRIVER_RUNNING
return status
def stop(self):
return self.driver.stop()
def join(self):
return self.driver.join()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
self.join()
if exc_type:
raise exc_type, exc_value, traceback
class QueueScheduler(Scheduler):
def __init__(self, *args, **kwargs):
self.tasks = {} # holding task_id => task pairs
self.healthy = True
@property
def statuses(self):
return {task_id: task.status for task_id, task in self.tasks.items()}
def is_idle(self):
return not len(self.tasks)
def report(self):
states = [status.state for status in self.statuses.values()]
counts = Counter(states)
message = ', '.join(['{}: {}'.format(key, count)
for key, count in counts.items()])
logging.info('Task states: {}'.format(message))
def wait(self, seconds=-1):
with timeout(seconds):
try:
while self.healthy and not self.is_idle():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
raise
def submit(self, task): # supports commandtask, pythontask etc.
assert isinstance(task, TaskInfo)
self.tasks[task.id] = task
def on_offers(self, driver, offers):
logging.info('Received offers: {}'.format(sum(offers)))
self.report()
# maybe limit to the first n tasks
staging = [self.tasks[status.task_id]
for status in self.statuses.values() if status.is_staging()]
# best-fit-decreasing binpacking
bins, skip = bfd(staging, offers, cpus=1, mem=1)
for offer, tasks in bins:
try:
for task in tasks:
task.slave_id = offer.slave_id
task.status.state = 'TASK_STARTING'
# running with empty task list will decline the offer
driver.launch(offer.id, tasks)
except Exception:
logging.exception('Exception occured during task launch!')
def on_update(self, driver, status):
task = self.tasks[status.task_id]
logging.info('Updated task {} state to {}'.format(status.task_id,
status.state))
try:
task.update(status) # creates new task.status in case of retry
except:
self.healthy = False
driver.stop()
raise
finally:
if status.has_terminated():
del self.tasks[task.id]
self.report()
if __name__ == '__main__':
scheduler = QueueScheduler()
with Running(scheduler, name='test') as fw:
scheduler.wait()
| {
"repo_name": "lensacom/satyr",
"path": "mentor/scheduler.py",
"copies": "1",
"size": "4103",
"license": "apache-2.0",
"hash": -722456883679351000,
"line_mean": 29.8496240602,
"line_max": 82,
"alpha_frac": 0.5832317816,
"autogenerated": false,
"ratio": 4.296335078534032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005605127276021378,
"num_lines": 133
} |
from __future__ import absolute_import, division, print_function
import atexit
import logging
import signal
import sys
import threading
import traceback
from functools import partial
from mesos.interface import mesos_pb2
from mesos.native import MesosExecutorDriver
from .interface import Executor
from .messages import PythonTaskStatus
from .proxies import ExecutorProxy
class Running(object):
def __init__(self, executor):
executor = ExecutorProxy(executor)
self.driver = MesosExecutorDriver(executor)
def shutdown(signal, frame):
self.stop()
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
atexit.register(self.stop)
def run(self):
return self.driver.run()
def start(self):
status = self.driver.start()
assert status == mesos_pb2.DRIVER_RUNNING
return status
def stop(self):
return self.driver.stop()
def join(self):
return self.driver.join()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
self.join()
if exc_type:
raise exc_type, exc_value, traceback
class OneOffExecutor(Executor):
def on_launch(self, driver, task):
status = partial(PythonTaskStatus, task_id=task.id)
def run_task():
driver.update(status(state='TASK_RUNNING'))
logging.info('Sent TASK_RUNNING status update')
try:
logging.info('Executing task...')
result = task()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
logging.exception('Task errored with {}'.format(e))
driver.update(status(state='TASK_FAILED',
data=(e, tb),
message=e.message))
logging.info('Sent TASK_RUNNING status update')
else:
driver.update(status(state='TASK_FINISHED', data=result))
logging.info('Sent TASK_FINISHED status update')
finally:
# stopper = threading.Timer(1.0, driver.stop)
# stopper.start()
driver.stop()
thread = threading.Thread(target=run_task)
thread.start()
def on_kill(self, driver, task_id):
driver.stop()
def on_shutdown(self, driver):
driver.stop()
if __name__ == '__main__':
status = Running(OneOffExecutor()).run()
code = 0 if status == mesos_pb2.DRIVER_STOPPED else 1
sys.exit(code)
| {
"repo_name": "lensacom/satyr",
"path": "mentor/executor.py",
"copies": "1",
"size": "2743",
"license": "apache-2.0",
"hash": 320792100119730050,
"line_mean": 26.9897959184,
"line_max": 73,
"alpha_frac": 0.5873131608,
"autogenerated": false,
"ratio": 4.22,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 98
} |
from __future__ import absolute_import, division, print_function
import atexit
import os
import logging
import socket
import select
import signal
import platform
import requests
import socket
from subprocess import Popen, PIPE, call
import struct
import time
import weakref
from .conf import get_config, DEFAULT_KNIT_HOME
from .env import CondaCreator
from .exceptions import KnitException, YARNException
from .yarn_api import YARNAPI
from .utils import triple_slash
from py4j.protocol import Py4JError
from py4j.java_gateway import JavaGateway, GatewayClient
from py4j.java_collections import MapConverter, ListConverter
logger = logging.getLogger(__name__)
on_windows = platform.system() == "Windows"
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
class Knit(object):
"""
Connection to HDFS/YARN. Launches a single "application" master with a
number of worker containers.
Parameter definition (nn, nn_port, rm, rm_port): those parameters given
to __init__ take priority. If autodetect=True, Knit will attempt to fill
out the others from system configuration files; fallback values are provided
if this fails.
Parameters
----------
nn: str
Namenode hostname/ip
nn_port: int
Namenode Port (default: 9000)
rm: str
Resource Manager hostname
rm_port: int
Resource Manager port (default: 8088)
lang: str
Environment variable language setting, required for ``click`` to
successfully read from the shell. (default: 'C.UTF-8')
user: str ('root')
The user name from point of view of HDFS. This is only used when
checking for the existence of knit files on HDFS, since they are stored
in the user's home directory.
hdfs_home: str
Explicit location of a writable directory in HDFS to store files.
Defaults to the user 'home': hdfs://user/<username>/
replication_factor: int (3)
replication factor for files upload to HDFS (default: 3)
autodetect: bool
Autodetect configuration
upload_always: bool(=False)
If True, will upload conda environment zip always; otherwise will
attempt to check for the file's existence in HDFS (using the hdfs3
library, if present) and not upload if that matches the existing local
file in size and is newer.
knit_home: str
Location of knit's jar
hdfs: HDFileSystem instance or None
Used for checking files in HDFS.
Note: for now, only one Knit instance can live in a single process because
of how py4j interfaces with the JVM.
Examples
--------
>>> k = Knit()
>>> app_id = k.start('sleep 100', num_containers=5, memory=1024)
"""
JAR_FILE = "knit-1.0-SNAPSHOT.jar"
JAVA_APP = "io.continuum.knit.Client"
_instances = weakref.WeakSet()
def __init__(self, autodetect=True, upload_always=False, hdfs_home=None,
knit_home=DEFAULT_KNIT_HOME, hdfs=None, pars=None,
**kwargs):
self.conf = get_config(autodetect=autodetect, pars=pars, **kwargs)
gateway_path = self.conf.get('gateway_path', '')
kerb = self.conf.get(
'hadoop.http.authentication.type', '') == 'kerberos'
if not kerb and self.conf.get('hadoop.http.authentication.simple.'
'anonymous.allowed', '') == 'false':
if 'password' not in self.conf:
raise KnitException('Simple auth required: please supply'
'`password=`.')
pw = self.conf['password']
else:
pw = None
if self.conf.get('yarn.http.policy', '').upper() == "HTTPS_ONLY":
self.yarn_api = YARNAPI(self.conf['rm'], self.conf['rm_port_https'],
scheme='https', gateway_path=gateway_path,
kerberos=kerb, username=self.conf['user'],
password=pw)
else:
self.yarn_api = YARNAPI(self.conf['rm'], self.conf['rm_port'],
gateway_path=gateway_path,
kerberos=kerb, username=self.conf['user'],
password=pw)
self.KNIT_HOME = knit_home
self.upload_always = upload_always
self.lang = self.conf.get('lang', 'C.UTF-8')
self.hdfs_home = hdfs_home or self.conf.get(
'dfs.user.home.base.dir', '/user/' + self.conf['user'])
self.client_gateway = None
# must set KNIT_HOME ENV for YARN App
os.environ['KNIT_HOME'] = self.KNIT_HOME
os.environ['REPLICATION_FACTOR'] = str(self.conf['replication_factor'])
os.environ['HDFS_KNIT_DIR'] = self.hdfs_home
self.client = None
self.master = None
self.app_id = None
self.proc = None
self.hdfs = hdfs
self._instances.add(self)
def __repr__(self):
return "Knit<RM={0}:{1}>".format(self.conf['rm'], self.conf['rm_port'])
@property
def JAR_FILE_PATH(self):
return os.path.join(self.KNIT_HOME, self.JAR_FILE)
def _pre_flight_checks(self, num_containers, virtual_cores, memory,
files, queue):
"""Some checks to see if app is possible to schedule
This depends on YARN's allocations reporting, which do not necessarily
reflect the true amount of resources on the cluster. Other failure
modes, such as full disc, are not likely to be caught here.
"""
try:
# check response from RM
met = self.yarn_api.cluster_metrics()
except YARNException:
raise
except requests.RequestException as e:
if isinstance(e, requests.Timeout):
m = 'Connection timeout'
else:
m = 'Connection error'
raise YARNException(m + ' when talking to the '
'YARN REST server at {}. This can mean that '
'the server/port values are wrong, that you '
'are using the wrong protocol (http/https) or '
'that you need to route through a proxy.'
''.format(self.yarn_api.url))
if met['activeNodes'] < 1:
raise KnitException('No name-nodes active')
# What if we simply don't have the full yarn-site.xml available?
mmin = int(self.conf.get('yarn.scheduler.minimum-allocation-mb', 1024))
# 300MB default allocation for AM in client.scala
mem = (max(300, mmin) + num_containers * max(memory, mmin))
if met['availableMB'] < mem:
raise KnitException('Memory estimate for app (%iMB) exceeds cluster'
' capacity (%iMB)' % (mem, met['availableMB']))
c = 1 + num_containers * virtual_cores
if met['availableVirtualCores'] < c:
raise KnitException('vCPU request for app (%i) exceeds cluster capa'
'city (%i)' % (c, met['availableVirtualCores']))
nodes = self.yarn_api.nodes()
if all((max(mmin, memory) > n['availMemoryMB']) and
(virtual_cores > n['availableVirtualCores'])
for n in nodes):
# cannot test without multiple nodemanagers
raise KnitException('No NodeManager can fit any single container')
if self.hdfs:
df = self.hdfs.df()
cap = (df['capacity'] - df['used']) // 2**20
fs = [self.JAR_FILE_PATH] + [f for f in files
if not f.startswith('hdfs://')]
need = sum(os.stat(f).st_size for f in fs) // 2**20
# NB: if replication > 1 this might not be enough
if cap < need:
raise KnitException('HDFS space requirement (%iMB) exceeds'
'capacity (%iMB)' % (need, cap))
def start(self, cmd, num_containers=1, virtual_cores=1, memory=128,
files=None, envvars=None, app_name="knit", queue="default",
checks=True):
"""
Method to start a yarn app with a distributed shell
Parameters
----------
cmd: str
command to run in each yarn container
num_containers: int
Number of containers YARN should request (default: 1)
* A container should be requested with the number of cores it can
saturate, i.e.
* the average number of threads it expects to have runnable at a
time.
virtual_cores: int
Number of virtual cores per container (default: 1)
* A node's capacity should be configured with virtual cores equal to
* its number of physical cores.
memory: int
Memory per container (default: 128)
* The unit for memory is megabytes.
files: list
list of files to be include in each container. If starting with
`hdfs://`, assume these already exist in HDFS and don't need
uploading. Otherwise, if hdfs3 is installed, existence of the
file on HDFS will be checked to see if upload is needed.
Files ending with `.zip` will be decompressed in the
container before launch as a directory with the same name as the
file: if myarc.zip contains files inside a directory stuff/, to
the container they will appear at ./myarc.zip/stuff/* .
envvars: dict
Environment variables to pass to AM *and* workers. Both keys
and values must be strings only.
app_name: String
Application name shown in YARN (default: "knit")
queue: String
RM Queue to use while scheduling (default: "default")
checks: bool=True
Whether to run pre-flight checks before submitting app to YARN
Returns
-------
applicationId: str
A yarn application ID string
"""
files = files or []
envvars = envvars or {'KNIT_LANG': self.lang}
for k, v in envvars.items():
if not isinstance(k, str) or not isinstance(v, str):
raise ValueError('Environment must contain only strings (%s)'
% ((k, v),))
if self.app_id:
raise ValueError('Already started')
if not isinstance(memory, int):
raise KnitException("Memory argument must be an integer")
if files:
if not isinstance(files, list):
raise KnitException("File argument must be a list of strings")
if checks:
self._pre_flight_checks(num_containers, virtual_cores, memory,
files, queue)
# From https://github.com/apache/spark/blob/d83c2f9f0b08d6d5d369d9fae04cdb15448e7f0d/python/pyspark/java_gateway.py
# thank you spark
## Socket for PythonGatewayServer to communicate its port to us
callback_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
callback_socket.bind(('127.0.0.1', 0))
callback_socket.listen(1)
callback_host, callback_port = callback_socket.getsockname()
if not os.path.exists(self.JAR_FILE_PATH):
raise KnitException('JAR file %s does not exists - please build'
' with maven' % self.JAR_FILE_PATH)
args = ["hadoop", "jar", self.JAR_FILE_PATH, self.JAVA_APP,
"--callbackHost", str(callback_host), "--callbackPort",
str(callback_port)]
## Launch the Java gateway.
# We open a pipe to stdin so that the Java gateway can die when the pipe is broken
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
proc = Popen(args, stdin=PIPE, preexec_fn=preexec_func)
else:
# preexec_fn not supported on Windows
proc = Popen(args, stdin=PIPE)
self.proc = proc
gateway_port = None
# We use select() here in order to avoid blocking indefinitely if the
# subprocess dies before connecting
long_timeout = 60
while gateway_port is None and proc.poll() is None and long_timeout > 0:
timeout = 1 # (seconds)
readable, _, _ = select.select([callback_socket], [], [], timeout)
if callback_socket in readable:
gateway_connection = callback_socket.accept()[0]
# Determine which ephemeral port the server started on:
gateway_port = read_int(gateway_connection.makefile(mode="rb"))
gateway_connection.close()
callback_socket.close()
long_timeout -= 1
if gateway_port is None:
raise Exception("The JVM Knit client failed to launch successfully."
" Check that java is installed and the Knit JAR"
" file exists.")
gateway = JavaGateway(GatewayClient(port=gateway_port),
auto_convert=True)
self.client = gateway.entry_point
self.client_gateway = gateway
logger.debug("Files submitted: %s" % files)
upfiles = [f for f in files if (not f.startswith('hdfs://')
and self.check_needs_upload(f))]
logger.debug("Files to upload: %s" % upfiles)
jfiles = ListConverter().convert(upfiles, gateway._gateway_client)
jenv = MapConverter().convert(envvars, gateway._gateway_client)
self.app_id = self.client.start(jfiles, jenv, app_name, queue)
## Wait for AM to appear
long_timeout = 100
master_rpcport = -1
while master_rpcport == -1:
master_rpcport = self.client.masterRPCPort()
time.sleep(0.2)
long_timeout -= 0.2
if long_timeout < 0:
break
if master_rpcport in [-1, 'N/A']:
raise Exception(
"""The application master JVM process failed to report back. This can mean:
- that the YARN cluster cannot scheduler adequate resources - check
k.yarn_api.cluster_metrics() and other diagnostic methods;
- that the ApplicationMaster crashed - check the application logs, k.logs();
- that the cluster is otherwise unhealthy - check the RM and NN logs
(use k.yarn_api.system_logs() to find these on a one-node system
""")
master_rpchost = self.client.masterRPCHost()
gateway = JavaGateway(GatewayClient(
address=master_rpchost, port=master_rpcport), auto_convert=True)
self.master = gateway.entry_point
rfiles = [triple_slash(f) if f.startswith('hdfs://') else
'/'.join(['hdfs://', self.hdfs_home, '.knitDeps',
os.path.basename(f)])
for f in files]
logger.debug("Resource files: %s" % rfiles)
jfiles = ListConverter().convert(rfiles, gateway._gateway_client)
jenv = MapConverter().convert(envvars, gateway._gateway_client)
self.master.init(jfiles, jenv, cmd, num_containers,
virtual_cores, memory)
return self.app_id
def add_containers(self, num_containers=1, virtual_cores=1, memory=128):
"""
Method to add containers to an already running yarn app
num_containers: int
Number of containers YARN should request (default: 1)
* A container should be requested with the number of cores it can
saturate, i.e.
* the average number of threads it expects to have runnable at a
time.
virtual_cores: int
Number of virtual cores per container (default: 1)
* A node's capacity should be configured with virtual cores equal to
* its number of physical cores.
memory: int
Memory per container (default: 128)
* The unit for memory is megabytes.
"""
self.master.addContainers(num_containers, virtual_cores, memory)
def get_containers(self):
"""
Method to return active containers
Returns
-------
container_list: List
List of dicts with each container's details
"""
if self.app_id:
return self.yarn_api.app_containers(self.app_id)
else:
raise KnitException('Cannot get containers, app has not started')
def get_container_statuses(self):
"""Get status info for each container
Returns dict where the values are the raw text output.
"""
return {c['id']: c['state'] for c in self.get_containers()}
def remove_containers(self, container_id):
"""
Method to remove containers from a running yarn app
Calls removeContainers in ApplicationMaster.scala
Be careful removing the ...0001 container. This is where the
applicationMaster is running
Parameters
----------
container_id: str
Returns
-------
None
"""
if container_id not in self.get_container_statuses():
raise KnitException('Attempt to remove container nor owned by this'
'app: ' + container_id)
self.master.removeContainer(str(container_id))
@staticmethod
def create_env(env_name, packages=None, remove=False,
channels=None, conda_pars=None):
"""
Create zipped directory of a conda environment
Parameters
----------
env_name : str
packages : list
conda_root: str
Location of conda installation. If None, will download miniconda and
produce an isolated environment.
remove : bool
remove possible conda environment before creating
channels : list of str
conda channels to use (defaults to your conda setup)
conda_pars: dict
Further pars to pass to CondaCreator
Returns
-------
path: str
path to zipped conda environment
Examples
--------
>>> k = Knit()
>>> pkg_path = k.create_env(env_name='dev',
... packages=['distributed', 'dask', 'pandas'])
"""
channels = channels or []
c = CondaCreator(channels=channels, **(conda_pars or {}))
return c.create_env(env_name, packages=packages, remove=remove)
def logs(self, shell=False):
"""
Collect logs from RM (if running)
With shell=True, collect logs from HDFS after job completion
Parameters
----------
shell: bool
Shell out to yarn CLI (default False)
Returns
-------
log: dictionary
logs from each container (when possible)
"""
if self.app_id:
return self.yarn_api.logs(self.app_id, shell=shell)
else:
raise KnitException('Cannot get logs, app not started')
def print_logs(self, shell=False):
"""print out a more console-friendly version of logs()"""
for l, v in self.logs(shell).items():
print('\n### Container ', l, ', id ', v.get('id', 'None'), ' ###\n')
for part in ['stdout', 'stderr']:
print('##', part, '##')
print(v[part])
def wait_for_completion(self, timeout=10):
"""
Wait for completion of the yarn application
Returns
-------
bool:
True if successful, False otherwise
"""
cur_status = self.runtime_status()
while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:
time.sleep(0.2)
timeout -= 0.2
cur_status = self.runtime_status()
if timeout < 0:
break
return timeout > 0
def kill(self):
"""
Method to kill a yarn application
Returns
-------
bool:
True if successful, False otherwise.
"""
if self.client is None:
# never started, can't stop - should be warning or exception?
return False
try:
self.client.kill()
except Py4JError:
logger.debug("Error while attempting to kill", exc_info=1)
# fallback
self.yarn_api.kill(self.app_id)
if self.proc is not None:
self.client_gateway.shutdown()
if on_windows:
call(["cmd", "/c", "taskkill", "/f", "/t", "/pid",
str(self.proc.pid)])
self.proc.terminate()
self.proc.communicate()
self.proc = None
self.client = None
out = self.runtime_status() == 'KILLED'
return out
def __del__(self):
if self.app_id is not None:
try:
self.kill()
except:
pass
self.app_id = None
def status(self):
""" Get status of an application
Returns
-------
log: dictionary
status of application
"""
if self.app_id:
return self.yarn_api.apps_info(self.app_id)
else:
raise KnitException("Cannot get status, app not started")
def runtime_status(self):
""" Get runtime status of an application
Returns
-------
str:
status of application
"""
try:
return self.yarn_api.state(self.app_id)
except:
return "NONE"
def list_envs(self):
"""List knit conda environments already in HDFS
Looks in staging directory for zip-files
Returns: list of dict
Details for each zip-file."""
if self.hdfs:
files = self.hdfs.ls(self.hdfs_home + '/.knitDeps/', True)
return [f for f in files if f['name'].endswith('.zip')]
else:
raise ImportError('Set the `hdfs` attribute to be able to list'
'environments.')
def check_needs_upload(self, path):
"""Upload is needed if file does not exist in HDFS or is older"""
if self.upload_always:
return True
fn = '/'.join([self.hdfs_home, '.knitDeps', os.path.basename(path)])
if self.hdfs and self.hdfs.exists(fn):
st = os.stat(path)
size = st.st_size
t = st.st_mtime
info = self.hdfs.info(fn)
if info['size'] == size and t < info['last_mod']:
return False
else:
return True
else:
return True
@classmethod
def _cleanup(cls):
# called on program exit to destroy lingering connections/apps
for instance in cls._instances:
instance.kill()
atexit.register(Knit._cleanup)
| {
"repo_name": "blaze/knit",
"path": "knit/core.py",
"copies": "2",
"size": "23328",
"license": "bsd-3-clause",
"hash": 9099765492438901000,
"line_mean": 36.993485342,
"line_max": 123,
"alpha_frac": 0.5670010288,
"autogenerated": false,
"ratio": 4.330425097456841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5897426126256842,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import atexit
import os
import sys
from distutils.version import StrictVersion
import pytest
import stripe
from stripe.six.moves.urllib.request import urlopen
from stripe.six.moves.urllib.error import HTTPError
from tests.request_mock import RequestMock
from tests.stripe_mock import StripeMock
MOCK_MINIMUM_VERSION = "0.107.0"
# Starts stripe-mock if an OpenAPI spec override is found in `openapi/`, and
# otherwise fall back to `STRIPE_MOCK_PORT` or 12111.
if StripeMock.start():
MOCK_PORT = StripeMock.port()
else:
MOCK_PORT = os.environ.get("STRIPE_MOCK_PORT", 12111)
@atexit.register
def stop_stripe_mock():
StripeMock.stop()
def pytest_configure(config):
if not config.getoption("--nomock"):
try:
resp = urlopen("http://localhost:%s/" % MOCK_PORT)
info = resp.info()
version = info.get("Stripe-Mock-Version")
if version != "master" and StrictVersion(version) < StrictVersion(
MOCK_MINIMUM_VERSION
):
sys.exit(
"Your version of stripe-mock (%s) is too old. The minimum "
"version to run this test suite is %s. Please "
"see its repository for upgrade instructions."
% (version, MOCK_MINIMUM_VERSION)
)
except HTTPError as e:
info = e.info()
except Exception:
sys.exit(
"Couldn't reach stripe-mock at `localhost:%s`. Is "
"it running? Please see README for setup instructions."
% MOCK_PORT
)
def pytest_addoption(parser):
parser.addoption(
"--nomock",
action="store_true",
help="only run tests that don't need stripe-mock",
)
def pytest_runtest_setup(item):
if "request_mock" in item.fixturenames and item.config.getoption(
"--nomock"
):
pytest.skip(
"run stripe-mock locally and remove --nomock flag to run skipped tests"
)
@pytest.fixture(autouse=True)
def setup_stripe():
orig_attrs = {
"api_base": stripe.api_base,
"api_key": stripe.api_key,
"client_id": stripe.client_id,
"default_http_client": stripe.default_http_client,
}
http_client = stripe.http_client.new_default_http_client()
stripe.api_base = "http://localhost:%s" % MOCK_PORT
stripe.api_key = "sk_test_123"
stripe.client_id = "ca_123"
stripe.default_http_client = http_client
yield
http_client.close()
stripe.api_base = orig_attrs["api_base"]
stripe.api_key = orig_attrs["api_key"]
stripe.client_id = orig_attrs["client_id"]
stripe.default_http_client = orig_attrs["default_http_client"]
@pytest.fixture
def request_mock(mocker):
return RequestMock(mocker)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/conftest.py",
"copies": "1",
"size": "2886",
"license": "mit",
"hash": -3759028899158590000,
"line_mean": 28.4489795918,
"line_max": 83,
"alpha_frac": 0.6212751213,
"autogenerated": false,
"ratio": 3.6120150187734668,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47332901400734667,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# import base64
from copy import copy
from functools import partial
import six
from google.protobuf.descriptor import FieldDescriptor
from google.protobuf.message import Message
__all__ = ('protobuf_to_dict',
'dict_to_protobuf',
'TYPE_CALLABLE_MAP',
'REVERSE_TYPE_CALLABLE_MAP')
# adapted from https://github.com/benhodgson/protobuf-to-dict
REVERSE_TYPE_CALLABLE_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: int if six.PY3 else six.integer_types[1],
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: six.text_type,
FieldDescriptor.TYPE_BYTES: six.binary_type # base64.b64encode,
}
TYPE_CALLABLE_MAP = copy(REVERSE_TYPE_CALLABLE_MAP)
TYPE_CALLABLE_MAP[FieldDescriptor.TYPE_ENUM] = int
CONTAINER_MAP = []
def enum_to_label(field, value):
return field.enum_type.values_by_number[int(value)].name
def label_to_enum(field, value):
enum_dict = field.enum_type.values_by_name
return enum_dict[value].number
def message_to_container(message, containers):
for msg, cnt in containers:
if isinstance(msg, type): # class definition used
if isinstance(message, msg):
return cnt()
elif isinstance(message, msg.__class__): # object definition used
if all([getattr(msg, field.name) == getattr(message, field.name)
for field, value in msg.ListFields()]):
return cnt()
return dict() # fallback to plain dictionary
def container_to_message(container, containers):
for msg, cnt in containers:
if isinstance(container, cnt):
if isinstance(msg, type):
return msg()
else:
return copy(msg)
def protobuf_to_dict(pb, containers=CONTAINER_MAP, converters=TYPE_CALLABLE_MAP):
result = message_to_container(pb, containers)
# for field, value in pb.ListFields(): # only non-empty fields
for field in pb.DESCRIPTOR.fields: # empty fields too
value = getattr(pb, field.name)
if (field.message_type and field.message_type.has_options and
field.message_type.GetOptions().map_entry):
converter = dict
elif field.type == FieldDescriptor.TYPE_MESSAGE:
# recursively encode protobuf sub-message
converter = partial(protobuf_to_dict, containers=containers,
converters=converters)
elif field.type == FieldDescriptor.TYPE_ENUM:
converter = partial(enum_to_label, field)
else:
converter = converters[field.type]
if field.label == FieldDescriptor.LABEL_REPEATED:
result[field.name] = list(map(converter, value))
else:
result[field.name] = converter(value)
return result
def dict_to_protobuf(dct, pb=None, containers=CONTAINER_MAP,
converters=REVERSE_TYPE_CALLABLE_MAP, strict=True):
default = container_to_message(dct, containers)
if pb:
if default:
pb.MergeFrom(default)
else:
pb = default
pb = pb if isinstance(pb, Message) else pb()
for k, v in dct.items():
try:
# TODO silently skip undifened fields
field = pb.DESCRIPTOR.fields_by_name[k]
except:
if not strict:
continue
else:
raise
pb_value = getattr(pb, k, None)
if field.label == FieldDescriptor.LABEL_REPEATED:
for item in v:
if field.type == FieldDescriptor.TYPE_MESSAGE:
dict_to_protobuf(item, pb_value.add(),
containers, converters)
elif field.type == FieldDescriptor.TYPE_ENUM:
pb_value.append(label_to_enum(field, item))
else:
pb_value.append(item)
elif field.type == FieldDescriptor.TYPE_MESSAGE:
dict_to_protobuf(v, pb_value, containers, converters)
else:
if field.type in converters:
v = converters[field.type](v)
elif field.type == FieldDescriptor.TYPE_ENUM:
v = label_to_enum(field, v)
setattr(pb, field.name, v)
return pb
| {
"repo_name": "kszucs/proxo",
"path": "proxo/protobuf.py",
"copies": "1",
"size": "4894",
"license": "apache-2.0",
"hash": 3923885940524247600,
"line_mean": 34.7226277372,
"line_max": 81,
"alpha_frac": 0.6246424193,
"autogenerated": false,
"ratio": 3.9531502423263327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5077792661626332,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import base64
import fnmatch
import glob
import gzip
import hashlib
import io
import os
import tarfile
import pathspec
AUTHORIZED_FILES = [
"*.libsonnet", "*.libjsonnet", "*.jsonnet", "*.yaml", "README.md", "LICENSE", "AUTHORS",
"NOTICE", "manifests", "deps/*.kub"
]
AUTHORIZED_TEMPLATES = ["*.yaml", "*.jsonnet", "*.libjsonnet", "*.yml", "*.j2", "*.libsonnet"]
def authorized_files():
files = []
for name in AUTHORIZED_FILES:
for filename in glob.glob(name):
files.append(filename)
for root, _, filenames in os.walk('templates'):
for name in AUTHORIZED_TEMPLATES:
for filename in fnmatch.filter(filenames, name):
files.append(os.path.join(root, filename))
return files
def ignore(pattern, path):
spec = pathspec.PathSpec.from_lines('gitwildmatch', pattern.splitlines())
return spec.match_file(path)
def all_files():
files = []
ignore_patterns = None
for filename in ['.helmignore', '.apprignore', '.kpmignore']:
if os.path.exists(filename):
with open(filename, 'r') as f:
ignore_patterns = f.read()
break # allow only one file
for root, _, filenames in os.walk('.'):
for filename in filenames:
path = os.path.join(root, filename)
if ignore_patterns is None or not ignore(ignore_patterns, path):
files.append(path.replace("./", ""))
return files
def pack_kub(kub, filter_files=True, prefix=None):
tar = tarfile.open(kub, "w:gz")
if filter_files:
files = authorized_files()
else:
files = all_files()
for filepath in files:
arcname = None
if prefix:
arcname = os.path.join(prefix, filepath)
tar.add(filepath, arcname=arcname)
tar.close()
def unpack_kub(kub, dest="."):
tar = tarfile.open(kub, "r:gz")
tar.extractall(dest)
tar.close()
class ApprPackage(object):
def __init__(self, blob=None, b64_encoded=True):
self.files = {}
self.tar = None
self.blob = None
self.io_file = None
self._digest = None
self._size = None
self.b64blob = None
if blob is not None:
self.load(blob, b64_encoded)
def _load_blob(self, blob, b64_encoded):
if b64_encoded:
self.b64blob = blob
self.blob = base64.b64decode(blob)
else:
self.b64blob = base64.b64encode(blob)
self.blob = blob
def load(self, blob, b64_encoded=True):
self._digest = None
self._load_blob(blob, b64_encoded)
self.io_file = io.BytesIO(self.blob)
self.tar = tarfile.open(fileobj=self.io_file, mode='r:gz')
for member in self.tar.getmembers():
tfile = self.tar.extractfile(member)
if tfile is not None:
self.files[tfile.name] = tfile.read()
def extract(self, dest):
self.tar.extractall(dest)
def pack(self, dest):
with open(dest, "wb") as destfile:
destfile.write(self.blob)
def tree(self, directory=None):
files = list(self.files.keys())
files.sort()
if directory is not None:
filtered = [x for x in files if x.startswith(directory)]
else:
filtered = files
return filtered
def file(self, filename):
return self.files[filename]
@property
def manifest(self):
manifests_files = ["manifest.yaml", "manifest.jsonnet", "Chart.yaml", "Chart.yml"]
for filename in manifests_files:
if filename in self.files:
return self.files[filename]
raise RuntimeError("Unknown manifest format")
@property
def size(self):
if self._size is None:
self.io_file.seek(0, os.SEEK_END)
self._size = self.io_file.tell()
return self._size
@property
def digest(self):
if self._digest is None:
self.io_file.seek(0)
gunzip = gzip.GzipFile(fileobj=self.io_file, mode='r').read()
self._digest = hashlib.sha256(gunzip).hexdigest()
self.io_file.seek(0)
return self._digest
| {
"repo_name": "app-registry/appr",
"path": "appr/pack.py",
"copies": "2",
"size": "4293",
"license": "apache-2.0",
"hash": 3853480146126873000,
"line_mean": 27.62,
"line_max": 94,
"alpha_frac": 0.5842068484,
"autogenerated": false,
"ratio": 3.6881443298969074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021512009967018458,
"num_lines": 150
} |
from __future__ import (absolute_import, division, print_function)
import base64
import io
import json
import math
import os
import struct
import zlib
import numpy as np
from six import binary_type, text_type
try:
from urllib.parse import uses_relative, uses_netloc, uses_params, urlparse
except ImportError:
from urlparse import uses_relative, uses_netloc, uses_params, urlparse
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
def _validate_location(location):
"""Validates and formats location values before setting."""
if _isnan(location):
raise ValueError('Location values cannot contain NaNs, '
'got {!r}'.format(location))
if type(location) not in [list, tuple]:
raise TypeError('Expected tuple/list for location, got '
'{!r}'.format(location))
if len(location) != 2:
raise ValueError('Expected two values for location [lat, lon], '
'got {}'.format(len(location)))
location = _locations_tolist(location)
return location
def _validate_coordinates(coordinates):
"""Validates multiple coordinates for the various markers in folium."""
if _isnan(coordinates):
raise ValueError('Location values cannot contain NaNs, '
'got:\n{!r}'.format(coordinates))
coordinates = _locations_tolist(coordinates)
return coordinates
def _locations_tolist(x):
"""Transforms recursively a list of iterables into a list of list."""
if hasattr(x, '__iter__'):
return list(map(_locations_tolist, x))
else:
return x
def _flatten(container):
for i in container:
if isinstance(i, (list, tuple, np.ndarray)):
for j in _flatten(i):
yield j
else:
yield i
def _isnan(values):
"""Check if there are NaNs values in the iterable."""
return any(math.isnan(value) for value in _flatten(values))
def image_to_url(image, colormap=None, origin='upper'):
"""
Infers the type of an image argument and transforms it into a URL.
Parameters
----------
image: string, file or array-like object
* If string, it will be written directly in the output file.
* If file, it's content will be converted as embedded in the
output file.
* If array-like, it will be converted to PNG base64 string and
embedded in the output.
origin: ['upper' | 'lower'], optional, default 'upper'
Place the [0, 0] index of the array in the upper left or
lower left corner of the axes.
colormap: callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. You can use colormaps from `matplotlib.cm`.
"""
if isinstance(image, (text_type, binary_type)) and not _is_url(image):
fileformat = os.path.splitext(image)[-1][1:]
with io.open(image, 'rb') as f:
img = f.read()
b64encoded = base64.b64encode(img).decode('utf-8')
url = 'data:image/{};base64,{}'.format(fileformat, b64encoded)
elif 'ndarray' in image.__class__.__name__:
img = write_png(image, origin=origin, colormap=colormap)
b64encoded = base64.b64encode(img).decode('utf-8')
url = 'data:image/png;base64,{}'.format(b64encoded)
else:
# Round-trip to ensure a nice formatted json.
url = json.loads(json.dumps(image))
return url.replace('\n', ' ')
def _is_url(url):
"""Check to see if `url` has a valid protocol."""
try:
return urlparse(url).scheme in _VALID_URLS
except:
return False
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> "data:image/png;base64,"+png_str.encode('base64')
Inspired from
https://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if colormap is None:
def colormap(x):
return (x, x, x, 1)
arr = np.atleast_3d(data)
height, width, nblayers = arr.shape
if nblayers not in [1, 3, 4]:
raise ValueError('Data must be NxM (mono), '
'NxMx3 (RGB), or NxMx4 (RGBA)')
assert arr.shape == (height, width, nblayers)
if nblayers == 1:
arr = np.array(list(map(colormap, arr.ravel())))
nblayers = arr.shape[1]
if nblayers not in [3, 4]:
raise ValueError('colormap must provide colors of r'
'length 3 (RGB) or 4 (RGBA)')
arr = arr.reshape((height, width, nblayers))
assert arr.shape == (height, width, nblayers)
if nblayers == 3:
arr = np.concatenate((arr, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert arr.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if arr.dtype != 'uint8':
with np.errstate(divide='ignore', invalid='ignore'):
arr = arr * 255./arr.max(axis=(0, 1)).reshape((1, 1, 4))
arr[~np.isfinite(arr)] = 0
arr = arr.astype('uint8')
# Eventually flip the image.
if origin == 'lower':
arr = arr[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + arr[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack('!I', len(data)) +
chunk_head +
struct.pack('!I', 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
def mercator_transform(data, lat_bounds, origin='upper', height_out=None):
"""
Transforms an image computed in (longitude,latitude) coordinates into
the a Mercator projection image.
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
lat_bounds : length 2 tuple
Minimal and maximal value of the latitude of the image.
Bounds must be between -85.051128779806589 and 85.051128779806589
otherwise they will be clipped to that values.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
height_out : int, default None
The expected height of the output.
If None, the height of the input is used.
See https://en.wikipedia.org/wiki/Web_Mercator for more details.
"""
import numpy as np
def mercator(x):
return np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi
array = np.atleast_3d(data).copy()
height, width, nblayers = array.shape
lat_min = max(lat_bounds[0], -85.051128779806589)
lat_max = min(lat_bounds[1], 85.051128779806589)
if height_out is None:
height_out = height
# Eventually flip the image
if origin == 'upper':
array = array[::-1, :, :]
lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) *
(lat_max-lat_min))
latslats = (mercator(lat_min) +
np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) *
(mercator(lat_max)-mercator(lat_min)))
out = np.zeros((height_out, width, nblayers))
for i in range(width):
for j in range(nblayers):
out[:, i, j] = np.interp(latslats, mercator(lats), array[:, i, j])
# Eventually flip the image.
if origin == 'upper':
out = out[::-1, :, :]
return out
def none_min(x, y):
if x is None:
return y
elif y is None:
return x
else:
return min(x, y)
def none_max(x, y):
if x is None:
return y
elif y is None:
return x
else:
return max(x, y)
def iter_coords(obj):
"""
Returns all the coordinate tuples from a geometry or feature.
"""
if isinstance(obj, (tuple, list)):
coords = obj
elif 'features' in obj:
coords = [geom['geometry']['coordinates'] for geom in obj['features']]
elif 'geometry' in obj:
coords = obj['geometry']['coordinates']
else:
coords = obj.get('coordinates', obj)
for coord in coords:
if isinstance(coord, (float, int)):
yield tuple(coords)
break
else:
for f in iter_coords(coord):
yield f
def _locations_mirror(x):
"""
Mirrors the points in a list-of-list-of-...-of-list-of-points.
For example:
>>> _locations_mirror([[[1, 2], [3, 4]], [5, 6], [7, 8]])
[[[2, 1], [4, 3]], [6, 5], [8, 7]]
"""
if hasattr(x, '__iter__'):
if hasattr(x[0], '__iter__'):
return list(map(_locations_mirror, x))
else:
return list(x[::-1])
else:
return x
def get_bounds(locations, lonlat=False):
"""
Computes the bounds of the object in the form
[[lat_min, lon_min], [lat_max, lon_max]]
"""
bounds = [[None, None], [None, None]]
for point in iter_coords(locations):
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
if lonlat:
bounds = _locations_mirror(bounds)
return bounds
def camelize(key):
"""Convert a python_style_variable_name to lowerCamelCase.
Examples
--------
>>> camelize('variable_name')
'variableName'
>>> camelize('variableName')
'variableName'
"""
return ''.join(x.capitalize() if i > 0 else x
for i, x in enumerate(key.split('_')))
| {
"repo_name": "QuLogic/folium",
"path": "folium/utilities.py",
"copies": "1",
"size": "10925",
"license": "mit",
"hash": -5378968029708252000,
"line_mean": 29.5167597765,
"line_max": 79,
"alpha_frac": 0.5815102975,
"autogenerated": false,
"ratio": 3.6356073211314475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971595374526087,
"avg_score": 0.00023277467411545624,
"num_lines": 358
} |
from __future__ import absolute_import, division, print_function
import base64
import re
from dossier.fc import StringCounter
def subtopics(store, folders, folder_id, subfolder_id, ann_id=None):
'''Yields an unordered generator of subtopics in a subfolder.
Each item of the generator is a 4-tuple of ``content_id``,
``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type
is one of the following Unicode strings: ``text``, ``image``
or ``manual``. The type of ``data`` is dependent on the
subtopic type. For ``image``, ``data`` is a ``(unicode, str)``,
where the first element is the URL and the second element is
the binary image data. For all other types, ``data`` is a
``unicode`` string.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of
``(content_id, subtopic_id, url, subtopic_type, data)``
'''
# This code will be changed soon. In essence, it implements the
# convention established in SortingDesk for storing subtopic data.
# Currently, subtopic data is stored in the FC that the data (i.e.,
# image or snippet) came from. This is bad because it causes pretty
# severe race conditions.
#
# Our current plan is to put each subtopic datum in its own FC. It will
# require this code to make more FC fetches, but we should be able to
# do it with one `store.get_many` call.
items = folders.grouped_items(folder_id, subfolder_id, ann_id=ann_id)
fcs = dict([(cid, fc) for cid, fc in store.get_many(items.keys())])
for cid, subids in items.iteritems():
fc = fcs[cid]
for subid in subids:
try:
data = typed_subtopic_data(fc, subid)
except KeyError:
# We have a dangling label folks!
continue
yield cid, subid, fc['meta_url'], subtopic_type(subid), data
def typed_subtopic_data(fc, subid):
'''Returns typed subtopic data from an FC.'''
# I don't think this code will change after we fix the data race bug. ---AG
ty = subtopic_type(subid)
data = get_unicode_feature(fc, subid)
assert isinstance(data, unicode), \
'data should be `unicode` but is %r' % type(data)
if ty == 'image':
img_data = get_unicode_feature(fc, subid + '|data')
img = re.sub('^data:image/[a-zA-Z]+;base64,', '', img_data)
img = base64.b64decode(img.encode('utf-8'))
return data, img
elif ty in ('text', 'manual'):
return data
raise ValueError('unrecognized subtopic type "%s"' % ty)
def get_unicode_feature(fc, feat_name):
feat = fc[feat_name]
if isinstance(feat, StringCounter) and len(feat) == 0:
return u''
return feat
def subtopic_type(subid):
return subid.split('|')[1]
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/subtopic.py",
"copies": "1",
"size": "2853",
"license": "mit",
"hash": 6594583593881105000,
"line_mean": 37.5540540541,
"line_max": 79,
"alpha_frac": 0.6358219418,
"autogenerated": false,
"ratio": 3.6205583756345177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47563803174345176,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import bcolz
from bcolz import carray, ctable
import numpy as np
from pandas import DataFrame
from collections import Iterator
from toolz import partition_all, keyfilter
import os
from datashape import to_numpy_dtype
from toolz import keyfilter
from toolz.curried import pipe, partial, map, concat
from .resource import resource
from .dispatch import dispatch
from .compute.bcolz import *
from .utils import keywords
__all__ = ['into', 'bcolz', 'chunks']
@dispatch(type, (ctable, carray))
def into(a, b, **kwargs):
f = into.dispatch(a, type(b))
return f(a, b, **kwargs)
@dispatch((tuple, set, list), (ctable, carray))
def into(o, b, **kwargs):
return into(o, into(np.ndarray(0), b))
@dispatch(Iterator, (ctable, carray))
def into(_, b, **kwargs):
return pipe(b, chunks, map(partial(into, np.ndarray(0))),
map(partial(into, list)),
concat)
@dispatch(np.ndarray, (ctable, carray))
def into(a, b, **kwargs):
return b[:]
@dispatch(ctable, np.ndarray)
def into(a, b, **kwargs):
return ctable(b, **kwargs)
@dispatch(carray, np.ndarray)
def into(a, b, **kwargs):
kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
return carray(b, **kwargs)
@dispatch(carray, (tuple, list))
def into(a, b, dtype=None, **kwargs):
x = into(np.ndarray(0), b, dtype=dtype)
kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
return into(a, x, **kwargs)
@dispatch(carray, carray)
def into(a, b, **kwargs):
if isinstance(a, type):
return b
else:
a.append(iter(b))
return a
@dispatch(ctable, (tuple, list))
def into(a, b, names=None, types=None, **kwargs):
if isinstance(b[0], (tuple, list)):
if not types:
types=[None] * len(b[0])
return ctable([into(np.ndarray(0), c2, dtype=dt)
for (c2, dt) in zip(zip(*b), types)], names,
**kwargs)
else:
if not names:
names =[None] * len(b)
arr = into(np.ndarray(0), b, dtype=np.dtype(list(zip(names, types))))
return ctable(arr, names, **kwargs)
@dispatch((carray, ctable), Iterator)
def into(a, b, **kwargs):
kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
chunks = partition_all(1024, b)
chunk = next(chunks)
a = into(a, chunk, **kwargs)
for chunk in chunks:
a.append(list(zip(*chunk)))
a.flush()
return a
@dispatch(DataFrame, ctable)
def into(a, b, columns=None, schema=None, **kwargs):
if not columns and schema:
columns = dshape(schema)[0].names
return DataFrame.from_items(((column, b[column][:]) for column in
sorted(b.names)),
orient='columns',
columns=columns)
from .compute.chunks import ChunkIterator, chunks
@dispatch((carray, ctable), ChunkIterator)
def into(a, b, **kwargs):
b = iter(b)
a = into(a, next(b), **kwargs)
for chunk in b:
a.append(into(np.ndarray(0), chunk))
a.flush()
return a
from blaze.data.core import DataDescriptor
@dispatch(DataDescriptor, (ctable, carray))
def into(a, b, **kwargs):
a.extend_chunks(chunks(b))
return a
@resource.register('.+\.bcolz/?')
def resource_bcolz(rootdir, **kwargs):
if os.path.exists(rootdir):
kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
return ctable(rootdir=rootdir, **kwargs)
else:
if 'dshape' in kwargs:
dtype = to_numpy_dtype(kwargs['dshape'])
kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
return ctable(np.empty(0, dtype), rootdir=rootdir, **kwargs)
else:
raise ValueError("File does not exist and no `dshape=` given")
| {
"repo_name": "vitan/blaze",
"path": "blaze/bcolz.py",
"copies": "1",
"size": "3864",
"license": "bsd-3-clause",
"hash": 2911770412059642400,
"line_mean": 26.6,
"line_max": 77,
"alpha_frac": 0.6086956522,
"autogenerated": false,
"ratio": 3.36,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.945851698161132,
"avg_score": 0.0020357341177361077,
"num_lines": 140
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.