text (string, lengths 0 to 1.05M) | meta (dict) |
---|---|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
exec(open("ground.py").read())
#
# mine
import hamiltonian
import diffeo
import sde
from utility import *
#
def run(dict,canload=0):
"""
Qr=dict['landmarks'][0,:,:]
Apply push-forward map to sample Qr.
Find average using MAP4.
Plot phase-plane with confidence balls and original data (from push-forward map).
"""
import os.path
if 'fname' in dict:
filename=dict['fname']
else:
print("No filename given")
exit(1)
print("\n",filename," ",dict['ext'],"============================================","\n")
plt.ion()
G=hamiltonian.GaussGreen(dict['ell'],0)
# create set of landmarks by push-forward
SDE = sde.SDE(G)
SDE.set_lam_beta(dict['lam'],dict['beta'],True)
Qr=dict['landmarks'][0,:,:] # use first set as reference
dict['landmarks_n']=SDE.add_sde_noise(Qr, dict['num'])
# find landmark average
SDE=sde.MAP4(G)
SDE.set_data_var(dict['data_var'])
SDE.set_lam_beta(dict['lam'],dict['beta'],False)
SDE.set_landmarks(dict['landmarks_n'])
SDE.set_no_steps(dict['no_steps'])
SDE.solve()
cov_q,cov_p=SDE.cov()
#
# plot landmarks (noisy source data)
if False:
plt.figure(1)
plot_setup()
plt.axis('equal')
plot_landmarks(dict['landmarks_n'],shadow=3,lw=0.2)
plt.savefig(filename+dict['ext']+'_samps.pdf',bbox_inches='tight') # lam
# plot landmarks with average and confidence ball
plt.figure(1)
plot_setup()
plt.axis('equal')
plot_average(SDE.Qh) # red star
Qav=np.average(dict['landmarks_n'],axis=0)
plot_average(Qav,2) # 2=blue dot
add_sd_plot(SDE.Qh, cov_q)
plt.savefig(filename+dict['ext']+'_av.pdf',bbox_inches='tight')
####################################################################
if __name__ == "__main__":
# do this
plt.ion()
#
noise_var=0.0
dict=exp4(noise_var)
dict['beta']=25.0
dict['lam']=0.1
i=4
if i==0:
dict['ext']='2a'
dict['num']=2
if i==1:
dict['ext']='4a'
dict['num']=4
if i==2:
dict['ext']='16b'
dict['num']=16
if i==3:
dict['ext']='64b'
dict['num']=64
if i==4:
dict['ext']='256b'
dict['num']=256
dict['data_var']=noise_var+0.05
run(dict)
| {
"repo_name": "tonyshardlow/reg_sde",
"path": "run_split2_add.py",
"copies": "1",
"size": "2420",
"license": "mit",
"hash": 1280729964433312500,
"line_mean": 27.1395348837,
"line_max": 92,
"alpha_frac": 0.5429752066,
"autogenerated": false,
"ratio": 3.1065468549422337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41495220615422335,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.utils import iteritems
import inspect
from datacheck.compat import native_type
from datacheck.exceptions import (SchemaError, TypeValidationError,
FieldValidationError, UnknownKeysError)
from datacheck.path import init_path, list_item_path, dict_item_path
def validate(data, schema, **kwargs):
if 'path' not in kwargs:
kwargs['path'] = init_path()
return _validate(data, schema, **kwargs)
def _get_validator(schema):
if isinstance(schema, Validator):
return schema
elif inspect.isclass(schema):
return Type(schema)
elif isinstance(schema, list):
l = len(schema)
if l == 0:
return Type(native_type(list))
elif l == 1:
return List(schema[0])
else:
raise SchemaError()
elif isinstance(schema, dict):
return Dict(schema)
raise SchemaError()
def _validate(data, schema, **kwargs):
v = _get_validator(schema)
return v.validate(data, **kwargs)
class Validator(object):
def validate(self, data, path=None):
raise NotImplementedError()
class Type(Validator):
def __init__(self, expected_type):
if not inspect.isclass(expected_type):
raise SchemaError('expected_type must be a class type')
self.expected_type = expected_type
def validate(self, data, path=None):
if not isinstance(data, self.expected_type):
raise TypeValidationError(data, self.expected_type, path=path)
return data
class List(Validator):
def __init__(self, schema):
self.schema = schema
def validate(self, data, path=None):
if path is None:
path = init_path()
if not isinstance(data, list):
raise TypeValidationError(data, native_type(list), path=path)
output_list = []
for i, x in enumerate(data):
subpath = list_item_path(path, i)
output_list.append(_validate(x, self.schema, path=subpath))
return output_list
class DictField(object):
def __init__(self, schema):
self.schema = schema
class Required(DictField):
pass
class Optional(DictField):
def __init__(self, schema):
super(Optional, self).__init__(schema)
self.has_default = False
self.default_value = None
def default(self, x):
self.has_default = True
self.default_value = x
return self
class Dict(Validator):
def __init__(self, schema, allow_unknown=False):
self.schema = schema
self.allow_unknown = allow_unknown
def validate(self, data, path=None):
if path is None:
path = init_path()
if not isinstance(data, dict):
raise TypeValidationError(data, native_type(dict), path=path)
unknown_keys = set(data)
output_dict = {}
for key, field_spec in iteritems(self.schema):
if isinstance(field_spec, DictField):
item_schema = field_spec.schema
is_optional = isinstance(field_spec, Optional)
else:
item_schema = field_spec
is_optional = False
try:
actual_value = data[key]
except KeyError:
if is_optional:
if field_spec.has_default:
output_dict[key] = field_spec.default_value
else:
raise FieldValidationError(key, path=path)
else:
unknown_keys.remove(key)
subpath = dict_item_path(path, key)
output_dict[key] = _validate(actual_value, item_schema,
path=subpath)
if (not self.allow_unknown) and unknown_keys:
raise UnknownKeysError(unknown_keys, path=path)
else:
output_dict.update({k: data[k] for k in unknown_keys})
return output_dict
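# --- Usage sketch (not part of the original module) ---
# A hedged example of how the pieces above compose. The schema below is
# hypothetical and exercises the Type, List, Dict and Optional paths.
if __name__ == '__main__':
    schema = {
        'name': str,                           # bare class -> Type validator
        'scores': [int],                       # one-element list -> List validator
        'tag': Optional(str).default('none'),  # missing key falls back to default
    }
    data = {'name': 'alice', 'scores': [1, 2, 3]}
    # Prints the validated copy, e.g. {'name': 'alice', 'scores': [1, 2, 3], 'tag': 'none'}
    print(validate(data, schema))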
| {
"repo_name": "csdev/datacheck",
"path": "datacheck/core.py",
"copies": "1",
"size": "4116",
"license": "mit",
"hash": 7225076665113634000,
"line_mean": 27.1917808219,
"line_max": 74,
"alpha_frac": 0.583819242,
"autogenerated": false,
"ratio": 4.25206611570248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.533588535770248,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.utils import python_2_unicode_compatible
from datacheck.compat import native_data_type
from datacheck.path import path_to_str
class DatacheckException(Exception):
pass
class SchemaError(DatacheckException):
pass
class ValidationError(DatacheckException):
def __init__(self, path=None):
self.path = path
class MultipleValidationError(ValidationError):
def __init__(self):
pass
@python_2_unicode_compatible
class TypeValidationError(ValidationError):
def __init__(self, data, expected_type, path=None):
super(TypeValidationError, self).__init__(path=path)
self.expected_type = expected_type
self.actual_type = native_data_type(data)
self.actual_value = data
def __str__(self):
return '%s: Expected %s, got %s (%s)' % (
path_to_str(self.path),
self.expected_type.__name__,
self.actual_type.__name__,
self.actual_value,
)
@python_2_unicode_compatible
class FieldValidationError(ValidationError):
def __init__(self, expected_field, path=None):
super(FieldValidationError, self).__init__(path=path)
self.expected_field = expected_field
def __str__(self):
msg = 'Missing required field "%s"' % self.expected_field
if self.path:
msg = path_to_str(self.path) + ': ' + msg
return msg
@python_2_unicode_compatible
class UnknownKeysError(ValidationError):
def __init__(self, unknown_keys, path=None):
super(UnknownKeysError, self).__init__(path=path)
self.unknown_keys = unknown_keys
def __str__(self):
return '%s: Unknown keys: %s' % (
path_to_str(self.path),
', '.join([('"%s"' % k if isinstance(k, str) else str(k))
for k in self.unknown_keys]),
)
@python_2_unicode_compatible
class DataValidationError(ValidationError):
def __init__(self, error_message, data, path=None):
super(DataValidationError, self).__init__(path=path)
self.error_message = error_message
self.data = data
def __str__(self):
return '%s: %s (Received value: %s)' % (
path_to_str(self.path),
self.error_message,
self.data
)
| {
"repo_name": "csdev/datacheck",
"path": "datacheck/exceptions.py",
"copies": "1",
"size": "2417",
"license": "mit",
"hash": -3543301879415149000,
"line_mean": 27.7738095238,
"line_max": 69,
"alpha_frac": 0.6123293339,
"autogenerated": false,
"ratio": 3.936482084690554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048811418590554,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import six
from abc import ABCMeta, abstractmethod, abstractproperty
import torch
@six.add_metaclass(ABCMeta)
class AbstractWorld:
@abstractmethod
def step(self, *args):
pass
@abstractproperty
def state(self):
pass
@abstractproperty
def reset(self):
pass
@abstractproperty
def min_agents(self):
pass
@abstractproperty
def max_agents(self):
pass
@abstractproperty
def num_agents(self):
pass
@six.add_metaclass(ABCMeta)
class Sensor:
@abstractmethod
def observe(self):
pass
@abstractproperty
def space(self):
pass
def __call__(self):
return self.observe()
@six.add_metaclass(ABCMeta)
class Actuator:
@abstractmethod
def act(self):
pass
@abstractproperty
def space(self):
pass
def __call__(self):
self.act()
@six.add_metaclass(ABCMeta)
class AbstractAgent:
def __init__(self, sensor, actuator, model):
in_shape = model.input_shape
out_shape = model(torch.zeros(1, *in_shape)).shape
if in_shape != sensor.space.shape():
raise ValueError
self.sensor = sensor
self.actuator = actuator
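# --- Illustrative sketch (not part of the original module) ---
# Hedged, minimal Sensor/Actuator implementations showing how the abstract
# interfaces above are meant to be filled in. `_ToySpace` is hypothetical and
# only mimics the `sensor.space.shape()` call made by AbstractAgent.
class _ToySpace(object):
    def __init__(self, shape):
        self._shape = shape
    def shape(self):
        return self._shape

class ConstantSensor(Sensor):
    """Observes a fixed-size zero vector; e.g. ConstantSensor(4).observe() -> zeros of shape (4,)."""
    def __init__(self, size=4):
        self._size = size
    def observe(self):
        return torch.zeros(self._size)
    @property
    def space(self):
        return _ToySpace((self._size,))

class PrintActuator(Actuator):
    """Side-effect-only actuator used purely for demonstration."""
    def act(self):
        print("acting")
    @property
    def space(self):
        return _ToySpace((1,))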
| {
"repo_name": "sig-ai/SDM",
"path": "sdm/abstract/core.py",
"copies": "1",
"size": "1371",
"license": "mit",
"hash": -7187869851500013000,
"line_mean": 17.28,
"line_max": 58,
"alpha_frac": 0.6134208607,
"autogenerated": false,
"ratio": 4.129518072289157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242938932989156,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
__author__ = 'Sander Bollen'
import locale
import os.path
import logging
from .generics import *
from .models import *
ENCODING = locale.getdefaultlocale()[1]
class Reader(object):
def __init__(self, filename):
self._filename = os.path.basename(filename)
self._handler = open(filename, 'rb')
def __iter__(self):
return self
def next(self):
try:
line = next(self._handler)
except ValueError:
raise StopIteration
return Record.fromline(line)
# python 3 compatibility
def __next__(self):
return self.next()
def close(self):
self._handler.close()
class Writer(object):
def __init__(self, filename):
self._filename = filename
self._handler = open(filename, 'wb')
def write(self, record):
nline = record.line + "\n"
self._handler.write(nline.encode(ENCODING))
def close(self):
self._handler.close()
class Record(object):
def __init__(self, geneName, name, chrom, strand, txStart, txEnd,
cdsStart, cdsEnd, exonCount, exonStarts, exonEnds):
self._gene = geneName
self._tr_name = name
self._chr = chrom
self._strand = strand
self._tx_start = txStart
self._tx_end = txEnd
self._cds_start = cdsStart
self._cds_end = cdsEnd
self._exon_count = exonCount
self._exon_start = exonStarts
self._exon_ends = exonEnds
@property
def gene(self):
return str(self._gene)
@property
def transcript(self):
return str(self._tr_name)
@property
def chromosome(self):
return str(self._chr)
@property
def strand(self):
return str(self._strand)
@property
def txStart(self):
return int(self._tx_start)
@property
def txEnd(self):
return int(self._tx_end)
@property
def cdsStart(self):
return int(self._cds_start)
@property
def cdsEnd(self):
return int(self._cds_end)
@property
def n_exons(self):
return int(self._exon_count)
@property
def exonStarts(self):
return [int(x) for x in self._exon_start]
@property
def exonEnds(self):
return [int(x) for x in self._exon_ends]
@property
def exons(self):
return Exon.fromrecord(self)
@property
def cds_exons(self):
"""
Return those exons which lie within the cds
Also returns those partially inside the cds
:return:
"""
return [x for x in self.exons if x.stop >= self.cdsStart and
x.start <= self.cdsEnd]
def to_dict(self):
d = {}
d["geneName"] = self.gene
d["name"] = self.transcript
d["chrom"] = self.chromosome
d["strand"] = self.strand
d["txStart"] = self.txStart
d["txEnd"] = self.txEnd
d["cdsStart"] = self.cdsStart
d["cdsEnd"] = self.cdsEnd
d["exonStarts"] = self.exonStarts
d["exonEnds"] = self.exonEnds
d["exonCount"] = self.n_exons
return d
@property
def line(self):
line = []
d = self.to_dict()
for nlc in NUMERIC_LIST_COLUMNS:
d[nlc] = ",".join(map(str, d[nlc])) + ","
for col in COLUMNS:
line += [d[col]]
return "\t".join(map(str, line))
@classmethod
def fromdict(cls, items):
"""
Builds a record from a dictionary.
This dictionary must contain all fields specified in generics.COLUMNS
"""
normal_columns = set(COLUMNS) - set(NUMERIC_LIST_COLUMNS) # <-- remember, this is UNORDERED!
# first check whether all columns are there and properly formatted
for c in COLUMNS:
if c not in items:
raise ValueError("Item {c} must be given".format(c=c))
for nlc in NUMERIC_LIST_COLUMNS:
if not isinstance(items[nlc], list):
raise ValueError("Item {nlc} must be a list of integers".format(nlc=nlc))
elif not all([isinstance(x, int) for x in items[nlc]]):
raise ValueError("Item {nlc} must be a list of integers".format(nlc=nlc))
for nc in NUMERIC_COLUMNS:
if not isinstance(items[nc], int):
raise ValueError("Item {nc} must be an integer".format(nc=nc))
r = Record(**items)
return r
@classmethod
def fromline(cls, line):
"""
Builds a record from a line
"""
raw_items = line.strip().split('\t')
assert len(raw_items) >= 11, "Contains less than 11 columns!"
items = dict()
for i in range(11):
items[COLUMNS[i]] = raw_items[i]
for nc in NUMERIC_COLUMNS:
items[nc] = int(items[nc])
for lnc in NUMERIC_LIST_COLUMNS:
if not items[lnc].endswith(','):
raise ValueError("Malformed refFlat file! Value {lnc} must end in a comma".format(lnc=lnc))
it = items[lnc].split(',')
it.pop()
items[lnc] = [int(x) for x in it]
r = Record(**items)
return r
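# --- Usage sketch (not part of the original module) ---
# Hedged example of building a Record programmatically via fromdict(); every
# field value below is made up purely for illustration. Not called on import.
def _example_record():
    items = {
        "geneName": "GENE1", "name": "GENE1-tr1", "chrom": "chr1",
        "strand": "-", "txStart": 100, "txEnd": 900,
        "cdsStart": 150, "cdsEnd": 850, "exonCount": 3,
        "exonStarts": [100, 300, 600],
        "exonEnds": [250, 450, 900],
    }
    rec = Record.fromdict(items)
    print(rec.line)                       # tab-separated refFlat line
    print([e.number for e in rec.exons])  # [3, 2, 1] -- numbering follows strand
    return rec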
class RefFlatProcessor(object):
def __init__(self, filename, log=True, log_level="INFO"):
self.filename = filename
self._already_processed = False
self.genes = {}
self.transcripts = {}
self.n_duplicates = 0
self.duplicates = []
self.log = log
if log:
self.logger = logging.getLogger("RefFlatProcessor")
numeric_level = getattr(logging, log_level.upper(), None)
self.logger.setLevel(numeric_level)
ch = logging.StreamHandler()
ch.setLevel(numeric_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.logger.info("Initializing...")
def process(self, remove_duplicates=True, flush=True):
"""
Create gene and transcript tables for the input refflat file
:param remove_duplicates: Boolean. True by default.
If true, will remove duplicates from the tables.
Duplicate is defined when the transcript name occurs more than one (aka transcript should be unique)
Since we have no way of knowing what transcript is the correct one,
the retained transcript is simply the first we encounter.
Conflicting records will be placed in the `duplicates` list of this object
Please note that if this variable is set to False, derived gene coordinates may no longer make sense
(Yes, there are transcripts that are annotated on multiple chromosomes even!)
:param flush: flush contents of object first
:return: genes as dict to self.genes, transcript as dict to self.transcripts
"""
if flush:
self._already_processed = False
self.genes = {}
self.transcripts = {}
self.duplicates = []
self.n_duplicates = 0
for i, record in enumerate(Reader(self.filename)):
if i % 1000 == 0 and self.log:
self.logger.info("Processed {0} records".format(i))
if not record.gene in self.genes and not record.transcript in self.transcripts:
tmptranscript = Transcript(record.transcript, record.chromosome, record.txStart, record.txEnd,
record.cdsStart, record.cdsEnd, exons=record.exons, strand=record.strand)
tmpgene = Gene(record.gene)
tmpgene.update_transcripts(tmptranscript)
tmptranscript.gene = tmpgene
self.genes[record.gene] = tmpgene
self.transcripts[tmptranscript.name] = tmptranscript
elif record.gene in self.genes and not record.transcript in self.transcripts:
tmptranscript = Transcript(record.transcript, record.chromosome, record.txStart, record.txEnd,
record.cdsStart, record.cdsEnd, exons=record.exons, strand=record.strand)
self.genes[record.gene].update_transcripts(tmptranscript)
tmptranscript.gene = self.genes[record.gene]
self.transcripts[tmptranscript.name] = tmptranscript
elif record.gene in self.genes and record.transcript in self.transcripts:
self.n_duplicates += 1
self.duplicates += [record]
if not remove_duplicates:
tmptranscript = Transcript(record.transcript, record.chromosome, record.txStart, record.txEnd,
record.cdsStart, record.cdsEnd, exons=record.exons, strand=record.strand)
self.genes[record.gene].update_transcripts(tmptranscript)
tmptranscript.gene = self.genes[record.gene]
self.transcripts[tmptranscript.name] = tmptranscript
else:
# would happen in record.transcript in transcripts and not record.gene in genes
# which should not be possible
raise ValueError("Something odd happened!")
self._already_processed = True
if self.log:
self.logger.info("Done processing")
| {
"repo_name": "sndrtj/pyrefflat",
"path": "pyrefflat/parser.py",
"copies": "1",
"size": "9535",
"license": "mit",
"hash": 1086052792625860500,
"line_mean": 32.9359430605,
"line_max": 120,
"alpha_frac": 0.5829050865,
"autogenerated": false,
"ratio": 4.122351923908344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205257010408344,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from flask import Flask
from commitsan.hooks_app import app as hooks
frontend = Flask(__name__)
frontend.config['DEBUG'] = True
class CombiningMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
Unlike DispatcherMiddleware, this one doesn't alter the environment of the
called application. That is, applications still receive the absolute path.
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
script = script.rsplit('/', 1)[0]
else:
app = self.mounts.get(script, self.app)
return app(environ, start_response)
app = CombiningMiddleware(frontend, {
'/hooks': hooks,
})
@frontend.route('/')
def hello():
return 'Hello World!'
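# --- Illustrative sketch (not part of the original module) ---
# Hedged demonstration of how CombiningMiddleware picks a mounted app without
# rewriting PATH_INFO; the two tiny WSGI apps below are hypothetical stand-ins.
if __name__ == "__main__":
    def _make_app(label):
        def _app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [label.encode('utf-8')]
        return _app

    demo = CombiningMiddleware(_make_app('root'), {'/hooks': _make_app('hooks')})

    def _call(path):
        return b''.join(demo({'PATH_INFO': path}, lambda status, headers: None))

    print(_call('/hooks/github'))    # b'hooks' -- mounted app, absolute path kept
    print(_call('/anything/else'))   # b'root'  -- falls through to the main app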
| {
"repo_name": "abusalimov/commitsan",
"path": "commitsan/web_app.py",
"copies": "1",
"size": "1160",
"license": "mit",
"hash": -8525677558714258000,
"line_mean": 25.3636363636,
"line_max": 78,
"alpha_frac": 0.6189655172,
"autogenerated": false,
"ratio": 4.233576642335766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352542159535766,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.utils import native
from past.builtins import basestring as _basestring
# The future module redefines certain built-in types so that our python3
# code is also compatible with python2. However, we must be careful not to
# expose one of these redefined type objects to a python2 user who is not
# aware of the future module.
#
# http://python-future.org/what_else.html#passing-data-to-from-python-2-libraries
#
# Safe:
# type(2) --> int
#
# Unsafe; use this function to convert:
# int --> future.types.newint.newint
# type(int(2)) --> future.types.newint.newint
#
# On python3, this function is effectively a no-op.
#
def native_type(t):
if t == int: # future.types.newint.newint
return native_data_type(0)
elif t == list: # future.types.newlist.newlist
return native_data_type([])
elif t == dict: # future.types.newdict.newdict
return native_data_type({})
else:
return t
def native_data_type(x):
return type(native(x))
def is_unicode_or_byte_string(x):
return isinstance(x, _basestring)
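# --- Illustrative sketch (not part of the original module) ---
# Hedged demonstration of the conversion described above. On Python 3 these
# calls are effectively no-ops; on Python 2 (with `future` installed) they map
# the redefined builtins back to the interpreter's own native types.
if __name__ == '__main__':
    print(native_type(int))            # <type 'int'> on Py2, <class 'int'> on Py3
    print(native_data_type(int(2)))    # same native integer type
    print(is_unicode_or_byte_string('abc'))  # True for text and byte strings alike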
| {
"repo_name": "csdev/datacheck",
"path": "datacheck/compat.py",
"copies": "1",
"size": "1197",
"license": "mit",
"hash": 5608630450762224000,
"line_mean": 28.1951219512,
"line_max": 81,
"alpha_frac": 0.6883876358,
"autogenerated": false,
"ratio": 3.4297994269340975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46181870627340976,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from .generics import *
class Exon(object):
"""
This class defines an exon inside a record
"""
__slots__ = ["_gene", "_transcript", "_chr", "_start", "_end", "_number"]
def __init__(self, gene, transcript, chr, start, stop, n):
self._gene = gene
self._transcript = transcript
self._chr = chr
self._start = start
self._end = stop
self._number = n
@property
def gene(self):
return self._gene
@property
def transcript(self):
return self._transcript
@property
def chr(self):
return self._chr
@property
def start(self):
return self._start
@property
def stop(self):
return self._end
@property
def number(self):
return self._number
@classmethod
def fromrecord(cls, record):
exons = []
assert(len(record.exonStarts) == len(record.exonEnds))
if record.strand == "+":
for i, (s, e) in enumerate(zip(record.exonStarts,
record.exonEnds)):
exons.append(Exon(record.gene, record.transcript,
record.chromosome, s, e, i+1))
# for negative strand transcripts
else:
i = len(record.exonStarts)
for (s, e) in zip(record.exonStarts, record.exonEnds):
exons.append(Exon(record.gene, record.transcript,
record.chromosome, s, e, i))
i -= 1
return exons
class Transcript(object):
__slots__ = ["name", "gene", "chr", "start", "end", "cds_start", "cds_end", "exons", "strand"]
def __init__(self, name, chr, start, end, cds_start, cds_end, exons=None, gene=None, strand="+"):
self.name = name
self.gene = gene
self.chr = chr
self.start = start
self.end = end
self.cds_start = cds_start
self.cds_end = cds_end
self.exons = exons
self.strand = strand
def update_exons(self, exon):
if exon.start < self.start:
raise ValueError("Start of exon cannot be in front of start of transcript")
if exon.stop > self.end:
raise ValueError("End of exon cannot be behind end of transcript")
if self.exons:
self.exons.append(exon)
else:
self.exons = [exon]
@property
def cds_exons(self):
"""
Return those exons which lie within the cds
Also returns those partially inside the cds
:return:
"""
return [x for x in self.exons if x.stop >= self.cds_start and
x.start <= self.cds_end]
@property
def line(self):
line = []
d = self.to_dict()
for nlc in NUMERIC_LIST_COLUMNS:
d[nlc] = ",".join(map(str, d[nlc])) + ","
for col in COLUMNS:
line += [d[col]]
return "\t".join(map(str, line))
def to_dict(self):
d = {}
d["geneName"] = self.gene.name
d["name"] = self.name
d["chrom"] = self.chr
d["strand"] = self.strand
d["txStart"] = self.start
d["txEnd"] = self.end
d["cdsStart"] = self.cds_start
d["cdsEnd"] = self.cds_end
d["exonStarts"] = [int(x.start) for x in self.exons]
d["exonEnds"] = [int(x.stop) for x in self.exons]
d["exonCount"] = len(self.exons)
return d
class Gene(object):
__slots__ = ["name", "min_coord", "max_coord", "transcripts", "chr"]
def __init__(self, name, chr=None, min_coord=None, max_coord=None, transcripts=None):
self.name = name
self.min_coord = min_coord
self.max_coord = max_coord
self.transcripts = transcripts
self.chr = chr
def update_transcripts(self, transcript):
if self.min_coord:
if transcript.start < self.min_coord:
self.min_coord = transcript.start
else:
self.min_coord = transcript.start
if self.max_coord:
if transcript.end > self.max_coord:
self.max_coord = transcript.end
else:
self.max_coord = transcript.end
if self.transcripts:
self.transcripts += [transcript]
self.chr += [transcript.chr]
else:
self.transcripts = [transcript]
self.chr = [transcript.chr]
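# --- Usage sketch (not part of the original module) ---
# Hedged example of wiring the model objects together by hand; the coordinates
# are made up. Gene.update_transcripts grows the gene's min/max coordinates as
# transcripts are attached.
if __name__ == "__main__":
    gene = Gene("GENE1")
    t1 = Transcript("GENE1-tr1", "chr1", 100, 500, 150, 450, exons=[], strand="+")
    t2 = Transcript("GENE1-tr2", "chr1", 80, 400, 120, 380, exons=[], strand="+")
    gene.update_transcripts(t1)
    gene.update_transcripts(t2)
    t1.update_exons(Exon("GENE1", "GENE1-tr1", "chr1", 100, 200, 1))
    t1.update_exons(Exon("GENE1", "GENE1-tr1", "chr1", 300, 500, 2))
    print(gene.min_coord, gene.max_coord)     # 80 500
    print([e.number for e in t1.cds_exons])   # [1, 2] -- both overlap the CDS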
| {
"repo_name": "sndrtj/pyrefflat",
"path": "pyrefflat/models.py",
"copies": "1",
"size": "4572",
"license": "mit",
"hash": -3593755624257228300,
"line_mean": 27.9367088608,
"line_max": 101,
"alpha_frac": 0.5330271216,
"autogenerated": false,
"ratio": 3.7691673536685903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.480219447526859,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import sys
from github import ApiError
from commitsan.github import github, GITHUB_BLOB_URL_TMPL
from commitsan.worker import job
CONTRIBUTING_FILENAME = 'CONTRIBUTING.md'
def output(*args, **kwargs):
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
@job()
def post_status(repo, commit, context, description, state='pending', **kwargs):
endpoint = github.repos(repo).statuses(commit)
if 'target_url' not in kwargs:
kwargs['target_url'] = (GITHUB_BLOB_URL_TMPL
.format(filename=CONTRIBUTING_FILENAME,
**locals()))
output('Posting to {repo}: state of {commit} set to {state}: '
'{context} ({description})'
.format(**dict(kwargs, **locals())))
endpoint.post(context='commitsan/{}'.format(context),
description=description, state=state, **kwargs)
def report_issue(repo, commit, context, description, fatal=False):
post_status.delay(repo, commit, context, description,
state='failure' if fatal else 'pending')
| {
"repo_name": "abusalimov/commitsan",
"path": "commitsan/report.py",
"copies": "1",
"size": "1215",
"license": "mit",
"hash": -8307745388300749000,
"line_mean": 36.96875,
"line_max": 79,
"alpha_frac": 0.6255144033,
"autogenerated": false,
"ratio": 4.063545150501672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5189059553801671,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import collections
import itertools
import regex as re
import textwrap
from commitsan.checks import checker
from commitsan.git import git_show
def split_lines(s):
return [line.strip() for line in s.split('\n') if line.strip()]
ignores = split_lines('''
a
again
all
always
an
any
big
bigger
bit
correct
correctly
couple
couple of
different
differently
do
don't
example of
extra
extreme
extremely
few
good
great
greatly
insignificant
insignificantly
less
little
lot
lot of
lots
lots of
major
minor
more
never
no more
not anymore
once again
once more
one more time
one more times
only
proper
properly
significant
significantly
slight
slightly
small
smaller
some
the
various
''')
whitelist = split_lines('''
ahead
alone
bogus
buf
buff
bug
bulk
bus
busy
callee
caller
checksum
child
coding
cold
compiler
down
during
event
field
file
handler
head
help
hole
huge
image
issue
killer
know
macro
mgmt
mode
model
nest
net
newly
next
node
none
once
pending
prompt
resource
role
route
scrub
seen
sense
separately
shared
shell
slave
slot
spelling
stat
status
string
stub
stuck
stuff
there
thread
three
trend
unchanged
unhandled
uninitialized
unsupported
user
void
word
worker
zone
''')
blacklist = split_lines('''
bug
bugs
does
doesn't
doing
done
new
news
nothing important
nothing meaningful
nothing significant
nothing special
now
old
optimisation
optimisations
optimization
optimizations
simplification
simplifications
stub
stubs
''')
verbs = split_lines('''
add
adopt
allow
append
avoid
begin^ : began : begun
build^ : built
bump
call
change
check
clean
cleanup^
clear
clone
close
comment
commit^
compile
complain
consolidate
convert
correct
create
deal : dealt
define
delete
disable
document
drop^ : dropped : dropt
eliminate
enable
ensure
exchange
export
extract
fix
fold
forbid^ : forbad : forbade : forbidden
forget^ : forgot : forgotten
get^ : got : gotten
grow : grew : grown
handle
hold : held
ignore
implement
improve
include
inform
initialise
initialize
introduce
issue
kill
make : made
mark
merge
move
need
optimise
optimize
pass
plug^
polish
poll
port
prefer^
prepare
prevent
print
provide
redefine
reduce
reenable
refactor
reimplement
reintroduce
remove
rename
replace
restore
restructure
return
reverse
revert
reword
rework
rewrite : rewrote : rewritten
roll
save
send : sent
separate
set^ :
setup^ :
show : shown
simplify
split^
squash
start
startup^
stop^
support
switch
throw : threw : thrown
tidy
try
turn
uncomment
unexport
unify
unset^ :
update
use
work
workaround :
write : wrote : written
yield
''')
def verb_forms(s):
"""
From a given verb makes 4-element tuple of:
infinitive: The verb itself
-s: The third form
-ing: Continuous tense
-ed: Past simple tense
"""
words = s.split()
verb = words.pop(0)
third = cont = past = None
if verb[-1] == '^':
verb = verb[:-1]
cont = past = verb + verb[-1] # stop-s # stop-P-ing # stop-P-ed
elif verb[-1] == 'e':
cont = past = verb[:-1] # merge-s # merg-ing # merg-ed
elif verb[-1] in 'sxz' or verb[-2:] in ('ch', 'sh'):
third = verb + 'e' # fix-e-s # fix-ing # fix-ed
elif verb[-1] == 'y':
third = verb[:-1] + 'ie' # tr-ie-s # try-ing # tr-i-ed
past = verb[:-1] + 'i'
return tuple(' '.join([form] + words)
for form in ((verb),
(third or verb) + 's',
(cont or verb) + 'ing',
(past or verb) + 'ed'))
def to_ident(word, prefix=''):
if word is not None:
return prefix + re.sub(r'\W', '', word).lower()
def make_verb_forms(verb_lines):
"""
Constructs a pair of lists:
1. Containing verbs in imperative mood (like 'fix')
2. Forms considered wrong ('fixes / fixing / fixed')
"""
good_list = []
bad_list = []
bad_idents = {} # bad-to-good form mapping
for line in verb_lines:
phrases = [phrase.strip() for phrase in line.split(':')]
if not any(phrases):
continue
verb, third, cont, past = verb_forms(phrases[0])
good_list.append(verb)
for bad_form in filter(bool, (phrases[1:] or [past]) + [third, cont]):
bad_list.append(bad_form)
bad_idents[to_ident(bad_form, prefix='bad_')] = verb
return good_list, bad_list, bad_idents
good_verb_forms, bad_verb_forms, bad_idents_mapping = make_verb_forms(verbs)
# These are not verbs, but verb_forms is used to make a plural form of nouns.
whitelist = list(itertools.chain.from_iterable(verb_forms(word)[:2]
for word in whitelist))
def to_regex(*word_lists, **name_prefix_kw):
name_prefix = name_prefix_kw.pop('name_prefix', 'word_')
words = sorted(itertools.chain.from_iterable(word_lists), key=len)
return r'(?:{})'.format('|'.join('(?<{}>{})'
.format(to_ident(word, name_prefix),
r'\s+'.join(word.split()))
for word in words))
FUZZINESS = {
0: '{i<=1,s<=1,e<=1}',
4: '{i<=1,d<=1,2i+2d+3s<=4}',
8: '{i<=2,d<=2,2i+2d+3s<=6}',
}
FUZZY_PAT_TMPL = r'''(?x:
\m (?&{group}) \M
| (?: (?= \m (?&{group}){fuzzy[0]} \M (?<__f_la>.*)$) .{{0,}}
| (?= \m (?&{group}){fuzzy[4]} \M (?<__f_la>.*)$) .{{4,}}
| (?= \m (?&{group}){fuzzy[8]} \M (?<__f_la>.*)$) .{{8,}} ) (?=\g<__f_la>$)
)'''
class Fuzzifier(object):
def __init__(self, fuzziness={}):
super(Fuzzifier, self).__init__()
self.fuzziness = dict(FUZZINESS)
self.fuzziness.update(fuzziness)
def __getitem__(self, name):
return FUZZY_PAT_TMPL.format(group=name, fuzzy=self.fuzziness)
def __call__(self, pat, name):
return (r'''(?x: (?<{group}> {pat} ){{0}} )'''
.format(group=name, pat=pat) + self[name])
fuzzify = Fuzzifier()
# - Bullets or numbered list
# ^^^^
INDENT_RE = re.compile(r'''(?x)^
(?<bullet> \s{0,3} ( [*\-] | \d [:.)] | [^\W_] [.)] ) )? \s*
''')
# [old-style] topic: (label) ...
# ^^^^^^^^^^^ ^^^^^^ ^^^^^^^
TOPIC_RE = re.compile(r'''(?x)^
(?: \s{0,2}
(?<topic>
(?<brackets> (?=\[) (\[+ \S+? \]+){i<=1} (\s? :)? )
| (?<parens> (?=\() (\(+ \S+? \)+){i<=1} (\s? :)? )
| (?<plain> (?=\S) ( \S+? ){i<=2} (\s? :) )
| (?<plain> ([^\s;]+?) (?<!\(\)) (?<semi> \s? ;) ) )
(?<= [\]):;] ) )*+
\s*
''')
# Some text; and more; ';' is ignored; func(); too; and also an orphan ; alone
# ^^^^^^^^^ ^^^^^^^^ ^^^^^^^^^^^^^^ ^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^
SEMI_RE = re.compile(r'''(?x)^
(?: \s*
(?<sentence> ( (?<!\\)(';'|";") | \(\); | \w+\(.*?\); | \s+; | . )*? )
( ; | \s* $ ) )*+
$''')
# Here goes the sentence itself.
SENTENCE_RE = re.compile(r'''(?x)^
(?<head>
(?<skipping>
( (?<skip> \m {skip_pat} \M ) \s* )*+ )
(?<verb> (?<head_verb>
(?<good> \m .{{0,2}} # This compensates fuzziness, for example,
# in "xxx's fix", where "s fix" ~ "fixes".
\m {good_pat} \M )
| (?<bad> \m {bad_pat} \M ) ) )?+ )
(?<tail>
(?(head_verb)| # Don't match if there was a verb at the beginning.
# Also give up if some part of a sentence starts with a good verb.
( (?! [:;,.?!+] \s* (?<!\.) (?&skipping) (?&good) ) . )*?
# Grab an auxiliary verb, if any,
# but filter out a subordinating clause.
(?<= (?<! \m (that|which)('?s)? \M \s* )
(?<aux>
(?: \m
(?: (are|is|was|were)(\s*not|n't)? (\s+being)?
| (?: (have|has|had)?(\s*not|n't)? | (\s+ been) )+ )
\M \s*)?+ ) )
(?<verb> (?<tail_verb> (?! (?&good)|(?&skip) ) (?&bad) (?<!s|ing|ion) ) )
(?= \s* ([:;,.?!)](?!\w)|\s\(|$) ) )?+
.* ) # Consume till the end, for sake of convenience.
$'''.format(good_pat=to_regex(good_verb_forms, whitelist,
name_prefix='good_'),
bad_pat=fuzzify(to_regex(bad_verb_forms, blacklist,
name_prefix='bad_'),
name='__bad_pat'),
skip_pat=fuzzify(to_regex(ignores,
name_prefix='skip_'),
name='__skip_pat')),
flags=re.I|re.B)
END_PERIOD_RE = re.compile(r'''(?x)
(?<! \m (?: Co | Ltd | Inc
| et\ al | etc | e\.g | i\.e | R\.I\.P
| mgmt | smth | std | stmt )
| \. )
\.
$''', flags=re.I)
def non_empty(iterable):
return list(filter(None, iterable))
def strip_topics(iterable):
return non_empty(s.strip('[( )]:;') for s in iterable)
def wrap(s, width=32, ending=' ...'):
lines = textwrap.wrap(s, width,
break_long_words=False,
break_on_hyphens=False)
if not lines:
return ''
s = lines[0]
if len(lines) > 1:
s += ending
return s
def bad_to_good(sm):
sc = sm.capturesdict()
bad = sc['bad'][0] if sc['bad'] else None
bad_ident = to_ident(bad, prefix='bad_')
if bad:
# Find out the exact verb/word.
if bad not in sc[bad_ident]:
# fuzzy?
for bad_ident in bad_idents_mapping:
if bad in sc[bad_ident]:
break
else:
bad_ident = None
return bad, bad_idents_mapping.get(bad_ident)
def msg_regex(repo, commit, lines):
indent = 0
for i, line in enumerate(lines or ['']):
im = INDENT_RE.match(line)
is_bullet_line = bool(im.group('bullet'))
is_subject_line = (i == 0)
is_paragraph_line = (i > 0) and (is_bullet_line or not lines[i-1])
line_indent = im.end()
if is_bullet_line or line_indent < indent:
indent = line_indent # Zero if the line is blank.
if len(line) > 72 and not line.startswith(' ' * (indent+4)):
if is_subject_line:
yield ('error', 'msg/subj-limit',
'Keep the subject concise: 50 characters or less')
else:
yield ('error', 'msg/wrap',
'Wrap the body at 72 characters')
if is_subject_line and is_bullet_line:
yield ('warning', 'msg/subj-list',
'Do not put bullets on the subject line')
if not is_subject_line and not is_paragraph_line:
# Within a body, only lines starting a paragraph are considered.
continue
line = line[line_indent:]
tm = TOPIC_RE.match(line)
tc = tm.capturesdict()
line = line[tm.end():]
labels = [label.join('()') for label in strip_topics(tc['parens'])]
colons_topic = ':'.join(strip_topics(tc['brackets'] + tc['plain']))
orig_topics = non_empty(re.split(r'\s*(?::\s*)+', colons_topic))
topics = []
sentence_matches = []
for topic in orig_topics:
# Topic regex recognizes up to 3 words (\S+ with {i<=2} fuzziness).
# Filter out false positives like in 'fixing smth.: works now' and
# treat them as regular sentences performing additional checks
# like capitalization or mood of the sentence.
sm = SENTENCE_RE.match(topic)
if (sm.captures('verb') and
len(re.findall(r'\m[\w/.,+\-]+\M', topic)) > 1):
sentence_matches.append(sm)
else:
topics.append(topic)
if not (is_subject_line or topics or labels):
# A paragraph looks like an ordinary statement, don't check it.
continue
semi_sentences = non_empty(SEMI_RE.match(line).captures('sentence'))
sentence_matches.extend(map(SENTENCE_RE.match, semi_sentences))
topic = ': '.join(topics + [''])
label = ' '.join(labels + [''])
trunc_topic = ': '.join((topics[:-1] and ['...']) + topics[-1:] + [''])
trunc_label = ' '.join(labels[:1] + (labels[1:] and ['...']) + [''])
trunc_sentence = '' # The wrapped first sentence, if any.
for j, sm in enumerate(sentence_matches):
sentence = orig_sentence = sm.string.rstrip()
is_first_sentence = (j == 0)
bad, good = bad_to_good(sm)
if bad:
if good:
if sm['head_verb']:
# Fixup the sentence to keep examples used through
# remaining unrelated checks consistent. The following
# stmt replaces the 'bad' verb with a 'good' one.
sentence = sm.expand(r'\g<skipping>{}\g<tail>'
.format(good))
example = (": '{}', not '{}'"
.format(good.capitalize(), bad))
else:
# The sentence ends with some shit, strip it out.
sentence = wrap(sentence,
width=max(3, sm.start('aux')))
example = (": '{}', not '... {}{}'"
.format(good.capitalize(), sm['aux'], bad))
else:
example = ' mood'
yield ('error' if is_subject_line else 'warning', 'msg/mood',
'Use the imperative'+example)
if sm['head']:
sentence = sentence.capitalize()
if sentence and is_first_sentence:
trunc_sentence = wrap(sentence, width=3)
if orig_sentence[0].islower() and sentence[0].isupper():
yield ('warning', 'msg/case',
"Capitalize the sentence: '{}{}{}'"
.format(trunc_topic, trunc_label, sentence))
if not is_subject_line:
continue
if not trunc_sentence:
yield ('error', 'msg',
'Must provide log message')
elif not topic:
yield ('warning', 'msg/topic',
"Missing topic / subsystem")
if topic:
if tc['brackets'] or tc['semi']:
yield ('error' if tc['brackets'] else 'warning',
'msg/brackets',
"Use colons inside the topic: '{}{}{}'"
.format(topic, trunc_label, trunc_sentence))
if tc['parens'] and tc['topic'][0].startswith(tc['parens'][0]):
yield ('warning', 'msg/labels',
"Put labels after the topic: '{}{}{}'"
.format(trunc_topic, label, trunc_sentence))
def msg_subj(repo, commit, lines):
if not lines:
return
empty_line_idx = lines.index('') if '' in lines else len(lines)
subj_lines = lines[:empty_line_idx]
body_lines = lines[empty_line_idx+1:]
if not subj_lines:
yield ('warning', 'msg/subj',
'Put subject on the first line')
return
if len(subj_lines) > 1:
yield ('warning', 'msg/subj-line',
'Separate subject from body with a blank line')
if any(END_PERIOD_RE.search(subj_lines[i])
for i in (0, -1)[len(subj_lines)==1:]):
yield ('warning', 'msg/subj-period',
'Do not end the subject line with a period')
@checker
def check_msg(repo, commit):
lines = [line.expandtabs().rstrip()
for line in git_show(repo, commit).splitlines()]
return itertools.chain(msg_subj(repo, commit, lines),
msg_regex(repo, commit, lines))
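# --- Illustrative sketch (not part of the original module) ---
# Hedged demonstration of the word machinery above: verb_forms derives the
# rejected tenses from an imperative verb, and bad_idents_mapping links each
# rejected form back to the imperative that the checker suggests instead.
if __name__ == "__main__":
    print(verb_forms('fix'))     # ('fix', 'fixes', 'fixing', 'fixed')
    print(verb_forms('drop^'))   # trailing '^' doubles the final consonant:
                                 # ('drop', 'drops', 'dropping', 'dropped')
    print(bad_idents_mapping[to_ident('fixed', prefix='bad_')])  # 'fix'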
| {
"repo_name": "abusalimov/commitsan",
"path": "commitsan/checks/msg.py",
"copies": "1",
"size": "16941",
"license": "mit",
"hash": -481625284503234900,
"line_mean": 23.9499263623,
"line_max": 79,
"alpha_frac": 0.4892863467,
"autogenerated": false,
"ratio": 3.6315112540192924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4620797600719292,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import errno
import os
import sys
import subprocess
from subprocess import CalledProcessError
REPOS_PATH = os.path.abspath('repos')
# http://stackoverflow.com/a/600612/545027
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e: # Python >2.5
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def run_cmd(args, bypass_exit_code=False, **kwargs):
try:
return subprocess.check_output(args, **kwargs)
except CalledProcessError:
if not bypass_exit_code:
raise
def git_cmd(repo, args, no_git_dir=False):
repo_path = os.path.join(REPOS_PATH, repo)
env = {}
if not no_git_dir:
env['GIT_DIR'] = repo_path
return run_cmd(['git'] + args, env=env, cwd=repo_path)
def git_revlist(repo, *commits):
try:
out = git_cmd(repo, ['rev-list'] + list(commits) + ['--'])
except CalledProcessError:
return []
else:
return out.splitlines()
def git_show(repo, commit, format='%B'):
format_arg = '--format={}'.format(format) # deal with it
out = git_cmd(repo, ['show', '--no-patch', format_arg, commit])
return out
def git_diff_check(repo, commit):
try:
git_cmd(repo, ['diff-tree', '--check', '--no-color', commit])
except CalledProcessError as e:
out = e.output
return sum(line[0] == '+' for line in out.splitlines() if line) or 1
else:
return 0
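# --- Usage sketch (not part of the original module) ---
# Hedged example of how these helpers compose; 'example/repo' and the range
# 'master..feature' are hypothetical and assume a clone already exists under
# REPOS_PATH (all git output comes back as bytes).
if __name__ == "__main__":
    repo = 'example/repo'
    for commit in git_revlist(repo, 'master..feature'):
        message = git_show(repo, commit)
        whitespace_problems = git_diff_check(repo, commit)
        print(commit, whitespace_problems, message.splitlines()[:1])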
| {
"repo_name": "abusalimov/commitsan",
"path": "commitsan/git.py",
"copies": "1",
"size": "1573",
"license": "mit",
"hash": -3330044426273844700,
"line_mean": 26.5964912281,
"line_max": 76,
"alpha_frac": 0.6102987921,
"autogenerated": false,
"ratio": 3.4571428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9468649651225916,
"avg_score": 0.019758399603388453,
"num_lines": 57
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import numpy as np
from pois import *
# Results from Noll (1976)
Noll=[1.0299,0.582,0.134,0.111,0.0880,0.0648,0.0587,0.0525,0.0463,0.0401,0.0377,0.0352,0.0328,0.0304,0.0279,0.0267,0.0255,0.0243,0.0232,0.0220,0.0208]
def AoCorrect(gridSize=32, r0=32.0, screenSize=1024, numIter=10000, numRemove=6,numTelescope=1):
screenGenerator=PhaseScreens(numTelescope,r0,gridSize,screenSize)
aperture=CircularMaskGrid(gridSize)
normalisation=np.sum(aperture)
variance=0.0
for i in range(numIter):
pupils = next(screenGenerator)
screen=AdaptiveOpticsCorrect(pupils,gridSize,
maxRadial=5,numRemove=numRemove)
screen=screen*aperture
variance = variance + np.sum(screen**2)
print(normalisation)
variance = variance/numIter/normalisation
return(variance,Noll[numRemove-1])
def test_ao():
variance,noll=AoCorrect(numIter=1000)
print('Residual variance:',variance)
print('Noll 1976 result:',noll)
assert(np.abs(variance-noll)<0.1*noll)
| {
"repo_name": "dbuscher/pois",
"path": "tests/test_ao.py",
"copies": "1",
"size": "1321",
"license": "bsd-2-clause",
"hash": 3197986659371759600,
"line_mean": 40.28125,
"line_max": 150,
"alpha_frac": 0.6896290689,
"autogenerated": false,
"ratio": 2.5901960784313727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37798251473313726,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import numpy as np
from .Zernike import ZernikeGrid
from .PhaseScreen import ScreenGenerator
import functools
import sys
__version__="0.4.0"
def PhaseScreens(numTelescope,r0,pupilSize,screenSize=1024,numRealisation=-1):
"""Return a generator for atmospheric wavefront perturbations across
a set of *numTelescope* telescopes, each of size *pupilSize* and
with Fried parameter *r0*. The perturbations are modelled as being
uncorrelated between telescopes. The number of realisations is given
by *numRealisation*, but if *numRealisation* is negative then an
infinite sequence of realisations is generated.
"""
screenGenerators=[ScreenGenerator(screenSize,r0,pupilSize,pupilSize)
for i in range(numTelescope)]
iter=0
while numRealisation<0 or iter<numRealisation:
iter+=1
yield np.array([next(screen) for screen in screenGenerators])
@functools.lru_cache()
def RadiusGrid(gridSize):
"""
Return a square grid with values of the distance from the centre
of the grid to each gridpoint
"""
x,y=np.mgrid[0:gridSize,0:gridSize]
x = x-(gridSize-1.0)/2.0
y = y-(gridSize-1.0)/2.0
return np.abs(x+1j*y)
@functools.lru_cache()
def CircularMaskGrid(gridSize, diameter=None):
"""
Return a square grid with ones inside and zeros outside a given
diameter circle
"""
if diameter is None: diameter=gridSize
return np.less_equal(RadiusGrid(gridSize),diameter/2.0)
def ComplexPupil(pupils,diameter=None):
return np.exp(1j*pupils)*CircularMaskGrid(pupils.shape[-1],diameter)
def AdaptiveOpticsCorrect(pupils,diameter,maxRadial,numRemove=None):
"""
Correct a wavefront using Zernike rejection up to some maximal order.
Can operate on multiple telescopes in parallel.
Note that this version removes the piston mode as well
"""
gridSize=pupils.shape[-1]
pupilsVector=np.reshape(pupils,(-1,gridSize**2))
zernikes=np.reshape(ZernikeGrid(gridSize,maxRadial,diameter),(-1,gridSize**2))
if numRemove is None: numRemove=zernikes.shape[0]
numScreen=pupilsVector.shape[0]
normalisation=1.0/np.sum(zernikes[0])
# Note extra iteration to remove residual piston
for i in list(range(numRemove))+[0,]:
amplitudes=np.inner(zernikes[i],pupilsVector)*normalisation
pupilsVector=pupilsVector-zernikes[i]*amplitudes[:,np.newaxis]
return np.reshape(pupilsVector,pupils.shape)
@functools.lru_cache()
def FibreMode(gridSize,modeDiameter):
"""
Return a pupil-plane Gaussian mode with 1/e diameter given by
*modeDiameter*, normalised so that integral power over the mode is unity
"""
rmode=modeDiameter/2
return np.exp(-(RadiusGrid(gridSize)/rmode)**2)/(np.sqrt(np.pi/2)*rmode)
def FibreCouple(pupils,modeDiameter):
"""
Return the complex amplitudes coupled into a set of fibers
"""
gridSize=pupils.shape[-1]
pupilsVector=np.reshape(pupils,(-1,gridSize**2))
mode=np.reshape(FibreMode(gridSize,modeDiameter),(gridSize**2,))
return np.inner(pupilsVector,mode)
def SingleModeCombine(pupils,modeDiameter=None):
"""
Return the instantaneous coherent fluxes and photometric fluxes for a
multiway single-mode fibre combiner
"""
if modeDiameter is None:
modeDiameter=0.9*pupils.shape[-1]
amplitudes=FibreCouple(pupils,modeDiameter)
cc=np.conj(amplitudes)
fluxes=(amplitudes*cc).real
coherentFluxes=[amplitudes[i]*cc[j]
for i in range(1,len(amplitudes))
for j in range(i)]
return fluxes,coherentFluxes
def MultimodeCombine(pupils):
"""
Return the instantaneous coherent fluxes and photometric fluxes for a
multiway multimode combiner (no spatial filtering)
"""
fluxes=[np.vdot(pupils[i],pupils[i]).real for i in range(len(pupils))]
coherentFluxes=[np.vdot(pupils[i],pupils[j])
for i in range(1,len(pupils))
for j in range(i)]
return fluxes,coherentFluxes
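# --- Usage sketch (not part of the original package) ---
# Hedged end-to-end example: one realisation of atmospheric screens for two
# telescopes, Zernike-based AO correction, then single-mode fibre combination.
# The parameter values are illustrative only.
if __name__ == "__main__":
    gridSize, r0 = 32, 16.0
    screens = next(PhaseScreens(numTelescope=2, r0=r0, pupilSize=gridSize))
    corrected = AdaptiveOpticsCorrect(screens, gridSize, maxRadial=5, numRemove=6)
    fluxes, coherentFluxes = SingleModeCombine(ComplexPupil(corrected))
    print(fluxes, np.abs(coherentFluxes))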
| {
"repo_name": "dbuscher/pois",
"path": "pois/__init__.py",
"copies": "1",
"size": "4167",
"license": "bsd-2-clause",
"hash": 3497524458463220000,
"line_mean": 35.2347826087,
"line_max": 82,
"alpha_frac": 0.7012239021,
"autogenerated": false,
"ratio": 3.222737819025522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44239617211255217,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
from hookserver import HookServer
from commitsan import repos
from commitsan.github import github
github_webhook_secret = os.environ.get('GITHUB_WEBHOOK_SECRET')
if not github_webhook_secret:
raise RuntimeError('Missing GITHUB_WEBHOOK_SECRET environment variable')
app = HookServer(__name__, bytes(github_webhook_secret, 'utf-8'))
app.config['DEBUG'] = True
NULL_SHA = '0'*40
@app.hook('ping')
def ping(data, guid):
repo = data['repository']
update_job = repos.update_repo.delay(repo['full_name'], repo['clone_url'])
for branch in github.repos(repo['full_name']).branches.get():
if branch['name'] != 'master':
commit_range = '{}..{}'.format('master', branch['commit']['sha'])
repos.process_commit_range.delay(repo['full_name'], commit_range,
depends_on=update_job)
return 'pong: {}'.format(data['zen'])
@app.hook('push')
def push(data, guid):
repo = data['repository']
update_job = repos.update_repo.delay(repo['full_name'], repo['clone_url'])
from_commit = data['before']
to_commit = data['after']
if to_commit != NULL_SHA: # skip branch deletions
if from_commit == NULL_SHA:
from_commit = 'master'
commit_range = '{}..{}'.format(from_commit, to_commit)
repos.process_commit_range.delay(repo['full_name'], commit_range,
depends_on=update_job)
return 'OK'
| {
"repo_name": "abusalimov/commitsan",
"path": "commitsan/hooks_app.py",
"copies": "1",
"size": "1612",
"license": "mit",
"hash": 703870169976489500,
"line_mean": 30.6078431373,
"line_max": 78,
"alpha_frac": 0.6160049628,
"autogenerated": false,
"ratio": 3.7401392111368907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4856144173936891,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
__author__ = 'Sander Bollen'
COLUMNS = ["geneName", "name", "chrom", "strand", "txStart", "txEnd",
"cdsStart", "cdsEnd", "exonCount", "exonStarts", "exonEnds"]
NO_EXON_COLUMNS = ["geneName", "name", "chrom", "strand", "txStart", "txEnd",
"cdsStart", "cdsEnd", "exonCount"]
NUMERIC_COLUMNS = ["txStart", "txEnd", "cdsStart", "cdsEnd"]
NUMERIC_LIST_COLUMNS = ["exonStarts", "exonEnds"]
STRING_COLUMNS = set(COLUMNS) - set(NUMERIC_COLUMNS) - set(NUMERIC_LIST_COLUMNS)
def empty_line():
items = {}
for n in NUMERIC_COLUMNS:
items[n] = 0
for s in STRING_COLUMNS:
items[s] = "undefined"
for l in NUMERIC_LIST_COLUMNS:
items[l] = [0]
line = "\t".join([str(items[x]) for x in set(COLUMNS) - set(NUMERIC_LIST_COLUMNS)])
for c in NUMERIC_LIST_COLUMNS:
line += "\t" + ",".join(map(str, items[c])) + ","
return line, items
| {
"repo_name": "sndrtj/pyrefflat",
"path": "pyrefflat/generics.py",
"copies": "1",
"size": "1037",
"license": "mit",
"hash": 2446556390142052400,
"line_mean": 28.6285714286,
"line_max": 87,
"alpha_frac": 0.5949855352,
"autogenerated": false,
"ratio": 3.1615853658536586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42565709010536584,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import OpenEXR, Imath
import numpy as np
from collections import defaultdict
import os
FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
HALF = Imath.PixelType(Imath.PixelType.HALF)
UINT = Imath.PixelType(Imath.PixelType.UINT)
NO_COMPRESSION = Imath.Compression(Imath.Compression.NO_COMPRESSION)
RLE_COMPRESSION = Imath.Compression(Imath.Compression.RLE_COMPRESSION)
ZIPS_COMPRESSION = Imath.Compression(Imath.Compression.ZIPS_COMPRESSION)
ZIP_COMPRESSION = Imath.Compression(Imath.Compression.ZIP_COMPRESSION)
PIZ_COMPRESSION = Imath.Compression(Imath.Compression.PIZ_COMPRESSION)
PXR24_COMPRESSION = Imath.Compression(Imath.Compression.PXR24_COMPRESSION)
NP_PRECISION = {
"FLOAT": np.float32,
"HALF": np.float16,
"UINT": np.uint32
}
def open(filename):
# Check if the file is an EXR file
if not OpenEXR.isOpenExrFile(filename):
raise Exception("File '%s' is not an EXR file." % filename)
# Return an `InputFile`
return InputFile(OpenEXR.InputFile(filename), filename)
def read(filename, channels = "default", precision = FLOAT):
f = open(filename)
if _is_list(channels):
# Construct an array of precisions
return f.get_dict(channels, precision=precision)
else:
return f.get(channels, precision)
def read_all(filename, precision = FLOAT):
f = open(filename)
return f.get_all(precision=precision)
def write(filename, data, channel_names = None, precision = FLOAT, compression = PIZ_COMPRESSION):
# Helper function add a third dimension to 2-dimensional matrices (single channel)
def make_ndims_3(matrix):
if matrix.ndim > 3 or matrix.ndim < 2:
raise Exception("Invalid number of dimensions for the `matrix` argument.")
elif matrix.ndim == 2:
matrix = np.expand_dims(matrix, -1)
return matrix
# Helper function to read channel names from default
def get_channel_names(channel_names, depth):
if channel_names:
if depth != len(channel_names):
raise Exception("The provided channel names have the wrong length (%d vs %d)." % (len(channel_names), depth))
return channel_names
elif depth in _default_channel_names:
return _default_channel_names[depth]
else:
raise Exception("There are no suitable default channel names for data of depth %d" % depth)
#
# Case 1, the `data` argument is a dictionary
#
if isinstance(data, dict):
# Make sure everything has ndims 3
for group, matrix in data.items():
data[group] = make_ndims_3(matrix)
# Prepare precisions
if not isinstance(precision, dict):
precisions = {group: precision for group in data.keys()}
else:
precisions = {group: precision.get(group, FLOAT) for group in data.keys()}
# Prepare channel names
if channel_names is None:
channel_names = {}
channel_names = {group: get_channel_names(channel_names.get(group), matrix.shape[2]) for group, matrix in data.items()}
# Collect channels
channels = {}
channel_data = {}
width = None
height = None
for group, matrix in data.items():
# Read the depth of the current group
# and set height and width variables if not set yet
if width is None:
height, width, depth = matrix.shape
else:
depth = matrix.shape[2]
names = channel_names[group]
# Check the number of channel names
if len(names) != depth:
raise Exception("Depth does not match the number of channel names for channel '%s'" % group)
for i, c in enumerate(names):
if group == "default":
channel_name = c
else:
channel_name = "%s.%s" % (group, c)
channels[channel_name] = Imath.Channel(precisions[group])
channel_data[channel_name] = matrix[:,:,i].astype(NP_PRECISION[str(precisions[group])]).tostring()
# Save
header = OpenEXR.Header(width, height)
header['compression'] = compression
header['channels'] = channels
out = OpenEXR.OutputFile(filename, header)
out.writePixels(channel_data)
#
# Case 2, the `data` argument is one matrix
#
elif isinstance(data, np.ndarray):
data = make_ndims_3(data)
height, width, depth = data.shape
channel_names = get_channel_names(channel_names, depth)
header = OpenEXR.Header(width, height)
header['compression'] = compression
header['channels'] = {c: Imath.Channel(precision) for c in channel_names}
out = OpenEXR.OutputFile(filename, header)
out.writePixels({c: data[:,:,i].astype(NP_PRECISION[str(precision)]).tostring() for i, c in enumerate(channel_names)})
else:
raise Exception("Invalid precision for the `data` argument. Supported are NumPy arrays and dictionaries.")
def tonemap(matrix, gamma=2.2):
return np.clip(matrix ** (1.0/gamma), 0, 1)
class InputFile(object):
def __init__(self, input_file, filename=None):
self.input_file = input_file
if not input_file.isComplete():
raise Exception("EXR file '%s' is not ready." % filename)
header = input_file.header()
dw = header['dataWindow']
self.width = dw.max.x - dw.min.x + 1
self.height = dw.max.y - dw.min.y + 1
self.channels = sorted(header['channels'].keys(),key=_channel_sort_key)
self.depth = len(self.channels)
self.precisions = [c.type for c in header['channels'].values()]
self.channel_precision = {c: v.type for c, v in header['channels'].items()}
self.channel_map = defaultdict(list)
self.root_channels = set()
self._init_channel_map()
def _init_channel_map(self):
# Make a dictionary of subchannels per channel
for c in self.channels:
self.channel_map['all'].append(c)
parts = c.split('.')
if len(parts) == 1:
self.root_channels.add('default')
self.channel_map['default'].append(c)
else:
self.root_channels.add(parts[0])
for i in range(1, len(parts)+1):
key = ".".join(parts[0:i])
self.channel_map[key].append(c)
def describe_channels(self):
if 'default' in self.root_channels:
for c in self.channel_map['default']:
print (c)
for group in sorted(list(self.root_channels)):
if group != 'default':
channels = self.channel_map[group]
print("%-20s%s" % (group, ",".join([c[len(group)+1:] for c in channels])))
def get(self, group = 'default', precision=FLOAT):
channels = self.channel_map[group]
if len(channels) == 0:
print("I did't find any channels in group '%s'." % group)
print("You could try:")
self.describe_channels()
raise Exception("I did't find any channels in group '%s'." % group)
strings = self.input_file.channels(channels)
matrix = np.zeros((self.height, self.width, len(channels)), dtype=NP_PRECISION[str(precision)])
for i, string in enumerate(strings):
precision = NP_PRECISION[str(self.channel_precision[channels[i]])]
matrix[:,:,i] = np.frombuffer(string, dtype = precision) \
.reshape(self.height, self.width)
return matrix
def get_all(self, precision = {}):
return self.get_dict(self.root_channels, precision)
def get_dict(self, groups = [], precision = {}):
if not isinstance(precision, dict):
precision = {group: precision for group in groups}
return_dict = {}
todo = []
for group in groups:
group_chans = self.channel_map[group]
if len(group_chans) == 0:
print("I didn't find any channels for the requested group '%s'." % group)
print("You could try:")
self.describe_channels()
raise Exception("I did't find any channels in group '%s'." % group)
if group in precision:
p = precision[group]
else:
p = FLOAT
matrix = np.zeros((self.height, self.width, len(group_chans)), dtype=NP_PRECISION[str(p)])
return_dict[group] = matrix
for i, c in enumerate(group_chans):
todo.append({'group': group, 'id': i, 'channel': c})
if len(todo) == 0:
print("Please ask for some channels, I cannot process empty queries.")
print("You could try:")
self.describe_channels()
raise Exception("Please ask for some channels, I cannot process empty queries.")
strings = self.input_file.channels([c['channel'] for c in todo])
for i, item in enumerate(todo):
precision = NP_PRECISION[str(self.channel_precision[todo[i]['channel']])]
return_dict[item['group']][:,:,item['id']] = \
np.frombuffer(strings[i], dtype = precision) \
.reshape(self.height, self.width)
return return_dict
def _sort_dictionary(key):
if key == 'R' or key == 'r':
return "000010"
elif key == 'G' or key == 'g':
return "000020"
elif key == 'B' or key == 'b':
return "000030"
elif key == 'A' or key == 'a':
return "000040"
elif key == 'X' or key == 'x':
return "000110"
elif key == 'Y' or key == 'y':
return "000120"
elif key == 'Z' or key == 'z':
return "000130"
else:
return key
def _channel_sort_key(i):
return [_sort_dictionary(x) for x in i.split(".")]
_default_channel_names = {
1: ['Z'],
2: ['X','Y'],
3: ['R','G','B'],
4: ['R','G','B','A']
}
def _is_list(x):
return isinstance(x, (list, tuple, np.ndarray))
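# Minimal usage sketch: read an EXR back with InputFile and tonemap it for a
# quick preview. "image.exr" is a hypothetical path to an existing EXR file.
if __name__ == "__main__":
  exr = InputFile(OpenEXR.InputFile("image.exr"), "image.exr")
  exr.describe_channels()
  rgb = exr.get()          # default channel group as a (height, width, channels) float32 array
  preview = tonemap(rgb)   # gamma-corrected values clipped to [0, 1]
  print(rgb.shape, preview.min(), preview.max())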
| {
"repo_name": "tvogels/pyexr",
"path": "pyexr/exr.py",
"copies": "1",
"size": "9429",
"license": "mit",
"hash": -1979205115625419500,
"line_mean": 32.0842105263,
"line_max": 123,
"alpha_frac": 0.6401527203,
"autogenerated": false,
"ratio": 3.574298711144807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609103273121948,
"avg_score": 0.02106963166457172,
"num_lines": 285
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
__metaclass__ = type
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
try:
# Python 2
from itertools import izip_longest as zip_longest
except ImportError:
# Python 3
from itertools import zip_longest
from itertools import tee
from threading import Timer
def pairwise(iterable):
"""s -> (s0,s1), (s2,s3), (s4, s5), ..."""
a, b = tee(iterable)
next(b, None)
a = iter(iterable)
return list(zip(a, b))
def grouper(iterable, n, fillvalue=None):
"""
Collect data into fixed-length chunks or blocks
grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
:param iterable:
:param n:
:param fillvalue:
:rtype : object
"""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
class Interrupt(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.daemon = True
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
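# Minimal usage sketch of the helpers above; the 0.5 s interval and the
# "tick" message are arbitrary example values.
if __name__ == "__main__":
    print(pairwise("ABCD"))                  # [('A', 'B'), ('B', 'C'), ('C', 'D')]
    print(list(grouper("ABCDEFG", 3, "x")))  # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
    ticker = Interrupt(0.5, print, "tick")   # prints "tick" roughly every 0.5 seconds
    import time
    time.sleep(1.6)
    ticker.stop()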
| {
"repo_name": "friend0/tower",
"path": "tower/utils/utils.py",
"copies": "1",
"size": "1656",
"license": "isc",
"hash": -2757541571328621600,
"line_mean": 22,
"line_max": 60,
"alpha_frac": 0.5911835749,
"autogenerated": false,
"ratio": 3.7636363636363637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9854819938536363,
"avg_score": 0,
"num_lines": 72
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
qutip_conv
==========
QuTiP / tncontract conversions.
Functionality for converting between `qutip.Qobj` and `Tensor`.
Requires `qutip`.
"""
import numpy as np
import qutip as qt
import tncontract as tn
import tncontract.onedim as onedim
def qobj_to_tensor(qobj, labels=None, trim_dummy=True):
"""
Convert a `qutip.Qobj` object to a `Tensor`
Parameters
----------
qobj : Qobj
Qobj to convert.
labels : list, optional
List of labels for the indices. Output labels followed by input labels.
        Defaults to `['out1', ..., 'outk', 'in1', ..., 'ink']`
trim_dummy : bool
If true dummy indices of dimension one are trimmed away
Returns
    -------
Tensor
"""
data = qobj.data.toarray()
if not len(np.shape(qobj.dims)) == 2:
# wrong dims (not a ket, bra or operator)
raise ValueError("qobj element not a ket/bra/operator")
output_dims = qobj.dims[0]
input_dims = qobj.dims[1]
nsys = len(output_dims)
if labels is None:
output_labels = ['out'+str(k) for k in range(nsys)]
input_labels = ['in'+str(k) for k in range(nsys)]
else:
output_labels = labels[:nsys]
input_labels = labels[nsys:]
t = tn.matrix_to_tensor(data, output_dims+input_dims, output_labels+
input_labels)
if trim_dummy:
t.remove_all_dummy_indices()
return t
def tensor_to_qobj(tensor, output_labels, input_labels):
"""
Convert a `Tensor` object to a `qutip.Qobj`
Parameters
----------
tensor : Tensor
Tensor to convert.
output_labels : list
List of labels that will be the output indices for the `Qobj`.
`None` can be used to insert a dummy index of dimension one.
    input_labels : list
List of labels that will be the input indices for the `Qobj`.
`None` can be used to insert a dummy index of dimension one.
Returns
-------
Qobj
Notes
-----
    The `output_labels` and `input_labels` determine the tensor product
    structure of the resulting `Qobj`, including the order of the components.
If the indices corresponding to `output_labels` have dimensions
[dim_out1, ..., dim_outk] and the indices corresponding to `input_labels`
have dimensions [dim_in1, ..., dim_inl], the `Qobj.dims` attribute will be
    `Qobj.dims = [[dim_out1, ..., dim_outk], [dim_in1, ..., dim_inl]]`
Examples
--------
Turn a rank-one vector into a ket `Qobj` (note the use of a `None` input
label to get a well defined `Qobj`)
>>> t = Tensor(np.array([1,0]), labels=['idx1'])
>>> q = tensor_to_qobj(t, ['idx1'], [None])
>>> print(q)
Quantum object: dims = [[2], [1]], shape = [2, 1], type = ket
Qobj data =
[[ 1.]
[ 0.]]
"""
output_dims = []
input_dims = []
t = tensor.copy()
if not isinstance(output_labels, list):
output_labels=[output_labels]
if not isinstance(input_labels, list):
input_labels=[input_labels]
# order the indices according to output_labels and input_labels
for i, label in enumerate(output_labels+input_labels):
if label is None:
label = 'dummy'+str(i)
t.add_dummy_index(label, i)
t.move_index(label, i)
if i < len(output_labels):
output_dims.append(t.shape[i])
else:
input_dims.append(t.shape[i])
output_labels_new = [l if l is not None else 'dummy'+str(i)
for i,l in enumerate(output_labels)]
data = tn.tensor_to_matrix(t, output_labels_new)
dims = [output_dims, input_dims]
return qt.Qobj(data, dims=dims)
def qobjlist_to_mpo(qobjlist):
"""
Construct an MPO from a list of Qobj operators.
Many-body operators are put in MPO form by exact SVD, and virtual "left"
and "right" indices with bond dimension one are added between the elements
of the list.
"""
tensors = np.array([])
for i, qobj in enumerate(qobjlist):
if not len(np.shape(qobj.dims)) == 2:
# wrong dims (not a ket, bra or operator)
raise ValueError("qobj element not a ket/bra/operator")
t = qobj_to_tensor(qobj, trim_dummy=False)
# Add left and right indices with bonddim one
t.add_dummy_index('left', -1)
t.add_dummy_index('right', -1)
# Break up many-body operators by SVDing
tmp_mpo = onedim.tensor_to_mpo(t)
tensors = np.concatenate((tensors, tmp_mpo.data))
return onedim.MatrixProductOperator(tensors, left_label='left',
right_label='right', physin_label='physin', physout_label='physout')
def qobjlist_to_mps(qobjlist):
"""
Construct an MPS from a list of Qobj kets.
Many-body states are put in MPS form by exact SVD, and virtual "left"
and "right" indices with bond dimension one are added between the elements
of the list.
"""
mpo = qobjlist_to_mpo(qobjlist)
tensors = mpo.data
for t in tensors:
# Remove dummy input labels
t.remove_all_dummy_indices(labels=[mpo.physin_label])
# Change physical label to the standard choice 'phys'
t.replace_label(mpo.physout_label, 'phys')
return onedim.MatrixProductState(tensors, left_label='left',
right_label='right', phys_label='phys')
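# Minimal round-trip sketch: the label lists passed to tensor_to_qobj match
# the defaults that qobj_to_tensor picks for a two-qubit operator.
if __name__ == "__main__":
    op = qt.tensor(qt.sigmax(), qt.sigmaz())
    t = qobj_to_tensor(op)
    op_back = tensor_to_qobj(t, ['out0', 'out1'], ['in0', 'in1'])
    print((op - op_back).norm())  # expected to be ~0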
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/qutip_conv.py",
"copies": "1",
"size": "5459",
"license": "mit",
"hash": 8572781504548674000,
"line_mean": 29.6685393258,
"line_max": 79,
"alpha_frac": 0.6164132625,
"autogenerated": false,
"ratio": 3.528765352294764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4645178614794764,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import ( # NOQA
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from termcolor import colored
def section(name):
print("\n{} {}".format(
colored("::", 'blue', attrs=['bold']),
colored(name, attrs=['bold'])
)
)
def task(name):
print('{} {}'.format(
colored("==>", 'green', attrs=['bold']),
colored(name, attrs=['bold'])
)
)
def subtask(name):
print('{} {}'.format(
colored(" ->", 'blue', attrs=['bold']),
colored(name, attrs=['bold'])
)
)
def failure(name):
print('{} {}'.format(
colored("==> ERROR:", 'red', attrs=['bold']),
colored(name, attrs=['bold'])
)
)
def subfailure(name):
print('{} {}'.format(
colored(" ->", 'red', attrs=['bold']),
colored(name, 'red', attrs=['bold'])
)
)
def prompt(name):
print('{} {}'.format(
colored("==>", 'yellow', attrs=['bold']),
colored(name, attrs=['bold'])),
end=""
)
def subprompt(name):
print('{} {}'.format(
colored(" ->", 'yellow', attrs=['bold']),
colored(name, attrs=['bold'])),
end="")
| {
"repo_name": "jonhoo/python-agenda",
"path": "agenda/__init__.py",
"copies": "1",
"size": "1403",
"license": "mit",
"hash": 9056076559109853000,
"line_mean": 20.921875,
"line_max": 57,
"alpha_frac": 0.4775481112,
"autogenerated": false,
"ratio": 3.9189944134078214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9896542524607821,
"avg_score": 0,
"num_lines": 64
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from collections import namedtuple
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from celery.tests.case import patch
from conf.appconfig import HEALTH_OK
from deployer.services import health
from tests.helper import dict_compare
__author__ = 'sukrit'
@patch('deployer.services.health.ping')
@patch('deployer.services.health.client')
@patch('deployer.services.health.get_store')
def test_get_health(
get_store, client, ping):
"""
    Should get the health status of external services
    """
    # Given: Operational external services
ping.delay().get.return_value = 'pong'
EtcdInfo = namedtuple('Info', ('machines',))
client.Client.return_value = EtcdInfo(['machine1'])
get_store.return_value.health.return_value = {'type': 'mock'}
# When: I get the health of external services
health_status = health.get_health()
# Then: Expected health status is returned
dict_compare(health_status, {
'etcd': {
'status': HEALTH_OK,
'details': {
'machines': ['machine1']
}
},
'store': {
'status': HEALTH_OK,
'details': {
'type': 'mock'
}
},
'celery': {
'status': HEALTH_OK,
'details': 'Celery ping:pong'
}
})
@patch('deployer.services.health.ping')
@patch('deployer.services.health.client')
@patch('deployer.services.health.get_store')
def test_get_health_when_celery_is_enabled(get_store, client, ping):
"""
    Should get the health status when the celery check is enabled
    """
    # Given: Operational external services
ping.delay().get.return_value = 'pong'
EtcdInfo = namedtuple('Info', ('machines',))
client.Client.return_value = EtcdInfo(['machine1'])
get_store.return_value.health.return_value = {'type': 'mock'}
# When: I get the health of external services
health_status = health.get_health(check_celery=True)
# Then: Expected health status is returned
dict_compare(health_status, {
'etcd': {
'status': HEALTH_OK,
'details': {
'machines': ['machine1']
}
},
'store': {
'status': HEALTH_OK,
'details': {
'type': 'mock'
}
},
'celery': {
'status': HEALTH_OK,
'details': 'Celery ping:pong'
},
})
| {
"repo_name": "totem/cluster-deployer",
"path": "tests/unit/services/test_health.py",
"copies": "1",
"size": "2690",
"license": "mit",
"hash": -8542052839614012000,
"line_mean": 28.2391304348,
"line_max": 68,
"alpha_frac": 0.5791821561,
"autogenerated": false,
"ratio": 3.7994350282485874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9878617184348587,
"avg_score": 0,
"num_lines": 92
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from copy import copy
from textwrap import dedent
from wtforms import (validators, StringField, SubmitField, FileField,
IntegerField, SelectField, FieldList, FormField,
HiddenField, FloatField)
from wtforms import BooleanField
from wtforms.utils import unset_value
from wtforms.widgets import HiddenInput, Select
from wtforms import Form
#FlaskForm needed for wtf.simple_form in flask
from flask_wtf import FlaskForm
from bgmodelbuilder import component, emissionspec
from .fields import (DictField, JSONField, StaticField, validate_units,
NoValSelectField, NumericField, SimsDbField)
from .widgets import SortableTable, InputChoices, StaticIfExists
from collections import OrderedDict
from flask import current_app
from .. import utils
################ Model Forms ##############################
class SaveModelForm(FlaskForm):
"""Form for editing basic bgmodel details"""
name = StringField('Model Name', [validators.required()])
#version = IntegerField("Version", render_kw={'disabled':'disabled'})
description = StringField("Description", [validators.required()])
user = StringField("Your name", [validators.required()])
comment = StringField("Describe your edits", [validators.required()])
# temporarily force this to be true
updatesimdata = BooleanField("Update with latest simulation data",
default=True, render_kw={'disabled': True})
def populate_obj(self, obj):
obj.name = self.name.data
#obj.version = self.version.data
obj.description = self.description.data
obj.editDetails.update(dict(user=self.user.data,
comment=self.comment.data))
class BackendForm(FlaskForm):
""" Form to update simsdb database and view backend """
simsdb = SimsDbField()
class NewModelForm(FlaskForm):
name = StringField('Model Name', [validators.required()], render_kw={'class':'form-control'})
description = StringField("Description", render_kw={'class':'form-control'})
simsdb = SimsDbField()
importmodel = FileField("Optional: Import JSON file",
render_kw={'accept': '.json,.txt'})
submit = SubmitField("Submit", render_kw={'class':'btn btn-primary pull-right'})
############ Shared stuff ####################
dist_choices = ('bulk', 'surface_in', 'surface_out')
dist_widget = InputChoices(choices=dist_choices)
################## Component Forms ########################
spectypes = (('CombinedSpec','RadioactiveContam'),
('RadonExposure','RadonExposure'),
('CosmogenicActivation','CosmogenicActivation'))
class BoundSpecForm(Form):
"""Show mostly static info about a spec registered to a component"""
id = HiddenField("Spec ID")
name = StringField("Name", [validators.required()], widget=StaticIfExists(),
render_kw={'class':'form-control'})
category = NoValSelectField("Category", render_kw={'class':'form-control'},
choices=copy(spectypes),
widget=StaticIfExists(Select()))
distribution = StringField("Dist.", default=(emissionspec.EmissionSpec.
_default_distribution),
widget=StaticIfExists(dist_widget),
render_kw={'class':'form-control'})
#rate = StaticField("Rate", default='')
querymod = JSONField("Querymod",render_kw={'class':'form-control'})
#edit = StaticField("Edit", default="edit")
#override populate_obj to make new spec if necessary
def populate_obj(self, obj):
spec = self.id.data
if not spec or spec == str(None):
spec = emissionspec.buildspecfromdict({
'__class__': self.category.data,
'name': self.name.data,
'distribution': self.distribution.data,
})
obj.spec = spec
obj.querymod = self.querymod.data
class BaseComponentForm(FlaskForm):
"""Edit basic info about a component"""
#todo: is there any way to do this without repeating everything???
name = StringField("Name", [validators.required()])
description = StringField("Description")
comment = StringField("Comment",
description="Details of current implementation")
moreinfo = DictField("Additional Info",
suggested_keys=(('owner',"Part owner/designer/buyer"),
('partnum', "Part number"),
('vendor', "Part vendor"),
('datasheet', "URL for datasheet")))
querymod = JSONField("Query Modifier",
description="JSON object modifying DB queries")
specs = FieldList(FormField(BoundSpecForm, default=component.BoundSpec),
label="Emission specs",
widget=SortableTable(),
render_kw={'_class':"table table-condensed"})
#default component to get units right
defcomp = component.Component()
class ComponentForm(BaseComponentForm):
"""Basic Info plus physical parameters of component"""
material = StringField("Material")
mass = StringField("Mass", [validate_units(defcomp.mass)])
volume = StringField("Volume", [validate_units(defcomp.volume)])
surface_in = StringField("Inner Surface",
[validate_units(defcomp.surface_in)])
surface_out = StringField("Outer Surface",
[validate_units(defcomp.surface_out)])
class PlacementForm(Form):
component = HiddenField()
name = StringField("Name",[validators.required()],
#widget=StaticIfExists(),
render_kw={'class':'form-control'})
cls = SelectField("Type", [validators.required()],
choices=[(d,d) for d in ('Component','Assembly')],
widget=StaticIfExists(Select()),
render_kw={'class':'form-control'})
weight = NumericField("Quantity", [validators.required()] ,
render_kw={'size':1, 'class':'form-control'})
querymod = JSONField("Querymod")
#edit = StaticField("Edit", default="link goes here");
#override BaseForm process to restructure placements
class _FakePlacement(object):
def __init__(self, placement):
self.component = (placement.component.id
if hasattr(placement.component,'id')
else placement.component)
self.name = placement.name
self.cls = (type(placement.component).__name__
if self.component else None)
self.weight = placement.weight
self.querymod = placement.querymod
def process(self, formdata=None, obj=None, data=None, **kwargs):
if isinstance(obj, component.Placement):
obj = self._FakePlacement(obj)
super().process(formdata=formdata, obj=obj, data=data, **kwargs)
#override populate_obj to make new component if necessary
def populate_obj(self, obj):
comp = self.component.data
if not comp or comp == str(None):
comp = component.buildcomponentfromdict({
'__class__': self.cls.data,
'name': self.name.data
})
obj.component = comp
obj.name = self.name.data
obj.weight = self.weight.data
obj.querymod = self.querymod.data
class AssemblyForm(BaseComponentForm):
"""Basic info plus subcomponents"""
#components = JSONField(default=list, widget=HiddenInput())
components = FieldList(FormField(PlacementForm,
default=component.Placement),
label="Subcomponents",
widget=SortableTable(),
render_kw={'_class':"table table-condensed"})
############ EmissionSpec forms ##################
#distribution_choices = [(d,d) for d
# in emissionspec.EmissionSpec._distribution_types]
#distribution_choices = [(d,d) for d in ('bulk','surface_in','surface_out')]
dist_choices = ('bulk', 'surface_in', 'surface_out', 'flux')
class EmissionspecForm(FlaskForm):
name = StringField("Name", [validators.required()])
comment = StringField("Comment",
description="Comment about current implementation")
distribution = StringField("Distribution",[validators.required()],
description=("Choices are suggestions; "
"any value is valid"),
widget=InputChoices(choices=dist_choices))
category = StringField("Category", description=("A description category "
"for grouping sources "
"(usually leave default)"))
moreinfo = DictField("Additional Details",
suggested_keys=(('reference',"Literature or assay source"),
('url', "Link to reference"),
('refdetail', "Summary of reference info"),
('refdate', "Date reference last checked")))
normfunc = StringField("Normalization", description=dedent("""\
Custom rate normalization function. Will be using 'eval', with
variables 'component' and 'units' defined. Can also be 'piece' or
'per piece' indicating that the rate is already normalized"""))
querymod = JSONField("Querymod", description="Overrides for generating simulation database queries")
class RadioactiveIsotopeForm(Form):
id = HiddenField("ID")
name = StringField("Isotope", [validators.required()],
render_kw={'size':7,'class':'form-control'})
rate = StringField("Decay rate",[validate_units(nonzero=True),
validators.input_required()],
render_kw={'size':20,'class':'form-control'})
err = StringField("Uncertainty",
description="Fractional or same units as rate",
render_kw={'size':12,'class':'form-control'})
islimit = BooleanField("Limit?",
description="Is this a measurement upper limit?")
def populate_obj(self, obj):
# make sure isotope and name are always the same
super().populate_obj(obj)
self.name.populate_obj(obj, 'isotope')
def _defaultisotope():
aspec = emissionspec.RadioactiveContam()
aspec._id = ""
return aspec
class RadioactiveContamForm(EmissionspecForm):
subspecs = FieldList(FormField(RadioactiveIsotopeForm,
default=_defaultisotope),
min_entries=1,
label="Isotopes",
widget=SortableTable(),
render_kw={'_class':"table table-condensed"})
defradexp = emissionspec.RadonExposure()
mode_choices = [(d,d) for d in emissionspec.RadonExposure._mode_types]
class RadonExposureForm(EmissionspecForm):
radonlevel = StringField("Radon Level",
[validate_units(defradexp.radonlevel),
validators.required()])
exposure = StringField("Exposure Time",
[validate_units(defradexp.exposure, nonzero=True),
validators.required()])
column_height = StringField("Plateout Column Height",
[validate_units(defradexp.column_height)])
mode = SelectField("Airflow model", choices = mode_choices)
class CosmogenicIsotopeForm(Form):
id = HiddenField("ID")
name = StringField("Isotope",[validators.required()])
halflife = StringField("Half-life",
[validate_units('second', nonzero=True),
validators.required()])
activationrate = StringField("Activation Rate",
[validate_units('1/kg/day', nonzero=True),
validators.required()],
description=("Sea level activation "
"atoms/mass/time"))
defcosmic = emissionspec.CosmogenicActivation()
class CosmogenicActivationForm(EmissionspecForm):
exposure = StringField("Exposure time",
[validate_units(defcosmic.exposure),
validators.required()])
cooldown = StringField("Cooldown time",
[validate_units(defcosmic.cooldown)])
integration = StringField("Measurement time",
[validate_units(defcosmic.integration)])
isotopes = FieldList(FormField(CosmogenicIsotopeForm,
default=emissionspec.CosmogenicIsotope),
min_entries=1,
label="Isotopes",
widget=SortableTable(),
render_kw={'_class':"table table-condensed"})
class DustAccumulationForm(RadioactiveContamForm):
dustmass = StringField("Dust mass",[validate_units(),validators.required()],
description=("Units match distribution, "
"e.g. kg/cm**2 for surface"))
############### utilities #######################
def get_form(form, obj):
"""Get the correct form for a component
Args:
form: formdata returned from request, passed to Form class
obj: object to populate, should be BaseComponent or Emissionspec
"""
cls = None
if isinstance(obj, component.Component):
cls = ComponentForm
elif isinstance(obj, component.Assembly):
cls = AssemblyForm
if not cls:
raise TypeError("Can't find form for object of type %s"%type(obj))
return cls(form, obj=obj)
| {
"repo_name": "bloer/bgexplorer",
"path": "bgexplorer/modeleditor/forms.py",
"copies": "1",
"size": "14255",
"license": "bsd-2-clause",
"hash": 4597161435493207600,
"line_mean": 43.4080996885,
"line_max": 104,
"alpha_frac": 0.5787443002,
"autogenerated": false,
"ratio": 4.7485009993337775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008600479944600116,
"num_lines": 321
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from flask import (Blueprint, render_template, render_template_string,
request, abort, url_for, g, json, flash, redirect,
Response, get_flashed_messages, current_app)
from .. import utils
import io
def findsimmatches(dataset, model=None):
"""find all simdatamatch objects associated with the given dataset"""
res = []
model = g.get('model', model)
if not model:
return res
#make sure we're not working with a full dataset object
try:
dataset = dataset.get('_id')
except AttributeError:
pass
for match in model.getsimdata():
if match.dataset == dataset:
res.append(match)
else:
try:
if dataset in match.dataset:
res.append(match)
except TypeError:
pass
return res
class SimsViewer(object):
"""Blueprint for searching and inspecting simulation data
Args:
app: the bgexplorer Flask object
url_prefix (str): Where to mount this blueprint relative to root
detailtemplate: path to template file for detailed simulation view
enableupload (bool): if True, allow uploading new entries
uploadsummary (func): generate a summary projection from uploaded
(json-generated) documents
"""
def __init__(self, app=None, url_prefix='/simulations',
detailtemplate=None,
enableupload=True, uploadsummary=None):
self.app = app
self.enable_upload = enableupload
self.uploadsummary = uploadsummary
self.bp = Blueprint('simsviewer', __name__,
static_folder='static',
template_folder='templates')
self.bp.add_app_template_global(lambda : self, 'getsimsviewer')
self.bp.add_app_template_global(findsimmatches, 'findsimmatches')
self.bp.add_app_template_global(json.dumps, 'json_dumps')
#handle 'query' requests for non strings
@self.bp.url_defaults
def url_defaults(endpoint, values):
query=values.get('query', None)
if query and not isinstance(query, str):
values['query'] = json.dumps(query)
@self.bp.before_request
def preprocess():
if 'query' in request.args:
try:
args = request.args.copy()
args['query'] = json.loads(args['query'])
request.args = args
except (KeyError, json._json.JSONDecodeError):
pass
self.register_endpoints()
if self.app:
self.init_app(app, url_prefix)
def init_app(self, app, url_prefix=''):
"""Register ourselves with the app"""
app.register_blueprint(self.bp,
url_prefix=url_prefix)
app.extensions['SimulationsViewer'] = self
key = "ENABLE_SIMULATION_UPLOADS"
self.enable_upload = app.config.setdefault(key, self.enable_upload)
@property
def simsdb(self):
return g.simsdbview.simsdb
def getcolnames(self, sims):
""" Get the column names to display in summary table """
columns = []
try:
columns = g.simsdbview.summarycolumns
except AttributeError:
pass
if sims and not columns:
#non-underscore keys with string or number values
for key, val in sims[0].items():
if (not key.startswith('_')
and key != 'id'
and isinstance(val,(str,int,float))):
columns.append(key)
return columns
def register_endpoints(self):
"""Attach the model if requested"""
@self.bp.url_value_preprocessor
def find_model(endpoint, values):
if 'modelid' in request.args:
g.model = utils.getmodelordie(request.args['modelid'])
values.setdefault('dbname', g.model.simsdb)
if 'dbname' in values:
g.dbname = values.pop('dbname')
g.simsdbview = utils.get_simsdbview(name=g.dbname)
""" make sure we have a simsdb """
@self.bp.url_defaults
def addsimsdbview(endpoint, values):
model = values.pop('model', None) or g.get('model', None)
if model:
#values['modelid'] = model.id
dbname = model.simsdb
if not dbname:
dbname = current_app.getdefaultsimviewname()
values.setdefault('dbname', dbname)
simsdbview = values.pop('simsdbview', None) or g.get('simsdbview', None)
if simsdbview and 'dbname' not in values:
values['dbname'] = current_app.getsimviewname(simsdbview)
"""Define the view functions here"""
@self.bp.route('/')
def index():
dbnames = list(current_app.simviews.keys())
if len(dbnames) == 1:
return redirect(url_for('.overview', dbname=dbnames[0]))
return render_template('listdbs.html', dbnames=dbnames)
@self.bp.route('/<dbname>/')
def overview():
query = request.args.get('query',None)
try:
projection = g.simsdbview.summarypro
sims = list(self.simsdb.runquery(query, projection=projection))
except Exception as e:
abort(400, "Invalid query specifier")
columns = self.getcolnames(sims)
return render_template('simoverview.html', sims=sims, query=query,
colnames=columns)
@self.bp.route('/<dbname>/dataset/<dataset>')
def detailview(dataset):
detail = self.simsdb.getdatasetdetails(dataset)
matches = findsimmatches(dataset)
return render_template('datasetview.html',dataset=dataset,
detail=detail)
@self.bp.route('/<dbname>/dataset/<dataset>/raw')
def rawview(dataset):
"""Export the dataset as raw JSON"""
detail = self.simsdb.getdatasetdetails(dataset)
return json.jsonify(detail)
if not self.enable_upload:
return
@self.bp.route('/<dbname>/api/upload', methods=('POST',))
def api_upload():
""" Upload files to be inserted, return JSON response """
files = request.files.getlist('fupload')
if request.is_json:
print(request.data, request.json)
fakefile = io.BytesIO(request.data)
fakefile.filename = 'JSON'
files = [fakefile]
try:
result = g.simsdbview.handle_uploads(files)
except NotImplementedError:
abort(501, "Uploads are not implemented for this database")
except BaseException as e:
err = f"{type(e).__name__}: {str(e)}"
result = dict(entries={}, errors = {None: err})
return result
@self.bp.route('/<dbname>/upload', methods=('GET','POST'))
def upload():
""" Upload new JSON-formatted entries """
result = None
if request.method == 'POST':
result = api_upload()
return render_template('uploadsimdata.html', result=result)
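# Minimal mounting sketch on a bare Flask app. A real deployment goes through
# the bgexplorer application factory, which also supplies the simulation
# databases that the endpoints expect at request time.
if __name__ == "__main__":
    from flask import Flask
    demo_app = Flask(__name__)
    SimsViewer(demo_app, url_prefix='/simulations')
    print(demo_app.url_map)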
| {
"repo_name": "bloer/bgexplorer",
"path": "bgexplorer/simsviewer/simsviewer.py",
"copies": "1",
"size": "7569",
"license": "bsd-2-clause",
"hash": -195944395584794600,
"line_mean": 37.2272727273,
"line_max": 84,
"alpha_frac": 0.5562161448,
"autogenerated": false,
"ratio": 4.410839160839161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007108280391626909,
"num_lines": 198
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
import unittest
from yandex_money.api import Wallet
from .constants import CLIENT_ID, ACCESS_TOKEN
class WalletTestSuite(unittest.TestCase):
def setUp(self):
super(WalletTestSuite, self).setUp()
self.api = Wallet(ACCESS_TOKEN)
def assert_auth_header_present(self):
pass
def testAccountInfo(self):
self.api.account_info()
self.assert_auth_header_present()
def testGetAuxToken(self):
response = self.api.get_aux_token(["account-info",
"operation-history"])
self.assertIn('aux_token', response)
def testOperationHistory(self):
options = {"records": 3}
self.api.operation_history(options)
def testOperationDetails(self):
self.api.operation_details("some-invalid-id")
def testRequestPayment(self):
options = {
"pattern_id": "p2p",
"to": "410011161616877",
"amount_due": "0.02",
"comment": "test payment comment from yandex-money-python",
"message": "test payment message from yandex-money-python",
"label": "testPayment",
"test_payment": True,
"test_result": "success"
}
response = self.api.request_payment(options)
self.assertEqual(response['status'], 'success')
def testResponsePayment(self):
options = {
"request_id": "test-p2p",
"test_payment": True,
"test_result": "success"
}
response = self.api.process_payment(options)
self.assertEqual(response['status'], 'success')
def testIncomingTransferAccept(self):
operation_id = "some id"
protection_code = "some code" # TODO: test when it's None
response = self.api.incoming_transfer_accept(
operation_id=operation_id,
protection_code=protection_code
)
self.assertEqual(response['status'], "refused")
def testIncomingTransferReject(self):
operation_id = "some operatoin id"
self.api.incoming_transfer_reject(
operation_id=operation_id,
)
def testObtainTokenUrl(self):
Wallet.build_obtain_token_url(
"client-id",
"http://localhost/redirect",
["account-info", "operation_history"]
)
# TODO: check url
def testGetAccessToken(self):
options = {
"code": "code",
"client_id": "client_id",
"grant_type": "authorization_code",
"redirect_uri": "redirect_uri",
"client_secret": "client_secret"
}
response = Wallet.get_access_token(
code=options["code"],
client_id=options["client_id"],
redirect_uri=options["redirect_uri"],
client_secret=options["client_secret"]
)
self.assertEqual(response['error'], 'unauthorized_client')
| {
"repo_name": "yandex-money/yandex-money-sdk-python",
"path": "tests/testWallet.py",
"copies": "1",
"size": "3092",
"license": "mit",
"hash": 5586738745187833000,
"line_mean": 30.5510204082,
"line_max": 71,
"alpha_frac": 0.5795601552,
"autogenerated": false,
"ratio": 4.111702127659575,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5191262282859574,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from blockext import *
import sphero
__version__ = '0.2.1'
class Sphero:
def __init__(self):
self.robot = sphero.Sphero()
self.robot.connect()
self.name = self.robot.get_bluetooth_info().name
"""def _is_connected(self):
try:
self.robot.get_bluetooth_info()
except:
self.robot = False
if not self.robot:
try:
self.robot.connect()
self.name = self.robot.get_bluetooth_info().name
except:
pass
return bool(self.robot)"""
def _problem(self):
if not self.robot:
return "Your Sphero is not connected"
def _on_reset(self):
self.robot.roll(0,0)
def get_sphero_name(self):
return self.name
def set_sphero_name(self, name):
self.name = name
self.robot.set_device_name(name)
def roll_sphero(self, power, heading):
self.robot.roll(power*2.55, heading)
"""def set_sphero_color(self, r, g, b):
self.robot.set_rgb(r,g,b)"""
descriptor = Descriptor(
name = "Orbotix Sphero",
port = 7575,
blocks = [
Block('roll_sphero', 'command', 'roll Sphero %n percent speed at %n degrees', defaults=[100,0]),
Block('get_sphero_name', 'reporter', 'get Sphero name'),
Block('set_sphero_name', 'command', 'set Sphero name to %s', defaults=['Rob Orb'])
]
)
extension = Extension(Sphero, descriptor)
if __name__ == '__main__':
extension.run_forever(debug=True)
| {
"repo_name": "blockext/sphero",
"path": "__init__.py",
"copies": "1",
"size": "1738",
"license": "mit",
"hash": -1661544438369770200,
"line_mean": 25.7384615385,
"line_max": 104,
"alpha_frac": 0.5506329114,
"autogenerated": false,
"ratio": 3.598343685300207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4648976596700207,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#from future.builtins import *
from .blocks import Block, Input
from .languages import language_codes
class Program(object):
"""For exporting blocks to a specfic block language."""
name = "Program" # "Scratch 2", "Snap"
"""Name of the hardware or extension. Must be filename-friendly."""
by_short_name = {} # "scratch2": ScratchProgram, "snap": SnapProgram
file_extension = "xml"
content_type = "application/octet-stream"
@classmethod
def get_filename(cls, descriptor, lang):
language = language_codes.get(lang) or lang
fmt = "{cls.name} {descriptor.name} {language}.{cls.file_extension}"
return fmt.format(**locals())
@classmethod
def generate_file(cls, descriptor, language):
        raise NotImplementedError(cls)
#-- Scratch 2.0 --#
import json
BLOCK_SHAPES = {
"command": " ",
"reporter": "r",
"predicate": "b",
}
class ScratchProgram(Program):
name = "Scratch"
file_extension = "s2e"
@classmethod
def generate_file(cls, descriptor, language):
s2e = {
"extensionName": descriptor.name,
"extensionPort": descriptor.port,
"blockSpecs": [],
"menus": language.get_menus(descriptor.menus),
}
for block in descriptor.blocks:
shape = BLOCK_SHAPES[block.shape]
if block.shape == "command" and block.is_blocking:
shape = "w"
spec = language.get(block.spec)
blockspec = [shape, spec, block.selector] + block.defaults
s2e["blockSpecs"].append(blockspec)
return json.dumps(s2e, ensure_ascii=False).encode("utf-8")
# TODO check Scratch will accept utf-8 json
Program.by_short_name["scratch"] = ScratchProgram
#-- Snap! --#
import re
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
INPUT_SELECTORS = {
"number": "n",
"string": "s",
"boolean": "b",
"readonly-menu": "txt",
"number-menu": "n",
"color": "clr",
}
class SnapProgram(Program):
name = "Snap"
file_extension = "xml"
content_type = "application/xml"
@classmethod
def generate_file(cls, descriptor, language):
return generate_snap(descriptor, language)
def generate_snap(descriptor, language):
root = Element("blocks", {
"app": "Snap! 4.0, http://snap.berkeley.edu",
"version": "1",
})
menus = language.get_menus(descriptor.menus)
for block in descriptor.blocks:
defn = SubElement(root, "block-definition", {
"type": "%s" % block.shape, # Can't use a future.builtins.str
"category": "other",
})
if block.help_text:
comment = SubElement(defn, "comment", w="360", collapsed="false")
comment.text = block.help_text
SubElement(defn, "header")
SubElement(defn, "code")
inputs = SubElement(defn, "inputs")
snap_spec = ""
for part in block.parts:
if isinstance(part, Input):
input_el = SubElement(inputs, "input", {
"type": "%{shape}".format(shape=INPUT_SELECTORS[part.shape]),
"readonly": "true" if part.shape == "m" else "",
})
input_el.text = str(part.default)
if "menu" in part.shape:
options = SubElement(input_el, "options")
options.text = "\n".join(menus[part.menu])
# TODO menus
# XXX ^ why is there a todo comment here?
index = block.inputs.index(part)
part = "%'arg-{}'".format(index)
else:
assert isinstance(part, str)
# Snap! doesn't allow %-signs in block text yet.
part = re.compile(r" *% *").sub(" ", part)
snap_spec += part
defn.attrib["s"] = snap_spec
http_block = Element("block", s="reportURL")
join_block = SubElement(http_block, "block", s="reportJoinWords")
list_ = SubElement(join_block, "list")
url = "localhost:{descriptor.port}/{block.selector}".format(**vars())
if block.is_blocking:
url += "/-" # Blank request id
SubElement(list_, "l").text = url
for index, input_ in enumerate(block.inputs):
SubElement(list_, "l").text = "/"
encode = SubElement(list_, "block", s="reportTextFunction")
l = SubElement(encode, "l")
SubElement(l, "option").text = "encode URI component"
join = SubElement(encode, "block", s="reportJoinWords")
SubElement(join, "block", var="arg-{}".format(index))
if block.shape == "command":
script_xml = """
<script>
<block s="{cmd}">
<block s="reifyReporter">
<autolambda>
{http_block_xml}
</autolambda>
</block>
</block>
</script>
""".format(
cmd="doRun" if block.is_blocking else "fork",
http_block_xml="{http_block_xml}",
)
elif block.shape == "predicate":
script_xml = """
<script>
<block s="doDeclareVariables">
<list>
<l>result</l>
</list>
</block>
<block s="doSetVar">
<l>result</l>
{http_block_xml}
</block>
<block s="doIf">
<block s="reportEquals">
<block var="result"/>
<l>true</l>
</block>
<script>
<block s="doSetVar">
<l>result</l>
<block s="reportTrue"/>
</block>
</script>
</block>
<block s="doIf">
<block s="reportEquals">
<block var="result"/>
<l>false</l>
</block>
<script>
<block s="doSetVar">
<l>result</l>
<block s="reportFalse"/>
</block>
</script>
</block>
<block s="doReport">
<block var="result"/>
</block>
</script>
"""
elif block.shape == "reporter":
script_xml = """
<script>
<block s="doReport">
{http_block_xml}
</block>
</script>
"""
script = ElementTree.fromstring(script_xml.format(
http_block_xml=str(ElementTree.tostring(http_block).decode("utf-8"))
))
defn.append(script)
return ElementTree.tostring(root)
Program.by_short_name["snap"] = SnapProgram
def generate_file(descriptor, program_short_name, language_code="en"):
program = Program.by_short_name[program_short_name]
    filename = program.get_filename(descriptor, language_code)
language = descriptor.translations[language_code]
contents = program.generate_file(descriptor, language)
return filename, contents
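# Minimal generation sketch; the block spec and port below are made-up example
# values, and the empty default translation table falls back to English.
# (Run as ``python -m blockext.generate`` so the relative import resolves.)
if __name__ == "__main__":
    from .blocks import Descriptor
    demo = Descriptor(
        name="Demo Extension",
        port=12345,
        blocks=[Block("play_note", "command", "play note %n for %n beats",
                      defaults=[60, 0.5])],
    )
    fname, contents = generate_file(demo, "scratch", "en")
    print(fname, len(contents))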
| {
"repo_name": "ilmanzo/scratch_extensions",
"path": "venv/lib/python3.4/site-packages/blockext/generate.py",
"copies": "1",
"size": "7598",
"license": "mit",
"hash": -3068451133420673500,
"line_mean": 31.1949152542,
"line_max": 81,
"alpha_frac": 0.5011845222,
"autogenerated": false,
"ratio": 4.242322724734785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007830790663752151,
"num_lines": 236
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
from future.moves.urllib.parse import urlparse, urlencode
import requests
from . import exceptions
class BasePayment(object):
MONEY_URL = "https://money.yandex.ru"
SP_MONEY_URL = "https://sp-money.yandex.ru"
@classmethod
def send_request(cls, url, headers=None, body=None):
if not headers:
headers = {}
if not body:
body = {}
full_url = cls.MONEY_URL + url
return cls.process_result(
requests.post(full_url, headers=headers, data=body)
)
@classmethod
def process_result(cls, result):
if result.status_code == 400:
raise exceptions.FormatError
        elif result.status_code == 401:
            raise exceptions.TokenError
        elif result.status_code == 403:
            raise exceptions.ScopeError
return result.json()
class Wallet(BasePayment):
def __init__(self, access_token):
self.access_token = access_token
def _send_authenticated_request(self, url, options=None):
return self.send_request(url, {
"Authorization": "Bearer {}".format(self.access_token)
}, options)
def account_info(self):
return self._send_authenticated_request("/api/account-info")
def get_aux_token(self, scope):
return self._send_authenticated_request("/api/token-aux", {
"scope": ' '.join(scope)
})
def operation_history(self, options):
return self._send_authenticated_request("/api/operation-history",
options)
def request_payment(self, options):
return self._send_authenticated_request("/api/request-payment",
options)
def process_payment(self, options):
return self._send_authenticated_request("/api/process-payment",
options)
def incoming_transfer_accept(self, operation_id, protection_code=None):
return self._send_authenticated_request(
"/api/incoming-transfer-accept", {
"operation_id": operation_id,
"protection_code": protection_code
})
def incoming_transfer_reject(self, operation_id):
return self._send_authenticated_request("/api/incoming-transfer-reject",
{
"operation_id": operation_id
})
@classmethod
def build_obtain_token_url(self, client_id, redirect_uri, scope):
return "{}/oauth/authorize?{}".format(self.SP_MONEY_URL,
urlencode({
"client_id": client_id,
"redirect_uri": redirect_uri,
"scope": " ".join(scope)
}))
@classmethod
def get_access_token(self, client_id, code, redirect_uri,
client_secret=None):
full_url = self.SP_MONEY_URL + "/oauth/token"
return self.process_result(requests.post(full_url, data={
"code": code,
"client_id": client_id,
"grant_type": "authorization_code",
"redirect_uri": redirect_uri,
"client_secret": client_secret
}
))
@classmethod
def revoke_token(self, token, revoke_all=False):
return self.send_request("/api/revoke", body={
"revoke-all": revoke_all
}, headers={"Authorization": "Bearer {}".format(token)})
class ExternalPayment(BasePayment):
def __init__(self, instance_id):
self.instance_id = instance_id
@classmethod
def get_instance_id(cls, client_id):
return cls.send_request("/api/instance-id", body={
"client_id": client_id
})
def request(self, options):
options['instance_id'] = self.instance_id
return self.send_request("/api/request-external-payment", body=options)
def process(self, options):
options['instance_id'] = self.instance_id
return self.send_request("/api/process-external-payment", body=options)
| {
"repo_name": "raymank26/yandex-money-sdk-python",
"path": "yandex_money/api.py",
"copies": "1",
"size": "4061",
"license": "mit",
"hash": 777057204400781400,
"line_mean": 31.75,
"line_max": 80,
"alpha_frac": 0.5968973159,
"autogenerated": false,
"ratio": 4.077309236947791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012881756102028082,
"num_lines": 124
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#from future.builtins import *
import os
import re
class Block(object):
_highest_id = 0
def __init__(self, selector, shape, parts_or_spec, is_blocking=False,
help_text="", defaults=[]):
self.shape = str(shape)
"""A string determining the kind of values the block reports.
* ``"command"`` -- Doesn't report a value. (puzzle-piece)
* ``"reporter"`` -- Reports a number. (round ends)
* ``"predicate"`` -- Reports a boolean. (pointy ends)
"""
if selector.startswith("_"):
raise ValueError("names starting with an underscore are reserved")
self.selector = str(selector)
"""Used by the block language to identify the block."""
if isinstance(parts_or_spec, list):
            self.parts = [p if isinstance(p, Input) else str(p) for p in parts_or_spec]
else:
self.parts = parse_spec(parts_or_spec)
for input_, value in zip(self.inputs, defaults):
input_.default = value
self.is_blocking = bool(is_blocking)
"""True if the block language should wait for the block to return."""
self.help_text = str(help_text)
"""Text explaining the block to a Scratch user."""
self.translations = {}
@property
def inputs(self):
return [p for p in self.parts if isinstance(p, Input)]
@property
def defaults(self):
return [x.default for x in self.inputs]
@property
def spec(self):
return generate_spec(self.parts)
def __repr__(self):
return "<Block({spec})>".format(spec=repr(generate_spec(self.parts)))
def __call__(self, func):
func._block = self
Block._highest_id += 1
func._block_id = Block._highest_id
return func
class Input(object):
"""The specification for an argument to a :class:`Block`."""
DEFAULTS = {
"number": 0,
"number-menu": 0,
"readonly-menu": None, # Set in _set_menu_defaults()
"string": "",
"boolean": False,
}
def __init__(self, shape, menu=None):
self.shape = str(shape)
"""A string identifying the kind of values the input accepts.
* ``'number'`` -- number input (round ends)
* ``'string'`` -- string input (square ends)
* ``'boolean'`` -- boolean input (pointy ends)
* ``'readonly-menu'`` -- menu input
* ``'number-menu'`` -- editable number input with menu
* ``'color'`` -- color input with picker
"""
if 'menu' in shape:
assert menu, "Menu is required"
else:
assert not menu, "Menu not allowed"
self.menu = str(menu) if menu else None
"""For menu inputs: the options the drop-down menu contains.
The options come from an earlier :attr:`Extension.menu` call::
ext.add_menu("menuName", ["one", "two", "three", ...])
"""
self.default = Input.DEFAULTS.get(self.shape)
def __repr__(self):
r = "Input({}".format(repr(self.menu))
if self.menu:
r += ", menu={}".format(repr(self.menu))
return r + ")"
def __eq__(self, other):
return (isinstance(other, Input) and self.shape == other.shape
and self.menu == other.menu)
def _set_menu_defaults(self, menus):
if self.default is None:
self.default = ""
if self.shape == "readonly-menu":
try:
options = menus[self.menu]
except KeyError:
raise ValueError(
"menu not found: {}".format(repr(self.menu))
)
self.default = options[0]
INPUT_SPECS = {
"n": "number",
"s": "string",
"b": "boolean",
"m": "readonly-menu",
"d": "number-menu",
"c": "color",
}
def parse_spec(spec):
def generate_parts(spec):
for part in re.split(r"(%[^ ](?:\.[A-z]+)?)", spec):
match = re.match(r"^%([^ ])(?:\.([A-z]+))?$", part)
if match:
shape = INPUT_SPECS.get(match.group(1))
if not shape:
raise ValueError("Unknown input shape %s" % part)
part = Input(shape, match.group(2))
else:
part = str(part)
yield part
spec = str(spec)
parts = list(generate_parts(spec))
inputs = [p for p in parts if isinstance(p, Input)]
return parts
def generate_spec(block_parts):
"""A string identifying the labels and inputs to the block.
Words starting with "%" produce input slots. Supported input types are:
* ``%n`` -- number input (round ends)
* ``%s`` -- string input (square ends)
* ``%b`` -- boolean input (pointy ends)
* ``%m.menuName`` -- menu input
* ``%d.menuName`` -- editable number input with menu
The last two input slots produce a drop-down menu. The options come
from an earlier :attr:`Extension.menu` call::
ext.add_menu("menuName", ["one", "two", "three", ...])
"""
def stringify_part(part):
if isinstance(part, Input):
for s, shape in INPUT_SPECS.items():
if shape == part.shape:
break
else:
assert False
r = "%" + s
if part.menu:
r += "." + part.menu
return r
return part
spec = "".join(map(stringify_part, block_parts))
return spec
def load_po_files(this_file, relative_folder=None, **language_file_paths):
translations = {}
base = ""
if this_file is not None:
base = os.path.abspath(os.path.dirname(this_file))
if relative_folder:
base = os.path.join(base, relative_folder)
for lang, path in language_file_paths.items():
path = os.path.join(base, path)
with open(path) as f:
translations[lang] = Language.from_po_file(f)
return translations
class Language(object):
def __init__(self, strings):
self._strings = strings
def __getitem__(self, key):
"""Return translation if possible, else untranslated string."""
return self._strings.get(key, key)
get = __getitem__
@classmethod
def from_po_file(cls, path):
        # PO-file parsing is not implemented yet; fall back to untranslated
        # strings by returning None.
        return None
def get_menus(self, menus):
translated_menus = {}
for key, options in menus.items():
translated_menus[key] = list(map(self.get, options))
return translated_menus
class Descriptor(object):
def __init__(self, name, port, blocks, menus=None, translations=None):
self.name = str(name)
"""Human-readable name of the hardware."""
self.port = int(port)
"""Port the extension runs on."""
self.blocks = list(blocks)
"""The list of blocks displayed in the interface."""
menus = menus or {}
menus = dict((str(k), list(map(str, v))) for k, v in menus.items())
self.menus = menus
"""Options for custom drop-down menus."""
translations = translations or {}
if "en" in translations:
raise ValueError("english must be default")
translations["en"] = Language({})
self.translations = translations
"""Translations for block specs and menu options."""
# Set default menu options
for block in self.blocks:
for input_ in block.inputs:
input_._set_menu_defaults(self.menus)
def __repr__(self):
return "<Descriptor(%r, %i)>" % (self.name, self.port)
| {
"repo_name": "ilmanzo/scratch_extensions",
"path": "venv/lib/python3.4/site-packages/blockext/blocks.py",
"copies": "1",
"size": "7819",
"license": "mit",
"hash": 8718690200306638000,
"line_mean": 29.0730769231,
"line_max": 79,
"alpha_frac": 0.544570917,
"autogenerated": false,
"ratio": 4.091575091575091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136146008575091,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#from future.builtins import *
"""Library for writing Scratch 2.0 and Snap! extensions.
Blockext provides two things:
- automatic generation of extension files for both Scratch and Snap! from
blocks defined in Python code.
- an method for extensions to communicate with Scratch and Snap!.
"""
__version__ = '0.2.0a'
from collections import OrderedDict
from functools import wraps
import re
from .blocks import Block, Input, Descriptor, load_po_files
from .generate import generate_file
from .helper import Extension
_doc_pat = re.compile(r'[ \t]*\n[ \t]*')
def _shape(shape):
def make_block(spec, defaults=[], help_text="", **kwargs):
def wrapper(func, help_text=help_text):
# Magic: name -> selector
selector = func.__name__
# Magic: docstring -> help text
help_text = help_text or func.__doc__ or ""
help_text = _doc_pat.sub("\n", help_text)
block = Block(selector, shape, spec, defaults=defaults,
help_text=help_text, **kwargs)
block(func) # attaches itself to func._block
return func
return wrapper
return make_block
command = _shape("command")
reporter = _shape("reporter")
predicate = _shape("predicate")
del _shape
def get_decorated_blocks_from_class(cls, selectors=None):
if selectors:
cls_vars = vars(cls)
values = map(cls_vars.get, selectors)
else:
values = vars(cls).values()
functions = []
for value in values:
if callable(value) and hasattr(value, '_block'):
functions.append(value)
functions.sort(key=lambda func: func._block_id)
return [f._block for f in functions]
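# Minimal sketch of the decorator API; the Demo class and its block are
# made-up examples, and an Extension would normally serve them to Scratch/Snap!.
if __name__ == "__main__":
    class Demo:
        @command("say hello to %s", defaults=["world"])
        def say_hello(self, name):
            """Print a greeting."""
            print("hello", name)

    print(get_decorated_blocks_from_class(Demo))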
| {
"repo_name": "ilmanzo/scratch_extensions",
"path": "venv/lib/python3.4/site-packages/blockext/__init__.py",
"copies": "1",
"size": "1824",
"license": "mit",
"hash": 5955652564398279000,
"line_mean": 27.0615384615,
"line_max": 73,
"alpha_frac": 0.6326754386,
"autogenerated": false,
"ratio": 3.8238993710691824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49565748096691825,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from nose.tools import eq_, ok_
from mock import patch
from deployer.services.security import using_encryption_store, \
decrypt_config
MOCK_BUCKET = 'mockbucket'
MOCK_PASSPHRASE = 'mock-passphrase'
MOCK_BASE = 'mock-base'
@patch.dict('deployer.services.security.ENCRYPTION', {
'store': 's3',
's3': {
'bucket': MOCK_BUCKET,
'base': MOCK_BASE,
},
'passphrase': MOCK_PASSPHRASE
})
def test_using_encryption_store_with_s3():
# Given: Mock function wrapped with using_encryption_store
@using_encryption_store
def mock_fn(*args, **kwargs):
eq_(args, ('arg1', ))
eq_(kwargs.get('arg2'), 'arg2')
eq_(kwargs.get('passphrase'), MOCK_PASSPHRASE)
store = kwargs.get('store')
ok_(store is not None)
eq_(store.bucket, MOCK_BUCKET)
eq_(store.keys_base, MOCK_BASE)
# When: I invoke mock_fn
mock_fn('arg1', arg2='arg2')
# Then: Function is called with expected args
@patch.dict('deployer.services.security.ENCRYPTION', {})
def test_using_encryption_store_with_no_provider():
# Given: Mock function wrapped with using_encryption_store
@using_encryption_store
def mock_fn(*args, **kwargs):
store = kwargs.get('store')
ok_(store is None)
# When: I invoke mock_fn
mock_fn('arg1', arg2='arg2')
# Then: Function is called with no store set
@patch.dict('deployer.services.security.ENCRYPTION', {})
@patch('deployer.services.security.decrypt_obj')
def test_decrypt_config(m_decrypt_obj):
# When: I invoke decrypt config
decrypt_config({'mockkey': 'mockvalue'})
# Then: decrypt_obj is called with expected parameters
m_decrypt_obj.assert_called_once_with(
{'mockkey': 'mockvalue'}, profile='default', store=None,
passphrase=None)
| {
"repo_name": "totem/cluster-deployer",
"path": "tests/unit/services/test_security.py",
"copies": "1",
"size": "2094",
"license": "mit",
"hash": -3378675459002436600,
"line_mean": 28.9142857143,
"line_max": 64,
"alpha_frac": 0.647086915,
"autogenerated": false,
"ratio": 3.4440789473684212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591165862368422,
"avg_score": 0,
"num_lines": 70
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.moves.urllib.parse import urlencode
import requests
from . import exceptions
# change it to debug/demo hosts
config = {
'MONEY_URL': "https://money.yandex.ru",
}
class BasePayment(object):
def send_request(self, url, headers=None, body=None):
if not headers:
headers = {}
headers['User-Agent'] = "Yandex.Money.SDK/Python"
if self.access_token:
headers['Authorization'] = "Bearer " + self.access_token
if not body:
body = {}
full_url = config['MONEY_URL'] + url
return self.process_result(
requests.post(full_url, headers=headers, data=body)
)
@classmethod
def process_result(cls, result):
if result.status_code == 400:
raise exceptions.FormatError
elif result.status_code == 401:
raise exceptions.TokenError
elif result.status_code == 403:
raise exceptions.ScopeError
return result.json()
class Wallet(BasePayment):
def __init__(self, access_token):
self.access_token = access_token
def _send_authenticated_request(self, url, options=None):
return self.send_request(url, options)
def account_info(self):
"""
Returns information about a user's wallet
http://api.yandex.com/money/doc/dg/reference/account-info.xml
https://tech.yandex.ru/money/doc/dg/reference/account-info-docpage/
Returns:
A dictionary containing account information.
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request("/api/account-info")
def get_aux_token(self, scope):
return self._send_authenticated_request("/api/token-aux", {
"scope": ' '.join(scope)
})
def operation_history(self, options):
"""
Returns operation history of a user's wallet
http://api.yandex.com/money/doc/dg/reference/operation-history.xml
https://tech.yandex.ru/money/doc/dg/reference/operation-history-docpage/
Args:
options: A dictionary with filter parameters according to
                documentation
Returns:
A dictionary containing user's wallet operations.
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request("/api/operation-history",
options)
def operation_details(self, operation_id):
"""
Returns details of operation specified by operation_id
http://api.yandex.com/money/doc/dg/reference/operation-details.xml
https://tech.yandex.ru/money/doc/dg/reference/operation-details-docpage/
Args:
            operation_id: An operation identifier
Returns:
A dictionary containing all details of requested operation.
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request("/api/operation-details",
{"operation_id": operation_id})
def request_payment(self, options):
"""
Requests a payment.
http://api.yandex.com/money/doc/dg/reference/request-payment.xml
https://tech.yandex.ru/money/doc/dg/reference/request-payment-docpage/
Args:
options: A dictionary of method's parameters. Check out docs
for more information.
Returns:
A dictionary containing `payment_id` and additional information
about a recipient and payer
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request("/api/request-payment",
options)
def process_payment(self, options):
"""
Confirms a payment that was created using the request-payment
method.
http://api.yandex.com/money/doc/dg/reference/process-payment.xml
https://tech.yandex.ru/money/doc/dg/reference/process-payment-docpage/
Args:
options: A dictionary of method's parameters. Check out docs
for more information.
Returns:
A dictionary containing status of payment and additional steps
                for authorization (if needed)
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request("/api/process-payment",
options)
def incoming_transfer_accept(self, operation_id, protection_code=None):
"""
Accepts incoming transfer with a protection code or deferred
transfer
http://api.yandex.com/money/doc/dg/reference/incoming-transfer-accept.xml
https://tech.yandex.ru/money/doc/dg/reference/incoming-transfer-accept-docpage/
Args:
            operation_id: An operation identifier
protection_code: secret code of four decimal digits. Specified
                for an incoming transfer protected by a secret code. Omitted for
deferred transfers
Returns:
A dictionary containing information about operation result
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request(
"/api/incoming-transfer-accept", {
"operation_id": operation_id,
"protection_code": protection_code
})
def incoming_transfer_reject(self, operation_id):
"""
Rejects incoming transfer with a protection code or deferred
transfer
http://api.yandex.com/money/doc/dg/reference/incoming-transfer-reject.xml
https://tech.yandex.ru/money/doc/dg/reference/incoming-transfer-reject-docpage/
Args:
            operation_id: An operation identifier
Returns:
A dictionary containing information about operation result
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
return self._send_authenticated_request(
"/api/incoming-transfer-reject",
{
"operation_id": operation_id
})
@classmethod
def build_obtain_token_url(self, client_id, redirect_uri, scope):
return "{}/oauth/authorize?{}".format(config['MONEY_URL'],
urlencode({
"client_id": client_id,
"redirect_uri": redirect_uri,
"scope": " ".join(scope),
"response_type": "code"
}))
@classmethod
def get_access_token(self, client_id, code, redirect_uri,
client_secret=None):
full_url = config['MONEY_URL'] + "/oauth/token"
return self.process_result(requests.post(full_url, data={
"code": code,
"client_id": client_id,
"grant_type": "authorization_code",
"redirect_uri": redirect_uri,
"client_secret": client_secret
}
))
@classmethod
def revoke_token(self, token=None, revoke_all=False):
"""
Revokes access token.
http://api.yandex.com/money/doc/dg/reference/revoke-access-token.xml
https://tech.yandex.ru/money/doc/dg/reference/revoke-access-token-docpage/
Args:
token: A token to be revoked
Returns:
None
Raises:
exceptions.FormatError: Authorization header is missing or has
an invalid value
            exceptions.TokenError: Nonexistent, expired, or revoked token
specified
exceptions.ScopeError: The token does not have permissions for
the requested operation
"""
self.send_request("/api/revoke", body={
"revoke-all": revoke_all
}, headers={"Authorization": "Bearer {}".format(token)})
class ExternalPayment(BasePayment):
def __init__(self, instance_id):
self.instance_id = instance_id
@classmethod
def get_instance_id(cls, client_id):
"""
Registers an instance of the application
http://api.yandex.com/money/doc/dg/reference/instance-id.xml
https://tech.yandex.ru/money/doc/dg/reference/instance-id-docpage/
Args:
            client_id: An identifier of an application
Returns:
A dictionary with status of an operation
"""
return cls.send_request("/api/instance-id", body={
"client_id": client_id
})
def request(self, options):
"""
Requests an external payment
http://api.yandex.com/money/doc/dg/reference/request-external-payment.xml
https://tech.yandex.ru/money/doc/dg/reference/request-external-payment-docpage/
Args:
options: A dictionary of method's parameters. Check out docs
for more information.
Returns:
A dictionary containing `payment_id` and additional information
about a recipient and payer
"""
options['instance_id'] = self.instance_id
return self.send_request("/api/request-external-payment", body=options)
def process(self, options):
"""
Confirms a payment that was created using the
        request-external-payment method
http://api.yandex.com/money/doc/dg/reference/process-external-payment.xml
https://tech.yandex.ru/money/doc/dg/reference/process-external-payment-docpage/
Args:
options: A dictionary of method's parameters. Check out docs
for more information.
Returns:
A dictionary containing status of payment and additional steps
            for authorization (if needed)
"""
options['instance_id'] = self.instance_id
return self.send_request("/api/process-external-payment", body=options)
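# A minimal usage sketch of the classes above; the client id, redirect URI and
# authorization code below are placeholder values, not working credentials.
if __name__ == '__main__':
    # 1. Build the URL the user must visit to authorize the application.
    auth_url = Wallet.build_obtain_token_url(
        'my-client-id',                        # hypothetical application id
        'https://example.com/oauth/callback',  # hypothetical redirect URI
        ['account-info', 'operation-history'])
    print(auth_url)
    # 2. After the redirect, exchange the temporary code for an access token
    #    (assuming the standard 'access_token' key in the response) and call
    #    an authenticated method:
    # token = Wallet.get_access_token('my-client-id', 'temporary-code',
    #                                 'https://example.com/oauth/callback')
    # wallet = Wallet(token['access_token'])
    # print(wallet.account_info())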
| {
"repo_name": "yandex-money/yandex-money-sdk-python",
"path": "yandex_money/api.py",
"copies": "1",
"size": "12797",
"license": "mit",
"hash": 7880417883619010000,
"line_mean": 37.7787878788,
"line_max": 91,
"alpha_frac": 0.5663046026,
"autogenerated": false,
"ratio": 4.996876220226474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6063180822826474,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.utils import iteritems
from datetime import datetime
import osisoftpy # main package
from dateutil import parser
# x = parser.parse(None)
# print(x)
# print(None if x == None else x.strftime('%Y%m%d%H%M%S'))
# print(datetime.strptime('05-24-2017', '%Y-%m-%dT%H:%M:%SZ'))
webapi = osisoftpy.webapi('https://dev.dstcontrols.com/piwebapi/')
print('Connected to {}'.format(webapi.links.get('Self')))
points = webapi.points(query='name:EdwinPythonTest')
# shittyPoints = webapi.points(query='name:EdwinPythonTest2')
print('good points: {}'.format(points))
# print('shitty points: {}'.format(shittyPoints))
def callback(sender):
print('{} changed! {}'.format(sender.name, sender))
subscriptions = webapi.subscribe(points, 'interpolatedattimes', startdatetime='2015-01-01T00:00:00Z', callback=callback)
# subscriptions = webapi.subscribe(points, 'interpolatedattimes', startdatetime='2015-01-01T00:00:00Z', callback=callback)
print(len(subscriptions))
for point in points:
# point.update_value('2015-01-01T00:00:00Z', 123)
x = point.interpolatedattimes(['2015-01-01T00:00:00Z','2015-01-02T00:00:00Z'])
# x = point.interpolatedattimes('2015-01-01T00:00:00Z')
for value in x:
print('{} : {}'.format(value.timestamp, value.value))
# subscriptions = webapi.unsubscribe(points, 'interpolatedattimes', '05-20-2017')
# print(len(subscriptions))
# subscriptions = webapi.unsubscribe(points, 'interpolatedattimes', '05-21-2017')
# print(len(subscriptions))
# subscriptions = webapi.unsubscribe(points, 'getvalue')
# print(subscriptions)
# # if v1 == v2:
# print('objects match')
# print(v1)
# print(v2)
# else:
# print('objects don''t match')
# print(v1)
# print(v2)
#subscriber example
# def callback(sender):
# print('{} changed! {}'.format(sender.name, sender))
# subscriptions = webapi.subscribe(points, 'current', callback)
# print(subscriptions)
# print(points)
# print(len(points))
# for point in points:
# #current
# point.current(time='2017-05-15')
# point.current(time='2017-05-16')
# point.current(time='2017-05-16')
# subscriptions = webapi.unsubscribe(shittyPoints, 'current')
# print(subscriptions)
#recorded
# recordedpoints = point.recorded()
# print(len(recordedpoints))
# recordedpoints = point.recordedattime('2016-06-01')
# print(len(recordedpoints))
#interpolated
# interpolatedpoints = point.interpolatedattimes(time = ['2016-06-01','2016-05-31','2016-05-30'])
# interpolatedpoints = point.interpolated()
# print(interpolatedpoints.__len__())
# for interpolatedpoint in interpolatedpoints:
# print(interpolatedpoint.timestamp)
# print(interpolatedpoint.value)
#end
# p = point.plot(starttime='*-2d')
# print(p[0].value)
# p = point.recorded(starttime='*-2d')
# q = point.summary(summarytype='Average')
# print(q)
#update insert
# point.update_value('2017-06-01 06:00', 900, updateoption='Insert')
# p = point.current(time='2017-06-01 06:00')
# print(len(p))
# print(len(p.timestamp))
# print(len(p.value))
#updating
# point.update_values(["2017-06-01 04:20","2017-06-01 04:25","2017-06-01 04:30"], [5,2,4])
# point.update_value("2017-06-01 05:00", 100)
# p = point.current(time="2017-06-01 05:00")
# print(point.name)
| {
"repo_name": "dstcontrols/osisoftpy",
"path": "examples/insert_data_to_tags.py",
"copies": "1",
"size": "3533",
"license": "apache-2.0",
"hash": -313731113144642500,
"line_mean": 31.1181818182,
"line_max": 122,
"alpha_frac": 0.6592131333,
"autogenerated": false,
"ratio": 3.151650312221231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9200705288258041,
"avg_score": 0.022031631452638045,
"num_lines": 110
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import base64
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
import requests
import yaml
from configservice.cluster_config.base import AbstractConfigProvider
class GithubConfigProvider(AbstractConfigProvider):
"""
Config provider that fetches totem config from a given repository
"""
def __init__(self, token=None, config_base='/'):
"""
:keyword token: Optional github API token for authentication. Needed if
private repositories are getting deployed.
:type token: str
"""
self.auth = (token, 'x-oauth-basic') if token else None
self.config_base = config_base
def _github_fetch(self, owner, repo, ref, name):
"""
Fetches the raw totem config for a given owner, repo, ref and name.
:param owner: Repository owner / organization
:type owner: str
:param repo: Repository name
:type repo: str
:param ref: Branch/tag
:type ref: str
:param name: Name of totem config (totem.yml)
:type name: str
:return: Raw totem config
:rtype: str
:raises GithubFetchException: If fetch fails
"""
path_params = {
'owner': owner,
'repo': repo,
'path': self.config_base + name
}
query_params = {
'ref': ref
}
hub_url = 'https://api.github.com/repos/{owner}/{repo}/contents' \
'{path}'.format(**path_params)
resp = requests.get(hub_url, params=query_params, auth=self.auth)
if resp.status_code == 200:
return base64.decodebytes(resp.json()[u'content'].encode('utf-8'))
elif resp.status_code == 404:
return None
else:
hub_response = {
'url': hub_url,
'response': resp.json() if 'json' in resp.headers.get(
'content-type', {}) else {'raw': resp.text},
'status': resp.status_code
}
raise GithubFetchException(hub_response)
def load(self, name, *paths):
"""
        Loads the config for the given paths. The Github provider only
        supports fetching with a full path (owner, repo, and ref). If a
        partial path is provided, an empty config is returned.
:param name: Name of the config file.
:param paths: Paths used for loading config (owner, repo, and ref):
:type paths: tuple
:return: Totem config as dictionary
:rtype: dict
"""
if len(paths) < 4:
return {}
else:
owner, repo, ref = paths[1:4]
raw = self._github_fetch(owner, repo, ref, name)
if raw:
return yaml.load(raw)
else:
return {}
class GithubFetchException(Exception):
"""
Exception thrown if github provider fails to fetch the config.
"""
def __init__(self, github_response=None):
"""
:param github_response: Dictionary representation of github response
comprising of url, status, raw response, json response.
:type github_response: dict
:return:
"""
self.response = github_response or {}
self.code = 'GITHUB_CONFIG_FETCH_FAILED'
resp = self.response.get('response', {})
reason = resp.get('message', None) or \
resp.get('raw', None) or \
str(resp)
self.message = 'Failed to fetch config from github using url:{0}. ' \
'Status:{1}. Reason: {2}'.format(
self.response.get('url'),
self.response.get('status'), reason)
super(GithubFetchException, self).__init__(github_response)
def to_dict(self):
return {
'message': self.message,
'code': self.code,
'details': self.response
}
def __str__(self):
return self.message
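# A minimal sketch of fetching a totem config through the provider above; the
# owner, repo and ref are placeholders, and a real call performs an HTTP
# request against the GitHub contents API.
if __name__ == '__main__':
    provider = GithubConfigProvider(token=None, config_base='/')
    try:
        # load() ignores paths[0] (e.g. a cluster name) and uses positions
        # 1-3 as (owner, repo, ref); fewer than four paths yields {}.
        cfg = provider.load('totem.yml', 'local', 'some-owner',
                            'some-repo', 'master')
        print(cfg)
    except GithubFetchException as exc:
        print(exc.to_dict())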
| {
"repo_name": "totem/config",
"path": "configservice/cluster_config/github.py",
"copies": "1",
"size": "4187",
"license": "mit",
"hash": -6387497151172016000,
"line_mean": 33.0406504065,
"line_max": 79,
"alpha_frac": 0.5581561978,
"autogenerated": false,
"ratio": 4.263747454175153,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 123
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import json
from parser import ParserError
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from jsonschema import ValidationError
from mock import patch
from nose.tools import eq_, raises
from conf.appconfig import DEFAULT_DEPLOYER_URL, DEFAULT_DEPLOYER_CONFIG
from orchestrator.cluster_config.effective import MergedConfigProvider
from orchestrator.cluster_config.etcd import EtcdConfigProvider
from orchestrator.cluster_config.github import GithubConfigProvider
from orchestrator.cluster_config.s3 import S3ConfigProvider
from orchestrator.services import config
from orchestrator.services.errors import ConfigProviderNotFound
from orchestrator.services.exceptions import ConfigValidationError, \
ConfigParseError
from tests.helper import dict_compare
import orchestrator.services.config as service
__author__ = 'sukrit'
@patch.dict('orchestrator.services.config.CONFIG_PROVIDERS', {
'provider1': {},
'provider3': {}
})
@patch('orchestrator.services.config.CONFIG_PROVIDER_LIST')
def test_get_providers(mock_provider_list):
"""
Should get the list of available providers
"""
# Given: Existing config provider list"
mock_provider_list.__iter__.return_value = ['provider1', 'provider2']
# When: I fetch provider list
providers = service.get_providers()
# Then: Expected provider list is returned
eq_(list(providers), ['provider1', 'effective'])
@raises(ConfigProviderNotFound)
def test_get_provider_when_not_found():
"""
Should raise ConfigProviderNotFound when provider is not found
"""
# When: I fetch provider that does not exists
service.get_provider('invalid')
# Then: ConfigProviderNotFound is raised
@patch.dict('orchestrator.services.config.CONFIG_PROVIDERS', {
'etcd': {
'host': 'mockhost',
'port': 10000,
'base': '/mock'
}
})
@patch('orchestrator.services.config.CONFIG_PROVIDER_LIST')
def test_get_etcd_provider(mock_provider_list):
"""
Should return etcd provider
"""
# Given: Existing config provider list"
mock_provider_list.__contains__.return_value = True
mock_provider_list.__iter__.return_value = ['etcd']
# When: I fetch provider that does not exists
provider = service.get_provider('etcd')
# Then: Etcd Config Provider is returned
eq_(isinstance(provider, EtcdConfigProvider), True)
eq_(provider.etcd_cl.host, 'mockhost')
eq_(provider.etcd_cl.port, 10000)
eq_(provider.config_base, '/mock/config')
eq_(provider.ttl, None)
@patch.dict('orchestrator.services.config.CONFIG_PROVIDERS', {
's3': {
'bucket': 'mockbucket',
'base': '/mock'
}
})
@patch('orchestrator.services.config.CONFIG_PROVIDER_LIST')
def test_get_s3_provider(mock_provider_list):
"""
Should return s3 provider
"""
# Given: Existing config provider list"
mock_provider_list.__contains__.return_value = True
mock_provider_list.__iter__.return_value = ['s3']
# When: I fetch provider that does not exists
provider = service.get_provider('s3')
# Then: Etcd Config Provider is returned
eq_(isinstance(provider, S3ConfigProvider), True)
eq_(provider.bucket, 'mockbucket')
eq_(provider.config_base, '/mock')
@patch.dict('orchestrator.services.config.CONFIG_PROVIDERS', {
'github': {
'token': 'mocktoken',
'config_base': '/mock'
}
})
@patch('orchestrator.services.config.CONFIG_PROVIDER_LIST')
def test_get_github_provider(mock_provider_list):
"""
Should return github provider
"""
# Given: Existing config provider list"
mock_provider_list.__contains__.return_value = True
mock_provider_list.__iter__.return_value = ['github']
# When: I fetch provider that does not exists
provider = service.get_provider('github')
# Then: Etcd Config Provider is returned
eq_(isinstance(provider, GithubConfigProvider), True)
eq_(provider.auth, ('mocktoken', 'x-oauth-basic'))
eq_(provider.config_base, '/mock')
@patch.dict('orchestrator.services.config.CONFIG_PROVIDERS', {
'etcd': {
'host': 'mockhost',
'port': 10000,
'base': '/mock'
},
'effective': {
'cache': {
'enabled': True,
'ttl': 300
}
}
})
@patch('orchestrator.services.config.CONFIG_PROVIDER_LIST')
def test_get_effective_provider(mock_provider_list):
"""
Should return effective provider
:return:
"""
"""
Should return effective provider provider
"""
# Given: Existing config provider list"
mock_provider_list.__contains__.return_value = True
mock_provider_list.__iter__.return_value = ['effective', 'default']
# When: I fetch provider that does not exists
provider = service.get_provider('effective')
# Then: Etcd Config Provider is returned
eq_(isinstance(provider, MergedConfigProvider), True)
eq_(len(provider.providers), 1)
def test_evaluate_value_with_nested_variables():
"""
Should evaluate value by parsing templates.
:return:
"""
# Given: Object that needs to be evaluated
obj = {
'variables': {
'var2': {
'value': '{{ var2 }}-modified'
}
},
'str-key': '{{ var1 }}',
'int-key': 2,
'nested-key': {
'nested-key1': {
'value': '{{ var1 }}',
'template': True
},
'variables': {
'var1': {
'value': '{{ var1 }}-modified'
}
},
'nested-key-2': {},
'__defaults__': {
'default1': {
'value': '{{ var1 }} ',
'template': True
}
}
},
'list-key': [
'list-value1',
{
'value': '\n\n{{ var2 }}\n\n',
}
],
'value-key': {
'value': '{{ var1 }}',
'encrypted': True,
'template': True
}
}
# And: variables that needs to be applied
variables = {
'var1': 'var1-value',
'var2': 'var2-value'
}
# When: I evaluate object
result = service.evaluate_value(obj, variables)
# Then: Expected result with evaluated values is returned
dict_compare(result, {
'str-key': '{{ var1 }}',
'int-key': 2,
'nested-key': {
'nested-key1': 'var1-value-modified',
'nested-key-2': {
'default1': 'var1-value-modified'
},
},
'list-key': [
'list-value1',
'var2-value-modified',
],
'value-key': {
'value': 'var1-value',
'encrypted': True
}
})
def test_evaluate_variables():
"""
Should evaluate config variables
:return: None
"""
# Given: Variables that needs to be expanded
variables = {
'var1': {
'value': True
},
'var2': {
'value': '{{var1}}-var2value',
'template': True,
'priority': 2,
},
'var3': {
'value': '{{default1}}-var3value',
'template': True,
'priority': 1,
},
'var4': False
}
# When: I evaluate the config
result = service.evaluate_variables(variables, {
'default1': 'default1value'
})
# Then: Expected config is returned
dict_compare(result, {
'var1': 'true',
'var2': 'true-var2value',
'var3': 'default1value-var3value',
'default1': 'default1value',
'var4': 'false'
})
def test_evaluate_config_with_no_deployers():
"""
Should evaluate config as expected
:return: None
"""
# Given: Config that needs to be evaluated
config = {
'variables': {
'var1': 'value1',
'var2': {
'value': '{{var1}}-var2value',
'template': True,
'priority': 2,
},
},
'key1': {
'value': 'test-{{var1}}-{{var2}}-{{var3}}',
'template': True
}
}
# When: I evaluate the config
result = service.evaluate_config(config, {
'var1': 'default1',
'var2': 'default2',
'var3': 'default3'
})
# Then: Expected config is returned
dict_compare(result, {
'key1': 'test-value1-value1-var2value-default3',
'deployers': {}
})
def test_evaluate_config_with_deployers():
"""
Should evaluate config as expected
:return: None
"""
# Given: Config that needs to be evaluated
config = {
'defaults': {},
'variables': {
'var1': 'value1',
'var2': {
'value': '{{var1}}-var2value',
'template': True,
'priority': 2,
},
},
'key1': {
'value': 'test-{{var1}}-{{var2}}-{{var3}}',
'template': True
},
'deployers': {
'__defaults__': DEFAULT_DEPLOYER_CONFIG,
'default': {
'enabled': True
},
'deployer2': {
'url': 'deployer2-url',
'enabled': True,
'deployer-name': {
'value': '{{deployer}}'
}
},
'deployer3': {
'enabled': {
'value': '{{ False }}'
}
}
}
}
# When: I evaluate the config
result = service.evaluate_config(config, {
'var1': 'default1',
'var2': 'default2',
'var3': 'default3'
})
# Then: Expected config is returned
dict_compare(result, {
'key1': 'test-value1-value1-var2value-default3',
'deployers': {
'default': {
'url': DEFAULT_DEPLOYER_URL,
'enabled': True,
'proxy': {},
'templates': {
'app': {
'args': {}
}
},
'deployment': {}
},
'deployer2': {
'url': 'deployer2-url',
'enabled': True,
'proxy': {},
'templates': {
'app': {
'args': {}
}
},
'deployer-name': 'deployer2',
'deployment': {}
}
}
})
@patch('orchestrator.services.config.validate')
@patch('orchestrator.services.config.open')
@patch('repoze.lru.lru_cache')
def test_validate_schema_for_successful_validation(m_lru_cache, m_open,
m_validate):
# Given: Existing schema
m_open.return_value.__enter__().read.return_value = '''{
"title": "Schema for Job Config",
"id": "#generic-hook-v1",
"properties": {
"mock": {
"$ref": "${base_url}/link/config#/properties/mock"
}
}
}'''
# And: Validator that succeeds validation
m_validate.return_value = None
# And: Config that needs to be validated
config = {
'mock-obj': 'mock-value'
}
# When: I validate against existing schema
ret_value = service.validate_schema(config)
# Then: Validation succeeds
dict_compare(ret_value, config)
dict_compare(m_validate.call_args[0][0], config)
@raises(ConfigValidationError)
@patch('orchestrator.services.config.validate')
@patch('orchestrator.services.config.open')
def test_validate_schema_for_failed_validation(m_open, m_validate):
# Given: Existing schema
schema = '''{
"title": "Schema for Job Config",
"id": "#generic-hook-v1"
}'''
m_open().__enter__().read.return_value = schema
# And: Validator that succeeds validation
m_validate.side_effect = ValidationError('MockError',
schema=json.loads(schema))
# And: Config that needs to be validated
config = {
'mock-obj': 'mock-value'
}
# When: I validate against existing schema
service.validate_schema(config)
# Then: ConfigValidationError is raised
def test_transform_string_values():
"""
Should transform string values inside config as expected.
:return:
"""
# Given: Config that needs to be transformed
config = {
'key1': 'value1',
'port': 1212,
'enabled': 'True',
'nested-port-key': {
'port': u'2321',
'nodes': u'12',
'min-nodes': '13',
'enabled': 'False',
'force-ssl': 'true'
},
'array-config': [
{
'port': '123',
'nodes': '13',
'min-nodes': '14',
'attempts': '10',
'enabled': False
},
'testval'
],
'null-key': None
}
# When: I transform string values in config
result = service.transform_string_values(config)
# Then: Transformed config is returned
dict_compare(result, {
'key1': 'value1',
'port': 1212,
'enabled': True,
'nested-port-key': {
'port': 2321,
'nodes': 12,
'min-nodes': 13,
'enabled': False,
'force-ssl': True
},
'array-config': [
{
'port': 123,
'nodes': 13,
'min-nodes': 14,
'attempts': 10,
'enabled': False
},
'testval'
],
'null-key': None
})
@patch('orchestrator.services.config.get_provider')
@patch('orchestrator.services.config.validate_schema')
def test_load_config(m_validate_schema, m_get_provider):
"""
Should load config successfully
:return:
"""
# Given: Existing valid config
cfg1 = {
'mockkey': 'mockvalue',
8080: 'number-key',
'deployers': {
'__defaults__': DEFAULT_DEPLOYER_CONFIG,
'deployer1': {
'enabled': False,
'variables': {}
},
'deployer2': {
'enabled': True,
'variables': {}
}
},
}
cfg2 = {
'mockkey2': 'mockvalue2',
'deployers': {
'deployer1': {
'variables': {
'deployer_url': 'deployer1-url1',
},
'url': {
'value': '{{deployer_url}}'
}
},
'deployer2': {
'variables': {
'deployer_url': 'deployer2-url1',
},
'url': {
'value': '{{deployer_url}}'
}
}
},
'environment': {
'env1': 'val1'
}
}
m_get_provider.return_value.load.side_effect = [cfg1, cfg2]
m_validate_schema.side_effect = lambda vcfg, schema_name=None: vcfg
# When: I load the config
loaded_config = config.load_config('mockpath1', 'mockpath2')
# Then: Config gets loaded as expected
dict_compare(loaded_config, {
'mockkey': 'mockvalue',
'mockkey2': 'mockvalue2',
'8080': 'number-key',
'deployers': {
'deployer2': {
'templates': {
'app': {
'args': {}
},
},
'proxy': {},
'deployment': {},
'url': 'deployer2-url1',
'enabled': True
}
},
'environment': {
'env1': {
'value': 'val1',
'encrypted': False
}
}
})
@raises(ConfigParseError)
@patch('orchestrator.services.config.get_provider')
@patch('orchestrator.services.config.validate_schema')
def test_load_config_when_config_is_invalid(m_validate_schema, m_get_provider):
"""
Should raise ConfigParseError when configuration is invalid
:return:
"""
# Given: Existing valid config
m_get_provider.return_value.load.side_effect = ParserError('Mock')
m_validate_schema.side_effect = lambda vcfg, schema_name=None: vcfg
# When: I load the config
config.load_config('mockpath1', 'mockpath2')
# Then: ConfigParseError is raised
def test_normalize_config():
"""
Should normalize the config containing environment variables
"""
# Given: Existing config that needs to be normalized
input_config = {
'environment': {
'var1': 'value1',
'var2': 2,
'var3': True,
'var4': {
'value': 'value4'
},
'var5': {
},
'var6': {
'value': 'value6',
'encrypted': True
}
},
'nested': {
'environment': {
'var7': 'value7',
}
},
'other': {
'test-key': 'test-val'
},
'direct-string': 'value',
'direct-int': 1
}
# When: I normalize the config
normalized_config = dict(service.normalize_config(input_config))
# Then: Config gets normalized as expected
dict_compare(normalized_config, {
'environment': {
'var1': {
'value': 'value1',
'encrypted': False
},
'var2': {
'value': '2',
'encrypted': False
},
'var3': {
'value': 'True',
'encrypted': False
},
'var4': {
'value': 'value4',
'encrypted': False
},
'var5': {
'value': '',
'encrypted': False
},
'var6': {
'value': 'value6',
'encrypted': True
}
},
'nested': {
'environment': {
'var7': {
'value': 'value7',
'encrypted': False
}
}
},
'other': {
'test-key': 'test-val'
},
'direct-string': 'value',
'direct-int': 1
})
| {
"repo_name": "totem/cluster-orchestrator",
"path": "tests/unit/orchestrator/services/test_config.py",
"copies": "1",
"size": "18737",
"license": "mit",
"hash": -5665225141035386000,
"line_mean": 25.7671428571,
"line_max": 79,
"alpha_frac": 0.5002401665,
"autogenerated": false,
"ratio": 4.051243243243243,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051483409743243,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import sys, time
from astropy.table import Table
from . import loki
def prob(sub1 = None, sub2 = None, RAs1 = None, DECs1 = None, RAs2 = None, DECs2 = None,
dists1 = None, dists2 = None, disterrs1 = None, disterrs2 = None,
PMras1 = None, PMdecs1 = None, PMras2 = None, PMdecs2 = None,
PMraerrs1 = None, PMdecerrs1 = None, PMraerrs2 = None, PMdecerrs2 = None,
RVs1 = None, RVs2 = None, RVerrs1 = None, RVerrs2 = None,
PM = False, RV = False, DIST = False, DISTERR = False, size = 10000,
infile = None, outfile = None, nstepsMC = 1000, subset = False, random = False):
"""
    This program reads in a list of binaries (or accepts them directly as arguments)
and calculates the number of stars expected in the LOS and volume
occupied by the binary system by creating "fake" galaxies and
randomly distributing the stars in that galaxy. It uses the
rejection method (Press et al. 1992) to distribute the stars using
the stellar density profile as calculated by Bochanski et al. (2009)
using SDSS low-mass stars.
Written by : Saurav Dhital
Written on : April 17, 2009
Ported to Python: Chris Theissen, June 29, 2015
"""
# Start the program and time it
print('Start Time: ', time.strftime("%I:%M:%S"))
t_start = time.time()
# Check if there is kinematic and positional information
if PMras1 is not None and PM == False:
PM = True # Proper motions available
if RVs1 is not None and RV == False:
RV = True # Radial velocities available
if dists1 is not None and DIST == False:
DIST = True # Distances available
# Get the file and/or define the parameters
if infile is None: # Case where there is no file
if RAs1 is None and RAs2 is None:
raise Exception('Either a file or a list of RADECs must be included')
else:
# Check if (numpy) arrays
if isinstance( RAs1, np.ndarray ) == False:
RAs1 = np.array( RAs1 ).flatten()
DECs1 = np.array( DECs1 ).flatten()
RAs2 = np.array( RAs2 ).flatten()
DECs2 = np.array( DECs2 ).flatten()
n = len( RAs1 ) # See how long the file is
if DIST and isinstance( dists1, np.ndarray ) == False:
dists1 = np.array( dists1 ).flatten()
dists2 = np.array( dists2 ).flatten()
if disterrs1 is not None:
disterrs1 = np.array( disterrs1 ).flatten()
disterrs2 = np.array( disterrs2 ).flatten()
if PM and isinstance( PMras1, np.ndarray ) == False:
PMras1 = np.array( PMras1 ).flatten()
PMdecs1 = np.array( PMdecs1 ).flatten()
PMras2 = np.array( PMras2 ).flatten()
PMdecs2 = np.array( PMdecs2 ).flatten()
PMraerrs1 = np.array( PMraerrs1 ).flatten()
PMdecerrs1 = np.array( PMdecerrs1 ).flatten()
PMraerrs2 = np.array( PMraerrs2 ).flatten()
PMdecerrs2 = np.array( PMdecerrs2 ).flatten()
if RV and isinstance(RVs1, np.ndarray) == False:
RVs1 = np.array( RVs1 ).flatten()
RVs2 = np.array( RVs2 ).flatten()
RVerrs1 = np.array( RVerrs1 ).flatten()
RVerrs2 = np.array( RVerrs2 ).flatten()
else: # Case where there is a file
binaries = Table.read(infile) # Read in the file
# Define the parameters
RAs1 = binaries['RA1']
DECs1 = binaries['DEC1']
RAs2 = binaries['RA2']
DECs2 = binaries['DEC2']
if DIST:
dists1 = binaries['DIST1']
dists2 = binaries['DIST2']
if DISTERR == False:
# Just apply a 20% uncertainty
disterrs1 = .2*binaries['DIST1']
disterrs2 = .2*binaries['DIST2']
else:
disterrs1 = binaries['DISTERR1']
disterrs2 = binaries['DISTERR2']
if PM:
PMras1 = binaries['PMRA1']
PMdecs1 = binaries['PMDEC1']
PMras2 = binaries['PMRA1']
PMdecs2 = binaries['PMDEC1']
PMraerrs1 = binaries['PMRAERR1']
PMdecerrs1 = binaries['PMDECERR1']
PMraerrs2 = binaries['PMRAERR2']
PMdecerrs2 = binaries['PMDECERR2']
if RV:
RVs1 = binaries['RV1']
RVs2 = binaries['RV2']
RVerrs1 = binaries['RVERR1']
RVerrs2 = binaries['RVERR2']
n = len(RAs1) # See how many candidates there are
binaries_theta = loki.angdist(RAs1, DECs1, RAs2, DECs2) # Get the angular distances between the pairs
# **************************************************************************
print('')
print('No. of candidate pairs: %s'%n)
print('No. of MC steps : %s'%nstepsMC)
print('')
# storage arrays
    nstars       = np.zeros(n)                          # stores no. of stars in each LOS
    count_star   = np.zeros( (n, 5), dtype=np.float64)  # total no. of chance-alignment companions per LOS (per criterion)
    count_binary = np.zeros( (n, 5), dtype=np.float64)  # no. of MC steps with at least one companion per LOS (per criterion)
print('We are at (%s):'%('%'))
count = 0
for i in range(0, n): # loop for each LOS (binary)
ra0 = RAs1[i] # system properties are subscripted with 0
dec0 = DECs1[i]
theta0 = binaries_theta[i]
if DIST: # Check if distances are within uncertainties
dist0 = 0.5 * (dists1[i] + dists2[i])
if DISTERR == False:
# Fudge factor for things without distance uncertainties
sig_ddist0 = 0.1383 * np.sqrt( dists1[i] ** 2 + dists2[i] ** 2 )
else:
sig_ddist0 = np.sqrt( disterrs1[i] ** 2 + disterrs2[i] ** 2 )
# Get the kinematic information if available
if RV and PM: # Case with proper motions and radial velocities
vel0 = 0.5 * np.array( [ PMras1[i] + PMras2[i],
PMdecs1[i] + PMdecs2[i],
RVs1[i] + RVs2[i] ] )
sig_vel0 = np.sqrt( np.array( [ PMraerrs1[i] ** 2 + PMraerrs2[i] ** 2,
PMdecerrs1[i] ** 2 + PMdecerrs2[i] ** 2,
RVerrs1[i] ** 2 + RVerrs2[i] ** 2 ] ) )
#vel0 = 0.5 * np.array([bry['PMRA'][i][0] + bry['PMRA'][i][1],
# bry['PMDEC'][i][0] + bry['PMDEC'][i][1],
# bry['RV'][i][0] + bry['RV'][i][1]])
#sig_vel0 = np.sqrt( np.array([bry['PMRAERR'][i][0]**2 + bry['PMRAERR'][i][1]**2,
# bry['PMDECERR'][i][0]**2 + bry['PMDECERR'][i][1]**2,
# bry['RVERR'][i][0]**2 + bry['RVERR'][i][1]**2]) )
if RV == False and PM: # Case with proper motions but no radial velocities
vel0 = 0.5 * np.array( [ PMras1[i] + PMras2[i],
PMdecs1[i] + PMdecs2[i] ] )
sig_vel0 = np.sqrt( np.array( [ PMraerrs1[i] ** 2 + PMraerrs2[i] ** 2,
PMdecerrs1[i] ** 2 + PMdecerrs2[i] ** 2 ] ) )
# **********************************************************
# ******************** CALC PROB *******************
# **********************************************************
# storage arrays
count_MC = np.zeros(5, dtype = np.float64) # store data for each niter
count_MC2 = np.zeros(5, dtype = np.float64) # store data for each niter
# count the number of stars in each cell of length cell_size
nstars_tot, nstars2, dists = loki.count_nstars(ra0, dec0, full = True)
#nstars_tot = np.around(nstars_tot)
#nstars[i] = nstars_tot
# Need to create a probability distribution based on the float
randstar = np.random.random_sample( size )
nstars_tot2 = np.zeros( size, dtype=np.int64 ) + np.floor( nstars_tot )
nstars_tot2[ np.where( randstar < nstars_tot - np.floor( nstars_tot ) ) ] += 1
nstars_tot3 = np.random.choice(nstars_tot2, size = 1) # Select a random choice of stars (need an integer)
nstars[i] = nstars_tot3 # Add the value to the total stars simulated
for niter in range(0, nstepsMC): # Loop through the MC (Could probably do this more efficiently)
# This keeps track of the binary probability
b1, b2, b3, b4, b5 = 0, 0, 0, 0, 0
c1, c2, c3, c4, c5 = 0, 0, 0, 0, 0
ra, dec, dist = loki.gen_nstars( ra0, dec0, nstars_tot, nstars2, dists )
theta = loki.angdist( ra0, dec0, ra, dec )
# ************** COUNT FOR MATCHES *******************
ind1 = np.where( ( theta >= 0 ) & ( theta <= theta0 ) ) # counts all stars within given theta and all d
c1 = len( ind1[0] )
if c1 >= 1:
b1 += 1
if DIST:
ddist = abs( dist0 - dist )
ind2 = np.where( ( theta >= 0) & ( theta <= theta0 ) &
( ddist <= sig_ddist0 ) & ( ddist <= 100 ) ) # counts stars within given theta and d
c2 = len( ind2[0] )
if c2 >= 1:
b2 += 1
# if kinematics are available # NEED TO COME BACK AND FIX THIS
if c2 > 0 and ( PM or RV ):
R0, T0, Z0 = loki.conv_to_galactic( ra0, dec0, dist0 ) # Convert RADECs to Galactic coordinates
vel1 = loki.gen_pm( R0, T0, Z0, ra0, dec0, dist0, c2 ) # returns [[pmra], [pmdec],[rv]]
if PM and RV == False:
vel = np.array( [ vel1[0], vel1[1] ] )
# replicate vel0 to match the dimensions of generated velocity array
# allows for vector arithmetic
vel0_arr = np.tile( vel0, ( c2, 1 ) ).transpose()
sig_vel0_arr = np.tile( sig_vel0, ( c2, 1 ) ).transpose()
# difference in binary and simulated velocity in units of sigma
dVel = abs( vel - vel0_arr ) / sig_vel0_arr
if PM: # PM match
ind3 = np.where( np.sqrt( dVel[0] ** 2 + dVel[1] ** 2 ) <= 2 )
c3 = len( ind3[0] )
if c3 >= 1:
b3 += 1
else:
c3 = 0
if RV: # RV match
ind4 = np.where( dVel[2] <= 1 )
c4 = len( ind4[0] )
if c4 >= 1:
b4 += 1
else:
c4 = 0
if PM and RV: # PM + RV match
ind5 = np.where( ( dVel[0] ** 2 + dVel[1] ** 2 <= 2 ) & ( dVel[2] <= 1 ) )
c5 = len( ind5[0] )
if c5 >= 1:
b5 += 1
else:
c3, c4, c5 = 0, 0, 0 # End of one MC step
# ******************** STORE DATA FOR EACH NITER ********
count_MC += [c1, c2, c3, c4, c5] # This counts the number of stars in the LOS
count_MC2 += [b1, b2, b3, b4, b5] # This counts the binary (yes or no; there is another star in the LOS)
count += 1
# Keep a running update
sys.stdout.write("\r%0.4f" %( float(count) / (nstepsMC*n)*100 ) )
sys.stdout.flush()
# *********************** STORE DATA FOR EACH STAR ***********
count_star[i,:] = count_MC
count_binary[i,:] = count_MC2
# Convert to probabilities
prob = count_star / nstepsMC
prob2 = count_binary / nstepsMC
if outfile is None:
outfile='Probabilities.csv'
print(' ************* ')
print(' ************* ')
# Create the table for results
if DIST:
Table1 = Table([RAs1, DECs1, 0.5 * (dists1 + dists2), binaries_theta,
prob[:,0], prob[:,1], prob[:,2], prob[:,3], prob[:,4],
prob2[:,0], prob2[:,1], prob2[:,2], prob2[:,3], prob2[:,4], nstars],
names = ['RA','DEC','DIST','THETA','P1','P2','P3','P4','P5',
'PB1','PB2','PB3','PB4','PB5','Nstars'])
else:
Table1 = Table([RAs1, DECs1, binaries_theta,
prob[:,0], prob[:,1], prob[:,2], prob[:,3], prob[:,4],
prob2[:,0], prob2[:,1], prob2[:,2], prob2[:,3], prob2[:,4], nstars],
names = ['RA','DEC','THETA','P1','P2','P3','P4','P5',
'PB1','PB2','PB3','PB4','PB5','Nstars'])
# Write the table locally (overwrite is on by default, scary)
Table1.write(outfile)
print('TOTAL TIME TAKEN : ', (time.time() - t_start) / 3600.,' hours')
print('TIME TAKEN PER LOS : ', (time.time() - t_start) / (60*n),' minutes')
print('END TIME : ', time.strftime("%I:%M:%S"))
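# A hedged invocation sketch: the coordinates (degrees) and distances (parsecs)
# below are made up and only illustrate the expected argument shapes; lists are
# converted to numpy arrays internally, and the companion `loki` module must be
# importable for a real run.
def example_invocation():
    prob(RAs1=[150.1, 210.4], DECs1=[20.3, -5.2],
         RAs2=[150.1008, 210.4009], DECs2=[20.3004, -5.1996],
         dists1=[80.0, 120.0], dists2=[82.0, 118.0],
         DIST=True, nstepsMC=100, outfile='example_probs.csv')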
def main():
    prob(sub1 = None, sub2 = None, RAs1 = None, DECs1 = None, RAs2 = None, DECs2 = None,
         dists1 = None, dists2 = None, disterrs1 = None, disterrs2 = None,
         PMras1 = None, PMdecs1 = None, PMras2 = None, PMdecs2 = None,
         PMraerrs1 = None, PMdecerrs1 = None, PMraerrs2 = None, PMdecerrs2 = None,
         RVs1 = None, RVs2 = None, RVerrs1 = None, RVerrs2 = None,
         PM = False, RV = False, DIST = False, DISTERR = False, size = 10000,
         infile = None, outfile = None, nstepsMC = 1000, subset = False, random = False)
if __name__ == '__main__':
main()
| {
"repo_name": "ctheissen/LoKi",
"path": "loki/binary_probability.py",
"copies": "1",
"size": "14320",
"license": "mit",
"hash": 5019688978316929000,
"line_mean": 41.619047619,
"line_max": 119,
"alpha_frac": 0.475698324,
"autogenerated": false,
"ratio": 3.420926899187769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4396625223187769,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import boto
from boto.s3.key import Key
from future.builtins import ( # noqa
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
from nose.plugins.attrib import attr
from nose.tools import eq_
from configservice.cluster_config.s3 import S3ConfigProvider
from tests.helper import dict_compare
from tests.integration.configservice.cluster_config import MOCK_CONFIG, \
MOCK_SERIALIZED_CONFIG
__author__ = 'sukrit'
S3_TEST_BUCKET = os.getenv('S3_TEST_BUCKET', 'totem-integration')
S3_CONFIG_BASE = 'totem-%s/config' % (os.getenv('USER'))
@attr(s3='true')
class TestS3ConfigProvider:
"""
    Integration tests for S3ConfigProvider
"""
@classmethod
def teardown_class(cls):
bucket = boto.connect_s3().get_bucket(S3_TEST_BUCKET)
for key in bucket.list(prefix=S3_CONFIG_BASE):
key.delete()
def setup(self):
self.provider = S3ConfigProvider(S3_TEST_BUCKET,
config_base=S3_CONFIG_BASE)
def test_write(self):
"""
should write config to s3
"""
# When: I write config using provider
self.provider.write('totem.yml', MOCK_CONFIG, 'cluster1', 'test_write')
# Then: Config gets serialized as yaml and written to s3
key = self.provider._s3_bucket().get_key(
'/%s/cluster1/test_write/totem.yml' % (S3_CONFIG_BASE))
eq_(key is not None, True)
eq_(key.get_contents_as_string().decode(), MOCK_SERIALIZED_CONFIG)
def test_load(self):
"""
Should read config from s3
"""
# Given: Existing config
key = Key(self.provider._s3_bucket())
key.key = '/%s/cluster1/test_load/totem.yml' % (S3_CONFIG_BASE)
key.set_contents_from_string(MOCK_SERIALIZED_CONFIG)
# When: I load config using provider
ret_value = self.provider.load('totem.yml', 'cluster1', 'test_load')
# Then: Config gets loaded
dict_compare(ret_value, MOCK_CONFIG)
def test_delete(self):
"""
Should delete config from s3
"""
# Given: Existing config
key = Key(self.provider._s3_bucket())
key.key = '/%s/cluster1/test_delete/totem.yml' % (S3_CONFIG_BASE)
key.set_contents_from_string(MOCK_SERIALIZED_CONFIG)
# When: I load config using provider
ret_value = self.provider.delete('totem.yml', 'cluster1',
'test_delete')
# Then: Config gets loaded
eq_(ret_value, True)
check_key = self.provider._s3_bucket().get_key(key.key)
eq_(check_key is None, True)
| {
"repo_name": "totem/config",
"path": "tests/integration/configservice/cluster_config/test_s3.py",
"copies": "1",
"size": "2834",
"license": "mit",
"hash": -8739348841973782000,
"line_mean": 31.5747126437,
"line_max": 79,
"alpha_frac": 0.6104446013,
"autogenerated": false,
"ratio": 3.546933667083855,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4657378268383855,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import numpy as np
import scipy.linalg
from warnings import warn
# Here comes the fast implementation:
try:
from _block_diag_ilu import PyILU
except ImportError:
if os.environ.get("USE_FAST_FAKELU", "0") == "1":
# You better not use fast_FakeLU()...
raise
class PyILU:
pass
class ILU:
def __init__(self, A, sub, sup, blockw, ndiag=0):
fA = np.empty((blockw, A.shape[0]), order='F')
ssub = np.empty_like(sub)
ssup = np.empty_like(sup)
nblocks = A.shape[0]//blockw
for bi in range(nblocks):
slc = slice(bi*blockw, (bi+1)*blockw)
fA[0:blockw, slc] = A[slc, slc]
idx = 0
for di in range(ndiag):
for bi in range(nblocks-di-1):
for ci in range(blockw):
ssub[idx] = A[blockw*(bi+di+1)+ci, blockw*bi + ci]
ssup[idx] = A[blockw*bi + ci, blockw*(bi+di+1) + ci]
idx += 1
self._pyilu = PyILU(fA, ssub, ssup, blockw, ndiag)
def solve(self, b):
return self._pyilu.solve(b)
@property
def sub(self):
sub = []
for di in range(self._pyilu.ndiag):
ssub = []
for bi in range(self._pyilu.nblocks - di - 1):
for ci in range(self._pyilu.blockw):
ssub.append(self._pyilu.sub_get(di, bi, ci))
sub.append(ssub)
return sub
@property
def sup(self):
sup = []
for di in range(self._pyilu.ndiag):
ssup = []
for bi in range(self._pyilu.nblocks - di - 1):
for ci in range(self._pyilu.blockw):
ssup.append(self._pyilu.sup_get(di, bi, ci))
sup.append(ssup)
return sup
@property
def rowbycol(self):
nblocks = self._pyilu.nblocks
blockw = self._pyilu.blockw
rbc = []
for bi in range(nblocks):
l = []
for ci in range(blockw):
l.append(self._pyilu.rowbycol_get(bi*blockw+ci))
rbc.append(l)
return rbc
@property
def colbyrow(self):
nblocks = self._pyilu.nblocks
blockw = self._pyilu.blockw
rbc = []
for bi in range(nblocks):
l = []
for ri in range(blockw):
l.append(self._pyilu.colbyrow_get(bi*blockw+ri))
rbc.append(l)
return rbc
@property
def LU_merged(self):
nblocks = self._pyilu.nblocks
blockw = self._pyilu.blockw
ndiag = self._pyilu.ndiag
dim = nblocks*blockw
LU = np.zeros((dim, dim))
LUblocks = self._pyilu.get_LU()
for bi in range(nblocks):
slc = slice(bi*blockw, (bi+1)*blockw)
LU[slc, slc] = LUblocks[:, slc]
for di in range(ndiag):
idx = 0
for bi in range(nblocks-di-1):
for ci in range(blockw):
lri_u = self._pyilu.rowbycol_get(idx)
lri_l = self._pyilu.rowbycol_get(idx+blockw*di)
LU[bi*blockw + lri_l + blockw*(di+1), idx] = self._pyilu.sub_get(
di, bi, ci)
LU[bi*blockw + lri_u, idx + blockw*(di+1)] = self._pyilu.sup_get(
di, bi, ci)
idx += 1
return LU
@property
def piv(self):
blockw = self._pyilu.blockw
p = []
for bi in range(self._pyilu.nblocks):
pp = []
for ci in range(blockw):
pp.append(self._pyilu.piv_get(bi*blockw+ci))
p.append(pp)
return p
def fast_FakeLU(A, n, ndiag=0):
assert A.shape[0] == A.shape[1]
assert A.shape[0] % n == 0
nblocks = A.shape[0]//n
sub, sup = [], []
for di in range(ndiag):
ssub, ssup = [], []
for gi in range((nblocks-di-1)*n):
ssub.append(A[gi + (di+1)*n, gi])
ssup.append(A[gi, gi + (di+1)*n])
sub.extend(ssub)
sup.extend(ssup)
# memory view taking address of first element workaround:
# if len(sub) == 0:
# sub.append(0)
# sup.append(0)
return ILU(np.asfortranarray(A),
np.array(sub, dtype=np.float64),
np.array(sup, dtype=np.float64),
n, ndiag)
# Below is the prototype from which block_diag_ilu.hpp was
# designed: (tests were made for FakeLU and should pass
# for fast_FakeLU above)
def rowpiv2rowbycol(piv):
rowbycol = np.arange(len(piv))
for i in range(len(piv)):
j = piv[i]
if i != j:
tmp = rowbycol[j]
rowbycol[j] = i
rowbycol[i] = tmp
return rowbycol
class FakeLU:
def __init__(self, A, n, ndiag=0):
self.lu, self.piv, self.rowbycol = [], [], []
self.colbyrow = []
self.n = n
self.ndiag = ndiag
assert A.shape[0] == A.shape[1]
assert A.shape[0] % n == 0
self.N = A.shape[0]//n
# Block diagonal
for bi in range(self.N):
slc = slice(bi*self.n, (bi+1)*self.n)
lu, piv = scipy.linalg.lu_factor(A[slc, slc])
self.lu.append(lu)
self.piv.append(piv)
self.rowbycol.append(rowpiv2rowbycol(piv))
self.colbyrow.append([list(self.rowbycol[-1]).index(x) for x in range(self.n)])
# Sub diagonal
self.sub, self.sup = [], []
for di in range(1, self.ndiag+1):
ssub = []
ssup = []
for bi in range(self.N-di):
for ci in range(self.n):
d = self.lu[bi][ci, ci]
ssub.append(A[(bi+di)*n + ci, bi*n + ci]/d) # sub[column_idx]
ssup.append(A[bi*n + ci, (bi+di)*n + ci]) # sup[column_idx]
self.sub.append(ssub)
self.sup.append(ssup)
@property
def L_dot_U(self):
# ILU => L*U ~= A
# this should give a better approximation of A
# Only useful for debugging / accuracy tests...
A = np.zeros((self.N*self.n, self.N*self.n))
for bi in range(self.N):
# Diagonal blocks...
L = np.zeros((self.n, self.n))
U = L.copy()
for ri in range(self.n):
for ci in range(self.n):
if ci == ri:
U[ri, ci] = self.lu[bi][ri, ci]
L[ri, ci] = 1.0
elif ci > ri:
U[ri, ci] = self.lu[bi][ri, ci]
else:
L[ri, ci] = self.lu[bi][ri, ci]
slc = slice(bi*self.n, (bi+1)*self.n)
A[slc, slc] = np.dot(L, U)
for di in range(1, self.ndiag+1): # diag
for bi in range(self.N-di): # block
for ci in range(self.n):
# upper
A[bi*self.n + self.rowbycol[bi][ci], (bi+di)*self.n+ci] = self.sup[di-1][bi*self.n + ci]
# lower
A[(bi+di)*self.n+self.rowbycol[bi+di][ci], bi*self.n+ci] = self.sub[di-1][bi*self.n + ci]*self.lu[bi][ci, ci]
return A
# def permute_vec(self, x):
# n = np.empty_like(x)
# for bi in range(self.N):
# for li in range(self.n):
# n[bi*self.n+li] = x[bi*self.n+self.rowbycol[bi][li]]
# return n
# def antipermute_vec(self, x):
# n = x[:]
# for bi in range(self.N):
# for li in range(self.n):
# n[bi*self.n+li] = x[bi*self.n+self.colbyrow[bi][li]]
# return n
def solve(self, b):
"""
LUx = b:
Ly = b
Ux = y
"""
#b = self.permute_vec(b)
y = []
for bri in range(self.N): # block row index
for li in range(self.n): # local row index
s = 0.0
for lci in range(li): # local column index
s += self.lu[bri][li, lci]*y[bri*self.n+lci]
for di in range(1, self.ndiag+1):
if bri >= di:
# di:th sub diagonal (counted as distance from main diag)
ci = self.colbyrow[bri][li]
s += self.sub[di-1][(bri-di)*self.n+ci]*y[
(bri-di)*self.n + ci]
y.append(b[bri*self.n+self.rowbycol[bri][li]]-s) # Doolittle: L[i, i] == 1
x = [0]*len(y)
for bri in range(self.N-1, -1, -1):
for li in range(self.n - 1, -1, -1):
s = 0.0
for ci in range(li+1, self.n):
s += self.lu[bri][li, ci]*x[bri*self.n + ci]
for di in range(1, self.ndiag+1):
if bri < self.N-di:
ci = self.colbyrow[bri][li]
s += self.sup[di-1][bri*self.n+ci]*x[(bri+di)*self.n + ci]
x[bri*self.n+li] = (y[bri*self.n + li] - s)/self.lu[bri][li, li]
return x #self.antipermute_vec(x)
@property
def LU_merged(self):
A = np.zeros((self.N*self.n, self.N*self.n))
for bi in range(self.N):
slc = slice(bi*self.n, (bi+1)*self.n)
A[slc, slc] = self.lu[bi]
for ci in range(self.n):
for di in range(1, self.ndiag+1):
# bi means block row index:
if bi >= di:
A[bi*self.n+self.rowbycol[bi][ci], (bi-di)*self.n+ci] = self.sub[di-1][(bi-di)*self.n + ci]
if bi < self.N-di:
A[bi*self.n+self.rowbycol[bi][ci], (bi+di)*self.n+ci] = self.sup[di-1][bi*self.n + ci]
return A
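# A small self-check sketch: build a random matrix with exactly the structure
# FakeLU models (dense diagonal blocks plus element-wise sub/super diagonals)
# and compare its approximate solve with a dense solve; block width, number of
# blocks and the diagonal boost are arbitrary illustration choices.
if __name__ == '__main__':
    np.random.seed(0)
    blockw, nblocks, ndiag = 2, 4, 1
    dim = blockw*nblocks
    A = np.zeros((dim, dim))
    for bi in range(nblocks):  # dense, diagonally boosted blocks
        slc = slice(bi*blockw, (bi+1)*blockw)
        A[slc, slc] = np.random.uniform(-1, 1, (blockw, blockw)) + 10*np.eye(blockw)
    for di in range(1, ndiag+1):  # element-wise sub/super diagonal couplings
        for gi in range(dim - di*blockw):
            A[gi + di*blockw, gi] = np.random.uniform(-1, 1)
            A[gi, gi + di*blockw] = np.random.uniform(-1, 1)
    b = np.random.uniform(-1, 1, dim)
    x_ilu = np.array(FakeLU(A, blockw, ndiag).solve(b))
    x_ref = np.linalg.solve(A, b)
    # the factorization is incomplete, so expect a small residual difference
    print(np.max(np.abs(x_ilu - x_ref)))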
| {
"repo_name": "chemreac/block_diag_ilu",
"path": "python_prototype/fakelu.py",
"copies": "1",
"size": "9838",
"license": "bsd-2-clause",
"hash": -7718856516726367000,
"line_mean": 33.7632508834,
"line_max": 129,
"alpha_frac": 0.4699125839,
"autogenerated": false,
"ratio": 3.100535770564135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9061368037048072,
"avg_score": 0.0018160634832124667,
"num_lines": 283
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import string
REMOVE_PUNCT_TABLE = str.maketrans(
string.punctuation, ' ' * len(string.punctuation))
class Corpus(object):
'''Memory Friendly Corpus Reader'''
def __init__(self, files=None, documents=None):
self.files = files
self.documents = documents
if files is not None and documents is not None:
raise ValueError('Define corpus with files or documents but not both')
        if documents is not None:
            self._cached = True   # documents held in memory
        else:
            self._cached = False  # documents read lazily from files
@staticmethod
def _document(file):
with open(file, 'r') as f:
return f.read()
def __getitem__(self, item):
return self._document(self.files[item])
def __iter__(self):
        if not self._cached:
for file in self.files:
yield self._document(file)
else:
for document in self.documents:
yield document
def __len__(self):
        if self._cached:
return len(self.documents)
else:
return len(self.files)
def transform(self, function, output_dir='', suffix='', overwrite=False):
'''Transform a corpus with a function
Args:
function (callable): function with one parameter.
output_dir (str): path to folder where the transformed files
will be stored.
suffix (str): will be appended to the filenames.
Returns:
Corpus: the transformed corpus
'''
        if self._cached:
return Corpus(documents=[function(x) for x in self])
if output_dir == '' and suffix == '' and not overwrite:
raise IOError('default output_dir and suffix parameters'
' would overwrite files, use overwrite=True')
transformed_files = []
extension = '.txt'
for file in self.files:
with open(file, 'r') as f:
text = function(f.read())
name, old_extension = os.path.splitext(file)
name = os.path.basename(name)
transformed_file = os.path.join(
output_dir, name + suffix + extension)
with open(transformed_file, 'w') as f:
f.write(text)
transformed_files.append(transformed_file)
return Corpus(files=transformed_files)
def cached(self):
'''Return a new corpus with documents in memory'''
        if self._cached:
return self
return Corpus(documents=[x for x in self])
def flushed(self, output_dir, suffix=''):
'''Return a new corpus with documents in disk'''
extension = '.txt'
files = []
for i, document in enumerate(self.documents):
file = os.path.join(
output_dir, 'doc_' + str(i) + suffix + extension)
with open(file, 'w') as f:
f.write(document)
files.append(file)
return Corpus(files=files)
def vocabulary(self, preprocessor):
'''Extract the vocabulary from all documents'''
n_tokens = 0
vocabulary = set()
for document in self:
tokens = (preprocessor.preprocess(
document.translate(REMOVE_PUNCT_TABLE))
.split())
n_tokens += len(tokens)
vocabulary.update(tokens)
return vocabulary, n_tokens
@staticmethod
def _vocabulary_map(vocabulary):
'''Maps each element of the vocabulary to an integer
also returns a list mapping integers to words
'''
        vocabulary_map = {x: i for i, x in enumerate(vocabulary)}
vocabulary_list = [''] * len(vocabulary_map)
for x, i in vocabulary_map.items():
vocabulary_list[i] = x
return vocabulary_map, vocabulary_list
def vocabulary_map(self, preprocessor):
'''Maps each element of the vocabulary to an integer
also returns a list mapping integers to words and a
count of the total number of tokens in the corpus
'''
vocabulary, n_tokens = self.vocabulary(preprocessor)
vocabulary_map, vocabulary_list = self._vocabulary_map(vocabulary)
return vocabulary_map, vocabulary_list, n_tokens
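# A minimal in-memory sketch; the lowercasing preprocessor below is a
# hypothetical stand-in for whatever object the caller supplies (anything
# exposing a preprocess(text) -> str method works).
if __name__ == '__main__':
    class LowercasePreprocessor(object):
        def preprocess(self, text):
            return text.lower()
    corpus = Corpus(documents=['Frequent phrase mining.',
                               'Mining topical phrases from text.'])
    print(len(corpus))  # 2 documents held in memory
    vocab_map, vocab_list, n_tokens = corpus.vocabulary_map(LowercasePreprocessor())
    print(n_tokens, sorted(vocab_list))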
| {
"repo_name": "latorrefabian/topmine",
"path": "topmine/corpus.py",
"copies": "1",
"size": "4415",
"license": "bsd-2-clause",
"hash": -2342320210285858300,
"line_mean": 34.0396825397,
"line_max": 82,
"alpha_frac": 0.5716874292,
"autogenerated": false,
"ratio": 4.58939708939709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5661084518597089,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import subprocess
from django_linter.checkers.settings import SettingsShecker
from django_linter.checkers.models import ModelsChecker
from django_linter.checkers.misc import MiscChecker
from django_linter.checkers.layout import LayoutChecker
from django_linter.checkers.forms import FormsChecker
from django_linter.checkers.views import ViewsChecker
def main():
out = open('README.rst', 'w')
print("""Django linter
=============
.. image:: https://travis-ci.org/geerk/django_linter.svg?branch=master
:target: https://travis-ci.org/geerk/django_linter
This is a simple extension for pylint that aims to check some common mistakes in django projects.
Contributions are welcome.
Installation
------------
::
pip install django_linter
Usage
-----
It can be used as a plugin or standalone script. To use it as a plugin it should be installed first, then run with pylint:
::
pylint --load-plugins=django_linter TARGET
To use it as a standalone script:
""", file=out)
print('::', file=out)
print('', file=out)
usage = os.tmpfile()
p = subprocess.Popen(
['python', '-m', 'django_linter.main', '-h'], stdout=usage)
p.wait()
usage.seek(0)
for line in usage:
if line != '\n':
out.write(' ' + line.replace('main.py', 'django-linter'))
else:
out.write(line)
print('', file=out)
print('Implemented checks', file=out)
print('------------------', file=out)
print('', file=out)
for checker in (SettingsShecker, ModelsChecker, FormsChecker, ViewsChecker,
LayoutChecker, MiscChecker):
print('**%s:**' % checker.name.title(), file=out)
print('', file=out)
for k in sorted(checker.msgs.viewkeys()):
print('- %s (%s): %s' % (k, checker.msgs[k][1], checker.msgs[k][2]),
file=out)
print('', file=out)
print("""Implemented suppressers
-----------------------
- "Meta" classes
- urlpatterns
- logger
Implemented transformers
------------------------
**Models**
- "id" field
- "objects" manager
- "DoesNotExist" exception
- "MultipleObjectsReturned" exception
**Testing**
- test responses (django and DRF)
**Factories**
- factory-boy's factories (factory should return django model, but not always possible to infer model class)
""", file=out)
if __name__ == '__main__':
main()
| {
"repo_name": "geerk/django_linter",
"path": "generate_readme.py",
"copies": "1",
"size": "2505",
"license": "mit",
"hash": 5119779267303338000,
"line_mean": 24.824742268,
"line_max": 122,
"alpha_frac": 0.6315369261,
"autogenerated": false,
"ratio": 3.75,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9879486887386766,
"avg_score": 0.00041000774264660653,
"num_lines": 97
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from datetime import datetime
import tarfile
import tempfile
from optparse import make_option
import re
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from dbbackup import utils
from dbbackup.storage.base import BaseStorage
from dbbackup.storage.base import StorageError
from dbbackup import settings as dbbackup_settings
class Command(BaseCommand):
help = "backup_media [--encrypt]"
option_list = BaseCommand.option_list + (
make_option("-c", "--clean", help="Clean up old backup files", action="store_true", default=False),
make_option("-s", "--servername", help="Specify server name to include in backup filename"),
make_option("-e", "--encrypt", help="Encrypt the backup files", action="store_true", default=False),
)
@utils.email_uncaught_exception
def handle(self, *args, **options):
try:
self.servername = options.get('servername')
self.storage = BaseStorage.storage_factory()
self.backup_mediafiles(options.get('encrypt'))
if options.get('clean'):
self.cleanup_old_backups()
except StorageError as err:
raise CommandError(err)
def backup_mediafiles(self, encrypt):
source_dir = self.get_source_dir()
if not source_dir:
print("No media source dir configured.")
sys.exit(0)
print("Backing up media files in %s" % source_dir)
output_file = self.create_backup_file(source_dir, self.get_backup_basename())
if encrypt:
encrypted_file = utils.encrypt_file(output_file)
output_file = encrypted_file
print(" Backup tempfile created: %s (%s)" % (output_file.name, utils.handle_size(output_file)))
print(" Writing file to %s: %s" % (self.storage.name, self.storage.backup_dir))
self.storage.write_file(output_file, self.get_backup_basename())
def get_backup_basename(self):
# TODO: use DBBACKUP_FILENAME_TEMPLATE
server_name = self.get_servername()
if server_name:
server_name = '-%s' % server_name
return '%s%s-%s.media.tar.gz' % (
self.get_databasename(),
server_name,
datetime.now().strftime(dbbackup_settings.DATE_FORMAT)
)
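    # Example (illustrative values): with database name 'mydb', server name
    # 'web1' and the default date format this yields something like
    # 'mydb-web1-2015-06-01-120000.media.tar.gz'; the exact timestamp part
    # depends on dbbackup_settings.DATE_FORMAT.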
def get_databasename(self):
# TODO: WTF is this??
return settings.DATABASES['default']['NAME']
def create_backup_file(self, source_dir, backup_basename):
temp_dir = tempfile.mkdtemp()
try:
backup_filename = os.path.join(temp_dir, backup_basename)
try:
tar_file = tarfile.open(backup_filename, 'w|gz')
try:
tar_file.add(source_dir)
finally:
tar_file.close()
return utils.create_spooled_temporary_file(backup_filename)
finally:
if os.path.exists(backup_filename):
os.remove(backup_filename)
finally:
os.rmdir(temp_dir)
def get_source_dir(self):
return dbbackup_settings.MEDIA_PATH
def cleanup_old_backups(self):
""" Cleanup old backups, keeping the number of backups specified by
DBBACKUP_CLEANUP_KEEP and any backups that occur on first of the month.
"""
print("Cleaning Old Backups for media files")
file_list = self.get_backup_file_list()
for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:
if int(backup_date.strftime("%d")) != 1:
print(" Deleting: %s" % filename)
self.storage.delete_file(filename)
def get_backup_file_list(self):
""" Return a list of backup files including the backup date. The result is a list of tuples (datetime, filename).
The list is sorted by date.
"""
server_name = self.get_servername()
if server_name:
server_name = '-%s' % server_name
media_re = re.compile(r'^%s%s-(.*)\.media\.tar\.gz' % (self.get_databasename(), server_name))
def is_media_backup(filename):
return media_re.search(filename)
def get_datetime_from_filename(filename):
datestr = re.findall(media_re, filename)[0]
return datetime.strptime(datestr, dbbackup_settings.DATE_FORMAT)
file_list = [
(get_datetime_from_filename(f), f)
for f in self.storage.list_directory()
if is_media_backup(f)
]
return sorted(file_list, key=lambda v: v[0])
def get_servername(self):
return self.servername or dbbackup_settings.SERVER_NAME | {
"repo_name": "typecode/django-dbbackup",
"path": "dbbackup/management/commands/mediabackup.py",
"copies": "3",
"size": "4897",
"license": "bsd-3-clause",
"hash": -4707936916178057000,
"line_mean": 35.552238806,
"line_max": 121,
"alpha_frac": 0.6128241781,
"autogenerated": false,
"ratio": 4.004088307440719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.611691248554072,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import tempfile
import string
import math
import re
import unicodedata
import pdb
from collections import Counter
from itertools import chain
from heapq import heappush, heappop, heapify
NORMALIZE_PUNCT_TABLE = str.maketrans(
string.punctuation, '.' * len(string.punctuation))
REMOVE_PUNCT_TABLE = str.maketrans(
string.punctuation, ' ' * len(string.punctuation))
def to_ascii(s):
'''Adapted from sklearn.feature_extraction.text'''
nkfd_form = unicodedata.normalize('NFKD', s)
only_ascii = nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
return only_ascii
def strip_tags(s):
'''Taken from sklearn.feature_extraction.text'''
return re.compile(r'<([^>]+)>', flags=re.UNICODE).sub('', s)
class RomanPreprocessor(object):
'''Taken from sklearn.feature_extraction.text'''
def preprocess(self, unicode_text):
'''Preprocess strings'''
return to_ascii(strip_tags(unicode_text.lower()))
def __repr__(self):
return 'RomanPreprocessor()'
DEFAULT_PREPROCESSOR = RomanPreprocessor()
class TopmineTokenizer(object):
def __init__(self, preprocessor=DEFAULT_PREPROCESSOR,
threshold=1, min_support=1):
self.preprocessor = preprocessor
self.counter = None
self.threshold = threshold
self.min_support = min_support
self.vocabulary_map = None
self.vocabulary = None
self.n_tokens = None
def fit(self, corpus):
'''Fits the vocabulary and vocabulary_map to a given corpus.
Calculates the counter of phrases given the min_support
'''
self.vocabulary_map, self.vocabulary, self.n_tokens = (
corpus.vocabulary_map(self.preprocessor))
sentences = self.corpus_to_list(corpus)
self.counter = phrase_frequency(sentences, self.min_support)
def transform_document(self, document):
'''Splits a document into sentences, transform the sentences
and return the results in a list
'''
return [x for sentence in self.doc_to_list(document)
for x in self.transform_sentence(sentence)]
def transform_sentence(self, sentence):
'''Given a sentence, encoded as a list of integers using
the vocabulary_map, return it as a sequence of
significant phrases.
'''
phrases = [(x,) for x in sentence]
phrase_start = [x for x in range(len(phrases))]
phrase_end = [x for x in range(len(phrases))]
costs = [(self.cost(phrases[i], phrases[i + 1]), i, i + 1, 2)
for i in range(len(phrases) - 1)]
heapify(costs)
        while len(costs) > 0:
# a = phrase a, b = phrase b
# i_a = phrase a index, i_b = phrase b index
# phrase_start[x] = x means that a phrase starts at that position
cost, i_a, i_b, length = heappop(costs)
if cost > -self.threshold:
break
if phrase_start[i_a] != i_a:
continue
if phrase_start[i_b] != i_b:
continue
if length != len(phrases[i_a] + phrases[i_b]):
continue
phrase_start[i_b] = i_a
phrase_end[i_a] = phrase_end[i_b]
merged_phrase = phrases[i_a] + phrases[i_b]
phrases[i_a] = merged_phrase
phrases[i_b] = None
if i_a > 0:
prev_phrase_start = phrase_start[i_a - 1]
prev_phrase = phrases[prev_phrase_start]
heappush(costs, (
self.cost(prev_phrase, merged_phrase),
prev_phrase_start, i_a,
len(prev_phrase) + len(merged_phrase)))
if phrase_end[i_b] < len(phrases) - 1:
next_phrase_start = phrase_end[i_b] + 1
next_phrase = phrases[next_phrase_start]
heappush(costs, (
self.cost(merged_phrase, next_phrase),
i_a, next_phrase_start,
len(merged_phrase) + len(next_phrase)))
encoded_phrases = [x for x in phrases if x is not None]
return self.decode_phrases(encoded_phrases)
def corpus_to_list(self, corpus):
'''Transforms a corpus into a list of lists, one list
per sentence in the corpus, encoded using the vocabulary_map
'''
sentences = []
for document in corpus:
for sentence in self.doc_to_list(document):
sentences.append(sentence)
return sentences
def doc_to_list(self, document):
'''Splits a document on punctuation and returns a list
of indices using the vocabulary_map, one list per sentence
'''
document = self.preprocessor.preprocess(
document.translate(NORMALIZE_PUNCT_TABLE))
sentences = document.split('.')
return [[self.vocabulary_map[x] for x in y.split()] for y in sentences]
def decode_phrases(self, encoded_phrases):
'''Takes a list of tuples of indices, and translate each tuple
to a phrase, using the word given by such index in the vocabulary
'''
return [' '.join([self.vocabulary[i] for i in phrase])
for phrase in encoded_phrases]
def cost(self, a, b):
'''Calculates the cost of merging two phrases. Cost is
defined as the negative of significance. This way we can
use the python min-heap implementation
'''
# flatten the tuples a, b
ab = self.counter[tuple([x for x in chain(*(a, b))])]
if ab > 0:
return (-(ab - (self.counter[a] * self.counter[b]) / self.n_tokens )
/ math.sqrt(ab))
else:
return math.inf
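    # Worked example (illustrative counts): with counter[a] = 30,
    # counter[b] = 20, counter[ab] = 10 and n_tokens = 1000 the significance is
    # (10 - 30*20/1000) / sqrt(10) ~= 2.97, so cost() returns ~ -2.97 and the
    # merge survives the heap loop whenever threshold <= 2.97.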
def phrase_frequency(sentences, min_support):
'''Calculates counter with frequent phrases
Args:
sentences (list): each sentence is a list of words
'''
indices = [range(len(sentence)) for sentence in sentences]
counter = Counter(((x,) for x in chain(*sentences)))
n = 1
while len(sentences) > 0:
for i, sentence in enumerate_backwards(sentences):
indices[i] = [
j for j in indices[i]
if counter[tuple(sentence[j: j+n])] >= min_support]
if len(indices[i]) == 0:
indices.pop(i)
sentences.pop(i)
continue
for j in indices[i]:
if j + 1 in indices[i]:
                    counter.update([tuple(sentence[j: j+n+1])])
indices[i].pop()
n = n + 1
return counter
def enumerate_backwards(array):
'''Generate indices and elements of an array from last to first
this allows to pop elements and still traverse each index in the list
'''
for i, x in zip(range(len(array)-1, -1, -1), reversed(array)):
yield i, x
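if __name__ == '__main__':
    # Minimal self-check sketch (illustrative sentences, not part of the
    # original module): two sentences share the bigram (0, 1), so with
    # min_support=2 it is counted twice, while the singleton tokens 2 and 3
    # are never extended into longer phrases.
    demo_counts = phrase_frequency([[0, 1, 2], [0, 1, 3]], min_support=2)
    assert demo_counts[(0, 1)] == 2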
| {
"repo_name": "latorrefabian/topmine",
"path": "topmine/tokenizer.py",
"copies": "1",
"size": "7046",
"license": "bsd-2-clause",
"hash": 1510086413674108200,
"line_mean": 35.5077720207,
"line_max": 80,
"alpha_frac": 0.5797615668,
"autogenerated": false,
"ratio": 3.996596710153148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006517348105650952,
"num_lines": 193
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import random
import sys
import numpy as np
class Discrete(object):
"""
Discrete space from [0, dims-1]
"""
def __init__(self, dims):
self._dims = dims
def sample(self):
"""
Randomly sample from distribution space
"""
return random.randrange(self.dims)
def __eq__(self, other):
if isinstance(other, Discrete):
return other.dims == self.dims
return False
def __repr__(self):
return "Discrete(%d)" % (self.dims)
def __str__(self):
return "Discrete space from 0 to %d" % (self.dims - 1)
@property
def dims(self):
return self._dims
class Box(object):
"""
Continuous space for R^n, all from a lower bound to an upper bound
    if lower or upper is not given, the space is bounded by -sys.maxsize - 1 and sys.maxsize
"""
    def __init__(self, dims, lower=(-sys.maxsize - 1), upper=sys.maxsize):
self._dims = dims
self._lower = lower
self._upper = upper
def sample(self):
"""
Return a random sample from the R^n space.
"""
        return np.array([random.uniform(self.lower, self.upper) for _ in range(self.dims)])
def __repr__(self):
return "Box(%d, %d, %d)" % (self.dims, self.lower, self.upper)
def __str__(self):
return "Continuous Space of %d dimensions in [%d, %d]" % (self.dims, self.lower, self.upper)
def __eq__(self, other):
if isinstance(other, Box):
            return (other.dims == self.dims) and (other.lower == self.lower) and (other.upper == self.upper)
return False
@property
def dims(self):
return self._dims
@property
def lower(self):
return self._lower
@property
def upper(self):
return self._upper
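if __name__ == '__main__':
    # Minimal usage sketch (illustrative values): draw one sample from each
    # space type defined above.
    action_space = Discrete(4)
    print(action_space, '->', action_space.sample())   # integer in [0, 3]
    obs_space = Box(3, lower=-1.0, upper=1.0)
    print(obs_space, '->', obs_space.sample())          # 3 values drawn from [-1, 1]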
| {
"repo_name": "sig-ai/SDM",
"path": "sdm/abstract/types.py",
"copies": "1",
"size": "1936",
"license": "mit",
"hash": -1538342591894962000,
"line_mean": 21.2528735632,
"line_max": 109,
"alpha_frac": 0.5671487603,
"autogenerated": false,
"ratio": 3.856573705179283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4923722465479283,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import six
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.legend_handler import HandlerLineCollection
import matplotlib.collections as mcol
from matplotlib.lines import Line2D
class HandlerDashedLines(HandlerLineCollection):
"""
Custom Handler for LineCollection instances.
"""
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
# figure out how many lines there are
numlines = len(orig_handle.get_segments())
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
leglines = []
# divide the vertical space where the lines will go
# into equal parts based on the number of lines
ydata = ((height) / (numlines + 1)) * np.ones(xdata.shape, float)
# for each line, create the line at the proper location
# and set the dash pattern
for i in range(numlines):
legline = Line2D(xdata, ydata * (numlines - i) - ydescent)
self.update_prop(legline, orig_handle, legend)
# set color, dash pattern, and linewidth to that
# of the lines in linecollection
try:
color = orig_handle.get_colors()[i]
except IndexError:
color = orig_handle.get_colors()[0]
try:
dashes = orig_handle.get_dashes()[i]
except IndexError:
dashes = orig_handle.get_dashes()[0]
try:
lw = orig_handle.get_linewidths()[i]
except IndexError:
lw = orig_handle.get_linewidths()[0]
            if dashes[0] is not None:
legline.set_dashes(dashes[1])
legline.set_color(color)
legline.set_transform(trans)
legline.set_linewidth(lw)
leglines.append(legline)
return leglines
x = np.linspace(0, 5, 100)
plt.figure()
colors = ['red', 'orange', 'yellow', 'green', 'blue']
styles = ['solid', 'dashed', 'dashed', 'dashed', 'solid']
lines = []
for i, color, style in zip(range(5), colors, styles):
plt.plot(x, np.sin(x) - .1 * i, c=color, ls=style)
# make proxy artists
# make list of one line -- doesn't matter what the coordinates are
line = [[(0, 0)]]
# set up the proxy artist
lc = mcol.LineCollection(5 * line, linestyles=styles, colors=colors)
# create the legend
plt.legend([lc], ['multi-line'], handler_map={type(lc): HandlerDashedLines()},
handlelength=2.5, handleheight=3)
plt.show()
| {
"repo_name": "bundgus/python-playground",
"path": "matplotlib-playground/examples/pylab_examples/legend_demo5.py",
"copies": "1",
"size": "2723",
"license": "mit",
"hash": -7500364954340138000,
"line_mean": 37.3521126761,
"line_max": 78,
"alpha_frac": 0.5989717224,
"autogenerated": false,
"ratio": 3.884450784593438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9978743013912951,
"avg_score": 0.0009358986160974563,
"num_lines": 71
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import logging
import csv
import os
import subprocess
import multiprocessing
import platform
import time
import shutil
import gzip
import edlib
import pandas as pd
from builtins import range
from Bio import SeqIO
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from natsort import natsorted
from amptk.__version__ import __version__
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
parentdir = os.path.join(os.path.dirname(__file__))
ASCII = {'!':'0','"':'1','#':'2','$':'3','%':'4','&':'5',
"'":'6','(':'7',')':'8','*':'9','+':'10',',':'11',
'-':'12','.':'13','/':'14','0':'15','1':'16','2':'17',
'3':'18','4':'19','5':'20','6':'21','7':'22','8':'23',
'9':'24',':':'25',';':'26','<':'27','=':'28','>':'29',
'?':'30','@':'31','A':'32','B':'33','C':'34','D':'35',
'E':'36','F':'37','G':'38','H':'39','I':'40','J':'41',
'K':'42','L':'43','M':'44','N':'45','O':'46','P':'47',
'Q':'48','R':'49','S':'50'}
primer_db = {'fITS7': 'GTGARTCATCGAATCTTTG',
'fITS7-ion': 'AGTGARTCATCGAATCTTTG',
'ITS4': 'TCCTCCGCTTATTGATATGC',
'ITS1-F': 'CTTGGTCATTTAGAGGAAGTAA',
'ITS2': 'GCTGCGTTCTTCATCGATGC',
'ITS3': 'GCATCGATGAAGAACGCAGC',
'ITS4-B': 'CAGGAGACTTGTACACGGTCCAG',
'ITS1': 'TCCGTAGGTGAACCTGCGG',
'LR0R': 'ACCCGCTGAACTTAAGC',
'LR2R': 'AAGAACTTTGAAAAGAG',
'LR3': 'CCGTGTTTCAAGACGGG',
'JH-LS-369rc': 'CTTCCCTTTCAACAATTTCAC',
'16S_V3': 'CCTACGGGNGGCWGCAG',
'16S_V4': 'GACTACHVGGGTATCTAATCC',
'ITS3_KYO2': 'GATGAAGAACGYAGYRAA',
'COI-F': 'GGTCAACAAATCATAAAGATATTGG',
'COI-R': 'GGWACTAATCAATTTCCAAATCC',
'515FB': 'GTGYCAGCMGCCGCGGTAA',
'806RB': 'GGACTACNVGGGTWTCTAAT',
'ITS4-B21': 'CAGGAGACTTGTACACGGTCC',
'LCO1490': 'GGTCAACAAATCATAAAGATATTGG',
'mlCOIintR': 'GGRGGRTASACSGTTCASCCSGTSCC'}
degenNuc = [("R", "A"), ("R", "G"),
("M", "A"), ("M", "C"),
("W", "A"), ("W", "T"),
("S", "C"), ("S", "G"),
("Y", "C"), ("Y", "T"),
("K", "G"), ("K", "T"),
("V", "A"), ("V", "C"), ("V", "G"),
("H", "A"), ("H", "C"), ("H", "T"),
("D", "A"), ("D", "G"), ("D", "T"),
("B", "C"), ("B", "G"), ("B", "T"),
("N", "G"), ("N", "A"), ("N", "T"), ("N", "C"),
("X", "G"), ("X", "A"), ("X", "T"), ("X", "C")]
degenNucSimple = [("R", "A"), ("R", "G"),
("M", "A"), ("M", "C"),
("W", "A"), ("W", "T"),
("S", "C"), ("S", "G"),
("Y", "C"), ("Y", "T"),
("K", "G"), ("K", "T"),
("V", "A"), ("V", "C"), ("V", "G"),
("H", "A"), ("H", "C"), ("H", "T"),
("D", "A"), ("D", "G"), ("D", "T"),
("B", "C"), ("B", "G"), ("B", "T")]
class colr(object):
GRN = '\033[92m'
END = '\033[0m'
WARN = '\033[93m'
#setup functions
def download(url, name):
file_name = name
u = urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = 0
for x in meta.items():
if x[0].lower() == 'content-length':
file_size = int(x[1])
print("Downloading: {0} Bytes: {1}".format(url, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
p = float(file_size_dl) / file_size
status = r"{0} [{1:.2%}]".format(file_size_dl, p)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
f.close()
#functions for system checks, etc
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = open(os.devnull, 'w'), cwd=parentdir).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = False
return GIT_REVISION
def getSize(filename):
st = os.stat(filename)
return st.st_size
def execute(cmd):
DEVNULL = open(os.devnull, 'w')
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
universal_newlines=True, stderr=DEVNULL)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def minimap_otutable(otus, fastq, output, method='ont', min_mapq=1, cpus=1,
min_pident=0.90):
# function to map reads with minimap2 and create OTU table
# requires barcodelable= or barcode= or sample= in fastq header
if method == 'pacbio':
cmd = ['minimap2', '-x', 'map-pb', '-c', '-t', str(cpus),
'--secondary=no', otus, fastq]
else:
cmd = ['minimap2', '-x', 'map-ont', '-c', '-t', str(cpus),
'--secondary=no', otus, fastq]
tmpOut = 'minimap2_{}.paf'.format(os.getpid())
runSubprocess2(cmd, log, tmpOut)
Results = {}
with open(tmpOut, 'r') as infile:
for line in infile:
line = line.rstrip()
qname, qlen, qstart, qend, strand, tname, tlen, tstart, tend, match, alnlen, mapq = line.split('\t')[:12]
theRest = line.split('\t')[12:]
pident = 0.0
barcodelabel = None
if int(mapq) < min_mapq:
continue
if ';' not in qname:
continue
for x in qname.split(';'):
if x.startswith('barcodelabel='):
barcodelabel = x.replace('barcodelabel=', '')
break
elif x.startswith('barcode='):
barcodelabel = x.replace('barcode=', '')
break
elif x.startswith('sample='):
barcodelabel = x.replace('sample=', '')
break
if not barcodelabel:
continue
for y in theRest:
if y.startswith('de:f:'):
pident = 1 - float(y.replace('de:f:', ''))
if pident < min_pident:
continue
#print(tname, barcodelabel, str(pident))
if barcodelabel not in Results:
Results[barcodelabel] = {tname: 1}
else:
if tname not in Results[barcodelabel]:
Results[barcodelabel][tname] = 1
else:
Results[barcodelabel][tname] += 1
df = pd.DataFrame(Results)
df.index.rename('#OTU ID', inplace=True)
df.fillna(0, inplace=True)
df = df.astype(int)
df.to_csv(output, sep='\t')
os.remove(tmpOut)
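# Note on the OTU-table builder above (illustrative header): each read in the
# mapped FASTQ must carry its sample in the header, e.g.
#   @R_1;barcodelabel=SampleA;
# so that the PAF query names can be binned into per-sample columns.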
def checkfile(input):
if os.path.isfile(input):
filesize = getSize(input)
if int(filesize) < 1:
return False
else:
return True
elif os.path.islink(input):
return True
else:
return False
def SafeRemove(input):
if os.path.isdir(input):
shutil.rmtree(input)
elif os.path.isfile(input):
os.remove(input)
else:
return
def number_present(s):
return any(i.isdigit() for i in s)
def get_version():
    git = git_version()
    if git:
        version = __version__ + '-' + git
    else:
        version = __version__
    return version
def get_usearch_version(usearch):
try:
version = subprocess.Popen([usearch, '-version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
except OSError:
log.error("%s not found in your PATH, exiting." % usearch)
sys.exit(1)
vers = version[0].decode('utf-8').replace('usearch v', '')
vers2 = vers.split('_')[0]
return vers2
def get_vsearch_version():
version = subprocess.Popen(['vsearch', '--version'], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
for v in version:
v = v.decode('utf-8')
if v.startswith('vsearch'):
vers = v.replace('vsearch v', '')
vers2 = vers.split('_')[0]
return vers2
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def gvc(input, check):
if versiontuple(input) >= versiontuple(check):
return True
else:
return False
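# Illustrative check for the two version helpers above:
#   gvc('2.3.0', '2.2.0') -> True, gvc('2.1.9', '2.2.0') -> False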
def versionDependencyChecks(usearch, method='vsearch'):
#to run amptk need usearch > 9.0.2132 and vsearch > 2.2.0
usearch_pass = '9.0.2132'
vsearch_pass = '2.2.0'
amptk_version = get_version()
if method == 'usearch':
usearch_version = get_usearch_version(usearch)
if not gvc(usearch_version, usearch_pass):
log.error("USEARCH v%s detected, needs to be atleast v%s" % (usearch_version, usearch_pass))
sys.exit(1)
vsearch_version = get_vsearch_version()
if not gvc(vsearch_version, vsearch_pass):
log.error("VSEARCH v%s detected, needs to be atleast v%s" % (vsearch_version, vsearch_pass))
sys.exit(1)
if method == 'usearch':
log.info("AMPtk v%s, USEARCH v%s, VSEARCH v%s" % (amptk_version, usearch_version, vsearch_version))
else:
log.info("AMPtk v%s, VSEARCH v%s" % (amptk_version, vsearch_version))
def checkusearch10(usearch):
#to run amptk need usearch > 10.0.2132 and vsearch > 2.2.0
amptk_version = get_version()
usearch_version = get_usearch_version(usearch)
vsearch_version = get_vsearch_version()
usearch_pass = '10.0.240'
vsearch_pass = '2.2.0'
if not gvc(usearch_version, usearch_pass):
log.error("USEARCH v%s detected, needs to be atleast v%s" % (usearch_version, usearch_pass))
sys.exit(1)
if not gvc(vsearch_version, vsearch_pass):
log.error("VSEARCH v%s detected, needs to be atleast v%s" % (vsearch_version, vsearch_pass))
sys.exit(1)
#log.info("AMPtk v%s, USEARCH v%s, VSEARCH v%s" % (amptk_version, usearch_version, vsearch_version))
def checkRversion():
#need to have R version > 3.2
cmd = ['Rscript', '--vanilla', os.path.join(parentdir, 'check_version.R')]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
versions = stdout.decode('utf-8').replace(' \n', '')
Rvers = versions.split(',')[0]
dada2 = versions.split(',')[1]
phyloseq = versions.split(',')[2]
lulu = versions.split(',')[3]
return (Rvers, dada2, phyloseq, lulu)
def checkfastqsize(input):
filesize = os.path.getsize(input)
return filesize
def getCPUS():
cores = multiprocessing.cpu_count()
return cores
def MemoryCheck():
import psutil
mem = psutil.virtual_memory()
RAM = int(mem.total)
return round(RAM / 1024000000)
def systemOS():
if sys.platform == 'darwin':
system_os = 'MacOSX '+ platform.mac_ver()[0]
elif sys.platform == 'linux':
linux_version = platform.linux_distribution()
system_os = linux_version[0]+ ' '+linux_version[1]
else:
system_os = sys.platform
return system_os
def SystemInfo():
system_os = systemOS()
python_vers = str(sys.version_info[0])+'.'+str(sys.version_info[1])+'.'+str(sys.version_info[2])
log.info("OS: %s, %i cores, ~ %i GB RAM. Python: %s" % (system_os, multiprocessing.cpu_count(), MemoryCheck(), python_vers))
#print python modules to logfile
mod_versions()
def mod_versions():
import pkg_resources
modules = ['numpy', 'pandas', 'matplotlib', 'psutil', 'natsort', 'biopython', 'edlib', 'biom-format']
results = []
for x in modules:
try:
vers = pkg_resources.get_distribution(x).version
hit = "%s v%s" % (x, vers)
except pkg_resources.DistributionNotFound:
hit = "%s NOT installed!" % x
results.append(hit)
if x == 'edlib':
if '.post' in vers:
vers = vers.split('.post')[0]
if not gvc(vers, '1.2.1'):
log.error("Edlib v%s detected, at least v1.2.1 required for degenerate nucleotide search, please upgrade. e.g. pip install -U edlib or conda install edlib" % vers)
sys.exit(1)
log.debug("Python Modules: %s" % ', '.join(results))
class gzopen(object):
    def __init__(self, fname):
        # Read the magic number (the first 2 bytes) in binary mode so the
        # check also works under Python 3.
        with open(fname, 'rb') as check:
            magic_number = check.read(2)
        # Encapsulated 'self.f' is a text-mode file or a gzip text stream.
        if magic_number == b'\x1f\x8b':
            self.f = gzip.open(fname, 'rt')
        else:
            self.f = open(fname)
# Define '__enter__' and '__exit__' to use in
# 'with' blocks. Always close the file and the
# GzipFile if applicable.
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.f.fileobj.close()
except AttributeError:
pass
finally:
self.f.close()
# Reproduce the interface of an open file
# by encapsulation.
def __getattr__(self, name):
return getattr(self.f, name)
def __iter__(self):
return iter(self.f)
def __next__(self):
return next(self.f)
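# Usage sketch (hypothetical path): gzopen transparently handles plain and
# gzip-compressed input, so the same loop works for either, e.g.
#   for title, seq, qual in FastqGeneralIterator(gzopen('reads.fastq.gz')):
#       ...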
def Funzip(input, output, cpus):
'''
function to unzip as fast as it can, pigz -> bgzip -> gzip
'''
if which('pigz'):
cmd = ['pigz', '--decompress', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '--decompress', '-c', input]
try:
runSubprocess2(cmd, log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip(input, output, cpus):
'''
function to zip as fast as it can, pigz -> bgzip -> gzip
'''
if which('pigz'):
cmd = ['pigz', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '-c', input]
try:
runSubprocess2(cmd, log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip_inplace(input):
'''
function to zip as fast as it can, pigz -> bgzip -> gzip
'''
cpus = multiprocessing.cpu_count()
if which('pigz'):
cmd = ['pigz', '-f', '-p', str(cpus), input]
else:
cmd = ['gzip', '-f', input]
try:
runSubprocess(cmd, log)
except NameError:
subprocess.call(cmd)
def fastalen2dict(input):
Lengths = {}
with open(input, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if not rec.id in Lengths:
Lengths[rec.id] = len(rec.seq)
return Lengths
def myround(x, base=10):
return int(base * round(float(x)/base))
def GuessRL(input):
#read first 50 records, get length then exit
lengths = []
for title, seq, qual in FastqGeneralIterator(gzopen(input)):
if len(lengths) < 50:
lengths.append(len(seq))
else:
break
return myround(max(set(lengths)))
def countfasta(input):
count = 0
with open(input, 'r') as f:
for line in f:
if line.startswith(">"):
count += 1
return count
def countfastq(input):
lines = sum(1 for line in gzopen(input))
count = int(lines) // 4
return count
def line_count(fname):
with open(fname) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
def softwrap(string, every=80):
lines = []
for i in range(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
def line_count2(fname):
count = 0
with open(fname, 'r') as f:
for line in f:
if not '*' in line:
count += 1
return count
def getreadlength(input):
with gzopen(input) as fp:
for i, line in enumerate(fp):
if i == 1:
                read_length = len(line) - 1  # strip the trailing newline character
elif i > 2:
break
return read_length
def runSubprocess(cmd, logfile):
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stdout:
logfile.debug(stdout.decode("utf-8"))
if stderr:
logfile.debug(stderr.decode("utf-8"))
def runSubprocess2(cmd, logfile, output):
    #function where output of cmd is STDOUT, capture STDERR in logfile
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
        stderr = proc.communicate()[1]
        if stderr:
            logfile.debug(stderr.decode("utf-8"))
def runSubprocess3(cmd, logfile, folder, output):
    #function where stdout and stderr of cmd both go to output, run inside folder
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, stdout=out, stderr=out, cwd=folder)
        proc.communicate()
def runSubprocess4(cmd, logfile, logfile2):
    #function where cmd is logged to logfile, and stdout/stderr captured in logfile2
    logfile.debug(' '.join(cmd))
    with open(logfile2, 'w') as out:
        proc = subprocess.Popen(cmd, stdout=out, stderr=out)
        proc.communicate()
def runSubprocess5(cmd):
#function where no logfile and stdout/stderr to fnull
FNULL = open(os.devnull, 'w')
#print(' '.join(cmd))
subprocess.call(cmd, stdout=FNULL, stderr=FNULL)
def getSize(filename):
st = os.stat(filename)
return st.st_size
def check_valid_file(input):
if os.path.isfile(input):
filesize = getSize(input)
if int(filesize) < 1:
return False
else:
return True
else:
return False
def bam2fastq(input, output):
from . import pybam
with open(output, 'w') as fastqout:
with open(input, 'r') as bamin:
for title, seq, qual in pybam.read(bamin,['sam_qname', 'sam_seq','sam_qual']):
fastqout.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
def scan_linepos(path):
"""return a list of seek offsets of the beginning of each line"""
linepos = []
offset = 0
with gzopen(path) as inf:
# WARNING: CPython 2.7 file.tell() is not accurate on file.next()
for line in inf:
linepos.append(offset)
offset += len(line)
return linepos
def return_lines(path, linepos, nstart, nstop):
"""return nsamp lines from path where line offsets are in linepos"""
offsets = linepos[int(nstart):int(nstop)]
lines = []
with gzopen(path) as inf:
for offset in offsets:
inf.seek(offset)
lines.append(inf.readline())
return lines
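# Usage sketch (hypothetical path): together the two helpers above give random
# access by line number; the second FASTQ record occupies lines 4-7, so
#   positions = scan_linepos('reads.fq')
#   record = return_lines('reads.fq', positions, 4, 8)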
def split_fastq(input, numseqs, outputdir, chunks):
#get number of sequences and then number of sequences in each chunk
numlines = numseqs*4
n = numlines // chunks
#make sure n is divisible by 4 (number of lines in fastq)
if ( n % 4 ) != 0:
n = ((n // 4) + 1) * 4
splits = []
count = 0
for i in range(chunks):
start = count
end = count+n
if end > numlines:
end = numlines
splits.append((start, end))
count += n
#get line positions from file
linepos = scan_linepos(input)
#make sure output directory exists
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
#loop through the positions and write output
for i, x in enumerate(splits):
num = i+1
with open(os.path.join(outputdir, 'chunk_'+str(num)+'.fq'), 'w') as output:
lines = return_lines(input, linepos, x[0], x[1])
output.write('%s' % ''.join(lines))
def split_fastqPE(R1, R2, numseqs, outputdir, chunks):
#get number of sequences and then number of sequences in each chunk
numlines = numseqs*4
n = numlines // chunks
#make sure n is divisible by 4 (number of lines in fastq)
if ( n % 4 ) != 0:
n = ((n // 4) + 1) * 4
splits = []
count = 0
for i in range(chunks):
start = count
end = count+n
if end > numlines:
end = numlines
splits.append((start, end))
count += n
#get line positions from file
linepos = scan_linepos(R1)
#make sure output directory exists
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
#loop through the positions and write output
for i, x in enumerate(splits):
num = i+1
with open(os.path.join(outputdir, 'chunk_'+str(num)+'_R1.fq'), 'w') as output1:
with open(os.path.join(outputdir, 'chunk_'+str(num)+'_R2.fq'), 'w') as output2:
lines1 = return_lines(R1, linepos, x[0], x[1])
output1.write('%s' % ''.join(lines1))
lines2 = return_lines(R2, linepos, x[0], x[1])
output2.write('%s' % ''.join(lines2))
def split_fastqPEandI(R1, R2, I1, numseqs, outputdir, chunks):
#get number of sequences and then number of sequences in each chunk
numlines = numseqs*4
n = numlines // chunks
#make sure n is divisible by 4 (number of lines in fastq)
if ( n % 4 ) != 0:
n = ((n // 4) + 1) * 4
splits = []
count = 0
for i in range(chunks):
start = count
end = count+n
if end > numlines:
end = numlines
splits.append((start, end))
count += n
#get line positions from file
linepos = scan_linepos(R1)
linepos2 = scan_linepos(I1)
#make sure output directory exists
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
#loop through the positions and write output
for i, x in enumerate(splits):
num = i+1
with open(os.path.join(outputdir, 'chunk_'+str(num)+'_R1.fq'), 'w') as output1:
with open(os.path.join(outputdir, 'chunk_'+str(num)+'_R2.fq'), 'w') as output2:
with open(os.path.join(outputdir, 'chunk_'+str(num)+'_R3.fq'), 'w') as output3:
lines1 = return_lines(R1, linepos, x[0], x[1])
output1.write('%s' % ''.join(lines1))
lines2 = return_lines(R2, linepos, x[0], x[1])
output2.write('%s' % ''.join(lines2))
lines3 = return_lines(I1, linepos2, x[0], x[1])
output3.write('%s' % ''.join(lines3))
def split_fasta(input, outputdir, chunks):
#function to return line positions of fasta files for chunking
fastapos = []
position = 0
numseqs = 0
with open(input, 'r') as infile:
for line in infile:
if line.startswith('>'):
numseqs += 1
fastapos.append(position)
position += 1
splits = []
n = numseqs // chunks
num = 0
lastline = line_count(input)
for i in range(chunks):
if i == 0:
start = 0
num = n
lastpos = fastapos[n+1]
elif i == chunks-1: #last one
start = lastpos
lastpos = lastline
else:
start = lastpos
num = num + n
try:
lastpos = fastapos[num+1] #find the n+1 seq
except IndexError:
lastpos = fastapos[-1]
splits.append((start, lastpos))
#check if output folder exists, if not create it
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
#get line positions from file
linepos = scan_linepos(input)
#loop through the positions and write output
for i, x in enumerate(splits):
num = i+1
with open(os.path.join(outputdir, 'chunk_'+str(num)+'.fasta'), 'w') as output:
lines = return_lines(input, linepos, x[0], x[1])
output.write('{:}'.format(''.join(lines)))
def trim3prime(input, trimlen, output, removelist):
with open(output, 'w') as outfile:
for title, seq, qual in FastqGeneralIterator(gzopen(input)):
if not title.split(' ')[0] in removelist:
Seq = seq[:trimlen]
Qual = qual[:trimlen]
outfile.write("@%s\n%s\n+\n%s\n" % (title, Seq, Qual))
def PEsanitycheck(R1, R2):
R1count = line_count(R1)
R2count = line_count(R2)
if R1count != R2count:
return False
else:
return True
def PEandIndexCheck(R1, R2, R3):
R1count = countfastq(R1)
R2count = countfastq(R2)
R3count = countfastq(R3)
if R1count == R2count == R3count:
return True
else:
return False
def mapIndex(seq, mapDict, bcmismatch):
besthit = []
for index_name,index in mapDict.items():
align = edlib.align(index, seq, mode="SHW", k=bcmismatch, additionalEqualities=degenNuc)
if align["editDistance"] < 0:
continue
elif align["editDistance"] == 0:
return (index_name, 0)
else:
if len(besthit) < 3:
besthit = [index, index_name, align["editDistance"]]
else:
if align["editDistance"] < int(besthit[2]):
besthit = [index, index_name, align["editDistance"]]
if len(besthit) == 3:
return (besthit[1], besthit[2])
else:
return (None,None)
def DemuxIllumina(R1, R2, I1, mapDict, mismatch, fwdprimer, revprimer, primer_mismatch, outR1, outR2):
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
Total = 0
FPrimer = 0
RPrimer = 0
BCFound = 0
#function to loop through PE reads, renaming according to index
file1 = FastqGeneralIterator(gzopen(R1))
file2 = FastqGeneralIterator(gzopen(R2))
file3 = FastqGeneralIterator(gzopen(I1))
counter = 1
with open(outR1, 'w') as outfile1:
with open(outR2, 'w') as outfile2:
for read1, read2, index in zip(file1, file2, file3):
Total += 1
Name,Diffs = mapIndex(index[1], mapDict, mismatch)
if Name:
BCFound += 1
#strip primers if found
R1ForPos = trimForPrimer(fwdprimer, read1[1], primer_mismatch)
R1RevPos = trimRevPrimer(revprimer, read1[1], primer_mismatch)
R2ForPos = trimForPrimer(revprimer, read2[1], primer_mismatch)
R2RevPos = trimRevPrimer(fwdprimer, read2[1], primer_mismatch)
if R1ForPos > 0:
FPrimer += 1
                    if R1RevPos < len(read1[1]):
                        RPrimer += 1
header = 'R_'+str(counter)+';barcodelabel='+Name+';bcseq='+index[1]+';bcdiffs='+str(Diffs)+';'
outfile1.write('@%s\n%s\n+\n%s\n' % (header, read1[1][R1ForPos:R1RevPos], read1[2][R1ForPos:R1RevPos]))
outfile2.write('@%s\n%s\n+\n%s\n' % (header, read2[1][R2ForPos:R2RevPos], read2[2][R2ForPos:R2RevPos]))
counter += 1
return Total, BCFound, FPrimer, RPrimer
def stripPrimersPE(R1, R2, RL, samplename, fwdprimer, revprimer, primer_mismatch, require_primer, full_length, outR1, outR2):
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
#can walk through dataset in pairs
file1 = FastqGeneralIterator(gzopen(R1))
file2 = FastqGeneralIterator(gzopen(R2))
counter = 1
Total = 0
multihits = 0
findForPrimer = 0
findRevPrimer = 0
with open(outR1, 'w') as outfile1:
with open(outR2, 'w') as outfile2:
for read1, read2 in zip(file1, file2):
Total += 1
ffp = False
frp = False
R1Seq = read1[1][:RL]
R1Qual = read1[2][:RL]
R2Seq = read2[1][:RL]
R2Qual = read2[2][:RL]
ForTrim, RevTrim = (0,)*2
#look for forward primer in forward read
R1foralign = edlib.align(fwdprimer, R1Seq, mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if R1foralign['editDistance'] < 0:
if require_primer == 'on' or full_length: #not found
continue
else:
if len(R1foralign['locations']) > 1: #multiple hits
multihits += 1
continue
try:
ForTrim = R1foralign["locations"][0][1]+1
findForPrimer += 1
ffp = True
except IndexError:
pass
R1revalign = edlib.align(RevComp(revprimer), R1Seq, mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if R1revalign['editDistance'] < 0:
R1RevCut = RL
else:
R1RevCut = R1revalign["locations"][0][0]
findRevPrimer += 1
frp = True
#look for reverse primer in reverse read
R2foralign = edlib.align(revprimer, R2Seq, mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if R2foralign['editDistance'] < 0:
if require_primer == 'on' or full_length: #not found
continue
else:
if len(R2foralign['locations']) > 1: #multiple hits
multihits += 1
continue
try:
RevTrim = R2foralign["locations"][0][1]+1
if not frp:
findRevPrimer += 1
except IndexError:
pass
R2revalign = edlib.align(RevComp(fwdprimer), R2Seq, mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if R2revalign['editDistance'] < 0:
R2RevCut = RL
else:
R2RevCut = R2revalign["locations"][0][0]
if not ffp:
findForPrimer += 1
header = 'R_{:};barcodelabel={:};'.format(counter,samplename)
outfile1.write('@%s\n%s\n+\n%s\n' % (header, R1Seq[ForTrim:R1RevCut], R1Qual[ForTrim:R1RevCut]))
outfile2.write('@%s\n%s\n+\n%s\n' % (header, R2Seq[RevTrim:R2RevCut], R2Qual[RevTrim:R2RevCut]))
counter += 1
return Total, counter-1, multihits, findForPrimer, findRevPrimer
def primerFound(primer, seq, mismatch):
align = edlib.align(primer, seq, mode="HW", k=mismatch, additionalEqualities=degenNuc)
if align['editDistance'] < 0:
return False
else:
return True
def illuminaReorient(R1, R2, fwdprimer, revprimer, mismatch, ReadLength, outR1, outR2):
'''
function to re-orient reads based on primer sequences
only useful if primers in the reads
drops reads that don't have matching primers
'''
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
Total = 0
Correct = 0
Flipped = 0
Dropped = 0
#function to loop through PE reads, renaming according to index
file1 = FastqGeneralIterator(gzopen(R1))
file2 = FastqGeneralIterator(gzopen(R2))
with open(outR1, 'w') as outfile1:
with open(outR2, 'w') as outfile2:
for read1, read2 in zip(file1, file2):
Total += 1
if primerFound(fwdprimer, read1[1], mismatch) and primerFound(revprimer, read2[1], mismatch):
Correct += 1
outfile1.write('@%s\n%s\n+\n%s\n' % (read1[0], read1[1][:ReadLength], read1[2][:ReadLength]))
outfile2.write('@%s\n%s\n+\n%s\n' % (read2[0], read2[1][:ReadLength], read2[2][:ReadLength]))
elif primerFound(fwdprimer, read2[1], mismatch) and primerFound(revprimer, read1[1], mismatch):
Flipped += 1
outfile1.write('@%s\n%s\n+\n%s\n' % (read2[0], read2[1][:ReadLength], read2[2][:ReadLength]))
outfile2.write('@%s\n%s\n+\n%s\n' % (read1[0], read1[1][:ReadLength], read1[2][:ReadLength]))
else:
Dropped += 1
return Total, Correct, Flipped, Dropped
def demuxIlluminaPE(R1, R2, fwdprimer, revprimer, samples, forbarcodes, revbarcodes, barcode_mismatch, primer_mismatch, outR1, outR2, stats):
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
#function to loop through PE reads, renaming according to index
file1 = FastqGeneralIterator(gzopen(R1))
file2 = FastqGeneralIterator(gzopen(R2))
counter = 1
Total = 0
NoBarcode = 0
NoRevBarcode = 0
NoPrimer = 0
NoRevPrimer = 0
ValidSeqs = 0
with open(outR1, 'w') as outfile1:
with open(outR2, 'w') as outfile2:
for read1, read2 in zip(file1, file2):
Total += 1
#look for valid barcode in forward read
if len(forbarcodes) > 0:
BC, BCLabel = AlignBarcode(read1[1], forbarcodes, barcode_mismatch)
if BC == '':
NoBarcode += 1
continue
if len(samples) > 0: #sample dictionary so enforce primers and barcodes from here
FwdPrimer = samples[BCLabel]['ForPrimer']
RevPrimer = samples[BCLabel]['RevPrimer']
foralign = edlib.align(FwdPrimer, read1[1], mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if foralign['editDistance'] < 0: #not found
NoPrimer += 1
continue
stringent = {}
stringent[BCLabel] = samples[BCLabel]['RevBarcode']
revBC, revBCLabel = AlignBarcode(read2[1], stringent, barcode_mismatch)
if revBC == '':
NoRevBarcode += 1
continue
#look for reverse primer in reverse read
revalign = edlib.align(RevPrimer, read2[1], mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if revalign['editDistance'] < 0: #not found
NoRevPrimer += 1
continue
else:
#look for forward primer
foralign = edlib.align(fwdprimer, read1[1], mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if foralign['editDistance'] < 0: #not found
NoPrimer += 1
continue
                    if len(revbarcodes) > 0:
                        #look for valid revbarcodes
                        revBC, revBCLabel = AlignBarcode(read2[1], revbarcodes, barcode_mismatch)
                        if revBC == '':
                            NoRevBarcode += 1
                            continue
#look for reverse primer in reverse read
revalign = edlib.align(revprimer, read2[1], mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if revalign['editDistance'] < 0: #not found
NoRevPrimer += 1
continue
#if get here, then all is well, construct new header and trim reads
if BCLabel == revBCLabel:
label = BCLabel
else:
label = BCLabel+':-:'+revBCLabel
ForTrim = foralign["locations"][0][1]+1
RevTrim = revalign["locations"][0][1]+1
header = 'R_{:};barcodelabel={:};'.format(counter,label)
outfile1.write('@%s\n%s\n+\n%s\n' % (header, read1[1][ForTrim:], read1[2][ForTrim:]))
outfile2.write('@%s\n%s\n+\n%s\n' % (header, read2[1][RevTrim:], read2[2][RevTrim:]))
counter += 1
ValidSeqs += 1
with open(stats, 'w') as statsout:
statsout.write('%i,%i,%i,%i,%i,%i\n' % (Total, NoBarcode, NoPrimer, NoRevBarcode, NoRevPrimer, ValidSeqs))
def demuxIlluminaPE2(R1, R2, fwdprimer, revprimer, samples, forbarcodes, revbarcodes, barcode_mismatch, primer_mismatch, outR1, outR2, stats):
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
#function to loop through PE reads, renaming according to index
file1 = FastqGeneralIterator(gzopen(R1))
file2 = FastqGeneralIterator(gzopen(R2))
counter = 1
Total = 0
NoBarcode = 0
NoRevBarcode = 0
NoPrimer = 0
NoRevPrimer = 0
ValidSeqs = 0
with open(outR1, 'w') as outfile1:
with open(outR2, 'w') as outfile2:
for read1, read2 in zip(file1, file2):
Total += 1
#look for forward primer first, should all have primer and in correct orientation
R1ForTrim = trimForPrimer(fwdprimer, read1[1], primer_mismatch)
                if R1ForTrim == 0: #no forward primer found
                    NoPrimer += 1
                    continue
if len(forbarcodes) > 0: #search for barcode match in seq upstream of primer
R1BCtrim = R1ForTrim - len(fwdprimer)
BC, BCLabel = AlignBarcode2(read1[1][:R1BCtrim], forbarcodes, barcode_mismatch)
if BC == '':
NoBarcode += 1
continue
if len(samples) > 0: #sample dictionary so enforce primers and barcodes from here
FwdPrimer = samples[BCLabel]['ForPrimer']
RevPrimer = samples[BCLabel]['RevPrimer']
stringent = {}
stringent[BCLabel] = samples[BCLabel]['RevBarcode']
#find rev primer in reverse read
R2ForTrim = trimForPrimer(RevPrimer, read2[1], primer_mismatch)
                    if R2ForTrim == 0: #no reverse primer found
                        NoRevPrimer += 1
                        continue
#look for reverse barcode
R2BCTrim = R2ForTrim - len(RevPrimer)
revBC, revBCLabel = AlignBarcode2(read2[1][:R2BCTrim], stringent, barcode_mismatch)
if revBC == '':
NoRevBarcode += 1
continue
#okay, found both primers and barcodes, now 1 more cleanup step trip revcomped primers
R1RevTrim = trimRevPrimer(RevPrimer, read1[1], primer_mismatch)
R2RevTrim = trimRevPrimer(FwdPrimer, read2[1], primer_mismatch)
else:
#no samples dictionary, so allow all combinations of matches
R2ForTrim = trimForPrimer(revprimer, read2[1], primer_mismatch)
                    if R2ForTrim == 0: #no reverse primer found
                        NoRevPrimer += 1
                        continue
if len(revbarcodes) > 0:
#look for reverse barcode
R2BCTrim = R2ForTrim - len(revprimer)
revBC, revBCLabel = AlignBarcode2(read2[1][:R2BCTrim], revbarcodes, barcode_mismatch)
if revBC == '':
NoRevBarcode += 1
continue
#okay, found both primers and barcodes, now 1 more cleanup step trip revcomped primers
R1RevTrim = trimRevPrimer(revprimer, read1[1], primer_mismatch)
R2RevTrim = trimRevPrimer(fwdprimer, read2[1], primer_mismatch)
#if get here, then all is well, construct new header and trim reads
if BCLabel == revBCLabel:
label = BCLabel
else:
label = BCLabel+':-:'+revBCLabel
header = 'R_{:};barcodelabel={:};'.format(counter,label)
outfile1.write('@%s\n%s\n+\n%s\n' % (header, read1[1][R1ForTrim:R1RevTrim], read1[2][R1ForTrim:R1RevTrim]))
outfile2.write('@%s\n%s\n+\n%s\n' % (header, read2[1][R2ForTrim:R2RevTrim], read2[2][R2ForTrim:R2RevTrim]))
counter += 1
ValidSeqs += 1
with open(stats, 'w') as statsout:
statsout.write('%i,%i,%i,%i,%i,%i\n' % (Total, NoBarcode, NoPrimer, NoRevBarcode, NoRevPrimer, ValidSeqs))
def trimForPrimer(primer, seq, primer_mismatch):
foralign = edlib.align(primer, seq, mode="HW", k=primer_mismatch, additionalEqualities=degenNuc)
if foralign['editDistance'] < 0:
return 0
else:
CutPos = foralign["locations"][0][1]+1
return CutPos
def trimRevPrimer(primer, seq, primer_mismatch):
revprimer = RevComp(primer)
revalign = edlib.align(revprimer, seq, mode="HW", k=primer_mismatch, task="locations", additionalEqualities=degenNuc)
if revalign['editDistance'] < 0:
return len(seq)
else:
CutPos = revalign["locations"][0][0]
return CutPos
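# Usage sketch (primer sequences taken from primer_db above, seq is some read
# sequence): the two positions are meant to be used together as a slice, with
# 0 and len(seq) as the no-hit fallbacks:
#   start = trimForPrimer('GTGARTCATCGAATCTTTG', seq, 2)   # fITS7
#   end = trimRevPrimer('TCCTCCGCTTATTGATATGC', seq, 2)    # ITS4
#   trimmed = seq[start:end]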
def losslessTrim(input, fwdprimer, revprimer, mismatch, trimLen, padding, minlength, output):
'''
function to trim primers if found from SE reads
and then trim/pad to a set length
'''
with open(output, 'w') as outfile:
for title, seq, qual in FastqGeneralIterator(gzopen(input)):
#sometimes primers sneek through the PE merging pipeline, check quickly again trim if found
ForTrim = trimForPrimer(fwdprimer, seq, mismatch)
RevTrim = trimRevPrimer(revprimer, seq, mismatch)
Seq = seq[ForTrim:RevTrim]
Qual = qual[ForTrim:RevTrim]
if len(Seq) < int(minlength): #need this check here or primer dimers will get through
continue
if len(Seq) < int(trimLen) and padding == 'on':
pad = int(trimLen) - len(Seq)
SeqF = Seq + pad*'N'
QualF = Qual + pad*'I'
else:
SeqF = Seq[:trimLen]
QualF = Qual[:trimLen]
outfile.write('@%s\n%s\n+\n%s\n' % (title, SeqF, QualF))
def checkBCinHeader(input):
#read first header
for title, seq, qual in FastqGeneralIterator(open(input)):
header = title.split(' ')
info = header[-1]
if info.split(':')[-1].isdigit():
return False
else:
return True
break
def illuminaBCmismatch(R1, R2, index):
remove = []
for file in [R1,R2]:
for title, seq, qual in FastqGeneralIterator(open(file)):
ID = title.split(' ')[0]
BC = title.split(':')[-1]
if BC != index:
remove.append(ID)
remove = set(remove)
return remove
def fasta2barcodes(input, revcomp):
BC = {}
with open(input, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if not rec.id in BC:
if revcomp:
Seq = RevComp(str(rec.seq))
else:
Seq = str(rec.seq)
BC[rec.id] = Seq
return BC
def AlignBarcode(Seq, BarcodeDict, mismatch):
besthit = []
for BL in list(BarcodeDict.keys()):
B = BarcodeDict[BL]
#apparently people use N's in barcode sequences, this doesn't work well with
#edlib, so if N then trim and then align, need to trim the seq as well
if B.startswith('N'):
origLen = len(B)
B = B.lstrip('N')
newLen = len(B)
lenDiff = origLen - newLen
newSeq = Seq[lenDiff:]
else:
newSeq = Seq
align = edlib.align(B, newSeq, mode="SHW", k=mismatch, additionalEqualities=degenNuc)
if align["editDistance"] < 0:
continue
elif align["editDistance"] == 0:
return B, BL
elif align["editDistance"] > 0 and mismatch == 0:
continue
else:
if len(besthit) < 3:
besthit = [B, BL, align["editDistance"]]
else:
if align["editDistance"] < int(besthit[2]):
besthit = [B, BL, align["editDistance"]]
if len(besthit) == 3:
return besthit[0], besthit[1]
else:
return "", ""
def AlignRevBarcode(Seq, BarcodeDict, mismatch):
besthit = []
for BL in list(BarcodeDict.keys()):
B = BarcodeDict[BL]
if B.endswith('N'):
B = B.rstrip('N')
align = edlib.align(B, Seq, mode="HW", k=mismatch, additionalEqualities=degenNuc)
if align["editDistance"] < 0:
continue
elif align["editDistance"] == 0:
return B, BL
elif align["editDistance"] > 0 and mismatch == 0:
continue
else:
if len(besthit) < 3:
besthit = [B, BL, align["editDistance"]]
else:
if align["editDistance"] < int(besthit[2]):
besthit = [B, BL, align["editDistance"]]
if len(besthit) == 3:
return besthit[0], besthit[1]
else:
return "", ""
def AlignBarcode2(Seq, BarcodeDict, mismatch):
besthit = []
for BL in list(BarcodeDict.keys()):
B = BarcodeDict[BL]
#apparently people use N's in barcode sequences, this doesn't work well with
#edlib, so if N then trim and then align, need to trim the seq as well
if B.startswith('N'):
origLen = len(B)
B = B.lstrip('N')
newLen = len(B)
lenDiff = origLen - newLen
newSeq = Seq[lenDiff:]
else:
newSeq = Seq
align = edlib.align(B, newSeq, mode="HW", k=int(mismatch), additionalEqualities=degenNuc)
if align["editDistance"] < 0:
continue
elif align["editDistance"] == 0:
return B, BL
elif align["editDistance"] > 0 and mismatch == 0:
continue
else:
if len(besthit) < 3:
besthit = [B, BL, align["editDistance"]]
else:
if align["editDistance"] < int(besthit[2]):
besthit = [B, BL, align["editDistance"]]
if len(besthit) == 3:
return besthit[0], besthit[1]
else:
return "", ""
def findFwdPrimer(primer, sequence, mismatch, equalities):
#trim position
TrimPos = None
#search for match
align = edlib.align(primer, sequence, mode="HW", k=int(mismatch), additionalEqualities=equalities)
if align["editDistance"] >= 0: #we found a hit
TrimPos = align["locations"][0][1]+1
#return position will be None if not found
return TrimPos
def findRevPrimer(primer, sequence, mismatch, equalities):
#trim position
TrimPos = None
#search for match
align = edlib.align(primer, sequence, mode="HW", task="locations", k=int(mismatch), additionalEqualities=equalities)
if align["editDistance"] >= 0: #we found a hit
TrimPos = align["locations"][0][0]
#return position will be None if not found
return TrimPos
def MergeReadsSimple(R1, R2, tmpdir, outname, minlen, usearch, rescue, method='vsearch'):
#check that num sequences is identical
if not PEsanitycheck(R1, R2):
log.error("%s and %s are not properly paired, exiting" % (R1, R2))
sys.exit(1)
#next run USEARCH/vsearch mergepe
merge_out = os.path.join(tmpdir, outname + '.merged.fq')
skip_for = os.path.join(tmpdir, outname + '.notmerged.R1.fq')
report = os.path.join(tmpdir, outname +'.merge_report.txt')
log.debug("Now merging PE reads")
if method == 'usearch':
cmd = [usearch, '-fastq_mergepairs', R1, '-reverse', R2,
'-fastqout', merge_out, '-fastq_trunctail', '5',
'-fastqout_notmerged_fwd', skip_for, '-minhsp', '12',
'-fastq_maxdiffs', '8', '-report', report,
'-fastq_minmergelen', str(minlen), '-threads', '1']
elif method == 'vsearch':
log.debug('Merging PE reads using vsearch --fastq_mergepairs: {} {}'.format(os.path.basename(R1), os.path.basename(R2)))
cmd = ['vsearch', '--fastq_mergepairs', R1, '--reverse', R2,
'--fastqout', merge_out, '--fastqout_notmerged_fwd', skip_for,
'--fastq_minmergelen', str(minlen), '--fastq_allowmergestagger', '--threads', '1']
runSubprocess(cmd, log)
#now concatenate files for downstream pre-process_illumina.py script
final_out = os.path.join(tmpdir, outname)
tmp_merge = os.path.join(tmpdir, outname+'.tmp')
with open(tmp_merge, 'w') as cat_file:
shutil.copyfileobj(open(merge_out,'r'), cat_file)
if rescue == 'on':
shutil.copyfileobj(open(skip_for,'r'), cat_file)
phixsize = getSize(tmp_merge)
phixcount = countfastq(tmp_merge)
if method == 'usearch':
#run phix removal
#since most users have 32 bit usearch, check size of file, if > 3 GB, split into parts
log.debug("Removing phix from %s" % outname)
log.debug('File Size: %i bytes' % phixsize)
if phixsize > 3e9:
log.debug('FASTQ > 3 GB, splitting FASTQ file into chunks to avoid potential memory problems with 32 bit usearch')
phixdir = os.path.join(tmpdir, 'phix_'+str(os.getpid()))
os.makedirs(phixdir)
num = round(int((phixsize / 3e9))) + 1
split_fastq(tmp_merge, phixcount, phixdir, int(num))
for file in os.listdir(phixdir):
if file.endswith(".fq"):
output = os.path.join(phixdir, file+'.phix')
file = os.path.join(phixdir, file)
cmd = [usearch, '-filter_phix', file, '-output', output, '-threads', '1']
runSubprocess(cmd, log)
with open(final_out, 'w') as finalout:
for file in os.listdir(phixdir):
if file.endswith('.phix'):
with open(os.path.join(phixdir, file), 'r') as infile:
shutil.copyfileobj(infile, finalout)
shutil.rmtree(phixdir)
else:
cmd = [usearch, '-filter_phix', tmp_merge, '-output', final_out, '-threads', '1']
runSubprocess(cmd, log)
else:
os.rename(tmp_merge, final_out)
#count output
finalcount = countfastq(final_out)
SafeRemove(merge_out)
SafeRemove(skip_for)
SafeRemove(tmp_merge)
return phixcount, finalcount
def MergeReads(R1, R2, tmpdir, outname, read_length, minlen, usearch, rescue, method, index, mismatch):
removelist = []
if mismatch == 0 and index != '':
if checkBCinHeader(R1):
log.debug("Searching for index mismatches > 0: %s" % index)
removelist = illuminaBCmismatch(R1, R2, index)
log.debug("Removing %i reads with index mismatch > 0" % len(removelist))
pretrim_R1 = os.path.join(tmpdir, outname + '.pretrim_R1.fq')
pretrim_R2 = os.path.join(tmpdir, outname + '.pretrim_R2.fq')
log.debug("Removing index 3prime bp 'A' from reads")
trim3prime(R1, read_length, pretrim_R1, removelist)
trim3prime(R2, read_length, pretrim_R2, removelist)
#check that num sequences is identical
if not PEsanitycheck(pretrim_R1, pretrim_R2):
log.error("%s and %s are not properly paired, exiting" % (R1, R2))
sys.exit(1)
#next run USEARCH/vsearch mergepe
merge_out = os.path.join(tmpdir, outname + '.merged.fq')
skip_for = os.path.join(tmpdir, outname + '.notmerged.R1.fq')
report = os.path.join(tmpdir, outname +'.merge_report.txt')
log.debug("Now merging PE reads")
if method == 'usearch':
cmd = [usearch, '-fastq_mergepairs', pretrim_R1, '-reverse', pretrim_R2, '-fastqout', merge_out, '-fastq_trunctail', '5', '-fastqout_notmerged_fwd', skip_for,'-minhsp', '12','-fastq_maxdiffs', '8', '-report', report, '-fastq_minmergelen', str(minlen)]
else:
cmd = ['vsearch', '--fastq_mergepairs', pretrim_R1, '--reverse', pretrim_R2, '--fastqout', merge_out, '--fastqout_notmerged_fwd', skip_for, '--fastq_minmergelen', str(minlen), '--fastq_allowmergestagger']
runSubprocess(cmd, log)
#now concatenate files for downstream pre-process_illumina.py script
final_out = os.path.join(tmpdir, outname)
tmp_merge = os.path.join(tmpdir, outname+'.tmp')
with open(tmp_merge, 'w') as cat_file:
shutil.copyfileobj(open(merge_out,'r'), cat_file)
if rescue == 'on':
shutil.copyfileobj(open(skip_for,'r'), cat_file)
#run phix removal
#since most users have 32 bit usearch, check size of file, if > 3 GB, split into parts
log.debug("Removing phix from %s" % outname)
phixsize = getSize(tmp_merge)
phixcount = countfastq(tmp_merge)
log.debug('File Size: %i bytes' % phixsize)
if phixsize > 3e9:
log.debug('FASTQ > 3 GB, splitting FASTQ file into chunks to avoid potential memory problems with 32 bit usearch')
phixdir = os.path.join(tmpdir, 'phix_'+str(os.getpid()))
os.makedirs(phixdir)
num = int(phixsize / 3e9) + 1
split_fastq(tmp_merge, phixcount, phixdir, int(num))
for file in os.listdir(phixdir):
if file.endswith(".fq"):
output = os.path.join(phixdir, file+'.phix')
file = os.path.join(phixdir, file)
cmd = [usearch, '-filter_phix', file, '-output', output]
runSubprocess(cmd, log)
with open(final_out, 'w') as finalout:
for file in os.listdir(phixdir):
if file.endswith('.phix'):
with open(os.path.join(phixdir, file), 'r') as infile:
shutil.copyfileobj(infile, finalout)
shutil.rmtree(phixdir)
else:
cmd = [usearch, '-filter_phix', tmp_merge, '-output', final_out]
runSubprocess(cmd, log)
#count output
origcount = countfastq(R1)
finalcount = countfastq(final_out)
log.debug("Removed %i reads that were phiX" % (origcount - finalcount - len(removelist)))
pct_out = finalcount / float(origcount)
#clean and close up intermediate files
os.remove(merge_out)
os.remove(pretrim_R1)
os.remove(pretrim_R2)
os.remove(skip_for)
os.remove(tmp_merge)
return log.info('{0:,}'.format(finalcount) + ' reads passed ('+'{0:.1%}'.format(pct_out)+')')
def validateorientation(tmp, reads, otus, output):
orientcounts = os.path.join(tmp, 'orient.uc')
cmd = ['vsearch', '--usearch_global', reads, '--db', otus, '--sizein', '--id', '0.97', '--strand', 'plus', '--uc', orientcounts]
runSubprocess(cmd, log)
OTUCounts = {}
with open(orientcounts, 'r') as countdata:
for line in countdata:
line = line.rstrip()
cols = line.split('\t')
ID = cols[9]
if ID == '*':
continue
size = cols[8].split('size=')[-1].replace(';', '')
if not ID in OTUCounts:
OTUCounts[ID] = int(size)
else:
OTUCounts[ID] += int(size)
orientmap = os.path.join(tmp, 'orient-map.txt')
cmd = ['vsearch', '--usearch_global', otus, '--db', otus, '--self', '--id', '0.95', '--strand', 'both', '--userout', orientmap, '--userfields', 'query+target+qstrand+id']
runSubprocess(cmd, log)
orient_remove = []
keeper = []
with open(orientmap, 'r') as selfmap:
for line in selfmap:
line = line.rstrip()
cols = line.split('\t')
if cols[2] == '-':
qCount = OTUCounts.get(cols[0])
tCount = OTUCounts.get(cols[1])
if qCount > tCount:
if not cols[1] in orient_remove and not cols[1] in keeper:
orient_remove.append(cols[1])
if not cols[0] in keeper:
keeper.append(cols[0])
else:
if not cols[0] in orient_remove and not cols[0] in keeper:
orient_remove.append(cols[0])
if not cols[1] in keeper:
keeper.append(cols[1])
log.debug('Dropping {:,} OTUs: {:}'.format(len(orient_remove), ', '.join(orient_remove)))
count = 0
with open(output, 'w') as outfile:
with open(otus, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if not rec.id in orient_remove:
count += 1
SeqIO.write(rec, outfile, 'fasta')
return count, len(orient_remove)
def validateorientationDADA2(OTUCounts, otus, output):
orientmap = 'orient-map.txt'
cmd = ['vsearch', '--usearch_global', otus, '--db', otus, '--self', '--id', '0.95', '--strand', 'both', '--userout', orientmap, '--userfields', 'query+target+qstrand+id']
runSubprocess(cmd, log)
orient_remove = []
keeper = []
with open(orientmap, 'r') as selfmap:
for line in selfmap:
line = line.rstrip()
cols = line.split('\t')
if cols[2] == '-':
qCount = OTUCounts.get(cols[0])
tCount = OTUCounts.get(cols[1])
if qCount > tCount:
if not cols[1] in orient_remove and not cols[1] in keeper:
orient_remove.append(cols[1])
if not cols[0] in keeper:
keeper.append(cols[0])
else:
if not cols[0] in orient_remove and not cols[0] in keeper:
orient_remove.append(cols[0])
if not cols[1] in keeper:
keeper.append(cols[1])
log.debug('Dropping {:,} OTUs: {:}'.format(len(orient_remove), ', '.join(natsorted(orient_remove))))
count = 0
with open(output, 'w') as outfile:
with open(otus, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if not rec.id in orient_remove:
count += 1
SeqIO.write(rec, outfile, 'fasta')
SafeRemove(orientmap)
return count, len(orient_remove)
def dictFlip(input):
#flip the list of dictionaries
outDict = {}
for k,v in input.items():
for i in v:
if not i in outDict:
outDict[i] = k
else:
print("duplicate ID found: %s" % i)
return outDict
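# Example (illustrative): dictFlip maps each member ID back to its group key,
# e.g. dictFlip({'OTU1': ['read1', 'read2'], 'OTU2': ['read3']})
# returns {'read1': 'OTU1', 'read2': 'OTU1', 'read3': 'OTU2'}.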
def classifier2dict(input, pcutoff):
ClassyDict = {}
with open(input, 'r') as infile:
for line in infile:
cols = line.split('\t')
ID = cols[0]
tax = cols[1].split(',')
passtax = []
scores = []
hit = False
for i,level in enumerate(tax):
if '(' in level:
score = level.split('(')[-1].replace(')', '')
if float(score) >= float(pcutoff):
hit = True
passtax.append(level.split('(')[0])
scores.append(score)
if hit:
if not ID in ClassyDict:
ClassyDict[ID] = (scores[-1], passtax)
return ClassyDict
def usearchglobal2dict(input):
GlobalDict = {}
with open(input, 'r') as infile:
for line in infile:
line = line.rstrip()
cols = line.split('\t')
ID = cols[0]
if cols[1] != '*':
tax = cols[1].split('tax=')[-1]
tax = tax.split(',')
pident = float(cols[-1]) / 100
besthit = cols[1].split(';tax=')[0]
else:
tax = ['No hit']
besthit = 'None'
pident = 0.0000
pident = "{0:.4f}".format(pident)
#since we can have multiple hits with same pident, need to get best taxonomy
if not ID in GlobalDict:
GlobalDict[ID] = [(pident, besthit, '', tax,)]
else:
GlobalDict[ID].append((pident, besthit, '', tax))
Results = {}
for k,v in natsorted(GlobalDict.items()):
mostTax = []
lcaTax = []
mt = 0
for hit in v:
if len(hit[-1]) > mt:
mt = len(hit[-1])
#loop through getting the ones with most tax info
for x in v:
if len(x[-1]) == mt:
mostTax.append(x)
lcaTax.append(x[-1])
#now if mostTax has more than 1 hit, need to do LCA
if len(mostTax) <= 1:
Results[k] = mostTax[0]
else:
lcaResult = lcaTax[0]
lcaFinal = []
for x in lcaTax[1:]:
s = set(x)
lcaFinal = [z for z in lcaResult if z in s]
lcaResult = lcaFinal
if len(lcaResult) < mt:
Results[k] = (mostTax[0][0], mostTax[0][1], 'LCA', lcaResult)
else:
Results[k] = (mostTax[0][0], mostTax[0][1], '',lcaResult)
return Results
def bestclassifier(utax, sintax, otus):
#both are dictionaries from classifier2dict, otus is a list of OTUs in order
BestClassify = {}
for otu in otus:
sintaxhit, utaxhit, hit = (None,)*3
if otu in sintax:
sintaxhit = sintax.get(otu) #this should be okay...
if otu in utax:
utaxhit = utax.get(otu) #returns tuple of (score, [taxlist]
if sintaxhit and utaxhit:
if len(utaxhit[1]) > len(sintaxhit[1]):
hit = (utaxhit[0], 'U', utaxhit[1])
elif len(utaxhit[1]) == len(sintaxhit[1]):
if float(utaxhit[0]) >= float(sintaxhit[0]):
hit = (utaxhit[0], 'U', utaxhit[1])
else:
hit = (sintaxhit[0], 'S', sintaxhit[1])
else:
hit = (sintaxhit[0], 'S', sintaxhit[1])
elif not sintaxhit and utaxhit:
hit = (utaxhit[0], 'U', utaxhit[1])
elif sintaxhit and not utaxhit:
hit = (sintaxhit[0], 'S', sintaxhit[1])
BestClassify[otu] = hit
return BestClassify
def bestTaxonomy(usearch, classifier):
Taxonomy = {}
for k,v in natsorted(list(usearch.items())):
globalTax = v[-1] #this should be a list
try:
classiTax = classifier.get(k)[-1] #also should be list
except TypeError:
classiTax = None
pident = float(v[0])
besthitID = v[1]
LCA = v[2]
discrep = 'S'
if pident < 0.9700 and classiTax: #if global alignment hit is less than 97%, then default to classifier result
method = classifier.get(k)[1].split()[0] #get first letter, either U or S
score = float(classifier.get(k)[0])
tax = ','.join(classiTax)
fulltax = method+discrep+"|{0:.4f}".format(score)+'|'+besthitID+';'+tax
else: #should default to global alignment with option to update taxonomy from classifier if more information
method = 'G'
score = pident * 100
if classiTax and len(globalTax) < len(classiTax): #iterate through and make sure all levels match else stop where no longer matches
tax = ','.join(classiTax)
for lvl in globalTax:
if not lvl in classiTax: #now we have a problem
error = globalTax.index(lvl) #move one position backwards and keep only that level of taxonomy
discrep = 'D'
tax = ','.join(globalTax[:error])
break
else:
tax = ','.join(globalTax)
if discrep == 'S' and LCA == '':
fulltax = method+discrep+"|{0:.1f}".format(score)+'|'+besthitID+';'+tax
else:
fulltax = method+discrep+"L|{0:.1f}".format(score)+'|'+besthitID+';'+tax
Taxonomy[k] = fulltax
return Taxonomy
def utax2qiime(input, output):
domain = False
with open(output, 'w') as outfile:
outfile.write('#OTUID\ttaxonomy\n')
with open(input, 'r') as infile:
for line in infile:
if line.startswith('#'):
continue
line = line.replace('\n', '')
OTU = line.split('\t')[0]
tax = line.split('\t')[1]
try:
levels = tax.split(';')[1]
except IndexError:
levels = '*'
levels = levels.replace(',', ';')
if levels.startswith('d:'):
domain = True
changes = ['d','p','c','o','f','g','s']
else:
changes = ['k','p','c','o','f','g','s']
for i in changes:
levels = levels.replace(i+':', i+'__')
try:
levList = levels.split(';')
except IndexError:
levList = [levels]
#now loop through and add empty levels
if not domain:
if not levList[0].startswith('k__'):
levList = ['k__', 'p__', 'c__', 'o__', 'f__', 'g__', 's__']
if len(levList) < 2 and levList[0].startswith('k__'):
levList.extend(['p__', 'c__', 'o__', 'f__', 'g__', 's__'])
if len(levList) > 2 and not levList[2].startswith('c__'):
levList.insert(2,'c__')
if len(levList) > 3 and not levList[3].startswith('o__'):
levList.insert(3,'o__')
if len(levList) > 4 and not levList[4].startswith('f__'):
levList.insert(4,'f__')
if len(levList) > 5 and not levList[5].startswith('g__'):
levList.insert(5,'g__')
if len(levList) > 6 and not levList[6].startswith('s__'):
levList.insert(6,'s__')
else:
if not levList[0].startswith('d__'):
levList = ['d__','p__', 'c__', 'o__', 'f__', 'g__', 's__']
if len(levList) < 2 and levList[0].startswith('d__'):
levList.extend(['p__', 'c__', 'o__', 'f__', 'g__', 's__'])
if len(levList) > 2 and not levList[2].startswith('c__'):
levList.insert(2,'c__')
if len(levList) > 3 and not levList[3].startswith('o__'):
levList.insert(3,'o__')
if len(levList) > 4 and not levList[4].startswith('f__'):
levList.insert(4,'f__')
if len(levList) > 5 and not levList[5].startswith('g__'):
levList.insert(5,'g__')
if len(levList) > 6 and not levList[6].startswith('s__'):
levList.insert(6,'s__')
outfile.write('%s\t%s\n' % (OTU, ';'.join(levList)))
def barcodes2dict(input, mapDict, bcmismatch):
#here expecting the index reads from illumina, create dictionary for naming?
Results = {}
NoMatch = []
for title, seq, qual in FastqGeneralIterator(gzopen(input)):
hit = [None, None, None]
titlesplit = title.split(' ')
readID = titlesplit[0]
orient = titlesplit[1]
if orient.startswith('2:'):
seq = RevComp(seq)
if seq in mapDict:
BC = mapDict.get(seq)
hit = [BC,seq, 0]
else:
for k,v in list(mapDict.items()):
alignment = edlib.align(k, seq, mode="NW", k=bcmismatch)
if alignment["editDistance"] < 0:
continue
if hit[0]:
oldhit = hit[2]
if alignment["editDistance"] < oldhit:
hit = [v, k, alignment["editDistance"]]
else:
hit = [v, k, alignment["editDistance"]]
if not hit[0]: #no match, discard read
NoMatch.append(readID)
continue
if not readID in Results:
Results[readID] = (hit[0], hit[1], hit[2])
return Results, NoMatch
def RevComp(s):
rev_comp_lib = {'A':'T','C':'G','G':'C','T':'A','U':'A','M':'K','R':'Y','W':'W','S':'S','Y':'R','K':'M','V':'B','H':'D','D':'H','B':'V','X':'X','N':'N'}
cseq = ''
n = len(s)
for i in range(0,n):
c = s[n-i-1]
cseq += rev_comp_lib[c.upper()]
return cseq
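# Example (illustrative): RevComp('ATGCN') returns 'NGCAT'; IUPAC ambiguity codes
# are handled via the lookup table above and lowercase input is uppercased.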
def mapping2dict(input):
#parse a qiime mapping file pull out seqs and ID into dictionary
MapDict = {}
IDs = []
with open(input, 'r') as inputfile:
for line in inputfile:
if line.startswith('#'):
continue
cols = line.split('\t')
ID = cols[0]
Seq = cols[1]
if not Seq in MapDict:
MapDict[Seq] = ID
else:
log.error("duplicate BC seq found %s: %s" % (Seq, ID))
if not ID in IDs:
IDs.append(ID)
else:
log.error("duplicate ID in mapping file: %s, exiting" (ID))
sys.exit(1)
return MapDict
def runMultiProgress(function, inputList, cpus, args=False):
#setup pool
p = multiprocessing.Pool(cpus)
#setup results and split over cpus
tasks = len(inputList)
results = []
for i in inputList:
results.append(p.apply_async(function, args=([i]), kwds={'args':args}))
#refresh pbar every 5 seconds
while True:
incomplete_count = sum(1 for x in results if not x.ready())
if incomplete_count == 0:
break
sys.stdout.write(" Progress: %.2f%% \r" % (float(tasks - incomplete_count) / tasks * 100))
sys.stdout.flush()
time.sleep(1)
p.close()
p.join()
def batch_iterator(iterator, batch_size):
entry = True #Make sure we loop once
while entry :
batch = []
while len(batch) < batch_size :
try :
entry = next(iterator)
except StopIteration :
entry = None
if entry is None :
#End of file
break
batch.append(entry)
if batch :
yield batch
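# Example (illustrative): splitting a plain iterator into fixed-size chunks,
# e.g. list(batch_iterator(iter(range(7)), 3)) yields [[0, 1, 2], [3, 4, 5], [6]].
# The same pattern works with a SeqIO.parse() record iterator for chunked FASTQ output.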
def setupLogging(LOGNAME):
global log
if 'darwin' in sys.platform:
stdoutformat = logging.Formatter(colr.GRN+'%(asctime)s'+colr.END+': %(message)s', datefmt='[%b %d %I:%M %p]')
else:
stdoutformat = logging.Formatter('%(asctime)s: %(message)s', datefmt='[%I:%M:%S %p]')
fileformat = logging.Formatter('%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sth = logging.StreamHandler()
sth.setLevel(logging.INFO)
sth.setFormatter(stdoutformat)
log.addHandler(sth)
fhnd = logging.FileHandler(LOGNAME)
fhnd.setLevel(logging.DEBUG)
fhnd.setFormatter(fileformat)
log.addHandler(fhnd)
def FastMaxEEFilter(input, trunclen, maxee, output):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
with open(output, 'w') as out:
with open(input, 'r') as file:
for title, seq, qual in FastqGeneralIterator(file):
trunclen = int(trunclen)
Seq = seq[:trunclen]
Qual = qual[:trunclen]
ee = 0
for bp, Q in enumerate(Qual):
q = int(ASCII.get(Q))
P = 10**(float(-q)/10)
ee += P
if ee <= float(maxee):
out.write("@%s\n%s\n+\n%s\n" % (title, Seq, Qual))
def MaxEEFilter(input, maxee):
from Bio import SeqIO
with open(input, 'r') as f:
for rec in SeqIO.parse(f, "fastq"):
ee = 0
for bp, Q in enumerate(rec.letter_annotations["phred_quality"]):
P = 10**(float(-Q)/10)
ee += P
if ee <= float(maxee):
rec.name = ""
rec.description = ""
yield rec
def dereplicate(input, output):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
seqs = {}
with open(input, 'r') as file:
for title, sequence, qual in FastqGeneralIterator(file):
if sequence not in seqs:
if title.endswith(';'):
seqs[sequence]=title+'size=1;'
else:
seqs[sequence]=title+';size=1;'
else:
count = int(seqs[sequence].split('=')[-1].rstrip(';')) + 1
formated_string = seqs[sequence].rsplit('=', 1)[0]+'='+str(count)+';'
seqs[sequence] = formated_string
with open(output, 'w') as out:
for sequence in seqs:
out.write('>'+seqs[sequence]+'\n'+sequence+'\n')
def convertSize(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
def faqual2fastq(fasta, qual, fastq):
global skipCount
from Bio.SeqIO.QualityIO import PairedFastaQualIterator
with open(fastq, 'w') as output:
records = PairedFastaQualIterator(open(fasta), open(qual))
for rec in records:
try:
SeqIO.write(rec, output, 'fastq')
except ValueError:
skipCount += 1
return skipCount
def checkfastqsize(input):
filesize = os.path.getsize(input)
return filesize
def fastqreindex(input, output):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
count = 1
with open(output, 'w') as out:
with open(input, 'r') as fastq:
for title, sequence, qual in FastqGeneralIterator(fastq):
cols = title.split(';')
header = 'R_'+str(count)+';'+cols[1]+';'
count += 1
out.write("@%s\n%s\n+\n%s\n" % (header, sequence, qual))
def which(name):
try:
with open(os.devnull) as devnull:
diff = ['tbl2asn', 'dustmasker', 'mafft']
if not any(name in x for x in diff):
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
else:
subprocess.Popen([name, '--version'], stdout=devnull, stderr=devnull).communicate()
except OSError as e:
# os.errno was removed in newer Python 3 releases; use the errno module instead
import errno
if e.errno == errno.ENOENT:
return False
return True
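# Example (illustrative): which('vsearch') returns True when the executable can be
# launched from the current PATH and False when the OS reports it as missing.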
def CheckDependencies(input):
missing = []
for p in input:
if which(p) == False:
missing.append(p)
if missing != []:
error = ", ".join(missing)
log.error("Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
sys.exit(1)
def fastarename(input, relabel, output):
from Bio.SeqIO.FastaIO import FastaIterator
with open(output, 'w') as outfile:
counter = 1
for record in FastaIterator(open(input)):
newName = relabel+str(counter)
outfile.write(">%s\n%s\n" % (newName, record.seq))
counter += 1
def fasta_strip_padding(file, output, stripsize=False):
from Bio.SeqIO.FastaIO import FastaIterator
with open(output, 'w') as outputfile:
for record in FastaIterator(gzopen(file)):
Seq = record.seq.rstrip('N')
if ';size=' in record.id:
record.id = record.id.split(';size=')[0]
outputfile.write(">%s\n%s\n" % (record.id, Seq))
def fastq_strip_padding(file, output):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
with open(output, 'w') as outputfile:
for title, seq, qual in FastqGeneralIterator(open(file)):
Seq = seq.rstrip('N')
Qual = qual[:len(Seq)]
assert len(Seq) == len(Qual)
outputfile.write("@%s\n%s\n+\n%s\n" % (title, Seq, Qual))
def ReverseComp(input, output):
with open(output, 'w') as revcomp:
with open(input, 'r') as fasta:
for rec in SeqIO.parse(fasta, 'fasta'):
revcomp.write(">%s\n%s\n" % (rec.id, rec.seq.reverse_complement()))
def guess_csv_dialect(header):
""" completely arbitrary fn to detect the delimiter
:type header: str
:raise ValueError:
:rtype: csv.Dialect
"""
possible_delims = "\t,"
lines = header.split("\n")
if len(lines) < 2:
raise ValueError("CSV header must contain at least 1 line")
dialect = csv.Sniffer().sniff(header, delimiters=possible_delims)
return dialect
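# Example (illustrative): guess_csv_dialect('a\tb\tc\n1\t2\t3\n').delimiter is
# expected to be '\t', since only tab and comma are offered to csv.Sniffer.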
def fasta2list(input):
seqlist = []
with open(input, 'r') as inseq:
for rec in SeqIO.parse(inseq, 'fasta'):
if not rec.description in seqlist:
seqlist.append(rec.description)
return seqlist
def updateMappingFile(mapfile, barcode_count, output):
with open(output, 'w') as outfile:
with open(mapfile, 'r') as infile:
for line in infile:
line = line.rstrip()
cols = line.split('\t')
if line.startswith('#Sample'): #header row, look for DemuxReads
if 'DemuxReads' in cols:
loc = cols.index('DemuxReads')
elif 'phinchID' in cols:
loc = cols.index('phinchID')+1
cols.insert(loc,'DemuxReads')
else:
cols.append('DemuxReads')
loc = cols.index('DemuxReads')
outfile.write('{:}\n'.format('\t'.join(cols)))
else:
if cols[0] in barcode_count:
outfile.write('{:}\t{:}\t{:}\n'.format('\t'.join(cols[:loc]), str(barcode_count[cols[0]]), '\t'.join(cols[loc+1:])))
def CreateGenericMappingFile(barcode_dict, revbarcode_dict, fwd_primer, rev_primer, output, barcodes_found):
with open(output, 'w') as outfile:
outfile.write('#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRevBarcodeSequence\tReversePrimer\tphinchID\tDemuxReads\tTreatment\n')
for k,v in natsorted(barcodes_found.items()):
sample = k
Fsample,Rsample = (None,)*2
if ':-:' in sample:
Fsample, Rsample = sample.split(':-:')
count = v
forbarcode, revbarcode = ('no_data',)*2
if Fsample:
if Fsample in barcode_dict:
forbarcode = barcode_dict[Fsample]
else:
if sample in barcode_dict:
forbarcode = barcode_dict[sample]
if Rsample:
if Rsample in revbarcode_dict:
revbarcode = revbarcode_dict[Rsample]
else:
if sample in revbarcode_dict:
revbarcode = revbarcode_dict[sample]
outfile.write('%s\t%s\t%s\t%s\t%s\t%s\t%i\t%s\n' % (sample, forbarcode, fwd_primer, revbarcode, rev_primer, sample, count, 'no_data'))
def CreateGenericMappingFileIllumina(samples, fwd_primer, rev_primer, output, barcodes):
with open(output, 'w') as outfile:
outfile.write('#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRevBarcodeSequence\tReversePrimer\tphinchID\tDemuxReads\tTreatment\n')
for k,v in natsorted(list(samples.items())):
count = barcodes.get(k, 0)
if count > 0:
if '-' in v:
forbarcode,revbarcode = v.split('-')
else:
forbarcode = v
revbarcode = 'no_data'
outfile.write('%s\t%s\t%s\t%s\t%s\t%s\t%i\t%s\n' % (k, forbarcode, fwd_primer, revbarcode, rev_primer, k, int(count), "no_data"))
def parseMappingFile(input, output):
'''
function to parse mapping file pull out primers and barcode sequences
'''
fwdprimer = ''
revprimer = ''
with open(output, 'w') as outfile:
with open(input, 'r') as inputfile:
for line in inputfile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
outfile.write('>%s\n%s\n' % (cols[0], cols[1]))
match = edlib.align(cols[1], cols[2], mode="HW", k=0)
if match["editDistance"] == 0:
Trim = match["locations"][0][1]+1
if fwdprimer == '':
fwdprimer = cols[2][Trim:]
revprimer = cols[3]
return (fwdprimer, revprimer)
def getMappingHeaderIndexes(input):
IDx,FBCx,RBCx,FPx,RPx = (None,)*5
with open(input, 'r') as infile:
for line in infile:
line = line.rstrip('\n')
if line.startswith('#'):
cols = line.split('\t')
if '#SampleID' in cols:
IDx = cols.index('#SampleID')
if 'BarcodeSequence' in cols:
FBCx = cols.index('BarcodeSequence')
if 'LinkerPrimerSequence' in cols:
FPx = cols.index('LinkerPrimerSequence')
if 'RevBarcodeSequence' in cols:
RBCx = cols.index('RevBarcodeSequence')
if 'ReversePrimer' in cols:
RPx = cols.index('ReversePrimer')
return IDx, FBCx, FPx, RBCx, RPx
exampleMapFile='#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRevBarcodeSequence\tReversePrimer\tphinchID\tTreatment\n'
def parseMappingFileNEW(input):
'''
function to parse mapping file pull out primers and barcode sequences
'''
results = {}
ForBCDict = {}
RevBCDict = {}
IDx, FBCx, FPx, RBCx, RPx = getMappingHeaderIndexes(input)
if any(x is None for x in [IDx, FBCx, FPx, RPx]):
log.error('Mapping file incorrectly formatted, headers should be (RevBarcodeSequence is optional):\n{:}'.format(exampleMapFile))
sys.exit(1)
if not RBCx:
log.debug('Mapping file missing header: "RevBarcodeSequence", skipping reverse barcodes')
with open(input, 'r') as inputfile:
for line in inputfile:
line = line.rstrip()
if line.startswith('#'):
continue
cols = line.split('\t')
if len(cols) < 4:
continue
ID = cols[IDx]
FBC = cols[FBCx]
FP = cols[FPx]
if FBC in FP: #barcode nested in primer_db
loc = FP.index(FBC) + len(FBC)
FP = FP[loc:]
RP = cols[RPx]
if RBCx:
RBC = cols[RBCx]
if RBC != '':
if RBC in RP:
loc = RP.index(RBC) + len(RBC)
RP = RP[loc:]
else:
RBC = None
else:
RBC = None
if not ID in results:
results[ID] = {'ForBarcode': FBC, 'ForPrimer': FP, 'RevBarcode': RBC, 'RevPrimer': RP}
ForBCDict[ID] = FBC
if RBC:
RevBCDict[ID] = RBC
else:
log.error('Please fix duplicate SampleID detected in mapping file: {:}'.format(ID))
sys.exit(1)
return results, ForBCDict, RevBCDict, FP, RP
def parseMappingFileIllumina(input):
fwdprimer = ''
revprimer = ''
samples = []
with open(input, 'r') as inputfile:
for line in inputfile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
if not cols[0] in samples:
samples.append(cols[0])
match = edlib.align(cols[1], cols[2], mode="HW", k=0)
if match["editDistance"] == 0:
Trim = match["locations"][0][1]+1
if fwdprimer == '':
fwdprimer = cols[2][Trim:]
revprimer = cols[3]
else:
fwdprimer = cols[2]
revprimer = cols[3]
return (samples, fwdprimer, revprimer)
def removefile(input):
if os.path.isfile(input):
os.remove(input)
| {
"repo_name": "nextgenusfs/ufits",
"path": "amptk/amptklib.py",
"copies": "1",
"size": "84854",
"license": "bsd-2-clause",
"hash": 4554148208261997600,
"line_mean": 38.5958936071,
"line_max": 259,
"alpha_frac": 0.5360855116,
"autogenerated": false,
"ratio": 3.5844210704177755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591792879580539,
"avg_score": 0.005742740487447303,
"num_lines": 2143
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
install = False
base_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(base_path, 'python'))
if len(sys.argv) >= 2 and sys.argv[1] == '--install':
install_only = True
else:
install_only = False
try:
import uflash
except:
install = True
if __name__ == '__main__':
if install:
uflash_link = "https://raw.githubusercontent.com/ntoll/uflash/e3eeb6504089963683f4cc141bba8901752cef8d/uflash.py"
try:
from urllib.request import urlopen
except:
from urllib import urlopen
resp = urlopen(uflash_link)
text = resp.read()
install_dir = os.path.join(base_path, 'python')
if not os.path.isdir(install_dir):
os.mkdir(install_dir)
with open(os.path.join(install_dir, 'uflash.py'), 'wb') as f:
f.write(text)
f.flush()
print('Local uflash installed')
try:
import uflash
except:
pass
if not install_only:
uflash.main(sys.argv[1:])
| {
"repo_name": "Giannie/atom-microbit-python",
"path": "lib/microbit-python.py",
"copies": "1",
"size": "1178",
"license": "mit",
"hash": 3028820889007108600,
"line_mean": 30,
"line_max": 121,
"alpha_frac": 0.5925297114,
"autogenerated": false,
"ratio": 3.4955489614243325,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4588078672824332,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__all__ = ['save_to_regions', 'save_galaxy_to_regions', 'save_to_dsim']
from itertools import cycle
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
from .utils import mask_to_sky
def save_to_regions(mask, center, writeto=None):
'''
mask is a Mask, center is a SkyCoord, writeto is the output file name
'''
with open(writeto, 'w') as f:
f.write('# Region file format: DS9 version 4.1\n')
f.write('global color=red move=0 select=0\n')
f.write('j2000\n')
ra_str, dec_str = center.to_string('hmsdms').split(' ')
name = mask.name + '_PA{:0.1f}'.format(mask.mask_pa)
x, y = mask.slit_positions()
ra_offsets, dec_offsets = mask_to_sky(x, y, mask.mask_pa)
ra = (ra_offsets / np.cos(center.dec.radian) + center.ra.arcsec) * u.arcsec
dec = (dec_offsets + center.dec.arcsec) * u.arcsec
coords = SkyCoord(ra, dec)
for i, slit in enumerate(mask.slits):
name = slit.name
ra, dec = coords[i].to_string('hmsdms', sep=':').split()
pa = '{:.2f}'.format(slit.pa)
height = '{:.2f}'.format(slit.length) + '\"'
width = '{:.2f}'.format(slit.width) + '\"'
line = 'box(' + ', '.join([ra, dec, width, height, pa]) + ') # text={' + name + '}\n'
f.write(line)
def save_galaxy_to_regions(galaxy, writeto=None, annotate=False):
with open(writeto, 'w') as f:
f.write('# Region file format: DS9 version 4.1\n')
f.write('global color=red move=0 select=0\n')
f.write('j2000\n')
center = galaxy.center
ra_str, dec_str = center.to_string('hmsdms').split(' ')
colors = cycle(['red', 'green', 'blue', 'magenta', 'cyan', 'yellow'])
for mask in galaxy.masks:
color = next(colors)
x, y = mask.slit_positions()
ra_offsets, dec_offsets = mask_to_sky(x, y, mask.mask_pa)
ra = (ra_offsets / np.cos(center.dec.radian) + center.ra.arcsec) * u.arcsec
dec = (dec_offsets + center.dec.arcsec) * u.arcsec
coords = SkyCoord(ra, dec)
for i, slit in enumerate(mask.slits):
name = mask.name[0] + slit.name
ra, dec = coords[i].to_string('hmsdms', sep=':').split()
pa = '{:.2f}'.format(slit.pa)
height = '{:.2f}'.format(slit.length) + '\"'
width = '{:.2f}'.format(slit.width) + '\"'
line = 'box(' + ', '.join([ra, dec, width, height, pa])
if annotate:
line += ') # color=' + color + ' text={' + name + '}\n'
else:
line += ') # color=' + color + '\n'
f.write(line)
def save_to_dsim(mask, center, writeto=None):
'''
mask is a Mask, center is a SkyCoord, writeto is the output file name
'''
with open(writeto, 'w') as f:
ra_str, dec_str = center.to_string('hmsdms').split(' ')
name = mask.name + '_PA{:0.1f}'.format(mask.mask_pa)
header = '\t'.join([name, ra_str, dec_str, '2000.0', 'PA={:.2f}'.format(mask.mask_pa)]) + '\n\n'
f.write(header)
x, y = mask.slit_positions()
ra_offsets, dec_offsets = mask_to_sky(x, y, mask.mask_pa)
ra = (ra_offsets / np.cos(center.dec.radian) + center.ra.arcsec) * u.arcsec
dec = (dec_offsets + center.dec.arcsec) * u.arcsec
coords = SkyCoord(ra, dec)
for i, slit in enumerate(mask.slits):
name = slit.name + ' ' * (16 - len(slit.name))
ra, dec = coords[i].to_string('hmsdms', sep=':').split()
pa = '{:.2f}'.format(slit.pa)
half_len = '{:.2f}'.format(slit.length / 2)
width = '{:.2f}'.format(slit.width)
line = name + '\t'.join([ra, dec, '2000.0', '0', 'R', '100', '1', '1',
pa, half_len, width]) + '\n'
f.write(line)
| {
"repo_name": "adwasser/masktools",
"path": "masktools/superskims/outputs.py",
"copies": "1",
"size": "4094",
"license": "mit",
"hash": -3929564301761052000,
"line_mean": 45.5227272727,
"line_max": 104,
"alpha_frac": 0.5217391304,
"autogenerated": false,
"ratio": 3.1371647509578544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41589038813578544,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from ast import literal_eval
from builtins import *
from future.utils import iteritems
import os
import json
import logging
from brave.palettes import *
from brave.notebook_display import *
ipython = try_import_ipython()
logger = logging.getLogger(__name__)
__mode = 'script'
__EMBEDDED_INITIALIZED = False
def start_notebook_mode(in_iframe=False):
if ipython is None:
raise ImportError('start_notebook_mode can only run inside an IPython Notebook.')
global __mode
global __EMBEDDED_INITIALIZED
if in_iframe:
__mode = 'iframe'
else:
__mode = 'embedded'
if not __EMBEDDED_INITIALIZED:
ipython.display.display({'application/javascript': get_init_script()}, raw=True)
def save(html, path):
with open(path, 'w', encoding='utf-8') as f:
f.write(html)
def brave(docData, collData, save_to_path=None, width=800, height=600):
if save_to_path is None and __mode == 'embedded':
html = get_embedded_html(json.dumps(collData, indent=4, sort_keys=True), json.dumps(docData, indent=4, sort_keys=True))
return HtmlContainer(html)
parent = os.path.dirname(__file__)
parent = os.path.dirname(parent)
fn = os.path.join(parent, 'templates', 'embedded_brat__template.html')
template = open(fn, encoding='utf-8').read()
template = template.replace("{0}", json.dumps(collData, indent=4, sort_keys=True))
html = template.replace("{1}", json.dumps(docData, indent=4, sort_keys=True))
if save_to_path:
save(html, save_to_path)
if __mode == 'iframe':
if save_to_path:
eff_path = save_to_path
else:
eff_path = 'temp_visual.html'
save(html, eff_path)
ret_val = HtmlContainer("""<iframe
width="{width}"
height="{height}"
src="{src}"
frameborder="0"
allowfullscreen
></iframe>
""".format(src=eff_path,
width=width,
height=height))
else:
ret_val = html
return ret_val
def brave_simple(doc_data, save_to_path=None, width=800, height=600):
"""
This method currently supports only entities and relations!
Args:
doc_data:
save_to_path:
width:
height:
Returns:
"""
brave_data = BraveData(doc_data)
return brave(brave_data.doc_data, brave_data.coll_data, save_to_path=save_to_path, width=width, height=height)
def brave_compare(true_doc_data, pred_doc_data, true_suffix='*', pred_suffix='', save_to_path=None, width=800, height=600):
"""
This method currently supports only entities and relations!
Args:
true_doc_data:
pred_doc_data:
true_suffix:
pred_suffix:
save_to_path:
width:
height:
Returns:
"""
if true_doc_data['text'] != pred_doc_data['text']:
raise ValueError('The text should be equal in both true_doc_data and pred_doc_data')
if true_suffix == pred_suffix:
raise ValueError('true_suffix should be different than pred_suffix')
ret_val = {}
ret_val['text'] = true_doc_data['text']
add_suffix(ret_val, true_doc_data, suffix=true_suffix)
add_suffix(ret_val, pred_doc_data, suffix=pred_suffix)
return brave_simple(ret_val, save_to_path=save_to_path, width=width, height=height)
def add_suffix(ret_val, doc_data, suffix='*'):
ret_val['entities'] = ret_val.get('entities', [])
for key, type_, span in doc_data.get('entities', []):
ret_val['entities'].append((key + suffix, type_ + suffix, span))
ret_val['triggers'] = ret_val.get('triggers', [])
for key, type_, span in doc_data.get('triggers', []):
ret_val['triggers'].append((key + suffix, type_ + suffix, span))
ret_val['attributes'] = ret_val.get('attributes', [])
for key, type_, ent_key in doc_data.get('attributes', []):
ret_val['attributes'].append((key + suffix, type_ + suffix, ent_key + suffix))
ret_val['relations'] = ret_val.get('relations', [])
for key, type_, lst in doc_data.get('relations', []):
new_lst = []
for role, ent_key in lst:
new_lst.append((role, ent_key + suffix))
ret_val['relations'].append((key + suffix, type_ + suffix, new_lst))
ret_val['events'] = ret_val.get('events', [])
for key, trigger_key, lst in doc_data.get('events', []):
new_lst = []
for role, ent_key in lst:
new_lst.append((role, ent_key + suffix))
ret_val['events'].append((key + suffix, trigger_key + suffix, new_lst))
class HtmlContainer(object):
def __init__(self, html):
self.html = html
def _repr_html_(self):
return self.html
class BraveData(object):
def __init__(self, doc_data, coll_data=None):
self.doc_data = doc_data
if coll_data is not None:
self.coll_data = coll_data
else:
self.coll_data = {}
self.__parse_entities()
self.__parse_relations()
def __parse_entities(self):
self.ent_dict = dict([(x[0], x[1]) for x in self.doc_data['entities']])
ent_types = set(self.ent_dict.values())
range_ = range(0, len(entities_palettte), (len(entities_palettte) // len(ent_types)))
colors = [entities_palettte[i] for i in range_]
ent_colors = dict(zip(ent_types, colors))
entity_types = []
for name in ent_types:
t = {
'bgColor': ent_colors[name],
'borderColor': 'darken',
'labels': [name, name[0:3]],
'type': name
}
entity_types.append(t)
self.coll_data['entity_types'] = entity_types
def __parse_relations(self):
relation_args = {}
for rel in self.doc_data['relations']:
key, name, role_ents = rel
for role, ent_key in role_ents:
curr_roles = relation_args.get(name, {})
curr_types = curr_roles.get(role, set())
curr_types.add(self.ent_dict[ent_key])
curr_roles[role] = curr_types
relation_args[name] = curr_roles
range_ = range(0, len(relations_palette), (len(relations_palette) // len(relation_args.keys())))
colors = [relations_palette[i] for i in range_]
rel_colors = dict(zip(relation_args.keys(), colors))
relation_types = []
for name, args in iteritems(relation_args):
rel_dict = {
'args': [{'role': role, 'targets': list(targets)} for role, targets in iteritems(args)],
'color': rel_colors[name],
'dashArray': '3,3',
'labels': [name, name[0:3]],
'type': name
}
relation_types.append(rel_dict)
self.coll_data['relation_types'] = relation_types
def merge_doc_datas(*docs):
"""
Merges several docDatas into one, updating values and indexes as necessary.
***Currently supports only Entities and Relations***
Args:
*docs:
Returns: docData
"""
res = {"text": "", "entities": [], "relations": []}
offsets = [0]
t_index = 0
r_index = 0
for i, doc in enumerate(docs):
# Offset initialization
offset = offsets[i]
# Update doc
doc["entities"] = update_doc_data_entities(doc["entities"], offset, t_index)
doc["relations"] = update_doc_data_relations(doc["relations"], r_index, t_index)
# Update indexes
t_index = int(doc["entities"][-1][0][1:])
r_index = int(doc["relations"][-1][0][1:])
# Extend res
res["text"] += (doc["text"] + "\n")
res["entities"].extend(doc["entities"])
res["relations"].extend(doc["relations"])
# Update offsets
offsets.append(len(res["text"]))
return res
def update_doc_data_entities(entity, offset, t_index):
indexes, types, spans = zip(*entity)
indexes = ["T" + str(int(ind[1:]) + t_index) for ind in indexes]
new_spans = []
for span in spans:
new_span = increase_spans(span, offset)
new_spans.append(new_span)
res = zip(indexes, types, new_spans)
res = [list(ent) for ent in res]
return res
def update_doc_data_relations(relation, r_index, t_index):
indexes, types, entities = zip(*relation)
indexes = ["R" + str(int(ind[1:]) + r_index) for ind in indexes]
entities = [[[t1[0], "T" + str(int(t1[1][1:]) + t_index)], [t2[0], "T" + str(int(t2[1][1:]) + t_index)]] for t1, t2
in entities]
res = zip(indexes, types, entities)
res = [list(ent) for ent in res]
return res
def increase_spans(spans_input, x):
if type(spans_input) == str: spans_input = literal_eval(spans_input)
groups = []
for span in spans_input:
span[0] += x
span[1] += x
groups.append(span)
return groups
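# Example (illustrative): increase_spans([[0, 5], [12, 20]], 100) returns
# [[100, 105], [112, 120]]; string input such as '[[0, 5]]' is parsed with
# literal_eval before the offset is applied.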
| {
"repo_name": "chorusai/brave",
"path": "brave/_brave.py",
"copies": "1",
"size": "9141",
"license": "apache-2.0",
"hash": 23207818562231096,
"line_mean": 31.3003533569,
"line_max": 127,
"alpha_frac": 0.5722568647,
"autogenerated": false,
"ratio": 3.5361702127659576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.960405041615124,
"avg_score": 0.0008753322629434837,
"num_lines": 283
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astroid import MANAGER, Class, Instance, Function, Arguments, Pass
def transform_model_class(cls):
if cls.is_subtype_of('django.db.models.base.Model'):
core_exceptions = MANAGER.ast_from_module_name('django.core.exceptions')
# add DoesNotExist exception
DoesNotExist = Class('DoesNotExist', None)
DoesNotExist.bases = core_exceptions.lookup('ObjectDoesNotExist')[1]
cls.locals['DoesNotExist'] = [DoesNotExist]
# add MultipleObjectsReturned exception
MultipleObjectsReturned = Class('MultipleObjectsReturned', None)
MultipleObjectsReturned.bases = core_exceptions.lookup(
'MultipleObjectsReturned')[1]
cls.locals['MultipleObjectsReturned'] = [MultipleObjectsReturned]
# add objects manager
if 'objects' not in cls.locals:
try:
Manager = MANAGER.ast_from_module_name(
'django.db.models.manager').lookup('Manager')[1][0]
QuerySet = MANAGER.ast_from_module_name(
'django.db.models.query').lookup('QuerySet')[1][0]
except IndexError:
pass
else:
if isinstance(Manager.body[0], Pass):
# for django >= 1.7
for func_name, func_list in QuerySet.locals.items():
if (not func_name.startswith('_') and
func_name not in Manager.locals):
func = func_list[0]
if (isinstance(func, Function) and
'queryset_only' not in func.instance_attrs):
f = Function(func_name, None)
f.args = Arguments()
Manager.locals[func_name] = [f]
cls.locals['objects'] = [Instance(Manager)]
# add id field
if 'id' not in cls.locals:
try:
AutoField = MANAGER.ast_from_module_name(
'django.db.models.fields').lookup('AutoField')[1][0]
except IndexError:
pass
else:
cls.locals['id'] = [Instance(AutoField)]
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/transformers/models.py",
"copies": "1",
"size": "2345",
"license": "mit",
"hash": 7649735360608835000,
"line_mean": 45.9,
"line_max": 80,
"alpha_frac": 0.5356076759,
"autogenerated": false,
"ratio": 4.69,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.57256076759,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astroid import MANAGER
from astroid.builder import AstroidBuilder
BASE_REQUEST_DEFINITION = """
from django.http import HttpResponse, HttpRequest
def request(self, *args, **kwargs):
resp = HttpResponse()
resp.client = self
resp.content = ''
resp.context = {}
resp.request = HttpRequest()
resp.templates = []
%s
return resp
"""
DJANGO_REQUEST_DEFINITION = BASE_REQUEST_DEFINITION % ''
DRF_REQUEST_DEFINITION = BASE_REQUEST_DEFINITION % 'resp.data = {}'
DJANGO_CLIENT_REQUEST = AstroidBuilder(
MANAGER).string_build(DJANGO_REQUEST_DEFINITION).locals['request']
DRF_CLIENT_REQUEST = AstroidBuilder(
MANAGER).string_build(DRF_REQUEST_DEFINITION).locals['request']
HTTP_METHODS = ('get', 'post', 'put', 'head', 'delete', 'options')
def transform_test_response(cls):
if cls.is_subtype_of('django.test.client.Client'):
for method in HTTP_METHODS:
cls.locals[method] = DJANGO_CLIENT_REQUEST
elif cls.is_subtype_of('rest_framework.test.APIClient'):
for method in HTTP_METHODS:
cls.locals[method] = DRF_CLIENT_REQUEST
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/transformers/testing.py",
"copies": "1",
"size": "1215",
"license": "mit",
"hash": 1876369248286100500,
"line_mean": 33.7142857143,
"line_max": 70,
"alpha_frac": 0.6839506173,
"autogenerated": false,
"ratio": 3.5526315789473686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4736582196247368,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astropy.coordinates import SkyCoord
from astropy import units as u
__all__ = ['table_to_regions']
def table_to_regions(table, writeto=None, color='red'):
'''
Parameters
----------
table : astropy Table object with USNO entries
writeto : str, filename for output
color : str, as recognized by ds9
Returns
-------
None
'''
coords = SkyCoord(table['RAJ2000'], table['DEJ2000'])
names = table['USNO-B1.0']
width = '4"'
height = '4"'
pa = '0'
with open(writeto, 'w') as f:
f.write('# Region file format: DS9 version 4.1\n')
f.write('global color=' + color + ' move=0 \n')
f.write('j2000\n')
for i in range(len(coords)):
ra, dec = coords[i].to_string('hmsdms', sep=':').split()
name = names[i]
line = 'box(' + ', '.join([ra, dec, width, height, pa]) + ') # text={' + name + '}\n'
f.write(line)
| {
"repo_name": "adwasser/masktools",
"path": "masktools/stars/outputs.py",
"copies": "1",
"size": "1051",
"license": "mit",
"hash": -6111002348045318000,
"line_mean": 29.9117647059,
"line_max": 97,
"alpha_frac": 0.5451950523,
"autogenerated": false,
"ratio": 3.445901639344262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9486970040983997,
"avg_score": 0.000825330132052821,
"num_lines": 34
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
from hexagondisasm import common
from hexagondisasm.common import InstructionTemplate, TemplateToken, TemplateBranch
from hexagondisasm.common import UnexpectedException
import re
class HexagonInstructionDecoder(object):
"""Hexagon instruction decoder.
Takes instruction definitions and processes them into instruction templates.
Attributes:
inst_def_list (List[InstructionDefintion]): List of instruction definitions saved during the parsing stage.
inst_template_list (List[InstructionTemplate]): List of instruction templates generated
by the decoder from the list of definitions.
"""
__slots__ = ['inst_def_list', 'inst_template_list']
def __init__(self):
"""Load the instruction definitions and convert it to instruction templates.
Creates the InstructionTemplate and processes it.
TODOs:
* All the calls in the loop could be done inside the InstructionTemplate
constructor, should it?
"""
self.inst_def_list = common.pickle_load(common.INST_DEF_PATH)
self.inst_template_list = [InstructionTemplate(inst_def) for inst_def in self.inst_def_list]
for template in self.inst_template_list:
self.analyze_branch(template)
self.resolve_constant_extender(template)
self.tokenize_syntax(template)
def tokenize_syntax(self, template):
"""Generate a list of tokens from the instruction syntax.
Takes the syntax string and split it in smaller strings (tokens). The split is
done to generate a link between the instruction operands and the substrings
that correspond to them, e.g., ``Rd=add(Rs,#s16)`` would be split like:
``['Rd', '=add(', 'Rs', ',', '#s16', ')']`` to isolate the three operand strings
(registers ``Rd``, ``Rs`` and immediate ``#s16``) from the rest of the
syntax string.
The substrings are later used to generate TemplateToken objects, which are composed
of a string with its associated operand (if it exists).
Args:
template (InstructionTemplate): to be processed.
Returns:
None: the data is applied to the template itself.
TODOs:
* Should the 2 steps (split and match) be done together?
"""
tokens = [template.syntax] # type: List[str]
# The syntax will be split into this list of strings that will later be
# used to create the template tokens.
for op in template.reg_ops + template.imm_ops: # type: InstructionOperand
new_tokens = [] # type: List[str]
# New tokens generated from the current tokens, updated at the end of the loop.
for str_token in tokens:
new_tokens.extend(
re.split('(' + op.syntax_name + ')', str_token)
)
# If an operand is found in the current token, split it to isolate
# the operand; re.split is used because, unlike string.split, it doesn't
# discard the separator (the operand name in this case) when the pattern
# is enclosed in parentheses (a capture group).
if len(new_tokens) != len(tokens) + 2 * template.syntax.count(op.syntax_name):
raise UnexpectedException()
# Every split (appearance of the operand in the syntax)
# has to generate 2 new tokens (an old token is split into 3,
# the separator and left/right tokens, that are always generated
# even if they are empty strings).
tokens = new_tokens
# TODO: use list comprehensions and eliminate `new_tokens`.
# Discard possibly empty generated strings.
tokens = list(filter(lambda s: len(s) > 0, tokens))
# Generate list of TemplateToken and match string tokens to operands.
for str_token in tokens:
template_token = TemplateToken(str_token.lower())
# TODO: Is it ok to convert to lowercase here?
# The letter case of the operands text is useful (specially in IDA) to
# identify them quickly in the visual analysis (from the rest of the instruction).
for op in template.reg_ops + template.imm_ops: # type: InstructionOperand
if str_token == op.syntax_name:
# The string token names the operand, match them.
template_token.op = op
break
template.tokens.append(template_token)
return
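# Illustrative sketch of the split step above (assuming an operand named ``Rs``):
# re.split('(Rs)', 'Rd=add(Rs,#s16)') -> ['Rd=add(', 'Rs', ',#s16)'], i.e. the
# capture group keeps the operand as its own token instead of discarding it.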
def resolve_constant_extender(self, template):
"""In case there are two imm. operands, indicate to which one would apply a constant extension.
This is done for instructions that can be extended by a constant but have two
immediate operands and it has to be indicated to which one the extension applies.
The function ``apply_extension()`` in instruction behaviours is used as an indication
that a constant extension can be applied, and the argument of the function specifies
the syntax of which immediate operand it applies to.
Args:
template (InstructionTemplate): to be processed.
Returns:
None: the data is applied to the template itself.
TODOs:
* Add to the function description an example of an instruction where
there are two imm. ops. and the ``apply_extension()`` resolves which one.
"""
if len(template.imm_ops) < 2:
# There's no need to perform the check, there's (at most) only one
# immediate operand to choose from.
return
m = re.search(r"""
# Looking for something like: "apply_extension(...);"
apply_extension
\(
(.*?) # Capture group for the imm. op. name, e.g., ``#s``.
\)
""", template.behavior.replace(' ', ''), re.X)
# The spaces are removed from the behavior string to simplify the regex.
if m is None:
# No constant extension found in the behavior.
return
imm_op_ext_name = m.group(1)
# Name of the imm. op. that is the argument of ``apply_extension()``.
for imm_op in template.imm_ops:
if imm_op_ext_name in imm_op.syntax_name:
# An equal comparison is not made in the previous if because
# the op. name in the apply_extension argument is usually a shorter
# version of the name in the syntax (normally because the
# operand's bit size was removed), e.g., ``#s16`` in
# ``Rd=add(Rs,#s16)`` is referenced as ``apply_extension(#s);``.
template.imm_ext_op = imm_op
return
raise UnexpectedException()
# If the regex matched, the operand should have been found in the previous loop.
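# Illustrative sketch (hypothetical behavior string): for a behavior containing
# "apply_extension(#s);" the regex above captures '#s', which is then matched
# against the longer syntax name '#s16' via the substring test in the loop below.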
def analyze_branch(self, template):
"""Find a branch in the instruction syntax and generate the template info.
Used in (IDA) static analysis.
Args:
template (InstructionTemplate): to be processed.
Returns:
None: the data is applied to the template itself.
TODOs:
* Change function name to something like 'find_branch(es)'.
* This type of analysis should be done by studying the REIL translation
of the instruction, which truly reflects its behaviour. When the REIL
translation is added this function should be adapted.
* Multiple branches in one instruction: is it possible? I think not,
at most, two branches in one packet but separate. Check this.
* The branch string itself is used to represent it, maybe some constants
should be used instead.
"""
for branch_syntax in TemplateBranch.all_branches: # type: str
# Find any of the possible branch syntaxes in the instruction
# to detect a branch.
m = re.search(branch_syntax, template.syntax, re.X)
if m is None:
continue
if branch_syntax == TemplateBranch.dealloc_ret_syntax:
# The instruction is a 'dealloc_return', a jump to the
# LR as target.
return
# TODO: Should this case be handled? Is it of interest to static analysis?
template.branch = TemplateBranch(branch_syntax)
template.branch.is_conditional = ('if' in template.syntax)
# TODO: The if could be applying to another sub-instruction. Improve detection.
if branch_syntax in [TemplateBranch.jump_reg_syntax, TemplateBranch.call_reg_syntax]:
# Branch type: jump/call register.
# Find which register is the target of the branch.
for reg in template.reg_ops: # type: RegisterTemplate
m = re.search(branch_syntax + r'\s*' + reg.syntax_name, template.syntax, re.X)
if m:
template.branch.target = reg
return
# The target register operand was not found, this shouldn't happen, but
# for now the case of register alias (specially the case of LR) is not
# being handled, so an exception can't be raised, and this case is
# tolerated (retuning instead).
# raise UnexpectedException()
return
if branch_syntax in [TemplateBranch.jump_imm_syntax, TemplateBranch.call_imm_syntax]:
# Branch type: jump/call immediate.
for imm in template.imm_ops: # type: ImmediateTemplate
m = re.search(branch_syntax + r'\s*' + imm.syntax_name.replace('#', r'\#'), template.syntax, re.X)
# The '#' (used in imm. op. names) is escaped, as it is interpreted as
# a comment in verbose regex (re.X), and verbose regex is used because
# the branch syntax is written with spaces (verbose style) to improve
# its readability.
if m:
template.branch.target = imm
return
raise UnexpectedException()
# The target immediate operand should have been found.
return
if __name__ == "__main__":
print("Starting decodification...")
deco = HexagonInstructionDecoder()
common.pickle_dump(common.INST_TEMPL_PATH, deco.inst_template_list)
print("Decoding done.")
# TODO: move this to a general main, to call the importer together with the decoder.
| {
"repo_name": "programa-stic/hexag00n",
"path": "hexagondisasm/decoder.py",
"copies": "1",
"size": "11204",
"license": "bsd-2-clause",
"hash": -6031173822719517000,
"line_mean": 40.8059701493,
"line_max": 118,
"alpha_frac": 0.5948768297,
"autogenerated": false,
"ratio": 4.755517826825128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5850394656525127,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from collections import defaultdict
from copy import deepcopy
import datetime
from decimal import Decimal
from future.utils import iteritems, iterkeys, itervalues
import inspect
import json
import os
from past.builtins import basestring
from fixtureupper.base import BaseFixtureUpper
class ModelFixtureUpper(BaseFixtureUpper):
required_attributes = []
generated_field_order = []
def __init__(self, *args, **kwargs):
super(ModelFixtureUpper, self).__init__(*args, **kwargs)
self._model_id = self.start_id
if getattr(self, 'model', None):
# Load the primary key of model into fixture upper
self.attr_key = self.get_model_attr_key()
@classmethod
def get_upper_class_key(cls):
try:
return cls.model.__name__
except:
return None
@classmethod
def make_obj_json(cls, obj, str_obj, super_class=None):
obj_json = {
'__class__': type(obj).__name__,
'__value__': str_obj,
}
if super_class:
obj_json['__super_class__'] = super_class
return obj_json
@classmethod
def dynamic_import_and_eval(cls, import_statement, eval_str):
# FIXME Runtime imports and evals...ew.
# noinspection PyUnresolvedReferences
exec(import_statement)
return eval(eval_str)
@classmethod
def get_python_objects_for_json(cls):
pos = {
datetime.datetime: {
'to_json': lambda obj: cls.make_obj_json(obj, repr(obj)),
'from_json': lambda obj: cls.dynamic_import_and_eval('import datetime', obj['__value__']),
},
Decimal: {
'to_json': lambda obj: cls.make_obj_json(obj, repr(obj)),
'from_json': lambda obj: cls.dynamic_import_and_eval('from decimal import Decimal', obj['__value__']),
},
}
def get_from_json(model):
return lambda obj: model(**obj['__value__'])
for name, upper_class in iteritems(cls._upper_classes):
if getattr(upper_class, 'model', None):
pos[upper_class.model] = {
'to_json': lambda obj: cls.get_fixture_to_json(obj),
'from_json': get_from_json(upper_class.model),
}
return pos
@classmethod
def get_fixture_to_dict(cls, fixture):
raise NotImplementedError
@classmethod
def get_fixture_to_json(cls, fixture):
fields = cls.get_fixture_to_dict(fixture)
return cls.make_obj_json(fixture, fields)
def get_current_json_breakdown(self):
return self.breakdown_to_json(self.get_all_fixtures())
def get_current_sql_breakdown(self):
return self.breakdown_to_sql(self.get_all_fixtures())
@classmethod
def sorted_models_key(cls, model_name):
# FIXME: sort working depends on number of fixture model classes being less than 10000
try:
order_num = cls.all_fixtures_order.index(model_name)
except:
order_num = len(cls.all_fixtures_order)
return '%04d_%s' % (order_num, model_name)
@classmethod
def sorted_fixtures_key(cls, f):
return cls.sorted_models_key(type(f).__name__)
# Transform python object into json compatible representation
@classmethod
def get_default_to_json(cls):
python_objects = cls.get_python_objects_for_json()
def _to_json(obj):
# Check if type is directly in python_objects
transforms = python_objects.get(type(obj))
if transforms:
return transforms['to_json'](obj)
# Else check if superclass is in python_objects
for python_object, transforms in iteritems(python_objects):
if isinstance(obj, python_object):
return transforms['to_json'](obj)
return obj
return _to_json
@classmethod
def breakdown_to_json(cls, fixtures):
out = sorted(fixtures or [], key=cls.sorted_fixtures_key)
return json.dumps(out, indent=4, default=cls.get_default_to_json(), sort_keys=True)
@classmethod
def print_breakdown(cls, *args, **kwargs):
return cls.print_json_breakdown(*args, **kwargs)
@classmethod
def _print_breakdown(cls, savedir, fname, data):
"""Function to print model fixtures into generated file"""
if not os.path.exists(savedir):
os.makedirs(savedir)
with open(os.path.join(savedir, fname), 'w') as fout:
fout.write(data)
@classmethod
def print_json_breakdown(cls, savedir, fname, fixtures):
return cls._print_breakdown(savedir, fname, cls.breakdown_to_json(fixtures))
@classmethod
def print_sql_breakdown(cls, savedir, fname, fixtures):
return cls._print_breakdown(savedir, fname, cls.breakdown_to_sql(fixtures))
@classmethod
def sort_fixtures_by_model(cls, fixtures):
def _get_default_dict():
return {
'keys': set(),
'values': [],
}
_fixtures = defaultdict(_get_default_dict)
for f in fixtures:
table = _fixtures[type(f).__name__]
table['keys'].update(cls.get_fixture_to_dict(f).keys())
table['values'].append(f)
return _fixtures
@classmethod
def to_sql(cls, val):
if isinstance(val, datetime.datetime):
return 'TIMESTAMP \'%s\'' % str(val)
elif isinstance(val, basestring):
return "'%s'" % val
elif val is None:
return 'NULL'
return json.dumps(val)
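# Example (illustrative): to_sql('abc') returns "'abc'", to_sql(None) returns 'NULL',
# to_sql(datetime.datetime(2020, 1, 1)) returns "TIMESTAMP '2020-01-01 00:00:00'",
# and other values (ints, bools, lists) fall through to json.dumps.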
@classmethod
def get_table_name_from_fixture(cls, f):
raise NotImplementedError
@classmethod
def breakdown_to_sql(cls, fixtures):
fixtures = cls.sort_fixtures_by_model(fixtures)
statement_groups = []
def _sort_key(_tuple):
return cls.sorted_models_key(_tuple[0])
for model_name, table_dict in sorted(iteritems(fixtures), key=_sort_key):
fixture_list = table_dict['values']
if not fixture_list:
continue
table_name = cls.get_table_name_from_fixture(fixture_list[0])
data_keys = sorted(list(table_dict['keys']))
header = 'INSERT INTO %s (%s) VALUES' % (table_name, ', '.join(data_keys))
statements = [
'(%s)' % ', '.join(cls.to_sql(getattr(f, key)) for key in data_keys)
for f in fixture_list
]
statement_groups.append('%s\n%s;\n' % (header, ',\n'.join(statements)))
return '\n'.join(statement_groups)
@classmethod
def fixup_from_json(cls, json_str):
python_objects = cls.get_python_objects_for_json()
po_by_name = {po.__name__: transforms for po, transforms in iteritems(python_objects)}
# Transform json representation of python object to python object
# TODO Add ability to get using super_classes
def from_json(obj):
if '__class__' in obj:
transforms = po_by_name.get(obj['__class__'])
if transforms:
return transforms['from_json'](obj)
return obj
return json.loads(json_str, object_hook=from_json)
@classmethod
def read_json_breakdown(cls, fname):
"""Read json file to get fixture data"""
if not os.path.exists(fname):
raise RuntimeError
with open(fname, 'r') as data_file:
return cls.fixup_from_json(data_file.read())
def get_model_attr_key(self, model=None):
raise NotImplementedError
def get_model_id(self, inc=True):
v = self._model_id
if inc:
self._model_id += 1
return v
def set_relation(self, fixture, related_fixtures, relation_prop):
raise NotImplementedError
def _is_generator_function(self, obj):
return callable(obj)
def _call_generator_function(self, fn, fixture, key):
return fn(self, fixture, key)
def set_fixture_values(self, model_values, fixture=None):
# Init model if None passed
fixture = fixture or self.model()
buckets = defaultdict(dict)
relationships = self.get_relationships()
# Get function that sets attribute onto fixture
def _get_fn(value, attr, is_relation=False, is_generated=False):
def _set_attr_fn(fixture):
attr_value = value
if is_generated:
attr_value = self._call_generator_function(value, fixture, attr)
if is_relation:
self.set_relation(fixture, attr_value, attr)
else:
setattr(fixture, attr, attr_value)
return _set_attr_fn
def _dict_as_key(_dict):
return str(sorted(iteritems(_dict)))
# Group into buckets whether attribute value is a relation and is a generator
for attr, value in iteritems(model_values):
params = {
'is_relation': bool(relationships.get(attr)),
'is_generated': self._is_generator_function(value),
}
buckets[_dict_as_key(params)][attr] = _get_fn(value, attr, **params)
# Call static values first
bucket = buckets[_dict_as_key({'is_relation': False, 'is_generated': False})]
for static_values in itervalues(bucket):
static_values(fixture)
# Call static relations next
bucket = buckets[_dict_as_key({'is_relation': True, 'is_generated': False})]
for static_relations in itervalues(bucket):
static_relations(fixture)
# Call generated functions now, according to sorted order, but otherwise prioritize relations
gen_values = buckets[_dict_as_key({'is_relation': False, 'is_generated': True})]
gen_relations = buckets[_dict_as_key({'is_relation': True, 'is_generated': True})]
relation_keys = set(iterkeys(gen_relations))
combined = dict(gen_values, **gen_relations)
for attr, generator in self.sorted_by_generated_order(combined, other_prioritized=relation_keys):
generator(fixture)
return fixture
@classmethod
def get_relationships(cls):
raise NotImplementedError
def sorted_by_generated_order(self, data, other_prioritized={}):
def _sort(_tuple):
attr = _tuple[0]
try:
                # Attributes in self.generated_field_order are prioritized before everything else
                return self.generated_field_order.index(attr)
            except ValueError:
                # attributes in other_prioritized sort next, all remaining attributes last
return len(self.generated_field_order) + int(attr not in other_prioritized)
return sorted(iteritems(data), key=_sort)
def update_fixtures_with_data(self, data, fixtures=None):
fixtures = fixtures or self.fixtures
for i, d in enumerate(data):
for key, val in iteritems(d):
setattr(fixtures[i], key, val)
def single_fixup(self, data=None, defaults=None, default_overrides={}, **kwargs):
data = data or {}
# Get model values through mix of default values and passed in values
defaults = dict(defaults or self.defaults, **default_overrides)
model_values = dict(defaults, **data)
# Generate model's primary key value if it has a primary key
if self.attr_key and not model_values.get(self.attr_key):
model_values[self.attr_key] = self.get_model_id()
fixture = self.set_fixture_values(model_values)
        # Check to make sure required attributes have been set
for attr in self.required_attributes:
if getattr(fixture, attr, None) is None:
raise Exception('%s is not set for %s' % (attr, str(fixture)))
self.fixtures.append(fixture)
return fixture
def fixup(self, data=None, **kwargs):
if isinstance(data, list):
fixtures = []
for d in data:
fixtures.append(self.single_fixup(data=d, **kwargs))
return fixtures
else:
return self.single_fixup(data=data, **kwargs)
| {
"repo_name": "Rhathe/fixtureupper",
"path": "fixtureupper/model.py",
"copies": "1",
"size": "12419",
"license": "mit",
"hash": -1515020694854973000,
"line_mean": 33.8848314607,
"line_max": 118,
"alpha_frac": 0.5932039617,
"autogenerated": false,
"ratio": 4.117705570291777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210909531991778,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from copy import deepcopy
import datetime
from decimal import Decimal
from future.utils import iteritems
import inspect
import json
import operator
import os
from random import Random
from six import with_metaclass
from sqlalchemy.inspection import inspect as sqlalchemy_inspect
# Watch when new FixtureUppers are created and register them to the class's global dictionary
class UpperWatcher(type):
def __init__(cls, name, bases, clsdict):
cls._UPPER_KEY = cls.get_upper_class_key()
if cls._UPPER_KEY:
cls._upper_classes[cls._UPPER_KEY] = cls
super(UpperWatcher, cls).__init__(name, bases, clsdict)
class BaseFixtureUpper(with_metaclass(UpperWatcher, object)):
_upper_classes = {}
upper_aliases = {}
all_fixtures_order = []
def __init__(self, start_id=1, seed=None, upper_instances=None, **kwargs):
self.start_id = start_id
self.seed = seed
self.fixtures = []
self.defaults = getattr(self, 'defaults', {})
self.seed_random()
if upper_instances is None:
upper_instances = {}
self.upper_instances = upper_instances
# Save most recent instance of upper
# to upper map
if getattr(self, '_UPPER_KEY', None):
self.upper_instances[self._UPPER_KEY] = self
@classmethod
def get_upper_class_key(cls):
# Don't register Base Fixture Upper Classes
if cls.__name__ == 'BaseFixtureUpper':
return None
key = cls.__name__
if key in cls._upper_classes:
raise Exception('Fixture Upper with name %s exists, use another name' % key)
return key
@classmethod
def sorted_fixtures_key(cls, f):
return f
def get_all_fixtures(self):
list_of_lists = iter([
instance.fixtures
for key, instance
in iteritems(self.upper_instances)
])
return sorted(
iter([fixture for fixture_list in list_of_lists for fixture in fixture_list]),
key=self.sorted_fixtures_key
)
def seed_random(self, seed=None):
seed = seed or self.seed
self.random = Random()
self.random.seed(seed)
def get_passed_kwarg_keys(self):
return ['start_id', 'seed']
def get_upper(self, key, **kwargs):
# Get alias of key if available
key = self.upper_aliases.get(key, key)
if key not in self.upper_instances:
kwargs['upper_instances'] = self.upper_instances
for kw in self.get_passed_kwarg_keys():
if not kwargs.get(kw):
kwargs[kw] = getattr(self, kw)
self._upper_classes[key](**kwargs)
return self.upper_instances[key]
def randint(self, *args):
return self.random.randint(*args)
def override_defaults(self, defaults):
# Make sure global class defaults are not overridden
self.defaults = dict(deepcopy(self.defaults), **defaults)
def reset_defaults(self):
self.defaults = self.__class__.defaults
def fixup(self, **kwargs):
raise NotImplementedError
| {
"repo_name": "Rhathe/fixtureupper",
"path": "fixtureupper/base.py",
"copies": "1",
"size": "3259",
"license": "mit",
"hash": -2018741662184129300,
"line_mean": 28.8990825688,
"line_max": 93,
"alpha_frac": 0.615526235,
"autogenerated": false,
"ratio": 4.109709962168979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030149783410579075,
"num_lines": 109
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from gatspy.periodic import LombScargleFast
from gatspy.periodic import LombScargle
import matplotlib.pyplot as plt
import mousestyles.data as data
from mousestyles.visualization.plot_lomb_scargle import lombscargle_visualize
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy.stats import chi2
plt.style.use('ggplot')
INTERVAL_FEATURES = ["AS", "F", "M_AS", "M_IS", "W"]
ALL_FEATURES = ["AS", "F", "M_AS", "M_IS", "W", "Distance"]
METHOD = ["LombScargleFast", "LombScargle"]
def aggregate_interval(strain, mouse, feature, bin_width):
"""
Aggregate the interval data based on n-minute time
intervals, return a time series.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
    feature: {"AS", "F", "M_AS", "M_IS", "W"}
        "AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
bin_width: number of minutes of time interval for data aggregation
Returns
-------
ts: pandas.tseries
a pandas time series of length 12(day)*24(hour)*60(minute)/n
"""
# Input Check
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in INTERVAL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
# load data
intervals = data.load_intervals(feature)
mouse_data = intervals.loc[
(intervals['strain'] == strain) & (intervals['mouse'] == mouse)]
# build data frame
days = sorted(np.unique(mouse_data['day']))
bin_count = int(24 * 60 / bin_width)
time_behaviour = np.repeat(0.0, bin_count * len(days))
bin_length = bin_width * 60
for j in days:
df = mouse_data.loc[mouse_data['day'] == j]
start_end = data.load_start_time_end_time(strain, mouse, j)
start = np.asarray(df['start']) - start_end[0]
end = np.asarray(df['stop']) - start_end[0]
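        # Worked example of the split below: with bin_width=30 (bin_length=1800 s),
        # an interval from t=1700 s to t=2000 s falls across two bins and
        # contributes 1800*1 - 1700 = 100 s to bin 0 and 2000 % 1800 = 200 s to bin 1.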
for i in range(len(start)):
start_time = start[i]
end_time = end[i]
start_index = int(start_time / (bin_width * 60))
end_index = int(end_time / (bin_width * 60))
if start_index == end_index:
time_behaviour[start_index + j *
bin_count] += end_time - start_time
elif end_index - start_index == 1:
time_behaviour[
start_index + j *
bin_count] += bin_length * end_index - start_time
time_behaviour[end_index + j *
bin_count] += end_time % bin_length
else:
time_behaviour[
start_index + j *
bin_count] += bin_length * (start_index + 1) - start_time
time_behaviour[end_index + j *
bin_count] += end_time % bin_length
time_behaviour[start_index + j * bin_count +
1:end_index + j * bin_count] += bin_length
if feature == 'F' or feature == 'W':
all_feature = data.load_all_features()
group = all_feature[
["strain", "mouse", "day", "hour", "Food", "Water"]].groupby(
["strain", "mouse", "day"]).sum()
group = group.reset_index()
mouse_data = group.loc[(group['strain'] == strain) &
(group['mouse'] == mouse)].copy()
mouse_data.loc[:, 'day'] = np.arange(len(mouse_data))
for i in mouse_data['day'].astype('int'):
if feature == 'F':
food_amount = float(mouse_data['Food'][mouse_data['day'] == i])
time_behaviour[
(bin_count * i):(bin_count * (i + 1))] /= sum(
time_behaviour[(bin_count * i):(bin_count * (i + 1))])
time_behaviour[(bin_count * i):(bin_count *
(i + 1))] *= food_amount
else:
food_amount = float(mouse_data['Water'][
mouse_data['day'] == i])
time_behaviour[
(bin_count * i):(bin_count * (i + 1))] /= sum(
time_behaviour[(bin_count * i):(bin_count * (i + 1))])
time_behaviour[(bin_count * i):(bin_count *
(i + 1))] *= food_amount
if feature == 'AS':
time_behaviour /= (bin_width * 60)
ts = pd.Series(time_behaviour, index=pd.date_range(
'01/01/2014', periods=len(time_behaviour),
freq=str(bin_width) + 'min'))
return ts
def aggregate_movement(strain, mouse, bin_width):
"""
Aggregate the movement data based on n-minute
time intervals, return a time series.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
bin_width: number of minutes of time interval for data aggregation
Returns
-------
ts: pandas.tseries
a pandas time series of length (#day)*24(hour)*60(minute)/n
"""
# Input Check
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
# determine number of days
intervals = data.load_intervals('IS')
mouse_data = intervals.loc[
(intervals['strain'] == strain) & (intervals['mouse'] == mouse)]
days = sorted(np.unique(mouse_data['day']))
# build data frame
bin_count = int(24 * 60 / bin_width)
time_movements = np.repeat(0.0, bin_count * len(days))
bin_length = bin_width * 60
for j in days:
M = data.load_movement(strain, mouse, day=int(j))
distance_df = pd.DataFrame({"start": M["t"].values[0:-1],
"end": M["t"].values[1:],
"distance":
np.linalg.norm(M[["x", "y"]].values[1:] -
M[["x", "y"]].values[0:-1],
axis=1)})
start_end = data.load_start_time_end_time(strain, mouse, j)
start = np.asarray(distance_df['start']) - start_end[0]
end = np.asarray(distance_df['end']) - start_end[0]
dist = distance_df['distance']
for i in range(len(start)):
start_time = start[i]
end_time = end[i]
start_index = int(start_time / (bin_width * 60))
end_index = int(end_time / (bin_width * 60))
if start_index == end_index:
time_movements[start_index + j *
bin_count] += dist[i]
else:
time_movements[
end_index + j * bin_count] += end_time % \
bin_length / (end_time - start_time) * dist[i]
time_movements[
start_index + j * bin_count] += dist[i] - \
end_time % bin_length / (end_time - start_time) * dist[i]
ts = pd.Series(time_movements, index=pd.date_range(
'01/01/2014', periods=len(time_movements),
freq=str(bin_width) + 'min'))
return ts
def aggregate_data(feature, bin_width, nmouse=4, nstrain=3):
r"""
    Aggregate all the strains and mice for a given feature
    into one dataframe. It combines the results obtained from
    aggregate_movement and aggregate_interval, and returns
    a dataframe with four variables: mouse, strain, hour and the feature.
Parameters
----------
feature :
{"AS", "F", "IS", "M_AS", "M_IS", "W", "Distance"}
bin_width : int
Number of minutes, the time interval for data aggregation.
Returns
-------
pandas.dataframe
describe :
Column 0: the mouse number (number depends on strain)(0-3)
Column 1: the strain of the mouse (0-2)
        Column 2: hour (numeric values below 24 according to bin_width)
Column 3: feature values
Examples
--------
>>> test = aggregate_data("Distance",20)
>>> print(np.mean(test["Distance"]))
531.4500177747973
"""
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
init = pd.DataFrame(columns=["mouse", "strain", "hour", feature])
for i in range(nstrain):
for j in range(nmouse):
if feature == "Distance":
tmp = aggregate_movement(strain=i, mouse=j,
bin_width=bin_width)
else:
tmp = aggregate_interval(strain=i, mouse=j,
feature=feature,
bin_width=bin_width)
tmp = pd.DataFrame(list(tmp.values), index=tmp.index)
tmp.columns = [feature]
tmp["strain"] = i
tmp["mouse"] = j
tmp["hour"] = tmp.index.hour + tmp.index.minute / 60
init = init.append(tmp)
return init
def seasonal_decomposition(strain, mouse, feature, bin_width, period_length):
"""
Apply seasonal decomposition model on the time series
of specified strain, mouse, feature and bin_width.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
    feature: {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
        "AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
bin_width: int
number of minutes, the time interval for data aggregation
period_length: int or float
number of hours, usually the significant period
length indicated by Lomb-scargle model
Returns
-------
res: statsmodel seasonal decomposition object
seasonal decomposition result for the mouse.
        Check the seasonal decomposition plot with res.plot(), and the
        seasonal and trend terms via res.seasonal and res.trend
        respectively.
Examples
--------
>>> res = seasonal_decomposition(strain=0, mouse=0, feature="W",
bin_width=30, period_length = 24)
"""
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
if period_length < 0:
raise ValueError(
            'Period length must be a non-negative integer or float')
freq = int(period_length * 60 / bin_width)
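    # freq is the number of bins per seasonal cycle, e.g. a 24-hour period
    # sampled in 30-minute bins gives freq = 24 * 60 / 30 = 48.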
if feature == "Distance":
ts = aggregate_movement(strain=strain, mouse=mouse,
bin_width=bin_width)
else:
ts = aggregate_interval(strain=strain, mouse=mouse,
feature=feature, bin_width=bin_width)
res = sm.tsa.seasonal_decompose(ts.values, freq=freq, model="additive")
return res
def strain_seasonal(strain, mouse, feature, bin_width, period_length):
"""
    Apply the seasonal decomposition model to the time series
    of the specified strain, mouse, feature and bin_width, and
    return the seasonal term (and its plot) for each mouse
    in a given set of mice within a strain.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: list, set or tuple
        nonnegative integers indicating the mouse numbers
    feature: {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
        "AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
bin_width: int
number of minutes, the time interval for data aggregation
period_length: int or float
number of hours, usually the significant period
length indicated by Lomb-scargle model
Returns
-------
seasonal_all: numpy array containing the seasonal term for every
mouse indicated by the input parameter
Examples
--------
>>> res = strain_seasonal(strain=0, mouse={0, 1, 2, 3}, feature="W",
bin_width=30, period_length = 24)
"""
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not all([isinstance(m, int)
for m in mouse])) or (any([m < 0 for m in mouse])):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
if period_length < 0:
raise ValueError(
            'Period length must be a non-negative integer or float')
# seasonal decomposition
seasonal_all = np.array([])
freq = int(period_length * 60 / bin_width)
for m in mouse:
res = seasonal_decomposition(
strain, m, feature, bin_width, period_length)
seasonal_all = np.append(seasonal_all, res.seasonal[0:freq])
seasonal_all = seasonal_all.reshape([len(mouse), -1])
return seasonal_all
def find_cycle(feature, strain, mouse=None, bin_width=15,
methods='LombScargleFast', disturb_t=False, gen_doc=False,
plot=True, search_range_fit=None, nyquist_factor=3,
n_cycle=10, search_range_find=(2, 26), sig=np.array([0.05])):
"""
    Use the Lomb-Scargle method on a strain's or mouse's data to find the
    best possible periods, i.e. those with the highest LS power and hence
    the smallest p-values. The function can be used on a specific strain
    and mouse, or on a strain alone without specifying the mouse number.
    We use the O(N log N) fast implementation of Lomb-Scargle from the
    gatspy package, and also provide a way to visualize the result.
    Note that plotting and finding the best cycle do not use the same
    period grid: the former can use the user-specified search_range_fit,
    while the latter searches the optimizer's default grid within
    search_range_find.
Parameters
----------
    feature: string in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
        "AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
strain: int
nonnegative integer indicating the strain number
mouse: int, default is None
nonnegative integer indicating the mouse number
bin_width: int, minute unit, default is 15 minutes
number of minutes, the time interval for data aggregation
methods: string in {"LombScargleFast", "LombScargle"}
indicating the method used in determining periods and best cycle.
        If 'LombScargle' is chosen, 'disturb_t' must be True.
disturb_t: boolean, default is False
If True, add uniformly distributed noise to the time sequence which
are used to fit the Lomb Scargle model. This is to avoid the singular
matrix error that could happen sometimes.
plot: boolean, default is True
If True, call the visualization function to plot the Lomb Scargle
power versus periods plot. First use the data (either strain specific
or strain-mouse specific) to fit the LS model, then use the
search_range_fit as time sequence to predict the corresponding LS
power, at last draw the plot out. There will also be stars and
horizontal lines indicating the p-value of significance. Three stars
will be p-value in [0,0.001], two stars will be p-value in
[0.001,0.01], one star will be p-value in [0.01,0.05]. The horizontal
line is the LS power that has p-value of 0.05.
search_range_fit: list, numpy array or numpy arange, hours unit,
default is None
        list of numbers used as the period sequence at which to predict the
        corresponding Lomb-Scargle power. If plot is True, these are drawn
        as the x-axis. Note that the number of search_range_fit points
        cannot be too small, or the smoothed prediction line will not be
        accurate. However, the plot will always mark the right periods and
        their LS power with 1, 2 or 3 stars, which can be used to check
        whether search_range_fit is fine enough to draw a correct plot.
        We recommend the default None, which is easy to use.
nyquist_factor: int
If search_range_fit is None, the algorithm will automatically
choose the periods sequence.
5 * nyquist_factor * length(time sequence) / 2 gives the number of
power and periods used to make LS prediction and plot the graph.
n_cycle: int, default is 10
        number of periods to be returned by the function, namely those with
        the highest Lomb-Scargle power.
search_range_find: list, tuple or numpy array with length of 2, default is
(2,26), hours unit
Range of periods to be searched for best cycle. Note that the minimum
should be strictly larger than 0 to avoid 1/0 issues.
sig: list or numpy array, default is [0.05].
significance level to be used for plot horizontal line.
gen_doc: boolean, default is False
If true, return the parameters needed for visualize the LS power versus
periods
Returns
-------
cycle: numpy array of length 'n_cycle'
        The best periods, i.e. those with the highest LS power.
    cycle_power: numpy array of length 'n_cycle'
        The corresponding LS power of 'cycle'.
    cycle_pvalue: numpy array of length 'n_cycle'
        The corresponding p-value of 'cycle'.
    periods: numpy array of the same length as 'power'
        The period sequence at which the LS model makes predictions.
        Only returned when gen_doc is True.
    power: numpy array of the same length as 'periods'
        The corresponding predicted power of the periods. Only returned
        when gen_doc is True.
sig: list, tuple or numpy array, default is [0.05].
significance level to be used for plot horizontal line.
        Only returned when gen_doc is True.
N: int
the length of time sequence in the fit model. Only return when
gen_doc is True.
Examples
-------
>>> a,b,c = find_cycle(feature='F', strain = 0,mouse = 0, plot=False,)
>>> print(a,b,c)
>>> [ 23.98055016 4.81080233 12.00693952 6.01216335 8.0356203
3.4316698 2.56303353 4.9294791 21.37925713 3.5697756 ]
[ 0.11543449 0.05138839 0.03853218 0.02982237 0.02275952
0.0147941 0.01151601 0.00998443 0.00845883 0.0082382 ]
[ 0.00000000e+00 3.29976046e-10 5.39367189e-07 8.10528027e-05
4.71001953e-03 3.70178834e-01 9.52707020e-01 9.99372657e-01
9.99999981e-01 9.99999998e-01]
"""
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if methods not in METHOD:
raise ValueError(
'Input value must in {"LombScargleFast","LombScargle"}')
# get data
if mouse is None:
data_all = aggregate_data(feature=feature, bin_width=bin_width)
n_mouse_in_strain = len(
set(data_all.loc[data_all['strain'] == strain]['mouse']))
data = [[] for i in range(n_mouse_in_strain)]
t = [[] for i in range(n_mouse_in_strain)]
for i in range(n_mouse_in_strain):
data[i] = data_all.loc[(data_all['strain'] == strain) & (
data_all['mouse'] == i)][feature]
t[i] = np.array(np.arange(0, len(data[i]) *
bin_width / 60, bin_width / 60))
data = [val for sublist in data for val in sublist]
N = len(data)
t = [val for sublist in t for val in sublist]
else:
if feature == 'Distance':
data = aggregate_movement(
strain=strain, mouse=mouse, bin_width=bin_width)
N = len(data)
t = np.arange(0, N * bin_width / 60, bin_width / 60)
else:
data = aggregate_interval(
strain=strain, mouse=mouse,
feature=feature, bin_width=bin_width)
N = len(data)
t = np.arange(0, N * bin_width / 60, bin_width / 60)
y = data
# fit model
if disturb_t is True:
t = t + np.random.uniform(-bin_width / 600, bin_width / 600, N)
if methods == 'LombScargleFast':
model = LombScargleFast(fit_period=False).fit(t=t, y=y)
elif methods == 'LombScargle':
model = LombScargle(fit_period=False).fit(t=t, y=y)
# calculate periods' LS power
if search_range_fit is None:
periods, power = model.periodogram_auto(nyquist_factor=nyquist_factor)
else:
periods = search_range_fit
power = model.periodogram(periods=search_range_fit)
# find best cycle
model.optimizer.period_range = search_range_find
cycle, cycle_power = model.find_best_periods(
return_scores=True, n_periods=n_cycle)
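    # The line below appears to follow the classical false-alarm approximation
    # for a normalized Lomb-Scargle peak, p = 1 - (1 - exp(-z))**M, with
    # z = power * (N - 1) / 2 and M = 2N taken as the effective number of
    # independent frequencies.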
cycle_pvalue = 1 - (1 - np.exp(cycle_power / (-2) * (N - 1))) ** (2 * N)
# visualization
if plot is True:
lombscargle_visualize(periods=periods, power=power, sig=sig, N=N,
cycle_power=cycle_power,
cycle_pvalue=cycle_pvalue, cycle=cycle)
if gen_doc is True:
return periods, power, sig, N, cycle, cycle_power, cycle_pvalue
return cycle, cycle_power, cycle_pvalue
def mix_strain(data, feature, print_opt=True,
nstrain=3, search_range=(3, 12), degree=1):
"""
    Fit a linear mixed model to our aggregated data. The fixed effects
    are hour, strain and the interactions between hour and strain; the
    random effect is mouse, to account for variation between individual
    mice. We add two dummy variables, strain0 and strain1, as part of
    the fixed effects.
Parameters
----------
data: data frame output from aggregate_data function
feature: {"AS", "F", "IS", "M_AS", "M_IS", "W", "Distance"}
print_opt: True or False
nstrain: positive integer
    search_range: tuple of two elements, the period range searched by find_cycle
degree: positive integer
Returns
-------
    Two mixed-model regression results, which include all the coefficients
    with their t statistics and p-values for the corresponding terms; the
    first model includes the interaction terms while the second model does
    not.
    A likelihood ratio test p-value: if it is below our significance level,
    we can conclude that the different strains have significantly different
    time patterns.
Examples
--------
>>> result = mix_strain(data = aggregate_data("F",30), feature = "F",
>>> print_opt = False, degree = 2)
>>> print(result)
2.5025846540930469e-09
"""
if not isinstance(data, pd.DataFrame):
raise ValueError(
'Data must be a pandas data frame')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
data["cycle"] = 0
for i in range(nstrain):
result = find_cycle(feature="W", strain=i, plot=False,
search_range_find=search_range)
cycle = result[0][0]
data.loc[data["strain"] == i, "cycle"] = cycle
b = pd.get_dummies(data["strain"])
data["strain0"] = b.ix[:, 0]
data["strain1"] = b.ix[:, 1]
data["strain2"] = b.ix[:, 2]
data["hour2"] = np.array(data["hour"].values)**degree
data = data.drop('strain', 1)
names = data.columns.tolist()
names[names.index(feature)] = 'feature'
data.columns = names
if degree == 1:
md1 = smf.mixedlm("feature ~ hour + strain0 + strain1 + cycle \
+ strain0*hour + strain1*hour", data,
groups=data["mouse"])
else:
md1 = smf.mixedlm("feature ~ hour + hour2 + strain0 + strain1 + \
strain0*hour+ strain1*hour + strain0*hour2+ \
strain1*hour2", data, groups=data["mouse"])
mdf1 = md1.fit()
like1 = mdf1.llf
if print_opt:
print(mdf1.summary())
if degree == 1:
md2 = smf.mixedlm("feature ~ hour + cycle + strain0 \
+ strain1", data, groups=data["mouse"])
else:
md2 = smf.mixedlm("feature ~ hour + hour2 + cycle + strain0 + \
strain1", data, groups=data["mouse"])
mdf2 = md2.fit()
like2 = mdf2.llf
if print_opt:
print(mdf2.summary())
fstat = 2 * abs(like1 - like2)
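    # Note: the p-value below is computed from the chi-square density at
    # fstat; a conventional likelihood-ratio test would instead use the
    # upper-tail probability, e.g. chi2.sf(fstat, df=2).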
p_v = chi2.pdf(fstat, df=2)
return p_v
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/ultradian/__init__.py",
"copies": "3",
"size": "26924",
"license": "bsd-2-clause",
"hash": -9176317693295174000,
"line_mean": 39.1251862891,
"line_max": 79,
"alpha_frac": 0.5833085723,
"autogenerated": false,
"ratio": 3.7603351955307263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5843643767830726,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from itertools import cycle
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from .utils import mask_to_sky
__all__ = ['plot_mask', 'plot_galaxy']
def slit_patches(mask, color=None, sky_coords=False, center=None):
'''
    Constructs mpl patches for the slits of a mask. If sky_coords is True,
    output is in relative RA/Dec; the galaxy center is necessary for sky_coords.
'''
patches = []
for slit in mask.slits:
x = slit.x
y = slit.y
dx = slit.length
dy = slit.width
# bottom left-hand corner
if sky_coords:
L = np.sqrt(dx**2 + dy**2) / 2
alpha = np.tan(dy / dx)
phi = np.pi / 2 - np.radians(slit.pa)
delta_x = L * (np.cos(alpha + phi) - np.cos(alpha))
delta_y = L * (np.sin(alpha + phi) - np.sin(alpha))
ra, dec = mask_to_sky(x - dx / 2, y - dy / 2, mask.mask_pa)
blc0 = (ra, dec)
angle = (90 - slit.pa)
blc = (ra + delta_x, dec - delta_y)
# blc = (ra + x1 + x2, dec - y1 + y2)
else:
blc = (x - dx / 2, y - dy / 2)
angle = slit.pa - mask.mask_pa
patches.append(mpl.patches.Rectangle(blc, dx, dy, angle=angle,
fc=color, ec='k', alpha=0.5))
# patches.append(mpl.patches.Rectangle(blc0, dx, dy, angle=0,
# fc=color, ec='k', alpha=0.1))
return patches
def plot_mask(mask, color=None, writeto=None, annotate=False):
'''Plot the slits in a mask, in mask coords'''
fig, ax = plt.subplots()
for p in slit_patches(mask, color=color, sky_coords=False):
ax.add_patch(p)
if annotate:
for slit in mask.slits:
ax.text(slit.x - 3, slit.y + 1, slit.name, size=8)
xlim = mask.x_max / 2
ylim = mask.y_max / 2
lim = min(xlim, ylim)
ax.set_title(mask.name)
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_xlabel('x offset (arcsec)', fontsize=16)
ax.set_ylabel('y offset (arcsec)', fontsize=16)
if writeto is not None:
fig.savefig(writeto)
return fig, ax
def plot_galaxy(galaxy, writeto=None):
'''Plot all slit masks'''
fig, ax = plt.subplots()
colors = cycle(['r', 'b', 'm', 'c', 'g'])
handles = []
for i, mask in enumerate(galaxy.masks):
color = next(colors)
label = str(i + 1) + galaxy.name + ' (PA = {:.2f})'.format(mask.mask_pa)
handles.append(mpl.patches.Patch(fc=color, ec='k',
alpha=0.5, label=label))
for p in slit_patches(mask, color=color,
sky_coords=True, center=galaxy.center):
ax.add_patch(p)
xlim = galaxy.masks[0].x_max / 2
ylim = galaxy.masks[0].y_max / 2
lim = min(xlim, ylim)
# reverse x axis so it looks like sky
ax.set_xlim(lim, -lim)
# ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_title(galaxy.name, fontsize=16)
ax.set_xlabel('RA offset (arcsec)', fontsize=16)
ax.set_ylabel('Dec offset (arcsec)', fontsize=16)
ax.legend(handles=handles, loc='best')
if writeto is not None:
fig.savefig(writeto) #, bbox_inches='tight')
return fig, ax
| {
"repo_name": "adwasser/masktools",
"path": "masktools/superskims/plotting.py",
"copies": "1",
"size": "3437",
"license": "mit",
"hash": 4474493122683881500,
"line_mean": 34.8020833333,
"line_max": 80,
"alpha_frac": 0.5423334303,
"autogenerated": false,
"ratio": 3.135948905109489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.91571763949116,
"avg_score": 0.004221188099577775,
"num_lines": 96
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from itertools import groupby
import pkg_resources
import re
import sys
try:
import simplejson as json
json # silences pyflakes :<
except ImportError:
import json
GRAPHS = {}
DICTIONARY_MATCHERS = []
def translate(string, chr_map):
out = ''
for char in string:
out += chr_map[char] if char in chr_map else char
return out
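# Example (illustrative): translate('p4ssw0rd', {'4': 'a', '0': 'o'})
# returns 'password'; characters missing from chr_map pass through unchanged.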
#-------------------------------------------------------------------------------
# dictionary match (common passwords, english, last names, etc) ----------------
#-------------------------------------------------------------------------------
def dictionary_match(password, ranked_dict):
result = []
length = len(password)
pw_lower = password.lower()
for i in range(0, length):
for j in range(i, length):
word = pw_lower[i:j+1]
if word in ranked_dict:
rank = ranked_dict[word]
result.append( {'pattern':'dictionary',
'i' : i,
'j' : j,
'token' : password[i:j+1],
'matched_word' : word,
'rank': rank,
})
return result
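# Illustrative sketch: with ranked_dict = {'pass': 10, 'word': 20},
# dictionary_match('PassWord', ranked_dict) returns two matches, one for
# token 'Pass' (i=0, j=3, rank 10) and one for token 'Word' (i=4, j=7,
# rank 20), since every lowercase substring is checked against the dict.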
def _build_dict_matcher(dict_name, ranked_dict):
def func(password):
matches = dictionary_match(password, ranked_dict)
for match in matches:
match['dictionary_name'] = dict_name
return matches
return func
def _build_ranked_dict(unranked_list):
result = {}
i = 1
for word in unranked_list:
result[word] = i
i += 1
return result
def _load_frequency_lists():
data = pkg_resources.resource_string(__name__, 'generated/frequency_lists.json')
data = data.decode('utf-8')
dicts = json.loads(data)
for name, wordlist in list(dicts.items()):
DICTIONARY_MATCHERS.append(_build_dict_matcher(name, _build_ranked_dict(wordlist)))
def _load_adjacency_graphs():
global GRAPHS
data = pkg_resources.resource_string(__name__, 'generated/adjacency_graphs.json')
data = data.decode('utf-8')
GRAPHS = json.loads(data)
# on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1.
# this calculates the average over all keys.
def _calc_average_degree(graph):
average = 0.0
for neighbors in list(graph.values()):
average += len([n for n in neighbors if n is not None])
average /= len(graph)
return average
_load_frequency_lists()
_load_adjacency_graphs()
KEYBOARD_AVERAGE_DEGREE = _calc_average_degree(GRAPHS[u'qwerty'])
# slightly different for keypad/mac keypad, but close enough
KEYPAD_AVERAGE_DEGREE = _calc_average_degree(GRAPHS[u'keypad'])
KEYBOARD_STARTING_POSITIONS = len(GRAPHS[u'qwerty'])
KEYPAD_STARTING_POSITIONS = len(GRAPHS[u'keypad'])
#-------------------------------------------------------------------------------
# dictionary match with common l33t substitutions ------------------------------
#-------------------------------------------------------------------------------
L33T_TABLE = {
'a': ['4', '@'],
'b': ['8'],
'c': ['(', '{', '[', '<'],
'e': ['3'],
'g': ['6', '9'],
'i': ['1', '!', '|'],
'l': ['1', '|', '7'],
'o': ['0'],
's': ['$', '5'],
't': ['+', '7'],
'x': ['%'],
'z': ['2'],
}
# makes a pruned copy of L33T_TABLE that only includes password's possible substitutions
def relevant_l33t_subtable(password):
password_chars = set(password)
filtered = {}
for letter, subs in list(L33T_TABLE.items()):
        relevant_subs = [sub for sub in subs if sub in password_chars]
        if len(relevant_subs) > 0:
            filtered[letter] = relevant_subs
return filtered
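# For example, relevant_l33t_subtable('p4$$w0rd') keeps only the
# substitutions actually present in the password:
# {'a': ['4'], 'o': ['0'], 's': ['$']}.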
# returns the list of possible 1337 replacement dictionaries for a given password
def enumerate_l33t_subs(table):
subs = [[]]
    def dedup(subs):
        deduped = []
        members = set()
        for sub in subs:
            key = str(sorted(sub))
            if key not in members:
                members.add(key)
                deduped.append(sub)
        return deduped
keys = list(table.keys())
while len(keys) > 0:
first_key = keys[0]
rest_keys = keys[1:]
next_subs = []
for l33t_chr in table[first_key]:
for sub in subs:
dup_l33t_index = -1
for i in range(0, len(sub)):
if sub[i][0] == l33t_chr:
dup_l33t_index = i
break
if dup_l33t_index == -1:
sub_extension = list(sub)
sub_extension.append((l33t_chr, first_key))
next_subs.append(sub_extension)
else:
sub_alternative = list(sub)
sub_alternative.pop(dup_l33t_index)
sub_alternative.append((l33t_chr, first_key))
next_subs.append(sub)
next_subs.append(sub_alternative)
subs = dedup(next_subs)
keys = rest_keys
return [dict(sub) for sub in subs]
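# For example, enumerate_l33t_subs({'a': ['4'], 'o': ['0']}) yields a single
# substitution dict, [{'4': 'a', '0': 'o'}]; a letter with several candidate
# l33t characters produces one dict per alternative.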
def l33t_match(password):
matches = []
for sub in enumerate_l33t_subs(relevant_l33t_subtable(password)):
if len(sub) == 0:
break
subbed_password = translate(password, sub)
for matcher in DICTIONARY_MATCHERS:
for match in matcher(subbed_password):
token = password[match['i']:match['j'] + 1]
if token.lower() == match['matched_word']:
continue
match_sub = {}
for subbed_chr, char in list(sub.items()):
if token.find(subbed_chr) != -1:
match_sub[subbed_chr] = char
match['l33t'] = True
match['token'] = token
match['sub'] = match_sub
match['sub_display'] = ', '.join([("%s -> %s" % (k, v)) for k, v in list(match_sub.items())])
matches.append(match)
return matches
# ------------------------------------------------------------------------------
# spatial match (qwerty/dvorak/keypad) -----------------------------------------
# ------------------------------------------------------------------------------
def spatial_match(password):
matches = []
for graph_name, graph in list(GRAPHS.items()):
matches.extend(spatial_match_helper(password, graph, graph_name))
return matches
def spatial_match_helper(password, graph, graph_name):
result = []
i = 0
while i < len(password) - 1:
j = i + 1
last_direction = None
turns = 0
shifted_count = 0
while True:
prev_char = password[j-1]
found = False
found_direction = -1
cur_direction = -1
adjacents = graph[prev_char] if prev_char in graph else []
# consider growing pattern by one character if j hasn't gone over the edge.
if j < len(password):
cur_char = password[j]
for adj in adjacents:
cur_direction += 1
if adj and adj.find(cur_char) != -1:
found = True
found_direction = cur_direction
if adj.find(cur_char) == 1:
# index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc.
# for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted.
shifted_count += 1
if last_direction != found_direction:
# adding a turn is correct even in the initial case when last_direction is null:
# every spatial pattern starts with a turn.
turns += 1
last_direction = found_direction
break
# if the current pattern continued, extend j and try to grow again
if found:
j += 1
# otherwise push the pattern discovered so far, if any...
else:
if j - i > 2: # don't consider length 1 or 2 chains.
result.append({
'pattern': 'spatial',
'i': i,
'j': j-1,
'token': password[i:j],
'graph': graph_name,
'turns': turns,
'shifted_count': shifted_count,
})
# ...and then start a new search for the rest of the password.
i = j
break
return result
#-------------------------------------------------------------------------------
# repeats (aaa) and sequences (abcdef) -----------------------------------------
#-------------------------------------------------------------------------------
def repeat_match(password):
result = []
repeats = groupby(password)
i = 0
for char, group in repeats:
length = len(list(group))
if length > 2:
j = i + length - 1
result.append({
'pattern': 'repeat',
'i': i,
'j': j,
'token': password[i:j+1],
'repeated_char': char,
})
i += length
return result
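# For example, repeat_match('aaab1111') returns two matches: token 'aaa'
# (i=0, j=2, repeated_char 'a') and token '1111' (i=4, j=7,
# repeated_char '1'); runs of length 1 or 2 are ignored.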
SEQUENCES = {
'lower': 'abcdefghijklmnopqrstuvwxyz',
'upper': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'digits': '01234567890',
}
def sequence_match(password):
result = []
i = 0
while i < len(password):
j = i + 1
seq = None # either lower, upper, or digits
seq_name = None
seq_direction = None # 1 for ascending seq abcd, -1 for dcba
for seq_candidate_name, seq_candidate in list(SEQUENCES.items()):
i_n = seq_candidate.find(password[i])
j_n = seq_candidate.find(password[j]) if j < len(password) else -1
if i_n > -1 and j_n > -1:
direction = j_n - i_n
if direction in [1, -1]:
seq = seq_candidate
seq_name = seq_candidate_name
seq_direction = direction
break
if seq:
while True:
if j < len(password):
prev_char, cur_char = password[j-1], password[j]
prev_n, cur_n = seq_candidate.find(prev_char), seq_candidate.find(cur_char)
if j == len(password) or cur_n - prev_n != seq_direction:
if j - i > 2: # don't consider length 1 or 2 chains.
result.append({
'pattern': 'sequence',
'i': i,
'j': j-1,
'token': password[i:j],
'sequence_name': seq_name,
'sequence_space': len(seq),
'ascending': seq_direction == 1,
})
break
else:
j += 1
i = j
return result
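# For example, sequence_match('abcd') returns a single match with
# token 'abcd', sequence_name 'lower', sequence_space 26 and
# ascending True; 'dcba' would be matched with ascending False.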
#-------------------------------------------------------------------------------
# digits, years, dates ---------------------------------------------------------
#-------------------------------------------------------------------------------
def match_all(password, pattern_name, regex):
out = []
for match in regex.finditer(password):
i = match.start()
j = match.end()
out.append({
'pattern' : pattern_name,
'i' : i,
'j' : j,
'token' : password[i:j+1]
})
return out
DIGITS_MATCH = re.compile(r'\d{3,}')
def digits_match(password):
return match_all(password, 'digits', DIGITS_MATCH)
YEAR_MATCH = re.compile(r'19\d\d|200\d|201\d')
def year_match(password):
return match_all(password, 'year', YEAR_MATCH)
def date_match(password):
l = date_without_sep_match(password)
l.extend(date_sep_match(password))
return l
DATE_WITHOUT_SEP_MATCH = re.compile(r'\d{4,8}')
def date_without_sep_match(password):
date_matches = []
for digit_match in DATE_WITHOUT_SEP_MATCH.finditer(password):
i, j = digit_match.start(), digit_match.end()
token = password[i:j+1]
end = len(token)
candidates_round_1 = [] # parse year alternatives
if len(token) <= 6:
# 2-digit year prefix
candidates_round_1.append({
'daymonth': token[2:],
'year': token[0:2],
'i': i,
'j': j,
})
# 2-digit year suffix
candidates_round_1.append({
'daymonth': token[0:end-2],
'year': token[end-2:],
'i': i,
'j': j,
})
if len(token) >= 6:
# 4-digit year prefix
candidates_round_1.append({
'daymonth': token[4:],
'year': token[0:4],
'i': i,
'j': j,
})
# 4-digit year suffix
candidates_round_1.append({
'daymonth': token[0:end-4],
'year': token[end-4:],
'i': i,
'j': j,
})
candidates_round_2 = [] # parse day/month alternatives
for candidate in candidates_round_1:
if len(candidate['daymonth']) == 2: # ex. 1 1 97
candidates_round_2.append({
'day': candidate['daymonth'][0],
'month': candidate['daymonth'][1],
'year': candidate['year'],
'i': candidate['i'],
'j': candidate['j'],
})
elif len(candidate['daymonth']) == 3: # ex. 11 1 97 or 1 11 97
candidates_round_2.append({
'day': candidate['daymonth'][0:2],
'month': candidate['daymonth'][2],
'year': candidate['year'],
'i': candidate['i'],
'j': candidate['j'],
})
candidates_round_2.append({
'day': candidate['daymonth'][0],
'month': candidate['daymonth'][1:3],
'year': candidate['year'],
'i': candidate['i'],
'j': candidate['j'],
})
elif len(candidate['daymonth']) == 4: # ex. 11 11 97
candidates_round_2.append({
'day': candidate['daymonth'][0:2],
'month': candidate['daymonth'][2:4],
'year': candidate['year'],
'i': candidate['i'],
'j': candidate['j'],
})
# final loop: reject invalid dates
for candidate in candidates_round_2:
try:
day = int(candidate['day'])
month = int(candidate['month'])
year = int(candidate['year'])
except ValueError:
continue
valid, (day, month, year) = check_date(day, month, year)
if not valid:
continue
date_matches.append( {
'pattern': 'date',
'i': candidate['i'],
'j': candidate['j'],
'token': password[i:j+1],
'separator': '',
'day': day,
'month': month,
'year': year,
})
return date_matches
DATE_RX_YEAR_SUFFIX = re.compile(r"(\d{1,2})(\s|-|/|\\|_|\.)(\d{1,2})\2(19\d{2}|200\d|201\d|\d{2})")
#DATE_RX_YEAR_SUFFIX = "(\d{1,2})(\s|-|/|\\|_|\.)"
DATE_RX_YEAR_PREFIX = re.compile(r"(19\d{2}|200\d|201\d|\d{2})(\s|-|/|\\|_|\.)(\d{1,2})\2(\d{1,2})")
def date_sep_match(password):
matches = []
for match in DATE_RX_YEAR_SUFFIX.finditer(password):
day, month, year = tuple(int(match.group(x)) for x in [1, 3, 4])
matches.append( {
'day' : day,
'month' : month,
'year' : year,
'sep' : match.group(2),
'i' : match.start(),
'j' : match.end()
})
for match in DATE_RX_YEAR_PREFIX.finditer(password):
day, month, year = tuple(int(match.group(x)) for x in [4, 3, 1])
matches.append( {
'day' : day,
'month' : month,
'year' : year,
'sep' : match.group(2),
'i' : match.start(),
'j' : match.end()
})
out = []
for match in matches:
valid, (day, month, year) = check_date(match['day'], match['month'], match['year'])
if not valid:
continue
out.append({
'pattern': 'date',
'i': match['i'],
'j': match['j']-1,
'token': password[match['i']:match['j']],
'separator': match['sep'],
'day': day,
'month': month,
'year': year,
})
return out
def check_date(day, month, year):
if 12 <= month <= 31 and day <= 12: # tolerate both day-month and month-day order
day, month = month, day
if day > 31 or month > 12:
return (False, (0, 0, 0))
if not (1900 <= year <= 2019):
return (False, (0, 0, 0))
return (True, (day, month, year))
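# For example, check_date(1, 13, 1997) tolerates the swapped order and
# returns (True, (13, 1, 1997)), while check_date(32, 5, 1997) and
# check_date(1, 1, 1899) are rejected as (False, (0, 0, 0)).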
MATCHERS = list(DICTIONARY_MATCHERS)
MATCHERS.extend([
l33t_match,
digits_match, year_match, date_match,
repeat_match, sequence_match,
spatial_match
])
def omnimatch(password, user_inputs=[]):
ranked_user_inputs_dict = {}
for i, user_input in enumerate(user_inputs):
ranked_user_inputs_dict[user_input.lower()] = i+1
user_input_matcher = _build_dict_matcher('user_inputs', ranked_user_inputs_dict)
matches = user_input_matcher(password)
for matcher in MATCHERS:
matches.extend(matcher(password))
matches.sort(key=lambda x : (x['i'], x['j']))
return matches
| {
"repo_name": "moreati/python-zxcvbn",
"path": "zxcvbn/matching.py",
"copies": "1",
"size": "18275",
"license": "mit",
"hash": -609420198228645100,
"line_mean": 32.7800369686,
"line_max": 120,
"alpha_frac": 0.4584404925,
"autogenerated": false,
"ratio": 4.065628476084538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024068968584539,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from jumpdir.directories import Directories
from jumpdir.pathfinder import PathFinder
from jumpdir.bookmarks import Bookmarks
import argparse
import os
import sys
HOME = os.getenv('HOME')
DATA_DIR = os.path.join(HOME, '.jumpdir')
BOOKMARKS = os.path.join(DATA_DIR, '.jdbookmarks.json')
CACHE_FILE = os.path.join(DATA_DIR, '.jdcache.json')
if not os.path.isdir(DATA_DIR):
os.mkdir(DATA_DIR)
def parse_args(args):
"""Parse list/tuple of arguments with argparse module
- **Parameters** and **returns**::
:param list args: arguments to be parsed
:returns namespace: parsed arguments
"""
parser = argparse.ArgumentParser(description='jumpdir')
subparsers = parser.add_subparsers(help='sub-command help',
dest='commands')
# jumpdir search ...
parser_search = subparsers.add_parser('search',
help='search home directory for a directory matching given search term')
parser_search.add_argument('search_term',
help='directory name to search for (case insensitive).',)
parser_search.set_defaults(which='search')
# jumpdir add ...
parser_add = subparsers.add_parser('add', help='add bookmark')
parser_add.add_argument('name', help='name of bookmark to add')
parser_add.add_argument('-p', '--path', default=os.getcwd(),
help="define path that bookmark points to")
# jumpdir delete ...
parser_delete = subparsers.add_parser('delete', help='delete bookmark')
parser_delete.add_argument('name', help='name of bookmark to remove')
# jumpdir list ...
subparsers.add_parser('list', help='list saved bookmarks')
return parser.parse_args(args)
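# For example (illustrative only):
# parse_args(['add', 'mysite', '-p', '/tmp/mysite']) yields a namespace with
# commands='add', name='mysite' and path='/tmp/mysite', while
# parse_args(['search', 'docs']) yields commands='search' and
# search_term='docs'.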
def run_search(search_term, bookmarks):
if search_term == HOME:
return HOME
# Check bookmarks
try:
return bookmarks[search_term]
except KeyError:
pass
# Check cache
ddict = Directories(HOME, CACHE_FILE)
try:
return ddict.shallowest_path_to(search_term)
except KeyError:
pass
# Remap home folder and check for match
ddict.map_directories()
try:
return ddict.shallowest_path_to(search_term)
except (KeyError, IndexError):
pass
def main(argv=sys.argv[1:]):
if len(argv) < 1:
raise ValueError("error: no commands given")
args = parse_args(argv)
bookmarks = Bookmarks(BOOKMARKS)
# Sub command logic
if not args.commands:
raise ValueError("error: command not recognised")
elif args.commands == 'add':
bookmarks.add_bookmark(args.name, args.path)
raise SystemExit
elif args.commands == 'delete':
bookmarks.del_bookmark(args.name)
raise SystemExit
elif args.commands == 'list':
bookmarks.list_bookmarks()
raise SystemExit
else:
return run_search(args.search_term, bookmarks)
if __name__ == '__main__':
main()
| {
"repo_name": "cgmcintyr/jumpdir",
"path": "jumpdir/main.py",
"copies": "2",
"size": "3041",
"license": "mit",
"hash": 9053473243663789000,
"line_mean": 28.8137254902,
"line_max": 84,
"alpha_frac": 0.6484708977,
"autogenerated": false,
"ratio": 3.9699738903394257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009914393564912597,
"num_lines": 102
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from mousestyles import data
from mousestyles.classification import clustering
def test_prep_data():
# Check prep_data return the correct dimension
mouse_data = data.load_all_features()
preped_data = clustering.prep_data(mouse_data)
assert preped_data.shape == (170, 20)
def test_hc_param():
# Check get_optimal_hc_params returns appropriate parameters
mouse_data = data.load_all_features()
preped_data = clustering.prep_data(mouse_data)
method, dist = clustering.get_optimal_hc_params(preped_data)
assert method in ['ward', 'average', 'complete']
assert dist in ['cityblock', 'euclidean', 'chebychev']
def test_fit_hc():
# Check fit_hc returns appropriate result
mouse_data = data.load_all_features()
preped_data = clustering.prep_data(mouse_data)
mouse_day_X = preped_data[:, 2:]
res = clustering.fit_hc(mouse_day_X, "average", "chebychev",
num_clusters=range(2, 17))
assert len(res) == 2
assert len(res[0]) == 15
assert len(res[1][0]) == 170
assert len(set(res[1][14])) <= 16
# silhouette score should be between -1 and 1
assert all(value < 1 for value in res[0])
assert all(value > -1 for value in res[0])
def test_fit_kmeans():
# Check get_optimal_fit_kmeans returns expected result
mouse_data = data.load_all_features()
preped_data = clustering.prep_data(mouse_data)
mouse_day_X = preped_data[:, 2:]
res = clustering.get_optimal_fit_kmeans(
mouse_day_X, num_clusters=range(2, 17), raw=False)
assert len(res) == 2
assert len(res[0]) == 15
assert len(res[1][0]) == 170
assert len(set(res[1][14])) <= 16
# silhouette score should be between -1 and 1
assert all(value < 1 for value in res[0])
assert all(value > -1 for value in res[0])
def test_cluster_in_strain():
# Check cluster_in_strain calculate correct strain counts
res = clustering.cluster_in_strain([1, 2, 1, 0, 0], [0, 1, 1, 2, 1])
assert res == {0: [0, 1, 0], 1: [1, 1, 1], 2: [1, 0, 0]}
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/classification/tests/test_clustering.py",
"copies": "3",
"size": "2156",
"license": "bsd-2-clause",
"hash": 7564547194971933000,
"line_mean": 35.5423728814,
"line_max": 72,
"alpha_frac": 0.6410018553,
"autogenerated": false,
"ratio": 3.232383808095952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5373385663395952,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import raises
import numpy as np
from numpy.testing import (assert_equal,
assert_almost_equal)
from ..irr import (compute_ts,
simulate_ts_dist,
simulate_npc_dist)
from ..data import nsgk
R = 10
Ns = 35
from numpy.random import RandomState
RNG = RandomState(42)
res = RNG.binomial(1, .5, (R, Ns))
def test_irr():
rho_s = compute_ts(res)
assert_almost_equal(rho_s, 0.51936507)
#res = spt(group, condition, response, iterations=1000)
#res1 = spt(group, condition, response, iterations=1000)
#assert_less(res[1], 0.01)
#assert_almost_equal(res[3], res1[3])
def test_simulate_ts_dist():
expected_res1 = {'dist': None,
'geq': 624,
'obs_ts': 0.51936507936507936,
'pvalue': 0.0624,
'num_perm': 10000}
res1 = simulate_ts_dist(res, seed=42)
assert_equal(res1, expected_res1)
expected_res2 = {'geq': 9457,
'obs_ts': 0.46285714285714286,
'num_perm': 10000}
res2 = simulate_ts_dist(res[:5], seed=42, keep_dist=True)
assert_equal(res2['geq'], expected_res2['geq'])
assert_equal(res2['obs_ts'], expected_res2['obs_ts'])
assert_equal(res2['num_perm'], expected_res2['num_perm'])
assert_equal(res2['dist'].shape, (10000,))
@attr('slow')
def test_with_naomi_data():
""" Test irr functionality using Naomi data."""
x = nsgk()
t = x[1]
y = t[0]
res = simulate_ts_dist(y, num_perm=10, keep_dist=True, seed=42)
expected_res = {'dist': np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]),
'geq': 10,
'num_perm': 10,
'pvalue': 1,
'obs_ts': 1.0}
assert_equal(res, expected_res)
freq = RNG.choice([0.2, 0.8], Ns)
res2 = np.zeros((R, Ns))
for i in range(len(freq)):
res2[:, i] = RNG.binomial(1, freq[i], R)
def test_irr_concordance():
rho_s2 = compute_ts(res2)
assert_almost_equal(rho_s2, 0.70476190476190481)
def test_simulate_ts_dist_concordance():
expected_res_conc = {'dist': None,
'geq': 0,
'obs_ts': 0.70476190476190481,
'pvalue': 0.0,
'num_perm': 10000}
res_conc = simulate_ts_dist(res2, seed=42)
assert_equal(res_conc, expected_res_conc)
res1 = simulate_ts_dist(res, keep_dist=True, seed=42)
res_conc = simulate_ts_dist(res2, keep_dist=True, seed=42)
true_pvalue = np.array(
[res1['geq'] / res1['num_perm'], res_conc['geq'] / res_conc['num_perm']])
rho_perm = np.transpose(np.vstack((res1['dist'], res_conc['dist'])))
def test_simulate_npc_dist():
expected_npc_res = {'num_perm': 10000,
'obs_npc': -0.010547525099011886,
'pvalue': 0.0016}
obs_npc_res = simulate_npc_dist(
rho_perm, size=np.array([Ns, Ns]), pvalues=true_pvalue)
assert_equal(obs_npc_res, expected_npc_res)
@raises(ValueError)
def test_simulate_npc_error():
simulate_npc_dist(rho_perm, size=np.array([Ns, Ns]))
def test_simulate_npc_perfect():
mat1 = np.tile(np.array([1, 0, 1, 0, 0]), (5, 1))
mat2 = np.tile(np.array([0, 1, 0]), (5, 1))
videos = [mat1, mat2]
time_stamps = np.array([5, 3])
d = [] # list of the permutation distributions for each video
tst = [] # list of test statistics for each video
pval = []
for j in range(len(videos)): # loop over videos
res = simulate_ts_dist(videos[j], keep_dist=True, seed=5)
d.append(res['dist'])
tst.append(res['obs_ts'])
pval.append(res['pvalue'])
perm_distr = np.asarray(d).transpose()
overall1 = simulate_npc_dist(
perm_distr, size=time_stamps, pvalues=np.array(pval))
overall2 = simulate_npc_dist(
perm_distr, size=time_stamps, obs_ts=tst)
expected_overall = {'num_perm': 10000,
'obs_npc': -0.0076080098859340932,
'pvalue': 0.0}
assert_equal(overall1, expected_overall)
assert_equal(overall2, expected_overall)
| {
"repo_name": "qqqube/permute",
"path": "permute/tests/test_irr.py",
"copies": "1",
"size": "4322",
"license": "bsd-2-clause",
"hash": -5376585036092745000,
"line_mean": 31.7424242424,
"line_max": 79,
"alpha_frac": 0.5666358168,
"autogenerated": false,
"ratio": 2.9889349930843707,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40555708098843707,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pylint.checkers import BaseChecker
from pylint.checkers.utils import safe_infer
from pylint.interfaces import IAstroidChecker
from astroid import YES, List
from ..__pkginfo__ import BASE_ID
class SettingsShecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'settings'
msgs = {
'E%s21' % BASE_ID: (
            'Required setting "%s" is missing',
            'required-setting-missed',
            'Used when a required setting is missing from the settings file.'),
'E%s22' % BASE_ID: (
'Empty setting "%s"',
'empty-setting',
'Used when setting is empty value.'),
'W%s21' % BASE_ID: (
'Improper settings import',
'improper-settings-import',
'Used when settings is not imported from django.conf'),
}
_REQUIRED_SETTINGS = ('STATIC_ROOT', 'ALLOWED_HOSTS')
@staticmethod
def _is_settings_module(node):
if node.name.rsplit('.', 1)[-1] == 'settings':
return True
return False
def visit_import(self, node):
if ('settings' in node.as_string() and
'django.conf' not in node.as_string()):
self.add_message('improper-settings-import', node=node)
def visit_from(self, node):
if node.modname.rsplit('.', 1)[-1] == 'settings' or (
'settings' in dict(node.names) and
'django.conf' not in node.modname):
self.add_message('improper-settings-import', node=node)
def leave_module(self, node):
if self._is_settings_module(node):
module_locals = node.locals
for setting_name in self._REQUIRED_SETTINGS:
if setting_name not in module_locals:
self.add_message(
'required-setting-missed', args=setting_name, node=node)
else:
setting = module_locals[setting_name][-1]
val = safe_infer(setting)
if val is not None and val is not YES:
if isinstance(val, List):
is_empty = not val.elts
else:
is_empty = not val.value
if is_empty:
self.add_message('empty-setting', args=setting_name,
node=setting)
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/checkers/settings.py",
"copies": "1",
"size": "2509",
"license": "mit",
"hash": -6074785726384013000,
"line_mean": 36.447761194,
"line_max": 80,
"alpha_frac": 0.5332801913,
"autogenerated": false,
"ratio": 4.325862068965518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5359142260265517,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from astroid import Name, CallFunc, AssName, Getattr, Tuple, Subscript
from ..__pkginfo__ import BASE_ID
class ViewsChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'views'
msgs = {
'W%s31' % BASE_ID: (
'is_authenticated is not called',
'is-authenticated-not-called',
'Used when is_authenticated method is not called'),
'W%s32' % BASE_ID: (
'objects.get is used without catching DoesNotExist',
'objects-get-without-doesnotexist',
'Used when Model.objects.get is used without enclosing it '
'in try-except block to catch DoesNotExist exception.'),
'W%s33' % BASE_ID: (
'Fetching model objects only for getting len',
'fetching-db-objects-len',
'Used when there is db query that fetches objects from '
'database only to check the number of returned objects.'),
'W%s34' % BASE_ID: (
'Accessing raw GET or POST data, consider using forms',
'raw-get-post-access',
'Used when request.GET or request.POST dicts is accessed '
'directly, it is better to use forms.'),
}
_is_view_function = False
_is_view_class = False
_is_inside_try_except = False
_try_except_node = None
_is_len = False
@staticmethod
def _is_does_not_exist(node):
if (isinstance(node, (Name, Getattr)) and
'DoesNotExist' in node.as_string()):
return True
return False
@staticmethod
def _is_getattr_or_name(node, name):
if ((isinstance(node, Name) and node.name == name) or
(isinstance(node, Getattr) and node.attrname == name)):
return True
return False
def visit_attribute(self, node):
parent = node.parent
expr = node.expr
if self._is_getattr_or_name(expr, 'user'):
if (node.attrname == 'is_authenticated' and
not isinstance(parent, CallFunc)):
self.add_message('is-authenticated-not-called', node=node)
elif self._is_getattr_or_name(expr, 'request'):
if node.attrname in ('GET', 'POST'):
if (isinstance(parent, Subscript) or
isinstance(parent, Getattr) and
parent.attrname == 'get'):
self.add_message('raw-get-post-access', node=node)
elif isinstance(parent, Getattr) and node.attrname == 'objects':
if parent.attrname == 'get':
if self._is_view_function or self._is_view_class:
if not self._is_inside_try_except:
self.add_message(
'objects-get-without-doesnotexist', node=node)
else:
for h in self._try_except_node.handlers:
if self._is_does_not_exist(h.type):
break
elif isinstance(h.type, Tuple):
_does_not_exist_found = False
for exc_cls in h.type.elts:
if self._is_does_not_exist(exc_cls):
_does_not_exist_found = True
break
if _does_not_exist_found:
break
else:
self.add_message(
'objects-get-without-doesnotexist', node=node)
elif parent.attrname in ('all', 'filter', 'exclude'):
if self._is_len:
self.add_message('fetching-db-objects-len', node=node)
def visit_functiondef(self, node):
if 'views' in node.root().file:
args = node.args.args
if (args and isinstance(args[0], AssName) and
args[0].name == 'request'):
self._is_view_function = True
def leave_functiondef(self, node):
self._is_view_function = False
def visit_tryexcept(self, node):
self._is_inside_try_except = True
self._try_except_node = node
def leave_tryexcept(self, node):
self._is_inside_try_except = False
self._try_except_node = None
def visit_call(self, node):
if isinstance(node.func, Name) and node.func.name == 'len':
self._is_len = True
def leave_call(self, node):
self._is_len = False
def visit_classdef(self, node):
if node.is_subtype_of('django.views.generic.base.View'):
self._is_view_class = True
def leave_classdef(self, node):
self._is_view_class = False
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/checkers/views.py",
"copies": "1",
"size": "4979",
"license": "mit",
"hash": -8903418102390117000,
"line_mean": 38.832,
"line_max": 78,
"alpha_frac": 0.5312311709,
"autogenerated": false,
"ratio": 4.194608256107835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225839427007835,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from ..__pkginfo__ import BASE_ID
class LayoutChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'layout'
msgs = {
'W%s01' % BASE_ID: (
'Form %s not in forms module',
'forms-layout',
'Used when form class definition is not in forms module.'),
'W%s02' % BASE_ID: (
'Admin class %s not in admin module',
'admin-layout',
'Used when admin class definition is not in admin module.'),
}
def leave_class(self, node):
if node.is_subtype_of('django.forms.forms.BaseForm'):
if not ('forms' in node.root().file):
self.add_message('forms-layout', node=node, args=(node.name,))
elif node.is_subtype_of('django.contrib.admin.options.ModelAdmin'):
if not ('admin' in node.root().file):
self.add_message('admin-layout', node=node, args=(node.name,))
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/checkers/layout.py",
"copies": "1",
"size": "1126",
"license": "mit",
"hash": -6042957599171366000,
"line_mean": 35.3225806452,
"line_max": 78,
"alpha_frac": 0.593250444,
"autogenerated": false,
"ratio": 3.923344947735192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5016595391735191,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
class LayoutChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'layout'
msgs = {
'W5301': ('Form %s not in forms module',
'forms-layout',
'Used when form class definition is not in forms module.'),
'W5302': ('Admin class %s not in admin module',
'admin-layout',
'Used when admin class definition is not in admin module.'),
}
def leave_class(self, node):
if node.is_subtype_of('django.forms.forms.BaseForm'):
if not ('forms' in node.root().file):
self.add_message('W5301', node=node, args=(node.name,))
elif node.is_subtype_of('django.contrib.admin.options.ModelAdmin'):
if not ('admin' in node.root().file):
self.add_message('W5302', node=node, args=(node.name,))
| {
"repo_name": "johndeng/django_linter",
"path": "django_linter/checkers/layout.py",
"copies": "1",
"size": "1055",
"license": "mit",
"hash": 8075019166732364000,
"line_mean": 38.0740740741,
"line_max": 78,
"alpha_frac": 0.590521327,
"autogenerated": false,
"ratio": 4.073359073359073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5163880400359073,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from pylint.checkers.utils import safe_infer
from astroid import MANAGER, Class, Name, Instance, YES
from astroid.builder import AstroidBuilder
try:
DjangoModel = AstroidBuilder(MANAGER).string_build("""
from django.db import models
class Model(models.Model):
id = models.AutoField()""").lookup('Model')[1][0]
except IndexError:
DjangoModel = None
def transform_factory_return(node):
if (isinstance(node.func, Name) and
'factory' in node.func._repr_name().lower()):
val = safe_infer(node.func)
if (isinstance(val, Class) and
val.is_subtype_of('factory.django.DjangoModelFactory')):
try:
model = safe_infer(val.locals['Meta'][0].locals['model'][0])
except (KeyError, IndexError):
pass
else:
if model is not None and model is not YES:
if isinstance(model, Class):
def infer_call_result(self, caller, context=None):
yield Instance(model)
val.infer_call_result = infer_call_result
elif DjangoModel is not None:
def infer_call_result(self, caller, context=None):
yield Instance(DjangoModel)
val.infer_call_result = infer_call_result
| {
"repo_name": "geerk/django_linter",
"path": "django_linter/transformers/factories.py",
"copies": "1",
"size": "1462",
"license": "mit",
"hash": -8051333480113470000,
"line_mean": 38.5135135135,
"line_max": 76,
"alpha_frac": 0.5820793434,
"autogenerated": false,
"ratio": 4.403614457831325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5485693801231325,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from scipy.cluster.hierarchy import linkage
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn import metrics
import numpy as np
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
from mousestyles.data.utils import day_to_mouse_average
# prep data functions
def prep_data(mouse_data, melted=False, std=True, rescale=True):
"""
    Returns an ndarray to be used in clustering algorithms:
column 0 : strain,
column 1: mouse,
other columns corresponding to feature avg/std of a mouse over 16 days
that may or may not be rescaled to the same unit as specified
Parameters
----------
mouse_data:
(i) a 21131 * (4 + ) pandas DataFrame,
column 0 : strain,
column 1: mouse,
column 2: day,
column 3: hour,
other columns corresponding to features
or
(ii) a 1921 * (3 + ) pandas DataFrame,
column 0: strain,
column 1: mouse,
column 2: day,
other columns corresponding to features
melted: bool,
False if the input mouse_data is of type (i)
std: bool,
whether the standard deviation of each feature is returned
rescale: bool,
whether each column is rescaled or not (rescale is performed by the
column's maximum)
Returns
-------
The ndarray as specified
"""
if melted:
mouse_X = np.array(mouse_data.iloc[:, 3:], dtype=float)
else:
mouse_X = np.array(mouse_data.iloc[:, 4:], dtype=float)
mouse_labels = np.array(mouse_data.iloc[:, 0:3])
mouse_dayavg, mouse_daystd = day_to_mouse_average(
mouse_X, mouse_labels, num_strains=16, stdev=True, stderr=False)
mouse_dayavgstd = np.hstack([mouse_dayavg, mouse_daystd[:, 2:]])
mouse_dayavgstd_X = mouse_dayavgstd[:, 2:]
mouse_dayavgstd_X_scl = mouse_dayavgstd_X / np.max(
mouse_dayavgstd_X, axis=0)
mouse_dayavgstd_scl = np.hstack(
[mouse_dayavgstd[:, 0:2], mouse_dayavgstd_X_scl])
if (std is False and rescale is False):
return mouse_dayavg
elif (std is True and rescale is True):
return mouse_dayavgstd
elif (std is False and rescale is True):
return mouse_dayavgstd_scl[:, 0:(mouse_dayavg.shape[1])]
else:
return mouse_dayavgstd_scl
# model fitting functions
def get_optimal_hc_params(mouse_day):
"""
Returns a list of 2: [method, dist]
method: {'ward', 'average', 'complete'}
dist: {'cityblock', 'euclidean', 'chebychev'}
Parameters
----------
mouse_day: a 170 * M numpy array,
column 0 : strain,
column 1: mouse,
other columns corresponding to feature avg/std of a mouse over 16 days
Returns
-------
method_distance: list
[method, dist]
"""
methods = ['ward', 'average', 'complete']
dists = ['cityblock', 'euclidean', 'chebychev']
method_dists = [(methods[i], dists[j]) for i in range(len(methods))
for j in range(len(dists))]
method_dists = [(method, dist) for method, dist in method_dists
if method != 'ward' or dist == 'euclidean']
cs = []
for method, dist in method_dists:
Z = linkage(mouse_day[:, 2:], method=method, metric=dist)
c, coph_dists = cophenet(Z, pdist(mouse_day[:, 2:]))
cs.append(c)
# determine the distance method
method, dist = method_dists[np.argmax(cs)]
return [method, dist]
def fit_hc(mouse_day_X, method, dist, num_clusters=range(2, 17)):
"""
Returns a list of 2: [silhouettes, cluster_labels]
silhouettes: list of float,
cluster_labels: list of list,
each sublist is the labels corresponding to the silhouette
Parameters
----------
mouse_day_X: a 170 * M numpy array,
all columns corresponding to feature avg/std of a mouse over 16 days
method: str,
method of calculating distance between clusters
dist: str,
distance metric
num_clusters: range
range of number of clusters
Returns
-------
A list of 2: [silhouettes, cluster_labels]
"""
if (dist == "chebychev"):
dist = "chebyshev"
cluster_labels = []
silhouettes = []
for n_clusters in num_clusters:
clustering = AgglomerativeClustering(
linkage=method, n_clusters=n_clusters)
clustering.fit(mouse_day_X)
labels = clustering.labels_
silhouettes.append(metrics.silhouette_score(
mouse_day_X, labels, metric=dist))
cluster_labels.append(list(labels))
return [silhouettes, cluster_labels]
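# Illustrative usage sketch (not part of the original module): chaining
# prep_data, get_optimal_hc_params and fit_hc for hierarchical clustering.
# `mouse_data` is assumed to be a DataFrame shaped as described in
# prep_data's docstring.
#
#   mouse_day = prep_data(mouse_data)                 # strain, mouse + rescaled avg/std
#   method, dist = get_optimal_hc_params(mouse_day)   # pick linkage/metric by cophenetic corr.
#   sils, labels = fit_hc(mouse_day[:, 2:], method, dist, num_clusters=range(2, 10))
#   best = labels[int(np.argmax(sils))]               # labels with the highest silhouette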
def get_optimal_fit_kmeans(mouse_X, num_clusters, raw=False):
"""
Returns a list of 2: [silhouettes, cluster_labels]
silhouettes: list of float,
cluster_labels: list of list,
each sublist is the labels corresponding to the silhouette
Parameters
----------
mouse_X: a 170 * M numpy array or 21131 * M numpy array,
all columns corresponding to feature avg/std of a mouse over 16 days
or the raw data without averaging over days
num_clusters: range or a list or a numpy array
range of number of clusters
raw: a boolean with default is False
False if using the 170 * M array
Returns
-------
A list of 2: [silhouettes, cluster_labels]
"""
if raw:
sample_amount = 1000
else:
sample_amount = mouse_X.shape[0]
cluster_labels = []
silhouettes = []
for n_clusters in num_clusters:
clustering = KMeans(n_clusters=n_clusters)
clustering.fit(mouse_X)
labels = clustering.labels_
silhouettes.append(
metrics.silhouette_score(
mouse_X, labels, metric="euclidean",
sample_size=sample_amount))
cluster_labels.append(list(labels))
return [silhouettes, cluster_labels]
def cluster_in_strain(labels_first, labels_second):
"""
    Returns a dictionary indicating the count of different clusters within
    each strain (when cluster labels are passed first) or the count of
    different strains within each cluster (when strain labels are passed
    first).
    Parameters
    ----------
    labels_first: numpy array or list
        A numpy array or list of integers representing which cluster
        each mouse is in, or which strain each mouse is in.
    labels_second: numpy array or list
        A numpy array or list of integers (0-15) representing which strain
        each mouse is in, or which cluster each mouse is in.
    Returns
    -------
    count_data : dictionary
        A dictionary whose keys are strain numbers and whose values are
        lists giving the distribution of clusters, or whose keys are
        cluster numbers and whose values are lists giving the distribution
        of each strain.
Examples
--------
>>> count_1 = cluster_in_strain([1,2,1,0,0],[0,1,1,2,1])
"""
count_data = {}
labels_first = np.asarray(labels_first)
labels_second = np.asarray(labels_second)
for label_2 in np.unique(labels_second):
label_2_index = labels_second == label_2
label_1_sub = labels_first[label_2_index]
count_list = []
for label_1 in np.unique(labels_first):
count_list.append(sum(label_1_sub == label_1))
count_data[label_2] = count_list
return count_data
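# Worked example (sketch) for the docstring call above:
#   cluster_in_strain([1, 2, 1, 0, 0], [0, 1, 1, 2, 1])
# groups labels_first by each unique value of labels_second, giving
#   {0: [0, 1, 0], 1: [1, 1, 1], 2: [1, 0, 0]}
# i.e. key 1 collects positions 1, 2 and 4, whose labels_first values are
# 2, 1 and 0 -- one of each.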
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/classification/clustering.py",
"copies": "3",
"size": "7621",
"license": "bsd-2-clause",
"hash": 5986788904131656000,
"line_mean": 32.7212389381,
"line_max": 78,
"alpha_frac": 0.6231465687,
"autogenerated": false,
"ratio": 3.82964824120603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.595279480990603,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from unittest import TestCase
from fixtureupper.register import UpperRegister
from tests.models import Article, Author, CoWrite, Draft
class BaseTestCase(TestCase):
def setUp(self):
self.SqlAlchemyModelFixtureUpper = UpperRegister('SqlAlchemyModel')
class AuthorFixtureUpper(self.SqlAlchemyModelFixtureUpper):
model = Author
defaults = {}
class ArticleFixtureUpper(self.SqlAlchemyModelFixtureUpper):
model = Article
defaults = {}
class DraftFixtureUpper(self.SqlAlchemyModelFixtureUpper):
model = Draft
defaults = {}
class CoWriteFixtureUpper(self.SqlAlchemyModelFixtureUpper):
model = CoWrite
defaults = {}
self.m_fu = self.SqlAlchemyModelFixtureUpper(start_id=150)
self.AuthorFixtureUpperClass = AuthorFixtureUpper
self.ArticleFixtureUpperClass = ArticleFixtureUpper
self.DraftFixtureUpperClass = DraftFixtureUpper
self.CoWriteFixtureUpperClass = CoWriteFixtureUpper
self.au_fu = self.m_fu.get_upper('Author')
self.ar_fu = self.m_fu.get_upper('Article', start_id=250)
self.dr_fu = self.m_fu.get_upper('Draft', start_id=300)
self.co_fu = self.m_fu.get_upper('CoWrite', start_id=370)
self.json_dict = [
{
'__class__': 'Article',
'__value__': {'id': 250, 'main_author_id': 150}
},
{
'__class__': 'Article',
'__value__': {'id': 251, 'main_author_id': 150}
},
{
'__class__': 'Article',
'__value__': {
'id': 252,
'main_author_id': 151,
'is_visible': True,
'title': u'some title',
}
},
{
'__class__': 'Author',
'__value__': {'id': 150}
},
{
'__class__': 'Author',
'__value__': {'id': 151}
},
]
| {
"repo_name": "Rhathe/fixtureupper",
"path": "tests/functional/sqlalchemy/__init__.py",
"copies": "1",
"size": "2213",
"license": "mit",
"hash": -5030520393336035000,
"line_mean": 32.0298507463,
"line_max": 75,
"alpha_frac": 0.5155896972,
"autogenerated": false,
"ratio": 4.443775100401607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459364797601607,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from yieldfrom2.syntax import expand_yield_from, from_, return_
def foobar():
for i in range(3):
try:
x = yield i
while x is not None:
x = yield x
except RuntimeError as e:
print("Caught error:", e)
# FIXME: should return in non-generator functions.
return_(123)
@expand_yield_from
def yieldfrom_test():
try:
x = yield from_(foobar())
print("yieldfrom result:", x)
yield from_([100, 200, 300])
except Exception as err:
print("yieldfrom exc:", err)
yield from_([1000, 2000])
yield from_(foobar())
if __name__ == '__main__':
it = yieldfrom_test()
assert next(it) == 0
assert it.send('foobar') == 'foobar'
assert next(it) == 1
assert it.throw(RuntimeError("hello world")) == 2
assert next(it) == 100
# FIXME: raises when forwarding the error to list iterator
# assert it.throw(Exception("cuckoo")) == 1000
# assert next(it) == 2000
it.close()
| {
"repo_name": "immerrr/yieldfrom2",
"path": "test_yieldfrom2.py",
"copies": "1",
"size": "1133",
"license": "bsd-2-clause",
"hash": 8560333471266669000,
"line_mean": 25.9761904762,
"line_max": 63,
"alpha_frac": 0.5701676964,
"autogenerated": false,
"ratio": 3.906896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99726550770483,
"avg_score": 0.0008818342151675485,
"num_lines": 42
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import json
import os
from collections import defaultdict
try:
from os import scandir
except ImportError:
from scandir import scandir
class Directories:
"""Used to build dictionary of directory names and corresponding paths
    Recursively creates and stores a dict of directory names and a list of
corresponding paths below a given root directory.
- **parameters** and **instance variables**::
:param str base_dir : path to root directory
:param str cache_file: path to json directory cache file
:ivar defaultdict dirs: *{str directory name: list paths}* dictionary
of directory names and corresponding paths
"""
def __init__(self, base_dir, cache_file):
self.base_dir = base_dir # Top level directory to start from
self.cache_file = cache_file
self.dirs = {}
self.load_cache(cache_file)
def __getitem__(self, key):
return self.dirs[key]
def __iter__(self):
return iter(self.dirs)
def map_directories(self):
self.dirs = defaultdict(list)
self.dict_builder(self.base_dir)
self.cache_dirs(self.cache_file)
def dict_builder(self, base_dir):
"""Walks through base_dir and populates instance's dirs defaultdict
- **parameters** and **returns**::
:param str base_dir: path to root directory
:returns None
"""
try:
for entry in scandir(base_dir):
entry_name = entry.name
try:
entry_name = unicode(entry.name, 'utf-8')
except NameError:
pass
if entry.is_dir() and not entry_name.startswith('.'):
self.dirs[entry.name.lower()].append(entry.path)
self.dict_builder(entry.path)
except OSError:
# Permission denied
return
def shallowest_path_to(self, dname):
"""Returns the shallowest path from corresponding paths in dirs dictionary
- **parameters** and **returns**::
:param str dname: directory name to retrieve path from
:returns str: shallowest path from corresponding list of paths
"""
return sorted(self.dirs[dname], key=len)[0]
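    # Example for shallowest_path_to (illustrative, not from the original
    # source): if self.dirs['src'] == ['/home/u/a/b/src', '/home/u/src'],
    # then shallowest_path_to('src') returns '/home/u/src' -- the candidate
    # with the shortest path string, used here as a proxy for depth.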
def cache_dirs(self, cache_file):
"""Saves current directory dictionary as json file
- **parameters** and **returns**::
:param str cache_file: location of file to save to
:returns None
"""
if cache_file is None:
cache_file = self.cache_file
with open(cache_file, "w") as cache_file:
json.dump(self.dirs, cache_file, sort_keys=True,
indent=4, separators=(',', ': '))
def load_cache(self, cache_file):
"""Loads a directory dictionary from a json file
- **parameters** and **returns**::
:param str cache_file: location of file to load
:returns None
"""
if cache_file is None:
cache_file = self.cache_file
if os.path.exists(cache_file):
with open(cache_file, 'r') as cache_file:
self.dirs = json.load(cache_file)
else:
with open(cache_file, 'w') as cache_file:
self.dirs = {}
json.dump(self.dirs, cache_file)
| {
"repo_name": "chrsintyre/jumpdir",
"path": "jumpdir/directories.py",
"copies": "2",
"size": "3488",
"license": "mit",
"hash": -8204736896167319000,
"line_mean": 33.1960784314,
"line_max": 82,
"alpha_frac": 0.5791284404,
"autogenerated": false,
"ratio": 4.36,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002602053568065955,
"num_lines": 102
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
import click
import semantic_version
from changes import attributes
log = logging.getLogger(__name__)
def current_version(module_name):
return attributes.extract_attribute(module_name, '__version__')
def get_new_version(module_name, current_version, no_input,
major=False, minor=False, patch=False):
proposed_new_version = increment(
current_version,
major=major,
minor=minor,
patch=patch
)
if no_input:
new_version = proposed_new_version
else:
new_version = click.prompt(
'What is the release version for "{0}" '.format(module_name),
default=proposed_new_version
)
return new_version.strip()
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version)
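# Minimal sketch of the expected behaviour of increment() (not part of the
# original module); it assumes semantic_version.Version allows the in-place
# attribute updates used above. The helper is illustrative and never called.
def _increment_examples():
    assert increment('1.2.3') == '1.2.4'               # patch bump (default)
    assert increment('1.2.3', minor=True) == '1.3.0'   # minor bump resets patch
    assert increment('1.2.3', major=True) == '2.0.0'   # major bump resets minor and patch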
def increment_version(context):
"""Increments the __version__ attribute of your module's __init__."""
attributes.replace_attribute(
context.module_name,
'__version__',
context.new_version,
dry_run=context.dry_run)
log.info('Bumped version from %s to %s' % (context.current_version, context.new_version))
| {
"repo_name": "goldsborough/changes",
"path": "changes/version.py",
"copies": "1",
"size": "1882",
"license": "mit",
"hash": -9023675135143636000,
"line_mean": 25.8857142857,
"line_max": 93,
"alpha_frac": 0.6418703507,
"autogenerated": false,
"ratio": 4.219730941704036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5361601292404036,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import math
import re
from zxcvbn.matching import (KEYBOARD_STARTING_POSITIONS, KEYBOARD_AVERAGE_DEGREE,
KEYPAD_STARTING_POSITIONS, KEYPAD_AVERAGE_DEGREE)
def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
# http://blog.plover.com/math/choose.html
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result
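# Sanity note (illustrative): because of the true division above, binom
# returns a float, e.g. binom(5, 2) == 10.0 and binom(2, 5) == 0. That is
# fine here, since the result only feeds the logarithmic entropy estimates.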
def lg(n):
"""
Returns logarithm of n in base 2.
"""
return math.log(n, 2)
# ------------------------------------------------------------------------------
# minimum entropy search -------------------------------------------------------
# ------------------------------------------------------------------------------
#
# takes a list of overlapping matches, returns the non-overlapping sublist with
# minimum entropy. O(nm) dp alg for length-n password with m candidate matches.
# ------------------------------------------------------------------------------
def get(a, i):
if i < 0 or i >= len(a):
return 0
return a[i]
def minimum_entropy_match_sequence(password, matches):
"""
Returns minimum entropy
Takes a list of overlapping matches, returns the non-overlapping sublist with
minimum entropy. O(nm) dp alg for length-n password with m candidate matches.
"""
bruteforce_cardinality = calc_bruteforce_cardinality(password) # e.g. 26 for lowercase
up_to_k = [0] * len(password) # minimum entropy up to k.
# for the optimal sequence of matches up to k, holds the final match (match['j'] == k). null means the sequence ends
    # with a brute-force character.
backpointers = []
for k in range(0, len(password)):
# starting scenario to try and beat: adding a brute-force character to the minimum entropy sequence at k-1.
up_to_k[k] = get(up_to_k, k-1) + lg(bruteforce_cardinality)
backpointers.append(None)
for match in matches:
if match['j'] != k:
continue
i, j = match['i'], match['j']
# see if best entropy up to i-1 + entropy of this match is less than the current minimum at j.
up_to = get(up_to_k, i-1)
candidate_entropy = up_to + calc_entropy(match)
if candidate_entropy < up_to_k[j]:
#print "New minimum: using " + str(match)
#print "Entropy: " + str(candidate_entropy)
up_to_k[j] = candidate_entropy
backpointers[j] = match
# walk backwards and decode the best sequence
match_sequence = []
k = len(password) - 1
while k >= 0:
match = backpointers[k]
if match:
match_sequence.append(match)
k = match['i'] - 1
else:
k -= 1
match_sequence.reverse()
# fill in the blanks between pattern matches with bruteforce "matches"
# that way the match sequence fully covers the password: match1.j == match2.i - 1 for every adjacent match1, match2.
def make_bruteforce_match(i, j):
return {
'pattern': 'bruteforce',
'i': i,
'j': j,
'token': password[i:j+1],
'entropy': lg(math.pow(bruteforce_cardinality, j - i + 1)),
'cardinality': bruteforce_cardinality,
}
k = 0
match_sequence_copy = []
for match in match_sequence:
i, j = match['i'], match['j']
if i - k > 0:
match_sequence_copy.append(make_bruteforce_match(k, i - 1))
k = j + 1
match_sequence_copy.append(match)
if k < len(password):
match_sequence_copy.append(make_bruteforce_match(k, len(password) - 1))
match_sequence = match_sequence_copy
min_entropy = 0 if len(password) == 0 else up_to_k[len(password) - 1] # corner case is for an empty password ''
crack_time = entropy_to_crack_time(min_entropy)
# final result object
return {
'password': password,
'entropy': round_to_x_digits(min_entropy, 3),
'match_sequence': match_sequence,
'crack_time': round_to_x_digits(crack_time, 3),
'crack_time_display': display_time(crack_time),
'score': crack_time_to_score(crack_time),
}
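# Schematic example (not from the original source): for the password 'abc123'
# with two candidate matches -- a sequence match over 'abc' (1 + lg 3, about
# 2.6 bits) and a digits match over '123' (lg 1000, about 10 bits) -- the DP
# above compares that roughly 12.6-bit cover against brute-forcing all six
# characters at lg(36) (about 5.2 bits) each, roughly 31 bits, and keeps the
# cheaper match-based cover.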
def round_to_x_digits(number, digits):
"""
Returns 'number' rounded to 'digits' digits.
"""
return round(number * math.pow(10, digits)) / math.pow(10, digits)
# ------------------------------------------------------------------------------
# threat model -- stolen hash catastrophe scenario -----------------------------
# ------------------------------------------------------------------------------
#
# assumes:
# * passwords are stored as salted hashes, different random salt per user.
# (making rainbow attacks infeasable.)
# * hashes and salts were stolen. attacker is guessing passwords at max rate.
# * attacker has several CPUs at their disposal.
# ------------------------------------------------------------------------------
# for a hash function like bcrypt/scrypt/PBKDF2, 10ms per guess is a safe lower bound.
# (usually a guess would take longer -- this assumes fast hardware and a small work factor.)
# adjust for your site accordingly if you use another hash function, possibly by
# several orders of magnitude!
SINGLE_GUESS = .010
NUM_ATTACKERS = 100 # number of cores guessing in parallel.
SECONDS_PER_GUESS = SINGLE_GUESS / NUM_ATTACKERS
def entropy_to_crack_time(entropy):
return (0.5 * math.pow(2, entropy)) * SECONDS_PER_GUESS # average, not total
def crack_time_to_score(seconds):
if seconds < math.pow(10, 2):
return 0
if seconds < math.pow(10, 4):
return 1
if seconds < math.pow(10, 6):
return 2
if seconds < math.pow(10, 8):
return 3
return 4
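# Worked example (sketch): with SECONDS_PER_GUESS = 1e-4, a password carrying
# 28 bits of entropy takes on average
#     0.5 * 2**28 * 1e-4  =  about 1.34e4 seconds  =  about 3.7 hours
# to crack under this threat model, which crack_time_to_score() maps to 2.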
# ------------------------------------------------------------------------------
# entropy calcs -- one function per match pattern ------------------------------
# ------------------------------------------------------------------------------
def calc_entropy(match):
if 'entropy' in match: return match['entropy']
if match['pattern'] == 'repeat':
entropy_func = repeat_entropy
elif match['pattern'] == 'sequence':
entropy_func = sequence_entropy
elif match['pattern'] == 'digits':
entropy_func = digits_entropy
elif match['pattern'] == 'year':
entropy_func = year_entropy
elif match['pattern'] == 'date':
entropy_func = date_entropy
elif match['pattern'] == 'spatial':
entropy_func = spatial_entropy
elif match['pattern'] == 'dictionary':
entropy_func = dictionary_entropy
match['entropy'] = entropy_func(match)
return match['entropy']
def repeat_entropy(match):
cardinality = calc_bruteforce_cardinality(match['token'])
return lg(cardinality * len(match['token']))
def sequence_entropy(match):
first_chr = match['token'][0]
if first_chr in ['a', '1']:
base_entropy = 1
else:
if first_chr.isdigit():
base_entropy = lg(10) # digits
elif first_chr.isalpha():
base_entropy = lg(26) # lower
else:
base_entropy = lg(26) + 1 # extra bit for uppercase
if not match['ascending']:
base_entropy += 1 # extra bit for descending instead of ascending
return base_entropy + lg(len(match['token']))
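# Worked example for sequence_entropy (sketch): for the ascending sequence
# token 'abcdef', base_entropy = 1 (it starts with 'a'), so the total is
#     1 + lg(6), about 3.58 bits.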
def digits_entropy(match):
return lg(math.pow(10, len(match['token'])))
NUM_YEARS = 119 # years match against 1900 - 2019
NUM_MONTHS = 12
NUM_DAYS = 31
def year_entropy(match):
return lg(NUM_YEARS)
def date_entropy(match):
if match['year'] < 100:
entropy = lg(NUM_DAYS * NUM_MONTHS * 100) # two-digit year
else:
entropy = lg(NUM_DAYS * NUM_MONTHS * NUM_YEARS) # four-digit year
if match['separator']:
entropy += 2 # add two bits for separator selection [/,-,.,etc]
return entropy
def spatial_entropy(match):
if match['graph'] in ['qwerty', 'dvorak']:
s = KEYBOARD_STARTING_POSITIONS
d = KEYBOARD_AVERAGE_DEGREE
else:
s = KEYPAD_STARTING_POSITIONS
d = KEYPAD_AVERAGE_DEGREE
possibilities = 0
L = len(match['token'])
t = match['turns']
# estimate the number of possible patterns w/ length L or less with t turns or less.
for i in range(2, L + 1):
possible_turns = min(t, i - 1)
for j in range(1, possible_turns+1):
x = binom(i - 1, j - 1) * s * math.pow(d, j)
possibilities += x
entropy = lg(possibilities)
# add extra entropy for shifted keys. (% instead of 5, A instead of a.)
# math is similar to extra entropy from uppercase letters in dictionary matches.
if 'shifted_count' in match:
S = match['shifted_count']
U = L - S # unshifted count
possibilities = sum(binom(S + U, i) for i in range(0, min(S, U) + 1))
entropy += lg(possibilities)
return entropy
def dictionary_entropy(match):
match['base_entropy'] = lg(match['rank']) # keep these as properties for display purposes
match['uppercase_entropy'] = extra_uppercase_entropy(match)
match['l33t_entropy'] = extra_l33t_entropy(match)
ret = match['base_entropy'] + match['uppercase_entropy'] + match['l33t_entropy']
return ret
START_UPPER = re.compile('^[A-Z][^A-Z]+$')
END_UPPER = re.compile('^[^A-Z]+[A-Z]$')
ALL_UPPER = re.compile('^[A-Z]+$')
def extra_uppercase_entropy(match):
word = match['token']
if word.islower():
return 0
# a capitalized word is the most common capitalization scheme,
# so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy.
# allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe.
for regex in [START_UPPER, END_UPPER, ALL_UPPER]:
if regex.match(word):
return 1
# Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or
# less. Or, if there's more uppercase than lower (for e.g. PASSwORD), the number of ways to lowercase U+L letters
# with L lowercase letters or less.
upp_len = len([x for x in word if x.isupper()])
low_len = len([x for x in word if x.islower()])
possibilities = sum(binom(upp_len + low_len, i) for i in range(0, min(upp_len, low_len) + 1))
return lg(possibilities)
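# Worked example for extra_uppercase_entropy (sketch): 'Password' matches
# START_UPPER, so it adds just 1 bit. 'PaSsword' has 2 uppercase and 6
# lowercase letters, so
#     possibilities = C(8,0) + C(8,1) + C(8,2) = 1 + 8 + 28 = 37
# and the extra entropy is lg(37), about 5.2 bits.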
def extra_l33t_entropy(match):
if 'l33t' not in match or not match['l33t']:
return 0
possibilities = 0
for subbed, unsubbed in list(match['sub'].items()):
sub_len = len([x for x in match['token'] if x == subbed])
unsub_len = len([x for x in match['token'] if x == unsubbed])
possibilities += sum(binom(unsub_len + sub_len, i) for i in range(0, min(unsub_len, sub_len) + 1))
# corner: return 1 bit for single-letter subs, like 4pple -> apple, instead of 0.
if possibilities <= 1:
return 1
return lg(possibilities)
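# Worked example for extra_l33t_entropy (sketch): for the token 'p4ssw0rd'
# with sub = {'4': 'a', '0': 'o'}, each substitution contributes C(1,0) = 1,
# so possibilities = 2 and the extra entropy is lg(2) = 1 bit. A single-letter
# sub such as '4pple' -> 'apple' hits the corner case and still returns 1 bit.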
# utilities --------------------------------------------------------------------
def calc_bruteforce_cardinality(password):
lower, upper, digits, symbols = 0, 0, 0, 0
for char in password:
if char.islower():
lower = 26
elif char.isdigit():
digits = 10
elif char.isupper():
upper = 26
else:
symbols = 33
cardinality = lower + digits + upper + symbols
return cardinality
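# Example for calc_bruteforce_cardinality (sketch): for a password such as
# 'Passw0rd!' all four character classes are present, so the cardinality is
# 26 + 26 + 10 + 33 = 95; a purely lowercase password would get 26.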
def display_time(seconds):
minute = 60
hour = minute * 60
day = hour * 24
month = day * 31
year = month * 12
century = year * 100
if seconds < minute:
return 'instant'
elif seconds < hour:
return '%s minutes' % (1 + math.ceil(seconds / minute),)
elif seconds < day:
return '%s hours' % (1 + math.ceil(seconds / hour),)
elif seconds < month:
return '%s days' % (1 + math.ceil(seconds / day),)
elif seconds < year:
return '%s months' % (1 + math.ceil(seconds / month),)
elif seconds < century:
return '%s years' % (1 + math.ceil(seconds / year),)
else:
return 'centuries'
| {
"repo_name": "erikr/python-zxcvbn",
"path": "zxcvbn/scoring.py",
"copies": "2",
"size": "12311",
"license": "mit",
"hash": 2746137209404965400,
"line_mean": 34.5809248555,
"line_max": 120,
"alpha_frac": 0.5719275445,
"autogenerated": false,
"ratio": 3.7340006066120717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305928151112073,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
from mousestyles.distribution import (powerlaw_pdf, exp_pdf,
powerlaw_inverse_cdf, exp_inverse_cdf)
from mousestyles.est_power_param import (fit_powerlaw, fit_exponential,
getdistance)
from mousestyles.kde import kde
def plot_powerlaw(estimation):
"""
Return the histogram of all estimators of power law
to check the distribution.
Parameters
----------
estimation: dataframe
dataframe of strain, mouse, day and the estimator
Returns
-------
plot : histogram
The histogram of all estimators of power law
"""
plt.hist(list(estimation.ix[estimation["strain"] == 0, 3]))
plt.hist(list(estimation.ix[estimation["strain"] == 1, 3]))
plt.hist(list(estimation.ix[estimation["strain"] == 2, 3]))
plt.title("Histogram: Power Law parameters distribution by strain")
def plot_exponential(estimation):
"""
Return the histogram of all estimators of exponential
to check the distribution.
Parameters
----------
estimation: dataframe
dataframe of strain, mouse, day and the estimator
Returns
-------
plot : histogram
The histogram of all estimators of exponential.
"""
plt.hist(list(estimation.ix[estimation["strain"] == 0, 4]))
plt.hist(list(estimation.ix[estimation["strain"] == 1, 4]))
plt.hist(list(estimation.ix[estimation["strain"] == 2, 4]))
plt.title("Histogram: Exponential parameters distribution by strain")
def plot_fitted(strain, mouse, day, hist=True, density=False):
"""
Return the plot of one single mouse day
-fitted power law
-fitted exponential
-histogram of distance
-kernel density curve
Parameters
----------
strain : int
the strain number of the mouse
mouse : int
the mouse number in its strain
day : int
the day number
hist : boolean
Plot histogram if True
density : boolean
plot density if True
Returns
-------
plot : 1 histogram (blue) + 2 fitted curve + 1 density (cyan)
"""
fig, ax = plt.subplots(1, 1)
x = np.arange(1, 2.7, 0.01)
alpha = fit_powerlaw(strain, mouse, day)
lamb = fit_exponential(strain, mouse, day)
cut_dist = getdistance(strain, mouse, day)
ax.plot(x, powerlaw_pdf(x, alpha), 'r-', lw=2, alpha=2,
label='powerlaw pdf')
ax.plot(x, exp_pdf(x, lamb), 'y-', lw=2, alpha=2,
label='exp pdf')
if hist:
weights = np.ones_like(cut_dist) / len(cut_dist) * 10
ax.hist(cut_dist, weights=weights, bins=np.arange(1, 2.6, 0.1))
if density:
np.random.seed(0)
sample_cut_dist = np.random.choice(cut_dist, 1000, replace=False)
pdf = kde(sample_cut_dist, x_grid=x, symmetric_correction=True,
cutoff=1)
        ax.plot(x, pdf, 'c', lw=2, alpha=2,
                label='kernel density')
def plot_relative_dist(strain, mouse, day):
"""
Return the relative distribution of distance of a single mouse day
The relative distribution of a comparison distribution with density g
vs a reference distribution with density f is g(F^(-1)(x))/f(F^(-1)(x))
It's a prob distribution on [0, 1].
Parameters
----------
strain : int
the strain number of the mouse
mouse : int
the mouse number in its strain
day : int
the day number
Returns
-------
pdf_rel_power : numpy.ndarray
the relative distribution, referencing the best fit powerlaw
distribution, at grid points np.arange(0, 1.0, 0.02)
pdf_rel_exp : numpy.ndarray
the relative distribution, referencing the best fit exponential
distribution, at grid points np.arange(0, 1.0, 0.02)
"""
# calculate the best fit parameters for powerlaw and exponential
alpha = fit_powerlaw(strain, mouse, day)
lamb = fit_exponential(strain, mouse, day)
# set up the grid points on [0, 1]
x = np.arange(0, 1.0, 0.02)
# transform x to F^(-1)(x) = y
y_grid_power = powerlaw_inverse_cdf(x, alpha)
y_grid_exp = exp_inverse_cdf(x, lamb)
cut_dist = getdistance(strain, mouse, day)
# to save computation, we do random sampling
np.random.seed(0)
sample_cut_dist = np.random.choice(cut_dist, 1000, replace=False)
# get the corresponding density at points y
pdf_grid_power = kde(sample_cut_dist, x_grid=y_grid_power,
symmetric_correction=True, cutoff=1)
pdf_grid_exp = kde(sample_cut_dist, x_grid=y_grid_exp,
symmetric_correction=True, cutoff=1)
# calculate the ratio as relative distribution
pdf_rel_power = pdf_grid_power / powerlaw_pdf(y_grid_power, alpha)
pdf_rel_exp = pdf_grid_exp / exp_pdf(y_grid_exp, lamb)
# plot the relative distribution
fig, ax = plt.subplots(1, 2)
ax[0].plot(x, pdf_rel_power, lw=2, alpha=2, c='b',
label='Relative distribution, powerlaw')
ax[0].axhline(y=1, c='r')
ax[1].plot(x, pdf_rel_exp, lw=2, alpha=2, c='b',
label='Relative distribution, exponential')
ax[1].axhline(y=1, c='r')
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/visualization/distribution_plot.py",
"copies": "3",
"size": "5349",
"license": "bsd-2-clause",
"hash": 3443265401003458000,
"line_mean": 33.9607843137,
"line_max": 76,
"alpha_frac": 0.6191811554,
"autogenerated": false,
"ratio": 3.6387755102040815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5757956665604081,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
from mousestyles.ultradian import strain_seasonal
from mousestyles.ultradian import find_cycle
ALL_FEATURES = ["AS", "F", "M_AS", "M_IS", "W", "Distance"]
def plot_strain_seasonal(strains, mouse, feature, bin_width, period_length):
"""
Use seansonal decomposition model on the time series
of specified strain, mouse, feature and bin_width.
return the seasonal term and the plot of seasonal term
by mouse of a set of mouses in a strain
Parameters
----------
    strains: list, set or tuple
        nonnegative integers indicating the strain numbers
    mouse: list, set or tuple
        nonnegative integers indicating the mouse numbers
feature: {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
"AS": Active state probalibity
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
bin_width: int
number of minutes, the time interval for data aggregation
period_length: float or int
number of hours, usually the significant period
length indicated by Lomb-scargle model
Returns
-------
seasonal_plot: plot of seasonal term by mouse
Examples
--------
>>> res = plot_strain_seasonal(strains={0, 1, 2,}, mouse={0, 1, 2, 3},
feature="W",
bin_width=30, period_length = 24)
"""
    if (not all([isinstance(s, int)
                 for s in strains])) or (any([s < 0 for s in strains])):
        raise ValueError(
            'Strain must be a non-negative integer')
if (not all([isinstance(m, int)
for m in mouse])) or (any([m < 0 for m in mouse])):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
if period_length < 0:
raise ValueError(
'Peoriod length must be a non-negative float or int')
time = np.arange(0, period_length, bin_width / 60)
fig = plt.figure(figsize=(8, 8))
flag = 0
for strain in strains:
if flag == 0:
ax = fig.add_subplot(3, 1, strain + 1)
flag += 1
else:
ax = fig.add_subplot(3, 1, strain + 1, sharey=ax)
seasonal_all = strain_seasonal(strain, mouse, feature,
bin_width, period_length)
for i in np.arange(len(mouse)):
ax.plot(time, seasonal_all[i, :])
ax.legend(['mouse' + str(i) for i in np.arange(len(mouse))],
loc='upper right', prop={'size': 10})
ax.set_title('strain ' + str(strain))
plt.xlabel('Time')
plt.ylabel('Seasonal Variation')
plt.suptitle(feature, fontsize=20)
fig.show()
return fig
def compare_strain(feature, n_strain=3, bin_width=15, disturb_t=False):
"""
Use the data from function find_cycle and plotting method from function
lombscargle_visualize to compare the Lomb-Scargle plots between different
strains.
Parameters
----------
feature: string in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
"AS": Active state probalibity
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
    n_strain: int, default is 3
nonnegative integer indicating total number of strains to be compared
bin_width: int, minute unit, default is 15 minutes
number of minutes, the time interval for data aggregation
disturb_t: boolean, default is False
If True, add uniformly distributed noise to the time sequence which
are used to fit the Lomb Scargle model. This is to avoid the singular
matrix error that could happen sometimes.
Returns
-------
Lomb Scargle Power versus Periods (hours) plot with significant levels.
Examples
-------
"""
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
fig = plt.figure(figsize=(16, 8))
flag = 0
for i in range(n_strain):
if flag == 0 or feature == "M_IS":
ax = fig.add_subplot(1, n_strain, i + 1)
flag += 1
else:
ax = fig.add_subplot(1, n_strain, i + 1, sharey=ax)
periods, power, sig, N, cycle, cycle_power, cycle_pvalue = find_cycle(
feature=feature, strain=i, bin_width=bin_width,
methods='LombScargleFast', disturb_t=disturb_t,
gen_doc=True, plot=False)
ax.plot(periods, power, color='steelblue')
ax.set(xlim=(0, 26), ylim=(0, max(cycle_power)),
xlabel='Period (hours)',
ylabel='Lomb-Scargle Power')
ax.set_title('strain' + str(i))
for i in sig:
power_sig = -2 / (N - 1) * np.log(
1 - (1 - np.asarray(i)) ** (1 / 2 / N))
plt.axhline(y=power_sig, color='green', ls='dashed', lw=1)
ax.text(x=24, y=power_sig, s='P-value:' +
str(float(i)), ha='right', va='bottom')
idx = [i for i, x in enumerate(cycle_pvalue) if x < 0.001]
for j in idx:
if cycle[j] > min(
periods) and cycle[j] < min(max(periods), 23):
ax.text(x=cycle[j], y=cycle_power[j],
s=r'$\bigstar\bigstar\bigstar$',
ha='right', va='top')
idx = [i for i, x in enumerate(
cycle_pvalue) if x > 0.001 and x < 0.01]
for j in idx:
if cycle[j] > min(periods) and cycle[j] < max(periods):
ax.text(x=cycle[j], y=cycle_power[
j], s=r'$\bigstar\bigstar$', ha='right', va='top')
idx = [i for i, x in enumerate(
cycle_pvalue) if x > 0.01 and x < 0.05]
for j in idx:
if cycle[j] > min(periods) and cycle[j] < max(periods):
ax.text(x=cycle[j], y=cycle_power[j],
s=r'$\bigstar$', ha='right', va='top')
plt.suptitle('Feature: ' + feature, fontsize=20)
return fig
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/visualization/plot_ultradian.py",
"copies": "3",
"size": "6750",
"license": "bsd-2-clause",
"hash": -6393349086081615000,
"line_mean": 37.7931034483,
"line_max": 79,
"alpha_frac": 0.5533333333,
"autogenerated": false,
"ratio": 3.6486486486486487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5701981981948648,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
def lombscargle_visualize(periods, power, sig, N, cycle,
cycle_power, cycle_pvalue):
"""
    Use the Lomb-Scargle method on a given strain and mouse's data to find
    the most significant periods (those with the highest Lomb-Scargle power),
    and draw the Lomb-Scargle power versus period plot, using the supplied
    periods as the grid at which the corresponding LS power is shown.
    There will also be stars and horizontal lines indicating the significance
    of each candidate period: three stars mark p-values in [0, 0.001], two
    stars mark p-values in [0.001, 0.01], and one star marks p-values in
    [0.01, 0.05]. The horizontal line is drawn at the LS power whose p-value
    equals the given significance level (0.05 by default).
Parameters
----------
    periods: numpy array of the same length as 'power'
        used as the time sequence in the LS model to make predictions
    power: numpy array of the same length as 'periods'
        the corresponding predicted power at each period
    sig: list, tuple or numpy array, default is [0.05].
        significance level(s) at which to draw horizontal lines.
N: int
the length of time sequence in the fit model
cycle: numpy array
periods
cycle_power: numpy array
        LS power corresponding to the periods in 'cycle'
cycle_pvalue: numpy array
p-values corresponding to the periods in 'cycle'
Returns
-------
Lomb Scargle Power versus Periods (hours) plot with significant levels.
Examples
--------
"""
fig, ax = plt.subplots()
ax.plot(periods, power, color='steelblue')
ax.set(xlim=(0, 26), ylim=(0, max(cycle_power)),
xlabel='Period (hours)',
ylabel='Lomb-Scargle Power')
for i in sig:
power_sig = -2 / (N - 1) * np.log(
1 - (1 - np.asarray(i)) ** (1 / 2 / N))
plt.axhline(y=power_sig, color='green', ls='dashed', lw=1)
ax.text(x=24, y=power_sig, s='P-value:' +
str(float(i)), ha='right', va='bottom')
idx = [i for i, x in enumerate(cycle_pvalue) if x < 0.001]
for j in idx:
if cycle[j] > min(periods) and cycle[j] < max(periods):
ax.text(x=cycle[j],
y=cycle_power[j], s=r'$\bigstar\bigstar\bigstar$',
ha='right', va='top')
idx = [i for i, x in enumerate(cycle_pvalue) if x > 0.001 and x < 0.01]
for j in idx:
if cycle[j] > min(periods) and cycle[j] < max(periods):
ax.text(x=cycle[j], y=cycle_power[j],
s=r'$\bigstar\bigstar$', ha='right', va='top')
idx = [i for i, x in enumerate(cycle_pvalue) if x > 0.01 and x < 0.05]
for j in idx:
if cycle[j] > min(periods) and cycle[j] < max(periods):
ax.text(x=cycle[j], y=cycle_power[j],
s=r'$\bigstar$', ha='right', va='top')
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/visualization/plot_lomb_scargle.py",
"copies": "3",
"size": "3025",
"license": "bsd-2-clause",
"hash": -6727041823898236000,
"line_mean": 40.4383561644,
"line_max": 79,
"alpha_frac": 0.5834710744,
"autogenerated": false,
"ratio": 3.554641598119859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 73
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import matplotlib.pyplot as plt
import pandas as pd
def plot_path(movement, paths, title='example plot of path', alpha=.1,
linewidth=1., xlim=[-16.24, 3.76], ylim=[0.9, 43.5]):
r"""
Plot the lines along paths.
Parameters
----------
movement : pandas.DataFrame
CX, CY coordinates. Must have length greater than 1.
paths: list
a list containing the indices for all paths.
title : str
the title of the plot. Default is 'example plot of path'
alpha : numeric
        graphical parameter which determines the opacity of each
        line. Default is .1.
linewidth : numeric
graphical parameter which determines the width of each
line. Default is 1.
xlim, ylim : list
list of length 2 indicating the end points of the plot.
Returns
-------
Drawing the plot of the path.
Examples
--------
>>> movement = data.load_movement(1,2,1)
>>> sep = path_index(movement, 1, 1)
>>> plot_path(movement, sep)
"""
if not isinstance(movement, pd.core.frame.DataFrame):
raise TypeError("movement must be pandas DataFrame")
if not set(movement.keys()).issuperset(['x', 'y']):
raise ValueError("the keys of movement must contain 'x', 'y'")
if len(movement) <= 1:
raise ValueError("movement must contain at least 2 rows")
if not isinstance(paths, list):
raise TypeError("paths must be a list")
if len(paths) == 0:
raise ValueError("length of paths is 0")
if not isinstance(title, str):
raise TypeError("title must be a string")
if not isinstance(alpha, float):
raise TypeError("alpha must be float")
if not isinstance(linewidth, float):
raise TypeError("linewidth must be float")
for sep in paths:
path = movement[sep[0]:sep[1] + 1]
plt.plot(path['x'], path['y'], 'b',
linewidth=linewidth, alpha=alpha)
plt.xlabel('x-coordinate')
plt.xlim(xlim[0], xlim[1])
plt.ylabel('y-coordinate')
plt.ylim(ylim[0], ylim[1])
plt.title(title)
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/visualization/plot_path.py",
"copies": "3",
"size": "2241",
"license": "bsd-2-clause",
"hash": 8147876183867381000,
"line_mean": 27.7307692308,
"line_max": 70,
"alpha_frac": 0.6055332441,
"autogenerated": false,
"ratio": 3.9946524064171123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import matplotlib.pyplot as plt
def plot_box(list_of_arrays, title="Box Plot of Distribution", width=4,
height=4):
r"""
Make a box plot of the desired metric (path length, speed, angle, etc)
per mouse, day or strain.
Parameters
----------
list_of_arrays : list
each element of the list is a numpy array containing data on the
        metric to be plotted per mouse, day, or strain. Data is typically
generated from another function in the package.
title : str
desired title of the plot
width : int
first argument in figure size specifying width of plot
height : int
second argument in figure size specifying height of plot
Returns
-------
box plot : box plot of the desired metric combinations
"""
if len(list_of_arrays) == 0:
raise ValueError("List of arrays can not be empty")
if type(title) != str:
raise TypeError("Title must be a string")
if type(list_of_arrays) != list:
raise TypeError("List of arrays must be a list")
plt.boxplot(list_of_arrays)
plt.title(title)
plt.figure(figsize=(width, height))
plt.show()
def plot_hist(list_of_arrays, title="Histogram of Distribution",
xlab="X Values", ylab="Y Values", leg=True):
r"""
Make a histogram of the desired metric (path length, speed, angle, etc)
per mouse, day, or strain.
Parameters
----------
list_of_arrays : list
each element of the list is a numpy array containing data on the
        metric to be plotted per mouse, day, or strain. Data is typically
        generated from another function in the package.
title : str
desired title of the plot
xlab : str
desired x axis label of the plot
ylab : str
desired y axis label of the plot
leg : bool
indicates whether a legend for the plot is desired
Returns
-------
hist plot : histogram of the desired metric combinations
"""
if len(list_of_arrays) == 0:
raise ValueError("List of arrays can not be empty")
if type(title) != str:
raise TypeError("Title must be a string")
if type(xlab) != str:
raise TypeError("xlab must be a string")
if type(ylab) != str:
raise TypeError("ylab must be a string")
if type(list_of_arrays) != list:
raise TypeError("List of arrays must be a list")
if type(leg) != bool:
raise TypeError("leg must be a boolean")
lab = list(range(len(list_of_arrays)))
plt.hist(list_of_arrays, normed=True, label=lab)
plt.xlabel(xlab)
plt.ylabel(ylab)
if leg is True:
plt.legend()
plt.title(title)
plt.show()
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/visualization/path_diversity_plotting.py",
"copies": "3",
"size": "2827",
"license": "bsd-2-clause",
"hash": 3807215600274500600,
"line_mean": 29.3978494624,
"line_max": 75,
"alpha_frac": 0.6239830209,
"autogenerated": false,
"ratio": 4.091172214182344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6215155235082344,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from astropy.coordinates import SkyCoord
from .utils import sersic_profile_function
from .mask import Mask
class Galaxy:
'''This is a representation of a galaxy which needs slitmasks.'''
def __init__(self, name, center, r_eff, axial_ratio, position_angle,
mu_eff=22.0, brightness_profile=None):
'''
Parameters
----------
name: str, name of galaxy, e.g. 'n4551', used in file output labeling
center: SkyCoord object of central position
r_eff: float, in arcseconds, giving the effective radius of galaxy
axial_ratio: float, ratio of minor axis to major axis, equal to (1 - flattening)
position_angle: float, in degrees, giving the position angle
measured counter-clockwise from north (i.e. positive declination)
mu_eff: float, in mag [per arcsec2], giving the surface brightness at r_eff
brightness_profile: f: radius in arcsec, position angle in degrees ->
surface brightness in mag/arcsec^2
If None, default to de Vaucouleurs' profile
'''
self.name = name
assert isinstance(center, SkyCoord)
self.center = center
self.r_eff = r_eff
self.mu_eff = mu_eff
self.axial_ratio = axial_ratio
self.position_angle = position_angle
if brightness_profile is not None:
self.brightness_profile = brightness_profile
else:
# default to deVaucouleurs' profile
self.brightness_profile = sersic_profile_function(mu_eff, r_eff, 4, position_angle, axial_ratio)
self.masks = []
def __repr__(self):
return '<Galaxy ' + self.name + ': ' + self.center.to_string('hmsdms') + '>'
def create_masks(self, num_masks, mask_cone_angles=None, cone_overlap=180.):
'''
Parameters
----------
num_masks: int, number of masks to make for the galaxy
mask_cone_angles: list, if not None, sets the individual opening angles for each mask
cone_overlap: float, degrees, = cone_angle * num_masks
'''
self.masks = []
if mask_cone_angles is not None:
assert len(mask_cone_angles) == num_masks
cone_angles = mask_cone_angles
else:
cone_angle = cone_overlap / num_masks
cone_angles = [cone_angle] * num_masks
sep_angle = 180. / num_masks
for i in range(num_masks):
delta_pa = i * sep_angle
mask_pa = self.position_angle + delta_pa
mask_r_eff = np.sqrt((self.r_eff * np.cos(np.radians(delta_pa)))**2 +
(self.r_eff * self.axial_ratio * np.sin(np.radians(delta_pa)))**2)
name = str(i + 1) + self.name
self.masks.append(Mask(name, mask_pa, mask_r_eff, cone_angles[i], self.brightness_profile))
def slit_positions(self, best=False):
'''
Returns the slit positions (x, y), rotated to the galaxy frame, (i.e., the x-axis
is along the major axis and the y-axis is along the minor axis).
if best, then get slit positions for the best fitting slits
'''
# list of positions rotated to the major axis of galaxy
x_positions = np.array([])
y_positions = np.array([])
for mask in self.masks:
theta = np.radians(mask.mask_pa - self.position_angle)
if best:
x = np.array([slit.x for slit in mask.best_slits])
                y = np.array([slit.y for slit in mask.best_slits])
else:
x, y = mask.slit_positions()
x_rot = x * np.cos(theta) - y * np.sin(theta)
y_rot = x * np.sin(theta) + y * np.cos(theta)
x_positions = np.concatenate([x_positions, x_rot, -x_rot])
y_positions = np.concatenate([y_positions, y_rot, -y_rot])
return x_positions, y_positions
def sampling_metric(self, xx, yy, resolution):
'''
Evaluates how well the given points sample the 2d space.
Parameters
----------
xx: float array, arcsec along long end of mask
yy: float array, arcsec along short end of mask
resolution: float, arcsec, spacing between points in spatial grid
Returns
-------
        metric: float, the maximum over grid points of the minimum distance
            to any slit position (smaller means better spatial sampling)
'''
assert len(xx) == len(yy)
# take only points on one side of minor axis
# mask = xx >= 0
# xx = xx[mask]
# yy = yy[mask]
num_slits = len(xx)
# make grid samples
x_samples = np.linspace(0, np.amax(xx), int(np.amax(xx) / resolution))
y_samples = np.linspace(np.amin(yy), np.amax(yy), int(np.ptp(yy) / resolution))
# flatten grid
x_flat = np.tile(x_samples, y_samples.shape)
y_flat = np.tile(y_samples, (x_samples.size, 1)).T.flatten()
num_points = len(x_flat)
# tile grid to n_points by n_slits
x_points = np.tile(x_flat, (num_slits, 1))
y_points = np.tile(y_flat, (num_slits, 1))
# tile slit positions to n_points by n_slits
x_slits = np.tile(xx, (num_points, 1)).T
y_slits = np.tile(yy, (num_points, 1)).T
distances = np.amin(np.sqrt((x_slits - x_points)**2 +
(y_slits - y_points)**2),
axis=0)
return np.amax(distances)
def optimize(self, num_masks=4, num_iter=100, resolution=1, cone_angles=None, cone_overlap=180):
'''
Find the optimal spatial sampling of mask slits.
Parameters
----------
num_masks: int, number of masks to make for the galaxy
num_iter: int, number of iterations in MC
resolution: float, arcsec, spatial resolution of mask area to sample for MC
'''
self.create_masks(num_masks, cone_angles, cone_overlap)
# iteratively randomize slit distribution and check spatial sampling
best_result = np.inf
for i in range(num_iter):
# print(i)
# randomize slits
for mask in self.masks:
mask.random_slits()
# list of positions rotated to the major axis of galaxy
x_positions, y_positions = self.slit_positions()
metric = self.sampling_metric(x_positions, y_positions, resolution)
# minimize metric
if metric < best_result:
# copy current slit configuration to best setup
for mask in self.masks:
# cleanup first
# del mask.best_slits[:]
# storage next
mask.best_slits = mask.slits
best_result = metric
# add sky slits and mirror the final results
for mask in self.masks:
mask.add_sky_slits()
mask.mirror_slits()
return best_result
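# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the intended workflow: build a Galaxy and run a very
# short Monte Carlo optimization of its slit layout.  The coordinates and
# galaxy parameters below are invented for demonstration only, the tiny
# num_iter and coarse resolution just keep the example fast, and it assumes
# the packaged sersic_profile_function and Mask/Slit machinery import cleanly.
if __name__ == "__main__":
    fake_center = SkyCoord("12h00m00s", "+10d00m00s", frame="icrs")
    demo_galaxy = Galaxy(name="demo", center=fake_center, r_eff=30.0,
                         axial_ratio=0.7, position_angle=45.0, mu_eff=22.0)
    best_metric = demo_galaxy.optimize(num_masks=2, num_iter=5, resolution=5)
    print("best sampling metric (arcsec):", best_metric)
    for demo_mask in demo_galaxy.masks:
        print(demo_mask, "->", len(demo_mask.slits), "slits")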
| {
"repo_name": "adwasser/masktools",
"path": "masktools/superskims/galaxy.py",
"copies": "1",
"size": "7204",
"license": "mit",
"hash": -5305748656583879000,
"line_mean": 41.1286549708,
"line_max": 108,
"alpha_frac": 0.5627429206,
"autogenerated": false,
"ratio": 3.807610993657505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48703539142575053,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from scipy import stats, optimize
from mousestyles.data import load_movement
def get_travel_distances(strain=0, mouse=0, day=0):
""" Get distances travelled in 20ms for this strain, this mouse,
on this day.
Parameters
----------
strain: int {0, 1, 2}
The strain of mouse to test
mouse: int {0, 1, 2, 3}
The mouse twin id with in the strain
day: int {0, 1, ..., 11}
The day to calculate the distance
Returns
-------
x: np.ndarray shape (n, 1)
The distances travelled in 20ms for this mouse on this day, truncated
at 1cm (i.e. only record mouse movement when it moves more than 1cm)
Examples:
>>> get_travel_distances(0, 0, 0)[:3]
array([ 1.00648944, 1.02094319, 1.0178885 ])
"""
df = load_movement(strain=strain, mouse=mouse, day=day)
x = np.array(np.sqrt(df.x.diff()**2 + df.y.diff()**2))[1:]
x = x[x >= 1]
return x
def perform_kstest(x, distribution=stats.pareto,
verbose=True):
"""This function fits a distribution to data, and then test the
fit of the distribution using Kolmogorov-Smirnov test.
The Kolmogorov-Smirnov test constructs the test statistic, which is defined
as $\sup |F_n(x) - F(x)|$, for $F_n$ is the sample CDF, and F is the
theoretical CDF. This statistics can be considered as a measure of
distance between the sample distribution and the theoretical distribution.
The smaller it is, the more similar the two distributions.
We first estimate the parameter using MLE, then by minimizing the KS test
statistic.
The Pareto distribution is sometimes known as the Power Law distribution,
with the PDF: $b / x**(b + 1)$ for $x >= 1, b > 0$.
The truncated exponential distribution is the same as the rescaled
exponential distribution.
Parameters
----------
x: np.ndarray (n,)
The sample data to test the distribution
distribution: A Scipy Stats Continuous Distribution
{stats.pareto, stats.expon, stats.gamma}
The distribution to test against. Currently support pareto, expon, and
gamma, but any one-sided continuous distribution in Scipy.stats should
work.
verbose: boolean
If True, will print out testing result
Returns
-------
params: np.ndarray shape (p,)
The optimal parameter for the distribution. Optimal in the sense of
minimizing K-S statistics.
    The function also prints out the Kolmogorov-Smirnov test result for three
cases
1. When comparing the empirical distribution against the distribution with
parameters estimated with MLE
2. When comparing the empirical distribution against the distribution with
    parameters estimated by explicitly minimizing the KS statistic
3. When comparing a resample with replacement of the empirical distribution
against the Pareto in 2.
A p-value > 0.05 means we fail to reject the Null hypothesis that the
empirical distribution follow the specified distribution.
Notes:
------
    The MLE often does not fit the data very well. We instead minimize the
    K-S distance, and obtain a better fit (as seen by the PDF and CDF
    similarity between the sample data and the fit)
References:
-----------
1. Kolmogorov-Smirnov test:
https://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test
2. Pareto Distribution (also known as power law distribution)
https://en.wikipedia.org/wiki/Pareto_distribution
Examples:
---------
>>> x = get_travel_distances(0, 0, 0)
>>> res = perform_kstest(x, verbose=False)
>>> np.allclose(res, np.array([3.67593246, 0.62795748, 0.37224205]))
True
"""
dist_name = distribution.name
# Fit the parameters by MLE
mle_params = distribution.fit(x)
    # Fit the parameters by explicitly minimizing the KS statistic, and
    # perform the K-S test. First define a helper function to minimize
def calculate_ks_statistics(params):
return stats.kstest(x, dist_name, args=params)[0]
# Define good initial parameters to help optimizer find optimal values
if dist_name == "pareto":
init_params = [4.5, .5, .5]
else:
init_params = mle_params
opt_params = optimize.fmin(calculate_ks_statistics, x0=init_params, disp=0)
if verbose:
print("1. Testing {} distribution with MLE parameters".format(
dist_name))
print(stats.kstest(x, dist_name, args=mle_params))
print("2. Testing {} distribution with optimal parameters".
format(dist_name))
print(stats.kstest(x, dist_name, args=opt_params))
# Resample x, and test again
x_bag = np.random.choice(x, size=len(x), replace=True)
print("3. Similar to 2., but on a resample of x")
print(stats.kstest(x_bag, dist_name, args=opt_params))
return opt_params
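# --- Illustrative usage (not part of the original module) ---
# A hedged sketch: draw synthetic data from a Pareto law and check that
# perform_kstest recovers a sensible fit.  The shape parameter, sample size
# and seed are arbitrary demonstration choices; with real data one would
# pass the output of get_travel_distances instead.
if __name__ == "__main__":
    np.random.seed(0)
    synthetic = stats.pareto.rvs(b=3.0, size=1000)
    opt = perform_kstest(synthetic, distribution=stats.pareto, verbose=True)
    print("KS-optimal Pareto parameters:", opt)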
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/distributions/kolmogorov_test.py",
"copies": "3",
"size": "5049",
"license": "bsd-2-clause",
"hash": -8623030688448152000,
"line_mean": 36.962406015,
"line_max": 79,
"alpha_frac": 0.660526837,
"autogenerated": false,
"ratio": 3.7962406015037593,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5956767438503758,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import matplotlib.pyplot as plt
def plot_performance(result):
"""
    Plots the performance of the classification model. It
is a side-by-side barplot. For each strain, it plots
the precision, recall and F-1 measure.
Parameters
----------
result: dataframe, columns of precision, recall and
F-1 measure.
Returns
-------
None
"""
N = 16
ind = np.arange(N) # the x locations for the groups
width = 0.2
fig = plt.figure()
ax = fig.add_subplot(111)
precision = result.iloc[:, 0]
rects1 = ax.bar(ind, precision, width, color='Coral')
recall = result.iloc[:, 1]
rects2 = ax.bar(ind + width, recall, width, color='LightSeaGreen')
f1 = result.iloc[:, 2]
rects3 = ax.bar(ind + width * 2, f1, width, color='DodgerBlue')
ax.set_ylabel('Performance Measures')
ax.set_xlabel('Strains')
ax.set_xticks(ind + width)
ax.set_xticklabels(range(16))
ax.legend((rects1[0], rects2[0], rects3[0]), ('precision', 'recall', 'F1'))
plt.show()
def plot_comparison(comparison):
"""
Plots the F1 Measure of different classification models.
It is a side-by-side barplot. For each strain, it plots
the F-1 measure of RandomForest, GradientBoosting,
SVM.
Parameters
----------
comparison: dataframe, columns of F-1 measures of 3
methods
Returns
-------
None
"""
N = 16
ind = np.arange(N) # the x locations for the groups
width = 0.2
fig = plt.figure()
ax = fig.add_subplot(111)
rf = comparison.iloc[:, 0]
rects1 = ax.bar(ind, rf, width, color='Coral')
gb = comparison.iloc[:, 1]
rects2 = ax.bar(ind + width, gb, width, color='LightSeaGreen')
svm = comparison.iloc[:, 2]
rects3 = ax.bar(ind + width * 2, svm, width, color='DodgerBlue')
ax.set_ylabel('F-1 measures')
ax.set_xlabel('Strains')
ax.set_xticks(ind + width)
ax.set_xticklabels(range(16))
ax.legend((rects1[0], rects2[0], rects3[0]),
('RandomForest', 'GradientBoosting', 'SVM'))
plt.show()
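# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the inputs these helpers expect: a DataFrame with one
# row per strain (16 rows) and three numeric columns.  The uniform random
# numbers below are placeholders, not real model performance, and pandas is
# assumed to be available alongside numpy.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.RandomState(0)
    fake_result = pd.DataFrame(rng.uniform(0.5, 1.0, size=(16, 3)),
                               columns=["precision", "recall", "F1"])
    plot_performance(fake_result)
    fake_comparison = pd.DataFrame(
        rng.uniform(0.5, 1.0, size=(16, 3)),
        columns=["RandomForest", "GradientBoosting", "SVM"])
    plot_comparison(fake_comparison)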
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/visualization/plot_classification.py",
"copies": "3",
"size": "2199",
"license": "bsd-2-clause",
"hash": 5948350357802199000,
"line_mean": 29.9718309859,
"line_max": 79,
"alpha_frac": 0.6093678945,
"autogenerated": false,
"ratio": 3.3521341463414633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5461502040841464,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import pandas as pd
from mousestyles import data
def getdistance(strain, mouse, day):
"""
    Return the distances between each two consecutive coordinate points,
    keeping only those bigger than 1 cm (truncated).
Parameters
----------
strain : int
the strain number of the mouse
mouse : int
the mouse number in its strain
day : int
the day number
Returns
-------
cut_dist : an array of number
The vector of truncated distance.
Examples
--------
>>> getdistance (0, 0, 0)
array([ 1.00648944, 1.02094319, 1.0178885 , ..., 1.00099351,
1.01191156, 1.00423354])
"""
df = data.load_movement(strain, mouse, day)
xcood = df["x"]
ycood = df["y"]
distance_vector = np.sqrt(np.diff(xcood)**2 + np.diff(ycood)**2)
msk = distance_vector > 1
cut_dist = distance_vector[msk]
return cut_dist
def fit_powerlaw(strain, mouse, day):
"""
Return the estimator of truncated power law.
Parameters
----------
strain : int
the strain number of the mouse
mouse : int
the mouse number in its strain
day : int
the day number
Returns
-------
estimator : a float number
The estimator of truncated power law.
Examples
--------
>>> fit_powerlaw (0, 0, 0)
9.4748705008269827
"""
cut_dist = getdistance(strain, mouse, day)
ret_mle = 1 + len(cut_dist) * 1 / \
(np.sum(np.log(cut_dist / np.min(cut_dist))))
return ret_mle
def fit_exponential(strain, mouse, day):
"""
Return the estimator of truncated exponential.
Parameters
----------
strain : int
the strain number of the mouse
mouse : int
the mouse number in its strain
day : int
the day number
Returns
-------
estimator : a float number
The estimator of truncated exponential distribution.
Examples
--------
>>> fit_exponential (0, 0, 0)
7.385844980814098
"""
cut_dist = getdistance(strain, mouse, day)
ret_mle = len(cut_dist) / (np.sum(cut_dist) - len(cut_dist))
return ret_mle
def fit_dist_all():
"""
Return the estimators of truncated power law and exponential for each
mouse day.
Parameters
----------
Returns
-------
    estimation : pandas.DataFrame
        data frame of the fitted power-law and exponential parameters
        for each strain, mouse, and day.
    Examples
    --------
    >>> estimation = fit_dist_all()
"""
esti_df = {"strain": [], "mouse": [], "day": [], "power": [], "exp": []}
for i in range(3):
for j in range(4):
for k in range(12):
try:
temp1 = fit_powerlaw(i, j, k)
temp2 = fit_exponential(i, j, k)
esti_df["strain"].append(i)
esti_df["mouse"].append(j)
esti_df["day"] .append(k)
esti_df["power"] .append(temp1)
esti_df["exp"] .append(temp2)
                except Exception:
                    # skip mouse-days with missing or unusable data
                    continue
estimation = pd.DataFrame(
esti_df, columns=["strain", "mouse", "day", "power", "exp"])
return estimation
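# --- Illustrative sanity check (not part of the original module) ---
# A hedged sketch of the estimator used in fit_powerlaw, namely
# alpha_hat = 1 + n / sum(log(x_i / x_min)).  Synthetic samples are drawn
# from a power law truncated at 1 via inverse-CDF sampling; the true
# exponent, seed and sample size are arbitrary demonstration values.
if __name__ == "__main__":
    np.random.seed(0)
    true_alpha = 2.5
    u = np.random.uniform(size=20000)
    samples = (1.0 - u) ** (1.0 / (1.0 - true_alpha))  # inverse CDF, x >= 1
    alpha_hat = 1 + len(samples) / np.sum(np.log(samples / samples.min()))
    print("true alpha: %.2f, estimated alpha: %.3f" % (true_alpha, alpha_hat))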
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/est_power_param.py",
"copies": "3",
"size": "3303",
"license": "bsd-2-clause",
"hash": 6655419859544203000,
"line_mean": 23.6492537313,
"line_max": 76,
"alpha_frac": 0.5467756585,
"autogenerated": false,
"ratio": 3.7878440366972477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003109452736318408,
"num_lines": 134
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import pandas as pd
import math
def compute_accelerations(speeds, timestamps):
r"""
    Returns a list of the accelerations along the path.
    Each element of the list is the ratio of a speed difference to the
    corresponding time difference. The length of the list is equal to the
    length of speeds minus 1.
Parameters
----------
speeds : list
the traveled distances along the path. Expecting the output of
compute_distances.
timestamps : list
the time difference within the path.
Its length must be equal to the length of speeds plus 1.
Should not contain same time in adjacent rows;
otherwise output would contain inf.
Returns
-------
accel : list
        contains the accelerations along the path.
Examples
--------
>>> compute_accelerations([1, 2, 0], [3, 4, 5, 6])
[0.5, -1.0]
"""
if not isinstance(speeds, list) or not isinstance(timestamps, list):
raise TypeError("speeds and timestamps must be lists")
    if len(speeds) != len(timestamps) - 1:
        raise ValueError(
            "length of speeds must be the length of timestamps minus 1")
speeds_diff = [x - y for x, y in zip(speeds[:len(speeds)], speeds[1:])]
time_diffs = [x - y for x, y in zip(timestamps[:len(timestamps) - 1],
timestamps[2:])]
test = "timestamps should not contain same times in i th and i+2 th rows."
    if np.count_nonzero(time_diffs) != len(time_diffs):
raise ValueError(test)
accel = [v / t for v, t in zip(speeds_diff, time_diffs)]
return accel
def angle_between(v1, v2):
r"""
Returns the angle in radians between vectors `v1` and `v2`.
Both vectors must have same length.
Parameters
----------
v1, v2 : lists
Vectors whose angle would be calculated.
Should have same lengths.
Should not be zero vector.
Returns
-------
    angle : numpy float object
        the angle in radians between `v1` and `v2`;
        a ValueError is raised if either vector is the zero vector.
Examples
--------
>>> angle_between([1, 0],[1, 0])
0.0
>>> angle_between([1, 0],[0, 1])
1.5707963267948966
>>> angle_between([1, 0],[-1, 0])
3.1415926535897931
"""
if not isinstance(v1, list) or not isinstance(v2, list):
raise TypeError("v1 and v2 must be lists")
    if len(v1) != len(v2):
raise ValueError("both vectors must have same lengths")
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
    if np.count_nonzero([norm_v1, norm_v2]) != 2:
raise ValueError('both vectors must have norms greater than 0')
v1_u = v1 / norm_v1
v2_u = v2 / norm_v2
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def compute_angles(path_obj, radian=False):
r"""
Returns a list of the angles in the path.
    Each element of the list is the angle between the adjacent vectors in the
    path. The returned list has the same length as path_obj, with None as its
    first and last elements.
Parameters
----------
path_obj : pandas.DataFrame
CT, CX, CY coordinates and homebase status.
Must have length greater than 3.
radian : boolean
True for the output in radians. False for in turns (i.e. 360 for a
full turn).
Default is False.
Returns
-------
angles : list
contains the angles in the path. The first and last elements are None.
Examples
--------
>>> path = pd.DataFrame({'t':[2, 4.5, 10.5], 'x':[0, 1, 1],\
'y':[0, 0, 1], 'isHB':[True, True, False]})
>>> compute_angles(path)
[None, 90.0, None]
"""
if not isinstance(path_obj, pd.core.frame.DataFrame):
raise TypeError("path_obj must be pandas DataFrame")
if not set(path_obj.keys()).issuperset(['x', 'y']):
raise ValueError("the keys of path_obj must contain 'x', 'y'")
if len(path_obj) <= 2:
raise ValueError("path_obj must contain at least 3 rows")
if not isinstance(radian, bool):
raise TypeError("radian must be bool")
indices = path_obj.index[:len(path_obj) - 1]
vectors = [path_obj.loc[i + 1, 'x':'y'] - path_obj.loc[i, 'x':'y']
for i in indices]
angles = [angle_between(list(v1), list(v2)) for v1, v2 in
zip(vectors[1:], vectors[:len(vectors)])]
if not radian:
angles = [theta * 180 / math.pi for theta in angles]
# the first and last elements should be None
angles.insert(len(angles), None)
angles.insert(0, None)
return angles
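# --- Illustrative usage (not part of the original module) ---
# A hedged sketch on a tiny made-up zig-zag path: compute_angles pads its
# output with None at both ends because the first and last points have no
# pair of adjacent path vectors.  The coordinates are invented for
# demonstration only.
if __name__ == "__main__":
    demo_path = pd.DataFrame({'t': [0.0, 1.0, 2.0, 3.0],
                              'x': [0.0, 1.0, 1.0, 2.0],
                              'y': [0.0, 0.0, 1.0, 1.0],
                              'isHB': [False, False, False, True]})
    print(compute_angles(demo_path))  # [None, 90.0, 90.0, None]
    print(compute_accelerations([1.0, 2.0, 0.0], [3.0, 4.0, 5.0, 6.0]))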
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/path_diversity/path_features.py",
"copies": "3",
"size": "4659",
"license": "bsd-2-clause",
"hash": -806828204126325400,
"line_mean": 28.8653846154,
"line_max": 78,
"alpha_frac": 0.6016312513,
"autogenerated": false,
"ratio": 3.6313328137178487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 156
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from mousestyles.data.utils import (pull_locom_tseries_subset,
total_time_rectangle_bins)
def test_pull_locom():
M = np.array([[1, 2, 3, 4, 5, 6], [8, 7, 6, 5, 5, 4], [-1, 3, 4, 1, 1, 4]])
M_ = np.array([[1, 2, 3, 4], [8, 7, 6, 5], [-1, 3, 4, 1]])
Mnew = pull_locom_tseries_subset(M, start_time=1, stop_time=4)
np.testing.assert_allclose(Mnew, M_)
M_ = np.array([[1, 2, 3, 3.4], [8, 7, 6, 6], [-1, 3, 4, 4]])
Mnew = pull_locom_tseries_subset(M, start_time=1, stop_time=3.4)
np.testing.assert_allclose(Mnew, M_)
M_ = np.array([[1.2, 2, 3, 3.4], [8, 7, 6, 6], [-1, 3, 4, 4]])
Mnew = pull_locom_tseries_subset(M, start_time=1.2, stop_time=3.4)
np.testing.assert_allclose(Mnew, M_)
M_ = np.array([[1, 2, 3, 4, 5], [8, 7, 6, 5, 5], [-1, 3, 4, 1, 1]])
Mnew = pull_locom_tseries_subset(M, start_time=0, stop_time=5)
np.testing.assert_allclose(Mnew, M_)
M_ = np.array([[1.2, 1.5], [8, 8], [-1, -1]])
Mnew = pull_locom_tseries_subset(M, start_time=1.2, stop_time=1.5)
np.testing.assert_allclose(Mnew, M_)
Mnew = pull_locom_tseries_subset(M, start_time=1, stop_time=7)
np.testing.assert_allclose(Mnew, M)
def test_total_time():
M = np.array([[1, 2, 3], [.5, .1, .1], [.3, .4, .6]])
TT = total_time_rectangle_bins(M, xbins=2, ybins=2)
np.testing.assert_allclose(TT, [[0., 0.], [1., 1.]])
M = np.array([[1, 2, 3], [.5, .6, .1], [.3, .4, .6]])
TT = total_time_rectangle_bins(M, xbins=3, ybins=5)
np.testing.assert_allclose(TT, [[0., 0., 0.], [0., 0., 0.],
[0., 1., 0.], [0., 1., 0.], [0., 0., 0.]])
M = np.array([[1, 2, 3, 4, 5, 6],
[.51, .61, .11, .81, .21, .3],
[.3, .41, .6, .1, .1, .1]])
TT = total_time_rectangle_bins(M, xbins=3, ybins=5)
np.testing.assert_allclose(TT, [[0., 0., 0.], [1., 0., 0.], [
0., 1., 0.], [0., 1., 0.], [1., 0., 1.]])
M = np.array([[1, 2], [.5, .5], [.3, .3]])
TT = total_time_rectangle_bins(M, xbins=3, ybins=5)
np.testing.assert_allclose(TT, [[0., 0., 0.], [0., 0., 0.],
[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]])
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/data/tests/test_utils.py",
"copies": "3",
"size": "2371",
"license": "bsd-2-clause",
"hash": 2864342297630106000,
"line_mean": 40.5964912281,
"line_max": 79,
"alpha_frac": 0.4812315479,
"autogenerated": false,
"ratio": 2.4723670490093848,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44535985969093844,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from .slit import Slit
class Mask:
'''Represents a slitmask'''
def __init__(self, name, mask_pa, mask_r_eff, cone_angle, brightness_profile,
slit_separation=0.5, slit_width=1, min_slit_length=3,
max_radius_factor=4, angle_offset=5):
'''
Parameters
----------
name: str, gui name of mask
mask_pa: float, degrees east of north
mask_r_eff: float, arcsec, effective radius along the mask position angle
cone_angle: float, degrees, the opening angle of the slit spatial distribution
brightness_profile: f: radius in arcsec, position angle in degrees ->
surface brightness in mag/arcsec^2
slit_separation: float, arcsec, minimum separation between slits
slit_width: float, arcsec, width of slit, should not be less than 1 arcsec
min_slit_length: float, arcsec, the minimum slit length
max_radius_factor: float, factors of Reff to which to extend the skims
angle_offset: float, degrees, rotate the slits from the mask_pa by this amount
'''
# x_max, y_max, are the maximum spatial extent of the masks, in arcsec
self.name = name
self.x_max = 498
self.y_max = 146
self.mask_pa = mask_pa
self.mask_r_eff = mask_r_eff
self.cone_angle = cone_angle
self.brightness_profile = brightness_profile
self.slit_separation = slit_separation
self.slit_width = slit_width
self.min_slit_length = min_slit_length
self.max_radius_factor = max_radius_factor
self.angle_offset = angle_offset
self.slits = []
self.best_slits = []
def __repr__(self):
mask_params_str = '<Mask: ' + self.name + ': PA: {0:.2f}, Reff: {1:.2f}, Cone angle: {2:.2f}>'
return mask_params_str.format(self.mask_pa, self.mask_r_eff, self.cone_angle)
def get_slit(self, name):
'''
Searches through the slit slit list for the named slit.
Parameters
----------
name, str, name of slit
Returns
-------
slit, Slit, name of matching Slit object, or None if no match
'''
for slit in self.slits:
if slit.name.strip() == name.strip():
return slit
print(name + ' not found in ' + self.name + '!')
return None
def get_slit_length(self, x, y, snr=35., sky=19, integration_time=7200, plate_scale=0.1185,
gain=1.2, read_noise=2.5, dark_current=4, imag_count_20=1367):
'''
Determine how long the slit should be, based on the required signal-to-noise ratio.
Default signal-to-noise ratio is set by kinematic requirements.
Default sky background is for dark sky conditions in I band.
Default time is for two hours.
Plate scale is set for DEIMOS.
Gain, read noise, and dark current are rough estimates from
http://www2.keck.hawaii.edu/inst/deimos/deimos_detector_data.html
I band counts at I = 20 are from LRIS, but should be close to DEIMOS, see
http://www2.keck.hawaii.edu/inst/deimos/lris_vs_deimos.html
To do: calibrate what value counts should have for a desired signal-to-noise ratio
Parameters
----------
x: float, arcsec, x coordinate
y: float, arcsec, y coordinate
snr: float, desired signal-to-noise ratio
sky: float, brightness of sky in mag/arcsec^2, default (sky=19) is a wild and crazy guess
integration_time: float, seconds
plate_scale: float, arcsec per pixel
gain: float, e- counts per ADU
read_noise: float, e- counts
dark_current: float, e- counts per pix per hour
imag_count_20: float, e- counts per second at I = 20 mag
'''
radius = np.sqrt(x**2 + y**2)
angle = self.mask_pa + np.degrees(np.arctan(y/x))
source_sb = self.brightness_profile(radius, angle)
# convert to e- per second per pix^2
mag_plate_scale = - 2.5 * np.log10(plate_scale**2)
source_flux = imag_count_20 * 10**(0.4 * (20 - source_sb - mag_plate_scale))
sky_flux = imag_count_20 * 10**(0.4 * (20 - sky - mag_plate_scale))
# dark = dark_current / 3600.
# denominator = (read_noise**2 + (gain / 2)**2 +
# integration_time * (source_flux + sky_flux + dark))
npix = snr**2 * sky_flux / integration_time / source_flux**2
area = npix * plate_scale**2
length = area / self.slit_width
return length
def slit_positions(self):
'''
Returns arrays with x, y positions of slits.
'''
xx = np.array([slit.x for slit in self.slits])
yy = np.array([slit.y for slit in self.slits])
return xx, yy
def _test_slits(self):
# reset slits
self.slits = []
# x is at the left edge of the slit
x = self.slit_separation / 2.
count = 0
while x < self.mask_r_eff * self.max_radius_factor:
# y_cone = np.tan(np.radians(self.cone_angle / 2.)) * x
# y = np.random.uniform(-y_cone, y_cone)
y = 0
length = max(self.min_slit_length, self.get_slit_length(x, y))
# first run gets even indices, rotated copy gets odd indices
name = 'skims{0:02d}'.format(2 * count)
self.slits.append(Slit(x + length / 2, y, length, self.slit_width,
self.mask_pa + self.angle_offset, name=name))
count += 1
x += length + self.slit_separation
def random_slits(self):
'''
Produce a random alignment (satisfying the opening angle restriction), with slit lengths
satisfying a signal-to-noise requirement.
'''
# reset slits
self.slits = []
# x is at the left edge of the slit
x = self.slit_separation / 2.
count = 0
while x < self.mask_r_eff * self.max_radius_factor:
y_cone = np.tan(np.radians(self.cone_angle / 2.)) * x
y = np.random.uniform(-y_cone, y_cone)
length = max(self.min_slit_length, self.get_slit_length(x, y))
# first run gets even indices, rotated copy gets odd indices
name = 'skims{0:02d}'.format(2 * count)
self.slits.append(Slit(x + length / 2, y, length, self.slit_width,
self.mask_pa + self.angle_offset, name=name))
count += 1
x += length + self.slit_separation
def add_sky_slits(self, num_sky_slits=10, sky_spacing=100):
'''
Place sky slits on the mask
Parameters
----------
        num_sky_slits, int, maximum number of sky slits (per half of the mask) to place
sky_spacing, float, arcsec, start placing slits this far from mask edge
'''
x = self.x_max - sky_spacing
count = 0
while x < self.x_max and count < num_sky_slits:
y = np.random.uniform(-self.y_max, self.y_max)
length = self.min_slit_length
name = 'sky{0:02d}'.format(2 * count)
self.slits.append(Slit(x + length / 2, y, length, self.slit_width,
self.mask_pa + self.angle_offset, name=name))
count += 1
x += length + self.slit_separation
def mirror_slits(self):
'''
Adds the mirror image of the current slits to the mask.
'''
nslits = len(self.slits)
for i in range(nslits):
slit = self.slits[i]
x = -slit.x
y = -slit.y
length = slit.length
width = slit.width
pa = slit.pa
slit_type = slit.name[:-2]
index = int(slit.name[-2:])
# mirrored slits are odd
name = slit_type + '{0:02d}'.format(index + 1)
self.slits.append(Slit(x, y, length, width, pa, name))
def within_mask(self, x, y):
x = np.abs(x)
a = (y < -1) | (x < 360)
b = (360 <= x) & (x < 420) & (y < -0.85*x+452.)
c = (420. <= x) & (x < 460.) & (y < -1.075*x+546.5)
d = (460. <= x) & (x < 498.) & (y < -1.9347368421*x+693.5789473684)
return a | b | c | d
def within_cones(self, x, y):
x = np.abs(x)
yline1 = np.tan(self.cone_angle / 2. * np.pi/180.) * np.array(x)
yline2 = -np.tan(self.cone_angle / 2. * np.pi/180.) * np.array(x)
return (yline2 < y) & (y < yline1)
def within_slits(self, x, y):
return np.sqrt(x**2 + y**2) <= self.mask_r_eff * self.max_radius_factor
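# --- Illustrative usage (not part of the original module) ---
# A hedged sketch: build a Mask around a toy brightness profile (an invented
# function, not a real galaxy model) and draw one random slit layout.  The
# numbers are placeholders chosen so the example runs quickly, and the Slit
# class from the packaged .slit module is assumed to be importable.
if __name__ == "__main__":
    def toy_profile(radius, position_angle):
        # made-up surface brightness in mag/arcsec^2, dimming with radius
        return 20.0 + 2.0 * np.log10(1.0 + radius)
    demo_mask = Mask(name="demo", mask_pa=30.0, mask_r_eff=40.0,
                     cone_angle=60.0, brightness_profile=toy_profile)
    demo_mask.random_slits()
    demo_mask.mirror_slits()
    xx, yy = demo_mask.slit_positions()
    print(demo_mask, "->", len(demo_mask.slits), "slits")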
| {
"repo_name": "adwasser/masktools",
"path": "masktools/superskims/mask.py",
"copies": "1",
"size": "8887",
"license": "mit",
"hash": -2388717992226478600,
"line_mean": 40.9198113208,
"line_max": 102,
"alpha_frac": 0.5536176437,
"autogenerated": false,
"ratio": 3.3535849056603775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4407202549360377,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
def get_dist_speed(movement, start, end, return_array=True):
r"""
Return a list containing distance(s) covered in a path and
average speed(s) within a path.
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status for the unique
combination of strain, mouse, and day
start : int
positive integer indicating the starting index of a path
end : int
positive integer indicating the end index of a path
return_array : bool
boolean indicating whether an array of distances and
average speeds are returned or the summation of those
distances and speeds
Returns
-------
dist : distance(s) travelled along a path
speed : average speed(s) along a path
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> dist, speed = get_dist_speed(movement, 0, 3)
>>> print(dist)
[0.0, 0.17999999999999972, 0.19446593532030554]
>>> print(speed)
[0.0, 0.9999999999983815, 0.055246004352409776]
>>> dist, speed = get_dist_speed(movement, 0, 3, return_array=False)
>>> print(dist)
0.37446593532030525
>>> print(speed)
0.096661315260887087
"""
# Check whether inputs are valid.
if (start < 0) or (end < 0):
raise ValueError("Start and end indices must be positive")
if (type(start) != int) or (type(end) != int):
raise TypeError("Start and end indices must be integers")
if start > end:
raise ValueError("Start index must be smaller than end index")
    if end - start > len(movement) - 1:
        raise ValueError("Number of observations must be less than "
                         "or equal to total observations")
if start == end:
return [0, 0]
x = movement['x'][start:(end + 1)].ravel()
y = movement['y'][start:(end + 1)].ravel()
if return_array:
t = movement['t'][start:(end + 1)].ravel()
time = np.diff(t)
dist = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2).tolist()
speed = (dist / time).tolist()
else:
t = movement['t']
time = t[end] - t[start]
dist = sum(np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2))
speed = dist / time
return [dist, speed]
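# --- Illustrative usage (not part of the original module) ---
# A hedged sketch using a hand-made movement table instead of loaded project
# data, so the example runs without the data files.  Column names follow
# load_movement (t, x, y); the coordinate values are invented, and pandas is
# imported locally because this module only imports numpy.
if __name__ == "__main__":
    import pandas as pd
    movement = pd.DataFrame({'t': [0.0, 1.0, 2.0, 4.0],
                             'x': [0.0, 3.0, 3.0, 0.0],
                             'y': [0.0, 4.0, 4.0, 0.0]})
    dist, speed = get_dist_speed(movement, 0, 3)
    print(dist)   # [5.0, 0.0, 5.0]
    print(speed)  # [5.0, 0.0, 2.5]
    print(get_dist_speed(movement, 0, 3, return_array=False))  # [10.0, 2.5]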
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/path_diversity/get_dist_speed.py",
"copies": "3",
"size": "2409",
"license": "bsd-2-clause",
"hash": 6795136597837399000,
"line_mean": 29.8846153846,
"line_max": 74,
"alpha_frac": 0.5898713159,
"autogenerated": false,
"ratio": 3.606287425149701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
def path_index(movement, stop_threshold, min_path_length):
r"""
Return a list object containing start and end indices
for a specific movement. Each element in the list is
a list containing two indices: the first element is
the start index and the second element is the end index.
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
stop_threshold : float
positive number indicating the path cutoff criteria
if the time difference between two observations is
        less than this threshold, they will be in the same path
min_path_length : int
        positive integer; paths whose index span does not exceed this
        value are discarded
Returns
-------
paths index : a list containing the indices for all paths
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_index(movement, 1, 1)[:5]
>>> paths
[[0, 2], [6, 8], [107, 113], [129, 131], [144, 152]]
"""
# check if all inputs are positive integers
conditions_value = [stop_threshold <= 0, min_path_length <= 0]
conditions_type = type(min_path_length) != int
if any(conditions_value):
raise ValueError("Input values need to be positive")
if conditions_type:
raise TypeError("min_path_length needs to be integer")
# Pull out time variable
T = movement['t'].ravel()
# Calculate time differences
TD = np.diff(T)
path = []
# index
i = 0
while i < len(TD):
start_index = i
# If time difference is less than stop_threshold
# start to track the index in this path
while TD[i] < stop_threshold:
i += 1
if i == len(TD):
break
end_index = i
# Check whether start index is equal to end index
# If they are equal jump to next index
if start_index == end_index:
            pass  # single observation; not a path
else:
path.append([start_index, end_index])
i += 1
path = [p for p in path if (p[1] - p[0]) > min_path_length]
return path
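# --- Illustrative usage (not part of the original module) ---
# A hedged sketch with a hand-made time column: observations whose gaps are
# below stop_threshold are grouped into one path, and short paths are
# dropped.  The timestamps are invented, and pandas is imported locally
# because this module only imports numpy.
if __name__ == "__main__":
    import pandas as pd
    movement = pd.DataFrame({'t': [0.0, 0.2, 0.4, 0.6, 5.0, 5.1, 5.2, 9.0]})
    print(path_index(movement, stop_threshold=1, min_path_length=1))
    # expected: [[0, 3], [4, 6]]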
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/path_diversity/path_index.py",
"copies": "3",
"size": "2301",
"license": "bsd-2-clause",
"hash": 1224103679127582000,
"line_mean": 29.68,
"line_max": 66,
"alpha_frac": 0.6014776184,
"autogenerated": false,
"ratio": 4.176043557168784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6277521175568784,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
def powerlaw_pdf(x, a):
"""
The probability density function of truncated power law.
Parameters
----------
    x : float > 0, or a np.ndarray
x in formula p(x)=(alpha-1)*x^(-alpha).
a : float > 1
alpha in formula p(x)=(alpha-1)*x^(-alpha).
Returns
-------
probability density : float
The probability density of power law at x.
Examples
--------
>>> powerlaw_pdf (2, 2)
0.25
"""
return (a - 1) * x ** (-a)
def exp_pdf(x, l):
"""
The probability density function of truncated exponential.
Parameters
----------
    x : float, or a np.ndarray
x in formula p(x)=lambda*exp(-lambda*x).
l : float
lambda in formula p(x)=lambda*exp(-lambda*x).
Returns
-------
    probability density : float
        The probability density of the truncated exponential at x.
Examples
--------
>>> exp_pdf(1, 1)
0.36787944117144233
"""
return l * np.exp(-l * (x - 1))
def powerlaw_inverse_cdf(y, a):
"""
The inverse CDF function of power law distribution
Parameters
----------
    y : float in [0, 1], or a np.ndarray
y in formula F^(-1)(y) = (1 - y)^(1/(1-a))
a : float > 1
a in formula F^(-1)(y) = (1 - y)^(1/(1-a))
Returns
-------
x : float
The inverse CDF function of power law distribution with
parameter a at point y
Examples
--------
>>> powerlaw_inverse_cdf(0.5, 5)
1.189207115002721
"""
return (1 - y)**(1/(1-a))
def exp_inverse_cdf(y, l):
"""
The inverse CDF function of truncated (at 1) exponential distribution
Parameters
----------
    y : float in [0, 1], or a np.ndarray
y in formula F^(-1)(y) = 1 - log(1 - y) / l
l : float > 0
a in formula F^(-1)(y) = 1 - log(1 - y) / l
Returns
-------
x : float
The inverse CDF function of truncated (at 1) exponential distribution
distribution with parameter l at point y
Examples
--------
>>> exp_inverse_cdf(0.6,2)
1.4581453659370776
"""
return 1 - np.log(1 - y) / l
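# --- Illustrative sanity check (not part of the original module) ---
# A hedged sketch of inverse-transform sampling: pushing uniform draws
# through powerlaw_inverse_cdf yields samples whose empirical mean should
# approach the analytic mean of the truncated power law, (a - 1) / (a - 2)
# for a > 2.  The seed, exponent and sample size are arbitrary.
if __name__ == "__main__":
    np.random.seed(0)
    a = 5.0
    u = np.random.uniform(size=100000)
    samples = powerlaw_inverse_cdf(u, a)
    print("empirical mean:", samples.mean())
    print("analytic mean: ", (a - 1) / (a - 2))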
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/distribution.py",
"copies": "3",
"size": "2255",
"license": "bsd-2-clause",
"hash": 5994046351713556000,
"line_mean": 21.1078431373,
"line_max": 77,
"alpha_frac": 0.5286031042,
"autogenerated": false,
"ratio": 3.5400313971742543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5568634501374254,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os as _os
import numpy as np
import pandas as pd
from mousestyles import data_dir
from mousestyles.intervals import Intervals
import collections.abc
# matplotlib.externals.six was removed from matplotlib; the standalone six
# package (assumed to be installed) provides the same helpers
import six
INTERVAL_FEATURES = ["AS", "F", "IS", "M_AS", "M_IS", "W"]
def load_all_features():
"""
Returns a (21131, 13) size pandas.DataFrame object corresponding to
9 features over each mouse's 2-hour time bin. The first four columns
    index each mouse's 2-hour bin:
Column 0: the strain of the mouse (0-15)
Column 1: the mouse number (number depends on strain)
Column 2: the day number (5-16)
Column 3: the 2-hour time bin (e.g., value 4 corresponds to hours 4 to 6)
The remaining 9 columns are the computed features.
Returns
-------
features_data_frame : pandas.DataFrame
A dataframe of computed features.
"""
features = [
'ASProbability',
'ASNumbers',
'ASDurations',
'Food',
'Water',
'Distance',
'ASFoodIntensity',
'ASWaterIntensity',
'MoveASIntensity']
# 9 x 1921 x (3 labels + 11 feature time bins)
all_features = np.load(
_os.path.join(
data_dir,
'all_features_mousedays_11bins.npy'))
# Here we begin reshaping the 3-d numpy array into a pandas 2-d dataframe
columns = ['strain', 'mouse', 'day']
# Append 2-hour time bin values
columns += list(range(0, 22, 2))
# For each feature, unpivot its dataframe so that the 2-hour
# time bins become a value column, rather than a dimension
data_frames = []
for (i, feature) in enumerate(features):
df = pd.DataFrame(data=all_features[i, :, :], columns=columns)
melted_df = pd.melt(df, id_vars=columns[:3], value_vars=columns[3:],
var_name='hour', value_name=feature)
data_frames.append(melted_df)
# Connect each of the feature dataframes to make one dataframe
all_features_df = data_frames.pop(0)
other_features = [x.iloc[:, -1] for x in data_frames]
other_features.insert(0, all_features_df)
final_df = pd.concat(other_features, axis=1)
# fix for data inconsistency
final_df['day'] = final_df['day'] - 5
return final_df
def load_mouseday_features(features=None):
"""
Returns a (1921, 3+11*n) size pandas.DataFrame object corresponding to
each 2-hour time bin of the n inputted features over each mouse.
The first three columns index each mouse:
Column 0: the strain of the mouse (0-15)
Column 1: the mouse number (number depends on strain)
Column 2: the day number (5-16)
    The remaining 11*n columns are the values for each 2-hour time bin
of the n inputted features.
Parameters
----------
features: list, optional
A list of one or more features chosen from
{"ASProbability", "ASNumbers", "ASDurations",
"Food", "Water", "Distance",
"ASFoodIntensity", "ASWaterIntensity", "MoveASIntensity"}
Default all features when optional
Returns
-------
features_data_frame : pandas.DataFrame
A dataframe of computed features.
Examples
--------
>>> mouseday = load_mouseday_features()
>>> mouseday = load_mouseday_features(["Food"])
>>> mouseday = load_mouseday_features(["Food", "Water", "Distance"])
"""
features_list = [
"ASProbability",
"ASNumbers",
"ASDurations",
"Food",
"Water",
"Distance",
"ASFoodIntensity",
"ASWaterIntensity",
"MoveASIntensity"]
if features is None:
features = features_list
if type(features) is str:
features = [features]
fea_str = "{"
for item in features_list:
fea_str += '"' + item + '", '
fea_str = fea_str[:-2] + "}"
# Check if input is a list
if type(features) is not list:
raise TypeError(
"Input value must be a list."
)
# Check if input values are expected features
for feature in features:
if feature not in features_list:
raise ValueError(
"Input value must be chosen from " + fea_str + "."
)
# 9 x 1921 x (3 labels + 11 feature time bins)
all_features = np.load(
_os.path.join(
data_dir,
"all_features_mousedays_11bins.npy"))
# Locate each feature and aggregate numpy arrays
dic = {}
for (i, feature) in enumerate(features_list):
dic[feature] = i
all_data_orig = np.hstack(
[all_features[0, :, 0:3]] +
[all_features[dic[feature], :, 3:] for feature in features])
# Prepare column names
columns = ["strain", "mouse", "day"]
for feature in features:
columns += [feature + "_" + str(x) for x in range(0, 22, 2)]
# Transform into data frame
data_all = pd.DataFrame(all_data_orig, columns=columns)
return data_all
def load_intervals(feature):
"""
Return a pandas.DataFrame object of project interval data
for the specified feature.
There are 5 columns in the dataframe:
strain: the strain number of the mouse
mouse: the mouse number in its strain
day: the day number
start: the start time
stop: the stop time
Parameters
----------
feature: {"AS", "F", "IS", "M_AS", "M_IS", "W"}
Returns
-------
intervals : pandas.DataFrame
All data of the specified feature as a dataframe
Examples
--------
>>> AS = load_intervals('AS')
>>> IS = load_intervals('IS')
"""
# check input is one of the provided choices
if feature not in INTERVAL_FEATURES:
raise ValueError(
'Input value must be one of {"AS", "F", "IS", "M_AS", "M_IS", "W"}'
)
# get all file names
file_names = _os.listdir(_os.path.join(data_dir, "intervals", feature))
# check if directory is empty
if len(file_names) == 0:
raise ValueError('Directory is empty; no file found.')
# initialized data frame
# use for loop to load every file and concat to overall data frame
dt = pd.DataFrame()
for item in file_names:
strain = int(item.split("strain")[1].split("_mouse")[0])
mouse = int(item.split("mouse")[1].split("_day")[0])
day = int(item.split("day")[1].split(".npy")[0])
path = _os.path.join(data_dir, "intervals", feature, item)
sub = np.load(path)
dt_sub = pd.DataFrame()
dt_sub["strain"] = [strain] * sub.shape[0]
dt_sub["mouse"] = [mouse] * sub.shape[0]
dt_sub["day"] = [day] * sub.shape[0]
dt_sub["start"] = sub[:, 0]
dt_sub["stop"] = sub[:, 1]
dt = pd.concat([dt, dt_sub])
# sort based on strain, mouse and day
dt = dt.sort_values(["strain", "mouse", "day"])
dt.index = range(dt.shape[0])
return dt
def load_movement(strain, mouse, day):
"""
Return a pandas.DataFrame object of project movement data
for the specified combination of strain, mouse and day.
There are 4 columns in the dataframe:
t: Time coordinates (in seconds)
x: X coordinates indicating the left-right position of the cage
y: Y coordinates indicating the front-back position of the cage
isHB: Boolean indicating whether the point is in the home base or not
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
day: int
nonnegative integer indicating the day number
Returns
-------
movement : pandas.DataFrame
CT, CX, CY coordinates and home base status
of the combination of strain, mouse and day
Examples
--------
>>> movement = load_movement(0, 0, 0)
>>> movement = load_movement(1, 2, 1)
"""
# check if all inputs are nonnegative integers
conditions_value = [strain < 0, mouse < 0, day < 0]
conditions_type = [type(strain) != int, type(mouse) != int,
type(day) != int]
if any(conditions_value):
raise ValueError("Input values need to be nonnegative")
if any(conditions_type):
raise TypeError("Input values need to be integer")
# load all four files of HB, CT, CX and CY data
NHB_path = "txy_coords/C_idx_HB/C_idx_HB_strain{}_mouse{}_day{}.npy".\
format(strain, mouse, day)
CT_path = "txy_coords/CT/CT_strain{}_mouse{}_day{}.npy".\
format(strain, mouse, day)
CX_path = "txy_coords/CX/CX_strain{}_mouse{}_day{}.npy".\
format(strain, mouse, day)
CY_path = "txy_coords/CY/CY_strain{}_mouse{}_day{}.npy".\
format(strain, mouse, day)
try:
HB = ~ np.load(_os.path.join(data_dir, NHB_path))
CT = np.load(_os.path.join(data_dir, CT_path))
CX = np.load(_os.path.join(data_dir, CX_path))
CY = np.load(_os.path.join(data_dir, CY_path))
except IOError:
raise ValueError("No data exists for strain {}, mouse {}, day {}".
format(strain, mouse, day))
# make data frame
dt = pd.DataFrame()
dt["t"] = CT
dt["x"] = CX
dt["y"] = CY
dt["isHB"] = HB
return dt
def _lookup_intervals(times, intervals):
"""
Return a boolean array where each element is True
if the corresponding element of `times` was
in `intervals`.
Parameters
----------
times: numpy.array of floats
an array of timestamps
intervals: pandas.DataFrame
a data frame containing columns 'start' and
'stop', represent a series of time intervals
Returns
-------
numpy.array of booleans
Array of booleans representing whether the timestamps
in `times` fell in the intervals in `intervals`
Examples
--------
>>> t = pd.Series([1.5, 2.5, 3.5])
>>> ints = pd.DataFrame({'start': [1, 2], 'stop': [1.99, 2.99]})
>>> in_intervals = _lookup_intervals(t, ints)
>>> t.shape == in_intervals.shape
True
>>> in_intervals
0 True
1 True
2 False
dtype: bool
"""
ints = Intervals(intervals[['start', 'stop']])
return times.map(ints.contains)
def load_movement_and_intervals(strain, mouse, day,
features=INTERVAL_FEATURES):
"""
Return a pandas.DataFrame object of project movement and interval
data for the specified combination of strain, mouse and day.
    There are 4 + len(features) columns in the dataframe:
    t: Time coordinates (in seconds)
    x: X coordinates indicating the left-right position of the cage
    y: Y coordinates indicating the front-back position of the cage
    isHB: Boolean indicating whether the point is in the home base or not
    Additional columns taking their names from features: Boolean indicating
    whether the time point is in an interval of behavior of the given feature.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
day: int
nonnegative integer indicating the day number
features: list (or other iterable) of strings
list of features from {"AS", "F", "IS", "M_AS", "M_IS", "W"}
Returns
-------
movement : pandas.DataFrame CT, CX, CY
coordinates, home base status, and feature interval information
for a given srain, mouse and day
Examples
--------
>>> m1 = load_movement(1, 1, 1)
>>> m2 = load_movement_and_intervals(1, 1, 1, []) # don't add any features
>>> np.all(m1 == m2)
True
>>> m3 = load_movement_and_intervals(1, 1, 1, ['AS'])
>>> m3.shape[1] == m1.shape[1] + 1 # adds one column
True
>>> m3.shape[0] == m1.shape[0] # same number of rows
True
>>> m3[29:32]
t x y isHB AS
29 56448.333 -6.289 34.902 False False
30 56448.653 -5.509 34.173 True True
31 56449.273 -5.048 33.284 True True
"""
if isinstance(features, six.string_types):
features = [features]
    elif not isinstance(features, collections.abc.Iterable):
raise ValueError('features must be a string or iterable of strings')
movements = load_movement(strain, mouse, day)
for f in features:
intervals = load_intervals(feature=f)
mouse_intervals = intervals[(intervals['strain'] == strain) &
(intervals['mouse'] == mouse) &
(intervals['day'] == day)]
movements[f] = _lookup_intervals(movements['t'], mouse_intervals)
return movements
def load_start_time_end_time(strain, mouse, day):
"""
Returns the start and end times recorded
for the mouse-day. The first number indicates
the number of seconds elapsed since midnight,
the second number indicates when the cage is
closed for cleaning. In other words, this is
the interval for which all sensors are active.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
day: int
nonnegative integer indicating the day number
Returns
-------
times: a tuple of (float, float)
the start time and end time
"""
file_name = 'recordingStartTimeEndTime_strain{}_mouse{}_day{}.npy'.\
format(strain, mouse, day)
path_to_file = _os.path.join(data_dir, 'txy_coords',
'recordingStartTimeEndTime',
file_name)
return tuple(np.load(path_to_file))
def distances(strain, mouse, day, step=50):
"""
Return a numpy array object of project movement data
for the specified combination of strain, mouse and day.
At regular timesteps, defined by the step parameter,
compute the euclidian distance between the positions
of the mouse at two consecutive times.
More specifically:
- let delta_t be the step parameter.
    - let $t_n$ be the sequence of non-negative numbers such
that $t_0 = 0$ and $t_(n+1) = t_n + delta_t$. The sequence
is defined for all $n$ such that $n>=0$ and $t_n <= time$
of the experiment
- let $d_n$ be the sequence of non negative numbers such
that $d_0 = 0$ and $d_n$ equals the position of the mouse
at a particular day at time $t_n$. $d_n$ is then defined
on the same set of integers as the sequence $t_n$.
- The function returns the sequence $d_n$.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
day: int
nonnegative integer indicating the day number
step: float
positive float defining the time between two observations
default corresponds to 1 second
Returns
-------
movement : numpy array
Examples
--------
>>> dist = distances(0, 0, 0, step=1e2)
"""
movement = load_movement(strain, mouse, day)
# Compute distance between samples
dist = np.sqrt(movement["x"].diff()[1:]**2 + movement["y"].diff()[1:]**2)
time = movement['t'][1:] - movement['t'][0]
t_final = time[len(time) - 1]
# Aggregate distances according to step
aggregate = np.zeros(int(t_final / step))
j = 1
for i in range(len(aggregate)):
while time[j] < i * step:
aggregate[i] = aggregate[i] + dist[j]
j = j + 1
return aggregate
def distances_bymouse(strain, mouse, step=50, verbose=False):
"""
Aggregates 'distances' for all days of recorded data for
one particular mouse.
More specifically:
- let $d^1,...,d^D$ be the sequence of distances for one particular
mouse for days $1$ to $D$.
- The function returns the concatenation of the $d^i$.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
step: float
positive float defining the time between two observations
default corresponds to 1 second
Returns
-------
movement : numpy array
Examples
--------
>>> dist = distances_bymouse(0, 0, step=1e2)
"""
day = 0
res = np.array([])
while True:
try:
res = np.append(res, distances(strain, mouse, day,
step=step))
day += 1
if verbose:
print('day %s done.' % day)
except ValueError:
break
return res
def distances_bystrain(strain, step=50, verbose=False):
"""
Aggregates distances_bymouse for all mice in one given
strain.
More specifically:
- let $d^1,...,d^M$ be the sequence of distances for one particular
    strain for mice $1$ to $M$.
    - The function returns the concatenation of the $d^i$.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
step: float
positive float defining the time between two observations
default corresponds to 1 second
Returns
-------
movement : numpy array
Examples
--------
>>> dist = distances_bystrain(0, step=1e2)
"""
mouse = 0
res = np.array([])
dist = np.array([0])
while dist.size > 0:
dist = distances_bymouse(strain, mouse,
step=step)
res = np.append(res, dist)
mouse += 1
if verbose:
print('mouse %s done.' % mouse)
return res
def load_time_matrix_dynamics():
"""
Load the time matrix for dynamics pattern project
"""
return pd.read_csv(
_os.path.join(
data_dir,
'time_matrix.csv'))
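# --- Illustrative sketch (not part of the original module) ---
# A hedged, self-contained illustration of the binning performed inside
# `distances`: step-wise displacements are accumulated into fixed-width time
# bins.  The circular trajectory below is synthetic and replaces data loaded
# from disk, so the sketch runs without the project data files; the loop adds
# an explicit bounds check that the original omits.
if __name__ == "__main__":
    t = np.arange(0.0, 10.0, 0.1)    # fake timestamps in seconds
    x, y = np.cos(t), np.sin(t)      # fake unit-circle trajectory
    disp = np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2)
    step = 1.0
    rel_t = t[1:] - t[0]
    binned = np.zeros(int(rel_t[-1] / step))
    j = 0
    for i in range(len(binned)):
        while j < len(rel_t) and rel_t[j] < i * step:
            binned[i] += disp[j]
            j += 1
    print("distance per %.0f s bin:" % step, np.round(binned, 3))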
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/data/__init__.py",
"copies": "3",
"size": "18023",
"license": "bsd-2-clause",
"hash": 3760767207990582000,
"line_mean": 30.6192982456,
"line_max": 79,
"alpha_frac": 0.5995672197,
"autogenerated": false,
"ratio": 3.837947189097104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 570
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import numpy as np
import scipy.linalg
if os.environ.get("USE_FAST_FAKELU", "0") == "1":
print("Using C++ extension for FakeLU")
from fakelu import fast_FakeLU as FakeLU
else:
from fakelu import FakeLU
def test_FakeLU():
A = np.array([
[ 9, 3, .1, 0,.01, 0],
[ -4, 18, 0, .5, 0,.02],
[ .2, 0, 7, -2, 1, 0],
[ 0,-.1, 3, 5, 0,-.1],
[.03, 0, .1, 0, -9, 2],
[ 0,.04, 0,.05, -3, 7]
])
b = np.array([-7, 3, 5, -4, 6, 1.5])
lu, piv = scipy.linalg.lu_factor(A)
x = scipy.linalg.lu_solve((lu, piv), b)
fLU = FakeLU(A, 2, 2)
fx = np.array(fLU.solve(b))
assert np.allclose(x, fx, rtol=0.05, atol=1e-6)
def _get_test_m1():
A = np.array([
[1, 3, 5],
[2, 4, 7],
[1, 1, 0]
])
ref = np.array([
[ 2.0, 4.0, 7.0],
[ 0.5, 1.0, 1.5],
[ 0.5,-1.0,-2.0]
])
return A, ref
def _get_test_m2():
A = np.array([
[ 5, 3, 2, 0, 0, 0],
[ 5, 8, 0, 3, 0, 0],
[ 1, 0, 8, 4, 4, 0],
[ 0, 2, 4, 4, 0, 5],
[ 0, 0, 3, 0, 6, 9],
[ 0, 0, 0, 4, 2, 7]
], dtype=np.float64)
ref = np.array([
[ 5, 3, 2, 0, 0, 0],
[ 1, 5, 0, 3, 0, 0],
[1/5, 0, 8, 4, 4, 0],
[ 0,2/5,1/2, 2, 0, 5],
[ 0, 0,3/8, 0, 6, 9],
[ 0, 0, 0,4/2,1/3, 4]
], dtype=np.float64)
return A, ref
def _get_test_m3():
A = np.array([
[-17, 63, .2, 0],
[ 37, 13, 0, .3],
[ .1, 0, 11, 72],
[ 0, .2,-42, 24]
], dtype=np.float64)
pivref = [[1, 1], [1, 1]]
# [ 37, 13, 0, .3],
# [-17, 63, .2, 0],
# [ 0, .2,-42, 24]
# [ .1, 0, 11, 72],
ref = np.array([
[ 37, 13, 0, .3],
[-17/37, 63+17/37*13, .2, 0],
[ 0, .2/(63+17/37*13), -42, 24],
[0.1/37, 0, -11/42, 72+11/42*24]
], dtype=np.float64)
return A, ref, pivref
def _get_test_m4():
A = np.array([
[-17, 63, .2, 0,.02, 0],
[ 37, 13, 0, .3, 0,.03],
[ .1, 0, 11, 72,-.1, 0],
[ 0, .2,-42, 24, 0, .2],
[.03, 0,-.1, 0, 72,-13],
[ 0,-.1, 0,.08, 14,-57]
], dtype=np.float64)
pivref = [[1, 1], [1, 1], [0, 1]]
# [ 37, 13, 0, .3, 0,.03],
# [-17, 63, .2, 0,.02, 0],
# [ 0, .2,-42, 24, 0, .2],
# [ .1, 0, 11, 72,-.1, 0],
# [.03, 0,-.1, 0, 72,-13],
# [ 0,-.1, 0,.08, 14,-57]
a = 63 + 17/37*13
b = 72 + 11/42*24
c = -57 + 14/72*13
ref = np.array([
[ 37, 13, 0, .3, 0, .03],
[-17/37, a, .2, 0, .02, 0],
[ 0, .2/a, -42, 24, 0, .2],
[ .1/37, 0, -11/42, b, -.1, 0],
[.03/37, 0, .1/42, 0, 72, -13],
[ 0, -.1/a, 0, .08/b, 14/72, c],
], dtype=np.float64)
return A, ref, pivref
def test_FakeLU_LU_merged_1():
A1, ref1 = _get_test_m1()
fLU1 = FakeLU(A1, 3)
assert np.allclose(fLU1.LU_merged, ref1)
assert np.all(fLU1.piv[0] == [1, 1, 2])
def test_FakeLU_LU_merged_2():
A2, ref2 = _get_test_m2()
fLU2 = FakeLU(A2, 2, 1)
assert np.allclose(fLU2.LU_merged, ref2)
for i in range(3):
assert np.all(fLU2.piv[i] == [0, 1])
def test_FakeLU_LU_merged_3():
A3, ref3, pivref3 = _get_test_m3()
fLU3 = FakeLU(A3, 2, 1)
fLU3_LU_merged = fLU3.LU_merged
assert np.allclose(ref3, fLU3_LU_merged)
for i, pivref in enumerate(pivref3):
assert np.all(pivref == fLU3.piv[i])
def test_FakeLU_solve_1():
A1, ref1 = _get_test_m1()
fLU1 = FakeLU(A1, 3)
xref = [2, 3, 5]
b = np.array([2+9+25, 4+12+35, 2+3], dtype=np.float64)
x = fLU1.solve(b)
assert np.allclose(x, xref)
def test_FakeLU_solve_2():
A2, ref2 = _get_test_m2()
fLU2 = FakeLU(A2, 2, 1)
assert np.allclose(fLU2.sub[0], [1/5, 2/5, 3/8, 4/2])
assert np.allclose(fLU2.sup[0], [2, 3, 4, 5])
b = np.array([65, 202, 11, 65, 60, 121], dtype=np.float64)
# scipy.linalg.lu_solve(scipy.linalg.lu_factor(A2), b) gives xtrue:
xtrue = [11, 12, -13, 17, 9, 5]
    # but here we verify the erroneous result `xref` from the incomplete LU
    # factorization of A2, which is only mildly diagonally dominant:
# LUx = b
# Ly = b
yref = [b[0]]
yref = yref + [b[1] - 1*yref[0]]
yref = yref + [b[2] - yref[0]/5]
yref = yref + [b[3] - 2/5*yref[1] - 1/2*yref[2]]
yref = yref + [b[4] - 3/8*yref[2]]
yref = yref + [b[5] - 2*yref[3] - 1/3*yref[4]]
# Ux = y
xref = [(yref[5] )/4]
xref = [(yref[4]-9*xref[-1])/6] + xref
xref = [(yref[3]-5*xref[-1])/2] + xref
xref = [(yref[2]-4*xref[-2]-4*xref[-3])/8] + xref
xref = [(yref[1]-3*xref[-3])/5] + xref
xref = [(yref[0]-3*xref[-5]-2*xref[-4])/5] + xref
x = fLU2.solve(b)
assert np.allclose(xref, x)
assert not np.allclose(xtrue, x) # <-- shows that the error is intentional
def test_FakeLU_solve_3():
A3, ref3, pivref3 = _get_test_m3()
fLU3 = FakeLU(A3, 2, 1)
assert np.allclose(fLU3.sub[0], [0.1/37, .2/(63+17/37*13)])
assert np.allclose(fLU3.sup[0], [.2, .3])
assert np.all(fLU3.rowbycol[0] == [1, 0])
assert np.all(fLU3.rowbycol[1] == [1, 0])
assert np.all(fLU3.colbyrow[0] == [1, 0])
assert np.all(fLU3.colbyrow[1] == [1, 0])
b = np.array([-62, 207, 11, -14], dtype=np.float64)
xtrue = scipy.linalg.lu_solve(scipy.linalg.lu_factor(A3), b.copy())
# LUx = b
# Ly = b
yref = [b[1]]
yref = yref + [b[0] + 17/37*yref[0]]
yref = yref + [b[3] - .2/(63+17/37*13)*yref[1]]
yref = yref + [b[2] - 0.1/37*yref[0] + 11/42*yref[2]]
# print('yref=',yref)
# Ux = y
xref = [(yref[3] )/(72+11/42*24)]
xref = [(yref[2]-24*xref[-1])/(-42)] + xref
xref = [(yref[1]-0.2*xref[-2])/(63+17/37*13)] + xref
xref = [(yref[0]-0.3*xref[-1] - 13*xref[-3])/37] + xref
xref = np.array(xref)
x = fLU3.solve(b.copy())
# print('bsolveref', [b[1], b[0], b[3], b[2]])
# print('xtrue',xtrue)
# print('xref',xref)
# print('x',x)
# print('xref-x',list(xref - x))
assert np.allclose(xref, x)
assert np.allclose(xtrue, x, rtol=0.01, atol=1e-6)
def test_FakeLU_solve_4():
A4, ref4, pivref4 = _get_test_m4()
fLU4 = FakeLU(A4, 2, 2)
assert np.allclose(fLU4.sub[0], [0.1/37, .2/(63+17/37*13), .1/42, .08/(72+11/42*24)])
# print(fLU4.sub[1])
assert np.allclose(fLU4.sub[1], [0.03/37, -.1/(63+17/37*13)])
assert np.allclose(fLU4.sup[0], [.2, .3, -.1, .2])
assert np.allclose(fLU4.sup[1], [.02, .03])
assert np.all(fLU4.rowbycol[0] == [1, 0])
assert np.all(fLU4.rowbycol[1] == [1, 0])
assert np.all(fLU4.rowbycol[2] == [0, 1])
assert np.all(fLU4.colbyrow[0] == [1, 0])
assert np.all(fLU4.colbyrow[1] == [1, 0])
assert np.all(fLU4.colbyrow[2] == [0, 1])
b = np.array([-62, 207, 11, -14, 25, -167], dtype=np.float64)
xtrue = scipy.linalg.lu_solve(scipy.linalg.lu_factor(A4), b.copy())
a_ = 63 + 17/37*13
b_ = 72 + 11/42*24
c_ = -57 + 14/72*13
# LUx = b
# Ly = b
yref = [b[1]]
yref = yref + [b[0] + 17/37*yref[0]]
yref = yref + [b[3] - .2/a_*yref[1]]
yref = yref + [b[2] - 0.1/37*yref[0] + 11/42*yref[2]]
yref = yref + [b[4] - .03/37*yref[0] - .1/42*yref[2]]
yref = yref + [b[5] + .1/a_*yref[1] - .08/b_*yref[3] - 14/72*yref[4]]
# print('yref=',yref)
# Ux = y
xref = [(yref[5] )/c_]
xref = [(yref[4]+13*xref[-1])/72] + xref
xref = [(yref[3]+.1*xref[-2])/b_] + xref
xref = [(yref[2]-24*xref[-3]-.2*xref[-1])/(-42)] + xref
xref = [(yref[1]-0.2*xref[-4]-0.02*xref[-2])/a_] + xref
xref = [(yref[0]-0.03*xref[-1]-0.3*xref[-3] - 13*xref[-5])/37] + xref
    # print('\n'.join(["%23.17e," % x for x in xref]))  # debug output, kept commented out like the other prints
xref = np.array(xref)
x = fLU4.solve(b.copy())
# print('bsolveref', [b[1], b[0], b[3], b[2], b[4], b[5]])
# print('xtrue',xtrue)
# print('xref',xref)
# print('x',x)
# print('x-xtrue',list(x-xtrue))
# print('x-xref',list(x-xref))
assert np.allclose(xref, x)
assert np.allclose(xtrue, x, rtol=0.03, atol=1e-6)
if __name__ == '__main__':
test_FakeLU_solve_4()
| {
"repo_name": "chemreac/block_diag_ilu",
"path": "python_prototype/tests/test_fakelu.py",
"copies": "1",
"size": "8673",
"license": "bsd-2-clause",
"hash": 1797668231437794600,
"line_mean": 31.0036900369,
"line_max": 89,
"alpha_frac": 0.4515161997,
"autogenerated": false,
"ratio": 2.2232760830556266,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3174792282755627,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import time
import codecs
try:
import simplejson as json
json
except ImportError:
import json
try:
import urllib.request as urllib_request
except ImportError:
import urllib2 as urllib_request
SLEEP_TIME = 20 # seconds
def get_ranked_english():
'''
    Wiktionary has a list of ~40k English words, ranked by frequency of occurrence in TV and movie transcripts.
more details at:
http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/TV/2006/explanation
the list is separated into pages of 1000 or 2000 terms each.
* the first 10k words are separated into pages of 1000 terms each.
* the remainder is separated into pages of 2000 terms each:
'''
URL_TMPL = 'http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/TV/2006/%s'
urls = []
for i in range(10):
freq_range = "%d-%d" % (i * 1000 + 1, (i+1) * 1000)
urls.append(URL_TMPL % freq_range)
for i in range(0,15):
freq_range = "%d-%d" % (10000 + 2 * i * 1000 + 1, 10000 + (2 * i + 2) * 1000)
urls.append(URL_TMPL % freq_range)
urls.append(URL_TMPL % '40001-41284')
ranked_terms = [] # ordered by rank, in decreasing frequency.
for url in urls:
html, is_cached = wiki_download(url)
if not is_cached:
time.sleep(SLEEP_TIME)
new_terms = parse_wiki_terms(html)
ranked_terms.extend(new_terms)
return ranked_terms
def wiki_download(url):
'''
scrape friendly: sleep 20 seconds between each request, cache each result.
'''
DOWNLOAD_TMPL = '../data/tv_and_movie_freqlist%s.html'
freq_range = url[url.rindex('/')+1:]
tmp_path = DOWNLOAD_TMPL % freq_range
if os.path.exists(tmp_path):
print('cached.......', url)
with codecs.open(tmp_path, 'r', 'utf8') as f:
return f.read(), True
with codecs.open(tmp_path, 'w', 'utf8') as f:
print('downloading...', url)
req = urllib_request.Request(url, headers={
'User-Agent': 'zxcvbn'
})
response = urllib_request.urlopen(req)
result = response.read().decode('utf8')
f.write(result)
return result, False
def parse_wiki_terms(doc):
'''who needs an html parser. fragile hax, but checks the result at the end'''
results = []
last3 = ['', '', '']
header = True
for line in doc.split('\n'):
last3.pop(0)
last3.append(line.strip())
if all(s.startswith('<td>') and not s == '<td></td>' for s in last3):
if header:
header = False
continue
last3 = [s.replace('<td>', '').replace('</td>', '').strip() for s in last3]
rank, term, count = last3
rank = int(rank.split()[0])
term = term.replace('</a>', '')
term = term[term.index('>')+1:].lower()
results.append(term)
assert len(results) in [1000, 2000, 1284] # early docs have 1k entries, later have 2k, last doc has 1284
return results
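# Hedged illustration (added, not part of the original script): parse_wiki_terms
# keeps a 3-line sliding window and fires whenever all three lines are non-empty
# <td> cells, which is how one frequency-table row (rank, term, count) appears in
# the raw wiki HTML.  The fabricated snippet below shows just the window mechanics;
# real pages hold 1000/2000/1284 rows, so parse_wiki_terms itself would reject it.
def _demo_sliding_window():
    doc = '\n'.join(['<tr>',
                     '<td>1</td>',
                     '<td><a href="/wiki/you">you</a></td>',
                     '<td>1222421</td>',
                     '</tr>'])
    last3 = ['', '', '']
    rows = []
    for line in doc.split('\n'):
        last3.pop(0)
        last3.append(line.strip())
        if all(s.startswith('<td>') and not s == '<td></td>' for s in last3):
            rows.append([s.replace('<td>', '').replace('</td>', '').strip()
                         for s in last3])
    return rows  # [['1', '<a href="/wiki/you">you</a>', '1222421']]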
def get_ranked_census_names():
'''
    takes name lists from the 2000 US census and prepares them as a json array in order of frequency (most common names first).
more info:
http://www.census.gov/genealogy/www/data/2000surnames/index.html
files in data are downloaded copies of:
http://www.census.gov/genealogy/names/dist.all.last
http://www.census.gov/genealogy/names/dist.male.first
http://www.census.gov/genealogy/names/dist.female.first
'''
FILE_TMPL = '../data/us_census_2000_%s.txt'
SURNAME_CUTOFF_PERCENTILE = 85 # ie7 can't handle huge lists. cut surname list off at a certain percentile.
lists = []
for list_name in ['surnames', 'male_first', 'female_first']:
path = FILE_TMPL % list_name
lst = []
for line in codecs.open(path, 'r', 'utf8'):
if line.strip():
if list_name == 'surnames' and float(line.split()[2]) > SURNAME_CUTOFF_PERCENTILE:
break
name = line.split()[0].lower()
lst.append(name)
lists.append(lst)
return lists
def get_ranked_common_passwords():
lst = []
for line in codecs.open('../data/common_passwords.txt', 'r', 'utf8'):
if line.strip():
lst.append(line.strip())
return lst
def to_ranked_dict(lst):
return dict((word, i) for i, word in enumerate(lst))
def filter_short(terms):
'''
only keep if brute-force possibilities are greater than this word's rank in the dictionary
'''
return [term for i, term in enumerate(terms) if 26**(len(term)) > i]
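# Hedged worked example (added, not part of the original script): a term survives
# filter_short only when brute-forcing it over 26 lowercase letters would take more
# guesses than its rank i in the list, i.e. 26**len(term) > i.
def _demo_filter_short():
    terms = ['w%d' % k for k in range(40)] + ['a']
    kept = filter_short(terms)
    assert 'a' not in kept   # at index 40: 26**1 = 26 <= 40, brute force is cheaper
    assert 'w39' in kept     # at index 39: 26**3 = 17576 > 39, keep it
    return kept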
def filter_dup(lst, lists):
'''
filters lst to only include terms that don't have lower rank in another list
'''
max_rank = len(lst) + 1
dct = to_ranked_dict(lst)
dicts = [to_ranked_dict(l) for l in lists]
return [word for word in lst if all(dct[word] < dct2.get(word, max_rank) for dct2 in dicts)]
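# Hedged worked example (added, not part of the original script): filter_dup keeps a
# word only if no other list ranks it higher (i.e. with a smaller index).
def _demo_filter_dup():
    passwords = ['monkey', 'dragon']
    english = ['the', 'of', 'monkey']
    assert filter_dup(english, [passwords]) == ['the', 'of']         # 'monkey' ranks higher in passwords
    assert filter_dup(passwords, [english]) == ['monkey', 'dragon']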
def filter_ascii(lst):
'''
removes words with accent chars etc.
(most accented words in the english lookup exist in the same table unaccented.)
'''
return [word for word in lst if all(ord(c) < 128 for c in word)]
def main():
english = get_ranked_english()
surnames, male_names, female_names = get_ranked_census_names()
passwords = get_ranked_common_passwords()
[english,
surnames, male_names, female_names,
passwords] = [filter_ascii(filter_short(lst)) for lst in (english,
surnames, male_names, female_names,
passwords)]
# make dictionaries disjoint so that d1 & d2 == set() for any two dictionaries
all_dicts = set(tuple(l) for l in [english, surnames, male_names, female_names, passwords])
passwords = filter_dup(passwords, all_dicts - set([tuple(passwords)]))
male_names = filter_dup(male_names, all_dicts - set([tuple(male_names)]))
female_names = filter_dup(female_names, all_dicts - set([tuple(female_names)]))
surnames = filter_dup(surnames, all_dicts - set([tuple(surnames)]))
english = filter_dup(english, all_dicts - set([tuple(english)]))
with open('../generated/frequency_lists.json', 'w') as f: # words are all ascii at this point
lsts = locals()
out = {}
for lst_name in 'passwords male_names female_names surnames english'.split():
lst = lsts[lst_name]
out[lst_name] = lst
json.dump(out, f)
print('\nall done! totals:\n')
print('passwords....', len(passwords))
print('male.........', len(male_names))
print('female.......', len(female_names))
print('surnames.....', len(surnames))
print('english......', len(english))
print()
if __name__ == '__main__':
if os.path.basename(os.getcwd()) != 'scripts':
print('run this from the scripts directory')
exit(1)
main()
| {
"repo_name": "moreati/python-zxcvbn",
"path": "zxcvbn/scripts/build_frequency_lists.py",
"copies": "1",
"size": "7091",
"license": "mit",
"hash": -1082776628520189400,
"line_mean": 35.7409326425,
"line_max": 123,
"alpha_frac": 0.5968128614,
"autogenerated": false,
"ratio": 3.519106699751861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4615919561151861,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
import numpy as np
from mousestyles import data
try:
basestring
except NameError:
basestring = str
def create_time_matrix(combined_gap=4, time_gap=1,
days_index=137, verbose=False):
r"""
    Return a time matrix used to estimate the MLE probability.
    The rows are 137 mouse days. The columns are the time series
    within a day. The entries are the mouse activity at that time:
    0 represents IS, 1 represents eating, 2 represents
    drinking, 3 represents other activity in AS.
Parameters
----------
combined_gap: nonnegative float or int
The threshold for combining small intervals. If next start time
minus last stop time is smaller than combined_gap than combined
these two intervals.
time_gap: positive float or int
The time gap for create the columns time series
days_index: nonnegative int
The number of days to process, from day 0 to day days_index.
verbose: bool
If True, print out helpful information to the screen
Returns
-------
time: Pandas.DataFrame
        a matrix representing the activity for a certain
        mouse day at a certain time.
Examples
--------
>>> time = create_time_matrix(combined_gap=4, time_gap=1).iloc[0, 0:10]
>>> strain 0
mouse 0
day 0
48007 0
48008 0
48009 0
48010 0
48011 0
48012 0
48013 0
Name: 0, dtype: float64
"""
# check all the inputs
condition_combined_gap = ((type(combined_gap) == int or
type(combined_gap) == float) and
combined_gap >= 0)
condition_time_gap = ((type(time_gap) == int or type(time_gap) ==
float) and time_gap > 0)
condition_days_index = (type(days_index) == int and days_index >= 0)
if not condition_time_gap:
raise ValueError("time_gap should be nonnegative int or float")
if not condition_combined_gap:
raise ValueError("combined_gap should be nonnegative int or float")
if not condition_days_index:
raise ValueError("days_index should be nonnegative int")
intervals_AS = data.load_intervals('AS')
intervals_F = data.load_intervals('F')
intervals_W = data.load_intervals('W')
intervals_IS = data.load_intervals('IS')
# 137 days totally
days = np.array(intervals_AS.iloc[:, 0:3].drop_duplicates().
reset_index(drop=True))
# set time range for columns
initial = int(min(intervals_IS['stop']))
end = int(max(intervals_IS['stop'])) + 1
columns = np.arange(initial, end + 1, time_gap)
# result matrix
matrix = np.zeros((days.shape[0], len(columns)))
# we set 0 as IS, 1 as F, 2 as W, 3 as Others
for i in range(days.shape[0]):
W = np.array(intervals_W[(intervals_W['strain'] == days[i, 0]) &
(intervals_W['mouse'] == days[i, 1]) &
(intervals_W['day'] == days[i, 2])].
iloc[:, 3:5])
F = np.array(intervals_F[(intervals_F['strain'] == days[i, 0]) &
(intervals_F['mouse'] == days[i, 1]) &
(intervals_F['day'] == days[i, 2])].
iloc[:, 3:5])
AS = np.array(intervals_AS[(intervals_AS['strain'] == days[i, 0]) &
(intervals_AS['mouse'] == days[i, 1]) &
(intervals_AS['day'] == days[i, 2])].
iloc[:, 3:5])
n = W.shape[0]
index = (np.array(np.where(W[1:, 0] - W[0:n - 1, 1] >
combined_gap))).ravel()
stop_W = W[np.append(index, n - 1), 1]
start_W = W[np.append(0, index + 1), 0]
n = F.shape[0]
index = (np.array(np.where(F[1:, 0] - F[0:n - 1, 1] >
combined_gap))).ravel()
stop_F = F[np.append(index, n - 1), 1]
start_F = F[np.append(0, index + 1), 0]
n = AS.shape[0]
index = (np.array(np.where(AS[1:, 0] - AS[0:n - 1, 1] >
combined_gap))).ravel()
stop_AS = AS[np.append(index, n - 1), 1]
start_AS = AS[np.append(0, index + 1), 0]
for j in range(len(columns)):
if sum(np.logical_and(columns[j] > start_AS, columns[j] <
stop_AS)) != 0:
if sum(np.logical_and(columns[j] > start_F, columns[j] <
stop_F)) != 0:
matrix[i, j] = 1 # food
elif sum(np.logical_and(columns[j] > start_W, columns[j] <
stop_W)) != 0:
matrix[i, j] = 2 # water
else:
matrix[i, j] = 3 # others
        # report the percent of the matrix that has been processed
if verbose:
print(i / days.shape[0], 'has been processed')
if i > days_index:
break
# format data frame
matrix = pd.DataFrame(matrix, columns=columns)
title = pd.DataFrame(days, columns=['strain', 'mouse', 'day'])
time_matrix = pd.concat([title, matrix], axis=1)
return time_matrix
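# Hedged sketch (added, not part of the original module): the index/start/stop
# bookkeeping inside create_time_matrix merges consecutive [start, stop] intervals
# whose gap does not exceed combined_gap.  The same three lines on a toy array:
def _demo_combine_intervals():
    W = np.array([[0., 1.], [1.5, 2.], [10., 11.]])  # gaps of 0.5 and 8.0 seconds
    combined_gap = 4
    n = W.shape[0]
    index = (np.array(np.where(W[1:, 0] - W[0:n - 1, 1] > combined_gap))).ravel()
    stop_W = W[np.append(index, n - 1), 1]   # array([ 2., 11.])
    start_W = W[np.append(0, index + 1), 0]  # array([ 0., 10.])
    return start_W, stop_W  # merged intervals: [0, 2] and [10, 11]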
def get_prob_matrix_list(time_df, interval_length=1000):
r"""
    Returns a list of probability transition matrices
    that will later be used to characterize and simulate
    the behavior dynamics of different strains of mice.
    The input is the pandas DataFrame generated by the
    function create_time_matrix with default parameters.
    The output is a list of numpy arrays, each being a
    transition matrix characterizing one small time
    interval. The interval length can be chosen.
Parameters
----------
time_df: Pandas.DataFrame
a huge data frame containing info on strain, mouse
no., mouse day, and different states at chosen time
points.
interval_length: int
an integer specifying the desired length of each
small time interval.
Returns
-------
matrix_list: list
a list of the mle estimations of the probability tran-
sition matrices for each small time interval stored in
the format of numpy array. Each element of this list
is a numpy array matrix.
Examples
--------
>>> row_i = np.hstack((np.zeros(13), np.ones(10),
np.ones(10)*2, np.ones(10)*3))
>>> time_df_eg = np.vstack((row_i, row_i,row_i))
>>> time_df_eg = pd.DataFrame(time_df_eg)
>>> mat_list = get_prob_matrix_list(time_df_eg,
interval_length=10)
>>> mat_list[0]
>>> array([[ 1., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> mat_list[1]
>>> array([[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> mat_list[2]
>>> array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 0.]])
>>> mat_list[3]
>>> array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 1.]])
"""
# check all the inputs
condition_time_df = isinstance(time_df, pd.core.frame.DataFrame)
condition_interval_length = isinstance(interval_length, int) and \
interval_length > 0
if not condition_time_df:
raise ValueError("time_df should be pandas DataFrame")
if not condition_interval_length:
raise ValueError("interval_length should be positive int")
time_array = np.array(time_df)[:, 3:]
n = np.ceil(time_array.shape[1] / interval_length)
matrix_list = [None] * int(n)
for i in np.arange(n):
i = int(i)
ind = [(i * interval_length), ((i + 1) * interval_length)]
small_time_array = time_array[:, ind[0]:ind[1]]
small_time_list = list(small_time_array)
small_time_str_list = ["".join(np.char.mod('%i', a))
for a in small_time_list]
matrix_list[i] = get_prob_matrix_small_interval(small_time_str_list)
return matrix_list
def get_prob_matrix_small_interval(string_list, verbose=False):
r"""
    Return the MLE estimate of the probability matrix
    of the Markov chain model. The input is a list of
    strings containing the information about the state
    transitions of the mouse behavior. The output is a
    matrix stored as a numpy array, where the (i, j)-th
    entry is the probability of transiting from state i
    to state j.
Parameters
----------
string_list: list
a list of strings of the states in the given
time slot.
verbose: bool
If True, print out helpful information to the screen
Returns
-------
M: numpy.ndarray
the MLE estimation of the probability tran-
sition matrix. Each entry M_ij represents the
probability of transiting from state i to state
j.
Examples
--------
>>> time_list = ['002', '001', '012']
>>> get_prob_matrix_small_interval(time_list)
>>> array([[ 0.4, 0.4, 0.2, 0. ],
[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ]])
"""
# check all the inputs
condition_string_list = isinstance(string_list, list)
condition_list_item = isinstance(string_list[0], basestring)
if verbose:
print(string_list[0])
if not condition_string_list:
raise ValueError("string_list should be a list")
if not condition_list_item:
raise ValueError("items in string_list should be str")
Mat_prob = np.zeros(4 * 4).reshape(4, 4)
for i in np.arange(4):
i = int(i)
for j in np.arange(4):
j = int(j)
ijth = str(i) + str(j)
Mat_prob[i, j] = sum([string.count(ijth) for string
in string_list])
for k in np.arange(4):
k = int(k)
if sum(Mat_prob[k, :]) != 0:
Mat_prob[k, :] = Mat_prob[k, :] / sum(Mat_prob[k, :])
return Mat_prob
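# Hedged worked example (added, not part of the original module): for the docstring
# input ['002', '001', '012'] the raw transition counts from state 0 are
# '00' -> 2, '01' -> 2, '02' -> 1, '03' -> 0, so row 0 of the count matrix is
# [2, 2, 1, 0]; dividing by its sum (5) gives [0.4, 0.4, 0.2, 0.0] as shown above.
def _demo_transition_counts():
    counts = {ij: sum(s.count(ij) for s in ['002', '001', '012'])
              for ij in ['00', '01', '02', '03']}
    return counts  # {'00': 2, '01': 2, '02': 1, '03': 0}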
def mcmc_simulation(mat_list, n_per_int):
r"""
    This function gives the Monte Carlo simulation
    of the stochastic process modeling the dynamic
    changes of states of the mouse behavior. The
    input is a list of probability transition matrices
    and an integer indicating how many outputs to draw
    for each matrix. This number is related to the
    interval_length parameter of get_prob_matrix_list.
    The output is an array of numbers, each indicating
    one state.
Parameters
----------
mat_list: List
a list of numpy arrays storing the probabi-
lity transition matrices for each small time
interval chosen.
n_per_int: int
an integer specifying the desired output
length of each probability transition matrix.
This is the same as the parameter
interval_length used in the function
get_prob_matrix_small_interval
Returns
-------
simu_result: numpy.array
an array of integers indicating the simulated
states given a list of probability transition
matrices.
Examples
--------
>>> mat0 = np.zeros(16).reshape(4, 4)
>>> np.fill_diagonal(mat0, val=1)
>>> mat1 = np.zeros(16).reshape(4, 4)
>>> mat1[0, 1] = 1
>>> mat1[1, 0] = 1
>>> mat1[2, 2] = 1
>>> mat1[3, 3] = 1
>>> mat_list_example = [mat0, mat1]
>>> mcmc_simulation(mat_list_example, 10)
>>> array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
"""
# check all the inputs
condition_mat_list = (type(mat_list) == list)
condition_list_item = (type(mat_list[0]) == np.ndarray)
condition_n_per_int = (type(n_per_int) == int and n_per_int > 0)
if not condition_mat_list:
raise ValueError("mat_list should be a list")
if not condition_list_item:
raise ValueError("items in mat_list should be numpy array")
if not condition_n_per_int:
raise ValueError("n_per_int should be positive int")
n = len(mat_list)
simu_result = np.zeros(n * n_per_int)
for i in np.arange(n):
for j in np.arange(n_per_int):
index = int(i * n_per_int + j)
if index == 0:
simu_result[index] = 0
state_i = simu_result[int(index - 1)]
prob_trans = mat_list[i][int(state_i), :]
prob_trans = np.cumsum(prob_trans)
rand = np.random.uniform()
state_j = sum(rand > prob_trans)
simu_result[index] = int(state_j)
simu_result = np.array(simu_result, dtype=int)
return simu_result
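# Hedged sketch (added, not part of the original module): the sampling step inside
# mcmc_simulation is inverse-CDF sampling -- one uniform draw is compared against the
# cumulative transition row, and sum(rand > cumsum) is the index of the first
# cumulative probability that exceeds the draw.
def _demo_sample_next_state(prob_row=(0.1, 0.2, 0.3, 0.4), rand=0.55):
    prob_trans = np.cumsum(prob_row)    # array([0.1, 0.3, 0.6, 1.0])
    return int(sum(rand > prob_trans))  # 0.55 > 0.1 and 0.3 only, so next state is 2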
def get_score(true_day, simulated_day, weight=[1, 10, 50, 1]):
r"""
    Returns the evaluation score for the simulated day
    that will later be used to choose the best time
    interval for different strains. The input data
    should be two numpy arrays and one list. The two arrays,
    possibly of different lengths, are the activities for one
    particular day of one particular mouse and our simulated
    day for this mouse from the mcmc_simulation function. The
    list holds the weights for the different statuses: we give
    different rewards for making correct predictions of the
    various statuses. For example, the average mouse day has
    21200 timestamps; about 10000 of them are IS, 1000 are EAT,
    200 are DRINK, and the remaining 10000 are OTHERS. So we
    should weigh drink and eat more heavily; their ratio is
    10000:1000:200:10000 = 1:0.1:0.02:1, and taking the inverse
    gives the default weights 1:10:50:1.
    The output will be one number between 0 and max(weight),
    indicating the similarity of the true day of a mouse and
    a simulated day of the same mouse. We will use
    this function to measure the performance of the
    simulation and then choose the appropriate time
    interval.
Parameters
----------
true_day: numpy.array
a numpy.array containing the activities for one
particular mouse on a specific day
simulated_day: numpy.array
a numpy.array containing the simulated activities
for this particular mouse.
weight: list
a list with positive numbers showing the rewards
for making the right predictions of various status.
Returns
-------
score: float
a float from 0 to max(weight), indicating the similarity of
the simulated data with the actual value, and therefore,
the performance of the simulation, with max(weight) being the
most similar, and 0 being the least similar.
Examples
--------
>>> true_day_1 = np.zeros(13)
>>> simulated_day_1 = np.ones(13)
>>> get_score(true_day_1, simulated_day_1)
>>> 0.0
>>> true_day_2 = np.ones(13)
>>> simulated_day_2 = np.ones(13)
>>> get_score(true_day_2, simulated_day_2)
>>> 10.0
"""
# check all the inputs
condition_true_day = (isinstance(true_day, (np.ndarray, np.generic)))
condition_simulated_day = (isinstance(simulated_day,
(np.ndarray, np.generic)))
condition_weight = (isinstance(weight, list))
if not condition_true_day:
raise ValueError("true_day should be numpy array")
if not condition_simulated_day:
raise ValueError("simulated_day should be numpy array")
if not condition_weight:
raise ValueError("weight should be list")
len_weight = len(weight)
if len_weight != 4:
raise ValueError("Length of weight should be 4")
# check all the weights are positive
for w in weight:
if w <= 0:
raise ValueError("All the weights should be positive")
# make sure these two arrays have the same length
len_true = len(true_day)
len_simulated = len(simulated_day)
if len_true > len_simulated:
raise ValueError("Length of simulated_day is smaller than true_day")
simulated_same_length = simulated_day[:len_true]
score = 0
for i in np.arange(len_true):
if true_day[i] == simulated_same_length[i]:
status = true_day[i]
score += weight[int(status)]
score = score / len_true
return score
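# Hedged worked example (added, not part of the original module): with the default
# weights [1, 10, 50, 1], a perfectly matched day [0, 1, 2, 3] scores
# (1 + 10 + 50 + 1) / 4 = 15.5, while matching only the IS and OTHERS stamps scores
# (1 + 1) / 4 = 0.5 -- correct eating/drinking predictions are rewarded the most.
def _demo_get_score():
    true_day = np.array([0., 1., 2., 3.])
    perfect = get_score(true_day, np.array([0., 1., 2., 3.]))  # 15.5
    partial = get_score(true_day, np.array([0., 0., 0., 3.]))  # 0.5
    return perfect, partial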
def find_best_interval(df, strain_num,
interval_length_initial=np.arange(600, 7800, 600)):
r"""
Returns the optimized time interval length and the corresponding
fake mouse behavior string with the evaluation score for a
particular mouse strain. The input data are the pandas DataFrame
generated by function create_time_matrix which contains all strains
information, the strain number that we want to develop, and some
initial time interval length we want to optimize on. The outputs
are the optimized time interval length, the simulated mouse states
string using that optimized time interval and the evaluation score
comparing this simulated mouse with the real mice behavior in the
strain using the same optimized time interval length.
Parameters
----------
df: Pandas.DataFrame
a huge data frame containing info on strain, mouse
no., mouse day, and different states at chosen time
points.
strain_num: int
an integer of 0, 1, 2 that specifying the desired mouse strain
interval_length_initial: numpy.ndarray
a numpy.ndarray specifying the range of time interval
that it optimizes on, with the default value of a sequence
from 600s to 7200s with 600s step since 10min to 2h is a
reasonable choice.
Returns
-------
best_interval_length: int
        an integer indicating the optimized time interval length
        in terms of the evaluation score
best_fake_mouse: list
a list of around 88,283 integers indicating the simulated
states using the best_interval_length
best_score: float
        a float between 0 and max(weight) representing the evaluation
        score comparing the best_fake_mouse with the real mice
        behavior in the strain under the same optimized
        time interval length. The higher the score, the better the
        simulation.
Examples
--------
>>> row_i = np.hstack((np.zeros(40)))
>>> time_df_eg = np.vstack((row_i, row_i, row_i))
>>> time_df_eg = pd.DataFrame(time_df_eg)
>>> time_df_eg.rename(columns={0:'strain'}, inplace=True)
>>> find_best_interval(time_df_eg, 0, np.arange(10, 40, 10))
(10, array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0]), 0.98245614035087725)
"""
    # check all the inputs: dataframe, strain number, initial time length
condition_df = (type(df) == pd.core.frame.DataFrame)
condition_strain_num = (strain_num in (0, 1, 2))
condition_interval_length_initial = (type(interval_length_initial) ==
np.ndarray and
np.sum(interval_length_initial > 0) ==
len(interval_length_initial) and
all(isinstance(i, np.int64) for i
in interval_length_initial))
if not condition_df:
raise ValueError("df should be pandas DataFrame")
if not condition_strain_num:
raise ValueError("strain_num can only be 0, 1, 2")
if not condition_interval_length_initial:
raise ValueError("interval_length_initial positive np.array")
interval_length_initial = [int(interval_length) for interval_length in
interval_length_initial]
data_strain = df[df.strain == strain_num]
time_score = []
store_fake_mouse = []
for interval_length in interval_length_initial:
prob_matrix_list = get_prob_matrix_list(data_strain, interval_length)
np.random.seed([0])
fake_mouse = mcmc_simulation(prob_matrix_list, interval_length)
store_fake_mouse.append(fake_mouse)
real_mouse = data_strain.ix[:, 3:]
real_mouse = real_mouse.reset_index()
n_mouse = len(real_mouse)
mouse_score = np.zeros(n_mouse)
for mouse_index in range(0, n_mouse):
real_mouse_list = list(real_mouse.ix[mouse_index, :])
mouse_score[mouse_index] = get_score(np.array(real_mouse_list),
fake_mouse)
time_score.append(np.mean(mouse_score))
best_score = max(time_score)
best_score_index = time_score.index(best_score)
best_interval_length = interval_length_initial[best_score_index]
best_fake_mouse = store_fake_mouse[best_score_index]
return best_interval_length, best_fake_mouse, best_score
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/dynamics/__init__.py",
"copies": "3",
"size": "21301",
"license": "bsd-2-clause",
"hash": -6509979658280945000,
"line_mean": 38.15625,
"line_max": 79,
"alpha_frac": 0.5785174405,
"autogenerated": false,
"ratio": 3.7754342431761785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5853951683676178,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mousestyles.dynamics import find_best_interval
def plot_dynamics(df, strain_num,
interval_length_initial=np.arange(600, 7800, 600),
plot_time_range=np.arange(36000, 36100, 1)):
r"""
returns a plot that can help understand the
behavior dynamics that are obtained from the best
simulation. The data used as input is the pandas
DataFrame generated by function create_time_matrix.
The output is a plot that summarizes the dynamics
of a fake mouse of the given strain_num. The
strain_num could be chosen. Of note is that
0 represents IS, 1 represents eating, 2 represents
    drinking, 3 represents other activity in AS. In the
plot, blue represents IS, bright green represents eating,
yellow represents drinking, and red represents
other activities in AS.
Parameters
----------
df: Pandas.DataFrame
a huge data frame containing info on strain, mouse
no., mouse day, and different states at chosen time
points.
    strain_num: int
an integer specifying the desired mouse strain.
strain_num is 0, 1, or 2.
interval_length_initial: numpy.ndarray
a numpy.ndarray specifying the range of time interval
that it optimizes on.
plot_time_range: numpy.ndarray
a numpy.ndarray specifying the range of time range
of the plot.
Returns
-------
dynamics_plot: plot
a plot of behavior dynamics of a fake mouse of the
given strain_num. The x-axis is the time stamps that
        start from 0. For strain_num = 0, the x-axis is from
        0 to 92,400. For strain_num = 1, the x-axis is from 0 to
        90,000. For strain_num = 2, the x-axis is from 0 to
88,800. We assign different colors for different
states. In the plot, blue represents IS, bright green
represents eating, yellow represents drinking,
and red represents other activities in AS.
Examples
--------
>>> row_i = np.hstack((np.zeros(40)))
>>> time_df_eg = np.vstack((row_i, row_i, row_i))
>>> time_df_eg = pd.DataFrame(time_df_eg)
>>> time_df_eg.rename(columns={0:'strain'}, inplace=True)
    >>> plot_dynamics(time_df_eg, 0,
np.arange(10, 40, 10), np.arange(0, 40, 1))
"""
# check all the inputs
condition_df = (type(df) == pd.core.frame.DataFrame)
condition_strain_num = (strain_num in (0, 1, 2))
condition_interval_length_initial = (type(interval_length_initial) ==
np.ndarray and
np.sum(interval_length_initial > 0) ==
len(interval_length_initial) and
all(isinstance(i, np.int64) for i
in interval_length_initial))
condition_plot_time_range = (type(plot_time_range) ==
np.ndarray and
np.sum(plot_time_range > 0) ==
len(plot_time_range) and
all(isinstance(i, np.int64) for i
in plot_time_range))
if not condition_df:
raise ValueError("df should be pandas DataFrame")
if not condition_strain_num:
raise ValueError("strain_num can only be 0, 1, 2")
if not condition_interval_length_initial:
raise ValueError("interval_length_initial positive np.array")
if not condition_plot_time_range:
raise ValueError("plot_time_range positive np.array")
value_array = find_best_interval(df, strain_num)[1][plot_time_range]
value_list = list(value_array)
time_list = list(plot_time_range)
fig, dynamics_plot = plt.subplots(figsize=(6, 1))
dynamics_plot.scatter(time_list, [1] * len(time_list),
c=value_list, marker='s', s=100)
dynamics_plot.yaxis.set_visible(False)
dynamics_plot.xaxis.set_ticks_position('bottom')
dynamics_plot.get_yaxis().set_ticklabels([])
plt.show()
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/visualization/dynamics.py",
"copies": "3",
"size": "4289",
"license": "bsd-2-clause",
"hash": -7869012206683845000,
"line_mean": 42.3232323232,
"line_max": 79,
"alpha_frac": 0.6017719748,
"autogenerated": false,
"ratio": 3.989767441860465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 99
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
import numpy as np
from mousestyles.path_diversity import detect_noise
def smooth_noise(movement, paths, angle_threshold, delta_t):
r"""
Return a new smoothed movement pandas DataFrame object containing
CT, CX, CY coordinates.
The inputted movement DataFrame is passed through a noise detection
function. At points where noise is detected, as defined by the
input parameters (i.e., angle_threshold and delta_t), this function
returns a new movement DataFrame by averaging points where noise
is detected.
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
paths index : a list containing the indices for all paths
angle_threshold : float
positive number indicating the minimum turning angle to flag as noise
delta_t : float
positive number indicating the delta_time interval
Returns
-------
smoothed movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_index(movement, 1, 1)
>>> smoothed_movement = smooth_noise(movement, paths, 135, .1)
"""
    # check the validity of the inputs
if not isinstance(movement, pd.core.frame.DataFrame):
raise TypeError("Movement must be pandas DataFrame")
if set(movement.keys()).issuperset(['x', 'y', 't']) is False:
raise ValueError(
"The keys of movement must be 't', 'x', and 'y'")
if len(movement) <= 1:
raise ValueError("Movement must contain at least 2 rows")
noise = detect_noise(movement, paths, angle_threshold, delta_t)
max_noise = max(noise)
drop_ind = np.array([])
for i in range(1, max_noise + 1):
noise_chunk = noise[noise == i]
movement_chunk = movement.loc[noise_chunk.index]
x_avg = np.mean(movement_chunk['x'])
y_avg = np.mean(movement_chunk['y'])
t_avg = np.mean(movement_chunk['t'])
movement['x'][noise_chunk.index[0]] = x_avg
movement['y'][noise_chunk.index[0]] = y_avg
movement['t'][noise_chunk.index[0]] = t_avg
# Note: The above DataFrame manipulations result in a
# SettingWithCopyWarning. The warning persists even after
# attempting the following format:
# .loc[row_indexer,col_indexer] = value. Despite this,
# the output of the function is working as intended.
drop_ind = np.append(drop_ind, noise_chunk.index[1:])
new_movement = movement.drop(drop_ind)
new_movement.index = range(len(new_movement))
return new_movement
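# Hedged sketch (added, not part of the original module): for every chunk of rows
# carrying the same positive noise label, smooth_noise keeps the first row, overwrites
# it with the chunk's mean t/x/y, and drops the remaining rows; a two-row chunk is
# therefore replaced by the midpoint of the two observations.
def _demo_average_noise_chunk():
    chunk = pd.DataFrame({'t': [0.02, 0.04], 'x': [0.0, 0.1], 'y': [1.0, 0.0]})
    return chunk.mean()  # t = 0.03, x = 0.05, y = 0.5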
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/path_diversity/smooth_noise.py",
"copies": "3",
"size": "2904",
"license": "bsd-2-clause",
"hash": -4536697917981752300,
"line_mean": 32,
"line_max": 77,
"alpha_frac": 0.6497933884,
"autogenerated": false,
"ratio": 3.8823529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.603214632957647,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
from mousestyles import path_diversity
def test_smooth_noise():
movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 1, 1)
# Check if function produces the correct outputs.
smoothed = path_diversity.smooth_noise(movement, paths, 120, 1)
assert len(smoothed) == 3
movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 1, 1)
# Check if function produces the correct outputs.
smoothed = path_diversity.smooth_noise(movement, paths, 120, 1)
assert smoothed['y'][1] == 0.5
movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 1, 1)
# Check if function produces the correct outputs.
smoothed = path_diversity.smooth_noise(movement, paths, 120, 1)
assert smoothed['x'][1] == 0.05
movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 1, 1)
# Check if function produces the correct outputs.
smoothed = path_diversity.smooth_noise(movement, paths, 120, 1)
assert smoothed['t'][1] == 0.03
| {
"repo_name": "berkeley-stat222/mousestyles",
"path": "mousestyles/path_diversity/tests/test_smooth_noise.py",
"copies": "3",
"size": "2526",
"license": "bsd-2-clause",
"hash": 7851332215690985000,
"line_mean": 46.6603773585,
"line_max": 75,
"alpha_frac": 0.5003958828,
"autogenerated": false,
"ratio": 2.9928909952606637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4993286878060663,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
from mousestyles.path_diversity import compute_angles
def detect_noise(movement, paths, angle_threshold, delta_t):
r"""
    Return a pandas Series labeling the points where measurement
    noise is detected (0 marks points with no detected noise); the
    result is later passed to a smoothing function.
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
paths index : a list containing the indices for all paths
angle_threshold : float
positive number indicating the minimum turning angle to flag as noise
delta_t : float
positive number indicating the delta_time interval
Returns
-------
noise index : a pandas series containing the indices at which
noise, as defined by input parameters, is detected
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_diversity.path_index(movement, 1, 1)
>>> noise = detect_noise(movement, paths, 135, .1)
"""
# check if all inputs are positive
conditions_value = [angle_threshold <= 0, delta_t <= 0]
if any(conditions_value):
raise ValueError("Input values need to be positive")
if not isinstance(movement, pd.core.frame.DataFrame):
raise TypeError("Movement must be pandas DataFrame")
if set(movement.keys()) != {'isHB', 't', 'x', 'y'}:
raise ValueError(
"The keys of movement must be 't', 'x', 'y', and 'isHB'")
if len(movement) <= 1:
raise ValueError("Movement must contain at least 2 rows")
noise_index = 1
noise_path = []
noise_path = pd.Series(noise_path)
current_noise = False
for path in paths:
path_obj = movement[path[0]:path[1] + 1]
if len(path_obj) > 3:
path_obj['angles'] = compute_angles(path_obj, False)
path_obj['sharp_angle'] = path_obj['angles'] > angle_threshold
path_obj['noise'] = 0
# Note: The above DataFrame manipulations result in a
# SettingWithCopyWarning. The warning persists even after
# attempting the following format:
# .loc[row_indexer,col_indexer] = value. Despite this,
# the output of the function is working as intended.
for i in range(0, len(path_obj) - 1):
if path_obj['sharp_angle'].iloc[i]:
if path_obj['sharp_angle'].iloc[i + 1]:
if path_obj['t'].iloc[
i + 1] - path_obj['t'].iloc[i] < delta_t:
path_obj['noise'].iloc[i] = noise_index
path_obj['noise'].iloc[i + 1] = noise_index
current_noise = True
elif current_noise:
noise_index += 1
current_noise = False
elif current_noise:
noise_index += 1
current_noise = False
else:
path_obj['noise'] = 0
noise_path = noise_path.append(path_obj.noise)
return noise_path
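# Hedged sketch (added, not part of the original module): two consecutive points form
# one noise chunk only when both turning angles exceed angle_threshold AND their time
# stamps are closer than delta_t; an equally sharp zig-zag spread over a longer time
# is treated as genuine movement.
def _demo_noise_criterion(angles=(150., 155.), times=(0.02, 0.04),
                          angle_threshold=135., delta_t=0.1):
    both_sharp = angles[0] > angle_threshold and angles[1] > angle_threshold
    close_in_time = (times[1] - times[0]) < delta_t
    return both_sharp and close_in_time  # True -> flagged as one noise chunk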
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/path_diversity/detect_noise.py",
"copies": "3",
"size": "3295",
"license": "bsd-2-clause",
"hash": 6925468621832961000,
"line_mean": 33.6842105263,
"line_max": 77,
"alpha_frac": 0.5678300455,
"autogenerated": false,
"ratio": 4.2297817715019255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6297611817001925,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import permute.data as data
from numpy.testing import assert_equal
def test_botulinum():
""" Test that "Botulinum" data can be loaded. """
botulinum = data.botulinum()
assert_equal((botulinum.size, len(botulinum.dtype)), (80, 28))
def test_chrom17m():
""" Test that "chrom17m" data can be loaded. """
chrom17m = data.chrom17m()
assert_equal((chrom17m.size, len(chrom17m.dtype)), (10, 3))
def test_confocal():
""" Test that "confocal" data can be loaded. """
confocal = data.confocal()
assert_equal((confocal.size, len(confocal.dtype)), (112, 17))
def test_germina():
""" Test that "germina" data can be loaded. """
germina = data.germina()
assert_equal((germina.size, len(germina.dtype)), (40, 5))
def test_kenya():
""" Test that "Kenya" data can be loaded. """
kenya = data.kenya()
assert_equal((kenya.size, len(kenya.dtype)), (16, 3))
def test_massaro_blair():
""" Test that "massaro_blair" data can be loaded. """
massaro_blair = data.massaro_blair()
assert_equal((massaro_blair.size, len(massaro_blair.dtype)), (29, 2))
def test_monachus():
""" Test that "monachus" data can be loaded. """
monachus = data.monachus()
assert_equal(monachus.size, 12)
assert_equal(len(monachus.dtype), 17)
def test_mult():
""" Test that "mult" data can be loaded. """
mult = data.mult()
assert_equal(mult.size, 16)
assert_equal(len(mult.dtype), 4)
def test_perch():
""" Test that "perch" data can be loaded. """
perch = data.perch()
assert_equal(perch.size, 108)
assert_equal(len(perch.dtype), 31)
def test_rats():
""" Test that "rats" data can be loaded. """
rats = data.rats()
assert_equal(rats.size, 36)
assert_equal(len(rats.dtype), 19)
def test_setig():
""" Test that "setig" data can be loaded. """
setig = data.setig()
assert_equal(setig.size, 334)
assert_equal(len(setig.dtype), 6)
def test_urology():
""" Test that "urology" data can be loaded. """
urology = data.urology()
assert_equal(urology.size, 481)
assert_equal(len(urology.dtype), 31)
def test_washing_test():
""" Test that "washing_test" data can be loaded. """
washing_test = data.washing_test()
assert_equal(washing_test.size, 800)
assert_equal(len(washing_test.dtype), 4)
def test_waterfalls():
""" Test that "waterfalls" data can be loaded. """
waterfalls = data.waterfalls()
assert_equal(waterfalls.size, 42)
assert_equal(len(waterfalls.dtype), 17)
| {
"repo_name": "qqqube/permute",
"path": "permute/data/tests/test_data.py",
"copies": "1",
"size": "2645",
"license": "bsd-2-clause",
"hash": 3957766991150778000,
"line_mean": 26.2680412371,
"line_max": 73,
"alpha_frac": 0.6351606805,
"autogenerated": false,
"ratio": 2.929125138427464,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4064285818927464,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pytest
import pandas as pd
import numpy as np
from mousestyles.path_diversity.path_features_advanced import compute_advanced
def test_compute_advanced_input():
# Check if function raises the correct type of errors.
# not pandas DataFrame
like_path = [1, 2, 3]
# not having keys 'x', 'y'
like_path2 = pd.DataFrame({'x': [5, -2, 1],
't': [-2, 3, 1],
'isHB': [True, True, False]})
# length is less than 2
like_path3 = pd.DataFrame({'t': [2], 'x': [5], 'y': [3], 'isHB': [True]})
with pytest.raises(TypeError) as info:
compute_advanced(like_path)
assert info.value.args[0] == "path_obj must be pandas DataFrame"
with pytest.raises(ValueError) as info:
compute_advanced(like_path2)
assert info.value.args[0] == "the keys of path_obj must contain 'x', 'y'"
with pytest.raises(ValueError) as info:
compute_advanced(like_path3)
assert info.value.args[0] == "path_obj must contain at least 3 rows"
def test_compute_advanced():
'''path = pd.DataFrame({'t': [2, 4.5, 10.5],
'x': [0, 1, 1],
'y': [0, 0, 1],
'isHB': [True, True, False]})
adv_feats = compute_advanced(path)
assert adv_feats['area_rec'] == 1
assert adv_feats['abs_distance'] == np.sqrt(2)
assert len(adv_feats['radius']) == len(path)
assert len(adv_feats['center_angles']) == len(path) - 1
assert adv_feats['center_angles'] == [np.pi / 2] * 2
assert adv_feats['radius'] == [np.sqrt(2) / 2] * 3
# in area covered some error was produced
# so it's not exactly but approximately equal to 1/2
expected = 1 / 2
assert np.abs(adv_feats['area_cov'] - expected) < 0.00001
'''
path = pd.DataFrame({'x': [0, 3, 3, 0],
'y': [0, 0, 4, 4]})
adv_feats = compute_advanced(path)
assert adv_feats['area_rec'] == 3 * 4
assert adv_feats['abs_distance'] == 4
assert len(adv_feats['center_angles']) == len(path) - 1
assert len(adv_feats['radius']) == len(path)
assert adv_feats['radius'] == [np.sqrt(3 ** 2 + 4 ** 2) / 2] * 4
# in area covered some error was produced
# so it's not exactly but approximately equal to
# the theoretical value
expected = 3 * 4 - 3 / 2 * 4 / 2
assert np.abs(adv_feats['area_cov'] - expected) < 0.0000001
# in center_angles some errors were produced
# so it's not exactly but approximately equal to
# the theoretical values
# By law of cosines
expected1 = (2 * 2.5 ** 2 - 3 ** 2) / (2 * 2.5 ** 2)
expected2 = (2 * 2.5 ** 2 - 4 ** 2) / (2 * 2.5 ** 2)
assert np.abs(np.cos(adv_feats['center_angles'][0]) -
expected1) < 0.0000001
assert np.abs(np.cos(adv_feats['center_angles'][2]) -
expected1) < 0.0000001
assert np.abs(np.cos(adv_feats['center_angles'][1]) -
expected2) < 0.0000001
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/path_diversity/tests/test_path_features_advanced.py",
"copies": "3",
"size": "3099",
"license": "bsd-2-clause",
"hash": 1873030699862746000,
"line_mean": 37.2592592593,
"line_max": 78,
"alpha_frac": 0.5666343982,
"autogenerated": false,
"ratio": 3.343042071197411,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 81
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pytest
import pandas as pd
from mousestyles import data
from mousestyles import path_diversity
def test_detect_noise_input():
movement = data.load_movement(0, 0, 0)
paths = path_diversity.path_index(movement, 1, 1)
# Check if function raises the correct type of errors.
# Input negative angle_threshold
with pytest.raises(ValueError) as excinfo:
path_diversity.detect_noise(movement, paths, -1, 1)
assert excinfo.value.args[0] == "Input values need to be positive"
# Input negative delta_t
with pytest.raises(ValueError) as excinfo:
path_diversity.detect_noise(movement, paths, 1, -1)
assert excinfo.value.args[0] == "Input values need to be positive"
# Input zero angle_threshold
with pytest.raises(ValueError) as excinfo:
path_diversity.detect_noise(movement, paths, 0, 1)
assert excinfo.value.args[0] == "Input values need to be positive"
# Input zero delta_t
with pytest.raises(ValueError) as excinfo:
path_diversity.detect_noise(movement, paths, 1, 0)
assert excinfo.value.args[0] == "Input values need to be positive"
def test_detect_noise():
movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 2, 1)
# Check if function produces the correct outputs
noise = path_diversity.detect_noise(movement, paths, 120, 1)
noise = list(noise)
assert noise == [0, 1, 1, 0]
movement = {'t': pd.Series([0., 2., 4., 7.], index=[0, 1, 2, 3]),
'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
'isHB': pd.Series(['No', 'No', 'No', 'No'],
index=[0, 1, 2, 3])}
movement = pd.DataFrame(movement)
paths = path_diversity.path_index(movement, 4, 1)
# Check if function produces the correct outputs
noise = path_diversity.detect_noise(movement, paths, 120, 1)
noise = list(noise)
assert noise == [0, 0, 0, 0]
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/path_diversity/tests/test_detect_noise.py",
"copies": "3",
"size": "2458",
"license": "bsd-2-clause",
"hash": 7865435219276275000,
"line_mean": 42.8928571429,
"line_max": 75,
"alpha_frac": 0.5919446705,
"autogenerated": false,
"ratio": 3.272969374167776,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5364914044667776,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import h5py
import numpy as np
import os
import re
from collections import OrderedDict
import pandas as pd
import json
import skimage.io as sio
from PIL import Image
import copy
import glob
import ast
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from collections.abc import Iterable
from atom.api import Atom, Str, observe, Typed, Dict, List, Int, Float, Enum, Bool
from .load_data_from_db import (db, fetch_data_from_db, flip_data,
helper_encode_list, helper_decode_list,
write_db_to_hdf, fetch_run_info)
from ..core.utils import normalize_data_by_scaler, grid_interpolate
from ..core.map_processing import (RawHDF5Dataset, compute_total_spectrum_and_count, TerminalProgressBar,
dask_client_create)
from .scan_metadata import ScanMetadataXRF
import requests
from distutils.version import LooseVersion
import logging
import warnings
import pyxrf
pyxrf_version = pyxrf.__version__
logger = logging.getLogger(__name__)
warnings.filterwarnings('ignore')
sep_v = os.sep
class FileIOModel(Atom):
"""
This class focuses on file input and output.
Attributes
----------
working_directory : str
current working path
file_name : str
name of loaded file
file_name_silent_change : bool
If this flag is set to True, then ``file_name`` may be changed once without
        starting a file read operation. The flag is automatically reset to False.
load_status : str
Description of file loading status
data_sets : dict
dict of experiment data, 3D array
img_dict : dict
Dict of 2D arrays, such as 2D roi pv or fitted data
"""
window_title = Str()
window_title_base = Str()
working_directory = Str()
file_name = Str()
file_name_silent_change = Bool(False)
file_path = Str()
load_status = Str()
data_sets = Typed(OrderedDict)
file_channel_list = List()
img_dict = Dict()
img_dict_keys = List()
img_dict_default_selected_item = Int()
img_dict_is_updated = Bool(False)
runid = Int(-1) # Run ID of the current run
runuid = Str() # Run UID of the current run
h_num = Int(1)
v_num = Int(1)
fname_from_db = Str()
file_opt = Int(-1)
data = Typed(np.ndarray)
data_total_count = Typed(np.ndarray)
data_all = Typed(object)
selected_file_name = Str()
# file_name = Str()
mask_data = Typed(object)
mask_name = Str() # Displayed name of the mask (I'm not sure it is used, but let's keep it for now)
mask_file_path = Str() # Full path to file with mask data
mask_active = Bool(False)
load_each_channel = Bool(False)
# Spatial ROI selection
roi_selection_active = Bool(False) # Active/inactive
roi_row_start = Int(-1) # Selected values matter only when ROI selection is active
roi_col_start = Int(-1)
roi_row_end = Int(-1)
roi_col_end = Int(-1)
# Used while loading data from database
# True: overwrite existing data file if it exists
# False: create new file with unique name (original name + version number)
file_overwrite_existing = Bool(False)
data_ready = Bool(False)
# Scan metadata
scan_metadata = Typed(ScanMetadataXRF)
# Indicates if metadata is available for recently loaded scan
scan_metadata_available = Bool(False)
# Indicates if the incident energy is available in metadata for recently loaded scan
incident_energy_available = Bool(False)
# Changing this variable sets incident energy in the ``plot_model``
# Must be linked with the function ``plot_model.set_incident_energy``
# This value is not updated if incident energy parameter is changed somewhere else, therefore
# its value should not be used for computations!!!
incident_energy_set = Float(0.0)
def __init__(self, *, working_directory):
self.working_directory = working_directory
self.mask_data = None
# Display PyXRF version in the window title
ver_str, new_ver = self._get_pyxrf_version_str()
if new_ver is not None:
ver_str += f" - new version {new_ver} is available"
self.window_title_base = f"PyXRF: X-ray Fluorescence Analysis Tool ({ver_str})"
self.window_title = self.window_title_base
def _get_pyxrf_version_str(self):
"""
The function returns the tuple of strings:
- current version number of PyXRF;
- the latest version of PyXRF from nsls2forge conda channel.
        If the current version is the latest, then the second string is None.
"""
# Determine the current version of PyXRF
global pyxrf_version
pyxrf_version_str = pyxrf_version
if pyxrf_version_str[0].lower() != 'v':
pyxrf_version_str = f"v{pyxrf_version_str}"
logger.info("Checking for new version availability ...")
# Now find the latest version available at nsls2forge
pyxrf_latest_version_str = None
try:
# Create a list of available versions
r = requests.get('https://conda.anaconda.org/nsls2forge/noarch/repodata.json')
pkgs = r.json()
pyxrf_ver = []
for pkg in pkgs['packages'].keys():
if pkg.startswith('pyxrf'):
pkg_version = LooseVersion(pkg.split('-')[1])
pyxrf_ver.append(pkg_version)
if len(pyxrf_ver):
max_version = pyxrf_ver[0]
for pkg_version in pyxrf_ver:
if max_version < pkg_version:
max_version = pkg_version
current_version = LooseVersion(pyxrf_version)
if current_version < max_version:
pyxrf_latest_version_str = f"v{max_version}"
if pyxrf_latest_version_str is not None:
logger.info(f"New version of PyXRF ({pyxrf_latest_version_str}) was found "
"in the 'nsls2forge' conda channel")
else:
logger.info("You have the latest version of PyXRF")
except Exception:
            # This exception is most likely to happen if there is no internet connection or
# nsls2forge is unreachable. Then ignore the procedure, assume that the current
# version is the latest.
logger.warning("Failed to check availability of the latest version of PyXRF "
"in the 'nsls2forge' conda channel.")
pass
return pyxrf_version_str, pyxrf_latest_version_str
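    # Hedged illustration (added comment, not part of the original class): the
    # comparison above works because LooseVersion orders numeric components
    # numerically rather than lexically, e.g.
    #     LooseVersion("0.9.0") < LooseVersion("0.10.0")   # True
    #     LooseVersion("0.10.0") < LooseVersion("0.10.1")  # True
    # so a "0.10.x" release is correctly reported as newer than "0.9.x".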
def window_title_clear(self):
self.window_title = self.window_title_base
def window_title_set_file_name(self, file_name):
self.window_title = f"{self.window_title_base} - File: {file_name}"
def window_title_set_run_id(self, run_id):
self.window_title = f"{self.window_title_base} - Scan ID: {run_id}"
def is_databroker_available(self):
"""
Check if Databroker is configured and data can be loaded from the database.
Returns
-------
bool
True - Databroker is available, False otherwise.
"""
return db is not None
def _metadata_update_program_state(self):
"""
Update program state based on metadata:
-- enable controls (mostly ``View Metadata`` button in ``File IO`` tab
-- set incident energy if it is available
-- print logger warning if incident energy is not available in metadata
(or if metadata does not exist)
"""
self.scan_metadata_available = False
self.incident_energy_available = False
if self.scan_metadata is not None:
self.scan_metadata_available = self.scan_metadata.is_metadata_available()
self.incident_energy_available = self.scan_metadata.is_mono_incident_energy_available()
if self.incident_energy_available:
# Fetch incident energy from metadata if it exists
self.incident_energy_set = self.scan_metadata.get_mono_incident_energy()
logger.info(f"Incident energy {self.incident_energy_set} keV was extracted from the scan metadata")
else:
logger.warning(
"Incident energy is not available in scan metadata and needs to be set manually:\n"
" Click 'Find Elements Automatically' button in 'Fit' "
"tab to access the settings dialog box.")
def clear(self):
"""
Clear all existing data. The function should be called before loading new data (file or run)
"""
self.runid = -1
self.file_opt = -1
self.selected_file_name = ""
self.file_path = ""
# We don't clear the following data arrays for now. The commented code is left
# mostly for future reference.
# self.data = np.ndarray([])
# self.data_total_count = np.ndarray([])
# self.data_all = {}
self.mask_data = {}
self.mask_name = "" # Displayed name of the mask (I'm not sure it is used, but let's keep it for now)
self.mask_file_path = "" # Full path to file with mask data
self.mask_active = False
# Spatial ROI selection
self.roi_selection_active = False # Active/inactive
self.roi_row_start = -1 # Selected values matter only when ROI selection is active
self.roi_col_start = -1
self.roi_row_end = -1
self.roi_col_end = -1
self.img_dict = {}
self.img_dict_keys = []
self.img_dict_default_selected_item = 0
self.data_sets = OrderedDict()
self.scan_metadata = ScanMetadataXRF()
self._metadata_update_program_state()
@observe(str('file_name'))
def load_data_from_file(self, change):
"""This function loads data file for GUI. It also generates preview data for default channel #0."""
if change['value'] == 'temp':
# 'temp' is used to reload the same file
return
if self.file_name_silent_change:
self.file_name_silent_change = False
logger.info(f"File name is silently changed. New file name is '{change['value']}'")
return
self.file_channel_list = []
logger.info('File is loaded: %s' % (self.file_name))
# Clear data. If reading the file fails, then old data should not be kept.
self.clear()
# focus on single file only
img_dict, self.data_sets, self.scan_metadata = \
file_handler(self.working_directory,
self.file_name,
load_each_channel=self.load_each_channel)
self.img_dict = img_dict
self.update_img_dict()
# Replace relative scan ID with true scan ID.
# Full path to the data file
self.file_path = os.path.join(self.working_directory, self.file_name)
# Process metadata
self._metadata_update_program_state()
if self.scan_metadata_available:
if "scan_id" in self.scan_metadata:
self.runid = int(self.scan_metadata["scan_id"])
if "scan_uid" in self.scan_metadata:
self.runuid = self.scan_metadata["scan_uid"]
self.data_ready = True
self.file_channel_list = list(self.data_sets.keys())
default_channel = 0 # Use summed data as default
self.file_opt = default_channel
if self.file_channel_list and self.data_sets:
self.data_sets[self.file_channel_list[default_channel]].selected_for_preview = True
self.update_data_set_buffers()
def get_dataset_map_size(self):
map_size = None
ds_name_first = ""
for ds_name, ds in self.data_sets.items():
if map_size is None:
map_size = ds.get_map_size()
ds_name_first = ds_name
else:
map_size_other = ds.get_map_size()
if map_size != map_size_other:
logger.warning(f"Map sizes don't match for datasets '{ds_name}' and '{ds_name_first}': "
f"{map_size_other} != {map_size}")
return map_size
def update_img_dict(self, img_dict_additional=None):
if img_dict_additional is None:
img_dict_additional = {}
new_keys = list(img_dict_additional.keys())
selected_key = new_keys[0] if new_keys else None
self.img_dict.update(img_dict_additional)
self.img_dict_keys = self._get_img_dict_keys()
if selected_key is None:
selected_item = 1 if self.img_dict_keys else 0
else:
selected_item = self.img_dict_keys.index(selected_key) + 1
self.select_img_dict_item(selected_item, always_update=True)
def select_img_dict_item(self, selected_item, *, always_update=False):
"""
Select the set of image maps.
Parameters
----------
selected_item : int
Selected item (set of maps) in the `self.img_dict`. Range: `0..len(self.img_dict)`.
0 - no dataset is selected.
always_update : boolean
True - update even if the item is already selected.
"""
# Select no dataset if index is out of range
selected_item = selected_item if (0 <= selected_item <= len(self.img_dict)) else 0
# Don't update the plots if the item is already selected
if always_update or (selected_item != self.img_dict_default_selected_item):
self.img_dict_default_selected_item = selected_item
self.img_dict_is_updated = False
self.img_dict_is_updated = True
def _get_img_dict_keys(self):
key_suffix = [r"scaler$", r"det\d+_roi$", r"roi$", r"det\d+_fit$", r"fit$"]
keys = [[] for _ in range(len(key_suffix) + 1)]
for k in self.img_dict.keys():
found = False
for n, suff in enumerate(key_suffix):
if re.search(suff, k):
keys[n + 1].append(k)
found = True
break
if not found:
keys[0].append(k)
keys_sorted = []
for n in reversed(range(len(keys))):
keys[n].sort()
keys_sorted += keys[n]
return keys_sorted
def get_dataset_preview_count_map_range(self, *, selected_only=False):
"""
Returns the range of the Total Count Maps in the loaded datasets.
Parameters
----------
selected_only: bool
True - use only datasets that are currently selected for preview,
False - use all LOADED datasets (for which the total spectrum and
            total count map is computed)
Returns
-------
tuple(float)
the range of values `(value_min, value_max)`. Returns `(None, None)`
if no datasets are loaded (or selected if `selected_only=True`)
"""
v_min, v_max = None, None
for ds_name, ds in self.data_sets.items():
if not selected_only or ds.selected_for_preview:
if ds.data_ready:
dset_min, dset_max = ds.get_total_count_range()
if (v_min is None) or (v_max is None):
v_min, v_max = dset_min, dset_max
elif (dset_min is not None) and (dset_max is not None):
v_min = min(v_min, dset_min)
v_max = max(v_max, dset_max)
if (v_min is not None) and (v_max is not None):
if v_min >= v_max:
v_min, v_max = v_min - 0.005, v_max + 0.005 # Some small range
return v_min, v_max
def is_xrf_maps_available(self):
"""
        The method returns True if one or more sets of XRF maps are loaded (or computed) and
available for display.
Returns
-------
True/False - One or more sets of XRF Maps are available/not available
"""
regex_list = [".*_fit", ".*_roi"]
for key in self.img_dict.keys():
for reg in regex_list:
if re.search(reg, key):
return True
return False
def get_uid_short(self, uid=None):
uid = self.runuid if uid is None else uid
return uid.split("-")[0]
@observe(str('runid'))
def _update_fname(self, change):
self.fname_from_db = 'scan2D_'+str(self.runid)
def load_data_runid(self, run_id_uid):
"""
Load data according to runID number.
requires databroker
"""
# Clear data. If reading the file fails, then old data should not be kept.
self.file_channel_list = []
self.clear()
if db is None:
raise RuntimeError("Databroker is not installed. The scan cannot be loaded.")
s = f"ID {run_id_uid}" if isinstance(run_id_uid, int) \
else f"UID '{run_id_uid}'"
logger.info(f"Loading scan with {s}")
rv = render_data_to_gui(run_id_uid,
create_each_det=self.load_each_channel,
working_directory=self.working_directory,
file_overwrite_existing=self.file_overwrite_existing)
if rv is None:
logger.error(f"Data from scan #{self.runid} was not loaded")
return
img_dict, self.data_sets, fname, detector_name, self.scan_metadata = rv
# Process metadata
self._metadata_update_program_state()
# Replace relative scan ID with true scan ID.
if "scan_id" in self.scan_metadata:
self.runid = int(self.scan_metadata["scan_id"])
if "scan_uid" in self.scan_metadata:
self.runuid = self.scan_metadata["scan_uid"]
# Change file name without rereading the file
self.file_name_silent_change = True
self.file_name = os.path.basename(fname)
logger.info(f"Data loading: complete dataset for the detector "
f"'{detector_name}' was loaded successfully.")
self.file_channel_list = list(self.data_sets.keys())
self.img_dict = img_dict
self.data_ready = True
default_channel = 0 # Use summed data as default
self.file_opt = default_channel
if self.file_channel_list and self.data_sets:
self.data_sets[self.file_channel_list[default_channel]].selected_for_preview = True
self.update_data_set_buffers()
try:
self.selected_file_name = self.file_channel_list[self.file_opt]
except IndexError:
pass
# passed to fitting part for single pixel fitting
self.data_all = self.data_sets[self.selected_file_name].raw_data
# get summed data or based on mask
self.data, self.data_total_count = \
self.data_sets[self.selected_file_name].get_total_spectrum_and_count()
def update_data_set_buffers(self):
""" Update buffers in all datasets """
for dset_name, dset in self.data_sets.items():
# Update only the data that are needed
if dset.selected_for_preview or dset_name == self.selected_file_name:
dset.update_buffers()
@observe(str('file_opt'))
def choose_file(self, change):
if not self.data_ready:
return
if self.file_opt < 0 or self.file_opt >= len(self.file_channel_list):
self.file_opt = 0
# selected file name from all channels
# controlled at top level gui.py startup
try:
self.selected_file_name = self.file_channel_list[self.file_opt]
except IndexError:
pass
# passed to fitting part for single pixel fitting
self.data_all = self.data_sets[self.selected_file_name].raw_data
# get summed data or based on mask
self.data, self.data_total_count = \
self.data_sets[self.selected_file_name].get_total_spectrum_and_count()
def get_selected_detector_channel(self):
r"""
Returns selected channel name. Expected values are ``sum``, ``det1``, ``det2``, etc.
If no channel is selected or it is impossible to determine the channel name, then
the return value is ``None`` (this is not a normal outcome).
"""
det_channel = None
if self.selected_file_name:
try:
# The channel is supposed to be the last component of the 'selected_file_name'
det_channel = self.selected_file_name.split("_")[-1]
except Exception:
pass
return det_channel
def apply_mask_to_datasets(self):
"""Set mask and ROI selection for datasets."""
if self.mask_active:
# Load mask data
if len(self.mask_file_path) > 0:
ext = os.path.splitext(self.mask_file_path)[-1].lower()
msg = ""
try:
if '.npy' == ext:
self.mask_data = np.load(self.mask_file_path)
elif '.txt' == ext:
self.mask_data = np.loadtxt(self.mask_file_path)
else:
self.mask_data = np.array(Image.open(self.mask_file_path))
for k in self.data_sets.keys():
self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)
# TODO: remove the following code if not needed
# I see no reason of adding the mask image to every processed dataset
# for k in self.img_dict.keys():
# if 'fit' in k:
# self.img_dict[k]["mask"] = self.mask_data
except IOError as ex:
msg = f"Mask file '{self.mask_file_path}' cannot be loaded: {str(ex)}."
except Exception as ex:
msg = f"Mask from file '{self.mask_file_path}' cannot be set: {str(ex)}."
if msg:
logger.error(msg)
self.mask_data = None
self.mask_active = False # Deactivate the mask
# Now raise the exception so that proper error processing can be performed
raise RuntimeError(msg)
logger.debug(f"Mask was successfully loaded from file '{self.mask_file_path}'")
else:
# We keep the file name, but there is no need to keep the data, which is loaded from
# file each time the mask is loaded. Mask is relatively small and the file can change
# between the function calls, so it's better to load new data each time.
self.mask_data = None
# Now clear the mask in each dataset
for k in self.data_sets.keys():
self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)
# TODO: remove the following code if not needed
# There is also no reason to remove the mask image if it was not added
# for k in self.img_dict.keys():
# if 'fit' in k:
# self.img_dict[k]["mask"] = self.mask_data
logger.debug("Setting spatial ROI ...")
logger.debug(f" ROI selection is active: {self.roi_selection_active}")
logger.debug(f" Starting position: ({self.roi_row_start}, {self.roi_col_start})")
logger.debug(f" Ending position (not included): ({self.roi_row_end}, {self.roi_col_end})")
try:
for k in self.data_sets.keys():
self.data_sets[k].set_selection(pt_start=(self.roi_row_start, self.roi_col_start),
pt_end=(self.roi_row_end, self.roi_col_end),
selection_active=self.roi_selection_active)
except Exception as ex:
msg = f"Spatial ROI selection can not be set: {str(ex)}\n"
logger.error(msg)
raise RuntimeError(ex)
# TODO: it may be more logical to pass the data somewhere else. We leave it here for now.
        # Select raw data for single pixel fitting.
self.data_all = self.data_sets[self.selected_file_name].raw_data
# Create Dask client to speed up processing of multiple datasets
client = dask_client_create()
# Run computations with the new selection and mask
# ... for the dataset selected for processing
self.data, self.data_total_count = \
self.data_sets[self.selected_file_name].get_total_spectrum_and_count(client=client)
# ... for all datasets selected for preview except the one selected for processing.
for key in self.data_sets.keys():
if (key != self.selected_file_name) and self.data_sets[key].selected_for_preview:
self.data_sets[key].update_buffers(client=client)
client.close()
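# Usage sketch (illustrative only, not executed): a typical sequence for applying a
# mask and a spatial ROI through the model above. The instance name 'model' and all
# values are hypothetical.
#
#   model.mask_file_path = "/path/to/mask.npy"       # .npy, .txt or image file
#   model.mask_active = True
#   model.roi_selection_active = True
#   model.roi_row_start, model.roi_col_start = 0, 0
#   model.roi_row_end, model.roi_col_end = 20, 30     # end point is not included
#   model.apply_mask_to_datasets()                    # raises RuntimeError on failure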
plot_as = ['Sum', 'Point', 'Roi']
class DataSelection(Atom):
"""
The class used for management of raw data. Function
`get_sum` is used to compute the total spectrum (sum of spectra for all pixels)
and total count (sum of counts over all energy bins for each pixel).
    Selection of pixels may be set by selecting an area (limited by `point1` and
    `point2`) or by setting the mask `mask`. The area selection is also applied to
    the mask if both are set.
There are some unresolved questions about logic:
- what is exactly the role of `self.io_model.data`? It is definitely used when
the data is plotted, but is it ever bypassed by directly calling `get_sum`?
If the data is always accessed using `self.io_model.data`, then storing the averaged
spectrum in cache is redundant, but since the array is small, it's not
much overhead to keep another copy just in case.
- there is no obvious way to set `point1` and `point2`
(it seems like point selection doesn't work in PyXRF). May be at some point
this needs to be fixed.
Anyway, the logic is not defined to the level when it makes sense to
write tests for this class. There are tests for underlying computational
functions though.
Attributes
----------
filename : str
plot_choice : enum
        methods to plot
point1 : str
starting position
point2 : str
ending position
roi : list
raw_data : array
experiment 3D data
data : array
    selected_for_preview : bool
        whether to plot the data for preview (sum, roi or point)
"""
filename = Str()
plot_choice = Enum(*plot_as)
# point1 = Str('0, 0')
# point2 = Str('0, 0')
selection_active = Bool(False)
sel_pt_start = List()
sel_pt_end = List() # Not included
mask_active = Bool(False)
mask = Typed(np.ndarray)
# 'raw_data' may be numpy array, dask array or core.map_processing.RawHDF5Dataset
# Processing functions are expected to support all those types
raw_data = Typed(object)
selected_for_preview = Bool(False) # Dataset is currently selected for preview
data_ready = Bool(False) # Total spectrum and total count map are computed
fit_name = Str()
fit_data = Typed(np.ndarray)
_cached_spectrum = Dict()
def get_total_spectrum(self, *, client=None):
total_spectrum, _ = self._get_sum(client=client)
return total_spectrum.copy()
def get_total_count(self, *, client=None):
_, total_count = self._get_sum(client=client)
return total_count.copy()
def get_total_spectrum_and_count(self, *, client=None):
total_spectrum, total_count = self._get_sum(client=client)
return total_spectrum.copy(), total_count.copy()
def update_buffers(self, *, client=None):
logger.debug(f"Dataset '{self.filename}': updating cached buffers.")
self._get_sum(client=client)
def get_total_count_range(self):
"""
Get the range of values of total count map
Returns
-------
tuple(float)
(value_min, value_max)
"""
total_count = self.get_total_count()
return total_count.min(), total_count.max()
@observe(str('selected_for_preview'))
def _update_roi(self, change):
if self.selected_for_preview:
self.update_buffers()
def set_selection(self, *, pt_start, pt_end, selection_active):
"""
Set spatial ROI selection
Parameters
----------
pt_start: tuple(int) or list(int)
Starting point of the selection `(row_start, col_start)`, where
`row_start` is in the range `0..n_rows-1` and `col_start` is
in the range `0..n_cols-1`.
pt_end: tuple(int) or list(int)
End point of the selection, which is not included in the selection:
`(row_end, col_end)`, where `row_end` is in the range `1..n_rows` and
`col_end` is in the range `1..n_cols`.
selection_active: bool
`True` - selection is active, `False` - selection is not active.
The selection points must be set before the selection is set active,
otherwise `ValueError` is raised
Raises
------
ValueError is raised if selection is active, but the points are not set.
"""
if selection_active and (pt_start is None or pt_end is None):
raise ValueError("Selection is active, but at least one of the points is not set")
def _check_pt(pt):
if pt is not None:
if not isinstance(pt, Iterable):
raise ValueError(f"The point value is not iterable: {pt}.")
if len(list(pt)) != 2:
raise ValueError(f"The point ({pt}) must be represented by iterable of length 2.")
_check_pt(pt_start)
_check_pt(pt_end)
pt_start = list(pt_start)
pt_end = list(pt_end)
def _reset_pt(pt, value, pt_range):
"""
Verify if pt is in the range `(pt_range[0], pt_range[1])` including `pt_range[1]`.
Clip the value to be in the range. If `pt` is negative (assume it is not set), then
set it to `value`.
"""
pt = int(np.clip(pt, a_min=pt_range[0], a_max=pt_range[1])
if pt >= 0 else value)
return pt
map_size = self.get_map_size()
pt_start[0] = _reset_pt(pt_start[0], 0, (0, map_size[0] - 1))
pt_start[1] = _reset_pt(pt_start[1], 0, (0, map_size[1] - 1))
pt_end[0] = _reset_pt(pt_end[0], map_size[0], (1, map_size[0]))
pt_end[1] = _reset_pt(pt_end[1], map_size[1], (1, map_size[1]))
if pt_start[0] > pt_end[0] or pt_start[1] > pt_end[1]:
msg = f"({pt_start[0]}, {pt_start[1]}) .. ({pt_end[0]}, {pt_end[1]})"
raise ValueError(f"Selected spatial ROI does not include any points: {msg}")
self.sel_pt_start = list(pt_start) if pt_start is not None else None
self.sel_pt_end = list(pt_end) if pt_end is not None else None
self.selection_active = selection_active
def set_mask(self, *, mask, mask_active):
"""
Set mask by supplying np array. The size of the array must match the size of the map.
Clear the mask by supplying `None`.
        Parameters
        ----------
mask: ndarray or None
Array that contains the mask data or None to clear the mask
mask_active: bool
`True` - apply the mask, `False` - don't apply the mask
The mask must be set if `mask_active` is set `True`, otherwise `ValueError` is raised.
"""
if mask_active and mask is None:
raise ValueError("Mask is set active, but no mask is set.")
if (mask is not None) and not isinstance(mask, np.ndarray):
raise ValueError(f"Mask must be a Numpy array or None: type(mask) = {type(mask)}")
if mask is None:
self.mask = None
else:
m_size = self.get_map_size()
            if tuple(mask.shape) != tuple(m_size):
raise ValueError(f"The mask shape({mask.shape}) is not equal to the map size ({m_size})")
self.mask = np.array(mask) # Create a copy
def get_map_size(self):
"""
Returns map size as a tuple `(n_rows, n_columns)`. The function is returning
the dimensions 0 and 1 of the raw data without loading the data.
"""
return self.raw_data.shape[0], self.raw_data.shape[1]
def get_raw_data_shape(self):
"""
Returns the shape of raw data: `(n_rows, n_columns, n_energy_bins)`.
"""
return self.raw_data.shape
def _get_sum(self, *, client=None):
        # Only the values of 'mask', 'pt_start' and 'pt_end' will be cached
mask = self.mask if self.mask_active else None
pt_start = self.sel_pt_start if self.selection_active else None
pt_end = self.sel_pt_end if self.selection_active else None
def _compare_cached_settings(cache, pt_start, pt_end, mask):
if not cache:
return False
# Verify that all necessary keys are in the dictionary
if not all([_ in cache.keys()
for _ in ("pt_start", "pt_end", "mask", "spec")]):
return False
if (cache["pt_start"] != pt_start) or (cache["pt_end"] != pt_end):
return False
mask_none = [_ is None for _ in (mask, cache["mask"])]
if all(mask_none): # Mask is not applied in both cases
return True
            elif any(mask_none):  # Mask is applied only in one case
return False
# Mask is applied in both cases, so compare the masks
if not (cache["mask"] == mask).all():
return False
return True
cache_valid = _compare_cached_settings(self._cached_spectrum,
pt_start=pt_start, pt_end=pt_end,
mask=mask)
if cache_valid:
# We create copy to make sure that cache remains intact
logger.debug(f"Dataset '{self.filename}': using cached copy of the averaged spectrum ...")
# The following are references to cached objects. Care should be taken not to modify them.
spec = self._cached_spectrum["spec"]
count = self._cached_spectrum["count"]
else:
logger.debug(f"Dataset '{self.filename}': computing the total spectrum and total count map "
"from raw data ...")
SC = SpectrumCalculator(pt_start=pt_start, pt_end=pt_end, mask=mask)
spec, count = SC.get_spectrum(self.raw_data, client=client)
            # Cache the computed spectrum (with all settings)
self._cached_spectrum["pt_start"] = pt_start.copy() if pt_start is not None else None
self._cached_spectrum["pt_end"] = pt_end.copy() if pt_end is not None else None
self._cached_spectrum["mask"] = mask.copy() if mask is not None else None
self._cached_spectrum["spec"] = spec.copy()
self._cached_spectrum["count"] = count.copy()
self.data_ready = True
# Return the 'sum' spectrum as regular 64-bit float (raw data is in 'np.float32')
return spec.astype(np.float64, copy=False), count.astype(np.float64, copy=False)
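# Usage sketch (illustrative only, not executed): computing the total spectrum and
# total count map for a DataSelection object. Here 'raw' stands for any 3D array-like
# object of shape (n_rows, n_cols, n_energy_bins) supported by the processing
# functions (numpy array, dask array or RawHDF5Dataset); 'mask_2d' is a 2D ndarray of
# the same size as the map. Both names are hypothetical.
#
#   ds = DataSelection(filename="scan2D_1000_sum", raw_data=raw)
#   ds.set_selection(pt_start=(0, 0), pt_end=(10, 10), selection_active=True)
#   ds.set_mask(mask=mask_2d, mask_active=True)
#   spec, count = ds.get_total_spectrum_and_count()   # results are cached until settings change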
class SpectrumCalculator(object):
"""
Calculate summed spectrum according to starting and ending positions.
"""
def __init__(self, *, pt_start=None, pt_end=None, mask=None):
"""
Initialize the class. The spatial ROI selection and the mask are applied
to the data.
Parameters
----------
pt_start: iterable(int) or None
indexes of the beginning of the selection: `(row_start, col_start)`.
`row_start` is in the range `0..n_rows-1`,
`col_start` is in the range `0..n_cols-1`.
            The point `(row_start, col_start)` is included in the selection.
        pt_end: iterable(int) or None
            indexes of the end of the selection: `(row_end, col_end)`.
`row_end` is in the range `1..n_rows`,
`col_end` is in the range `1..n_cols`.
The point `col_end` is not included in the selection.
If `pt_end` is None, then `pt_start` MUST be None.
mask: ndarray(float) or None
the mask that is applied to the data, shape (n_rows, n_cols)
"""
def _validate_point(v):
v_out = None
if v is not None:
if isinstance(v, Iterable) and len(list(v)) == 2:
v_out = list(v)
else:
logger.warning("SpectrumCalculator.__init__(): Spatial ROI selection "
f"point '{v}' is invalid. Using 'None' instead.")
return v_out
self._pt_start = _validate_point(pt_start)
self._pt_end = _validate_point(pt_end)
# Validate 'mask'
if mask is not None:
if not isinstance(mask, np.ndarray):
logger.warning(f"SpectrumCalculator.__init__(): type of parameter 'mask' must by np.ndarray, "
f"type(mask) = {type(mask)}. Using mask=None instead.")
mask = None
elif mask.ndim != 2:
logger.warning(f"SpectrumCalculator.__init__(): the number of dimensions "
"in ndarray 'mask' must be 2, "
f"mask.ndim = {mask.ndim}. Using mask=None instead.")
mask = None
self.mask = mask
def get_spectrum(self, data, *, client=None):
"""
Run computation of the total spectrum and total count. Use the selected
spatial ROI and/or mask
Parameters
----------
data: ndarray(float)
raw data array, shape (n_rows, n_cols, n_energy_bins)
client: dask.distributed.Client or None
Dask client. If None, then local client will be created
"""
selection = None
if self._pt_start:
if self._pt_end:
# Region is selected
selection = (self._pt_start[0],
self._pt_start[1],
self._pt_end[0] - self._pt_start[0],
self._pt_end[1] - self._pt_start[1])
else:
# Only a single point is selected
selection = (self._pt_start[0], self._pt_start[1], 1, 1)
progress_bar = TerminalProgressBar("Computing total spectrum: ")
total_spectrum, total_count = compute_total_spectrum_and_count(
data, selection=selection, mask=self.mask,
chunk_pixels=5000, n_chunks_min=4,
progress_bar=progress_bar, client=client)
return total_spectrum, total_count
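# Usage sketch (illustrative only, not executed): direct use of SpectrumCalculator on
# a raw 3D dataset 'raw_data_3d' (hypothetical name). Passing an existing Dask client
# avoids creating a local one for each call.
#
#   sc = SpectrumCalculator(pt_start=(0, 0), pt_end=(10, 10), mask=None)
#   total_spectrum, total_count = sc.get_spectrum(raw_data_3d, client=None)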
def file_handler(working_directory, file_name, load_each_channel=True, spectrum_cut=3000):
# send information on GUI level later !
get_data_nsls2 = True
try:
if get_data_nsls2 is True:
return read_hdf_APS(working_directory, file_name,
spectrum_cut=spectrum_cut,
load_each_channel=load_each_channel)
else:
return read_MAPS(working_directory,
file_name, channel_num=1)
except IOError as e:
logger.error("I/O error({0}): {1}".format(e.errno, str(e)))
logger.error('Please select .h5 file')
except Exception:
logger.error("Unexpected error:", sys.exc_info()[0])
raise
def read_xspress3_data(file_path):
"""
Data IO for xspress3 format.
Parameters
----------
    file_path : str
        full path to the xspress3 hdf5 file
Returns
-------
data_output : dict
with data from each channel
"""
data_output = {}
# file_path = os.path.join(working_directory, file_name)
with h5py.File(file_path, 'r') as f:
data = f['entry/instrument']
# data from channel summed
exp_data = np.asarray(data['detector/data'])
xval = np.asarray(data['NDAttributes/NpointX'])
yval = np.asarray(data['NDAttributes/NpointY'])
# data size is (ysize, xsize, num of frame, num of channel, energy channel)
exp_data = np.sum(exp_data, axis=2)
num_channel = exp_data.shape[2]
# data from each channel
for i in range(num_channel):
channel_name = 'channel_'+str(i+1)
data_output.update({channel_name: exp_data[:, :, i, :]})
# change x,y to 2D array
xval = xval.reshape(exp_data.shape[0:2])
yval = yval.reshape(exp_data.shape[0:2])
data_output.update({'x_pos': xval})
data_output.update({'y_pos': yval})
return data_output
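# Usage sketch (illustrative only, not executed): reading an Xspress3 HDF5 file.
# The file path is hypothetical.
#
#   data = read_xspress3_data("/data/xspress3_scan_0001.h5")
#   ch1 = data["channel_1"]                       # 3D array: (n_rows, n_cols, n_energy)
#   x_pos, y_pos = data["x_pos"], data["y_pos"]   # 2D position maps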
def output_data(dataset_dict=None, output_dir=None,
file_format='tiff', scaler_name=None, use_average=False,
dataset_name=None, quant_norm=False,
param_quant_analysis=None,
positions_dict=None,
interpolate_to_uniform_grid=False,
scaler_name_list=None):
"""
    Save the XRF maps from the selected dataset as TIFF or TXT files.
Parameters
----------
dataset_dict : dict(ndarray)
Dictionary of XRF maps contained in the selected dataset. Each XRF map is saved in
the individual file. File name consists of the detector channel name (e.g. 'detsum', 'det1' etc.)
and map name (dictionary key). Optional list of scaler names 'scaler_name_list' may be passed
to the function. If map name is contained in the scaler name list, then the detector channel
name is not attached to the file name.
output_dir : str
        folder in which to save the output files
file_format : str, optional
tiff or txt
scaler_name : str, optional
if given, normalization will be performed.
use_average : Bool, optional
        when normalizing, multiply by the mean value of the scaler,
i.e., norm_data = data/scaler * np.mean(scaler)
dataset_name : str
        the name of the selected dataset (in Element Map tab)
        should end with the suffix '_fit' (for sum of all channels), '_det1_fit' etc.
quant_norm : bool
True - quantitative normalization is enabled, False - disabled
param_quant_analysis : ParamQuantitativeAnalysis
reference to class, which contains parameters for quantitative normalization
interpolate_to_uniform_grid : bool
interpolate the result to uniform grid before saving to tiff and txt files
The grid dimensions match the dimensions of positional data for X and Y axes.
The range of axes is chosen to fit the values of X and Y.
scaler_name_list : list(str)
The list of names of scalers that may exist in the dataset 'dataset_dict'
"""
if not dataset_name:
raise RuntimeError("Dataset is not selected. Data can not be saved.")
if dataset_dict is None:
dataset_dict = {}
if positions_dict is None:
positions_dict = {}
# Extract the detector channel name from dataset name
# Dataset name ends with '_fit' for the sum of all channels
# and '_detX_fit' for detector X (X is 1, 2, 3 ..)
# The extracted detector channel name should be 'detsum', 'det1', 'det2' etc.
dset = None
if re.search(r"_det\d+_fit$", dataset_name):
dset = re.search(r"_det\d_", dataset_name)[0]
dset = dset.strip('_')
elif re.search(r"_fit$", dataset_name):
dset = "detsum"
elif re.search(r"_det\d+_fit$", dataset_name):
dset = re.search(r"_det\d_", dataset_name)[0]
dset = dset.strip('_')
dset += "_roi"
elif re.search(r"_roi$", dataset_name):
dset = "detsum_roi"
elif re.search(r"_scaler$", dataset_name):
dset = "scaler"
else:
dset = dataset_name
file_format = file_format.lower()
fit_output = {}
for k, v in dataset_dict.items():
fit_output[k] = v
for k, v in positions_dict.items():
fit_output[k] = v
logger.info(f"Saving data as {file_format.upper()} files. Directory '{output_dir}'")
if scaler_name:
logger.info(f"Data is NORMALIZED before saving. Scaler: '{scaler_name}'")
    if interpolate_to_uniform_grid:
if ("x_pos" in fit_output) and ("y_pos" in fit_output):
logger.info("Data is INTERPOLATED to uniform grid.")
for k, v in fit_output.items():
                # Do not interpolate positions
if 'pos' in k:
continue
fit_output[k], xx, yy = grid_interpolate(v, fit_output["x_pos"], fit_output["y_pos"])
fit_output["x_pos"] = xx
fit_output["y_pos"] = yy
else:
logger.error("Positional data 'x_pos' and 'y_pos' is not found in the dataset.\n"
"Iterpolation to uniform grid can not be performed. "
"Data is saved without interpolation.")
output_data_to_tiff(fit_output, output_dir=output_dir,
file_format=file_format, name_prefix_detector=dset, name_append="",
scaler_name=scaler_name, quant_norm=quant_norm,
param_quant_analysis=param_quant_analysis,
use_average=use_average,
scaler_name_list=scaler_name_list)
def output_data_to_tiff(fit_output,
output_dir=None,
file_format='tiff', name_prefix_detector=None, name_append=None,
scaler_name=None, scaler_name_list=None,
quant_norm=False, param_quant_analysis=None,
use_average=False):
"""
    Read data in memory and save them into tiff or txt files.
Parameters
----------
fit_output:
dict of fitting data and scaler data
output_dir : str, optional
        folder in which to save the output files
file_format : str, optional
tiff or txt
name_prefix_detector : str
prefix appended to file name except for the files that contain positional data and scalers
name_append: str, optional
more information saved to output file name
scaler_name : str, optional
if given, normalization will be performed.
scaler_name_list : list(str)
The list of names of scalers that may exist in the dataset 'dataset_dict'
quant_norm : bool
True - apply quantitative normalization, False - use normalization by scaler
param_quant_analysis : ParamQuantitativeAnalysis
reference to class, which contains parameters for quantitative normalization,
if None, then quantitative normalization will be skipped
use_average : Bool, optional
        when normalizing, multiply by the mean value of the scaler,
i.e., norm_data = data/scaler * np.mean(scaler)
"""
if output_dir is None:
raise ValueError("Output directory is not specified.")
if name_append:
name_append = f"_{name_append}"
else:
# If 'name_append' is None, set it to "" so that it could be safely appended to a string
name_append = ""
    if scaler_name_list is None:
        scaler_name_list = []
    file_format = file_format.lower()
allowed_formats = ('txt', 'tiff')
if file_format not in allowed_formats:
raise RuntimeError(f"The specified format '{file_format}' not in {allowed_formats}")
# Create the output directory if it does not exist
os.makedirs(output_dir, exist_ok=True)
def _save_data(data, *, output_dir, file_name,
name_prefix_detector, name_append,
file_format, scaler_name_list):
# The 'file_format' is specified as file extension
file_extension = file_format.lower()
# If data is scalar or position, then don't attach the prefix
fname = f"{name_prefix_detector}_{file_name}" \
if (file_name not in scaler_name_list) and ("pos" not in file_name) \
else file_name
fname = f"{fname}{name_append}.{file_extension}"
fname = os.path.join(output_dir, fname)
if file_format.lower() == 'tiff':
sio.imsave(fname, data.astype(np.float32))
elif file_format.lower() == 'txt':
np.savetxt(fname, data.astype(np.float32))
else:
raise ValueError(f"Function is called with invalid file format '{file_format}'.")
if quant_norm:
if param_quant_analysis:
for data_name, data in fit_output.items():
# Quantitative normalization
data_normalized, quant_norm_applied = param_quant_analysis.apply_quantitative_normalization(
data_in=data,
scaler_dict=fit_output,
scaler_name_default=None, # We don't want data to be scaled
data_name=data_name,
name_not_scalable=None) # For simplicity, all saved maps are normalized
if quant_norm_applied:
# Save data only if quantitative normalization was performed.
_save_data(data_normalized, output_dir=output_dir,
file_name=data_name,
name_prefix_detector=name_prefix_detector,
name_append=f"{name_append}_quantitative",
file_format=file_format,
scaler_name_list=scaler_name_list)
else:
logger.error("Quantitative analysis parameters are not provided. "
f"Quantitative data is not saved in {file_format.upper()} format.")
# Normalize data if scaler is provided
if scaler_name is not None:
if scaler_name in fit_output:
scaler_data = fit_output[scaler_name]
for data_name, data in fit_output.items():
if 'pos' in data_name or 'r2' in data_name:
continue
# Normalization of data
data_normalized = normalize_data_by_scaler(data, scaler_data)
if use_average is True:
data_normalized *= np.mean(scaler_data)
_save_data(data_normalized, output_dir=output_dir,
file_name=data_name,
name_prefix_detector=name_prefix_detector,
name_append=f"{name_append}_norm",
file_format=file_format,
scaler_name_list=scaler_name_list)
else:
logger.warning(f"The scaler '{scaler_name}' was not found. Data normalization "
f"was not performed for {file_format.upper()} file.")
# Always save not normalized data
for data_name, data in fit_output.items():
_save_data(data, output_dir=output_dir,
file_name=data_name,
name_prefix_detector=name_prefix_detector,
name_append=name_append,
file_format=file_format,
scaler_name_list=scaler_name_list)
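# Example of the output file naming produced by '_save_data' above (values are
# illustrative): with name_prefix_detector='detsum' and file_format='tiff', an XRF map
# 'Fe_K' normalized by a scaler is written as 'detsum_Fe_K_norm.tiff'. A map whose
# name is listed in 'scaler_name_list' (e.g. 'i0') or contains 'pos' is saved without
# the detector prefix. Un-normalized copies are always saved without the '_norm'
# suffix, e.g. 'detsum_Fe_K.tiff'.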
def read_hdf_APS(working_directory,
file_name, spectrum_cut=3000,
# The following parameters allow fine grained control over what is loaded from the file
load_summed_data=True, # Enable loading of RAW, FIT or ROI data from 'sum' channel
load_each_channel=False, # .. RAW data from individual detector channels
load_processed_each_channel=True, # .. FIT or ROI data from the detector channels
load_raw_data=True, # For all channels: load RAW data
load_fit_results=True, # .. load FIT data
load_roi_results=True): # .. load ROI data
"""
Data IO for files similar to APS Beamline 13 data format.
This might be changed later.
Parameters
----------
working_directory : str
path folder
file_name : str
selected h5 file
spectrum_cut : int, optional
        use only the spectrum channels from 0 up to this value (e.g. 3000)
load_summed_data : bool, optional
load summed spectrum or not
load_each_channel : bool, optional
indicates whether to load raw experimental data for each detector channel or not
load_raw_data : bool
load raw experimental data
load_processed_each_channel : bool
indicates whether or not to load processed results (fit, roi) for each detector channel
    load_fit_results : bool
load fitting results
load_roi_results : bool
load results of roi computation
Returns
-------
data_dict : dict
with fitting data
data_sets : dict
data from each channel and channel summed, a dict of DataSelection objects
"""
data_sets = OrderedDict()
img_dict = OrderedDict()
# Empty container for metadata
mdata = ScanMetadataXRF()
file_path = os.path.join(working_directory, file_name)
# defined in other_list in config file
try:
dict_sc = retrieve_data_from_hdf_suitcase(file_path)
except Exception:
dict_sc = {}
with h5py.File(file_path, 'r+') as f:
# Retrieve metadata if it exists
if "xrfmap/scan_metadata" in f: # Metadata is always loaded
metadata = f["xrfmap/scan_metadata"]
for key, value in metadata.attrs.items():
# Convert ndarrays to lists (they were lists before they were saved)
if isinstance(value, np.ndarray):
value = list(value)
mdata[key] = value
data = f['xrfmap']
fname = file_name.split('.')[0]
if load_summed_data and load_raw_data:
try:
# data from channel summed
# exp_data = np.array(data['detsum/counts'][:, :, 0:spectrum_cut],
# dtype=np.float32)
# exp_data = np.array(data['detsum/counts'], dtype=np.float32)
data_shape = data['detsum/counts'].shape
exp_data = RawHDF5Dataset(file_path, 'xrfmap/detsum/counts',
shape=data_shape)
logger.warning(f"We use spectrum range from 0 to {spectrum_cut}")
logger.info(f"Exp. data from h5 has shape of: {data_shape}")
fname_sum = f"{fname}_sum"
DS = DataSelection(filename=fname_sum,
raw_data=exp_data)
data_sets[fname_sum] = DS
logger.info('Data of detector sum is loaded.')
except KeyError:
print('No data is loaded for detector sum.')
if 'scalers' in data: # Scalers are always loaded if data is available
det_name = data['scalers/name']
temp = {}
for i, n in enumerate(det_name):
if not isinstance(n, str):
n = n.decode()
temp[n] = data['scalers/val'][:, :, i]
img_dict[f"{fname}_scaler"] = temp
# also dump other data from suitcase if required
if len(dict_sc) != 0:
img_dict[f"{fname}_scaler"].update(dict_sc)
if 'positions' in data: # Positions are always loaded if data is available
pos_name = data['positions/name']
temp = {}
for i, n in enumerate(pos_name):
if not isinstance(n, str):
n = n.decode()
temp[n] = data['positions/pos'][i, :]
img_dict['positions'] = temp
# TODO: rewrite the algorithm for finding the detector channels (not robust)
# find total channel:
channel_num = 0
for v in list(data.keys()):
if 'det' in v:
channel_num = channel_num+1
channel_num = channel_num-1 # do not consider det_sum
# data from each channel
if load_each_channel and load_raw_data:
for i in range(1, channel_num+1):
det_name = f"det{i}"
file_channel = f"{fname}_det{i}"
try:
# exp_data_new = np.array(data[f"{det_name}/counts"][:, :, 0:spectrum_cut],
# dtype=np.float32)
data_shape = data[f"{det_name}/counts"].shape
exp_data_new = RawHDF5Dataset(file_path, f"xrfmap/{det_name}/counts",
shape=data_shape)
DS = DataSelection(filename=file_channel,
raw_data=exp_data_new)
data_sets[file_channel] = DS
logger.info(f"Data from detector channel {i} is loaded.")
except KeyError:
print(f"No data is loaded for {det_name}.")
if load_processed_each_channel:
for i in range(1, channel_num + 1):
det_name = f"det{i}"
file_channel = f"{fname}_det{i}"
if 'xrf_fit' in data[det_name] and load_fit_results:
try:
fit_result = get_fit_data(data[det_name]['xrf_fit_name'][()],
data[det_name]['xrf_fit'][()])
img_dict.update({f"{file_channel}_fit": fit_result})
# also include scaler data
if 'scalers' in data:
img_dict[f"{file_channel}_fit"].update(img_dict[f"{fname}_scaler"])
except IndexError:
logger.info(f"No fitting data is loaded for channel {i}.")
if 'xrf_roi' in data[det_name] and load_roi_results:
try:
fit_result = get_fit_data(data[det_name]['xrf_roi_name'][()],
data[det_name]['xrf_roi'][()])
img_dict.update({f"{file_channel}_roi": fit_result})
# also include scaler data
if 'scalers' in data:
img_dict[f"{file_channel}_roi"].update(img_dict[f"{fname}_scaler"])
except IndexError:
logger.info(f"No ROI data is loaded for channel {i}.")
# read fitting results from summed data
if 'xrf_fit' in data['detsum'] and load_summed_data and load_fit_results:
try:
fit_result = get_fit_data(data['detsum']['xrf_fit_name'][()],
data['detsum']['xrf_fit'][()])
img_dict.update({f"{fname}_fit": fit_result})
if 'scalers' in data:
img_dict[f"{fname}_fit"].update(img_dict[f"{fname}_scaler"])
except (IndexError, KeyError):
logger.info('No fitting data is loaded for channel summed data.')
if 'xrf_roi' in data['detsum'] and load_summed_data and load_roi_results:
try:
fit_result = get_fit_data(data['detsum']['xrf_roi_name'][()],
data['detsum']['xrf_roi'][()])
img_dict.update({f"{fname}_roi": fit_result})
if 'scalers' in data:
img_dict[f"{fname}_roi"].update(img_dict[f"{fname}_scaler"])
except (IndexError, KeyError):
logger.info('No ROI data is loaded for summed data.')
return img_dict, data_sets, mdata
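# Usage sketch (illustrative only, not executed): loading a PyXRF HDF5 file and
# inspecting the returned containers. The directory and file name are hypothetical.
#
#   img_dict, data_sets, mdata = read_hdf_APS("/data/xrf", "scan2D_1000.h5",
#                                             load_each_channel=False)
#   list(img_dict.keys())    # e.g. ['scan2D_1000_scaler', 'positions', 'scan2D_1000_fit', ...]
#   list(data_sets.keys())   # e.g. ['scan2D_1000_sum']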
def render_data_to_gui(run_id_uid, *, create_each_det=False,
working_directory=None, file_overwrite_existing=False):
"""
Read data from databroker and save to Atom class which GUI can take.
.. note:: Requires the databroker package from NSLS2
Parameters
----------
run_id_uid : int or str
ID or UID of a run
create_each_det : bool
True: load data from all detector channels
False: load only the sum of all channels
working_directory : str
path to the directory where data files are saved
file_overwrite_existing : bool
True: overwrite data file if it exists
False: create unique file name by adding version number
"""
run_id, run_uid = fetch_run_info(run_id_uid) # May raise RuntimeError
data_sets = OrderedDict()
img_dict = OrderedDict()
# Don't create unique file name if the existing file is to be overwritten
fname_add_version = not file_overwrite_existing
# Create file name here, so that working directory may be attached to the file name
prefix = 'scan2D_'
fname = f"{prefix}{run_id}.h5"
if working_directory:
fname = os.path.join(working_directory, fname)
# It is better to use full run UID to fetch the data.
data_from_db = fetch_data_from_db(run_uid,
fpath=fname,
fname_add_version=fname_add_version,
file_overwrite_existing=file_overwrite_existing,
create_each_det=create_each_det,
# Always create data file (processing results
# are going to be saved in the file)
output_to_file=True)
if not len(data_from_db):
logger.warning(f"No detector data was found in Run #{run_id} ('{run_uid}').")
return
else:
logger.info(f"Data from {len(data_from_db)} detectors were found "
f"in Run #{run_id} ('{run_uid}').")
if len(data_from_db) > 1:
logger.warning(f"Selecting only the latest run (UID '{run_uid}') "
f"with from Run ID #{run_id}.")
# If the experiment contains data from multiple detectors (for example two separate
# Xpress3 detectors) that need to be treated separately, only the data from the
# first detector is loaded. Data from the second detector is saved to file and
# can be loaded from the file. Currently this is a very rare case (only one set
# of such experiments from SRX beamline exists).
data_out = data_from_db[0]['dataset']
fname = data_from_db[0]['file_name']
detector_name = data_from_db[0]['detector_name']
scan_metadata = data_from_db[0]['metadata']
# Create file name for the 'sum' dataset ('file names' are used as dictionary
# keys in data storage containers, as channel labels in plot legends,
    # and as channel names in data selection widgets.)
# Since there is currently no consistent metadata in the start documents
# and/or data files, let's leave original labeling conventions for now.
fname_no_ext = os.path.splitext(os.path.basename(fname))[0]
fname_sum = fname_no_ext + '_sum'
# Determine the number of available detector channels and create the list
# of channel names. The channels are named as 'det1', 'det2', 'det3' etc.
xrf_det_list = [nm for nm in data_out.keys() if 'det' in nm and 'sum' not in nm]
# Replace the references to raw data by the references to HDF5 datasets.
# This should also release memory used for storage of raw data
# It is expected that 'data_out' has keys 'det_sum', 'det1', 'det2', etc.
interpath = "xrfmap"
dset = "counts"
# Data from individual detectors may or may not be present in the file
for det_name in xrf_det_list:
dset_name = f"{interpath}/{det_name}/{dset}"
with h5py.File(fname, "r") as f:
dset_shape = f[dset_name].shape
data_out[det_name] = RawHDF5Dataset(fname, dset_name, dset_shape)
# The file is always expected to have 'detsum' dataset
dset_name = f"{interpath}/detsum/{dset}"
with h5py.File(fname, "r") as f:
dset_shape = f[dset_name].shape
data_out["det_sum"] = RawHDF5Dataset(fname, dset_name, dset_shape)
# Now fill 'data_sets' dictionary
DS = DataSelection(filename=fname_sum,
raw_data=data_out["det_sum"])
data_sets[fname_sum] = DS
logger.info("Data loading: channel sum is loaded successfully.")
for det_name in xrf_det_list:
exp_data = data_out[det_name]
fln = f"{fname_no_ext}_{det_name}"
DS = DataSelection(filename=fln,
raw_data=exp_data)
data_sets[fln] = DS
logger.info("Data loading: channel data is loaded successfully.")
if ('pos_data' in data_out) and ('pos_names' in data_out):
if 'x_pos' in data_out['pos_names'] and 'y_pos' in data_out['pos_names']:
p_dict = {}
for v in ['x_pos', 'y_pos']:
ind = data_out['pos_names'].index(v)
p_dict[v] = data_out['pos_data'][ind, :, :]
img_dict['positions'] = p_dict
logger.info("Data loading: positions data are loaded successfully.")
scaler_tmp = {}
for i, v in enumerate(data_out['scaler_names']):
scaler_tmp[v] = data_out['scaler_data'][:, :, i]
img_dict[fname_no_ext+'_scaler'] = scaler_tmp
logger.info("Data loading: scaler data are loaded successfully.")
return img_dict, data_sets, fname, detector_name, scan_metadata
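# Usage sketch (illustrative only, not executed): loading a run through Databroker.
# Requires a configured Databroker catalog; the Run ID and directory are hypothetical.
#
#   rv = render_data_to_gui(12345, create_each_det=False,
#                           working_directory="/data/xrf",
#                           file_overwrite_existing=False)
#   if rv is not None:
#       img_dict, data_sets, fname, detector_name, scan_metadata = rv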
def retrieve_data_from_hdf_suitcase(fpath):
"""
Retrieve data from suitcase part in hdf file.
Data name is defined in config file.
"""
data_dict = {}
with h5py.File(fpath, 'r+') as f:
other_data_list = [v for v in f.keys() if v != 'xrfmap']
if len(other_data_list) > 0:
f_hdr = f[other_data_list[0]].attrs['start']
if not isinstance(f_hdr, str):
f_hdr = f_hdr.decode('utf-8')
start_doc = ast.literal_eval(f_hdr)
other_data = f[other_data_list[0]+'/primary/data']
if start_doc['beamline_id'] == 'HXN':
current_dir = os.path.dirname(os.path.realpath(__file__))
config_file = 'hxn_pv_config.json'
config_path = sep_v.join(current_dir.split(sep_v)[:-2]+['configs', config_file])
with open(config_path, 'r') as json_data:
config_data = json.load(json_data)
extra_list = config_data['other_list']
fly_type = start_doc.get('fly_type', None)
subscan_dims = start_doc.get('subscan_dims', None)
if 'dimensions' in start_doc:
datashape = start_doc['dimensions']
elif 'shape' in start_doc:
datashape = start_doc['shape']
else:
logger.error('No dimension/shape is defined in hdr.start.')
datashape = [datashape[1], datashape[0]] # vertical first, then horizontal
for k in extra_list:
# k = k.encode('utf-8')
if k not in other_data.keys():
continue
_v = np.array(other_data[k])
v = _v.reshape(datashape)
if fly_type in ('pyramid',):
# flip position the same as data flip on det counts
v = flip_data(v, subscan_dims=subscan_dims)
data_dict[k] = v
return data_dict
def read_MAPS(working_directory,
file_name, channel_num=1):
# data_dict = OrderedDict()
data_sets = OrderedDict()
img_dict = OrderedDict()
# Empty container for metadata
mdata = ScanMetadataXRF()
# cut off bad point on the last position of the spectrum
# bad_point_cut = 0
fit_val = None
fit_v_pyxrf = None
file_path = os.path.join(working_directory, file_name)
print('file path is {}'.format(file_path))
with h5py.File(file_path, 'r+') as f:
data = f['MAPS']
fname = file_name.split('.')[0]
# for 2D MAP
# data_dict[fname] = data
# raw data
exp_data = data['mca_arr'][:]
# data from channel summed
roi_channel = data['channel_names'][()]
roi_val = data['XRF_roi'][:]
scaler_names = data['scaler_names'][()]
scaler_val = data['scalers'][:]
try:
# data from fit
fit_val = data['XRF_fits'][:]
except KeyError:
logger.info('No fitting from MAPS can be loaded.')
try:
fit_data = f['xrfmap/detsum']
fit_v_pyxrf = fit_data['xrf_fit'][:]
fit_n_pyxrf = fit_data['xrf_fit_name'][()]
print(fit_n_pyxrf)
except KeyError:
logger.info('No fitting from pyxrf can be loaded.')
# exp_shape = exp_data.shape
exp_data = exp_data.T
exp_data = np.rot90(exp_data, 1)
logger.info('File : {} with total counts {}'.format(fname,
np.sum(exp_data)))
DS = DataSelection(filename=fname,
raw_data=exp_data)
data_sets.update({fname: DS})
# save roi and fit into dict
temp_roi = {}
temp_fit = {}
temp_scaler = {}
temp_pos = {}
for i, name in enumerate(roi_channel):
temp_roi[name] = np.flipud(roi_val[i, :, :])
img_dict[fname+'_roi'] = temp_roi
if fit_val is not None:
for i, name in enumerate(roi_channel):
temp_fit[name] = fit_val[i, :, :]
img_dict[fname+'_fit_MAPS'] = temp_fit
cut_bad_col = 1
if fit_v_pyxrf is not None:
for i, name in enumerate(fit_n_pyxrf):
temp_fit[name] = fit_v_pyxrf[i, :, cut_bad_col:]
img_dict[fname+'_fit'] = temp_fit
for i, name in enumerate(scaler_names):
if name == 'x_coord':
temp_pos['x_pos'] = np.flipud(scaler_val[i, :, :])
elif name == 'y_coord':
temp_pos['y_pos'] = np.flipud(scaler_val[i, :, :])
else:
temp_scaler[name] = np.flipud(scaler_val[i, :, :])
img_dict[fname+'_scaler'] = temp_scaler
img_dict['positions'] = temp_pos
# read fitting results
# if 'xrf_fit' in data[detID]:
# fit_result = get_fit_data(data[detID]['xrf_fit_name'][()],
# data[detID]['xrf_fit'][()])
# img_dict.update({fname+'_fit': fit_result})
return img_dict, data_sets, mdata
def get_fit_data(namelist, data):
"""
Read fit data from h5 file. This is to be moved to filestore part.
Parameters
    ----------
namelist : list
list of str for element lines
data : array
3D array of fitting results
"""
data_temp = dict()
for i, v in enumerate(namelist):
if not isinstance(v, str):
v = v.decode()
data_temp.update({v: data[i, :, :]})
return data_temp
def read_hdf_to_stitch(working_directory, filelist,
shape, ignore_file=None):
"""
Read fitted results from each hdf file, and stitch them together.
Parameters
----------
working_directory : str
folder with all the h5 files and also the place to save output
filelist : list of str
names for all the h5 files
shape : list or tuple
        shape defines how to stitch all the h5 files. [vertical, horizontal]
ignore_file : list of str
to be implemented
Returns
-------
dict :
combined results from each h5 file
"""
out = {}
# shape_v = {}
horizontal_v = 0
vertical_v = 0
h_index = np.zeros(shape)
v_index = np.zeros(shape)
for i, file_name in enumerate(filelist):
        img, _, _ = read_hdf_APS(working_directory, file_name,
                                 load_summed_data=False, load_each_channel=False)
tmp_shape = img['positions']['x_pos'].shape
m = i // shape[1]
n = i % shape[1]
if n == 0:
h_step = 0
h_index[m][n] = h_step
v_index[m][n] = m * tmp_shape[0]
h_step += tmp_shape[1]
if i < shape[1]:
horizontal_v += tmp_shape[1]
if i % shape[1] == 0:
vertical_v += tmp_shape[0]
if i == 0:
out = copy.deepcopy(img)
data_tmp = np.zeros([vertical_v, horizontal_v])
for k, v in out.items():
for m, n in v.items():
v[m] = np.array(data_tmp)
for i, file_name in enumerate(filelist):
        img, _, _ = read_hdf_APS(working_directory, file_name,
                                 load_summed_data=False, load_each_channel=False)
tmp_shape = img['positions']['x_pos'].shape
m = i // shape[1]
n = i % shape[1]
        h_i = int(h_index[m][n])
        v_i = int(v_index[m][n])
keylist = ['fit', 'scaler', 'position']
for key_name in keylist:
fit_key0, = [v for v in list(out.keys()) if key_name in v]
fit_key, = [v for v in list(img.keys()) if key_name in v]
for k, v in img[fit_key].items():
out[fit_key0][k][v_i:v_i+tmp_shape[0], h_i:h_i+tmp_shape[1]] = img[fit_key][k]
return out
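# Usage sketch (illustrative only, not executed): stitching a 2 x 3 grid of scans.
# The files in 'filelist' must be ordered row by row: the first 3 files form the top
# row and the next 3 files form the bottom row. File names are hypothetical.
#
#   filelist = [f"scan2D_{n}.h5" for n in range(1000, 1006)]
#   stitched = read_hdf_to_stitch("/data/xrf", filelist, shape=(2, 3))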
def get_data_from_folder_helper(working_directory, foldername,
filename, flip_h=False):
"""
Read fitted data from given folder.
Parameters
----------
working_directory : string
overall folder path where multiple fitting results are saved
foldername : string
folder name of given fitting result
filename : string
given element
flip_h : bool
x position is saved in a wrong way, so we may want to flip left right on the data,
to be removed.
Returns
-------
2D array
"""
fpath = os.path.join(working_directory, foldername, filename)
if 'txt' in filename:
data = np.loadtxt(fpath)
elif 'tif' in filename:
data = np.array(Image.open(fpath))
# x position is saved in a wrong way
if flip_h is True:
data = np.fliplr(data)
return data
def get_data_from_multiple_folders_helper(working_directory, folderlist,
filename, flip_h=False):
"""
Read given element from fitted results in multiple folders.
Parameters
----------
working_directory : string
overall folder path where multiple fitting results are saved
folderlist : list
list of folder names saving fitting result
filename : string
given element
flip_h : bool
x position is saved in a wrong way, so we may want to flip left right on the data,
to be removed.
Returns
-------
2D array
"""
output = np.array([])
for foldername in folderlist:
result = get_data_from_folder_helper(working_directory, foldername,
filename, flip_h=flip_h)
output = np.concatenate([output, result.ravel()])
return output
def stitch_fitted_results(working_directory, folderlist, output=None):
"""
    Stitch fitted data from multiple folders. Output stitched results as 1D arrays.
Parameters
----------
working_directory : string
overall folder path where multiple fitting results are saved
folderlist : list
list of folder names saving fitting result
output : string, optional
        output folder name to save all the stitched results.
"""
# get all filenames
fpath = os.path.join(working_directory, folderlist[0], '*')
pathlist = [name for name in glob.glob(fpath)]
filelist = [name.split(sep_v)[-1] for name in pathlist]
out = {}
for filename in filelist:
if 'x_pos' in filename:
flip_h = True
else:
flip_h = False
data = get_data_from_multiple_folders_helper(working_directory, folderlist,
filename, flip_h=flip_h)
out[filename.split('.')[0]] = data
if output is not None:
outfolder = os.path.join(working_directory, output)
if os.path.exists(outfolder) is False:
os.mkdir(outfolder)
for k, v in out.items():
outpath = os.path.join(outfolder, k+'_stitched.txt')
np.savetxt(outpath, v)
return out
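# Usage sketch (illustrative only, not executed): stitching per-element TIFF/TXT maps
# saved in several output folders. Folder names are hypothetical; the keys of the
# returned dictionary are the file names without extension.
#
#   folders = ["output_scan1000", "output_scan1001"]
#   out = stitch_fitted_results("/data/xrf", folders, output="output_stitched")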
def save_fitdata_to_hdf(fpath, data_dict,
datapath='xrfmap/detsum',
data_saveas='xrf_fit',
dataname_saveas='xrf_fit_name'):
"""
Add fitting results to existing h5 file. This is to be moved to filestore.
Parameters
----------
fpath : str
path of the hdf5 file
data_dict : dict
dict of array
datapath : str
path inside h5py file
data_saveas : str, optional
name in hdf for data array
dataname_saveas : str, optional
name list in hdf to explain what the saved data mean
"""
f = h5py.File(fpath, 'a')
try:
dataGrp = f.create_group(datapath)
except ValueError:
dataGrp = f[datapath]
data = []
namelist = []
for k, v in data_dict.items():
if not isinstance(k, str):
k = k.decode()
namelist.append(k)
data.append(v)
if data_saveas in dataGrp:
del dataGrp[data_saveas]
data = np.asarray(data)
ds_data = dataGrp.create_dataset(data_saveas, data=data)
ds_data.attrs['comments'] = ' '
if dataname_saveas in dataGrp:
del dataGrp[dataname_saveas]
if not isinstance(dataname_saveas, str):
dataname_saveas = dataname_saveas.decode()
namelist = np.array(namelist).astype('|S20')
name_data = dataGrp.create_dataset(dataname_saveas, data=namelist)
name_data.attrs['comments'] = ' '
f.close()
def export_to_view(fpath, output_name=None, output_folder='', namelist=None):
"""
    Output fitted data as tabular data (CSV) for visualization.
Parameters
----------
fpath : str
input file path, file is pyxrf h5 file
output_name : str
output file name
    output_folder : str, optional
default as current working folder
namelist : list, optional
list of elemental names
"""
with h5py.File(fpath, 'r') as f:
d = f['xrfmap/detsum/xrf_fit'][:]
d = d.reshape([d.shape[0], -1])
elementlist = f['xrfmap/detsum/xrf_fit_name'][:]
elementlist = helper_decode_list(elementlist)
xy = f['xrfmap/positions/pos'][:]
xy = xy.reshape([xy.shape[0], -1])
xy_name = ['X', 'Y']
names = xy_name + elementlist
data = np.concatenate((xy, d), axis=0)
data_dict = OrderedDict()
if namelist is None:
for i, k in enumerate(names):
if 'Userpeak' in k or 'r2_adjust' in k:
continue
data_dict.update({k: data[i, :]})
else:
for i, k in enumerate(names):
if k in namelist or k in xy_name:
data_dict.update({k: data[i, :]})
df = pd.DataFrame(data_dict)
if output_name is None:
fname = fpath.split(sep_v)[-1]
output_name = fname.split('.')[0] + '_fit_view.csv'
outpath = os.path.join(output_folder, output_name)
print('{} is created.'.format(outpath))
df.to_csv(outpath, index=False)
def get_header(fname):
"""
    Helper function to extract the header from a spec file.
    .. warning :: This function works fine for the spec file format
       from the Canadian Light Source. Others may need to be tested.
Parameters
----------
fname : spec file name
"""
mydata = []
with open(fname, 'r') as f:
for v in f: # iterate the file
mydata.append(v)
            # The file is opened in text mode, so compare against a str marker
            _sign = '#'
            if _sign not in v:
break
header_line = mydata[-2] # last line is space
n = [v.strip() for v in header_line[1:].split('\t') if v.strip() != '']
return n
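# Usage sketch (illustrative only, not executed): extracting column names from a spec
# text file whose header lines start with '#'. The file path is hypothetical.
#
#   columns = get_header("/data/cls/scan_0001.txt")
#   # 'columns' holds the names from the last header line and can be passed as the
#   # 'names' argument of pandas.read_csv (see 'spec_to_hdf' below).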
def combine_data_to_recon(element_list, datalist, working_dir, norm=True,
file_prefix='scan2D_', ic_name='sclr1_ch4',
expand_r=2, internal_path='xrfmap/detsum'):
"""
Combine 2D data to 3D array for reconstruction.
Parameters
----------
element_list : list
list of elements
datalist : list
list of run number
working_dir : str
norm : bool, optional
normalization or not
file_prefix : str, optional
prefix name for h5 file
ic_name : str
ion chamber name for normalization
expand_r: int
expand initial array to a larger size to include each 2D image easily,
as each 2D image may have different size. Crop the 3D array back to a proper size in the end.
internal_path : str, optional
inside path to get fitting data in h5 file
Returns
-------
dict of 3d array with each array's shape like [num_sequences, num_row, num_col]
"""
element3d = {}
for element_name in element_list:
element3d[element_name] = None
max_h = 0
max_v = 0
for i, v in enumerate(datalist):
filename = file_prefix+str(v)+'.h5'
filepath = os.path.join(working_dir, filename)
with h5py.File(filepath, 'r+') as f:
dataset = f[internal_path]
try:
data_all = dataset['xrf_fit'][()]
data_name = dataset['xrf_fit_name'][()]
data_name = helper_decode_list(data_name)
except KeyError:
print('Need to do fitting first.')
scaler_dataset = f['xrfmap/scalers']
scaler_v = scaler_dataset['val'][()]
scaler_n = scaler_dataset['name'][()]
scaler_n = helper_decode_list(scaler_n)
data_dict = {}
for name_i, name_v in enumerate(data_name):
data_dict[name_v] = data_all[name_i, :, :]
if norm is True:
scaler_dict = {}
for s_i, s_v in enumerate(scaler_n):
scaler_dict[s_v] = scaler_v[:, :, s_i]
for element_name in element_list:
data = data_dict[element_name]
if norm is True:
normv = scaler_dict[ic_name]
data = data/normv
if element3d[element_name] is None:
element3d[element_name] = np.zeros(
[len(datalist),
data.shape[0]*expand_r,
data.shape[1]*expand_r])
element3d[element_name][i, :data.shape[0], :data.shape[1]] = data
max_h = max(max_h, data.shape[0])
max_v = max(max_v, data.shape[1])
for k, v in element3d.items():
element3d[k] = v[:, :max_h, :max_v]
return element3d
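# Usage sketch (illustrative only, not executed): combining fitted maps from a list of
# runs into 3D stacks for reconstruction. Run IDs are hypothetical and the element
# names must match the fitted line names stored in the h5 files.
#
#   stacks = combine_data_to_recon(["Fe_K", "Ca_K"], [1000, 1001, 1002],
#                                  "/data/xrf", norm=True, ic_name="sclr1_ch4")
#   stacks["Fe_K"].shape   # (num_runs, num_rows, num_cols)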
def h5file_for_recon(element_dict, angle, runid=None, filename=None):
"""
Save fitted 3d elemental data into h5 file for reconstruction use.
Parameters
----------
element_dict : dict
elements 3d data after normalization
angle : list
angle information
    runid : list, optional
run ID
filename : str
"""
if filename is None:
filename = 'xrf3d.h5'
    with h5py.File(filename, 'a') as f:
d_group = f.create_group('element_data')
for k, v in element_dict.items():
sub_g = d_group.create_group(k)
sub_g.create_dataset('data', data=np.asarray(v),
compression='gzip')
sub_g.attrs['comments'] = 'normalized fluorescence data for {}'.format(k)
angle_g = f.create_group('angle')
angle_g.create_dataset('data', data=np.asarray(angle))
angle_g.attrs['comments'] = 'angle information'
if runid is not None:
runid_g = f.create_group('runid')
runid_g.create_dataset('data', data=np.asarray(runid))
runid_g.attrs['comments'] = 'run id information'
def create_movie(data, fname='demo.mp4', dpi=100, cmap='jet',
clim=None, fig_size=(6, 8), fps=20, data_power=1, angle=None, runid=None):
"""
Transfer 3d array into a movie.
Parameters
----------
data : 3d array
data shape is [num_sequences, num_row, num_col]
fname : string, optional
name to save movie
dpi : int, optional
resolution of the movie
    cmap : string, optional
        matplotlib colormap name
clim : list, tuple, optional
[low, high] value to define plotting range
fig_size : list, tuple, optional
size (horizontal size, vertical size) of each plot
    fps : int, optional
        frames per second
    data_power : float, optional
        exponent applied to each frame before display
    angle : list, optional
        per-frame angle values, collected into a title string (the title is
        currently not drawn; see the commented-out set_title call below)
    runid : list, optional
        per-frame run IDs, handled the same way as `angle`
    """
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
im = ax.imshow(np.zeros([data.shape[1], data.shape[2]]),
cmap=cmap, interpolation='nearest')
fig.set_size_inches(fig_size)
fig.tight_layout()
def update_img(n):
tmp = data[n, :, :]
im.set_data(tmp**data_power)
if clim is not None:
im.set_clim(clim)
else:
im.set_clim([0, np.max(data[n, :, :])])
figname = ''
if runid is not None:
figname = 'runid: {} '.format(runid[n])
if angle is not None:
figname += 'angle: {}'.format(angle[n])
# if len(figname) != 0:
# im.ax.set_title(figname)
return im
# legend(loc=0)
ani = animation.FuncAnimation(fig, update_img, data.shape[0], interval=30)
writer = animation.writers['ffmpeg'](fps=fps)
ani.save(fname, writer=writer, dpi=dpi)
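# --- Editor-added usage sketch (hypothetical): render one element's stack as a
# movie. The element key and output name are placeholders, and ffmpeg must be
# installed for matplotlib's animation writer to work.
def _example_create_movie(element3d, angles=None):
    data = element3d['Fe_K']           # assumed element key
    create_movie(data, fname='Fe_K_stack.mp4', cmap='viridis',
                 fps=10, angle=angles)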
def spec_to_hdf(wd, spec_file, spectrum_file, output_file, img_shape,
ic_name=None, x_name=None, y_name=None):
"""
    Transform spec data into an hdf file that pyxrf can read. Two input files
    must be prepared by the user: spec_file and spectrum_file, described below.
    .. warning :: This function does not yet handle the spec format in general
       (the related work in suitcase should also be considered). It is known to
       work for spec files from the Canadian Light Source; other variants may
       need testing.
Parameters
----------
wd : str
working directory for spec file, and created hdf
spec_file : str
spec txt data file
spectrum_file : str
fluorescence spectrum data file
output_file : str
the output h5 file for pyxrf
    img_shape : list or array
        shape of the 2D scan, [num_row, num_col]
    ic_name : str, optional
        name of the ion chamber column in the spec file, used for normalization
    x_name : str, optional
        x position column name, listed in the spec file
    y_name : str, optional
        y position column name, listed in the spec file
"""
# read scaler data from spec file
spec_path = os.path.join(wd, spec_file)
h = get_header(spec_path)
spec_data = pd.read_csv(spec_path, names=h, sep='\t', comment='#', index_col=False)
if ic_name is not None:
scaler_name = [str(ic_name)]
scaler_val = spec_data[scaler_name].values
scaler_val = scaler_val.reshape(img_shape)
scaler_data = np.zeros([img_shape[0], img_shape[1], 1])
scaler_data[:, :, 0] = scaler_val
if x_name is not None and y_name is not None:
xy_data = np.zeros([2, img_shape[0], img_shape[1]])
xy_data[0, :, :] = spec_data[x_name].values.reshape(img_shape)
xy_data[1, :, :] = spec_data[y_name].values.reshape(img_shape)
xy_name = ['x_pos', 'y_pos']
spectrum_path = os.path.join(wd, spectrum_file)
sum_data0 = np.loadtxt(spectrum_path)
sum_data = np.reshape(sum_data0, [sum_data0.shape[0], img_shape[0], img_shape[1]])
sum_data = np.transpose(sum_data, axes=(1, 2, 0))
interpath = 'xrfmap'
fpath = os.path.join(wd, output_file)
    with h5py.File(fpath, 'a') as f:  # explicit mode for newer h5py versions
dataGrp = f.create_group(interpath+'/detsum')
ds_data = dataGrp.create_dataset('counts', data=sum_data, compression='gzip')
ds_data.attrs['comments'] = 'Experimental data from channel sum'
if ic_name is not None:
dataGrp = f.create_group(interpath+'/scalers')
dataGrp.create_dataset('name', data=helper_encode_list(scaler_name))
dataGrp.create_dataset('val', data=scaler_data)
if x_name is not None and y_name is not None:
dataGrp = f.create_group(interpath+'/positions')
dataGrp.create_dataset('name', data=helper_encode_list(xy_name))
dataGrp.create_dataset('pos', data=xy_data)
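# --- Editor-added usage sketch (hypothetical): convert a spec text file and the
# matching spectrum dump into a pyxrf-compatible h5 file. File names, the map
# shape and the column names are placeholders.
def _example_spec_to_hdf():
    spec_to_hdf('/path/to/working_dir', 'scan_0001.dat', 'spectrum_0001.txt',
                'scan_0001.h5', img_shape=[40, 60],
                ic_name='I0', x_name='samx', y_name='samy')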
def make_hdf_stitched(working_directory, filelist, fname,
shape):
"""
Read fitted results from each hdf file, stitch them together and save to
a new h5 file.
Parameters
----------
working_directory : str
folder with all the h5 files and also the place to save output
filelist : list of str
names for all the h5 files
fname : str
name of output h5 file
    shape : list or tuple
        how the h5 files are tiled: [vertical, horizontal]
"""
print('Reading data from each hdf file.')
fpath = os.path.join(working_directory, fname)
out = read_hdf_to_stitch(working_directory, filelist, shape)
result = {}
img_shape = None
for k, v in out.items():
for m, n in v.items():
if img_shape is None:
img_shape = n.shape
result[m] = n.ravel()
current_dir = os.path.dirname(os.path.realpath(__file__))
config_file = 'srx_pv_config.json'
config_path = sep_v.join(current_dir.split(sep_v)[:-2]+['configs', config_file])
with open(config_path, 'r') as json_data:
config_data = json.load(json_data)
print('Saving all the data into one hdf file.')
write_db_to_hdf(fpath, result,
img_shape,
det_list=config_data['xrf_detector'],
pos_list=('x_pos', 'y_pos'),
scaler_list=config_data['scaler_list'],
base_val=config_data['base_value']) # base value shift for ic
fitkey, = [v for v in list(out.keys()) if 'fit' in v]
save_fitdata_to_hdf(fpath, out[fitkey])
print('Done!')
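# --- Editor-added usage sketch (hypothetical): stitch four fitted h5 files into
# a 2 x 2 mosaic and write a single combined file. File names are placeholders.
def _example_make_hdf_stitched():
    files = ['scan2D_1000.h5', 'scan2D_1001.h5',
             'scan2D_1002.h5', 'scan2D_1003.h5']
    make_hdf_stitched('/path/to/working_dir', files, 'stitched.h5', shape=(2, 2))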
| {
"repo_name": "NSLS-II-HXN/PyXRF",
"path": "pyxrf/model/fileio.py",
"copies": "1",
"size": "90612",
"license": "bsd-3-clause",
"hash": -1828295308468093700,
"line_mean": 37.4927782498,
"line_max": 111,
"alpha_frac": 0.5709950117,
"autogenerated": false,
"ratio": 3.8728042056673933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.993918340436025,
"avg_score": 0.0009231626014285842,
"num_lines": 2354
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.gen
import tornado.platform.twisted
import mysql.connector
tornado.platform.twisted.install()
import toradbapi
DB_NAME = 'test_toradbapi'
DB_HOST = '127.0.0.1'
DB_PORT = 3306
DB_USER = 'root'
DB_PASSWORD = ''
class MainHandler(tornado.web.RequestHandler):
def initialize(self, db):
self.db = db
@tornado.gen.coroutine
def get(self):
people = yield self.db.run_query('SELECT `name` FROM `person`')
self.finish('<html><head><title>Demo toradbapi</title></head>'
'<body><h3>Type new name:</h3>'
'<form method="post"><input name="name" type="text"/>'
'<input type="submit" value="Submit"/></form>'
'<h3>Already inserted names:</h3>'
'<ul>%s</ul></body></html>' % ''.join(
'<li>%s</li>' % name for name in people))
@tornado.gen.coroutine
def post(self):
name = self.get_argument('name')
try:
yield self.db.run_operation(
# always use escaping functionality to avoid sql-injection
'INSERT INTO `person` (`name`) VALUES (%s)',
(name,))
except mysql.connector.errors.DatabaseError as e:
self.finish('<html><head><title>Demo toradbapi</title></head>'
'<body><h3>Error inserting new name: %s</h3>'
'<br/><a href="/">Main page</a></body></html>' % e)
else:
self.redirect('/')
def setup_database():
# just to ensure that database and table exist
cnx = mysql.connector.connect(
user=DB_USER, passwd=DB_PASSWORD, host=DB_HOST, port=DB_PORT)
cursor = cnx.cursor()
try:
cursor.execute('CREATE DATABASE `%s` CHARACTER SET utf8' % DB_NAME)
except mysql.connector.errors.DatabaseError:
# do nothing if database exists
pass
cursor.execute('USE `%s`' % DB_NAME)
cursor.execute('CREATE TABLE IF NOT EXISTS `person` ('
' `name` varchar(255) NOT NULL,'
' `dob` date DEFAULT NULL,'
' UNIQUE KEY (`name`))')
cursor.close()
cnx.close()
def main():
setup_database()
tornado.options.parse_command_line()
pool = toradbapi.ConnectionPool(
'mysql.connector', database=DB_NAME, user=DB_USER, port=DB_PORT,
password=DB_PASSWORD, host=DB_HOST)
application = tornado.web.Application([(r'/', MainHandler, {'db': pool})])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888, '127.0.0.1')
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
finally:
pool.close()
tornado.ioloop.IOLoop.instance().stop()
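# --- Editor-added usage sketch (hypothetical, Python 3): with the demo server
# running locally ("python demo.py") and a MySQL server reachable at
# DB_HOST:DB_PORT, insert a name over HTTP instead of using the browser form.
def _example_insert_name(name='Alice'):
    from urllib.parse import urlencode
    from urllib.request import urlopen
    data = urlencode({'name': name}).encode('utf-8')
    # the demo server listens on 127.0.0.1:8888 (see main() above)
    return urlopen('http://127.0.0.1:8888/', data=data).read()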
if __name__ == '__main__':
main()
| {
"repo_name": "geerk/toradbapi",
"path": "demo.py",
"copies": "1",
"size": "3036",
"license": "mit",
"hash": 7133691057713358000,
"line_mean": 31.2978723404,
"line_max": 78,
"alpha_frac": 0.5793807642,
"autogenerated": false,
"ratio": 3.8625954198473282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4941976184047328,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import unittest
from datacheck import validate
from datacheck.exceptions import TypeValidationError
class TestList(unittest.TestCase):
def test_list_ok(self):
input = [123, 456]
result = validate(input, [int])
self.assertIsNot(result, input, 'input list should not be modified')
self.assertEqual(result, input, 'input should be copied into result')
def test_list_type_error(self):
expected_msg = r'<unnamed field>: Expected list, got int \(123\)'
with self.assertRaisesRegexp(TypeValidationError, expected_msg):
validate(123, [])
def test_list_validation_error(self):
expected_msg = r'<unnamed list>\[2\]: Expected int, got float \(0.9\)'
with self.assertRaisesRegexp(TypeValidationError, expected_msg):
validate([1, 1, 0.9, 1], [int])
def test_list_of_list_ok(self):
input = [[123, 456], []]
result = validate(input, [[int]])
self.assertIsNot(result, input, 'input list should not be modified')
        for i in range(len(input)):
            self.assertIsNot(input[i], result[i],
                             'nested input lists should not be modified')
self.assertEqual(result, input, 'input should be copied into result')
def test_list_of_list_validation_error(self):
input = [[123, 456], [1, 0.8, 2]]
expected_msg = r'<unnamed list>\[1\]\[1\]: Expected int, got float \(0.8\)'
with self.assertRaisesRegexp(TypeValidationError, expected_msg):
validate(input, [[int]])
| {
"repo_name": "csdev/datacheck",
"path": "tests/functional_tests/test_list.py",
"copies": "1",
"size": "1657",
"license": "mit",
"hash": -2232977457908937700,
"line_mean": 38.4523809524,
"line_max": 83,
"alpha_frac": 0.624019312,
"autogenerated": false,
"ratio": 4.021844660194175,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5145863972194175,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import unittest
from mock import NonCallableMagicMock, patch, call
from datacheck.core import Dict, Required, Optional
from datacheck.exceptions import (TypeValidationError, FieldValidationError,
UnknownKeysError)
class TestDict(unittest.TestCase):
def test_dict_ok(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'key1': mock_validator,
'key2': mock_validator,
})
input_dict = {
'key1': 'value1',
'key2': 'value2'
}
# simulate underlying validator that passes all fields through
# without validation errors
def mock_validate(data, schema, **kwargs):
return data
with patch('datacheck.core._validate', side_effect=mock_validate) \
as mock_validate:
result = dict_validator.validate(input_dict)
expected_calls = [
call('value1', mock_validator, path=['key1']),
call('value2', mock_validator, path=['key2']),
]
mock_validate.assert_has_calls(expected_calls, any_order=True)
self.assertIsNot(result, input_dict,
'input dict should not be modified')
self.assertEqual(result, input_dict,
'input should be copied into result')
def test_dict_type_error(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({'mykey': mock_validator})
input_dict = 123 # not a dict
with self.assertRaises(TypeValidationError) as ctx:
dict_validator.validate(input_dict, path=['mydict'])
e = ctx.exception
self.assertEqual(e.expected_type, dict)
self.assertEqual(e.actual_type, int)
self.assertEqual(e.actual_value, input_dict)
self.assertEqual(e.path, ['mydict'])
def test_dict_field_error(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'key1': mock_validator,
})
input_dict = {}
with self.assertRaises(FieldValidationError) as ctx:
dict_validator.validate(input_dict, path=['mydict'])
e = ctx.exception
self.assertEqual(e.expected_field, 'key1')
self.assertEqual(e.path, ['mydict'])
def test_dict_required_ok(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'key1': Required(mock_validator),
})
input_dict = {
'key1': 'value1',
}
# simulate underlying validator that passes all fields through
# without validation errors
def mock_validate(data, schema, **kwargs):
return data
with patch('datacheck.core._validate', side_effect=mock_validate) \
as mock_validate:
result = dict_validator.validate(input_dict, path=['mydict'])
self.assertIsNot(result, input_dict,
'input dict should not be modified')
self.assertEqual(result, input_dict,
'input should be copied into result')
def test_dict_required_error(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'key1': Required(mock_validator),
})
input_dict = {}
with self.assertRaises(FieldValidationError) as ctx:
dict_validator.validate(input_dict, path=['mydict'])
e = ctx.exception
self.assertEqual(e.expected_field, 'key1')
self.assertEqual(e.path, ['mydict'])
def test_dict_optional_ok(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'key1': Optional(mock_validator),
})
input_dict = {}
result = dict_validator.validate(input_dict, path=['mydict'])
self.assertIsNot(result, input_dict,
'input dict should not be modified')
self.assertEqual(result, {})
def test_dict_optional_default_ok(self):
mock_validator = NonCallableMagicMock()
dict_validator = Dict({
'foo': Optional(mock_validator).default('bar'),
})
input_dict = {}
result = dict_validator.validate(input_dict, path=['mydict'])
self.assertIsNot(result, input_dict,
'input dict should not be modified')
self.assertEqual(result, {'foo': 'bar'})
def test_dict_unknown_keys_ok(self):
dict_validator = Dict({}, allow_unknown=True)
input_dict = {
'foo': 'bar',
}
result = dict_validator.validate(input_dict, path=['mydict'])
self.assertIsNot(result, input_dict,
'input dict should not be modified')
self.assertEqual(result, {'foo': 'bar'})
def test_dict_unknown_keys_error(self):
dict_validator = Dict({})
input_dict = {
'foo': 'bar',
}
with self.assertRaises(UnknownKeysError) as ctx:
dict_validator.validate(input_dict, path=['mydict'])
e = ctx.exception
self.assertEqual(len(e.unknown_keys), 1)
self.assertIn('foo', e.unknown_keys)
self.assertEqual(e.path, ['mydict'])
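class TestDictUsageSketch(unittest.TestCase):
    """Editor-added sketch (not part of the original suite): the behaviour
    mocked above, exercised end-to-end through the public validate() entry
    point with a plain dict schema. Key names and the default value are
    illustrative only."""
    def test_plain_schema_with_optional_default(self):
        from datacheck import validate
        schema = {
            'name': str,                          # plain value: required field
            'retries': Optional(int).default(3),  # filled in when the key is missing
        }
        result = validate({'name': 'job1'}, schema)
        self.assertEqual(result, {'name': 'job1', 'retries': 3})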
| {
"repo_name": "csdev/datacheck",
"path": "tests/unit_tests/test_dict.py",
"copies": "1",
"size": "5391",
"license": "mit",
"hash": -4375121416757386000,
"line_mean": 31.6727272727,
"line_max": 76,
"alpha_frac": 0.5811537748,
"autogenerated": false,
"ratio": 4.330120481927711,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5411274256727711,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
def filter_path(movement, paths, time_threshold):
r"""
Return a list object containing start and end indices for
movements lasting equal to or longer than the specified time
threshold
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
paths: list
a list containing the indices for all paths
time_threshold : float
positive number indicating the time threshold
Returns
-------
    pass_paths : list
        start and end indices of the paths whose durations are greater than
        or equal to the time threshold
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_index(movement, 1, 1)
>>> filter_path(movement, paths, 20)
    [[26754, 26897], [28538, 28627]]
"""
    # the time threshold must be a positive number
    if time_threshold <= 0:
        raise ValueError("Input values need to be positive")
    # paths whose durations meet or exceed the time threshold
    pass_paths = []
# Pull out time variable
T = movement['t']
# Run through each path and check whether the time spending
# on the path is equal to or larger than the time threshold
for path in paths:
start_time, end_time = T[path].ravel()
if (end_time - start_time) >= time_threshold:
pass_paths.append(path)
return pass_paths
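# --- Editor-added usage sketch (hypothetical): run filter_path on a small
# synthetic movement frame instead of the real mousestyles loader; the function
# only reads the 't' column.
def _example_filter_path():
    import pandas as pd
    movement = pd.DataFrame({'t': [0.0, 5.0, 10.0, 40.0, 41.0],
                             'x': [0, 1, 2, 3, 4],
                             'y': [0, 0, 1, 1, 2]})
    paths = [[0, 2], [2, 4]]   # assumed [start, end] index pairs
    # only the second path lasts at least 20 time units -> [[2, 4]]
    return filter_path(movement, paths, time_threshold=20)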
| {
"repo_name": "togawa28/mousestyles",
"path": "mousestyles/path_diversity/filter_path.py",
"copies": "3",
"size": "1618",
"license": "bsd-2-clause",
"hash": 4036271716335366000,
"line_mean": 30.1153846154,
"line_max": 70,
"alpha_frac": 0.6452410383,
"autogenerated": false,
"ratio": 4.349462365591398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6494703403891398,
"avg_score": null,
"num_lines": null
} |