Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---|---|
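Each row pairs a Python function in which one `except` clause has been masked with `__HOLE__`; the `label` column names the exception class that fills the hole, and `info` points to the source file and symbol in the ETHPy150Open corpus. A minimal loading sketch, assuming a hypothetical CSV export of this table (the file name is an assumption):

```python
# Minimal sketch, assuming a hypothetical CSV export of this table.
import pandas as pd

df = pd.read_csv("eth_py150_holes.csv")  # hypothetical file name
row = df.iloc[0]
print(row["label"])     # e.g. 'ValidationError'
print(row["function"])  # source code containing the __HOLE__ marker
# The task: predict `label` from `function`.
```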
5,900 |
def validate_url(url):
    if not url.count('://'):
        url = "http://" + url
    validate = URLValidator(verify_exists=True)
    try:
        validate(url)
        return True
    except __HOLE__:
        return False
|
ValidationError
|
dataset/ETHPy150Open haystack/eyebrowse-server/common/view_helpers.py/validate_url
|
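For reference, Django's `URLValidator` raises `ValidationError` on malformed input, which is why that label fills the hole. A small sketch, assuming Django is installed (note the `verify_exists` flag used in the row only exists in old Django versions):

```python
# Sketch of the labeled behavior, assuming Django is installed.
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator

try:
    URLValidator()("not a url")
except ValidationError:
    print("URLValidator raises ValidationError for malformed URLs")
```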
5,901 |
def get_config(self):
    try:
        return self.config
    except __HOLE__:
        pass
    self.config = ConfigParser.ConfigParser()
    path = self.ask('Path to the config?', '~/.pyplease')
    path = self.normalize_path(path)
    self.note('Using "%s" as config' % path)
    self.config.read([path])
    self.config_path = path
    return self.config
|
AttributeError
|
dataset/ETHPy150Open va1en0k/please/pyplease/modules/please.py/Module.get_config
|
5,902 |
def _validate(self, value):
    try:
        if value != int(value):
            raise ValueError()
        value = int(value)
    except __HOLE__:
        raise ValueError("%s must be an integer" % self.label)
    if value < self.min:
        raise ValueError("%s must be >= %i" % (self.label, self.min))
    if value > self.max:
        raise ValueError("%s must be <= %i" % (self.label, self.max))
    return value
|
ValueError
|
dataset/ETHPy150Open glue-viz/glue/glue/core/simpleforms.py/IntOption._validate
|
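Both failure paths in the row above funnel into `ValueError`: `int()` on a non-numeric string raises it, and the explicit `raise ValueError()` covers non-integral floats. A standalone illustration:

```python
# Both paths that the masked clause catches end in ValueError.
try:
    int("abc")  # non-numeric string
except ValueError:
    print("int('abc') -> ValueError")

value = 2.5
print(value != int(value))  # True: a non-integral float trips the explicit raise
```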
5,903 |
def pop_command(self, id_):
    self.acquire()
    try:
        try:
            return self.commands.pop(id_)
        except __HOLE__:
            return None
    finally:
        self.release()
|
KeyError
|
dataset/ETHPy150Open Skype4Py/Skype4Py/Skype4Py/api/__init__.py/SkypeAPIBase.pop_command
|
5,904 |
def load_config():
    """ Returns the config information """
    # This is global
    _config_file = '{}/config.yaml'.format(os.getenv('POCS', '/var/panoptes/POCS'))
    _local_config_file = '{}/config_local.yaml'.format(os.getenv('POCS', '/var/panoptes/POCS'))
    _config = dict()
    # Load the global config
    try:
        with open(_config_file, 'r') as f:
            _config.update(yaml.load(f.read()))
    except IOError as err:
        warnings.warn('Cannot open config file. Please make sure $POCS environment variable is set: {}'.format(err))
    # If there is a local config load that
    try:
        with open(_local_config_file, 'r') as f:
            _config.update(yaml.load(f.read()))
    except __HOLE__ as err:
        pass
    return _config
|
IOError
|
dataset/ETHPy150Open panoptes/POCS/panoptes/utils/config.py/load_config
|
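The masked clause mirrors the first handler: on Python 2, `open()` on a missing path raises `IOError` (on Python 3 it is `FileNotFoundError`, and `IOError` is an alias of `OSError`). A quick check:

```python
# IOError on a missing file; on Python 3, IOError is an alias of OSError.
try:
    open("/no/such/config_local.yaml")
except IOError as err:
    print(type(err).__name__, err.errno)
```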
5,905 |
def test_nC_nP_nT():
    from sympy.utilities.iterables import (
        multiset_permutations, multiset_combinations, multiset_partitions,
        partitions, subsets, permutations)
    from sympy.functions.combinatorial.numbers import (
        nP, nC, nT, stirling, _multiset_histogram, _AOP_product)
    from sympy.combinatorics.permutations import Permutation
    from sympy.core.numbers import oo
    from random import choice
    c = string.ascii_lowercase
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nP(s, i)
                tot += check
                assert len(list(multiset_permutations(s, i))) == check
                if u:
                    assert nP(len(s), i) == check
            assert nP(s) == tot
        except __HOLE__:
            print(s, i, 'failed perm test')
            raise ValueError()
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nC(s, i)
                tot += check
                assert len(list(multiset_combinations(s, i))) == check
                if u:
                    assert nC(len(s), i) == check
            assert nC(s) == tot
            if u:
                assert nC(len(s)) == tot
        except AssertionError:
            print(s, i, 'failed combo test')
            raise ValueError()
    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(i, j)
            tot += check
            assert sum(1 for p in partitions(i, j, size=True) if p[0] == j) == check
        assert nT(i) == tot
    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(range(i), j)
            tot += check
            assert len(list(multiset_partitions(list(range(i)), j))) == check
        assert nT(range(i)) == tot
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(1, 8):
                check = nT(s, i)
                tot += check
                assert len(list(multiset_partitions(s, i))) == check
                if u:
                    assert nT(range(len(s)), i) == check
            if u:
                assert nT(range(len(s))) == tot
            assert nT(s) == tot
        except AssertionError:
            print(s, i, 'failed partition test')
            raise ValueError()
    # tests for Stirling numbers of the first kind that are not tested in the
    # above
    assert [stirling(9, i, kind=1) for i in range(11)] == [
        0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
    perms = list(permutations(range(4)))
    assert [sum(1 for p in perms if Permutation(p).cycles == i)
            for i in range(5)] == [0, 6, 11, 6, 1] == [
        stirling(4, i, kind=1) for i in range(5)]
    # http://oeis.org/A008275
    assert [stirling(n, k, signed=1)
            for n in range(10) for k in range(1, n + 1)] == [
        1, -1,
        1, 2, -3,
        1, -6, 11, -6,
        1, 24, -50, 35, -10,
        1, -120, 274, -225, 85, -15,
        1, 720, -1764, 1624, -735, 175, -21,
        1, -5040, 13068, -13132, 6769, -1960, 322, -28,
        1, 40320, -109584, 118124, -67284, 22449, -4536, 546, -36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
    assert [stirling(n, k, kind=1)
            for n in range(10) for k in range(n + 1)] == [
        1,
        0, 1,
        0, 1, 1,
        0, 2, 3, 1,
        0, 6, 11, 6, 1,
        0, 24, 50, 35, 10, 1,
        0, 120, 274, 225, 85, 15, 1,
        0, 720, 1764, 1624, 735, 175, 21, 1,
        0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1,
        0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    assert [stirling(n, k, kind=2)
            for n in range(10) for k in range(n + 1)] == [
        1,
        0, 1,
        0, 1, 1,
        0, 1, 3, 1,
        0, 1, 7, 6, 1,
        0, 1, 15, 25, 10, 1,
        0, 1, 31, 90, 65, 15, 1,
        0, 1, 63, 301, 350, 140, 21, 1,
        0, 1, 127, 966, 1701, 1050, 266, 28, 1,
        0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1]
    assert stirling(3, 4, kind=1) == stirling(3, 4, kind=1) == 0
    raises(ValueError, lambda: stirling(-2, 2))

    def delta(p):
        if len(p) == 1:
            return oo
        return min(abs(i[0] - i[1]) for i in subsets(p, 2))
    parts = multiset_partitions(range(5), 3)
    d = 2
    assert (sum(1 for p in parts if all(delta(i) >= d for i in p)) ==
            stirling(5, 3, d=d) == 7)
    # other coverage tests
    assert nC('abb', 2) == nC('aab', 2) == 2
    assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
    assert nP(3, 4) == 0
    assert nP('aabc', 5) == 0
    assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
        len(list(multiset_combinations('aabbccdd', 2))) == 10
    assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
    assert nC(list('abcdd'), 4) == 4
    assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
    assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
    assert nC('aabb'*3, 3) == 4  # aaa, bbb, abb, baa
    assert dict(_AOP_product((4,1,1,1))) == {
        0: 1, 1: 4, 2: 7, 3: 8, 4: 8, 5: 7, 6: 4, 7: 1}
    # the following was the first t that showed a problem in a previous form of
    # the function, so it's not as random as it may appear
    t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
    assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
    raises(ValueError, lambda: _multiset_histogram({1:'a'}))
|
AssertionError
|
dataset/ETHPy150Open sympy/sympy/sympy/functions/combinatorial/tests/test_comb_numbers.py/test_nC_nP_nT
|
5,906 |
def _debug_body(self, body, headers):
    try:
        ctype = headers['content-type']
    except __HOLE__:
        ctype = None
    if ctype is not None and ctype[:5] == 'text/':
        self.logger.debug("Body:")
        for line in str(body).split('\n'):
            self.logger.debug(" %s" % line)
    else:
        self.logger.debug("Body: non-textual content (Content-Type: %s). Not logged." % ctype)
|
AttributeError
|
dataset/ETHPy150Open versionone/VersionOne.SDK.Python/v1pysdk/client.py/V1Server._debug_body
|
5,907 |
def fetch(self, path, query='', postdata=None):
    "Perform an HTTP GET or POST depending on whether postdata is present"
    url = self.build_url(path, query=query)
    self.logger.debug("URL: %s" % url)
    try:
        if postdata is not None:
            if isinstance(postdata, dict):
                postdata = urlencode(postdata)
                self.logger.debug("postdata: %s" % postdata)
            response = self.http_post(url, postdata)
        else:
            response = self.http_get(url)
        body = response.read()
        self._debug_headers(response.headers)
        self._debug_body(body, response.headers)
        return (None, body)
    except __HOLE__, e:
        if e.code == 401:
            raise
        body = e.fp.read()
        self._debug_headers(e.headers)
        self._debug_body(body, e.headers)
        return (e, body)
|
HTTPError
|
dataset/ETHPy150Open versionone/VersionOne.SDK.Python/v1pysdk/client.py/V1Server.fetch
|
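The hole is constrained by the attributes the handler uses: among the exceptions `urlopen` raises, only `HTTPError` carries `.code`, `.fp`, and `.headers`. A Python 3 sketch of the same behavior (the class lives in `urllib.error` there; the URL is a placeholder and needs network access):

```python
# Python 3 sketch: HTTPError exposes .code, .fp and .headers as used above.
from urllib.error import HTTPError
from urllib.request import urlopen

try:
    urlopen("http://example.com/missing")  # any URL returning an error status
except HTTPError as e:
    print(e.code)                     # e.g. 404
    body = e.fp.read()                # the error response body
    print(e.headers.get("Content-Type"))
```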
5,908 |
@frappe.whitelist(allow_guest=True)
def accept():
    args = frappe.form_dict
    files = []
    web_form = frappe.get_doc("Web Form", args.web_form)
    if args.doctype != web_form.doc_type:
        frappe.throw(_("Invalid Request"))
    elif args.name and not web_form.allow_edit:
        frappe.throw(_("You are not allowed to update this Web Form Document"))
    if args.name:
        # update
        doc = frappe.get_doc(args.doctype, args.name)
    else:
        # insert
        doc = frappe.new_doc(args.doctype)
    # set values
    for fieldname, value in args.iteritems():
        if fieldname not in ("web_form", "cmd", "owner"):
            if value and value.startswith("{"):
                try:
                    filedata = json.loads(value)
                    if "__file_attachment" in filedata:
                        files.append((fieldname, filedata))
                        continue
                except __HOLE__:
                    pass
            doc.set(fieldname, value)
    if args.name:
        if has_web_form_permission(doc.doctype, doc.name, "write"):
            doc.save(ignore_permissions=True)
        else:
            # only if permissions are present
            doc.save()
    else:
        # insert
        if web_form.login_required and frappe.session.user=="Guest":
            frappe.throw(_("You must login to submit this form"))
        doc.insert(ignore_permissions = True)
    # add files
    if files:
        for f in files:
            fieldname, filedata = f
            # remove earlier attached file (if exists)
            if doc.get(fieldname):
                remove_file_by_url(doc.get(fieldname), doc.doctype, doc.name)
            # save new file
            filedoc = save_file(filedata["filename"], filedata["dataurl"],
                doc.doctype, doc.name, decode=True)
            # update values
            doc.set(fieldname, filedoc.file_url)
        doc.save()
|
ValueError
|
dataset/ETHPy150Open frappe/frappe/frappe/website/doctype/web_form/web_form.py/accept
|
5,909 |
def profile_start_response(self, app, environ, start_response):
    """Collect and store statistics for a single request.
    Use this method from middleware in place of the standard
    request-serving pattern. Do:
        profiler = RequestProfiler(...)
        return profiler(app, environ, start_response)
    Instead of:
        return app(environ, start_response)
    Depending on the mode, this method gathers timing information
    and an execution profile and stores them in the datastore for
    later access.
    """
    # Always track simple start/stop time.
    self.start = time.time()
    if self.mode == Mode.SIMPLE:
        # Detailed recording is disabled.
        result = app(environ, start_response)
        for value in result:
            yield value
    else:
        # Add logging handler
        handler = RequestProfiler.create_handler()
        logging.getLogger().addHandler(handler)
        if Mode.is_rpc_enabled(self.mode):
            # Turn on AppStats monitoring for this request
            # Note that we don't import appstats_profiler at the top of
            # this file so we don't bring in a lot of imports for users who
            # don't have the profiler enabled.
            from . import appstats_profiler
            self.appstats_prof = appstats_profiler.Profile()
            app = self.appstats_prof.wrap(app)
        # By default, we create a placeholder wrapper function that
        # simply calls whatever function it is passed as its first
        # argument.
        result_fxn_wrapper = lambda fxn: fxn()
        # TODO(kamens): both sampling_profiler and instrumented_profiler
        # could subclass the same class. Then they'd both be guaranteed to
        # implement run(), and the following if/else could be simplified.
        if Mode.is_sampling_enabled(self.mode):
            # Turn on sampling profiling for this request.
            # Note that we don't import sampling_profiler at the top of
            # this file so we don't bring in a lot of imports for users who
            # don't have the profiler enabled.
            from . import sampling_profiler
            if Mode.is_memory_sampling_enabled(self.mode):
                self.sampling_prof = sampling_profiler.Profile(
                    memory_sample_rate=25)
            else:
                self.sampling_prof = sampling_profiler.Profile()
            result_fxn_wrapper = self.sampling_prof.run
        elif Mode.is_linebyline_enabled(self.mode):
            from . import linebyline_profiler
            self.linebyline_prof = linebyline_profiler.Profile()
            result_fxn_wrapper = self.linebyline_prof.run
        elif Mode.is_instrumented_enabled(self.mode):
            # Turn on cProfile instrumented profiling for this request
            # Note that we don't import instrumented_profiler at the top of
            # this file so we don't bring in a lot of imports for users who
            # don't have the profiler enabled.
            from . import instrumented_profiler
            self.instrumented_prof = instrumented_profiler.Profile()
            result_fxn_wrapper = self.instrumented_prof.run
        # Get wsgi result
        result = result_fxn_wrapper(lambda: app(environ, start_response))
        # If we're dealing w/ a generator, profile all of the .next calls as well
        if type(result) == GeneratorType:
            while True:
                try:
                    yield result_fxn_wrapper(result.next)
                except __HOLE__:
                    break
        else:
            for value in result:
                yield value
        logging.getLogger().removeHandler(handler)
        self.logs = self.get_logs(handler)
        handler.stream.close()
    self.end = time.time()
    # Store stats for later access
    RequestStats(self, environ).store()
|
StopIteration
|
dataset/ETHPy150Open gae-init/gae-init-debug/main/libx/gae_mini_profiler/profiler.py/RequestProfiler.profile_start_response
|
5,910 |
def _tuple_from_version(version):
    def _intify(s):
        try:
            return int(s)
        except __HOLE__:
            return s
    return tuple(_intify(b) for b in version.split('.'))
|
ValueError
|
dataset/ETHPy150Open an0/Letterpress/code/markdown2/tools/cutarelease.py/_tuple_from_version
|
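A usage sketch of the row above: only dotted components that fail `int()` are kept as strings.

```python
# Pure-Python illustration of the version-splitting behavior.
def _intify(s):
    try:
        return int(s)
    except ValueError:
        return s

print(tuple(_intify(b) for b in "1.0.2rc1".split('.')))  # (1, 0, '2rc1')
```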
5,911 |
def _version_from_version_info(version_info):
    v = str(version_info[0])
    state_dot_join = True
    for i in version_info[1:]:
        if state_dot_join:
            try:
                int(i)
            except __HOLE__:
                state_dot_join = False
            else:
                pass
        if state_dot_join:
            v += "." + str(i)
        else:
            v += str(i)
    return v
|
ValueError
|
dataset/ETHPy150Open an0/Letterpress/code/markdown2/tools/cutarelease.py/_version_from_version_info
|
5,912 |
def _version_info_from_version(version):
    m = _version_re.match(version)
    if not m:
        raise Error("could not convert '%s' version to version info" % version)
    version_info = []
    for g in m.groups():
        if g is None:
            break
        try:
            version_info.append(int(g))
        except __HOLE__:
            version_info.append(g)
    return tuple(version_info)
|
ValueError
|
dataset/ETHPy150Open an0/Letterpress/code/markdown2/tools/cutarelease.py/_version_info_from_version
|
5,913 |
def parse_changelog(changes_path):
    """Parse the given changelog path and return `(content, parsed, nyr)`
    where `nyr` is the ' (not yet released)' marker and `parsed` looks like:
        [{'body': u'\n(nothing yet)\n\n',
          'verline': u'restify 1.0.1 (not yet released)',
          'version': u'1.0.1'},  # version is parsed out for top section only
         {'body': u'...',
          'verline': u'1.0.0'},
         {'body': u'...',
          'verline': u'1.0.0-rc2'},
         {'body': u'...',
          'verline': u'1.0.0-rc1'}]
    A changelog (CHANGES.md) is expected to look like this:
        # $project Changelog
        ## $next_version (not yet released)
        ...
        ## $version1
        ...
        ## $version2
        ... and so on
    The version lines are enforced as follows:
    - The top entry should have a " (not yet released)" suffix. "Should"
      because recovery from half-cutarelease failures is supported.
    - A version string must be extractable from there, but it tries to
      be loose (though strict "X.Y.Z" versioning is preferred). Allowed:
        ## 1.0.0
        ## my project 1.0.1
        ## foo 1.2.3-rc2
      Basically, (a) the " (not yet released)" is stripped, (b) the
      last token is the version, and (c) that version must start with
      a digit (sanity check).
    """
    if not exists(changes_path):
        raise Error("changelog file '%s' not found" % changes_path)
    content = codecs.open(changes_path, 'r', 'utf-8').read()
    parser = re.compile(
        r'^##\s*(?P<verline>[^\n]*?)\s*$(?P<body>.*?)(?=^##|\Z)',
        re.M | re.S)
    sections = parser.findall(content)
    # Sanity checks on changelog format.
    if not sections:
        template = "## 1.0.0 (not yet released)\n\n(nothing yet)\n"
        raise Error("changelog '%s' must have at least one section, "
                    "suggestion:\n\n%s" % (changes_path, _indent(template)))
    first_section_verline = sections[0][0]
    nyr = ' (not yet released)'
    #if not first_section_verline.endswith(nyr):
    #    eg = "## %s%s" % (first_section_verline, nyr)
    #    raise Error("changelog '%s' top section must end with %r, "
    #        "naive e.g.: '%s'" % (changes_path, nyr, eg))
    items = []
    for i, section in enumerate(sections):
        item = {
            "verline": section[0],
            "body": section[1]
        }
        if i == 0:
            # We only bother to pull out 'version' for the top section.
            verline = section[0]
            if verline.endswith(nyr):
                verline = verline[0:-len(nyr)]
            version = verline.split()[-1]
            try:
                int(version[0])
            except __HOLE__:
                msg = ''
                if version.endswith(')'):
                    msg = " (cutarelease is picky about the trailing %r " \
                          "on the top version line. Perhaps you misspelled " \
                          "that?)" % nyr
                raise Error("changelog '%s' top section version '%s' is "
                            "invalid: first char isn't a number%s"
                            % (changes_path, version, msg))
            item["version"] = version
        items.append(item)
    return content, items, nyr

## {{{ http://code.activestate.com/recipes/577058/ (r2)
|
ValueError
|
dataset/ETHPy150Open an0/Letterpress/code/markdown2/tools/cutarelease.py/parse_changelog
|
5,914 |
def get_pdf(html, options=None):
    html = scrub_urls(html)
    html, options = prepare_options(html, options)
    fname = os.path.join("/tmp", "frappe-pdf-{0}.pdf".format(frappe.generate_hash()))
    try:
        pdfkit.from_string(html, fname, options=options or {})
        with open(fname, "rb") as fileobj:
            filedata = fileobj.read()
    except __HOLE__, e:
        if ("ContentNotFoundError" in e.message
                or "ContentOperationNotPermittedError" in e.message
                or "UnknownContentError" in e.message
                or "RemoteHostClosedError" in e.message):
            # allow pdfs with missing images if file got created
            if os.path.exists(fname):
                with open(fname, "rb") as fileobj:
                    filedata = fileobj.read()
            else:
                frappe.throw(_("PDF generation failed because of broken image links"))
        else:
            raise
    finally:
        cleanup(fname, options)
    return filedata
|
IOError
|
dataset/ETHPy150Open frappe/frappe/frappe/utils/pdf.py/get_pdf
|
5,915 |
def compile(self, sources,
            output_dir=None, macros=None, include_dirs=None, debug=0,
            extra_preargs=None, extra_postargs=None, depends=None):
    macros, objects, extra_postargs, pp_opts, build = \
        self._setup_compile(output_dir, macros, include_dirs, sources,
                            depends, extra_postargs)
    compile_opts = extra_preargs or []
    compile_opts.append('-c')
    if debug:
        compile_opts.extend(self.compile_options_debug)
    else:
        compile_opts.extend(self.compile_options)
    for obj in objects:
        try:
            src, ext = build[obj]
        except __HOLE__:
            continue
        # XXX why do the normpath here?
        src = os.path.normpath(src)
        obj = os.path.normpath(obj)
        # XXX _setup_compile() did a mkpath() too but before the normpath.
        # Is it possible to skip the normpath?
        self.mkpath(os.path.dirname(obj))
        if ext == '.res':
            # This is already a binary file -- skip it.
            continue  # the 'for' loop
        if ext == '.rc':
            # This needs to be compiled to a .res file -- do it now.
            try:
                self.spawn(["brcc32", "-fo", obj, src])
            except DistutilsExecError, msg:
                raise CompileError, msg
            continue  # the 'for' loop
        # The next two are both for the real compiler.
        if ext in self._c_extensions:
            input_opt = ""
        elif ext in self._cpp_extensions:
            input_opt = "-P"
        else:
            # Unknown file type -- no extra options. The compiler
            # will probably fail, but let it just in case this is a
            # file the compiler recognizes even if we don't.
            input_opt = ""
        output_opt = "-o" + obj
        # Compiler command line syntax is: "bcc32 [options] file(s)".
        # Note that the source file names must appear at the end of
        # the command line.
        try:
            self.spawn([self.cc] + compile_opts + pp_opts +
                       [input_opt, output_opt] +
                       extra_postargs + [src])
        except DistutilsExecError, msg:
            raise CompileError, msg
    return objects

# compile ()
|
KeyError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/distutils/bcppcompiler.py/BCPPCompiler.compile
|
5,916 |
def _perform_request(self, request_type, resource, **kwargs):
    '''
    Utility method that performs all requests.
    '''
    request_type_methods = set(["get", "post", "put", "delete"])
    if request_type not in request_type_methods:
        raise Exception("Unknown request type. Supported request types are"
                        ": {0}".format(", ".join(request_type_methods)))
    uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
    # set a timeout, just to be safe
    kwargs["timeout"] = self.timeout
    response = getattr(self.session, request_type)(uri, **kwargs)
    # handle errors
    if response.status_code not in (200, 202):
        _raise_for_status(response)
    # when responses have no content body (ie. delete, set_permission),
    # simply return the whole response
    if not response.text:
        return response
    # for other request types, return most useful data
    content_type = response.headers.get('content-type').strip().lower()
    if re.match(r'application\/json;\s*charset=utf-8', content_type):
        return response.json()
    elif re.match(r'text\/csv;\s*charset=utf-8', content_type):
        csv_stream = StringIO(response.text)
        return [line for line in csv.reader(csv_stream)]
    elif re.match(r'application\/rdf\+xml;\s*charset=utf-8', content_type):
        return response.content
    elif re.match(r'text\/plain;\s*charset=utf-8', content_type):
        try:
            return json.loads(response.text)
        except __HOLE__:
            return response.text
    else:
        raise Exception("Unknown response format: {0}"
                        .format(content_type))
|
ValueError
|
dataset/ETHPy150Open xmunoz/sodapy/sodapy/__init__.py/Socrata._perform_request
|
5,917 |
def _raise_for_status(response):
    '''
    Custom raise_for_status with more appropriate error message.
    '''
    http_error_msg = ""
    if 400 <= response.status_code < 500:
        http_error_msg = "{0} Client Error: {1}".format(response.status_code,
                                                        response.reason)
    elif 500 <= response.status_code < 600:
        http_error_msg = "{0} Server Error: {1}".format(response.status_code,
                                                        response.reason)
    if http_error_msg:
        try:
            more_info = response.json().get("message")
        except __HOLE__:
            more_info = None
        if more_info and more_info.lower() != response.reason.lower():
            http_error_msg += ".\n\t{0}".format(more_info)
        raise requests.exceptions.HTTPError(http_error_msg, response=response)
|
ValueError
|
dataset/ETHPy150Open xmunoz/sodapy/sodapy/__init__.py/_raise_for_status
|
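In `requests`, `response.json()` raises `ValueError` when the body is not JSON (newer versions raise a `JSONDecodeError` that subclasses it), which is what the masked clause absorbs. A standalone parallel using the stdlib:

```python
# json.loads raises ValueError (json.JSONDecodeError subclasses it).
import json

try:
    json.loads("<html>not json</html>")
except ValueError as e:
    print("non-JSON body:", e)
```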
5,918 |
def __exit__(self, *args):
    for p in self.processes:
        try:
            p.wait()
        except __HOLE__:
            log.warning('%s failed', p)
|
OSError
|
dataset/ETHPy150Open romanz/amodem/amodem/alsa.py/Interface.__exit__
|
5,919 |
def supports_c_code(self, inputs):
    """
    Returns True if the current op and reduce pattern has functioning C
    code.
    """
    # If we don't even have the right method, we certainly
    # don't support the C code
    # (This is the test that used to be implemented by
    # local_gpu_sum)
    pattern = (''.join(str(i) for i in self.reduce_mask))
    if not hasattr(self, 'c_code_reduce_%s' % pattern):
        return False
    # Now that this is a general reduction op, we might
    # have a method for a pattern, but that pattern
    # might not be implemented for the current scalar op.
    # To detect this more complicated situation, we
    # make fake arguments to c_code, try to run them,
    # and see if NotImplementedError gets raised.
    node = self.make_node(*inputs)
    name = 'fake_name'
    inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))]
    out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))]
    sub = {'fail': 'fake failure code'}
    try:
        self.c_code(node, name, inp, out, sub)
        self.c_support_code_apply(node, name)
    except __HOLE__:
        return False
    return True
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/cuda/basic_ops.py/GpuCAReduce.supports_c_code
|
5,920 |
def truthiness(s):
    """Returns a boolean from a string"""
    try:
        return str(s).lower() in ['true', 't', '1']
    except (__HOLE__, ValueError, UnicodeEncodeError):
        return False
|
TypeError
|
dataset/ETHPy150Open rehandalal/buchner/buchner/helpers.py/truthiness
|
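A usage sketch of the row above; the `TypeError` guard covers objects whose `str()` conversion fails.

```python
# Quick checks of the truthiness helper.
def truthiness(s):
    try:
        return str(s).lower() in ['true', 't', '1']
    except (TypeError, ValueError, UnicodeEncodeError):
        return False

print(truthiness("True"), truthiness("T"), truthiness(1), truthiness("no"))
# True True True False
```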
5,921 |
def get_success_url(self):
    """
    Returns the supplied URL.
    """
    if self.success_url:
        url = self.success_url % self.object.__dict__
    else:
        try:
            url = self.object.get_absolute_url()
        except __HOLE__:
            raise ImproperlyConfigured(
                "No URL to redirect to. Either provide a url or define"
                " a get_absolute_url method on the Model.")
    return url
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/generic/edit.py/ModelFormMixin.get_success_url
|
5,922 |
def evalfun(self, inputx):
    """
    This is like self.f.evalfun(), i.e. evaluating the benchmark
    function, but without incrementing usage counters. This may be
    used only for re-evaluating a function at a point we already
    obtained but simply cannot conveniently retrieve right now;
    i.e. a value returned from the method black-box.
    """
    if self.f._is_rowformat:
        x = np.asarray(inputx)
    else:
        x = np.transpose(inputx)
    out = self.f._fun_evalfull(x)
    try:
        return out[0]
    except __HOLE__:
        return out
|
TypeError
|
dataset/ETHPy150Open pasky/cocopf/experiment.py/FInstance.evalfun
|
5,923 |
def KeyboardListener(event):
    global choiceboxChoices, choiceboxWidget
    key = event.keysym
    if len(key) <= 1:
        if key in string.printable:
            # Find the key in the list.
            # before we clear the list, remember the selected member
            try:
                start_n = int(choiceboxWidget.curselection()[0])
            except __HOLE__:
                start_n = -1
            # clear the selection.
            choiceboxWidget.selection_clear(0, 'end')
            # start from previous selection +1
            for n in range(start_n + 1, len(choiceboxChoices)):
                item = choiceboxChoices[n]
                if item[0].lower() == key.lower():
                    choiceboxWidget.selection_set(first=n)
                    choiceboxWidget.see(n)
                    return
            else:
                # has not found it so loop from top
                for n, item in enumerate(choiceboxChoices):
                    if item[0].lower() == key.lower():
                        choiceboxWidget.selection_set(first=n)
                        choiceboxWidget.see(n)
                        return
                # nothing matched -- we'll look for the next logical choice
                for n, item in enumerate(choiceboxChoices):
                    if item[0].lower() > key.lower():
                        if n > 0:
                            choiceboxWidget.selection_set(first=(n - 1))
                        else:
                            choiceboxWidget.selection_set(first=0)
                        choiceboxWidget.see(n)
                        return
                # still no match (nothing was greater than the key)
                # we set the selection to the first item in the list
                lastIndex = len(choiceboxChoices) - 1
                choiceboxWidget.selection_set(first=lastIndex)
                choiceboxWidget.see(lastIndex)
                return

# -------------------------------------------------------------------
# diropenbox
# -------------------------------------------------------------------
|
IndexError
|
dataset/ETHPy150Open datalyze-solutions/pandas-qt/pandasqt/ui/fallback/easygui/boxes/base_boxes.py/KeyboardListener
|
5,924 |
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    d = set()
    for dir in sys.path:
        try:
            if os.path.isdir(dir):
                dir, dircase = makepath(dir)
                d.add(dircase)
        except __HOLE__:
            continue
    return d
|
TypeError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/site.py/_init_pathinfo
|
5,925 |
def addpackage(sitedir, name, known_paths):
    """Add a new path to known_paths by combining sitedir and 'name' or execute
    sitedir if it starts with 'import'"""
    if known_paths is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except __HOLE__:
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                exec line
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
|
IOError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/site.py/addpackage
|
5,926 |
def __setup(self):
    if self.__lines:
        return
    data = None
    for dir in self.__dirs:
        for filename in self.__files:
            filename = os.path.join(dir, filename)
            try:
                fp = file(filename, "rU")
                data = fp.read()
                fp.close()
                break
            except __HOLE__:
                pass
        if data:
            break
    if not data:
        data = self.__data
    self.__lines = data.split('\n')
    self.__linecnt = len(self.__lines)
|
IOError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/site.py/_Printer.__setup
|
5,927 |
def __call__(self):
    self.__setup()
    prompt = 'Hit Return for more, or q (and Return) to quit: '
    lineno = 0
    while 1:
        try:
            for i in range(lineno, lineno + self.MAXLINES):
                print self.__lines[i]
        except __HOLE__:
            break
        else:
            lineno += self.MAXLINES
            key = None
            while key is None:
                key = raw_input(prompt)
                if key not in ('', 'q'):
                    key = None
            if key == 'q':
                break
|
IndexError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/site.py/_Printer.__call__
|
5,928 |
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize
    except __HOLE__:
        pass
|
ImportError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/site.py/execsitecustomize
|
5,929 |
@synchronized('daemon-client-lock')
def _get_client(cls, rootwrap_config):
    try:
        return cls._clients[rootwrap_config]
    except __HOLE__:
        from oslo_rootwrap import client
        new_client = client.Client([
            "sudo", "nova-rootwrap-daemon", rootwrap_config])
        cls._clients[rootwrap_config] = new_client
        return new_client
|
KeyError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/RootwrapDaemonHelper._get_client
|
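The row above is the classic cache-on-`KeyError` memoization pattern: look up first, construct and store only on a miss. A generic sketch:

```python
# Generic memoize-in-a-dict pattern matching the row above.
_clients = {}

def get_client(config):
    try:
        return _clients[config]
    except KeyError:
        client = object()  # stand-in for an expensive constructor
        _clients[config] = client
        return client

assert get_client("a") is get_client("a")  # second call hits the cache
```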
5,930 |
def parse_server_string(server_str):
    """Parses the given server_string and returns a tuple of host and port.
    If it's not a combination of host part and port, the port element
    is an empty string. If the input is invalid expression, return a tuple of
    two empty strings.
    """
    try:
        # First of all, exclude pure IPv6 address (w/o port).
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')
        # Next, check if this is IPv6 address with a port number combination.
        if server_str.find("]:") != -1:
            (address, port) = server_str.replace('[', '', 1).split(']:')
            return (address, port)
        # Third, check if this is a combination of an address and a port
        if server_str.find(':') == -1:
            return (server_str, '')
        # This must be a combination of an address and a port
        (address, port) = server_str.split(':')
        return (address, port)
    except (__HOLE__, netaddr.AddrFormatError):
        LOG.error(_LE('Invalid server_string: %s'), server_str)
        return ('', '')
|
ValueError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/parse_server_string
|
5,931 |
def is_valid_ipv6_cidr(address):
    try:
        netaddr.IPNetwork(address, version=6).cidr
        return True
    except (__HOLE__, netaddr.AddrFormatError):
        return False
|
TypeError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/is_valid_ipv6_cidr
|
5,932 |
def safe_ip_format(ip):
    """Transform ip string to "safe" format.
    Will return ipv4 addresses unchanged, but will nest ipv6 addresses
    inside square brackets.
    """
    try:
        if netaddr.IPAddress(ip).version == 6:
            return '[%s]' % ip
    except (__HOLE__, netaddr.AddrFormatError):  # hostname
        pass
    # it's IPv4 or hostname
    return ip
|
TypeError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/safe_ip_format
|
5,933 |
@contextlib.contextmanager
def tempdir(**kwargs):
    argdict = kwargs.copy()
    if 'dir' not in argdict:
        argdict['dir'] = CONF.tempdir
    tmpdir = tempfile.mkdtemp(**argdict)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except __HOLE__ as e:
            LOG.error(_LE('Could not remove tmpdir: %s'), e)
|
OSError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/tempdir
|
5,934 |
def last_bytes(file_like_object, num):
    """Return num bytes from the end of the file, and remaining byte count.
    :param file_like_object: The file to read
    :param num: The number of bytes to return
    :returns: (data, remaining)
    """
    try:
        file_like_object.seek(-num, os.SEEK_END)
    except __HOLE__ as e:
        # seek() fails with EINVAL when trying to go before the start of the
        # file. It means that num is larger than the file size, so just
        # go to the start.
        if e.errno == errno.EINVAL:
            file_like_object.seek(0, os.SEEK_SET)
        else:
            raise
    remaining = file_like_object.tell()
    return (file_like_object.read(), remaining)
|
IOError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/last_bytes
|
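Seeking before the start of a real file raises `IOError`/`OSError` with `errno.EINVAL`, which is the case the handler above recovers from. A runnable check (POSIX semantics assumed):

```python
# Demonstrates the EINVAL branch with a file smaller than `num`.
import errno
import os
import tempfile

with tempfile.TemporaryFile() as f:
    f.write(b"abc")
    try:
        f.seek(-100, os.SEEK_END)  # 100 > file size
    except (IOError, OSError) as e:
        assert e.errno == errno.EINVAL
        f.seek(0, os.SEEK_SET)
    remaining = f.tell()
    print(remaining, f.read())     # 0 b'abc'
```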
5,935 |
def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range."""
    try:
        value = int(str(value))
    except (__HOLE__, UnicodeEncodeError):
        msg = _('%(value_name)s must be an integer')
        raise exception.InvalidInput(reason=(
            msg % {'value_name': name}))
    if min_value is not None:
        if value < min_value:
            msg = _('%(value_name)s must be >= %(min_value)d')
            raise exception.InvalidInput(
                reason=(msg % {'value_name': name,
                               'min_value': min_value}))
    if max_value is not None:
        if value > max_value:
            msg = _('%(value_name)s must be <= %(max_value)d')
            raise exception.InvalidInput(
                reason=(
                    msg % {'value_name': name,
                           'max_value': max_value})
            )
    return value
|
ValueError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/validate_integer
|
5,936 |
def safe_truncate(value, length):
    """Safely truncates unicode strings such that their encoded length is
    no greater than the length provided.
    """
    b_value = encodeutils.safe_encode(value)[:length]
    # NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
    # truncating a long byte string to 255, the last character may be
    # cut in the middle, so that UnicodeDecodeError will occur when
    # converting it back to unicode.
    decode_ok = False
    while not decode_ok:
        try:
            u_value = encodeutils.safe_decode(b_value)
            decode_ok = True
        except __HOLE__:
            b_value = b_value[:-1]
    return u_value
|
UnicodeDecodeError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/utils.py/safe_truncate
|
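The trim-one-byte loop above exists because a byte-level slice can split a multi-byte UTF-8 code point. A self-contained parallel using plain `encode`/`decode`:

```python
# Byte-level truncation can split a multi-byte character.
b = ("é" * 4).encode("utf-8")[:5]  # 'é' is 2 bytes; 5 bytes cuts one in half
while True:
    try:
        print(b.decode("utf-8"))   # 'éé' after the dangling byte is trimmed
        break
    except UnicodeDecodeError:
        b = b[:-1]
```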
5,937 |
def _validate_category(self, category):
    try:
        if category.type_id != self.parsed_obj["category"]:
            raise exception.OCCISchemaMismatch(
                expected=category.type_id,
                found=self.parsed_obj["category"]
            )
    except __HOLE__:
        raise exception.OCCIMissingType(
            type_id=category.type_id)
|
KeyError
|
dataset/ETHPy150Open openstack/ooi/ooi/occi/validator.py/Validator._validate_category
|
5,938 |
def _compare_schemes(self, expected_type, actual):
    actual_scheme, actual_term = helpers.decompose_type(actual)
    if expected_type.scheme != actual_scheme:
        return False
    try:
        if expected_type.term != actual_term:
            return False
    except __HOLE__:
        # ignore the fact the type does not have a term
        pass
    return True
|
AttributeError
|
dataset/ETHPy150Open openstack/ooi/ooi/occi/validator.py/Validator._compare_schemes
|
5,939 |
def _validate_optional_links(self, expected, links):
    for uri, l in links.items():
        try:
            rel = l['rel']
        except __HOLE__:
            raise exception.OCCIMissingType(type_id=uri)
        for ex in expected:
            if rel == ex.type_id:
                break
        else:
            expected_types = ', '.join([e.type_id for e in expected])
            raise exception.OCCISchemaMismatch(expected=expected_types,
                                               found=l['rel'])
|
KeyError
|
dataset/ETHPy150Open openstack/ooi/ooi/occi/validator.py/Validator._validate_optional_links
|
5,940 |
def isNumber(num):
    "input is a number"
    try:
        cnum = complex(num)
        return True
    except __HOLE__:
        return False
|
ValueError
|
dataset/ETHPy150Open xraypy/xraylarch/lib/utils/strutils.py/isNumber
|
5,941 |
def request(url, method="get", data=False, limit=config.max_api_pages):
    status = False
    req = urllib2.Request(url)
    if data:
        req = urllib2.Request(url, json.dumps(data))
    try:
        req.get_method = lambda: method.upper()
        res = urllib2.urlopen(req)
        status = True
        res_data = json.loads(res.read())
        res_headers = res.info()
    except urllib2.URLError as e:
        res = e
    except urllib2.HTTPError as e:
        res = e
    except __HOLE__ as e:
        return res, status
    except any as e:
        res = e
    if not status:
        return res, status
    # decide whether to return now or continue recursing
    if limit == 0 or not "link" in res_headers.keys():
        return res_data, status
    next_url_match = re.match(r"<(?P<next_url>[^>]+)>; rel=\"next\"", res_headers['Link'])
    # if there isn't another url, return current response data
    if not next_url_match:
        return res_data, status
    # get the next urls data
    next_url = next_url_match.group("next_url")
    next_data, status = request(next_url, method, data, int(limit) - 1)
    # api call failed. Return whatever was returned - assume entire api call failed
    if not status:
        return next_data, status
    # successfully return everything
    return res_data + next_data, status
|
ValueError
|
dataset/ETHPy150Open jonmorehouse/vimhub/lib/github.py/request
|
5,942 |
def _to_seconds(var):
    sec = 0
    MINUTE = 60
    HOUR = 60 * MINUTE
    DAY = 24 * HOUR
    WEEK = 7 * DAY
    MONTH = 31 * DAY
    try:
        for key, value in var.items():
            if key in ('second', 'seconds'):
                sec += value
            elif key in ('minute', 'minutes'):
                sec += value * MINUTE
            elif key in ('hour', 'hours'):
                sec += value * HOUR
            elif key in ('day', 'days'):
                sec += value * DAY
            elif key in ('week', 'weeks'):
                sec += value * WEEK
            elif key in ('month', 'months'):
                sec += value * MONTH
            else:
                raise ValueError("Unknown time unit '%s'" % key)
        return sec
    except __HOLE__:
        return var
|
AttributeError
|
dataset/ETHPy150Open ronnix/fabtools/fabtools/require/deb.py/_to_seconds
|
5,943 |
def first(self):
    try:
        return self.get_queryset()[0]
    except __HOLE__:
        pass
|
IndexError
|
dataset/ETHPy150Open mirumee/saleor/saleor/product/models/images.py/ImageManager.first
|
5,944 |
def lazy_load_library_exists(self):
    """check if libvirt is available."""
    # try to connect libvirt. if fail, skip test.
    try:
        import libvirt
        import libxml2
    except __HOLE__:
        return False
    global libvirt
    libvirt = __import__('libvirt')
    connection.libvirt = __import__('libvirt')
    connection.libxml2 = __import__('libxml2')
    return True
|
ImportError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/tests/test_libvirt.py/LibvirtConnTestCase.lazy_load_library_exists
|
5,945 |
def lazy_load_library_exists(self):
    """check if libvirt is available."""
    # try to connect libvirt. if fail, skip test.
    try:
        import libvirt
        import libxml2
    except __HOLE__:
        return False
    global libvirt
    libvirt = __import__('libvirt')
    connection.libvirt = __import__('libvirt')
    connection.libxml2 = __import__('libxml2')
    return True
|
ImportError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/tests/test_libvirt.py/IptablesFirewallTestCase.lazy_load_library_exists
|
5,946 |
def __eq__(self, other):
    ident = self.isidentical(other)
    if ident is True:
        return ident
    try:
        return self._eq(other)
    except __HOLE__:
        # e.g., we can't compare whole tables to other things (yet?)
        pass
    return False
|
AttributeError
|
dataset/ETHPy150Open blaze/blaze/blaze/expr/core.py/Node.__eq__
|
5,947 |
def get_callable_name(o):
    """Welcome to str inception. Leave your kittens at home.
    """
    # special case partial objects
    if isinstance(o, partial):
        keywords = o.keywords
        kwds = (
            ', '.join('%s=%r' % item for item in keywords.items())
            if keywords else
            ''
        )
        args = ', '.join(map(repr, o.args))
        arguments = []
        if args:
            arguments.append(args)
        if kwds:
            arguments.append(kwds)
        return 'partial(%s, %s)' % (
            get_callable_name(o.func),
            ', '.join(arguments),
        )
    try:
        # python 3 makes builtins look nice
        return o.__qualname__
    except __HOLE__:
        try:
            # show the module of the object, if we can
            return '%s.%s' % (inspect.getmodule(o).__name__, o.__name__)
        except AttributeError:
            try:
                # __self__ tells us the class the method is bound to
                return '%s.%s' % (o.__self__.__name__, o.__name__)
            except AttributeError:
                # exhausted all avenues of printing callables so just print the
                # name of the object
                return o.__name__
|
AttributeError
|
dataset/ETHPy150Open blaze/blaze/blaze/expr/core.py/get_callable_name
|
5,948 |
def subs(o, d):
    """ Substitute values within data structure
    >>> subs(1, {1: 2})
    2
    >>> subs([1, 2, 3], {2: 'Hello'})
    [1, 'Hello', 3]
    """
    d = dict((k, v) for k, v in d.items() if k is not v)
    if not d:
        return o
    try:
        if o in d:
            d = d.copy()
            o = d.pop(o)
    except __HOLE__:
        pass
    return _subs(o, d)
|
TypeError
|
dataset/ETHPy150Open blaze/blaze/blaze/expr/core.py/subs
|
5,949 |
def delete_dir(self):
    self.path = self.TestConfig['rdf.store_conf']
    try:
        if self.TestConfig['rdf.source'] == "Sleepycat":
            subprocess.call("rm -rf " + self.path, shell=True)
        elif self.TestConfig['rdf.source'] == "ZODB":
            delete_zodb_data_store(self.path)
    except __HOLE__ as e:
        if e.errno == 2:
            # The file may not exist and that's fine
            pass
        else:
            raise e
|
OSError
|
dataset/ETHPy150Open openworm/PyOpenWorm/tests/DataTestTemplate.py/_DataTest.delete_dir
|
5,950 |
def clean_coordinates(self):
    coords = self.cleaned_data['coordinates'].strip()
    if not coords:
        return None
    pieces = re.split('[ ,]+', coords)
    if len(pieces) != 2:
        raise forms.ValidationError('could not understand coordinates')
    try:
        lat = float(pieces[0])
        lon = float(pieces[1])
    except __HOLE__:
        raise forms.ValidationError('could not understand coordinates')
    return [lat, lon]
|
ValueError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/locations/forms.py/LocationForm.clean_coordinates
|
5,951 |
def win_find_exe(filename, installsubdir=None, env="ProgramFiles"):
    """Find executable in current dir, system path or given ProgramFiles subdir"""
    for fn in [filename, filename + ".exe"]:
        try:
            if installsubdir is None:
                path = _where(fn)
            else:
                path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)])
        except __HOLE__:
            path = filename
        else:
            break
    return path
|
IOError
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/win_find_exe
|
5,952 |
def update(self, data):
    """Update info about network interface according to given dnet dictionary"""
    self.name = data["name"]
    self.description = data['description']
    self.win_index = data['win_index']
    # Other attributes are optional
    if conf.use_winpcapy:
        self._update_pcapdata()
    try:
        self.ip = socket.inet_ntoa(get_if_raw_addr(data['guid']))
    except (KeyError, __HOLE__, NameError):
        pass
    try:
        self.mac = data['mac']
    except KeyError:
        pass
|
AttributeError
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/NetworkInterface.update
|
5,953 |
def load_from_powershell(self):
    for i in get_windows_if_list():
        try:
            interface = NetworkInterface(i)
            self.data[interface.name] = interface
        except (__HOLE__, PcapNameNotFoundError):
            pass
    if len(self.data) == 0:
        log_loading.warning("No match between your pcap and windows network interfaces found. "
                            "You probably won't be able to send packets. "
                            "Deactivating unneeded interfaces and restarting Scapy might help. "
                            "Check your winpcap and powershell installation, and access rights.")
|
KeyError
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/NetworkInterfaceDict.load_from_powershell
|
5,954 |
def pcap_name(self, devname):
    """Return pcap device name for given Windows device name."""
    try:
        pcap_name = self.data[devname].pcap_name
    except __HOLE__:
        raise ValueError("Unknown network interface %r" % devname)
    else:
        return pcap_name
|
KeyError
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/NetworkInterfaceDict.pcap_name
|
5,955 |
def pcap_name(devname):
    """Return pypcap device name for given libdnet/Scapy device name"""
    try:
        pcap_name = ifaces.pcap_name(devname)
    except __HOLE__:
        # pcap.pcap() will choose a sensible default for sniffing if iface=None
        pcap_name = None
    return pcap_name
|
ValueError
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/pcap_name
|
5,956 |
def sndrcv(pks, pkt, timeout=2, inter=0, verbose=None, chainCC=0, retry=0, multi=0):
    if not isinstance(pkt, Gen):
        pkt = SetGen(pkt)
    if verbose is None:
        verbose = conf.verb
    debug.recv = plist.PacketList([], "Unanswered")
    debug.sent = plist.PacketList([], "Sent")
    debug.match = plist.SndRcvList([])
    nbrecv = 0
    ans = []
    # do it here to fix random fields, so that parent and child have the same
    all_stimuli = tobesent = [p for p in pkt]
    notans = len(tobesent)
    hsent = {}
    for i in tobesent:
        h = i.hashret()
        if h in hsent:
            hsent[h].append(i)
        else:
            hsent[h] = [i]
    if retry < 0:
        retry = -retry
        autostop = retry
    else:
        autostop = 0
    while retry >= 0:
        found = 0
        if timeout < 0:
            timeout = None
        pid = 1
        try:
            if WINDOWS or pid == 0:
                try:
                    try:
                        i = 0
                        if verbose:
                            print("Begin emission:")
                        for p in tobesent:
                            pks.send(p)
                            i += 1
                            time.sleep(inter)
                        if verbose:
                            print("Finished to send %i packets." % i)
                    except SystemExit:
                        pass
                    except KeyboardInterrupt:
                        pass
                    except:
                        log_runtime.exception("--- Error sending packets")
                        log_runtime.info("--- Error sending packets")
                finally:
                    try:
                        sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
                    except:
                        pass
            if WINDOWS or pid > 0:
                # Timeout starts after last packet is sent (as in Unix version)
                if timeout:
                    stoptime = time.time() + timeout
                else:
                    stoptime = 0
                remaintime = None
                # inmask = [pks.ins.fd]
                try:
                    try:
                        while 1:
                            if stoptime:
                                remaintime = stoptime - time.time()
                                if remaintime <= 0:
                                    break
                            r = pks.recv(MTU)
                            if r is None:
                                continue
                            ok = 0
                            h = r.hashret()
                            if h in hsent:
                                hlst = hsent[h]
                                for i in range(len(hlst)):
                                    if r.answers(hlst[i]):
                                        ans.append((hlst[i], r))
                                        if verbose > 1:
                                            os.write(1, b"*")
                                        ok = 1
                                        if not multi:
                                            del(hlst[i])
                                            notans -= 1
                                        else:
                                            if not hasattr(hlst[i], '_answered'):
                                                notans -= 1
                                            hlst[i]._answered = 1
                                        break
                            if notans == 0 and not multi:
                                break
                            if not ok:
                                if verbose > 1:
                                    os.write(1, b".")
                                nbrecv += 1
                                if conf.debug_match:
                                    debug.recv.append(r)
                    except __HOLE__:
                        if chainCC:
                            raise
                finally:
                    if WINDOWS:
                        for p, t in zip(all_stimuli, sent_times):
                            p.sent_time = t
        finally:
            pass
        # remain = reduce(list.__add__, hsent.values(), [])
        remain = list(itertools.chain(*[i for i in hsent.values()]))
        if multi:
            # remain = filter(lambda p: not hasattr(p, '_answered'), remain)
            remain = [p for p in remain if not hasattr(p, '_answered')]
        if autostop and len(remain) > 0 and len(remain) != len(tobesent):
            retry = autostop
        tobesent = remain
        if len(tobesent) == 0:
            break
        retry -= 1
    if conf.debug_match:
        debug.sent = plist.PacketList(remain[:], "Sent")
        debug.match = plist.SndRcvList(ans[:])
    # clean the ans list to delete the field _answered
    if (multi):
        for s, r in ans:
            if hasattr(s, '_answered'):
                del(s._answered)
    if verbose:
        print("\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv + len(ans), len(ans), notans))
    return plist.SndRcvList(ans), plist.PacketList(remain, "Unanswered")
|
KeyboardInterrupt
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/sndrcv
|
5,957 |
def sniff(count=0, store=1, offline=None, prn=None, lfilter=None, L2socket=None, timeout=None, *arg, **karg):
    """Sniff packets
    sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
    Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
      count: number of packets to capture. 0 means infinity
      store: whether to store sniffed packets or discard them
        prn: function to apply to each packet. If something is returned,
             it is displayed. Ex:
             ex: prn = lambda x: x.summary()
    lfilter: python function applied to each packet to determine
             if further action may be done
             ex: lfilter = lambda x: x.haslayer(Padding)
    offline: pcap file to read packets from, instead of sniffing them
    timeout: stop sniffing after a given time (default: None)
    L2socket: use the provided L2socket
    """
    c = 0
    if offline is None:
        log_runtime.info('Sniffing on %s' % conf.iface)
        if L2socket is None:
            L2socket = conf.L2listen
        s = L2socket(type=ETH_P_ALL, *arg, **karg)
    else:
        s = PcapReader(offline)
    lst = []
    if timeout is not None:
        stoptime = time.time() + timeout
    remain = None
    while 1:
        try:
            if timeout is not None:
                remain = stoptime - time.time()
                if remain <= 0:
                    break
            try:
                p = s.recv(MTU)
            except PcapTimeoutElapsed:
                continue
            if p is None:
                break
            if lfilter and not lfilter(p):
                continue
            if store:
                lst.append(p)
            c += 1
            if prn:
                r = prn(p)
                if r is not None:
                    print(r)
            if count > 0 and c >= count:
                break
        except __HOLE__:
            break
    s.close()
    return plist.PacketList(lst, "Sniffed")
|
KeyboardInterrupt
|
dataset/ETHPy150Open phaethon/scapy/scapy/arch/windows/__init__.py/sniff
|
5,958 |
def _request(method, url, content_type=None, _data=None):
    '''
    Makes a HTTP request. Returns the JSON parse, or an obj with an error.
    '''
    opener = _build_opener(_HTTPHandler)
    request = _Request(url, data=_data)
    if content_type:
        request.add_header('Content-Type', content_type)
    request.get_method = lambda: method
    try:
        handler = opener.open(request)
    except __HOLE__ as exc:
        return {'error': '{0}'.format(exc)}
    return json.loads(handler.read())
|
HTTPError
|
dataset/ETHPy150Open saltstack/salt/salt/returners/couchdb_return.py/_request
|
5,959 |
def getargspec(obj):
    """
    Get the names and default values of a callable's
    arguments

    A tuple of four things is returned: (args, varargs,
    varkw, defaults).
    - args is a list of the argument names (it may
      contain nested lists).
    - varargs and varkw are the names of the * and
      ** arguments or None.
    - defaults is a tuple of default argument values
      or None if there are no default arguments; if
      this tuple has n elements, they correspond to
      the last n elements listed in args.

    Unlike inspect.getargspec(), can return argument
    specification for functions, methods, callable
    objects, and classes. Does not support builtin
    functions or methods.
    """
    if not callable(obj):
        raise TypeError("%s is not callable" % type(obj))
    try:
        if inspect.isfunction(obj):
            return inspect.getargspec(obj)
        elif hasattr(obj, FUNC_OBJ_ATTR):
            # For methods or classmethods drop the first
            # argument from the returned list because
            # python supplies that automatically for us.
            # Note that this differs from what
            # inspect.getargspec() returns for methods.
            # NB: We use im_func so we work with
            #     instancemethod objects also.
            spec = inspect.getargspec(getattr(obj, FUNC_OBJ_ATTR))
            return inspect.ArgSpec(spec.args[:1], spec.varargs, spec.keywords, spec.defaults)
        elif inspect.isclass(obj):
            return getargspec(obj.__init__)
        elif isinstance(obj, object):
            # We already know the instance is callable,
            # so it must have a __call__ method defined.
            # Return the arguments it expects.
            return getargspec(obj.__call__)
    except __HOLE__:
        # If a nested call to our own getargspec()
        # raises NotImplementedError, re-raise the
        # exception with the real object type to make
        # the error message more meaningful (the caller
        # only knows what they passed us; they shouldn't
        # care what aspect(s) of that object we actually
        # examined).
        pass
    raise NotImplementedError("do not know how to get argument list for %s" % type(obj))
|
NotImplementedError
|
dataset/ETHPy150Open koenbok/Cactus/cactus/utils/internal.py/getargspec
|
5,960 |
def close(self):
    try:
        self._context._tags[self.tagname].remove(self)
    except __HOLE__:
        pass
    return self._markup(self._close())
|
ValueError
|
dataset/ETHPy150Open jek/flatland/flatland/out/markup.py/Tag.close
|
5,961 |
def _attribute_sort_key(item):
    try:
        return (0, _static_attribute_order.index(item[0]))
    except __HOLE__:
        return (1, item[0])
|
ValueError
|
dataset/ETHPy150Open jek/flatland/flatland/out/markup.py/_attribute_sort_key
|
5,962 |
def _process_alive(pid):
    if exists("/proc"):
        return exists("/proc/%d" % pid)
    else:
        try:
            os.kill(int(pid), 0)
            return True
        except __HOLE__, err:
            return err.errno == errno.EPERM
|
OSError
|
dataset/ETHPy150Open tmm1/graphite/carbon/lib/carbon/conf.py/_process_alive
|
5,963 |
def handleAction(self):
    """Handle extra argument for backwards-compatibility.
    * C{start} will simply do minimal pid checking and otherwise let twistd
      take over.
    * C{stop} will kill an existing running process if it matches the
      C{pidfile} contents.
    * C{status} will simply report if the process is up or not.
    """
    action = self["action"]
    pidfile = self.parent["pidfile"]
    program = settings["program"]
    instance = self["instance"]
    if action == "stop":
        if not exists(pidfile):
            print "Pidfile %s does not exist" % pidfile
            raise SystemExit(0)
        pf = open(pidfile, 'r')
        try:
            pid = int(pf.read().strip())
            pf.close()
        except:
            print "Could not read pidfile %s" % pidfile
            raise SystemExit(1)
        print "Sending kill signal to pid %d" % pid
        try:
            os.kill(pid, 15)
        except __HOLE__, e:
            if e.errno == errno.ESRCH:
                print "No process with pid %d running" % pid
            else:
                raise
        raise SystemExit(0)
    elif action == "status":
        if not exists(pidfile):
            print "%s (instance %s) is not running" % (program, instance)
            raise SystemExit(1)
        pf = open(pidfile, "r")
        try:
            pid = int(pf.read().strip())
            pf.close()
        except:
            print "Failed to read pid from %s" % pidfile
            raise SystemExit(1)
        if _process_alive(pid):
            print ("%s (instance %s) is running with pid %d" %
                   (program, instance, pid))
            raise SystemExit(0)
        else:
            print "%s (instance %s) is not running" % (program, instance)
            raise SystemExit(1)
    elif action == "start":
        if exists(pidfile):
            pf = open(pidfile, 'r')
            try:
                pid = int(pf.read().strip())
                pf.close()
            except:
                print "Could not read pidfile %s" % pidfile
                raise SystemExit(1)
            if _process_alive(pid):
                print ("%s (instance %s) is already running with pid %d" %
                       (program, instance, pid))
                raise SystemExit(1)
            else:
                print "Removing stale pidfile %s" % pidfile
                try:
                    os.unlink(pidfile)
                except:
                    print "Could not remove pidfile %s" % pidfile
        print "Starting %s (instance %s)" % (program, instance)
    else:
        print "Invalid action '%s'" % action
        print "Valid actions: start stop status"
        raise SystemExit(1)
|
OSError
|
dataset/ETHPy150Open tmm1/graphite/carbon/lib/carbon/conf.py/CarbonCacheOptions.handleAction
|
5,964 |
@webapi_check_local_site
@webapi_login_required
def get(self, request, *args, **kwargs):
    """Returns the location of the current draft reply.
    If the draft reply exists, this will return :http:`302` with
    a ``Location`` header pointing to the URL of the draft. Any
    operations on the draft can be done at that URL.
    If the draft reply does not exist, this will return a Does Not
    Exist error.
    """
    try:
        resources.review_request.get_object(request, *args, **kwargs)
        review = resources.review.get_object(request, *args, **kwargs)
        reply = review.get_pending_reply(request.user)
    except __HOLE__:
        return DOES_NOT_EXIST
    if not reply:
        return DOES_NOT_EXIST
    return 302, {}, {
        'Location': self._build_redirect_with_args(
            request,
            resources.review_reply.get_href(reply, request, *args,
                                            **kwargs)),
    }
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_reply_draft.py/ReviewReplyDraftResource.get
|
5,965 |
def __init__(self, path, value, raw_value=None, timestamp=None, precision=0,
             host=None, metric_type='COUNTER', ttl=None):
    """
    Create new instance of the Metric class
    Takes:
        path=string: string the specifies the path of the metric
        value=[float|int]: the value to be submitted
        timestamp=[float|int]: the timestamp, in seconds since the epoch
        (as from time.time()) precision=int: the precision to apply.
        Generally the default (2) should work fine.
    """
    # Validate the path, value and metric_type submitted
    if (None in [path, value] or metric_type not in ('COUNTER', 'GAUGE')):
        raise DiamondException(("Invalid parameter when creating new "
                                "Metric with path: %r value: %r "
                                "metric_type: %r")
                               % (path, value, metric_type))
    # If no timestamp was passed in, set it to the current time
    if timestamp is None:
        timestamp = int(time.time())
    else:
        # If the timestamp isn't an int, then make it one
        if not isinstance(timestamp, int):
            try:
                timestamp = int(timestamp)
            except ValueError as e:
                raise DiamondException(("Invalid timestamp when "
                                        "creating new Metric %r: %s")
                                       % (path, e))
    # The value needs to be a float or an int. If it is, great. If not,
    # try to cast it to one of those.
    if not isinstance(value, (int, float)):
        try:
            if precision == 0:
                value = round(float(value))
            else:
                value = float(value)
        except __HOLE__ as e:
            raise DiamondException(("Invalid value when creating new "
                                    "Metric %r: %s") % (path, e))
    self.path = path
    self.value = value
    self.raw_value = raw_value
    self.timestamp = timestamp
    self.precision = precision
    self.host = host
    self.metric_type = metric_type
    self.ttl = ttl
|
ValueError
|
dataset/ETHPy150Open python-diamond/Diamond/src/diamond/metric.py/Metric.__init__
|
5,966 |
def _contains_bad_data(self, event):
    """
    Check if the current event has any
    incorrect or badly formatted data.
    """
    # All events must have a media id
    if not self._check_media_id(event):
        print 'XXXX Fail!'
        return True
    try:
        # TODO: truncate floating to int
        # Events should not have negative buffering_length
        if int(event.get('x_buffering_length', 0)) < 0:
            print 'Negative Buffering'
            return True
    except __HOLE__:
        # buffering_length is a float (illegal)
        print 'Buffering Length not an integer'
        return True
    return False
|
ValueError
|
dataset/ETHPy150Open pbs/agora-proc/agora/stats.py/PBSVideoStats._contains_bad_data
|
5,967 |
def _addEventMediaBufferingStart(self, event):
    self.buffer_start_events += 1
    if not self._valid_buffering_length:
        return
    if self.is_buffering:
        # two MediaBufferingStart events in a row
        # toss stream
        self._invalidate_buffer_results()
        return
    self.is_buffering = True
    try:
        self._buffering_start_time = datetime.strptime(
            event['event_date'], '%Y-%m-%d %H:%M:%S')
    except (__HOLE__, TypeError):
        # can't parse event_date, can't calculate buffer length
        self._invalidate_buffer_results()
        return
    self._video_location_check = event.get('x_video_location')
|
ValueError
|
dataset/ETHPy150Open pbs/agora-proc/agora/stats.py/PBSVideoStats._addEventMediaBufferingStart
|
5,968 |
def _addEventMediaBufferingEnd(self, event):
    if event.get('x_after_seek') == 'False':
        # only count buffering when not seeking
        return
    self.buffering_events += 1
    if event.get('x_auto'):
        if event['x_auto'] == 'true':
            self.auto_bitrate = True
    # calculate buffering data
    if not self._valid_buffering_length:
        return
    if not self.is_buffering:
        # two MediaBufferingEnd events in a row
        # toss stream
        self._invalidate_buffer_results()
        return
    self.is_buffering = False
    if event.get('x_video_location') != self._video_location_check:
        # we scrubbed during buffering, disregard buffering data
        self._invalidate_buffer_results()
        return
    # subtract MediaBufferingEnd timestamp from
    # MediaBufferingStart timestamp
    if not self.buffering_length:
        self.buffering_length = 0
    try:
        media_buffering_end_time = datetime.strptime(
            event['event_date'], '%Y-%m-%d %H:%M:%S')
    except (__HOLE__, TypeError):
        # can't parse event_date, can't calculate buffer length
        self._invalidate_buffer_results()
        return
    if media_buffering_end_time < self._buffering_start_time:
        # the MediaBufferingEnd event has a timestamp before its
        # MediaBufferingStart event: bad data
        self._invalidate_buffer_results()
        return
    buffer_delta = media_buffering_end_time - self._buffering_start_time
    self.buffering_length += self._total_seconds(buffer_delta)
    if event.get('x_video_location'):
        loc = event['x_video_location']
        self.buffering_positions.append(loc)
|
ValueError
|
dataset/ETHPy150Open pbs/agora-proc/agora/stats.py/PBSVideoStats._addEventMediaBufferingEnd
|
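Entries 5,967 and 5,968 both mask the same (ValueError, TypeError) pair around datetime.strptime: ValueError for a malformed date string, TypeError when the field is missing and .get() returned None. A standalone sketch (parse_event_date is a hypothetical helper, not part of agora):
from datetime import datetime

def parse_event_date(event):
    # Returns a datetime, or None when the date is absent or malformed.
    try:
        return datetime.strptime(event.get('event_date'), '%Y-%m-%d %H:%M:%S')
    except (ValueError, TypeError):
        # ValueError: string does not match the format
        # TypeError: event_date was None (strptime needs a string)
        return None

print(parse_event_date({'event_date': '2015-06-01 12:30:00'}))
print(parse_event_date({}))  # -> None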
5,969 |
def is_equivalent(self, other, logger, tolerance=0.):
"""
Test if self and `other` are equivalent.
other: :class:`FlowSolution`
The flowfield to check against.
logger: :class:`Logger` or None
Used to log debug messages that will indicate what, if anything, is
not equivalent.
tolerance: float
The maximum relative difference in array values to be considered
equivalent.
"""
if not isinstance(other, FlowSolution):
logger.debug('other is not a FlowSolution object.')
return False
if other.grid_location != self.grid_location:
logger.debug('grid locations are not equal: %s vs. %s.',
other.grid_location, self.grid_location)
return False
if other.ghosts != self.ghosts:
logger.debug('flow ghost cell counts are not equal: %s vs. %s.',
other.ghosts, self.ghosts)
return False
for arr in self._arrays:
name = self.name_of_obj(arr)
try:
other_arr = getattr(other, name)
except __HOLE__:
logger.debug('other is missing array %r', name)
return False
if tolerance > 0.:
if not numpy.allclose(other_arr, arr, tolerance, tolerance):
logger.debug("%s values are not 'close'.", name)
return False
else:
if (other_arr != arr).any():
logger.debug('%s values are not equal.', name)
return False
for vector in self._vectors:
name = self.name_of_obj(vector)
try:
other_vector = getattr(other, name)
except AttributeError:
logger.debug('other is missing vector %r', name)
return False
if not vector.is_equivalent(other_vector, name, logger, tolerance):
return False
# TODO: check scalars
return True
|
AttributeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/datatypes/domain/flow.py/FlowSolution.is_equivalent
|
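The comparison above walks named arrays via getattr, treats a missing attribute as non-equivalence, and uses numpy.allclose when a tolerance is given. A reduced sketch of the same pattern; ArrayBag and arrays_equivalent are illustrative names, not OpenMDAO API:
import numpy

class ArrayBag(object):
    def __init__(self, **arrays):
        self.__dict__.update(arrays)

def arrays_equivalent(a, b, names, tolerance=0.):
    for name in names:
        try:
            other = getattr(b, name)
        except AttributeError:
            return False  # other container is missing this array
        mine = getattr(a, name)
        if tolerance > 0.:
            if not numpy.allclose(other, mine, tolerance, tolerance):
                return False
        elif (other != mine).any():
            return False
    return True

x = ArrayBag(rho=numpy.ones(3))
y = ArrayBag(rho=numpy.ones(3) + 1e-9)
print(arrays_equivalent(x, y, ['rho'], tolerance=1e-6))  # True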
5,970 |
def add_file(self, file):
try:
if os.environ["OS"].startswith("Windows"):
self.add_from_file( file ) #+ "\\builder.ui")
except __HOLE__ as e:
self.add_from_file( file ) #+ "/builder.ui")
|
KeyError
|
dataset/ETHPy150Open lionheart/TimeTracker-Linux/libs/UI.py/uiBuilder.add_file
|
5,971 |
def builder_build(self, *args, **kwargs):
widget_list_dict = kwargs.get('widget_list_dict', {})
def parse_widgets(file):
ids = re.compile("(?:id=\"([a-zA-Z0-9_]*?)\")+")
classes = re.compile("(?:class=\"([a-zA-Z0-9_]*?)\")+")
components = {}
current = ''
with open(file) as lines:
for line in lines.readlines():
for id in ids.findall(line):
#print 'r:',id
if id:
for klass in classes.findall(line):
#print 'l:',left
if klass in self.Gtk_Widget_List:
components[id] = []
current = id
if not klass in self.Gtk_Widget_List and current:
try:
components[current].append( id )
except __HOLE__:
print 'cb: ',current, 'r:',id, 'l:', klass
return components
file = kwargs.get('builder_file', './data/ui/builder.ui')
if isinstance(file, list):
for f in file:
widget_list_dict.update(parse_widgets(f))
self.add_file(f)
self.connect_widgets(self)
#print widget_list_dict
elif isinstance(file, str):
widget_list_dict = parse_widgets(file)
self.add_file(file)
self.connect_widgets(self)
if widget_list_dict:
self.get_widgets(widget_list_dict)
return True
else:
return False
|
KeyError
|
dataset/ETHPy150Open lionheart/TimeTracker-Linux/libs/UI.py/uiBuilder.builder_build
|
5,972 |
def _ensure_ascii(words):
try:
for i, word in enumerate(words):
word.decode('ascii')
except __HOLE__:
raise ValueError("Token %d (%r) is non-ASCII. BLLIP Parser "
"currently doesn't support non-ASCII inputs." %
(i, word))
|
UnicodeDecodeError
|
dataset/ETHPy150Open nltk/nltk/nltk/parse/bllip.py/_ensure_ascii
|
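The check above exploits the fact that decoding a non-ASCII byte sequence as 'ascii' raises UnicodeDecodeError. The original is Python 2; a Python 3 counterpart has to encode instead and catch UnicodeEncodeError (ensure_ascii here is a hypothetical stand-in):
def ensure_ascii(words):
    # Python 3 flavour of the check above: str.encode('ascii') raises
    # UnicodeEncodeError for non-ASCII tokens (the Python 2 original
    # decodes and gets UnicodeDecodeError).
    for i, word in enumerate(words):
        try:
            word.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError("Token %d (%r) is non-ASCII" % (i, word))

ensure_ascii(['British', 'left', 'waffles'])   # passes silently
# ensure_ascii(['caf\u00e9'])                  # would raise ValueError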
5,973 |
def demo():
"""This assumes the Python module bllipparser is installed."""
# download and install a basic unified parsing model (Wall Street Journal)
# sudo python -m nltk.downloader bllip_wsj_no_aux
from nltk.data import find
model_dir = find('models/bllip_wsj_no_aux').path
print('Loading BLLIP Parsing models...')
# the easiest way to get started is to use a unified model
bllip = BllipParser.from_unified_model_dir(model_dir)
print('Done.')
sentence1 = 'British left waffles on Falklands .'.split()
sentence2 = 'I saw the man with the telescope .'.split()
# this sentence is known to fail under the WSJ parsing model
fail1 = '# ! ? : -'.split()
for sentence in (sentence1, sentence2, fail1):
print('Sentence: %r' % ' '.join(sentence))
try:
tree = next(bllip.parse(sentence))
print(tree)
except __HOLE__:
print("(parse failed)")
# n-best parsing demo
for i, parse in enumerate(bllip.parse(sentence1)):
print('parse %d:\n%s' % (i, parse))
# using external POS tag constraints
print("forcing 'tree' to be 'NN':",
next(bllip.tagged_parse([('A', None), ('tree', 'NN')])))
print("forcing 'A' to be 'DT' and 'tree' to be 'NNP':",
next(bllip.tagged_parse([('A', 'DT'), ('tree', 'NNP')])))
# constraints don't have to make sense... (though on more complicated
# sentences, they may cause the parse to fail)
print("forcing 'A' to be 'NNP':",
next(bllip.tagged_parse([('A', 'NNP'), ('tree', None)])))
|
StopIteration
|
dataset/ETHPy150Open nltk/nltk/nltk/parse/bllip.py/demo
|
5,974 |
def setup_module(module):
from nose import SkipTest
try:
_ensure_bllip_import_or_error()
except __HOLE__:
raise SkipTest('doctests from nltk.parse.bllip are skipped because '
'the bllipparser module is not installed')
|
ImportError
|
dataset/ETHPy150Open nltk/nltk/nltk/parse/bllip.py/setup_module
|
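setup_module above is the standard optional-dependency skip. A stdlib-only variant of the same idea using unittest.skipUnless (class and flag names are illustrative):
import unittest

try:
    import bllipparser  # probe the optional dependency once
    HAVE_BLLIP = True
except ImportError:
    HAVE_BLLIP = False

@unittest.skipUnless(HAVE_BLLIP, 'bllipparser is not installed')
class BllipTests(unittest.TestCase):
    def test_probe(self):
        self.assertTrue(HAVE_BLLIP)

if __name__ == '__main__':
    unittest.main()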
5,975 |
def test_bad_monitoring_input_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py avoids wrong
# settings of channel_name or dataset_name in the constructor.
dim = 3
m = 10
rng = np.random.RandomState([6, 2, 2014])
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 2
dataset = DenseDesignMatrix(X=X)
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
# testing for bad dataset_name input
dummy = 'void'
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dummy)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes dataset_name "
"that is invalid "))
# testing for bad channel_name input
monitor_lr2 = MonitorBasedLRAdjuster(channel_name=dummy)
model2 = SoftmaxModel(dim)
train2 = Train(dataset,
model2,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr2])
try:
train2.main_loop()
except __HOLE__ as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes channel_name "
"that is invalid "))
return
|
ValueError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/training_algorithms/tests/test_sgd.py/test_bad_monitoring_input_in_monitor_based_lr
|
5,976 |
def testing_multiple_datasets_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py does not take
# multiple datasets in which multiple channels ending in '_objective'
# exist.
# This case happens when the user has not specified either channel_name or
# dataset_name in the constructor
dim = 3
m = 10
rng = np.random.RandomState([6, 2, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
# including a monitoring datasets lets us test that
# the monitor works with supervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset={'train': monitoring_train,
'test': monitoring_test},
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster()
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except __HOLE__:
return
raise AssertionError("MonitorBasedLRAdjuster takes multiple dataset names "
"in which more than one \"objective\" channel exist "
"and the user has not specified either channel_name "
"or database_name in the constructor to "
"disambiguate.")
|
ValueError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/training_algorithms/tests/test_sgd.py/testing_multiple_datasets_in_monitor_based_lr
|
5,977 |
def test_reject_mon_batch_without_mon():
# tests that setting up the sgd algorithm
# without a monitoring dataset
# but with monitoring_batches specified is an error
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
try:
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=None,
update_callbacks=None,
set_batch_size=False)
except __HOLE__:
return
assert False
|
ValueError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/training_algorithms/tests/test_sgd.py/test_reject_mon_batch_without_mon
|
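Several of these pylearn2 tests repeat one hand-rolled idiom: run the code, return on the expected ValueError, and fail the test if nothing was raised. A compact generic helper showing the same control flow (expect_value_error is hypothetical; pytest.raises is the usual modern replacement):
def expect_value_error(fn, *args, **kwargs):
    try:
        fn(*args, **kwargs)
    except ValueError:
        return  # expected failure path
    raise AssertionError("%s did not raise ValueError" % fn.__name__)

expect_value_error(int, "not a number")

# pytest expresses the same check as a context manager:
# with pytest.raises(ValueError):
#     int("not a number")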
5,978 |
def test_determinism():
# Verifies that running SGD twice results in the same examples getting
# visited in the same order
for mode in _iteration_schemes:
dim = 1
batch_size = 3
num_batches = 5
m = num_batches * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [[-1] * m]
def visit(X):
mx = max(visited[0])
counter = mx + 1
for i in X[:, 0]:
i = int(i)
assert visited[0][i] == -1
visited[0][i] = counter
counter += 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some
# point
termination_criterion = EpochCounter(5)
def run_algorithm():
unsupported_modes = ['random_slice', 'random_uniform',
'even_sequences']
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode=mode,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
raised = False
try:
algorithm.train(dataset)
except __HOLE__:
assert mode in unsupported_modes
raised = True
if mode in unsupported_modes:
assert raised
return True
return False
if run_algorithm():
continue
visited.insert(0, [-1] * m)
del model.monitor
run_algorithm()
for v in visited:
assert len(v) == m
for elem in range(m):
assert elem in v
assert len(visited) == 2
print(visited[0])
print(visited[1])
assert np.all(np.asarray(visited[0]) == np.asarray(visited[1]))
|
ValueError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/training_algorithms/tests/test_sgd.py/test_determinism
|
5,979 |
def test_uneven_batch_size():
"""
Test SGD parametrisations extensively for datasets with a number of
examples not divisible by the batch size.
The tested settings are:
- Model with force_batch_size = True or False
- Training dataset with number of examples divisible or not by batch size
- Monitoring dataset with number of examples divisible or not by batch size
- Even or uneven iterators
2 tests out of 10 should raise ValueError
"""
learning_rate = 1e-3
batch_size = 5
dim = 3
m1, m2, m3 = 10, 15, 22
rng = np.random.RandomState([25, 9, 2012])
dataset1 = DenseDesignMatrix(X=rng.randn(m1, dim))
dataset2 = DenseDesignMatrix(X=rng.randn(m2, dim))
dataset3 = DenseDesignMatrix(X=rng.randn(m3, dim))
def train_with_monitoring_datasets(train_dataset,
monitoring_datasets,
model_force_batch_size,
train_iteration_mode,
monitor_iteration_mode):
model = SoftmaxModel(dim)
if model_force_batch_size:
model.force_batch_size = model_force_batch_size
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
train_iteration_mode=train_iteration_mode,
monitor_iteration_mode=monitor_iteration_mode,
monitoring_dataset=monitoring_datasets,
termination_criterion=EpochCounter(2))
train = Train(train_dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
no_monitoring_datasets = None
even_monitoring_datasets = {'valid': dataset2}
uneven_monitoring_datasets = {'valid': dataset2, 'test': dataset3}
# without monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven training datasets
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='even_sequential',
monitor_iteration_mode='sequential')
# with even monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except __HOLE__:
pass
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='even_sequential')
|
ValueError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/training_algorithms/tests/test_sgd.py/test_uneven_batch_size
|
5,980 |
def test_BasicHeader(self):
template = '''
#from Cheetah.Filters import Markdown
#transform Markdown
$foo
Header
======
'''
expected = '''<p>bar</p>
<h1>Header</h1>'''
try:
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template == expected
except __HOLE__, ex:
print('>>> We probably failed to import markdown, bummer %s' % ex)
return
except Exception, ex:
if ex.__class__.__name__ == 'MarkdownException' and majorVer == 2 and minorVer < 5:
print('>>> NOTE: Support for the Markdown filter will be broken for you. Markdown says: %s' % ex)
return
raise
|
ImportError
|
dataset/ETHPy150Open binhex/moviegrabber/lib/site-packages/Cheetah/Tests/Filters.py/BasicMarkdownFilterTest.test_BasicHeader
|
5,981 |
def get_permittee_from_threadlocals(kw):
"""
Wrapper to get a permittee keyword from threadlocals and make sure it is
usable.
"""
# Just skip if perm checks are disabled
if not ExpedientPermission.objects.are_checks_enabled():
return None
d = threadlocals.get_thread_locals()
logger.debug("Got threadlocals %s" % d)
try:
permittee = d[kw]
except __HOLE__:
raise PermitteeNotInThreadLocals(kw)
if not permittee:
raise NonePermitteeException(kw)
return permittee
|
KeyError
|
dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/expedient/common/permissions/shortcuts.py/get_permittee_from_threadlocals
|
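The lookup above translates a raw KeyError from the thread-local dict into a domain-specific exception, so callers see a permissions problem rather than a mapping miss. A self-contained sketch with hypothetical names:
class PermitteeNotFound(Exception):
    """Hypothetical stand-in for PermitteeNotInThreadLocals."""

def get_permittee(thread_locals, kw):
    try:
        permittee = thread_locals[kw]
    except KeyError:
        # Re-raise with domain context instead of leaking the KeyError.
        raise PermitteeNotFound("no %r in thread locals" % kw)
    if not permittee:
        raise PermitteeNotFound("%r is set but empty" % kw)
    return permittee

print(get_permittee({'user': 'alice'}, 'user'))  # -> alice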
5,982 |
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (__HOLE__, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
#HERE WE NEED TO CHANGE THIS TEST
# elif isinstance(f.rel, ManyToManyRel):
# result_repr = ", ".join(map(unicode, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
|
AttributeError
|
dataset/ETHPy150Open MongoEngine/django-mongoengine/django_mongoengine/mongo_admin/helpers.py/AdminReadonlyField.contents
|
5,983 |
def main(argparseOptions):
global c, rOpt, appnamePrefix, rClient
rOpt = argparseOptions
c = string_ops.Printer(rOpt.enableColor)
runningApps = []
timestamps = []
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['notify-send', '--version'], stdout=devnull)
except:
print(c.RED("Unable to launch notify-send command!",
"We cannot notify you of events without libnotify installed"))
sys.exit(2)
try:
with open(os.path.expanduser(rOpt.configFile)) as f:
cfg = yaml.safe_load(f)
except:
print(c.yellow(
"Note: unable to read configFile '{}'; using defaults"
.format(rOpt.configFile)))
nick = user = passwd = messg = events = None
else:
nick = cfg.get('nickname', None)
user = cfg.get('ravelloUser', None)
passwd = cfg.get('ravelloPass', None)
messg = cfg.get('unableToLoginAdditionalMsg', None)
events = cfg.get('eventsOfInterest', None)
if rOpt.kerberos:
appnamePrefix = 'k:' + rOpt.kerberos + '__'
elif nick:
appnamePrefix = 'k:' + nick + '__'
else:
appnamePrefix = ''
lackingCreds = False
if not rOpt.ravelloUser:
if user:
rOpt.ravelloUser = user
elif sys.stdout.isatty():
rOpt.ravelloUser = get_username(c.CYAN("Enter Ravello username: "))
else:
lackingCreds = True
if not rOpt.ravelloPass:
if passwd:
rOpt.ravelloPass = passwd
elif sys.stdout.isatty():
rOpt.ravelloPass = get_passphrase(c.CYAN("Enter Ravello passphrase: "))
else:
lackingCreds = True
if lackingCreds:
cmd = [
'notify-send', '--urgency', 'critical',
"rav-notify missing Ravello credentials!",
"You must either populate ~/.ravshello/ravshello.conf, run " +
"rav-notify with -u & -p options, or run rav-notify from a " +
"terminal so it can prompt you for user/pass.",
]
subprocess.check_call(cmd)
sys.exit(3)
rClient = ravello_sdk.RavelloClient()
try:
# Try to log in.
rClient.login(rOpt.ravelloUser, rOpt.ravelloPass)
except:
if sys.stdout.isatty():
print(c.RED("Logging in to Ravello failed!"))
print("\nRe-check your username and password.")
if messg: print(messg)
else:
cmd = [
'notify-send', '--urgency', 'critical',
"rav-notify failed to log in to Ravello!",
"Re-check your username and password.",
]
subprocess.check_call(cmd)
sys.exit(5)
cmd = [
'notify-send', '--urgency', 'low',
"rav-notify monitoring Ravello events",
"Any events of interest (app timeouts or deletions, vms being " +
"started or stopped) will trigger further notifications",
]
subprocess.check_call(cmd)
if events:
eventsOfInterest = events
else:
eventsOfInterest = [
'APP_TIMEOUT_AUTO_STOPPING',
'APP_TIMEOUT_AUTO_STOPPED',
'APPLICATION_TIMER_RESET',
'APPLICATION_DELETED',
'VM_STOPPED',
'VM_STARTED',
'VM_SNAPSHOTTING_AFTER_STOP',
'VM_FINISHED_SNAPSHOTTING',
]
debug("Event triggers:\n{}\n".format("\n".join(eventsOfInterest)))
urgency = {
'INFO': "low",
'WARN': "normal",
'ERROR': "critical",
}
# Build a list of app ids we should pay attention to.
myAppIds = update_myAppIds()
for appId in myAppIds:
app = rClient.get_application(appId, aspect='properties')
try:
# Grab expiration time for all of my deployed apps.
expirationTime = app['deployment']['expirationTime']
except:
continue
else:
a = {
'id': appId,
'name': app['name'].replace(appnamePrefix, ''),
'expirationTime': sanitize_timestamp(expirationTime),
}
runningApps.append(a)
# Run forever-loop to watch for notifications or expiring apps.
while 1:
# Run check to see if any apps are about to expire.
act_on_imminent_app_expiration(runningApps)
myEvents = []
# Set lower bound to 5 minutes ago, upper bound to right now.
# Unusual manipulation present because Ravello expects timestamps to
# include thousandths of a sec, but not as floating-point.
start = time.time() - (5*60 + rOpt.refreshInterval)
start = int("{:.3f}".format(start).replace('.', ''))
end = int("{:.3f}".format(time.time()).replace('.', ''))
query = {
'dateRange': {
'startTime': start,
'endTime': end,
},
}
try:
# Perform our search.
results = rClient.search_notifications(query)
except ravello_sdk.RavelloError as e:
if e.args[0] == 'request timeout':
# Timeout, so try one more time.
results = rClient.search_notifications(query)
try:
# Results are returned in reverse-chronological order.
for event in reversed(results['notification']):
try:
# Only deal with events we have not seen before that relate
# to one of myAppIds.
if (any(appId == event['appId'] for appId in myAppIds) and
event['eventTimeStamp'] not in timestamps):
myEvents.append(event)
except:
pass
except:
pass
# Iterate over events relevant to my apps.
for event in myEvents:
if any(etype in event['eventType'] for etype in eventsOfInterest):
# Get application data if event of interest.
try:
app = rClient.get_application(
event['appId'], aspect='properties')
except KeyError:
# Will fail if event is not about an app, i.e.: on user login.
continue
else:
continue
# Add unique timestamp for this event to our list, to prevent acting
# on it in a subsequent loop.
timestamps.append(event['eventTimeStamp'])
try:
appName = app['name'].replace(appnamePrefix, '')
except __HOLE__:
# Will fail if app was deleted.
appName = ''
if event['eventType'] == 'APPLICATION_TIMER_RESET':
try:
# Grab expiration time if app is deployed.
expirationTime = app['deployment']['expirationTime']
except:
# (app isn't deployed)
pass
else:
expirationTime = sanitize_timestamp(expirationTime)
for a in runningApps:
# Try to find the app by id in our existing list.
if a['id'] == app['id']:
# Update the app's expirationTime timestamp.
a['expirationTime'] = expirationTime
break
else:
# If the appId for the APPLICATION_TIMER_RESET event isn't
# present in our runningApps list, we need to add it.
a = {
'id': app['id'],
'name': appName,
'expirationTime': expirationTime,
}
runningApps.append(a)
else:
# Event type is anything but APPLICATION_TIMER_RESET.
tstamp = datetime.fromtimestamp(
sanitize_timestamp(timestamps[-1])
).strftime("%H:%M:%S")
if appName:
appName = " ({})".format(appName)
msg = event['eventProperties'][0]['value'].replace(appnamePrefix, '')
cmd = [
'notify-send',
'--urgency',
urgency[event['notificationLevel']],
"{}{}".format(event['eventType'], appName),
"[{}] {}".format(tstamp, msg),
]
subprocess.check_call(cmd)
if rOpt.enableDebug and sys.stdout.isatty():
i = rOpt.refreshInterval
while i >= 0:
print(c.REVERSE("{}".format(i)), end='')
sys.stdout.flush()
time.sleep(1)
print('\033[2K', end='')
i -= 1
print()
else:
time.sleep(rOpt.refreshInterval)
myAppIds = update_myAppIds(myAppIds)
|
TypeError
|
dataset/ETHPy150Open ryran/ravshello/rav-notify.py/main
|
5,984 |
def validate(self, document):
"""
Check input for Python syntax errors.
"""
# When the input starts with Ctrl-Z, always accept. This means EOF in a
# Python REPL.
if document.text.startswith('\x1a'):
return
try:
if self.get_compiler_flags:
flags = self.get_compiler_flags()
else:
flags = 0
compile(document.text, '<input>', 'exec', flags=flags, dont_inherit=True)
except SyntaxError as e:
# Note, the 'or 1' for offset is required because Python 2.7
# gives `None` as offset in case of '4=4' as input. (Looks like
# fixed in Python 3.)
index = document.translate_row_col_to_index(e.lineno - 1, (e.offset or 1) - 1)
raise ValidationError(index, 'Syntax Error')
except __HOLE__ as e:
# e.g. "compile() expected string without null bytes"
raise ValidationError(0, str(e))
|
TypeError
|
dataset/ETHPy150Open jonathanslenders/ptpython/ptpython/validator.py/PythonValidator.validate
|
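The validator leans on compile() for syntax checking; the masked TypeError covers inputs the compiler rejects outright (historically, null bytes on Python 2, which newer Pythons report as ValueError or SyntaxError instead). A minimal standalone check in the same spirit:
def check_syntax(source):
    # Returns (ok, message); mirrors the validator's error paths.
    try:
        compile(source, '<input>', 'exec')
    except SyntaxError as e:
        return False, 'Syntax Error at line %s' % e.lineno
    except (TypeError, ValueError) as e:
        # e.g. null bytes: TypeError on old Python 2, ValueError on Python 3
        return False, str(e)
    return True, 'ok'

print(check_syntax('x = 1'))      # (True, 'ok')
print(check_syntax('4 = 4'))      # (False, 'Syntax Error at line 1')
print(check_syntax('x = 1\x00'))  # (False, ...) null byte rejected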
5,985 |
@admin_required(reviewers=True)
def langpacks(request):
if request.method == 'POST':
try:
tasks.fetch_langpacks.delay(request.POST['path'])
except __HOLE__:
messages.error(request, 'Invalid language pack sub-path provided.')
return redirect('zadmin.langpacks')
addons = (Addon.objects.no_cache()
.filter(addonuser__user__email=settings.LANGPACK_OWNER_EMAIL,
type=amo.ADDON_LPAPP)
.order_by('name'))
data = {'addons': addons, 'base_url': settings.LANGPACK_DOWNLOAD_BASE,
'default_path': settings.LANGPACK_PATH_DEFAULT % (
'firefox', amo.FIREFOX.latest_version)}
return render(request, 'zadmin/langpack_update.html', data)
|
ValueError
|
dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/views.py/langpacks
|
5,986 |
@login_required
@json_view
def es_collections_json(request):
app = request.GET.get('app', '')
q = request.GET.get('q', '')
qs = Collection.search()
try:
qs = qs.query(id__startswith=int(q))
except ValueError:
qs = qs.query(name__match=q)
try:
qs = qs.filter(app=int(app))
except __HOLE__:
pass
data = []
for c in qs[:7]:
data.append({'id': c.id,
'name': unicode(c.name),
'all_personas': c.all_personas,
'url': c.get_url_path()})
return data
|
ValueError
|
dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/views.py/es_collections_json
|
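Both views above probe user input with int(): try the numeric interpretation first, fall back (or drop the filter) on ValueError. Distilled into one function with illustrative names:
def build_query(q, app=''):
    query = {}
    try:
        query['id__startswith'] = int(q)   # numeric search by id prefix
    except ValueError:
        query['name__match'] = q           # otherwise match by name
    try:
        query['app'] = int(app)            # optional app filter
    except ValueError:
        pass                               # blank/non-numeric: no filter
    return query

print(build_query('42', app='1'))   # id search, app filter applied
print(build_query('firefox'))       # name search, no app filter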
5,987 |
@admin_required
@post_required
def featured_collection(request):
try:
pk = int(request.POST.get('collection', 0))
except __HOLE__:
pk = 0
c = get_object_or_404(Collection, pk=pk)
return render(request, 'zadmin/featured_collection.html',
dict(collection=c))
|
ValueError
|
dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/views.py/featured_collection
|
5,988 |
def clean(self):
cleaned_data = super(UpdateCaseGroupForm, self).clean()
try:
self.current_group = CommCareCaseGroup.get(self.cleaned_data.get('item_id'))
except __HOLE__:
raise forms.ValidationError("You're not passing in the group's id!")
except ResourceNotFound:
raise forms.ValidationError("This case group was not found in our database!")
return cleaned_data
|
AttributeError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/data_interfaces/forms.py/UpdateCaseGroupForm.clean
|
5,989 |
def nnlf_fr(self, thetash, x, frmask):
# new frozen version
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
if frmask != None:
theta = frmask.copy()
theta[np.isnan(frmask)] = thetash
else:
theta = thetash
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except __HOLE__:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return np.inf
x = np.array((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (np.any(cond0)):
return np.inf
else:
N = len(x)
#raise ValueError
return self._nnlf(x, *args) + N*np.log(scale)
|
IndexError
|
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/sandbox/distributions/sppatch.py/nnlf_fr
|
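The masked IndexError comes from slicing the parameter vector: theta[-2] and theta[-1] fail when fewer than two values are supplied, and the code converts that into a clearer ValueError. A reduced sketch (split_params is a hypothetical helper):
import numpy as np

def split_params(theta):
    # Last two entries are loc and scale; the rest are shape parameters.
    theta = np.asarray(theta)
    try:
        loc, scale = theta[-2], theta[-1]
        args = tuple(theta[:-2])
    except IndexError:
        raise ValueError("Not enough input arguments.")
    return args, loc, scale

print(split_params([0.5, 0.0, 1.0]))  # ((0.5,), 0.0, 1.0)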
5,990 |
def expect_v2(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large.
'''
#changes: 20100809
#correction and refactoring how loc and scale are handled
#uses now _pdf
#needs more testing for distribution with bound support, e.g. genpareto
if fn is None:
def fun(x, *args):
return (loc + x*scale)*self._pdf(x, *args)
else:
def fun(x, *args):
return fn(loc + x*scale)*self._pdf(x, *args)
if lb is None:
#lb = self.a
try:
lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
except __HOLE__:
lb = self.a
else:
lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
if ub is None:
#ub = self.b
try:
ub = self.ppf(1-1e-9, *args)
except ValueError:
ub = self.b
else:
ub = min(self.b, (ub - loc)/(1.0*scale))
if conditional:
invfac = self._sf(lb,*args) - self._sf(ub,*args)
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args, limit=500)[0]/invfac
### for discrete distributions
#TODO: check that for a distribution with finite support the calculations are
# done with one array summation (np.dot)
#based on _drv2_moment(self, n, *args), but streamlined
|
ValueError
|
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/sandbox/distributions/sppatch.py/expect_v2
|
5,991 |
def update(self, node):
try:
field_list = self.field_list
except AttributeError:
return
for f in field_list:
try:
delattr(self, f)
except AttributeError:
pass
try:
func = getattr(node, 'get_' + f)
except __HOLE__:
pass
else:
setattr(self, f, func())
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/NodeInfoBase.update
|
5,992 |
def format(self, field_list=None, names=0):
if field_list is None:
try:
field_list = self.field_list
except __HOLE__:
field_list = sorted(self.__dict__.keys())
fields = []
for field in field_list:
try:
f = getattr(self, field)
except AttributeError:
f = None
f = str(f)
if names:
f = field + ': ' + f
fields.append(f)
return fields
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/NodeInfoBase.format
|
5,993 |
def get_build_env(self):
"""Fetch the appropriate Environment to build this node.
"""
try:
return self._memo['get_build_env']
except __HOLE__:
pass
result = self.get_executor().get_build_env()
self._memo['get_build_env'] = result
return result
|
KeyError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.get_build_env
|
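get_build_env above is classic EAFP memoization: attempt the cache lookup and compute only on KeyError. The same pattern as a reusable decorator; memo_method is illustrative, not SCons API:
import functools

def memo_method(fn):
    # Caches per-instance results in self._memo, keyed by method name,
    # exactly as the SCons Node code does by hand.
    @functools.wraps(fn)
    def wrapper(self):
        try:
            return self._memo[fn.__name__]
        except KeyError:
            pass
        result = fn(self)
        self._memo[fn.__name__] = result
        return result
    return wrapper

class Node(object):
    def __init__(self):
        self._memo = {}
    @memo_method
    def get_build_env(self):
        print('computing...')   # runs only once
        return {'CC': 'gcc'}

n = Node()
n.get_build_env()
n.get_build_env()   # cached: no 'computing...' this time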
5,994 |
def get_executor(self, create=1):
"""Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so."""
try:
executor = self.executor
except __HOLE__:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.get_executor
|
5,995 |
def executor_cleanup(self):
"""Let the executor clean up any cached information."""
try:
executor = self.get_executor(create=None)
except __HOLE__:
pass
else:
if executor is not None:
executor.cleanup()
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.executor_cleanup
|
5,996 |
def reset_executor(self):
"Remove cached executor; forces recompute when needed."
try:
delattr(self, 'executor')
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.reset_executor
|
5,997 |
def visited(self):
"""Called just after this node has been visited (with or
without a build)."""
try:
binfo = self.binfo
except __HOLE__:
# Apparently this node doesn't need build info, so
# don't bother calculating or storing it.
pass
else:
self.ninfo.update(self)
self.store_info()
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.visited
|
5,998 |
def clear(self):
"""Completely clear a Node of all its cached state (so that it
can be re-evaluated by interfaces that do continuous integration
builds).
"""
# The del_binfo() call here isn't necessary for normal execution,
# but is for interactive mode, where we might rebuild the same
# target and need to start from scratch.
self.del_binfo()
self.clear_memoized_values()
self.ninfo = self.new_ninfo()
self.executor_cleanup()
try:
delattr(self, '_calculated_sig')
except __HOLE__:
pass
self.includes = None
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.clear
|
5,999 |
def builder_set(self, builder):
self.builder = builder
try:
del self.executor
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/__init__.py/Node.builder_set
|
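The last few SCons entries all guard attribute deletion the same way: delattr wrapped in except AttributeError, so clearing state that was never cached is a no-op. Distilled (safe_delattr is a hypothetical helper):
def safe_delattr(obj, name):
    # Deleting cached state that may not exist yet: EAFP beats hasattr().
    try:
        delattr(obj, name)
    except AttributeError:
        pass

class C(object):
    pass

c = C()
c.executor = 'cached'
safe_delattr(c, 'executor')   # removes it
safe_delattr(c, 'executor')   # second call silently does nothing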