function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence)
---|---|---
def create_dependency_processor(prop):
types = {
ONETOMANY : OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY : ManyToManyDP,
}
if prop.association is not None:
return AssociationDP(prop)
else:
return types[prop.direction](prop)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.is_backref = prop.is_backref
self.post_update = prop.post_update
self.foreign_keys = prop.foreign_keys
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
self.key = prop.key
if not self.prop.synchronize_pairs:
raise exceptions.ArgumentError("Can't build a DependencyProcessor for relation %s. No target attributes to populate between parent and child are present" % self.prop)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this ``DependencyProcessor``."""
# TODO: use correct API for this
return self._get_instrumented_attribute().impl.hasparent(state)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def whose_dependent_on_who(self, state1, state2):
"""Given an object pair assuming `obj2` is a child of `obj1`,
return a tuple with the dependent object second, or None if
there is no dependency.
"""
if state1 is state2:
return None
elif self.direction == ONETOMANY:
return (state1, state2)
else:
return (state2, state1)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
"""Used before the flushes' topological sort to traverse
through related objects and ensure every instance which will
require save/update/delete is properly added to the
UOWTransaction.
"""
raise NotImplementedError()
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
"""Called during a flush to synchronize primary key identifier
values between a parent/child object, as well as to an
associationrow in the case of many-to-many.
"""
raise NotImplementedError()
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def register_dependencies(self, uowcommit):
if self.post_update:
if not self.is_backref:
stub = MapperStub(self.parent, self.mapper, self.key)
uowcommit.register_dependency(self.mapper, stub)
uowcommit.register_dependency(self.parent, stub)
uowcommit.register_processor(stub, self, self.parent)
else:
uowcommit.register_dependency(self.parent, self.mapper)
uowcommit.register_processor(self.parent, self, self.parent)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
#print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " preprocess_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
if delete:
# head object is being deleted, and we manage its list of child objects
# the child objects have to have their foreign key to the parent set to NULL
if not self.post_update:
should_null_fks = not self.cascade.delete and self.passive_deletes != 'all'
for state in deplist:
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes)
if unchanged or deleted:
for child in deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in unchanged:
if child is not None:
uowcommit.register_object(child)
else:
for state in deplist:
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True)
if added or deleted:
for child in added:
if child is not None:
uowcommit.register_object(child)
for child in deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False)
elif self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(c._state, isdelete=True)
if not self.passive_updates and self._pks_changed(uowcommit, state):
if not unchanged:
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key, passive=False)
if unchanged:
for child in unchanged:
uowcommit.register_object(child)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _pks_changed(self, uowcommit, state):
return sync.source_changes(uowcommit, state, self.parent, self.prop.synchronize_pairs)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def register_dependencies(self, uowcommit):
uowcommit.register_processor(self.parent, self, self.mapper)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def process_dependencies(self, task, deplist, uowcommit, delete=False):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
# the listonly flag in its own preprocess stage (results in UPDATE
# statements being emitted)
if not delete and self.passive_updates:
self._process_key_switches(deplist, uowcommit)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _pks_changed(self, uowcommit, state):
return sync.source_changes(uowcommit, state, self.mapper, self.prop.synchronize_pairs)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def process_dependencies(self, task, deplist, uowcommit, delete = False):
#print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
if delete:
if self.post_update and not self.cascade.delete_orphan and self.passive_deletes != 'all':
# post_update means we have to update our row to not reference the child object
# before we can DELETE the row
for state in deplist:
self._synchronize(state, None, None, True, uowcommit)
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes)
if added or unchanged or deleted:
self._conditional_post_update(state, uowcommit, deleted + unchanged + added)
else:
for state in deplist:
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True)
if added or deleted or unchanged:
for child in added:
self._synchronize(state, child, None, False, uowcommit)
self._conditional_post_update(state, uowcommit, deleted + unchanged + added)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
if state is None or (not self.post_update and uowcommit.is_deleted(state)):
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state, self.parent, self.prop.synchronize_pairs)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def register_dependencies(self, uowcommit):
# many-to-many. create a "Stub" mapper to represent the
# "middle table" in the relationship. This stub mapper doesn't save
# or delete any objects, but just marks a dependency on the two
# related mappers. Its dependency processor then populates the
# association table.
stub = MapperStub(self.parent, self.mapper, self.key)
uowcommit.register_dependency(self.parent, stub)
uowcommit.register_dependency(self.mapper, stub)
uowcommit.register_processor(stub, self, self.parent)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
#print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " preprocess_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
if not delete:
for state in deplist:
(added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True)
if deleted:
for child in deleted:
if self.cascade.delete_orphan and self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(c._state, isdelete=True)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _pks_changed(self, uowcommit, state):
return sync.source_changes(uowcommit, state, self.parent, self.prop.synchronize_pairs)
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def __init__(self, *args, **kwargs):
super(AssociationDP, self).__init__(*args, **kwargs)
self.cascade.delete = True
self.cascade.delete_orphan = True
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def __init__(self, parent, mapper, key):
self.mapper = mapper
self.base_mapper = self
self.class_ = mapper.class_
self._inheriting_mappers = []
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _register_dependencies(self, uowcommit):
pass
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def _delete_obj(self, *args, **kwargs):
pass
| santisiri/popego | [5, 2, 5, 1, 1476320366] |
def __init__(self):
pass
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def min_points(self):
'''int: Minimum number of points needed to define the feature.'''
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def points_distance(self,points):
'''
This function implements a method to compute the distance
of points from the feature.
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def print_feature(self,num_points):
'''
This method returns an array of x,y coordinates for
points that are in the feature.
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def __init__(self,points):
self.radius,self.xc,self.yc = self.__gen(points)
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def __gen(self,points):
'''
Compute the radius and the center coordinates of a
circumference given three points
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def points_distance(self,points):
r'''
Compute the distance of the points from the feature
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def print_feature(self, num_points):
'''
This method returns an array of x,y coordinates for
points that are in the feature.
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def __init__(self,points):
self.a,self.k,self.b = self.__gen(points)
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def __gen(self,points):
'''
Compute the three parameters that univocally determine the
exponential curve
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def exponential(x,points):
''' Non linear system function to use
with :py:func:`scipy.optimize.root`
'''
aa = x[0]
nn = x[1]
bb = x[2]
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def points_distance(self,points):
r'''
Compute the distance of the points from the feature
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def print_feature(self, num_points, a,b):
'''
This method returns an array of x,y coordinates for
points that are in the feature in the interval [a,b].
| rubendibattista/python-ransac-library | [7, 3, 7, 3, 1431708514] |
def test_patch(self):
for (msg, orig, mod, expected_patch) in TEST_CASES:
self.assertEqual(expected_patch, makepatch(orig, mod), msg=msg)
| MapofLife/MOL | [23, 4, 23, 15, 1318277433] |
def build_config_var(beta=False, external=False):
"""
Create the configuration key which will be used to locate
the base tiddlywiki file.
"""
base = 'base_tiddlywiki'
if external:
base += '_external'
if beta:
base += '_beta'
return base
| TiddlySpace/tiddlyspace | [105, 38, 105, 23, 1268688852] |
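A quick sanity check of the key names build_config_var yields, derived directly from the branching above (the assertions themselves are illustrative additions, not from the source):

assert build_config_var() == 'base_tiddlywiki'
assert build_config_var(external=True) == 'base_tiddlywiki_external'
assert build_config_var(beta=True) == 'base_tiddlywiki_beta'
assert build_config_var(beta=True, external=True) == 'base_tiddlywiki_external_beta'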
def list_tiddlers(self, tiddlers):
"""
Override tiddlers.link so the location in noscript is to
/tiddlers.
"""
http_host, _ = determine_host(self.environ)
space_name = determine_space(self.environ, http_host)
if space_name:
recipe_name = determine_space_recipe(self.environ, space_name)
if '/recipes/%s' % recipe_name in tiddlers.link:
tiddlers.link = '/tiddlers'
return WikiSerialization.list_tiddlers(self, tiddlers)
| TiddlySpace/tiddlyspace | [105, 38, 105, 23, 1268688852] |
def baseURL():
if neuroptikon.runningFromSource:
basePath = os.path.join(neuroptikon.rootDir, 'documentation', 'build', 'Documentation')
else:
basePath = os.path.join(neuroptikon.rootDir, 'documentation')
| JaneliaSciComp/Neuroptikon | [9, 2, 9, 48, 1409685514] |
def _construct_form(self, i, **kwargs):
# Need to override _construct_form to avoid calling to_python on an empty string PK value
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
if pk == '':
kwargs['instance'] = self.model()
else:
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
# bypass BaseModelFormSet's own _construct_form
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def transientmodelformset_factory(model, formset=BaseTransientModelFormSet, **kwargs):
return modelformset_factory(model, formset=formset, **kwargs)
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def __init__(self, data=None, files=None, instance=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.rel_name = ForeignObjectRel(self.fk, self.fk.remote_field.model, related_name=self.fk.remote_field.related_name).get_accessor_name()
if queryset is None:
queryset = getattr(self.instance, self.rel_name).all()
super(BaseChildFormSet, self).__init__(data, files, queryset=queryset, **kwargs)
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def clean(self, *args, **kwargs):
self.validate_unique()
return super(BaseChildFormSet, self).clean(*args, **kwargs)
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def childformset_factory(
parent_model, model, form=ModelForm,
formset=BaseChildFormSet, fk_name=None, fields=None, exclude=None,
extra=3, can_order=False, can_delete=True, max_num=None, validate_max=False,
formfield_callback=None, widgets=None, min_num=None, validate_min=False
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def __init__(self, options=None):
super(ClusterFormOptions, self).__init__(options=options)
self.formsets = getattr(options, 'formsets', None)
self.exclude_formsets = getattr(options, 'exclude_formsets', None)
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def child_form(cls):
return ClusterForm
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def __init__(self, data=None, files=None, instance=None, prefix=None, **kwargs):
super(ClusterForm, self).__init__(data, files, instance=instance, prefix=prefix, **kwargs)
self.formsets = {}
for rel_name, formset_class in self.__class__.formsets.items():
if prefix:
formset_prefix = "%s-%s" % (prefix, rel_name)
else:
formset_prefix = rel_name
self.formsets[rel_name] = formset_class(data, files, instance=instance, prefix=formset_prefix)
if self.is_bound and not self._has_explicit_formsets:
# check which formsets have actually been provided as part of the form submission -
# if no `formsets` or `exclude_formsets` was specified, we allow them to be omitted
# (https://github.com/wagtail/wagtail/issues/5414#issuecomment-567468127).
self._posted_formsets = [
formset
for formset in self.formsets.values()
if '%s-%s' % (formset.prefix, TOTAL_FORM_COUNT) in self.data
]
else:
# expect all defined formsets to be part of the post
self._posted_formsets = self.formsets.values()
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def is_valid(self):
form_is_valid = super(ClusterForm, self).is_valid()
formsets_are_valid = all(formset.is_valid() for formset in self._posted_formsets)
return form_is_valid and formsets_are_valid
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def media(self):
media = super(ClusterForm, self).media
for formset in self.formsets.values():
media = media + formset.media
return media
| torchbox/django-modelcluster | [425, 59, 425, 24, 1391095565] |
def format_excel(writer, df_size):
""" Add Excel specific formatting to the workbook
df_size is a tuple representing the size of the dataframe, typically
obtained from df.shape -> (20,3)
"""
# Get the workbook and the summary sheet so we can add the formatting
workbook = writer.book
worksheet = writer.sheets['summary']
# Add currency formatting and apply it
money_fmt = workbook.add_format({'num_format': 42, 'align': 'center'})
worksheet.set_column('A:A', 20)
worksheet.set_column('B:C', 15, money_fmt)
# Add 1 to the row count so we can include a total row
# and subtract 1 from the column count because we don't care about the index
table_end = xl_rowcol_to_cell(df_size[0] + 1, df_size[1] - 1)
# This assumes we start in the left hand corner
table_range = 'A1:{}'.format(table_end)
worksheet.add_table(table_range, {'columns': [{'header': 'account',
'total_string': 'Total'},
{'header': 'Total Sales',
'total_function': 'sum'},
{'header': 'Average Sales',
'total_function': 'average'}],
'autofilter': False,
'total_row': True,
'style': 'Table Style Medium 20'})
| chris1610/pbpython | [1894, 986, 1894, 14, 1431396080] |
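format_excel assumes a pandas ExcelWriter backed by the xlsxwriter engine (xl_rowcol_to_cell comes from xlsxwriter.utility) with the data already written to a sheet named 'summary'. A minimal usage sketch under those assumptions; the sample DataFrame and output file name are hypothetical:

import pandas as pd

df = pd.DataFrame({'account': ['Jones LLC', 'Alpha Co'],
                   'Total Sales': [150000, 200000],
                   'Average Sales': [12500, 16667]})
with pd.ExcelWriter('sales_summary.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, index=False, sheet_name='summary')
    # shape (2, 3) -> table range A1:C4: header, two data rows, one total row
    format_excel(writer, df.shape)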
def daemonize():
"""
http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
"""
try:
pid = os.fork() # Fork #1
if pid > 0:
sys.exit(0) # Exit first parent
except OSError as e:
sys.stderr.write('Fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# The first child. Decouple from parent environment
# Become session leader of this new session.
# Also be guaranteed not to have a controlling terminal
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
os.umask(0o022)
try:
pid = os.fork() # Fork #2
if pid > 0:
sys.exit(0) # Exit from second parent
except OSError as e:
sys.stderr.write('Fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# Close all open file descriptors
import resource # Resource usage information
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = 1024
# Iterate through and close all file descriptors
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open (ignored)
pass
# Redirect standard file descriptors to /dev/null
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'a+')
se = open(os.devnull, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
return 0
| erigones/Ludolph | [39, 6, 39, 2, 1370966024] |
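A minimal sketch of how a daemonize helper like this is typically invoked; the pid-file path and run_main_loop entry point are hypothetical, not part of the source:

import os

if __name__ == '__main__':
    daemonize()  # past this point we run detached, with std streams on /dev/null
    with open('/var/run/mydaemon.pid', 'w') as f:  # hypothetical pid file
        f.write('%d\n' % os.getpid())
    run_main_loop()  # hypothetical main loop of the daemon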
def load_config(fp, reopen=False):
config = RawConfigParser()
if reopen:
fp = open(fp.name)
try: # config.readfp() is Deprecated since python 3.2
# noinspection PyDeprecation
read_file = config.readfp
except AttributeError:
read_file = config.read_file
read_file(fp)
fp.close()
return config
| erigones/Ludolph | [39, 6, 39, 2, 1370966024] |
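An illustrative call, assuming a hypothetical config path: load_config takes an already-open file object and closes it itself, and reopen=True reopens the file by name, which is what the SIGHUP handler below relies on.

fp = open('/etc/ludolph.cfg')  # hypothetical path
config = load_config(fp)  # parses the file and closes fp
# later, e.g. on reload:
config = load_config(fp, reopen=True)  # reopens fp.name and re-reads it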
def log_except_hook(*exc_info):
logger.critical('Unhandled exception!', exc_info=exc_info)
| erigones/Ludolph | [39, 6, 39, 2, 1370966024] |
def load_plugins(config, reinit=False):
plugins = []
for config_section in config.sections():
config_section = config_section.strip()
if config_section in config_base_sections:
continue
# Parse other possible imports
parsed_plugin = config_section.split('.')
if len(parsed_plugin) == 1:
modname = 'ludolph.plugins.' + config_section
plugin = config_section
else:
modname = config_section
plugin = parsed_plugin[-1]
logger.info('Loading plugin: %s', modname)
try:
# Translate super_ludolph_plugin into SuperLudolphPlugin
clsname = plugin[0].upper() + re.sub(r'_+([a-zA-Z0-9])', lambda m: m.group(1).upper(), plugin[1:])
module = __import__(modname, fromlist=[clsname])
if reinit and getattr(module, '_loaded_', False):
reload(module)
module._loaded_ = True
imported_class = getattr(module, clsname)
if not issubclass(imported_class, LudolphPlugin):
raise TypeError('Plugin: %s is not LudolphPlugin instance' % modname)
plugins.append(Plugin(config_section, modname, imported_class))
except Exception as ex:
logger.exception(ex)
logger.critical('Could not load plugin: %s', modname)
return plugins
| erigones/Ludolph | [39, 6, 39, 2, 1370966024] |
def sighup(signalnum, handler):
if xmpp.reloading:
logger.warning('Reload already in progress')
else:
xmpp.reloading = True
try:
config = load_config(cfg_fp, reopen=True)
logger.info('Reloaded configuration from %s', cfg_fp.name)
xmpp.prereload()
plugins = load_plugins(config, reinit=True)
xmpp.reload(config, plugins=plugins)
finally:
xmpp.reloading = False
| erigones/Ludolph | [39, 6, 39, 2, 1370966024] |
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_sullustan_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","sullustan_base_male") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def create(kernel):
result = Creature()
result.template = "object/creature/npc/droid/crafted/shared_cll_8_binary_load_lifter_advanced.iff"
result.attribute_template_id = 3
result.stfName("droid_name","cll_8_binary_load_lifter_crafted_advanced") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def test_age_filter():
age = 22
search_fetchable = SearchFetchable(gentation='everybody',
minimum_age=age, maximum_age=age)
for profile in search_fetchable[:5]:
assert profile.age == age
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_count_variable(request):
profiles = search(gentation='everybody', count=14)
assert len(profiles) == 14
for profile in profiles:
profile.username
profile.age
profile.location
profile.match_percentage
profile.enemy_percentage
profile.id
profile.rating
profile.contacted
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_location_filter():
session = Session.login()
location_cache = LocationQueryCache(session)
location = 'Portland, OR'
search_fetchable = SearchFetchable(location=location, location_cache=location_cache, radius=1)
for profile in search_fetchable[:5]:
assert profile.location == 'Portland, OR'
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_search_function():
profile, = search(count=1)
assert isinstance(profile, Profile)
profile.username
profile.age
profile.location
profile.match_percentage
profile.enemy_percentage
profile.id
profile.rating
profile.contacted
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_search_fetchable_iter():
search_fetchable = SearchFetchable(gentation='everybody',
religion='buddhist', age_min=25, age_max=25,
location='new york, ny', keywords='bicycle')
for count, profile in enumerate(search_fetchable):
assert isinstance(profile, Profile)
if count > 30:
break
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_easy_search_filters():
session = Session.login()
query_test_pairs = [# ('bodytype', maps.bodytype),
# TODO(@IvanMalison) this is an alist feature,
# so it can't be tested for now.
('drugs', maps.drugs), ('smokes', maps.smokes),
('diet', maps.diet,), ('job', maps.job)]
for query_param, re_map in query_test_pairs:
for value in sorted(re_map.pattern_to_value.keys()):
profile = SearchFetchable(**{
'gentation': '',
'session': session,
'count': 1,
query_param: value
})[0]
attribute = getattr(profile.details, query_param)
assert value in (attribute or '').lower()
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_children_filter():
session = Session.login()
profile = SearchFetchable(session, wants_kids="wants kids", count=1)[0]
assert "wants" in profile.details.children.lower()
profile = SearchFetchable(session, has_kids=["has kids"],
wants_kids="doesn't want kids",
count=1)[0]
assert "has kids" in profile.details.children.lower()
assert "doesn't want" in profile.details.children.lower() | IvanMalison/okcupyd | [
105,
18,
105,
24,
1411441891
] |
def test_pets_queries():
session = Session.login()
profile = SearchFetchable(session, cats=['dislikes cats', 'likes cats'],
count=1)[0]
assert 'likes cats' in profile.details.pets.lower()
profile = SearchFetchable(session, dogs='likes dogs', cats='has cats', count=1)[0]
assert 'likes dogs' in profile.details.pets.lower()
assert 'has cats' in profile.details.pets.lower()
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_height_filter():
session = Session.login()
profile = SearchFetchable(session, height_min='5\'6"', height_max='5\'6"',
gentation='girls who like guys', radius=25, count=1)[0]
match = magicnumbers.imperial_re.search(profile.details.height)
assert int(match.group(1)) == 5
assert int(match.group(2)) == 6
profile = SearchFetchable(session, height_min='2.00m', count=1)[0]
match = magicnumbers.metric_re.search(profile.details.height)
assert float(match.group(1)) >= 2.00
profile = SearchFetchable(session, height_max='1.5m', count=1)[0]
match = magicnumbers.metric_re.search(profile.details.height)
assert float(match.group(1)) <= 1.5
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_language_filter():
session = Session.login()
profile = SearchFetchable(session, language='french', count=1)[0]
assert 'french' in [language_info[0].lower()
for language_info in profile.details.languages]
profile = SearchFetchable(session, language='Afrikaans', count=1)[0]
assert 'afrikaans' in map(operator.itemgetter(0), profile.details.languages)
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_attractiveness_filter():
session = Session.login()
profile = SearchFetchable(session, attractiveness_min=4000,
attractiveness_max=6000, count=1)[0]
assert profile.attractiveness > 4000
assert profile.attractiveness < 6000
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_question_filter():
user = User()
user_question = user.questions.somewhat_important[0]
for profile in user.search(question=user_question)[:5]:
question = profile.find_question(user_question.id)
assert question.their_answer_matches
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_question_filter_with_custom_answers():
user = User()
user_question = user.questions.somewhat_important[1]
unacceptable_answers = [answer_option.id
for answer_option in user_question.answer_options
if not answer_option.is_match]
for profile in user.search(question=user_question.id,
question_answers=unacceptable_answers)[:5]:
question = profile.find_question(user_question.id)
assert not question.their_answer_matches
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def test_question_count_filter():
user = User()
for profile in user.search(question_count_min=250)[:5]:
assert profile.questions[249]
| IvanMalison/okcupyd | [105, 18, 105, 24, 1411441891] |
def to_bytes(string):
"""
Converts the given string into bytes
"""
# pylint: disable=E0602
if type(string) is unicode:
return str(string)
return string
| CloudI/CloudI | [384, 53, 384, 4, 1251785476] |
def to_bytes(string):
"""
Converts the given string into bytes
"""
if type(string) is bytes:
return string
return bytes(string, "UTF-8") | CloudI/CloudI | [
384,
53,
384,
4,
1251785476
] |
def is_enum(obj):
"""
Checks if an object is from an enumeration class
:param obj: Object to test
:return: True if the object is an enumeration item
"""
return isinstance(obj, enum.Enum)
| CloudI/CloudI | [384, 53, 384, 4, 1251785476] |
def is_enum(_):
"""
Before Python 3.4, enumerations didn't exist.
:param _: Object to test
:return: Always False
"""
return False
| CloudI/CloudI | [384, 53, 384, 4, 1251785476] |
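The two is_enum variants above are presumably selected at import time depending on whether the enum module is available (it appeared in Python 3.4). A minimal sketch of that guard, assumed rather than verbatim from the source:

try:
    import enum

    def is_enum(obj):
        # Python >= 3.4: true if obj is an enumeration item
        return isinstance(obj, enum.Enum)
except ImportError:
    def is_enum(_):
        # Before Python 3.4 enumerations didn't exist, so never an enum
        return False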
def create(kernel):
result = Tangible()
result.template = "object/tangible/scout/trap/shared_trap_webber.iff"
result.attribute_template_id = -1
result.stfName("item_n","trap_webber") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/ithorian/shared_ith_shirt_s09.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","ith_shirt_s09") | anhstudios/swganh | [
62,
37,
62,
37,
1297996365
] |
def __init__(self, editwin):
self.editwin = editwin
# Provide instance variables referenced by Debugger
# XXX This should be done differently
self.flist = self.editwin.flist
self.root = self.editwin.root
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def check_module_event(self, event):
filename = self.getfilename()
if not filename:
return 'break'
if not self.checksyntax(filename):
return 'break'
if not self.tabnanny(filename):
return 'break'
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def tabnanny(self, filename):
f = open(filename, 'r')
try:
tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
msgtxt, (lineno, start) = msg
self.editwin.gotoline(lineno)
self.errorbox("Tabnanny Tokenizing Error",
"Token Error: %s" % msgtxt)
return False
except tabnanny.NannyNag, nag:
# The error messages from tabnanny are too confusing...
self.editwin.gotoline(nag.get_lineno())
self.errorbox("Tab/space error", indent_message)
return False
return True
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def checksyntax(self, filename):
self.shell = shell = self.flist.open_shell()
saved_stream = shell.get_warning_stream()
shell.set_warning_stream(shell.stderr)
f = open(filename, 'r')
source = f.read()
f.close()
if '\r' in source:
source = re.sub(r"\r\n", "\n", source)
source = re.sub(r"\r", "\n", source)
if source and source[-1] != '\n':
source = source + '\n'
text = self.editwin.text
text.tag_remove("ERROR", "1.0", "end")
try:
try:
# If successful, return the compiled code
return compile(source, filename, "exec")
except (SyntaxError, OverflowError), err:
try:
msg, (errorfilename, lineno, offset, line) = err
if not errorfilename:
err.args = msg, (filename, lineno, offset, line)
err.filename = filename
self.colorize_syntax_error(msg, lineno, offset)
except:
msg = "*** " + str(err)
self.errorbox("Syntax error",
"There's an error in your program:\n" + msg)
return False
finally:
shell.set_warning_stream(saved_stream)
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def colorize_syntax_error(self, msg, lineno, offset):
text = self.editwin.text
pos = "0.0 + %d lines + %d chars" % (lineno-1, offset-1)
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def run_module_event(self, event):
"""Run the module after setting up the environment. | babyliynfg/cross | [
75,
39,
75,
4,
1489383147
] |
def getfilename(self):
"""Get source filename. If not saved, offer to save (or create) file | babyliynfg/cross | [
75,
39,
75,
4,
1489383147
] |
def ask_save_dialog(self):
msg = "Source Must Be Saved\n" + 5*' ' + "OK to Save?"
mb = tkMessageBox.Message(title="Save Before Run or Check",
message=msg,
icon=tkMessageBox.QUESTION,
type=tkMessageBox.OKCANCEL,
default=tkMessageBox.OK,
master=self.editwin.text)
return mb.show()
| babyliynfg/cross | [75, 39, 75, 4, 1489383147] |
def validate(self):
self._assignment = None
if self.is_new():
if self.assigned_by == self.allocated_to:
assignment_message = frappe._("{0} self assigned this task: {1}").format(get_fullname(self.assigned_by), self.description)
else:
assignment_message = frappe._("{0} assigned {1}: {2}").format(get_fullname(self.assigned_by), get_fullname(self.allocated_to), self.description)
self._assignment = {
"text": assignment_message,
"comment_type": "Assigned"
}
else:
# NOTE the previous value is only available in validate method
if self.get_db_value("status") != self.status:
if self.allocated_to == frappe.session.user:
removal_message = frappe._("{0} removed their assignment.").format(
get_fullname(frappe.session.user))
else:
removal_message = frappe._("Assignment of {0} removed by {1}").format(
get_fullname(self.allocated_to), get_fullname(frappe.session.user))
self._assignment = {
"text": removal_message,
"comment_type": "Assignment Completed"
}
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def on_trash(self):
self.delete_communication_links()
self.update_in_reference()
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def delete_communication_links(self):
# unlink todo from linked comments
return frappe.db.delete("Communication Link", {
"link_doctype": self.doctype,
"link_name": self.name
})
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def get_owners(cls, filters=None):
"""Returns list of owners after applying filters on todo's.
"""
rows = frappe.get_all(cls.DocType, filters=filters or {}, fields=['allocated_to'])
return [parse_addr(row.allocated_to)[1] for row in rows if row.allocated_to]
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def on_doctype_update():
frappe.db.add_index("ToDo", ["reference_type", "reference_name"])
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def has_permission(doc, ptype="read", user=None):
user = user or frappe.session.user
todo_roles = frappe.permissions.get_doctype_roles('ToDo', ptype)
if 'All' in todo_roles:
todo_roles.remove('All')
if any(check in todo_roles for check in frappe.get_roles(user)):
return True
else:
return doc.allocated_to==user or doc.assigned_by==user
| frappe/frappe | [4495, 2418, 4495, 1493, 1307520856] |
def _lines_to_dict(lines):
md = {}
errors = []
for line in lines:
# Skip a line if there is invalid value.
try:
line = line.decode("utf-8")
except UnicodeDecodeError as e:
errors.append("Invalid line '{}': {}".format(line, e))
continue
if line.startswith("EOF"):
break
if '=' not in line:
continue
key, value = line.split('=', 1)
md[key.strip()] = value.strip()
return md, errors
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
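A sketch of the input/output shape the parser above implies; the key name is illustrative:

lines = [b"CAP=1073741824", b"no-separator-line", b"EOF", b"IGNORED=after-eof"]
md, errors = _lines_to_dict(lines)
# md == {'CAP': '1073741824'}: lines without '=' are skipped, parsing stops
# at "EOF", and any undecodable lines are collected in errors instead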
def dump(lines):
md, errors = parse(lines)
if errors:
logging.warning(
"Invalid metadata found errors=%s", errors)
md["status"] = sc.VOL_STATUS_INVALID
else:
md["status"] = sc.VOL_STATUS_OK
# Do not include domain in dump output.
md.pop("domain", None)
return md
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def __init__(self, domain, image, parent, capacity, format, type, voltype,
disktype, description="", legality=sc.ILLEGAL_VOL, ctime=None,
generation=sc.DEFAULT_GENERATION,
sequence=sc.DEFAULT_SEQUENCE):
# Storage domain UUID
self.domain = domain
# Image UUID
self.image = image
# UUID of the parent volume or BLANK_UUID
self.parent = parent
# Volume capacity in bytes
self.capacity = capacity
# Format (RAW or COW)
self.format = format
# Allocation policy (PREALLOCATED or SPARSE)
self.type = type
# Relationship to other volumes (LEAF, INTERNAL or SHARED)
self.voltype = voltype
# Intended usage of this volume (unused)
self.disktype = disktype
# Free-form description and may be used to store extra metadata
self.description = description
# Indicates if the volume contents should be considered valid
self.legality = legality
# Volume creation time (in seconds since the epoch)
self.ctime = int(time.time()) if ctime is None else ctime
# Generation increments each time certain operations complete
self.generation = generation
# Sequence number of the volume, increased every time a new volume is
# created in an image.
self.sequence = sequence
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def from_lines(cls, lines):
'''
Instantiates a VolumeMetadata object from storage read bytes.
Args:
lines: list of key=value entries given as bytes read from storage
metadata section. "EOF" entry terminates parsing.
'''
metadata, errors = parse(lines)
if errors:
raise exception.InvalidMetadata(
"lines={} errors={}".format(lines, errors))
return cls(**metadata)
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def description(self):
return self._description
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def description(self, desc):
self._description = self.validate_description(desc)
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def capacity(self):
return self._capacity
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |
def capacity(self, value):
self._capacity = self._validate_integer("capacity", value)
| oVirt/vdsm | [129, 183, 129, 68, 1351274855] |