text: string (lengths 0 – 1.05M)
meta: dict
from funfactory.settings_base import *

# Django Settings
##############################################################################

# Defines the views served for root URLs.
ROOT_URLCONF = 'careers.urls'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'

INSTALLED_APPS = list(INSTALLED_APPS) + [
    'careers.base',
    'careers.careers',
    'careers.university',
    'careers.django_workable',
    'django.contrib.admin',
    'django_jobvite',
    'jingo_minify',
    'south',
]

# Third-party Library Settings
##############################################################################

# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
    'admin',
    'registration',
]

# Accepted locales
PROD_LANGUAGES = ('en-US',)

# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
    'css': {
        'base': (
            'css/base.css',
            'js/libs/video-js/video-js.css',
            'js/libs/video-js/video-js-sandstone.css',
        ),
        'print': ('css/print.css',),
        'careers': ('css/careers.css',),
        'careers-ie8': ('css/careers-ie8.css',),
        'university': ('css/university.css',),
        'university-ie8': ('css/university-ie8.css',),
        'position': ('css/position.css',),
        'listings': ('css/listings.css',),
        'listings-ie8': ('css/listings-ie8.css',),
    },
    'js': {
        'global': (
            'js/libs/jquery-1.7.1.min.js',
            'js/libs/waypoints.min.js',
            'js/libs/modernizr.custom.96716.js',
        ),
        'common': (
            'js/libs/video-js/video.dev.js',
            'js/libs/requestAnimationFrame.min.js',
            'js/base.js',
        ),
        'google_analytics': ('js/ga.js',),
        'google_analytics_events': ('js/ga_event-tracking.js',),
        'careers': ('js/careers.js',),
        'listings': ('js/listings.js',),
        'filters': ('js/filters.js',),
        'university': ('js/university.js',),
        'university_links': ('js/university_links.js',),
        'smallscreen': (
            'js/libs/jquery.carouFredSel-6.2.1-packed.js',
            'js/libs/jquery.touchSwipe.min.js',
        ),
        'ie8shims': ('js/libs/html5shiv-printshiv.js',),
        'university-ie8': (
            'js/libs/video-js/video.dev.js',
            'js/university-ie8.js',
        ),
    }
}

# Testing configuration.
NOSE_ARGS = ['--logging-clear-handlers', '--logging-filter=-factory,-south']

# Careers-specific Settings
##############################################################################

# Google Analytics Code
GA_ACCOUNT_CODE = 'UA-36116321-8'

# URI of Jobvite job feed.
JOBVITE_URI = 'https://www.jobvite.com/CompanyJobs/Xml.aspx?c=qpX9Vfwa&cf=e'
{ "repo_name": "chirilo/lumbergh", "path": "careers/settings/base.py", "copies": "1", "size": "3323", "license": "bsd-3-clause", "hash": -5243993784233175000, "line_mean": 24.1742424242, "line_max": 79, "alpha_frac": 0.4814926271, "autogenerated": false, "ratio": 3.490546218487395, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4472038845587395, "avg_score": null, "num_lines": null }
from funfactory.urlresolvers import reverse
from mock import patch
from nose.tools import eq_
from pyquery import PyQuery as pq

from bedrock.mozorg.tests import TestCase


@patch('bedrock.newsletter.utils.get_languages_for_newsletters',
       lambda *x: set(['en', 'fr', 'pt']))
@patch('lib.l10n_utils.template_is_active', lambda *x: True)
class TestNewsletterFooter(TestCase):
    def setUp(self):
        self.view_name = 'newsletter.subscribe'

    def test_country_selected(self):
        """
        The correct country for the locale should be initially selected.
        """
        with self.activate('en-US'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_country option[selected="selected"]').val(), 'us')

        # no country in locale, no country selected
        with self.activate('fr'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_country option[selected="selected"]').val(), '')

        with self.activate('pt-BR'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_country option[selected="selected"]').val(), 'br')

    def test_language_selected(self):
        """
        The correct language for the locale should be initially selected
        or 'en' if it's not an option.
        """
        with self.activate('fr'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_lang option[selected="selected"]').val(), 'fr')

        # with hyphenated regional locale, should have only lang
        with self.activate('pt-BR'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_lang option[selected="selected"]').val(), 'pt')

        # not supported. should default to ''
        with self.activate('af'):
            resp = self.client.get(reverse(self.view_name))
        doc = pq(resp.content)
        eq_(doc('#id_lang option[selected="selected"]').val(), '')
{ "repo_name": "amjadm61/bedrock", "path": "bedrock/newsletter/tests/test_footer_form.py", "copies": "4", "size": "2104", "license": "mpl-2.0", "hash": 8344183362847345000, "line_mean": 36.5714285714, "line_max": 75, "alpha_frac": 0.608365019, "autogenerated": false, "ratio": 3.7978339350180503, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 56 }
from funfactory.urlresolvers import reverse
from nose.tools import eq_
from pyquery import PyQuery as pq

from apps.common.tests import ESTestCase, user


class ModelForms(ESTestCase):

    def test_edit_unavailable_form_field(self):
        newbie_client = self.pending_client
        newbie = self.pending
        # make sure we're at a good starting state
        assert not newbie.get_profile().is_vouched

        edit_profile_url = reverse('profile.edit')
        bad_data = dict(full_name='BadLaRue', bio='Rides the rails',
                        is_vouched=True)
        bad_edit = newbie_client.post(edit_profile_url, bad_data, follow=True)
        eq_(bad_edit.status_code, 200)

        newbie_profile = bad_edit.context['profile']
        assert not newbie_profile.is_vouched
        eq_(newbie_profile.full_name, bad_data['full_name'])

    def test_username_filled_in(self):
        """The username field should have a type and value."""
        newbie = user(username='sam', email='sam@sam.com')
        url = reverse('profile.edit')
        assert self.client.login(email=newbie.email)
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        doc = pq(response.content)
        field = doc('#id_username')[0]
        eq_('input', field.tag)
        assert 'value' in field.attrib
        eq_('text', field.attrib['type'])
        eq_(newbie.username, field.attrib['value'])
{ "repo_name": "satdav/mozillians", "path": "apps/phonebook/tests/test_modelform.py", "copies": "1", "size": "1438", "license": "bsd-3-clause", "hash": 7846671071875630000, "line_mean": 33.2380952381, "line_max": 78, "alpha_frac": 0.6349095967, "autogenerated": false, "ratio": 3.568238213399504, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9703147810099504, "avg_score": 0, "num_lines": 42 }
from funfactory.urlresolvers import reverse
from nose.tools import eq_
from pyquery import PyQuery as pq

from mozillians.common.tests.init import ESTestCase, user


class ModelForms(ESTestCase):

    def test_edit_unavailable_form_field(self):
        newbie_client = self.pending_client
        newbie = self.pending
        # make sure we're at a good starting state
        assert not newbie.get_profile().is_vouched

        edit_profile_url = reverse('profile.edit')
        bad_data = self.data_privacy_fields.copy()
        bad_data.update(dict(full_name='BadLaRue', country='pl',
                             bio='Rides the rails', is_vouched=True))
        bad_edit = newbie_client.post(edit_profile_url, bad_data, follow=True)
        eq_(bad_edit.status_code, 200)

        newbie_profile = bad_edit.context['profile']
        assert not newbie_profile.is_vouched
        eq_(newbie_profile.full_name, bad_data['full_name'])

    def test_username_filled_in(self):
        """The username field should have a type and value."""
        newbie = user(username='sam', email='sam@sam.com')
        url = reverse('profile.edit')
        assert self.client.login(email=newbie.email)
        response = self.client.get(url, follow=True)
        eq_(200, response.status_code)
        doc = pq(response.content)
        field = doc('#id_username')[0]
        eq_('input', field.tag)
        assert 'value' in field.attrib
        eq_('text', field.attrib['type'])
        eq_(newbie.username, field.attrib['value'])
{ "repo_name": "glogiotatidis/mozillians-new", "path": "mozillians/phonebook/tests/test_modelform.py", "copies": "1", "size": "1526", "license": "bsd-3-clause", "hash": -7357921821971927000, "line_mean": 34.488372093, "line_max": 78, "alpha_frac": 0.6356487549, "autogenerated": false, "ratio": 3.5571095571095572, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46927583120095573, "avg_score": null, "num_lines": null }
from funfactory.urlresolvers import reverse
from nose.tools import eq_

from careers.base.tests import TestCase
from careers.careers.tests import PositionFactory as JobvitePositionFactory
from careers.django_workable.tests import PositionFactory as WorkablePositionFactory


class PositionTests(TestCase):
    """Tests static pages for careers"""

    def test_position_case_sensitive_match(self):
        """
        Validate that a position match is returned from a case-sensitive job id
        and it doesn't raise a multiple records error.
        """
        job_id_1 = 'oflWVfwb'
        job_id_2 = 'oFlWVfwB'
        JobvitePositionFactory.create(job_id=job_id_1)
        JobvitePositionFactory.create(job_id=job_id_2)

        url = reverse('careers.position', kwargs={'job_id': job_id_1})
        response = self.client.get(url, follow=True)
        eq_(response.status_code, 200)
        eq_(response.context['position'].job_id, job_id_1)

        url = reverse('careers.position', kwargs={'job_id': job_id_2})
        response = self.client.get(url, follow=True)
        eq_(response.status_code, 200)
        eq_(response.context['position'].job_id, job_id_2)


class WorkablePositionDetailViewTests(TestCase):
    def test_base(self):
        position_1 = WorkablePositionFactory.create(title='bbb')
        position_2 = WorkablePositionFactory.create(category=position_1.category, title='aaa')
        status = self.client.get(
            reverse('careers.workable_position', kwargs={'shortcode': position_1.shortcode}))
        eq_(status.context['positions'], [position_2, position_1])
        eq_(status.context['position'], position_1)
{ "repo_name": "chirilo/lumbergh", "path": "careers/careers/tests/test_views.py", "copies": "2", "size": "1683", "license": "bsd-3-clause", "hash": -7213285663027728000, "line_mean": 42.1538461538, "line_max": 94, "alpha_frac": 0.6720142602, "autogenerated": false, "ratio": 3.7905405405405403, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001139621652257551, "num_lines": 39 }
from funfactory.urlresolvers import reverse from nose.tools import eq_, ok_ from airmozilla.base.tests.testbase import DjangoTestCase from airmozilla.surveys.models import ( Survey, Question, Answer ) class TestSurvey(DjangoTestCase): def _create_survey(self, name='Test survey', active=True): survey = Survey.objects.create(name=name, active=active) return survey def test_render_questions(self): survey = self._create_survey() url = reverse('surveys:load', args=(survey.id,)) # add a question question = Question.objects.create( survey=survey, question={ 'question': 'Fav color?', 'choices': ['Red', 'Green', 'Blue'] } ) # empty questions are ignored Question.objects.create( survey=survey, question={} ) # render the questions response = self.client.get(url) eq_(response.status_code, 200) ok_('type="submit"' not in response.content) self._login() response = self.client.get(url) eq_(response.status_code, 200) ok_('csrfmiddlewaretoken' in response.content) ok_('type="submit"' in response.content) # three choices eq_(response.content.count('name="%s"' % question.id), 3) ok_('Fav color?' in response.content) ok_('Red' in response.content) ok_('Green' in response.content) ok_('Blue' in response.content) def test_submit_response_to_questions(self): survey = self._create_survey() url = reverse('surveys:load', args=(survey.id,)) user = self._login() # add a question question = Question.objects.create( survey=survey, question={ 'question': 'Fav color?', 'choices': ['Red', 'Green', 'Blue'] } ) Question.objects.create( survey=survey, question={ 'question': 'Gender?', 'choices': ['Male', 'Female', 'Mixed'] } ) response = self.client.post(url, { str(question.id): "Green", # note that we don't submit an answer to the second question }) eq_(response.status_code, 302) self.assertRedirects(response, url) answers = Answer.objects.filter( question=question, user=user ) eq_(answers.count(), 1) def test_submit_multiple_times(self): survey = self._create_survey() url = reverse('surveys:load', args=(survey.id,)) user = self._login() # add a question question = Question.objects.create( survey=survey, question={ 'question': 'Fav color?', 'choices': ['Red', 'Green', 'Blue'] } ) response = self.client.post(url, { str(question.id): "Green", # note that we don't submit an answer to the second question }) eq_(response.status_code, 302) self.assertRedirects(response, url) answers = Answer.objects.filter( question=question, user=user ) eq_(answers.count(), 1) answer, = answers eq_(answer.answer['answer'], 'Green') # so far so good # now let's try to submit a different answer response = self.client.post(url, { str(question.id): "Red", # note that we don't submit an answer to the second question }) eq_(response.status_code, 302) self.assertRedirects(response, url) answers = Answer.objects.filter( question=question, user=user ) eq_(answers.count(), 1) answer, = answers eq_(answer.answer['answer'], 'Red') def test_reset_submitted_response_to_questions(self): survey = self._create_survey() url = reverse('surveys:load', args=(survey.id,)) user = self._login() # add a question question = Question.objects.create( survey=survey, question={ 'question': 'Fav color?', 'choices': ['Red', 'Green', 'Blue'] } ) response = self.client.post(url, { str(question.id): "Green", # note that we don't submit an answer to the second question }) eq_(response.status_code, 302) self.assertRedirects(response, url) answers = Answer.objects.filter( question=question, user=user ) eq_(answers.count(), 1) response = self.client.post(url, {'resetmine': True}) 
eq_(response.status_code, 302) answers = Answer.objects.filter( question=question, user=user ) eq_(answers.count(), 0)
{ "repo_name": "anu7495/airmozilla", "path": "airmozilla/surveys/tests/test_views.py", "copies": "3", "size": "4969", "license": "bsd-3-clause", "hash": 1286030123037034200, "line_mean": 30.4493670886, "line_max": 72, "alpha_frac": 0.5347152345, "autogenerated": false, "ratio": 4.324630113141862, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 158 }
from funk.error import FunkyError from funk.util import function_call_str from funk.util import function_call_str_multiple_lines from funk.matchers import to_matcher from .pycompat import iteritems from .util import map_values class InfiniteCallCount(object): def none_remaining(self): return False def decrement(self): pass def is_satisfied(self): return True class IntegerCallCount(object): def __init__(self, count): self._count = count def none_remaining(self): return self._count <= 0 def decrement(self): self._count -= 1 def is_satisfied(self): return self.none_remaining() class Call(object): _arguments_set = False def __init__(self, name, call_count=InfiniteCallCount()): self._name = name self._call_count = call_count self._action = lambda: None self._sequences = [] def has_name(self, name): return self._name == name def accepts(self, args, kwargs, mismatch_description): if self._call_count.none_remaining(): mismatch_description.append("%s [expectation has already been satisfied]" % str(self)) return False if not self._arguments_set: return True def describe_arg(matcher, result): if result.is_match: explanation = "matched" else: explanation = result.explanation return "%s [%s]" % (matcher.describe(), explanation) def describe_kwargs(kwarg_matches): return dict( (key, describe_arg(self._allowed_kwargs[key], result)) for key, result in kwarg_matches ) def describe_mismatch(arg_matches, kwarg_matches): args_desc = map(describe_arg, self._allowed_args, arg_matches) kwargs_desc = describe_kwargs(kwarg_matches) return function_call_str_multiple_lines(self._name, args_desc, kwargs_desc) if len(self._allowed_args) != len(args): mismatch_description.append("%s [wrong number of positional arguments]" % str(self)) return False missing_kwargs = set(self._allowed_kwargs.keys()) - set(kwargs.keys()) if len(missing_kwargs) > 0: mismatch_description.append("%s [missing keyword arguments: %s]" % (str(self), ", ".join(sorted(missing_kwargs)))) return False extra_kwargs = set(kwargs.keys()) - set(self._allowed_kwargs.keys()) if len(extra_kwargs) > 0: mismatch_description.append("%s [unexpected keyword arguments: %s]" % (str(self), ", ".join(extra_kwargs))) return False arg_matches = [ matcher.match(arg) for matcher, arg in zip(self._allowed_args, args) ] kwarg_matches = [ (key, matcher.match(kwargs[key])) for key, matcher in iteritems(self._allowed_kwargs) ] matches = arg_matches + [match for key, match in kwarg_matches] if not all(match.is_match for match in matches): mismatch_description.append(describe_mismatch(arg_matches, kwarg_matches)) return False return True def __call__(self, *args, **kwargs): if self._call_count.none_remaining(): raise FunkyError("Cannot call any more times") if not self.accepts(args, kwargs, []): raise FunkyError("Called with wrong arguments") self._call_count.decrement() for sequence in self._sequences: sequence.add_actual_call(self) return self._action() def with_args(self, *args, **kwargs): self._arguments_set = True self._allowed_args = tuple(map(to_matcher, args)) self._allowed_kwargs = dict([(key, to_matcher(kwargs[key])) for key in kwargs]) return self def returns(self, return_value): self._action = lambda: return_value return self def raises(self, error): def action(): raise error self._action = action return self def in_sequence(self, sequence): self._sequences.append(sequence) sequence.add_expected_call(self) return self def is_satisfied(self): return self._call_count.is_satisfied() def __str__(self): if self._arguments_set: return function_call_str( self._name, 
[arg.describe() for arg in self._allowed_args], map_values(lambda arg: arg.describe(), self._allowed_kwargs), ) return self._name
{ "repo_name": "mwilliamson/funk", "path": "funk/call.py", "copies": "1", "size": "4799", "license": "bsd-2-clause", "hash": -1712673755765671400, "line_mean": 33.2785714286, "line_max": 126, "alpha_frac": 0.5726192957, "autogenerated": false, "ratio": 4.350861287398005, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5423480583098005, "avg_score": null, "num_lines": null }
from funktional.layer import Layer, Dense, GRUH0, StackedGRUH0, \ Embedding, OneHot, clipped_rectify,\ last, softmax3d, params import funktional.context as context from funktional.layer import params import imaginet.task as task from funktional.util import autoassign import funktional.util as util import theano.tensor as T import theano import zipfile import numpy import StringIO import json import cPickle as pickle from theano.tensor.shared_randomstreams import RandomStreams import imaginet.defn.visual as visual from collections import Counter import random import sys class Network: def __init__(self, size_vocab, size_embed, size, size_target): autoassign(locals()) self.Shared = Embedding(self.size_vocab, self.size_embed) self.EncodeV = StackedGRUH0(size_embed, size, depth=1, activation=clipped_rectify) self.EncodeLM = StackedGRUH0(size_embed, size, depth=1, activation=clipped_rectify) self.ToTxt = Dense(size, size_vocab) self.ToImg = Dense(size, size_target) def params(self): return params(self.Shared, self.EncodeV, self.EncodeLM, self.ToTxt, self.ToImg) class Visual(task.Task): def __init__(self, network, config): autoassign(locals()) self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr']) self.inputs = [T.imatrix()] self.target = T.fmatrix() def params(self): return params(self.network.Shared, self.network.EncodeV, self.network.ToImg) def __call__(self, input): return self.network.ToImg(last(self.network.EncodeV(self.network.Shared(input)))) def cost(self, target, prediction): return util.CosineDistance(target, prediction) def args(self, item): return (item['input'], item['target_v']) def _make_representation(self): with context.context(training=False): rep = self.network.EncodeV(self.network.Shared(*self.inputs)) return theano.function(self.inputs, rep) def _make_pile(self): with context.context(training=False): rep = self.network.EncodeV.intermediate(self.network.Shared(*self.inputs)) return theano.function(self.inputs, rep) class LM(task.Task): def __init__(self, network, config): autoassign(locals()) self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr']) self.inputs = [T.imatrix()] self.target = T.imatrix() def params(self): return params(self.network.Shared, self.network.EncodeLM, self.network.ToTxt) def __call__(self, prev): return softmax3d(self.network.ToTxt(self.network.EncodeLM(self.network.Shared(prev)))) def cost(self, target, prediction): oh = OneHot(size_in=self.network.size_vocab) return util.CrossEntropy(oh(target), prediction) def args(self, item): """Choose elements of item to be passed to .loss_test and .train functions.""" return (item['target_prev_t'], item['target_t']) def _make_pile(self): with context.context(training=False): rep = self.network.EncodeLM.intermediate(self.network.Shared(*self.inputs)) return theano.function(self.inputs, rep) class LMVisual(task.Bundle): def __init__(self, data, config, weights=None): self.config = config self.data = data self.batcher = data['batcher'] self.scaler = data['scaler'] self.config['size_vocab'] = self.data['batcher'].mapper.size() self.network = Network(config['size_vocab'], config['size_embed'], config['size'], config['size_target']) self.visual = Visual(self.network, config) self.lm = LM(self.network, config) if weights is not None: assert len(self.network.params())==len(weights) for param, weight in zip(self.params(), weights): param.set_value(weight) self.visual.compile() self.visual.representation = self.visual._make_representation() self.visual.pile = self.visual._make_pile() 
self.lm.compile() self.lm.pile = self.lm._make_pile() def params(self): return self.network.params() def get_config(self): return self.config def get_data(self): return self.data def load(path): """Load data and reconstruct model.""" with zipfile.ZipFile(path,'r') as zf: buf = StringIO.StringIO(zf.read('weights.npy')) weights = numpy.load(buf) config = json.loads(zf.read('config.json')) data = pickle.loads(zf.read('data.pkl')) return LMVisual(data, config, weights=weights) def trainer(model, data, epochs, validate_period, model_path, prob_lm=0.1, runid=''): def valid_loss(): result = dict(lm=[], visual=[]) for item in data.iter_valid_batches(): result['lm'].append(model.lm.loss_test(*model.lm.args(item))) result['visual'].append(model.visual.loss_test(*model.visual.args(item))) return result costs = Counter(dict(cost_v=0.0, N_v=0.0, cost_t=0.0, N_t=0.0)) print "LM: {} parameters".format(count_params(model.lm.params())) print "Vi: {} parameters".format(count_params(model.visual.params())) for epoch in range(1,epochs+1): for _j, item in enumerate(data.iter_train_batches()): j = _j +1 if random.random() <= prob_lm: cost_t = model.lm.train(*model.lm.args(item)) costs += Counter(dict(cost_t=cost_t, N_t=1)) else: cost_v = model.visual.train(*model.visual.args(item)) costs += Counter(dict(cost_v=cost_v, N_v=1)) print epoch, j, j*data.batch_size, "train", \ numpy.divide(costs['cost_v'], costs['N_v']),\ numpy.divide(costs['cost_t'], costs['N_t']) if j % validate_period == 0: result = valid_loss() print epoch, j, 0, "valid", \ numpy.mean(result['visual']),\ numpy.mean(result['lm']) sys.stdout.flush() model.save(path='model.r{}.e{}.zip'.format(runid, epoch)) model.save(path='model.zip') def count_params(params): def product(xs): return reduce(lambda z, x: z*x, xs, 1) return sum((product(param.get_value().shape) for param in params)) def predict_img(model, sents, batch_size=128): """Project sents to the visual space using model. For each sentence returns the predicted vector of visual features. """ inputs = list(model.batcher.mapper.transform(sents)) return numpy.vstack([ model.visual.predict(model.batcher.batch_inp(batch)) for batch in util.grouper(inputs, batch_size) ]) encode_sentences = predict_img
{ "repo_name": "gchrupala/reimaginet", "path": "imaginet/defn/lm_visual_vanilla.py", "copies": "1", "size": "6890", "license": "mit", "hash": 1452277551100720400, "line_mean": 37.2777777778, "line_max": 94, "alpha_frac": 0.6229317852, "autogenerated": false, "ratio": 3.6339662447257384, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4756898029925738, "avg_score": null, "num_lines": null }
from funktional.layer import Layer, Dense, StackedGRU, StackedGRUH0, \ Embedding, OneHot, clipped_rectify, clipped_elu, CosineDistance,\ last, softmax3d, params import funktional.context as context from funktional.layer import params import imaginet.task as task from funktional.util import autoassign import funktional.util as util from funktional.util import steeper_sigmoid, sigmoid, orthogonal, xavier, tanh import theano.tensor as T import theano import zipfile import numpy import StringIO import json import cPickle as pickle from theano.tensor.extra_ops import fill_diagonal class Encoder(Layer): def __init__(self, size_vocab, size_embed, size, depth, residual=False, activation=clipped_rectify): autoassign(locals()) self.Embed = OneHot(self.size_vocab) self.GRU = StackedGRUH0(self.size_vocab, self.size, self.depth, activation=self.activation, residual=self.residual) def params(self): return params(self.Embed, self.GRU) def __call__(self, input): return self.GRU(self.Embed(input)) class Visual(task.Task): def __init__(self, config): autoassign(locals()) self.margin_size = config.get('margin_size', 0.2) self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr']) self.Encode = Encoder(config['size_vocab'], config['size_embed'], config['size'], config['depth'], activation=eval(config.get('activation', 'clipped_rectify')), residual=config.get('residual',False)) self.ImgEncoder = Dense(config['size_target'], config['size'], init=eval(config.get('init_img', 'orthogonal'))) self.inputs = [T.imatrix()] self.target = T.fmatrix() def compile(self): task.Task.compile(self) self.encode_images = self._make_encode_images() def params(self): return params(self.Encode, self.ImgEncoder) def __call__(self, input): return util.l2norm(last(self.Encode(input))) def cost(self, i, s_encoded): if self.config['contrastive']: i_encoded = util.l2norm(self.ImgEncoder(i)) return self.contrastive(i_encoded, s_encoded, margin=self.margin_size) else: raise NotImplementedError def contrastive(self, i, s, margin=0.2): # i: (fixed) image embedding, # s: sentence embedding errors = - util.cosine_matrix(i, s) diagonal = errors.diagonal() # compare every diagonal score to scores in its column (all contrastive images for each sentence) cost_s = T.maximum(0, margin - errors + diagonal) # all contrastive sentences for each image cost_i = T.maximum(0, margin - errors + diagonal.reshape((-1, 1))) cost_tot = cost_s + cost_i # clear diagonals cost_tot = fill_diagonal(cost_tot, 0) return cost_tot.mean() def Margin(self, U, V, dist=CosineDistance, d=1.0): V_ = (V[self.srng.permutation(n=T.shape(V)[0], size=(1,)),]).reshape(T.shape(V)) # A bit silly making it nondet return T.maximum(0.0, dist(U, V) - dist(U, V_) + d) def args(self, item): return (item['input'], item['target_v']) def _make_representation(self): with context.context(training=False): rep = self.Encode(*self.inputs) return theano.function(self.inputs, rep) def _make_pile(self): with context.context(training=False): rep = self.Encode.GRU.intermediate(self.Encode.Embed(*self.inputs)) return theano.function(self.inputs, rep) def _make_encode_images(self): images = T.fmatrix() with context.context(training=False): rep = util.l2norm(self.ImgEncoder(images)) return theano.function([images], rep) def encode_sentences(model, sents, batch_size=128): """Project sents to the joint space using model. For each sentence returns a vector. 
""" inputs = list(model.batcher.mapper.transform(sents)) return numpy.vstack([ model.task.predict(model.batcher.batch_inp(batch)) for batch in util.grouper(inputs, batch_size) ]) def encode_images(model, imgs, batch_size=128): """Project imgs to the joint space using model. """ return numpy.vstack([ model.task.encode_images(batch) for batch in util.grouper(imgs, batch_size) ]) def predict_img(model, sents, batch_size=128): """Project sents to the visual space using model. For each sentence returns the predicted vector of visual features. """ inputs = list(model.batcher.mapper.transform(sents)) return numpy.vstack([ model.task.predict(model.batcher.batch_inp(batch)) for batch in util.grouper(inputs, batch_size) ]) def embeddings(model): return model.task.Encode.Embed.params()[0].get_value() def symbols(model): return model.batcher.mapper.ids.decoder
{ "repo_name": "gchrupala/reimaginet", "path": "imaginet/defn/visual1h2.py", "copies": "1", "size": "5281", "license": "mit", "hash": -1008856619890122800, "line_mean": 37.5474452555, "line_max": 120, "alpha_frac": 0.609354289, "autogenerated": false, "ratio": 3.832365747460087, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49417200364600866, "avg_score": null, "num_lines": null }
from funktional.layer import Layer, Dense, StackedGRU, StackedGRUH0, \
    Embedding, OneHot, clipped_rectify, CrossEntropy, \
    last, softmax3d, params
import funktional.context as context
from funktional.layer import params
import imaginet.task
from funktional.util import autoassign
import funktional.util as util
import theano.tensor as T
import theano
import zipfile
import numpy
import StringIO
import json
import cPickle as pickle


class Decoder(Layer):

    def __init__(self, size_vocab, size_embed, size, depth,
                 residual=False, activation=clipped_rectify):
        autoassign(locals())
        self.Embed = OneHot(self.size_vocab)
        self.GRU = StackedGRUH0(self.size_vocab, self.size, self.depth,
                                activation=self.activation, residual=self.residual)

    def params(self):
        return params(self.Embed, self.GRU)

    def __call__(self, out_prev):
        return self.GRU(self.Embed(out_prev))


class LM(imaginet.task.Task):

    def __init__(self, config):
        autoassign(locals())
        self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
        self.Decode = Decoder(config['size_vocab'], config['size_embed'],
                              config['size'], config['depth'],
                              activation=eval(config.get('activation', 'clipped_rectify')),
                              residual=config.get('residual', False))
        self.ToTxt = Dense(config['size'], config['size_vocab'])
        self.inputs = [T.imatrix()]
        self.target = T.imatrix()

    def params(self):
        return params(self.Decode, self.ToTxt)

    def __call__(self, out_prev):
        return softmax3d(self.ToTxt(self.Decode(out_prev)))

    def cost(self, target, prediction):
        oh = OneHot(size_in=self.config['size_vocab'])
        return CrossEntropy(oh(target), prediction)

    def args(self, item):
        """Choose elements of item to be passed to .loss_test and .train functions."""
        inp, target_v, out_prev, target_t = item
        return (out_prev, target_t)

    def _make_representation(self):
        with context.context(training=False):
            rep = self.Decode(*self.inputs)
        return theano.function(self.inputs, rep)

    def _make_pile(self):
        with context.context(training=False):
            rep = self.Decode.GRU.intermediate(self.Decode.Embed(*self.inputs))
        return theano.function(self.inputs, rep)


def symbols(model):
    return model.batcher.mapper.ids.decoder
{ "repo_name": "gchrupala/reimaginet", "path": "imaginet/defn/lm1h.py", "copies": "1", "size": "2629", "license": "mit", "hash": 8305575157624883000, "line_mean": 36.5571428571, "line_max": 90, "alpha_frac": 0.6139216432, "autogenerated": false, "ratio": 3.877581120943953, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9833876809255786, "avg_score": 0.031525190977633344, "num_lines": 70 }
from funktional.layer import Layer, Dense, Sum, \
    Embedding, OneHot, CosineDistance, \
    last, softmax3d, params
import funktional.context as context
from funktional.layer import params
import imaginet.task as task
from funktional.util import autoassign
import funktional.util as util
import theano.tensor as T
import theano
import zipfile
import numpy
import StringIO
import json
import cPickle as pickle
from theano.tensor.shared_randomstreams import RandomStreams


class Encoder(Layer):

    def __init__(self, size_vocab, size_embed):
        autoassign(locals())
        self.Embed = Embedding(self.size_vocab, self.size_embed)
        self.Sum = Sum(self.size_embed)

    def params(self):
        return params(self.Embed, self.Sum)

    def __call__(self, input):
        return self.Sum(self.Embed(input))


class VectorSum(task.Task):

    def __init__(self, config):
        autoassign(locals())
        self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
        self.Encode = Encoder(config['size_vocab'], config['size_embed'])
        self.ToImg = Dense(config['size_embed'], config['size_target'])
        self.inputs = [T.imatrix()]
        self.target = T.fmatrix()

    def params(self):
        return params(self.Encode, self.ToImg)

    def __call__(self, input):
        # Using last because Sum returns the whole seq of partial sums
        # to be compatible with recurrent layers.
        return self.ToImg(last(self.Encode(input)))

    def cost(self, target, prediction):
        return CosineDistance(target, prediction)

    def args(self, item):
        return (item['input'], item['target_v'])

    def _make_representation(self):
        with context.context(training=False):
            rep = self.Encode(*self.inputs)
        return theano.function(self.inputs, rep)

    def _make_pile(self):
        with context.context(training=False):
            # no layers, insert dimension for compatibility with stacked
            rep = self.Encode(*self.inputs).dimshuffle([0, 1, 'x', 2])
        return theano.function(self.inputs, rep)


def predict_img(model, sents, batch_size=128):
    """Project sents to the visual space using model.

    For each sentence returns the predicted vector of visual features.
    """
    inputs = list(model.batcher.mapper.transform(sents))
    return numpy.vstack([model.task.predict(model.batcher.batch_inp(batch))
                         for batch in util.grouper(inputs, batch_size)])


encode_sentences = predict_img


def embeddings(model):
    return model.task.Encode.Embed.params()[0].get_value()


def symbols(model):
    return model.batcher.mapper.ids.decoder
{ "repo_name": "gchrupala/reimaginet", "path": "imaginet/defn/vectorsum.py", "copies": "1", "size": "2751", "license": "mit", "hash": 4434030743756264400, "line_mean": 30.988372093, "line_max": 78, "alpha_frac": 0.6513994911, "autogenerated": false, "ratio": 3.8801128349788434, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.004687102774961032, "num_lines": 86 }
from funktional.util import grouper, autoassign import imaginet.task import imaginet.defn.visual as visual import imaginet.defn.lm as lm import numpy import imaginet.data_provider as dp from sklearn.preprocessing import StandardScaler from imaginet.simple_data import SimpleData, characters, phonemes import sys import os import os.path import cPickle as pickle import gzip from evaluate import ranking import random from collections import Counter from funktional.util import clipped_rectify def train(dataset='coco', datapath='.', model_path='.', task=visual.Visual, residual=False, contrastive=False, margin_size=1.0, tokenize=phonemes, max_norm=None, min_df=10, scale=True, epochs=1, batch_size=64, lr=0.0002, shuffle=True, size_embed=128, size_hidden=512, size_vocab=None, val_vocab=False, init_img='xavier', depth=2, activation='clipped_rectify', validate_period=100, limit=None, seed=None): # sys.setrecursionlimit(50000) # needed for pickling models if seed is not None: random.seed(seed) numpy.random.seed(seed) prov = dp.getDataProvider(dataset, root=datapath) data = SimpleData(prov, tokenize=tokenize, min_df=min_df, scale=scale, val_vocab=val_vocab, batch_size=batch_size, shuffle=shuffle, limit=limit) config = dict(size_embed=size_embed, size=size_hidden, depth=depth, size_target=4096, max_norm=max_norm, lr=lr, size_vocab=size_vocab, residual=residual, activation=activation, contrastive=contrastive, margin_size=margin_size, init_img=init_img) model = imaginet.task.GenericBundle(dict(scaler=data.scaler, batcher=data.batcher), config, task) trainer(model, data, epochs, validate_period, model_path) def trainer(model, data, epochs, validate_period, model_path): def valid_loss(): result = [] for item in data.iter_valid_batches(): result.append(model.task.loss_test(*model.task.args(item))) return result for epoch in range(1, epochs + 1): print len(model.task.params()) costs = Counter() for _j, item in enumerate(data.iter_train_batches()): j = _j + 1 cost = model.task.train(*model.task.args(item)) costs += Counter({'cost':cost, 'N':1}) print epoch, j, j*data.batch_size, "train", "".join([str(costs['cost']/costs['N'])]) if j % validate_period == 0: print epoch, j, 0, "valid", "".join([str(numpy.mean(valid_loss()))]) sys.stdout.flush() model.save(path='model.{0}.zip'.format(epoch)) model.save(path='model.zip') def evaluate(dataset='coco', datapath='.', model_path='model.zip', batch_size=128, task=visual.Visual, tokenize=phonemes, split='val' ): model = imaginet.task.load(path=model_path) task = model.task scaler = model.scaler batcher = model.batcher mapper = batcher.mapper prov = dp.getDataProvider(dataset, root=datapath) sents_tok = [ tokenize(sent) for sent in prov.iterSentences(split=split) ] predictions = visual.predict_img(model, sents_tok, batch_size=batch_size) sents = list(prov.iterSentences(split=split)) images = list(prov.iterImages(split=split)) img_fs = list(scaler.transform([ image['feat'] for image in images ])) correct_img = numpy.array([ [ sents[i]['imgid']==images[j]['imgid'] for j in range(len(images)) ] for i in range(len(sents)) ] ) return ranking(img_fs, predictions, correct_img, ns=(1,5,10), exclude_self=False)
{ "repo_name": "gchrupala/reimaginet", "path": "imaginet/commands.py", "copies": "1", "size": "4020", "license": "mit", "hash": 6413951863716711000, "line_mean": 38.801980198, "line_max": 103, "alpha_frac": 0.5947761194, "autogenerated": false, "ratio": 3.7781954887218046, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48729716081218044, "avg_score": null, "num_lines": null }
from funkybomb import Template
from pygments.formatters import HtmlFormatter

from templates.util import row_cols


tmpl = Template()
tmpl + '<!DOCTYPE html>'
html = tmpl.html

# head
head = html.head
head.meta(charset='utf-8')

styles = (
    "https://maxcdn.bootstrapcdn.com/bootstrap/"
    "4.0.0-alpha.6/css/bootstrap.min.css",
)
for style in styles:
    head.link(rel="stylesheet", href=style, crossorigin="anonymous")

head.style + HtmlFormatter(style='colorful').get_style_defs('.highlight')
head.style + '''
.nav-links ul { margin-left: 0;}
.nav-links ul ul { margin-left: 1.2rem;}
.highlight { padding: 1.4rem; background-color: #f2f2f2; }
.highlight pre { margin: 0 }
'''

scripts = (
    "https://code.jquery.com/jquery-3.1.1.slim.min.js",
    "https://cdnjs.cloudflare.com/ajax/libs/tether/1.4.0/js/tether.min.js",
    "https://maxcdn.bootstrapcdn.com/bootstrap/"
    "4.0.0-alpha.6/js/bootstrap.min.js",
)
for script in scripts:
    head.script(src=script, crossorigin="anonymous")

# body - grid layout
hb = html.body
container = hb.div(_class='container')
main = row_cols(container)
main + Template('base main')

# components - footer
footer = row_cols(main)
div = footer.div(id='footer')
div + Template('base footer')
{ "repo_name": "glennyonemitsu/funkybomb", "path": "website/templates/base.py", "copies": "1", "size": "1238", "license": "apache-2.0", "hash": 364110395632507970, "line_mean": 22.8076923077, "line_max": 75, "alpha_frac": 0.6938610662, "autogenerated": false, "ratio": 2.9129411764705884, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9106802242670589, "avg_score": 0, "num_lines": 52 }
from funkybomb import Template, Text from application.util import route from templates import documentation from templates.util import ( header, p, show_html, show_python, show_text, template ) @route('/docs/basics') @template(documentation.tmpl) async def docs_basics_home(req): t = Template() t + p('Coming soon.') return { 'content': t, 'headline': Text('Basics') } @route('/docs/basics/installation') @template(documentation.tmpl) async def docs_basics_installation(req): t = Template() t + p('Funky Bomb is available via PyPi and install is simple') t + show_text('pip install funky-bomb') return { 'content': t, 'headline': Text('Installation') } @route('/docs/basics/syntax') @template(documentation.tmpl) async def docs_basics_syntax(req): t = Template() t + header('Creating the HMTL tree') t + p( 'Funky Bomb aims to recreate the HTML tree with Python syntax. ' 'Starting with an empty template.' ) t + show_python( ''' from funkybomb import Template tmpl = Template() ''' ) t + p( 'Currently there is no tag inside this template. To start adding ' 'HTML tags, just use attributes like you would in Python. Once you ' 'create an attribute, the tag is part of the HMTL tree. We can see the ' 'output of the HTML using the render function.' ) t + show_python( ''' from funkybomb import render, Template tmpl = Template() tmpl.p render(tmpl) # output: <p></p> ''' ) t + p('To add text to a tag, we "add a string" to it.') t + show_python( ''' from funkybomb import render, Template tmpl = Template() tmpl.p # continuing from previous code sample tmpl.p + 'Hello, World!' render(tmpl) # output: <p></p><p>Hello, World!</p> ''' ) t + header('Controlling where the HTML tags are made') t + p( 'You might notice is there is a second <p> tag. This is because funky ' 'bomb always appends tags with new attribute access . But if ' 'we want to manipulate tags at all levels we can use variables to move ' 'around in the tag tree. This is possible because creating tags with ' 'attributes also returns the tag node object.' ) t + show_python( ''' from funkybomb import render, Template tmpl = Template() ul = tmpl.ul for line in ('foo', 'bar', 'baz'): ul.li + line render(tmpl) ''' ) t + p('The HTML output') t + show_html( ''' <ul> <li>foo</li> <li>bar</li> <li>baz</li> </ul> ''' ) t + p( 'Note even though we call render on the tmpl node, it held ' 'references to the ul node. Adding tags to ul adds to the tree in tmpl.' ) t + header('Tag attributes', 3) t + p( 'To add HTML attributes to your tag just "call" your attribute on ' 'creation time with the key/value pairs. To create "class" attributes ' 'use the special "_class" key. This is to get around "class" being a ' 'Python keyword.' ) t + show_python( ''' from funkybomb import render, Template tmpl = Template() tmpl.p(_class='greeting', foo='bar') + 'Hello, World!' render(tmpl) # output: <p class='greeting' foo='bar'>Hello, World!</p> ''' ) t + header('Explicit Text Nodes', 3) t + p( 'Using the "+ \'string value\'" notation is a convenient way to add ' 'text to your HTML. Under the hood this creates a Text node similar to ' 'the HTML DOM. You can create these explicitly as well.' ) t + show_python( ''' from funkybomb import Template, Text tmpl = Template() tmpl.p + 'Hello, World!' tmpl.p + Text('Hello, World!') # functionally the same ''' ) t + header('Explicit Tag Nodes', 3) t + p( 'Tag creation using the Python dot attribute name syntax also is a ' 'convenience shortcut that creates Tag objects. 
Just like the Text ' 'nodes you can create these explicitly along with tag attributes.' ) t + show_python( ''' from funkybomb import Tag, Template greeting = Tag('p', _class='greeting') + 'Hello, World!' tmpl = Template() tmpl + greeting ''' ) t + p('Using the Tag objects allows you to embed tags inside the tree.') t + show_python( '''from funkybomb import Tag\n''' '''greeting = Tag('p', _class='greeting') + ''' ''''Hello, ' + (Tag('em') + 'World!')''' ) t + p( 'This might be a bit confusing, so let\'s unravel this step by step.' ) t + show_python( ''' from funkybomb import Tag greeting = Tag('p', _class='greeting') greeting + 'Hello, ' subject = Tag('em') + 'World!' # this is encapsulated inside the () greeting + subject ''' ) return { 'content': t, 'headline': Text('Syntax') } @route('/docs/basics/templating') @template(documentation.tmpl) async def docs_basics_templating(req): t = Template() t + p( 'Template nodes are important building blocks to make websites quickly ' 'and efficiently. Functionally you can think of them as any other HTML ' 'node but that doesn\'t have any output during rendering.' ) t + show_python( ''' from funkybomb import render, Template tmpl = Template() tmpl + Template() # ... tmpl + Template() render(tmpl) # outputs nothing ''' ) t + p( 'Since they have no output side effects, one basic way to use them is ' 'as a node tree container.' ) t + show_python( ''' from funkybomb import render, Template tmpl = Template() container = tmpl.div greeting = Template() greeting.p + 'Hello, World!' question = Template() question.p + 'How are you feeling today?' answer = Template() answer.p + 'I am feeling alright' greeting + question greeting + answer container + greeting render(tmpl) ''' ) t + p('The HTML output') t + show_html( ''' <div> <p>Hello, World!</p> <p>How are you feeling today?</p> <p>I am feeling alright</p> </div> ''' ) t + header('Template blocks', 2) t + p( 'All good templating systems need a way to inject data into specific ' 'sections of the output. We can assign names to the Template node ' 'objects on creation time. These template nodes\' children are treated ' 'as the default content.' ) t + show_python( ''' from funkybomb import render, Template, Tag tmpl = Template() default = Template('replace me') default.p + 'This is the default content' tmpl + default render(tmpl) # <p>This is the default content</p> context = {'replace me': Tag('h1') + 'Hello'} render(tmpl, context=context) # <h1>Hello</h1> ''' ) t + p( 'Named Template nodes can be nested and replaced with the single ' 'context object.' ) t + show_python( ''' from funkybomb import render, Template, Tag tmpl = Template() default = Template('replace me') default.p + 'This is the default content' inner_tmpl = Template('inner template') inner_tmpl.p + 'Another template to replace' default + inner_tmpl # nesting template tmpl + default context = {'inner template': Tag('h1') + 'Hello'} render(tmpl, context=context) ''' ) t + show_html( ''' <p>This is the default content</p> <h1>Hello</h1> ''' ) t + header('Freezing templates', 2) t + p( 'Using templates can be a bit dangerous because in Python, any changes ' 'to a node object will remain intact. This means templates can change ' 'over the running lifetime of a webserver.', 'Funky Bomb allows you to "freeze" your base templates so you can ' 'reuse them with different render contexts without side effects.' ) t + show_python( ''' from funkybomb import freeze, Template tmpl = Template() tmpl.p + 'Hello, World!' 
freeze(tmpl) tmpl.p + 'This will not be allowed' ''' ) t + header('Replacing templates', 2) t + p( 'Sometimes freezing might not be enough. In some cases base templates ' 'are just building blocks for other templates where freezing and using ' 'contexts are not enough. To permanently replace templates use the ' 'dictionary style item assignment syntax.', 'A common pattern for reuse is to do a deepcopy of the templates, then ' 'replacing anything inside. More details on this is available in the ' 'common patterns section.' ) t + show_python( ''' from copy import deepcopy from funkybomb import render, Template, Text base_tmpl = Template() base_tmpl.h1 + Template('base content') tmpl = deepcopy(base_tmpl) tmpl['base content'] = Text('headline') render(tmpl) # output: <h1>headline</h1> tmpl['base content'] = Text('another headline') render(tmpl) # output (notice no change): <h1>headline</h1> ''' ) return { 'content': t, 'headline': Text('Templating') } @route('/docs/basics/utilities') @template(documentation.tmpl) async def docs_basics_utilities(req): t = Template() t + p('Coming soon.') return { 'content': t, 'headline': Text('Utilities') }
{ "repo_name": "glennyonemitsu/funkybomb", "path": "website/handlers/docs/basics.py", "copies": "1", "size": "10136", "license": "apache-2.0", "hash": 3417877400902194700, "line_mean": 28.1264367816, "line_max": 80, "alpha_frac": 0.5634372534, "autogenerated": false, "ratio": 3.98114689709348, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00035476089115935857, "num_lines": 348 }
from funkybomb import Template, Text from application.util import route, url from templates import documentation from templates.util import row_cols, show_python, show_html, template @route('/') @template(documentation.tmpl) async def home(req): followups = [] followups.append({ 'header': 'It is easy to use', 'content': ( 'Funky Bomb has a small set of rules to build out DOM-like ' 'structures and reusable templates with native Python.' ), 'links': ( ('Learn more:',), ('Syntax', '/docs/basics/syntax'), ('Templating', '/docs/basics/templating'), ('Utilities', '/docs/basics/utilities'), ) }) followups.append({ 'header': 'Use Python syntax and patterns', 'content': ( 'Use normal programming patterns to build abstractions and ' 'construct more advanced HTML with the power of Python.' ), 'links': ( ('Common patterns:',), ('Abstraction', '/docs/patterns/abstraction'), ('Composition', '/docs/patterns/composition'), ('Reusability', '/docs/patterns/reusability'), ) }) followups.append({ 'header': 'Easy integration', 'content': ( 'Any web framework that uses strings for serving HTML can have ' 'Funky Bomb integrated, since Funky Bomb outputs HTML strings ' 'itself.' ), 'links': ( ('Examples:',), ('Flask', '/docs/integrations/flask'), ) }) example_funky = show_python(''' from funkybomb import render, Template from models import user_model tmpl = Template() table = tmpl.table for user in user_model.get_all(): row = table.tr row.td + user.first_name row.td + user.last_name print(render(tmpl)) ''') example_html = show_html(''' <table> <tr> <td>John</td> <td>Doe</td> </tr> <tr> <td>Jane</td> <td>Doe</td> </tr> </table> ''') content = Template() pitch_python, pitch_html = row_cols(content, 6, 6) pitch_python.p(_class='h5') + 'Use Native Python' pitch_python + example_funky pitch_html.p(_class='h5') + 'Create HTML Pages' pitch_html + example_html fu = row_cols(content) fu.p(_class='lead mt-5 mb-5') + \ 'That is it! No other HTML template or code involved.' for item in followups: fu.p(_class='h4 mt-5') + item['header'] fu.p + item['content'] fu_links = fu.p for i, link in enumerate(item['links']): if i == 0: fu_links + (link[0] + ' ') else: fu_links.a(href=url(link[1])) + link[0] if i < (len(item['links']) - 1): fu_links + ', ' return { 'content': content, 'headline': Text('Funky Bomb') } @route('/docs') @template(documentation.tmpl) async def docs_home(req): tmpl = Template() tmpl.p + 'Coming soon' return { 'content': tmpl, 'headline': Text('Docs') }
{ "repo_name": "glennyonemitsu/funkybomb", "path": "website/handlers/general.py", "copies": "1", "size": "3228", "license": "apache-2.0", "hash": -8437897728952471000, "line_mean": 25.0322580645, "line_max": 76, "alpha_frac": 0.5263320942, "autogenerated": false, "ratio": 3.829181494661922, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48555135888619216, "avg_score": null, "num_lines": null }
from fun_views.patterns.detail import detail_render_pattern
from fun_views.views.utils import (get_context_base, make_base_view,
                                   not_set_get_obj, not_set_get_template_name,
                                   prefer_func, prefer_literal,
                                   render_response_base)

detail_render_base = make_base_view(detail_render_pattern)


def detail_render(obj=None,
                  get_obj=not_set_get_obj,
                  obj_context_name='obj',
                  get_obj_context_name=None,
                  get_context=get_context_base,
                  template_name=None,
                  get_template_name=not_set_get_template_name,
                  render_response=render_response_base):
    """
    allow literal values API on top of `template_render_base`, so not every
    param needs to be a function

    :param str template_name:
    :param Callable[[ReqData, Dict], str] get_template_name:
    :param Any obj:
    :param Callable[[ReqData], Any] get_obj:
    :param str obj_context_name:
    :param Callable[[ReqData, Dict], str] get_obj_context_name:
    :param Callable[[ReqData, Dict], Dict] get_context:
    :param Callable[[ReqData, str, Dict], HttpResponse] render_response:
    """
    return detail_render_base(
        prefer_literal(obj, get_obj),
        prefer_func(obj_context_name, get_obj_context_name),
        get_context,
        prefer_literal(template_name, get_template_name),
        render_response
    )
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/views/generic/detail.py", "copies": "1", "size": "1502", "license": "mit", "hash": -4887617033529311000, "line_mean": 39.5945945946, "line_max": 81, "alpha_frac": 0.5985352863, "autogenerated": false, "ratio": 3.8911917098445596, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49897269961445595, "avg_score": null, "num_lines": null }
from fun_views.patterns.detail import detail_render_pattern

from tests.utils.defaults_for_tests import (default_context,
                                            default_obj_context_name,
                                            default_response,
                                            default_template_name,
                                            DefaultObj)


def test_functions_passed_on_and_context_correct():
    this_object = DefaultObj()
    this_base_context = {default_obj_context_name: this_object}

    def this_get_template_name(context):
        assert context == default_context()
        return default_template_name

    def this_get_obj():
        return this_object

    def this_get_obj_context_name(obj):
        assert obj == this_object
        return default_obj_context_name

    def this_get_context(**base_context):
        assert base_context == this_base_context
        return default_context()

    def this_render_response(template_name, context):
        assert template_name == default_template_name
        assert context == default_context()
        return default_response

    response = detail_render_pattern(
        this_get_obj,
        this_get_obj_context_name,
        this_get_context,
        this_get_template_name,
        this_render_response
    )

    assert response == default_response
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/patterns/test_detail.py", "copies": "1", "size": "1311", "license": "mit", "hash": -5494173992374197000, "line_mean": 31.775, "line_max": 78, "alpha_frac": 0.6094584287, "autogenerated": false, "ratio": 4.429054054054054, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5538512482754054, "avg_score": null, "num_lines": null }
from fun_views.patterns.form.process import form_process_pattern
from fun_views.utils import all_args_callable
from functools import partial


@all_args_callable
def update_process_pattern(get_obj,
                           get_form_class,
                           populate_form,
                           get_validity,
                           process_valid,
                           get_success_url,
                           render_response_valid,
                           get_obj_context_name,
                           get_form_context_name,
                           get_context,
                           get_template_name,
                           render_response_invalid):
    """
    Process form data to update an object

    :param Callable[[], type] get_form_class:
    :param Callable[[Form, Any], Form] populate_form:
    :param Callable[[Form, Any], bool] get_validity: function to check a form's validity
    :param Callable[[Form], Any] process_valid: should return an HttpResponse, usually a redirect
    :param Callable[[Form], HttpResponse] get_success_url: returns the url for redirect on success
    :param Callable[[Form, Any, str], HttpResponse] render_response_valid:
    :param Callable[[Form], str] get_template_name:
    :param Callable[[], Model] get_obj:
    :param Callable[[Any], str] get_obj_context_name:
    :param Callable[[type], str] get_form_context_name:
    :param Callable[[Dict], Dict] get_context:
    :param Callable[[str, Dict], HttpResponse] render_response_invalid:
    """
    obj = get_obj()
    obj_context_name = get_obj_context_name(obj)

    base_context_dict = {}
    if obj_context_name:
        base_context_dict[obj_context_name] = obj

    return form_process_pattern(partial(get_form_class, obj=obj),
                                partial(populate_form, obj=obj),
                                partial(get_validity, obj=obj),
                                partial(process_valid, obj=obj),
                                get_success_url,
                                render_response_valid,
                                get_form_context_name,
                                partial(get_context, **base_context_dict),
                                get_template_name,
                                render_response_invalid)
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/patterns/update/process.py", "copies": "1", "size": "2327", "license": "mit", "hash": 7954923082879624000, "line_mean": 41.3090909091, "line_max": 74, "alpha_frac": 0.5371723249, "autogenerated": false, "ratio": 4.598814229249012, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5635986554149013, "avg_score": null, "num_lines": null }
from fun_views.patterns.form.process import form_process_pattern

from tests.utils.defaults_for_tests import (default_context,
                                            default_form_class,
                                            default_form_context_name,
                                            default_response_invalid,
                                            default_response_valid,
                                            default_template_name,
                                            default_url)

this_form = default_form_class()
this_saved_obj = ['some saved object1', 'someobj2']


def test_form_process_pattern():
    for this_validity in (True, False):

        def this_get_form_class():
            return default_form_class

        def this_populate_form(form_class):
            assert form_class == default_form_class
            return this_form

        def this_get_validity(form):
            assert form == this_form
            return this_validity

        def this_process_valid(form):
            assert form == this_form
            return this_saved_obj

        def this_render_response_valid(saved_obj, success_url):
            assert saved_obj == this_saved_obj
            assert success_url == default_url
            return default_response_valid

        def this_get_success_url(saved_obj):
            assert saved_obj == this_saved_obj
            return default_url

        def this_get_form_context_name(form):
            assert form == this_form
            return default_form_context_name

        def this_get_context(**base_context):
            assert base_context == {
                default_form_context_name: this_form,
            }
            return default_context()

        def this_get_template_name(context):
            assert context == default_context()
            return default_template_name

        def this_render_response_invalid(template_name, context):
            assert template_name == default_template_name
            assert context == default_context()
            return default_response_invalid

        response = form_process_pattern(
            this_get_form_class,
            this_populate_form,
            this_get_validity,
            this_process_valid,
            this_get_success_url,
            this_render_response_valid,
            this_get_form_context_name,
            this_get_context,
            this_get_template_name,
            this_render_response_invalid
        )

        # parenthesized so the conditional picks the expected response,
        # rather than binding to the comparison
        assert response == (default_response_valid if this_validity
                            else default_response_invalid)
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/patterns/test_form_process.py", "copies": "1", "size": "2552", "license": "mit", "hash": -7828785772508898000, "line_mean": 33.9589041096, "line_max": 96, "alpha_frac": 0.5568181818, "autogenerated": false, "ratio": 4.682568807339449, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5739386989139449, "avg_score": null, "num_lines": null }
from fun_views.patterns.form.render import form_render_pattern
from fun_views.utils import all_args_callable
from functools import partial


@all_args_callable
def update_render_pattern(get_obj, get_form_class, init_form,
                          get_obj_context_name, get_form_context_name,
                          get_context, get_template_name, render_response):
    """
    :param Callable[[], Model] get_obj:
    :param Callable[[Any], type] get_form_class:
    :param Callable[[type, Any], Form] init_form:
    :param Callable[[Any], str] get_obj_context_name:
    :param Callable[[Form, Any], str] get_form_context_name:
    :param Callable[[Dict], Dict] get_context:
    :param Callable[[Dict], str] get_template_name:
    :param Callable[[str, Dict], HttpResponse] render_response:
    """
    obj = get_obj()

    obj_context_name = get_obj_context_name(obj)

    base_context_dict = {}
    if obj_context_name:
        base_context_dict[obj_context_name] = obj

    return form_render_pattern(partial(get_form_class, obj=obj),
                               partial(init_form, obj=obj),
                               partial(get_form_context_name, obj=obj),
                               partial(get_context, **base_context_dict),
                               get_template_name,
                               render_response)
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/patterns/update/render.py", "copies": "1", "size": "1467", "license": "mit", "hash": 5539214302193014000, "line_mean": 37.6052631579, "line_max": 73, "alpha_frac": 0.5405589639, "autogenerated": false, "ratio": 4.191428571428571, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 38 }
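The render-side pattern composes the same way; as before, the record dict and DummyForm below are hypothetical stand-ins used only to show the call shape.

# Hypothetical wiring of update_render_pattern with plain callables.
from fun_views.patterns.update.render import update_render_pattern

record = {'pk': 7, 'title': 'current title'}


class DummyForm(object):
    def __init__(self, obj):
        self.initial = obj


response = update_render_pattern(
    lambda: record,                                            # get_obj
    lambda obj: DummyForm,                                     # get_form_class
    lambda form_class, obj: form_class(obj),                   # init_form
    lambda obj: 'obj',                                         # get_obj_context_name
    lambda form, obj: 'form',                                  # get_form_context_name
    lambda **context: sorted(context),                         # get_context
    lambda context: 'records/edit.html',                       # get_template_name
    lambda template_name, context: (template_name, context),   # render_response
)
assert response == ('records/edit.html', ['form', 'obj'])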
from fun_views.patterns.form.render import form_render_pattern from tests.utils.defaults_for_tests import (default_context, default_form, default_form_class, default_form_context_name, default_response, default_template_name) def test_form_render_pattern(): def this_get_form_class(): return default_form_class def this_init_form(form_class): assert form_class == default_form_class return default_form def this_get_template_name(context): assert context == default_context() return default_template_name def this_get_form_context_name(form): assert isinstance(form, default_form_class) return default_form_context_name def this_get_context(**base_context): assert base_context == { default_form_context_name: default_form, } return default_context() def this_render_response(template_name, context): assert template_name == default_template_name assert context == default_context() return default_response response = form_render_pattern( this_get_form_class, this_init_form, this_get_form_context_name, this_get_context, this_get_template_name, this_render_response ) assert response == default_response
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/patterns/test_form_render.py", "copies": "1", "size": "1490", "license": "mit", "hash": -5756605697192756000, "line_mean": 32.1111111111, "line_max": 74, "alpha_frac": 0.5872483221, "autogenerated": false, "ratio": 4.52887537993921, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 45 }
from fun_views.patterns.template import template_render_pattern from fun_views.utils import all_args_callable from functools import partial @all_args_callable def form_render_pattern(get_form_class, init_form, get_form_context_name, get_context, get_template_name, render_response): """ :param Callable[[Dict], str] get_template_name: :param Callable[[], type] get_form_class: :param Callable[[type], Form] init_form: :param Callable[[Form], str] get_form_context_name: :param Callable[[Dict], Dict] get_context: :param Callable[[str, Dict], HttpResponse] render_response: :rtype: HttpResponse """ form = init_form(get_form_class()) base_context_dict = {} form_key = get_form_context_name(form) if form_key: base_context_dict[form_key] = form get_context_partial = partial(get_context, **base_context_dict) return template_render_pattern( get_context_partial, get_template_name, render_response )
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/patterns/form/render.py", "copies": "1", "size": "1120", "license": "mit", "hash": -4383704876113730000, "line_mean": 29.2702702703, "line_max": 67, "alpha_frac": 0.6080357143, "autogenerated": false, "ratio": 3.9298245614035086, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5037860275703508, "avg_score": null, "num_lines": null }
from fun_views.patterns.template import template_render_pattern from fun_views.utils import all_args_callable from functools import partial @all_args_callable def list_render_pattern(get_list_obj, get_page_size, get_paginator, get_page_obj_number, get_page_obj, get_list_obj_context_name, get_context, get_template_name, render_response): """ :param Callable[[Dict], str] get_template_name: :param Callable[[], Iterable] get_list_obj: :param Callable[[Iterable], int] get_page_size: :param Callable[[Iterable, int], Paginator] get_paginator: :param Callable[[Iterable, Any], int] get_page_obj_number: :param Callable[[Iterable, Paginator, page_number], Page] get_page_obj: :param Callable[[Iterable], str] get_list_obj_context_name: :param Callable[[Dict], str] get_context: :param Callable[[str, Dict], HttpResponse] render_response: :rtype: Callable """ list_obj = get_list_obj() paginator, page_obj, is_paginated = None, None, False page_size = get_page_size(list_obj) if page_size: paginator = get_paginator(list_obj, page_size) page_number = get_page_obj_number(list_obj, paginator) page_obj = get_page_obj( list_obj, paginator, page_number ) is_paginated = page_obj.has_other_pages() list_obj = page_obj.object_list base_context = { 'paginator': paginator, 'page_obj': page_obj, 'is_paginated': is_paginated, get_list_obj_context_name(list_obj): list_obj } get_context_partial = partial(get_context, **base_context) return template_render_pattern(get_context_partial, get_template_name, render_response)
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/patterns/list.py", "copies": "1", "size": "1976", "license": "mit", "hash": 7821578405770932000, "line_mean": 34.2857142857, "line_max": 75, "alpha_frac": 0.5754048583, "autogenerated": false, "ratio": 3.967871485943775, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5043276344243776, "avg_score": null, "num_lines": null }
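The pagination hooks in list_render_pattern line up with Django's Paginator API. Below is a rough sketch of one way to satisfy them; the item list, page number and template name are made up, and only django.core.paginator is assumed.

# Illustrative sketch pairing list_render_pattern with django.core.paginator.
from django.core.paginator import Paginator

from fun_views.patterns.list import list_render_pattern

items = ['item-%d' % i for i in range(25)]

response = list_render_pattern(
    lambda: items,                                                # get_list_obj
    lambda list_obj: 10,                                          # get_page_size
    lambda list_obj, page_size: Paginator(list_obj, page_size),   # get_paginator
    lambda list_obj, paginator: 2,                                # get_page_obj_number
    lambda list_obj, paginator, number: paginator.page(number),   # get_page_obj
    lambda list_obj: 'object_list',                               # get_list_obj_context_name
    lambda **context: context,                                    # get_context
    lambda context: 'items/list.html',                            # get_template_name
    lambda template_name, context: (template_name,
                                    context['is_paginated'],
                                    list(context['object_list'])),
)
assert response == ('items/list.html', True,
                    ['item-%d' % i for i in range(10, 20)])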
from fun_views.patterns.template import template_render_pattern from fun_views.views.utils import (get_context_base, make_base_view, not_set_get_template_name, prefer_literal, render_response_base) template_render_base = make_base_view(template_render_pattern) def template_render(get_context=get_context_base, template_name=None, get_template_name=not_set_get_template_name, render_response=render_response_base): """ allow literal values API on top of `template_render_base`, so not every param needs to be a function :param str template_name: :param Callable[[ReqData, Dict], str] get_template_name: :param Callable[[ReqData, Dict], Dict] get_context: :param Callable[[ReqData, str, Dict], HttpResponse] render_response: """ return template_render_base( get_context, prefer_literal(template_name, get_template_name), render_response )
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/views/generic/template.py", "copies": "1", "size": "1036", "license": "mit", "hash": -6139474185997243000, "line_mean": 38.8461538462, "line_max": 81, "alpha_frac": 0.6341698842, "autogenerated": false, "ratio": 4.0627450980392155, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00046904315196998124, "num_lines": 26 }
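In practice the literal/callable duality means a page can be declared either way. A small sketch (the template path is arbitrary; wiring the resulting view into a URLconf is not shown):

# Two equivalent declarations, per the duality described in the docstring above.
from fun_views.views.generic.template import template_render

about_page = template_render(template_name='pages/about.html')

about_page_dynamic = template_render(
    get_template_name=lambda req_data, context: 'pages/about.html')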
from fun_views.patterns.update.process import update_process_pattern from fun_views.views.utils import (get_context_base, get_validity_base, make_base_view, not_set_get_form_class, not_set_get_obj, not_set_get_success_url, not_set_get_template_name, prefer_func, prefer_literal, render_response_base, render_response_valid_base) update_process_base = make_base_view(update_process_pattern) def _populate_form(req_data, form_class, obj): return form_class(instance=obj, data=req_data.request.POST, files=req_data.request.FILES) def _process_valid(req_data, form, obj): return form.save() def update_process(obj=None, get_obj=not_set_get_obj, form_class=None, get_form_class=not_set_get_form_class, populate_form=_populate_form, get_validity=get_validity_base, process_valid=_process_valid, success_url=None, get_success_url=not_set_get_success_url, render_response_valid=render_response_valid_base, obj_context_name='obj', get_obj_context_name=None, form_context_name='form', get_form_context_name=None, template_name=None, get_context=get_context_base, get_template_name=not_set_get_template_name, render_response_invalid=render_response_base): return update_process_base( prefer_literal(obj, get_obj), prefer_literal(form_class, get_form_class), populate_form, get_validity, process_valid, prefer_literal(success_url, get_success_url), render_response_valid, prefer_func(obj_context_name, get_obj_context_name), prefer_func(form_context_name, get_form_context_name), get_context, prefer_literal(template_name, get_template_name), render_response_invalid )
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/views/generic/update/process.py", "copies": "1", "size": "2175", "license": "mit", "hash": 7503067499127450000, "line_mean": 40.8269230769, "line_max": 93, "alpha_frac": 0.5627586207, "autogenerated": false, "ratio": 4.012915129151292, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000204582651391162, "num_lines": 52 }
from fun_views.patterns.update.process import update_process_pattern
from tests.utils.defaults_for_tests import (default_context,
                                            default_form_context_name,
                                            default_model_form_class,
                                            default_obj_context_name,
                                            default_response_invalid,
                                            default_response_valid,
                                            default_template_name, default_url,
                                            DefaultObj)


def test_works_properly():
    this_obj = DefaultObj()
    this_form = default_model_form_class(instance=this_obj, data={})
    this_saved_obj = [1, 2, 3, 4]

    this_expected_base_context = {
        default_obj_context_name: this_obj,
        default_form_context_name: this_form,
    }

    for this_validity in (True, False):
        def this_get_obj():
            return this_obj

        def this_populate_form(form_class, obj):
            assert form_class == default_model_form_class
            assert obj == this_obj
            return this_form

        def this_get_obj_context_name(obj):
            assert obj == this_obj
            return default_obj_context_name

        def this_get_form_class(obj):
            assert obj == this_obj
            return default_model_form_class

        def this_get_validity(form, obj):
            assert isinstance(form, default_model_form_class)
            assert obj == this_obj
            return this_validity

        def this_get_success_url(saved_obj):
            assert saved_obj == this_saved_obj
            return default_url

        def this_process_valid(form, obj):
            assert isinstance(form, default_model_form_class)
            assert obj == this_obj
            return this_saved_obj

        def this_render_response_valid(saved_obj, success_url):
            assert success_url == default_url
            assert saved_obj == this_saved_obj
            return default_response_valid

        def this_get_form_context_name(form):
            assert form == this_form
            return default_form_context_name

        def this_get_context(**base_context):
            assert base_context == this_expected_base_context
            return default_context()

        def this_get_template_name(context):
            assert context == default_context()
            return default_template_name

        def this_render_response_invalid(template_name, context):
            assert template_name == default_template_name
            assert context == default_context()
            return default_response_invalid

        response = update_process_pattern(
            this_get_obj,
            this_get_form_class,
            this_populate_form,
            this_get_validity,
            this_process_valid,
            this_get_success_url,
            this_render_response_valid,
            this_get_obj_context_name,
            this_get_form_context_name,
            this_get_context,
            this_get_template_name,
            this_render_response_invalid
        )

        expected = default_response_valid if this_validity else default_response_invalid
        assert response == expected
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/patterns/test_update_process.py", "copies": "1", "size": "3310", "license": "mit", "hash": 3647988300973636000, "line_mean": 35.3736263736, "line_max": 79, "alpha_frac": 0.5474320242, "autogenerated": false, "ratio": 4.584487534626039, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5631919558826038, "avg_score": null, "num_lines": null }
from fun_views.patterns.update.render import update_render_pattern from fun_views.views.utils import (get_context_base, make_base_view, not_set_get_form_class, not_set_get_obj, not_set_get_template_name, prefer_func, prefer_literal, render_response_base) update_render_base = make_base_view(update_render_pattern) def _init_form(req_data, form_class, obj): return form_class(instance=obj) def update_render(obj=None, get_obj=not_set_get_obj, form_class=None, get_form_class=not_set_get_form_class, init_form=_init_form, obj_context_name='obj', get_obj_context_name=None, form_context_name='form', get_form_context_name=None, get_context=get_context_base, template_name=None, get_template_name=not_set_get_template_name, render_response=render_response_base): return update_render_base( prefer_literal(obj, get_obj), prefer_literal(form_class, get_form_class), init_form, prefer_func(obj_context_name, get_obj_context_name), prefer_func(form_context_name, get_form_context_name), get_context, prefer_literal(template_name, get_template_name), render_response )
{ "repo_name": "keithasaurus/django_fun_views", "path": "fun_views/views/generic/update/render.py", "copies": "1", "size": "1463", "license": "mit", "hash": -4261712121773139500, "line_mean": 38.5405405405, "line_max": 75, "alpha_frac": 0.5645933014, "autogenerated": false, "ratio": 3.8601583113456464, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4924751612745646, "avg_score": null, "num_lines": null }
from fun_views.patterns.update.render import update_render_pattern
from tests.utils.defaults_for_tests import (default_context,
                                            default_form_context_name,
                                            default_model_form_class,
                                            default_obj_context_name,
                                            default_response,
                                            default_template_name, DefaultObj)


def test_works():
    this_obj = DefaultObj()
    this_form = default_model_form_class(instance=this_obj)

    this_updated_base_context = {
        default_obj_context_name: this_obj,
        default_form_context_name: this_form,
    }

    def this_get_obj():
        return this_obj

    def this_get_form_class(obj):
        assert obj == this_obj
        return default_model_form_class

    def this_init_form(form_class, obj):
        assert form_class == default_model_form_class
        assert obj == this_obj
        return this_form

    def this_get_obj_context_name(obj):
        assert obj == this_obj
        return default_obj_context_name

    def this_get_form_context_name(form, obj):
        assert isinstance(form, default_model_form_class)
        assert obj == this_obj
        return default_form_context_name

    def this_get_template_name(context):
        assert context == default_context()
        return default_template_name

    def this_get_context(**base_context):
        assert base_context == this_updated_base_context
        return default_context()

    def this_render_response(template_name, context):
        assert template_name == default_template_name
        assert context == default_context()
        return default_response

    response = update_render_pattern(
        this_get_obj,
        this_get_form_class,
        this_init_form,
        this_get_obj_context_name,
        this_get_form_context_name,
        this_get_context,
        this_get_template_name,
        this_render_response
    )

    assert response == default_response
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/patterns/test_update_render.py", "copies": "1", "size": "2073", "license": "mit", "hash": 8996497656413066000, "line_mean": 31.9047619048, "line_max": 78, "alpha_frac": 0.5885190545, "autogenerated": false, "ratio": 4.274226804123711, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5362745858623711, "avg_score": null, "num_lines": null }
from fun_views.requests import make_req_data from fun_views.views.generic.redirect import redirect_view from tests.utils.defaults_for_tests import (default_query_string, default_response, default_url) from tests.utils.requests import call_view, FakeRequest this_req_data = make_req_data(FakeRequest()) this_req_data.request.META = { 'QUERY_STRING': default_query_string } def this_get_url(req_data): assert req_data == this_req_data return default_url def this_get_query_string(req_data): assert req_data == this_req_data return default_query_string def test_functions_are_called_as_expected(): for this_use_query_string in (True, False): for this_permanent in (True, False): this_url = "{}?{}".format(default_url, default_query_string) \ if this_use_query_string \ else default_url def this_get_use_query_string(req_data, query_string): assert req_data == this_req_data assert query_string == default_query_string return this_use_query_string def this_get_permanent(req_data, url): assert req_data == this_req_data assert url == this_url return this_permanent def this_do_redirect(req_data, url, permanent): assert req_data == this_req_data assert url == this_url assert permanent == this_permanent return default_response assert default_response == call_view( redirect_view( get_url=this_get_url, get_query_string=this_get_query_string, get_use_query_string=this_get_use_query_string, get_permanent=this_get_permanent, do_redirect=this_do_redirect ), this_req_data ) def test_literals_called_as_expected(): for this_use_query_string in (True, False): for this_permanent in (True, False): this_expected_url = "{}?{}".format(default_url, default_query_string) \ if this_use_query_string \ else default_url def this_do_redirect(req_data, url, permanent): assert req_data == this_req_data assert url == this_expected_url assert permanent == this_permanent return default_response assert default_response == call_view( redirect_view( url=default_url, query_string=default_query_string, use_query_string=this_use_query_string, permanent=this_permanent, do_redirect=this_do_redirect ), this_req_data )
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/views/generic/test_redirect.py", "copies": "1", "size": "2912", "license": "mit", "hash": -9074069091846246000, "line_mean": 35.4, "line_max": 83, "alpha_frac": 0.5521978022, "autogenerated": false, "ratio": 4.3076923076923075, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5359890109892307, "avg_score": null, "num_lines": null }
from fun_views.routing import HTTP_METHODS, route, route_all, route_multiple from tests.utils.errors import FakeHttpResponseNotAllowed from tests.utils.mocks import echo_args, mock from tests.utils.requests import FakeRequest from unittest import TestCase EXPECTED_MSG_1 = 'expected msg 1' EXPECTED_MSG_2 = 'expected msg 2' def view_func_for_tests_1(request): return EXPECTED_MSG_1 def view_func_for_tests_2(request): return EXPECTED_MSG_2 def _get_response_for_method(http_method, view_func): request = FakeRequest() request.method = http_method.upper() return view_func(request) @mock.patch('fun_views.routing.route_multiple', echo_args) def test_view_func_registered_for_all_known_methods(): assert route_all(view_func_for_tests_1) ==\ (({method for method in HTTP_METHODS}, view_func_for_tests_1),) def _raise_response_not_allowed(msg): raise FakeHttpResponseNotAllowed(msg) @mock.patch('fun_views.routing.HttpResponseNotAllowed', _raise_response_not_allowed) class TestRouteMultipleHttpMethods(TestCase): def test_gethead_and_postput_views(self): resulting_view_func = route_multiple( ({'get', 'head'}, view_func_for_tests_1), ({'post', 'put'}, view_func_for_tests_2), ) for http_method in ('get', 'head'): response = _get_response_for_method(http_method, resulting_view_func) self.assertEqual(response, EXPECTED_MSG_1) for http_method in ('post', 'put'): response = _get_response_for_method(http_method, resulting_view_func) self.assertEqual(response, EXPECTED_MSG_2) def test_unused_method_results_in_405_response(self): routed_view_func = route_multiple( ({'get', 'head'}, view_func_for_tests_1) ) for http_method in HTTP_METHODS: if http_method in {'get', 'head'}: continue with self.assertRaises(FakeHttpResponseNotAllowed): _get_response_for_method(http_method, routed_view_func) def test_route_is_errors_raised(self): routed_view_func = route_multiple( ({'get', 'put'}, view_func_for_tests_1), ({'post'}, view_func_for_tests_2), ) for http_method in HTTP_METHODS: if http_method in {'get', 'post', 'put'}: self.assertIn( _get_response_for_method(http_method, routed_view_func), {EXPECTED_MSG_1, EXPECTED_MSG_2} ) else: with self.assertRaises(FakeHttpResponseNotAllowed): _get_response_for_method(http_method, routed_view_func) @mock.patch('fun_views.routing.route_multiple', echo_args) def test_route(): response = route( get=view_func_for_tests_2, post=view_func_for_tests_1 ) assert 2 == len(response) for t in ( ({'get'}, view_func_for_tests_2), ({'post'}, view_func_for_tests_1), ): assert t in response
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/test_routing.py", "copies": "1", "size": "3026", "license": "mit", "hash": 4510673040147516400, "line_mean": 32.2527472527, "line_max": 84, "alpha_frac": 0.6249173827, "autogenerated": false, "ratio": 3.585308056872038, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4710225439572038, "avg_score": null, "num_lines": null }
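The routing behaviour exercised above reduces to dispatching on request.method. A minimal sketch with a fake request object (FakeReq is invented here; Django only needs to be importable):

# Minimal method-routing sketch; FakeReq stands in for an HttpRequest.
from fun_views.routing import route


class FakeReq(object):
    method = 'POST'


contact_view = route(
    get=lambda request: 'render the form',
    post=lambda request: 'process the form',
)

assert contact_view(FakeReq()) == 'process the form'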
from fun_views.utils import ParamNotSetError from fun_views.views.generic.list import list_render from tests.utils.defaults_for_tests import (default_req_data, default_template_name) from tests.utils.errors import ShouldNotReachError from tests.utils.mocks import echo_args, mock from tests.utils.requests import call_view this_obj_list = list(range(37)) def this_get_list_obj(req_data): assert req_data == default_req_data return this_obj_list def this_get_template_name(req_data, context): assert req_data == default_req_data assert isinstance(context, dict) return default_template_name def test_no_get_template_name_raises_not_set(): try: call_view( list_render( get_list_obj=this_get_list_obj ) ) except ParamNotSetError as e: assert str(e) == 'get_template_name not set' else: raise ShouldNotReachError def test_no_get_list_obj_raises_not_set(): try: call_view( list_render( get_template_name=this_get_template_name ) ) except ParamNotSetError as e: assert str(e) == 'get_list_obj not set' else: raise ShouldNotReachError @mock.patch('fun_views.views.generic.list.list_render_base', echo_args) def test_literals_are_accepted(): this_list_obj = ['some', 'strings', 'woohoo'] this_page_size = 10 default_list_obj_context_name = 'ebilwberwer' get_list_obj, get_page_size, get_paginator, get_page_obj_number, get_page_obj, \ get_list_obj_context_name, get_context, get_template_name, render_response = \ list_render( template_name=default_template_name, list_obj=this_list_obj, page_size=this_page_size, list_obj_context_name=default_list_obj_context_name ) for func, literal in ( (get_template_name, default_template_name), (get_list_obj, this_list_obj), (get_page_size, this_page_size), (get_list_obj_context_name, default_list_obj_context_name), ): assert func() == literal
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/views/generic/test_list.py", "copies": "1", "size": "2160", "license": "mit", "hash": -8740344770655427000, "line_mean": 29.8571428571, "line_max": 86, "alpha_frac": 0.6305555556, "autogenerated": false, "ratio": 3.467094703049759, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9595988904452908, "avg_score": 0.00033227083937023083, "num_lines": 70 }
from fun_views.utils import ParamNotSetError, wrap from fun_views.views.generic.form.render import form_render from tests.utils.defaults_for_tests import (default_context, default_form, default_form_class, default_form_context_name, default_template_name) from tests.utils.errors import ShouldNotReachError, SuccessError from tests.utils.mocks import mock from tests.utils.requests import call_view def test_assert_raises_error_without_get_template_name_defined(): try: call_view( form_render( get_form_class=wrap(default_form_class) ) ) except ParamNotSetError as e: assert str(e), 'get_template_name not set' else: raise ShouldNotReachError def test_assert_raises_error_without_get_form_class_defined(): try: call_view(form_render( get_template_name=wrap(default_template_name) )) except ParamNotSetError as e: assert str(e) == 'get_form_class not set' else: raise ShouldNotReachError def test_get_template_name_is_called(): def this_get_template_name(req_data, context): assert req_data == req_data assert context['form'].__class__ == default_form_class raise SuccessError try: call_view( form_render( get_template_name=this_get_template_name, get_form_class=wrap(default_form_class) ) ) except SuccessError: pass else: raise ShouldNotReachError def test_get_form_class_is_called(): def this_get_form_class(req_data): assert req_data == req_data raise SuccessError try: call_view( form_render( get_template_name=wrap(default_template_name), get_form_class=this_get_form_class ) ) except SuccessError: pass else: raise ShouldNotReachError def test_init_form_is_called(): def this_init_form(req_data, form_class): assert req_data == req_data assert form_class == default_form_class raise SuccessError try: call_view( form_render( get_template_name=wrap(default_template_name), get_form_class=wrap(default_form_class), init_form=this_init_form ) ) except SuccessError: pass else: raise ShouldNotReachError def test_get_form_class_context_name_is_called(): def this_get_form_context_name(req_data, form): assert req_data == req_data assert form.__class__ == default_form_class raise SuccessError try: call_view( form_render( get_template_name=wrap(default_template_name), get_form_class=wrap(default_form_class), get_form_context_name=this_get_form_context_name ) ) except SuccessError: pass else: raise ShouldNotReachError def test_default_value_for_get_form_class_context_name(): this_inited_form = default_form_class() def this_get_context(req_data, **base_context): assert base_context == { 'form': this_inited_form, } assert req_data == req_data raise SuccessError try: call_view(form_render(get_template_name=wrap(default_template_name), get_form_class=wrap(default_form_class), get_context=this_get_context, init_form=wrap(this_inited_form))) except SuccessError: pass else: raise ShouldNotReachError def test_get_context_is_called_with_custom_obj_context_name(): def this_get_context(req_data, **base_context): assert base_context == { default_form_context_name: default_form, } assert req_data == req_data raise SuccessError try: call_view( form_render( get_template_name=wrap(default_template_name), get_form_class=wrap(default_form_class), get_form_context_name=wrap(default_form_context_name), get_context=this_get_context, init_form=wrap(default_form) ) ) except SuccessError: pass else: raise ShouldNotReachError def test_render_get_proper_values(): this_expected_base_context = { default_form_context_name: default_form, } def this_get_context(req_data, **base_context): assert req_data == req_data assert base_context == 
this_expected_base_context return default_context() def this_render(req_data, template_name, context): assert req_data == req_data assert template_name == default_template_name assert context == default_context() raise SuccessError try: call_view( form_render( get_template_name=wrap(default_template_name), get_context=this_get_context, get_form_class=wrap(default_form_class), init_form=wrap(default_form), get_form_context_name=wrap(default_form_context_name), render_response=this_render ) ) except SuccessError: pass else: raise ShouldNotReachError @mock.patch('fun_views.views.utils.render') def test_render_response_works_properly(mock_render_response): success_response = 'ALL GOOD' mock_render_response.return_value = success_response response = call_view(form_render( get_template_name=wrap(default_template_name), get_form_class=wrap(default_form_class), )) assert success_response == response
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/views/generic/test_form_render.py", "copies": "1", "size": "5937", "license": "mit", "hash": 8927207158499975000, "line_mean": 28.2463054187, "line_max": 76, "alpha_frac": 0.5846387064, "autogenerated": false, "ratio": 4.008777852802161, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 203 }
from fun_views.views.generic.update.process import update_process
from tests.utils.defaults_for_tests import (default_form_context_name,
                                            default_model_form_class,
                                            default_obj_context_name,
                                            default_response_invalid,
                                            default_response_valid,
                                            default_template_name, default_url,
                                            DefaultObj)
from tests.utils.requests import call_view


def test_accepts_literals():
    this_obj = DefaultObj()
    this_saved_obj = ['qweqw', 'qweqw', 'qwe']

    for this_validity in (True, False):
        def this_get_validity(req_data, form, obj):
            assert req_data == req_data
            assert isinstance(form, default_model_form_class)
            assert obj == this_obj
            return this_validity

        def this_render_response_valid(req_data, saved_obj, success_url):
            assert req_data == req_data
            assert saved_obj == this_saved_obj
            assert success_url == default_url
            return default_response_valid

        def this_render_response_invalid(req_data, template_name, context):
            assert req_data == req_data
            assert context[default_obj_context_name] == this_obj
            assert isinstance(context[default_form_context_name], default_model_form_class)
            assert template_name == default_template_name
            return default_response_invalid

        def this_process_valid(req_data, form, obj):
            assert req_data == req_data
            assert isinstance(form, default_model_form_class)
            assert obj == this_obj
            return this_saved_obj

        response = call_view(
            update_process(
                form_class=default_model_form_class,
                obj=this_obj,
                success_url=default_url,
                obj_context_name=default_obj_context_name,
                form_context_name=default_form_context_name,
                template_name=default_template_name,
                get_validity=this_get_validity,
                process_valid=this_process_valid,
                render_response_valid=this_render_response_valid,
                render_response_invalid=this_render_response_invalid
            )
        )

        expected = default_response_valid if this_validity else default_response_invalid
        assert response == expected
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/views/generic/test_update_process.py", "copies": "1", "size": "2525", "license": "mit", "hash": 5065972899588882000, "line_mean": 43.298245614, "line_max": 96, "alpha_frac": 0.5627722772, "autogenerated": false, "ratio": 4.590909090909091, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00037155865908608364, "num_lines": 57 }
from fun_views.views.generic.update.render import update_render
from tests.utils.defaults_for_tests import (default_form_context_name,
                                            default_model_form_class,
                                            default_obj_context_name,
                                            default_req_data,
                                            default_response_invalid,
                                            default_template_name, DefaultObj)
from tests.utils.requests import call_view


def test_literal_values_allowed():
    this_obj = DefaultObj()

    def this_render_response_invalid(req_data, template_name, context):
        assert req_data == default_req_data
        assert template_name == default_template_name
        assert context[default_obj_context_name] == this_obj
        assert isinstance(context[default_form_context_name], default_model_form_class)
        return default_response_invalid

    response = call_view(update_render(
        obj=this_obj,
        form_class=default_model_form_class,
        obj_context_name=default_obj_context_name,
        form_context_name=default_form_context_name,
        template_name=default_template_name,
        render_response=this_render_response_invalid
    ))

    assert response == default_response_invalid
{ "repo_name": "keithasaurus/django_fun_views", "path": "tests/views/generic/test_update_render.py", "copies": "1", "size": "1318", "license": "mit", "hash": -5918891565863797000, "line_mean": 42.9333333333, "line_max": 87, "alpha_frac": 0.6054628225, "autogenerated": false, "ratio": 4.513698630136986, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0003787878787878788, "num_lines": 30 }
from fuocore.models import (
    BaseModel,
    SongModel,
    PlaylistModel,
    AlbumModel,
    ArtistModel,
    SearchModel,
    ModelStage,
)

from .provider import provider


class QQBaseModel(BaseModel):
    _api = provider.api

    class Meta:
        allow_get = True
        provider = provider
        fields = ('mid', )

    @classmethod
    def get(cls, identifier):
        raise NotImplementedError


def _deserialize(data, schema_cls):
    schema = schema_cls(strict=True)
    obj, _ = schema.load(data)
    # XXX: mark the model as gotten to reduce the mental overhead while
    # writing code, and to avoid infinite recursion when calling get.
    obj.stage = ModelStage.gotten
    return obj


class QQSongModel(SongModel, QQBaseModel):

    @classmethod
    def get(cls, identifier):
        data = cls._api.get_song_detail(identifier)
        song = _deserialize(data, QQSongDetailSchema)
        return song

    @property
    def url(self):
        if self._url is not None:
            return self._url
        url = self._api.get_song_url(self.mid)
        if url is not None:
            self._url = url
        else:
            self._url = ''
        return self._url

    @url.setter
    def url(self, url):
        self._url = url


class QQAlbumModel(AlbumModel, QQBaseModel):
    @classmethod
    def get(cls, identifier):
        data_album = cls._api.album_detail(identifier)
        album = _deserialize(data_album, QQAlbumSchema)
        album.cover = cls._api.get_cover(album.mid, 2)
        return album


class QQArtistModel(ArtistModel, QQBaseModel):
    @classmethod
    def get(cls, identifier):
        data_artist = cls._api.artist_detail(identifier)
        artist = _deserialize(data_artist, QQArtistSchema)
        artist.cover = cls._api.get_cover(artist.mid, 1)
        return artist


class QQPlaylistModel(PlaylistModel, QQBaseModel):
    pass


class QQSearchModel(SearchModel, QQBaseModel):
    pass


def search(keyword, **kwargs):
    data_songs = provider.api.search(keyword)
    songs = []
    for data_song in data_songs:
        song = _deserialize(data_song, QQSongSchema)
        songs.append(song)
    return QQSearchModel(songs=songs)


from .schemas import (
    QQArtistSchema,
    QQAlbumSchema,
    QQSongSchema,
    QQSongDetailSchema,
)  # noqa
{ "repo_name": "cosven/feeluown-core", "path": "fuocore/qqmusic/models.py", "copies": "1", "size": "2308", "license": "mit", "hash": 3596698155558636000, "line_mean": 21.42, "line_max": 58, "alpha_frac": 0.6355932203, "autogenerated": false, "ratio": 3.465224111282844, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9600817331582844, "avg_score": 0, "num_lines": 100 }
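A hedged usage sketch of the module above; it assumes SongModel exposes a title attribute and it needs network access to QQ Music's web API, so treat it as illustrative only.

# Illustrative only: requires network access to QQ Music's web API.
from fuocore.qqmusic.models import search

result = search('keyword')
for song in result.songs:
    print(song.title, song.url)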
from furious.context.context import _insert_tasks from furious.context.context import _tasks_to_reinsert def insert_tasks_ignore_duplicate_names(tasks, queue, *args, **kwargs): """Insert a batch of tasks into a specific queue. If a DuplicateTaskNameError is raised, loop through the tasks and insert the remaining, ignoring and logging the duplicate tasks. Returns the number of successfully inserted tasks. """ from google.appengine.api import taskqueue try: inserted = _insert_tasks(tasks, queue, *args, **kwargs) return inserted except taskqueue.DuplicateTaskNameError: # At least one task failed in our batch, attempt to re-insert the # remaining tasks. Named tasks can never be transactional. reinsert = _tasks_to_reinsert(tasks, transactional=False) count = len(reinsert) inserted = len(tasks) - count # Our subsequent task inserts should raise TaskAlreadyExistsError at # least once, but that will be swallowed by _insert_tasks. for task in reinsert: inserted += _insert_tasks([task], queue, *args, **kwargs) return inserted
{ "repo_name": "Workiva/furious", "path": "furious/extras/insert_task_handlers.py", "copies": "4", "size": "1175", "license": "apache-2.0", "hash": -2868601165995887000, "line_mean": 35.71875, "line_max": 76, "alpha_frac": 0.6910638298, "autogenerated": false, "ratio": 4.304029304029304, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6995093133829304, "avg_score": null, "num_lines": null }
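A sketch of how the handler above might be invoked from an App Engine request handler. The task URL, task names and queue name are placeholders, the second argument is assumed to be a queue name, and the App Engine SDK must be available for taskqueue to import.

# Illustrative only: requires the App Engine SDK/runtime.
from google.appengine.api import taskqueue

from furious.extras.insert_task_handlers import insert_tasks_ignore_duplicate_names

tasks = [
    taskqueue.Task(url='/work/items', name='work-batch-1-item-%d' % i)
    for i in range(10)
]

# Tasks whose names were already used are skipped instead of failing the batch.
inserted_count = insert_tasks_ignore_duplicate_names(tasks, 'default')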
from furl import furl from datetime import datetime from framework.utils import iso8601format from dateutil import parser from website.project.metadata.utils import serialize_meta_schema from website.settings import DOMAIN as OSF_DOMAIN EMBARGO = 'embargo' IMMEDIATE = 'immediate' CANCELED = 'canceled' def serialize_user(user): return { 'full_name': user.fullname, 'username': user.username, 'id': user._id } # TODO: Write and use APIv2 serializer for this def serialize_draft_registration(draft, json_safe=True): node_url = get_url(draft) embargo = get_embargo(draft, json_safe) submitted = None if draft.approval is not None: if json_safe: submitted = iso8601format(draft.approval.initiation_date) else: submitted = draft.approval.initiation_date return { 'pk': draft._id, 'initiator': serialize_user(draft.initiator), 'registration_metadata': draft.registration_metadata, 'registration_schema': serialize_meta_schema(draft.registration_schema), 'initiated': iso8601format(draft.datetime_initiated) if json_safe else draft.datetime_initiated, 'updated': iso8601format(draft.datetime_updated) if json_safe else draft.datetime_updated, 'submitted': submitted, 'requires_approval': draft.requires_approval, 'is_pending_approval': draft.is_pending_review, 'is_approved': draft.is_approved, 'is_rejected': draft.is_rejected, 'notes': draft.notes, 'proof_of_publication': draft.flags.get('proof_of_publication'), 'payment_sent': draft.flags.get('payment_sent'), 'assignee': draft.flags.get('assignee'), 'title': draft.registration_metadata['q1']['value'], 'embargo': embargo, 'registered_node': node_url, 'status': get_approval_status(draft), 'logs': map(serialize_draft_logs, draft.status_logs), } def serialize_draft_logs(log): return '{} on {}'.format(log.action, datetime.strftime(log.date, '%c')) def get_url(draft): url = furl(OSF_DOMAIN) if draft.registered_node is not None: url.path.add(draft.registered_node.url) return url.url elif draft.branched_from is not None: url.path.add(draft.branched_from.url) return url.url return None def get_embargo(draft, json_safe): if draft.approval is not None: registration_choice = draft.approval.meta.get('registration_choice', None) if registration_choice == EMBARGO: time = parser.parse(draft.approval.meta['embargo_end_date']) return iso8601format(time) if json_safe else time return IMMEDIATE else: return CANCELED def get_approval_status(draft): if draft.is_approved: if draft.registered_node is not None: if draft.registered_node.is_deleted: return 'Approved but canceled' if draft.registered_node.retraction is None: return 'Approved and registered' else: return 'Approved but withdrawn' return 'Approved but not registered' elif draft.is_rejected: return 'Rejected' else: return 'Pending approval'
{ "repo_name": "laurenrevere/osf.io", "path": "admin/pre_reg/serializers.py", "copies": "9", "size": "3269", "license": "apache-2.0", "hash": -834768070736764900, "line_mean": 32.0202020202, "line_max": 104, "alpha_frac": 0.6531049251, "autogenerated": false, "ratio": 3.8413631022326675, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8994468027332668, "avg_score": null, "num_lines": null }
from furl import furl from lxml import etree from django.conf import settings from share.harvest import BaseHarvester class PLOSHarvester(BaseHarvester): VERSION = 1 MAX_ROWS_PER_REQUEST = 999 def do_harvest(self, start_date, end_date): if not settings.PLOS_API_KEY: raise Exception('PLOS api key not defined.') start_date = start_date.isoformat().split('.')[0] + 'Z' end_date = end_date.isoformat().split('.')[0] + 'Z' return self.fetch_rows(furl(self.config.base_url).set(query_params={ 'q': 'publication_date:[{} TO {}]'.format(start_date, end_date), 'rows': '0', 'api_key': settings.PLOS_API_KEY }).url, start_date, end_date) def fetch_rows(self, url, start_date, end_date): resp = self.requests.get(url) total_rows = etree.XML(resp.content).xpath('//result/@numFound') total_rows = int(total_rows[0]) if total_rows else 0 current_row = 0 while current_row < total_rows: response = self.requests.get(furl(self.config.base_url).set(query_params={ 'q': 'publication_date:[{} TO {}]'.format(start_date, end_date), 'start': current_row, 'api_key': settings.PLOS_API_KEY, 'rows': self.MAX_ROWS_PER_REQUEST }).url) docs = etree.XML(response.content).xpath('//doc') for doc in docs: if doc.xpath("arr[@name='abstract']") or doc.xpath("str[@name='author_display']"): doc_id = doc.xpath("str[@name='id']")[0].text doc = etree.tostring(doc) yield (doc_id, doc) current_row += len(docs)
{ "repo_name": "aaxelb/SHARE", "path": "share/harvesters/org_plos.py", "copies": "3", "size": "1738", "license": "apache-2.0", "hash": -6105038046359412000, "line_mean": 33.76, "line_max": 98, "alpha_frac": 0.5604142693, "autogenerated": false, "ratio": 3.532520325203252, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00067881883973838, "num_lines": 50 }
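The harvester above leans on furl's query_params helper to build its Solr query; the same idiom in isolation (the API key and date range are placeholders):

# Standalone illustration of the furl query-building idiom used above.
from furl import furl

url = furl('http://api.plos.org/search').set(query_params={
    'q': 'publication_date:[2017-01-01T00:00:00Z TO 2017-01-08T00:00:00Z]',
    'rows': '0',
    'api_key': 'PLACEHOLDER',
}).url
print(url)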
from furl import furl from lxml import etree from django.conf import settings from share import Harvester class PLOSHarvester(Harvester): url = 'http://api.plos.org/search' MAX_ROWS_PER_REQUEST = 999 def do_harvest(self, start_date, end_date): if not settings.PLOS_API_KEY: raise Exception('PLOS api key not defined.') start_date = start_date.isoformat().split('.')[0] + 'Z' end_date = end_date.isoformat().split('.')[0] + 'Z' return self.fetch_rows(furl(self.url).set(query_params={ 'q': 'publication_date:[{} TO {}]'.format(start_date, end_date), 'rows': '0', 'api_key': settings.PLOS_API_KEY }).url, start_date, end_date) def fetch_rows(self, url, start_date, end_date): resp = self.requests.get(url) total_rows = etree.XML(resp.content).xpath('//result/@numFound') total_rows = int(total_rows[0]) if total_rows else 0 current_row = 0 while current_row < total_rows: response = self.requests.get(furl(self.url).set(query_params={ 'q': 'publication_date:[{} TO {}]'.format(start_date, end_date), 'start': current_row, 'api_key': settings.PLOS_API_KEY, 'rows': self.MAX_ROWS_PER_REQUEST }).url) docs = etree.XML(response.content).xpath('//doc') for doc in docs: if doc.xpath("arr[@name='abstract']") or doc.xpath("str[@name='author_display']"): doc_id = doc.xpath("str[@name='id']")[0].text doc = etree.tostring(doc) yield (doc_id, doc) current_row += len(docs)
{ "repo_name": "zamattiac/SHARE", "path": "providers/org/plos/harvester.py", "copies": "1", "size": "1721", "license": "apache-2.0", "hash": -6484121407292345000, "line_mean": 33.42, "line_max": 98, "alpha_frac": 0.5543288786, "autogenerated": false, "ratio": 3.505091649694501, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4559420528294501, "avg_score": null, "num_lines": null }
from furl import furl import logging from libtaxii.common import generate_message_id from . import dispatcher, utils from .converters import to_detailed_service_instance_entity from .exceptions import ( NoURIProvidedError, ServiceNotFoundError, AmbiguousServicesError, ClientException ) from six.moves import map class AbstractClient(object): ''' Abstract client class. This class can not be used directly, use :py:meth:`cabby.create_client` to create client instances. ''' SUPPORTED_SCHEMES = ['http', 'https'] taxii_version = None def __init__(self, host=None, discovery_path=None, port=None, use_https=False, headers=None, timeout=None): self.host = host self.port = port self.use_https = use_https self.discovery_path = discovery_path self.services = None self.proxies = None self.verify_ssl = True self.ca_cert = None self.cert_file = None self.key_file = None self.key_password = None self.username = None self.password = None self.jwt_url = None self.jwt_token = None self.headers = headers or {} self.timeout = timeout self.log = logging.getLogger( "{}.{}".format(self.__module__, self.__class__.__name__)) def set_auth(self, ca_cert=None, cert_file=None, key_file=None, key_password=None, username=None, password=None, jwt_auth_url=None, verify_ssl=True): ''' Set authentication credentials. ``jwt_auth_url`` is required for JWT based authentication. If it is not specified but ``username`` and ``password`` are provided, client will configure Basic authentication. SSL authentication can be combined with JWT and Basic authentication. :param str ca_cert: a path to CA SSL certificate file :param str cert_file: a path to SSL certificate file :param str key_file: a path to SSL key file :param str username: username, used in basic auth or JWT auth :param str password: password, used in basic auth or JWT auth :param str key_password: same argument as in ``ssl.SSLContext.load_cert_chain`` - may be a function to call to get the password for decrypting the private key or string/bytes/bytearray. It will only be called if the private key is encrypted and a password is necessary. :param str jwt_auth_url: URL used to obtain JWT token :param bool/str verify_ssl: set to False to skip checking host's SSL certificate. Set to True to check certificate against public CAs or set to filepath to check against custom CA bundle. ''' self.ca_cert = ca_cert self.cert_file = cert_file self.key_file = key_file self.key_password = key_password self.username = username self.password = password if jwt_auth_url: self.jwt_url = self._prepare_url(jwt_auth_url) self.verify_ssl = verify_ssl def set_proxies(self, proxies): ''' Set proxy properties. Cause requests to go through a proxy. Must be a dictionary mapping protocol names to URLs of proxies. :param dir proxies: dictionary mapping protocol names to URLs ''' self.proxies = proxies def _prepare_url(self, uri): fu = furl(uri) if fu.scheme and fu.scheme not in self.SUPPORTED_SCHEMES: raise ValueError( 'Scheme "{}" is not supported. Use one of: {}' .format(fu.scheme, ', '.join(self.SUPPORTED_SCHEMES))) use_https = self.use_https or (fu.scheme == 'https') fu.scheme = fu.scheme or ('https' if use_https else 'http') fu.host = fu.host or self.host fu.port = self.port if self.port else ( fu.port or (443 if use_https else 80)) if not fu.host: raise ValueError('Host name is not provided: {}'.format(fu.url)) return fu.url def refresh_jwt_token(self, session=None): ''' Obtain JWT token using provided JWT session, url, username and password. 
''' session = session or self.prepare_generic_session() self.jwt_token = dispatcher.obtain_jwt_token( session, self._prepare_url(self.jwt_url), self.username, self.password) return self.jwt_token def prepare_generic_session(self): ''' Prepare basic generic session with configured proxies, headers, username/password (if no JWT url configured), cert file, key file and SSL verification flags. ''' return dispatcher.get_generic_session( proxies=self.proxies, headers=self.headers, username=self.username if not self.jwt_url else None, password=self.password if not self.jwt_url else None, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, ca_cert=self.ca_cert, verify_ssl=self.verify_ssl) def _execute_request(self, request, uri=None, service_type=None): ''' Execute generic TAXII request. A service is defined by ``uri`` parameter or is chosen from pre-cached services by ``service_type``. ''' if not uri and not service_type: raise NoURIProvidedError('URI or service_type needed') elif not uri: service = self._get_service(service_type) uri = service.address if (self.key_file and not self.key_password and utils.if_key_encrypted(self.key_file)): raise ValueError( 'Key file is encrypted but key password was not provided') session = self.prepare_generic_session() if self.jwt_url and self.username and self.password: if not self.jwt_token: self.refresh_jwt_token(session=session) session = dispatcher.set_jwt_token(session, self.jwt_token) message = dispatcher.send_taxii_request( session, self._prepare_url(uri), request, taxii_binding=self.taxii_binding, timeout=self.timeout) return message def _generate_id(self): return generate_message_id(version=self.services_version) def _get_service(self, service_type): candidates = self.get_services(service_type=service_type) if not candidates: raise ServiceNotFoundError( "Service with type '{}' is not advertised" .format(service_type)) elif len(candidates) > 1: raise AmbiguousServicesError( "{} services found with type '{}'. Specify the exact URI" .format(len(candidates), service_type)) return candidates[0] def get_services(self, service_type=None, service_types=None): ''' Get services advertised by TAXII server. This method will try to do automatic discovery by calling :py:meth:`discover_services`. :param str service_type: filter services by specific type. Accepted values are listed in :py:data:`cabby.entities.SERVICE_TYPES` :param str service_types: filter services by multiple types. 
Accepted values are listed in :py:data:`cabby.entities.SERVICE_TYPES` :return: list of service instances :rtype: list of :py:class:`cabby.entities.DetailedServiceInstance` (or :py:class:`cabby.entities.InboxDetailedService`) :raises ValueError: if URI provided is invalid or schema is not supported :raises `cabby.exceptions.HTTPError`: if HTTP error happened :raises `cabby.exceptions.UnsuccessfulStatusError`: if Status Message received and status_type is not `SUCCESS` :raises `cabby.exceptions.ServiceNotFoundError`: if no service found :raises `cabby.exceptions.AmbiguousServicesError`: more than one service with type specified :raises `cabby.exceptions.NoURIProvidedError`: no URI provided and client can't discover services ''' if self.services: services = self.services else: try: services = self.discover_services() except ClientException as e: self.log.error('Can not autodiscover advertised services') raise e if service_type: return [s for s in services if s.type == service_type] elif service_types: return [s for s in services if s.type in service_types] else: return services def discover_services(self, uri=None, cache=True): ''' Discover services advertised by TAXII server. This method will send discovery request to a service, defined by ``uri`` or constructor's connection parameters. :param str uri: URI path to a specific TAXII service :param bool cache: if discovered services should be cached :return: list of TAXII services :rtype: list of :py:class:`cabby.entities.DetailedServiceInstance` (or :py:class:`cabby.entities.InboxDetailedService`) :raises ValueError: if URI provided is invalid or schema is not supported :raises `cabby.exceptions.HTTPError`: if HTTP error happened :raises `cabby.exceptions.UnsuccessfulStatusError`: if Status Message received and status_type is not `SUCCESS` :raises `cabby.exceptions.ServiceNotFoundError`: if no Discovery servicefound :raises `cabby.exceptions.AmbiguousServicesError`: more than one service with type specified :raises `cabby.exceptions.NoURIProvidedError`: no URI provided and client can't discover services ''' uri = uri or self.discovery_path if not uri: raise NoURIProvidedError('Discovery service URI is not specified') response = self._discovery_request(uri) services = list(map( to_detailed_service_instance_entity, response.service_instances)) self.log.info("%d services discovered", len(services)) if cache: self.services = services return services def __repr__(self): t = '{name}(host={host}, port={port}, discovery_path={discovery_path})' return t.format( name=type(self).__name__, host=self.host, port=self.port, discovery_path=self.discovery_path, )
{ "repo_name": "Intelworks/cabby", "path": "cabby/abstract.py", "copies": "1", "size": "11048", "license": "bsd-3-clause", "hash": 8709552963586365000, "line_mean": 34.4102564103, "line_max": 79, "alpha_frac": 0.6028240406, "autogenerated": false, "ratio": 4.442299959790913, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 312 }
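AbstractClient is not used on its own; concrete TAXII 1.0/1.1 clients come from cabby.create_client. A hedged usage sketch with placeholder host, discovery path and credentials, which needs a reachable TAXII server to actually run:

# Placeholder endpoint and credentials; requires a reachable TAXII server.
from cabby import create_client

client = create_client(
    'taxii.example.com',
    use_https=True,
    discovery_path='/services/discovery')

client.set_auth(username='user', password='secret')

for service in client.discover_services():
    print(service.type, service.address)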
from furl import furl import pendulum from django.conf import settings from share.harvest import BaseHarvester class MendeleyHarvester(BaseHarvester): VERSION = 1 MENDELEY_OAUTH_URL = 'https://api.mendeley.com/oauth/token' def get_token(self): """ Mendeley gives tokens that last for one hour. A new token will be requested everytime the harvester is run to ensure the access token is valid. """ data = {'grant_type': 'client_credentials', 'scope': 'all'} headers = {'Content-Type': 'application/x-www-form-urlencoded'} r = self.requests.post( self.MENDELEY_OAUTH_URL, headers=headers, data=data, auth=(settings.MENDELEY_API_CLIENT_ID, settings.MENDELEY_API_CLIENT_SECRET), ) if r.status_code != 200: raise Exception('Access token not granted. Stopping harvest.') return r.json()['access_token'] def do_harvest(self, start_date, end_date): if not settings.MENDELEY_API_CLIENT_ID or not settings.MENDELEY_API_CLIENT_SECRET: raise Exception('Mendeley authorization information not provided') self.requests.headers.update({'Authorization': 'Bearer ' + self.get_token()}) ACCEPT_HEADER = 'application/vnd.mendeley-public-dataset.1+json' headers = {'Accept': ACCEPT_HEADER} # Inputs are a DateTime object, many APIs only accept dates start_date = start_date.date() # Fetch records is a separate function for readability # Ends up returning a list of tuples with provider given id and the document itself return self.fetch_records(furl(self.config.base_url).set(query_params={ 'modified_since': start_date.isoformat(), 'fields': 'results.*', 'limit': '100', # chance of timing out with larger requests 'sort': 'publish_date', 'order': 'asc', }).url, headers, end_date) def fetch_records(self, url, headers, end_date): resp = self.requests.get(url, headers=headers) while True: for dataset in resp.json()['results']: # modified_since filters on publish_date if pendulum.parse(dataset['publish_date']) >= end_date: break # Send another request to get useful contributor information if 'contributors' in dataset: for contributor in dataset['contributors']: try: profile_resp = self.get_contributor_profile(headers, contributor['profile_id']) contributor['full_profile'] = profile_resp.json() except KeyError: continue yield (dataset['id'], dataset) try: resp = self.requests.get(resp.links['next']['url'], headers=headers) except KeyError: break def get_contributor_profile(self, headers, contributor_uuid): ACCEPT_HEADER = 'application/vnd.mendeley-profiles.1+json' BASE_PROFILE_URL = 'https://api.mendeley.com/profiles/' contributor_headers = {'Accept': ACCEPT_HEADER} profile_url = furl(BASE_PROFILE_URL).join(contributor_uuid).url return self.requests.get(profile_url, headers=contributor_headers)
{ "repo_name": "CenterForOpenScience/SHARE", "path": "share/harvesters/com_mendeley_data.py", "copies": "2", "size": "3403", "license": "apache-2.0", "hash": 8427422412454684000, "line_mean": 40, "line_max": 107, "alpha_frac": 0.6038789304, "autogenerated": false, "ratio": 4.248439450686642, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0007921253419390175, "num_lines": 83 }
from furl import furl import requests import time import re import dateutil.parser from datetime import datetime from .results import SUCCESS, FAILURE from ..clients.ping import ping from ..clients.ssh import RemoteCommandError from ..slave import Slave, get_console from ..actions.buildslave_uptime import buildslave_uptime import logging log = logging.getLogger(__name__) def buildslave_last_activity(name): """Get the build slave state, last activity time, and uptime. Returns a dictionary of the form: { 'last_state': # unknown, booting, stopped, ready, running_command 'last_activity_seconds': # last activity age according to twistd.log, in seconds. 'uptime': uptime # machine uptime, in seconds. } """ slave = Slave(name) slave.load_slavealloc_info() slave.load_devices_info() rc, uptime = buildslave_uptime(name) if rc != SUCCESS: return rc, uptime cur_time = time.time() if uptime < 3 * 60: # Assume we're still booting log.debug("uptime is %.2f; assuming we're still booting up", uptime) return SUCCESS, { "state": "booting", "last_activity": 0 } console = get_console(slave, usebuildbotslave=False) try: log.debug("tailing twistd.log") log.debug("slave.basedir='%s'" % slave.basedir) # we'll disregard the return code b/c it will be non-zero if twistd.log.1 is not found rc, output = console.run_cmd("tail -n 100 %(basedir)s/twistd.log.1 %(basedir)s/twistd.log" % { 'basedir': slave.basedir }) except RemoteCommandError: return FAILURE, "failed to tail twistd.log" console.disconnect() # account for the time it took to retrieve the log tail # and reset cur_time uptime = uptime + int(time.time() - cur_time) cur_time = time.time() last_activity = None running_command = False line = "" last_activity = cur_time last_state = "unknown" for line in output.splitlines(): time.sleep(0) m = re.search(r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})", line) if m: last_activity = time.mktime(time.strptime(m.group(1), "%Y-%m-%d %H:%M:%S")) else: # Not sure what to do with this line... continue if "RunProcess._startCommand" in line or "using PTY: " in line: log.debug("started command - %s", line.strip()) running_command = True elif "commandComplete" in line or "stopCommand" in line: log.debug("done command - %s", line.strip()) running_command = False if "Shut Down" in line: # Check if this happened before we booted, i.e. we're still booting up if (cur_time - last_activity) > uptime: log.debug( "last activity delta (%s) is older than uptime (%s); assuming we're still booting %s", (last_activity - cur_time), uptime, line.strip()) last_state = "booting" else: last_state = "stopped" elif "I have a leftover directory" in line: # Ignore this, it doesn't indicate anything continue elif "slave is ready" in line: if (cur_time - last_activity) < uptime: last_state = "ready" elif running_command: # We're in the middle of running something last_state = "running_command" # Reset last_activity to "now" last_activity = cur_time return SUCCESS, { 'last_state': last_state, 'last_activity_seconds': (cur_time - last_activity), 'uptime': uptime, }
{ "repo_name": "lundjordan/slaveapi", "path": "slaveapi/actions/buildslave_last_activity.py", "copies": "1", "size": "3680", "license": "mpl-2.0", "hash": 1234808280291060500, "line_mean": 34.7281553398, "line_max": 130, "alpha_frac": 0.6002717391, "autogenerated": false, "ratio": 3.758937691521961, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9805464451978501, "avg_score": 0.010748995728691885, "num_lines": 103 }
from furl import furl import requests import time from .results import SUCCESS, FAILURE from ..clients.ping import ping from ..clients.ssh import RemoteCommandError from ..slave import Slave, get_console from ..util import logException import logging log = logging.getLogger(__name__) # The longest we will wait for a slave to shutdown. MAX_SHUTDOWN_WAIT_TIME = 60 * 60 * 5 # 5 hours def shutdown_buildslave(name): """Attempts to gracefully shut down the buildslave process on the named slave. In order to support Windows, this must be done by contacting the Buildbot Master the slave talks to, and requesting the shut down there. (Slave-side graceful shutdown doesn't work on Windows.) Once initiated, the shutdown is confirmed by watching the slave's twistd.log file.""" status_text = "Gracefully shutting down slave..." slave = Slave(name) slave.load_slavealloc_info() slave.load_devices_info() if not slave.master_url: status_text += "Success\nNo master set, nothing to do!" return SUCCESS, status_text if not ping(slave.fqdn): status_text += "Success\nSlave is offline, nothing to do!" return SUCCESS, status_text # We do graceful shutdowns through the master's web interface because it's # the simplest way that works across all platforms. log.info("Starting graceful shutdown.") shutdown_url = furl(slave.master_url) shutdown_url.path = "/buildslaves/%s/shutdown" % slave.name try: # Disabling redirects is important here - otherwise we'll load a # potentially expensive page from the Buildbot master. The response # code is good enough to confirm whether or not initiating this worked # or not anyways. requests.post(str(shutdown_url), allow_redirects=False) except requests.RequestException: logException(log.error, "Failed to initiate graceful shutdown.") status_text += "Failure\nFailed to initiate graceful shutdown through %s" % (shutdown_url,) return FAILURE, status_text twistd_log = "%s/%s" % (slave.basedir, "twistd.log") start = time.time() console = get_console(slave, usebuildbotslave=True) while console and time.time() - start < MAX_SHUTDOWN_WAIT_TIME: try: rc, output = console.run_cmd("tail -n1 %s" % twistd_log) if "Server Shut Down" in output: status_text += "Success" log.debug("Shutdown succeeded.") return SUCCESS, status_text else: time.sleep(30) except RemoteCommandError: logException(log.debug, "Caught error when waiting for shutdown, trying again...") time.sleep(30) else: status_text += "Failure\nCouldn't confirm shutdown" return FAILURE, status_text
{ "repo_name": "lundjordan/slaveapi", "path": "slaveapi/actions/shutdown_buildslave.py", "copies": "1", "size": "2841", "license": "mpl-2.0", "hash": 145023790827401760, "line_mean": 40.1739130435, "line_max": 99, "alpha_frac": 0.6733544527, "autogenerated": false, "ratio": 4.047008547008547, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001073347478989617, "num_lines": 69 }
from furl import furl from datetime import date from share.harvest.harvester import Harvester class USGSHarvester(Harvester): url = 'https://pubs.er.usgs.gov/pubs-services/publication' def do_harvest(self, start_date, end_date): today = date.today() end_date = end_date.date() start_date = start_date.date() end_days_back = (today - end_date).days start_days_back = (today - start_date).days # The USGS API does not support date ranges for days_back in range(end_days_back, start_days_back): page = 1 page_size = 100 while True: resp = self.requests.get(furl(self.url).set(query_params={ 'mod_x_days': days_back + 1, 'page_number': page, 'page_size': page_size }).url) records = resp.json()['records'] for record in records: record_id = record['id'] yield (record_id, record) if len(records) < page_size: break page += 1
{ "repo_name": "zamattiac/SHARE", "path": "providers/gov/usgs/harvester.py", "copies": "1", "size": "1149", "license": "apache-2.0", "hash": -2834925818262086000, "line_mean": 27.725, "line_max": 74, "alpha_frac": 0.5178416014, "autogenerated": false, "ratio": 3.9757785467128026, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9993620148112803, "avg_score": 0, "num_lines": 40 }
from furl import furl from datetime import date from share.harvest import BaseHarvester class USGSHarvester(BaseHarvester): VERSION = 1 def do_harvest(self, start_date, end_date): today = date.today() end_date = end_date.date() start_date = start_date.date() end_days_back = (today - end_date).days start_days_back = (today - start_date).days # The USGS API does not support date ranges for days_back in range(end_days_back, start_days_back): page = 1 page_size = 100 while True: resp = self.requests.get(furl(self.config.base_url).set(query_params={ 'mod_x_days': days_back + 1, 'page_number': page, 'page_size': page_size }).url) records = resp.json()['records'] for record in records: record_id = record['id'] yield (record_id, record) if len(records) < page_size: break page += 1
{ "repo_name": "aaxelb/SHARE", "path": "share/harvesters/gov_usgs.py", "copies": "3", "size": "1112", "license": "apache-2.0", "hash": -1706779271257949400, "line_mean": 26.8, "line_max": 86, "alpha_frac": 0.5107913669, "autogenerated": false, "ratio": 4.073260073260073, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6084051440160073, "avg_score": null, "num_lines": null }
from furl import furl from django.contrib import admin from django.urls import reverse from django.utils.html import format_html from share.admin.util import FuzzyPaginator, linked_fk, linked_many, admin_link, SourceConfigFilter from share.models.jobs import AbstractBaseJob, IngestJob from share.tasks import ingest STATUS_COLORS = { AbstractBaseJob.STATUS.created: 'blue', AbstractBaseJob.STATUS.started: 'cyan', AbstractBaseJob.STATUS.failed: 'red', AbstractBaseJob.STATUS.succeeded: 'green', AbstractBaseJob.STATUS.rescheduled: 'goldenrod', AbstractBaseJob.STATUS.forced: 'maroon', AbstractBaseJob.STATUS.skipped: 'orange', AbstractBaseJob.STATUS.retried: 'darkseagreen', AbstractBaseJob.STATUS.cancelled: 'grey', } @linked_fk('source_config') class BaseJobAdmin(admin.ModelAdmin): list_filter = ('status', SourceConfigFilter, ) list_select_related = ('source_config', ) actions = ('restart_tasks', ) readonly_fields = ('task_id', 'error_type', 'error_message', 'error_context', 'completions', 'date_started', 'source_config_version', ) show_full_result_count = False paginator = FuzzyPaginator def status_(self, obj): return format_html( '<span style="font-weight: bold; color: {}">{}</span>', STATUS_COLORS[obj.status], AbstractBaseJob.STATUS[obj.status].title(), ) def source_config_(self, obj): return obj.source_config.label class HarvestJobAdmin(BaseJobAdmin): list_display = ('id', 'source_config_', 'status_', 'start_date_', 'end_date_', 'error_type', 'share_version', 'harvest_job_actions', ) readonly_fields = BaseJobAdmin.readonly_fields + ('harvester_version', 'start_date', 'end_date', 'harvest_job_actions',) def start_date_(self, obj): return obj.start_date.isoformat() def end_date_(self, obj): return obj.end_date.isoformat() def harvest_job_actions(self, obj): url = furl(reverse('admin:source-config-harvest', args=[obj.source_config_id])) url.args['start'] = self.start_date_(obj) url.args['end'] = self.end_date_(obj) url.args['superfluous'] = True return format_html('<a class="button" href="{}">Restart</a>', url.url) harvest_job_actions.short_description = 'Actions' @linked_fk('suid') @linked_many( 'ingested_normalized_data', order_by=['-created_at'], select_related=['source'], ) class IngestJobAdmin(BaseJobAdmin): actions = ('reingest', 'reingest_without_shareobject', ) list_display = ('id', 'source_config_', 'suid_', 'status_', 'date_started', 'error_type', 'share_version', ) list_select_related = BaseJobAdmin.list_select_related + ('suid',) readonly_fields = BaseJobAdmin.readonly_fields + ('transformer_version', 'regulator_version', 'retries', 'most_recent_suid_raw',) show_full_result_count = False def get_search_results(self, request, queryset, search_term): # return (queryset, is_distinct) pair return queryset.filter(suid__identifier=search_term), False def suid_(self, obj): return obj.suid.identifier def most_recent_suid_raw(self, obj): return admin_link(obj.suid.most_recent_raw_datum()) def reingest(self, request, queryset): self._enqueue_tasks(queryset) reingest.short_description = 'Re-ingest' def reingest_without_shareobject(self, request, queryset): self._enqueue_tasks(queryset, {'apply_changes': False}) reingest_without_shareobject.short_description = 'Re-ingest (skipping ShareObject)' def _enqueue_tasks(self, job_queryset, task_kwargs=None): # grab the ids once, use them twice job_ids = list(job_queryset.values_list('id', flat=True)) IngestJob.objects.filter(id__in=job_ids).update( status=AbstractBaseJob.STATUS.created ) for job_id in job_ids: ingest.delay(job_id=job_id, **(task_kwargs 
or {}))
{ "repo_name": "aaxelb/SHARE", "path": "share/admin/jobs.py", "copies": "1", "size": "3969", "license": "apache-2.0", "hash": 8877463089553681000, "line_mean": 37.5339805825, "line_max": 139, "alpha_frac": 0.671957672, "autogenerated": false, "ratio": 3.506183745583039, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9674676533186741, "avg_score": 0.0006929768792595582, "num_lines": 103 }
from furl import furl from framework.utils import iso8601format from dateutil import parser from website.project.metadata.utils import serialize_meta_schema from website.settings import DOMAIN as OSF_DOMAIN EMBARGO = 'embargo' IMMEDIATE = 'immediate' def serialize_user(user): return { 'full_name': user.fullname, 'username': user.username, 'id': user._id } # TODO: Write and use APIv2 serializer for this def serialize_draft_registration(draft, json_safe=True): if draft.branched_from is not None: url = furl(OSF_DOMAIN) url.path.add(draft.branched_from.url) node_url = url.url else: node_url = None registration_choice = draft.approval.meta.get('registration_choice', None) if registration_choice == EMBARGO: time = parser.parse(draft.approval.meta['embargo_end_date']) embargo = iso8601format(time) if json_safe else time else: embargo = IMMEDIATE return { 'pk': draft._id, 'initiator': serialize_user(draft.initiator), 'registration_metadata': draft.registration_metadata, 'registration_schema': serialize_meta_schema(draft.registration_schema), 'initiated': iso8601format(draft.datetime_initiated) if json_safe else draft.datetime_initiated, 'updated': iso8601format(draft.datetime_updated) if json_safe else draft.datetime_updated, 'submitted': iso8601format(draft.approval.initiation_date) if json_safe else draft.approval.initiation_date, 'requires_approval': draft.requires_approval, 'is_pending_approval': draft.is_pending_review, 'is_approved': draft.is_approved, 'is_rejected': draft.is_rejected, 'notes': draft.notes, 'proof_of_publication': draft.flags.get('proof_of_publication'), 'payment_sent': draft.flags.get('payment_sent'), 'assignee': draft.flags.get('assignee'), 'title': draft.registration_metadata['q1']['value'], 'embargo': embargo, 'registered_node': node_url, }
{ "repo_name": "TomHeatwole/osf.io", "path": "admin/pre_reg/serializers.py", "copies": "9", "size": "2059", "license": "apache-2.0", "hash": 3536682514051005400, "line_mean": 35.7678571429, "line_max": 116, "alpha_frac": 0.6736279747, "autogenerated": false, "ratio": 3.7233273056057867, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8896955280305787, "avg_score": null, "num_lines": null }
from furl import furl from lxml import etree from share.harvest.harvester import Harvester class SciTechHarvester(Harvester): url = 'https://www.osti.gov/scitech/scitechxml' namespaces = { 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'dc': 'http://purl.org/dc/elements/1.1/', 'dcq': 'http://purl.org/dc/terms/' } def do_harvest(self, start_date, end_date): end_date = end_date.date() start_date = start_date.date() page = 0 more_pages = True while more_pages: response = self.requests.get(furl(self.url).set(query_params={ 'page': page, 'EntryDateTo': end_date.strftime('%m/%d/%Y'), 'EntryDateFrom': start_date.strftime('%m/%d/%Y'), }).url) xml = etree.XML(response.content) records = xml.xpath('records/record') for record in records: doc_id = record.xpath('dc:ostiId/node()', namespaces=self.namespaces)[0] doc = etree.tostring(record) yield (doc_id, doc) page += 1 more_pages = xml.xpath('//records/@morepages')[0] == 'true'
{ "repo_name": "zamattiac/SHARE", "path": "providers/gov/scitech/harvester.py", "copies": "1", "size": "1207", "license": "apache-2.0", "hash": -4207630561585231400, "line_mean": 29.9487179487, "line_max": 88, "alpha_frac": 0.5468102734, "autogenerated": false, "ratio": 3.478386167146974, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4525196440546974, "avg_score": null, "num_lines": null }
from furl import furl from lxml import etree from share.harvest import BaseHarvester class DataOneHarvester(BaseHarvester): VERSION = 1 def do_harvest(self, start_date, end_date): end_date = end_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z' start_date = start_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z' url = furl(self.config.base_url).set(query_params={ 'q': 'dateModified:[{} TO {}]'.format(start_date, end_date), 'start': 0, 'rows': 1 }).url return self.fetch_records(url, start_date, end_date) def fetch_records(self, url, start_date, end_date): resp = self.requests.get(url) doc = etree.XML(resp.content) total_records = int(doc.xpath("//result/@numFound")[0]) records_processed = 0 while records_processed < total_records: response = self.requests.get(furl(url).set(query_params={ 'q': 'dateModified:[{} TO {}]'.format(start_date, end_date), 'start': records_processed, 'rows': 1000 }).url) docs = etree.XML(response.content).xpath('//doc') for doc in docs: doc_id = doc.xpath("str[@name='id']")[0].text doc = etree.tostring(doc) yield (doc_id, doc) records_processed += len(docs)
{ "repo_name": "laurenbarker/SHARE", "path": "share/harvesters/org_dataone.py", "copies": "3", "size": "1426", "license": "apache-2.0", "hash": -2137325467469289000, "line_mean": 32.1627906977, "line_max": 92, "alpha_frac": 0.5638148668, "autogenerated": false, "ratio": 3.601010101010101, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5664824967810101, "avg_score": null, "num_lines": null }
from furl import furl from lxml import etree from share.harvest import BaseHarvester class SciTechHarvester(BaseHarvester): VERSION = 1 namespaces = { 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'dc': 'http://purl.org/dc/elements/1.1/', 'dcq': 'http://purl.org/dc/terms/' } def do_harvest(self, start_date, end_date): end_date = end_date.date() start_date = start_date.date() page = 0 more_pages = True while more_pages: response = self.requests.get(furl(self.config.base_url).set(query_params={ 'page': page, 'EntryDateTo': end_date.strftime('%m/%d/%Y'), 'EntryDateFrom': start_date.strftime('%m/%d/%Y'), }).url) xml = etree.XML(response.content) records = xml.xpath('records/record') for record in records: doc_id = record.xpath('dc:ostiId/node()', namespaces=self.namespaces)[0] doc = etree.tostring(record) yield (doc_id, doc) page += 1 more_pages = xml.xpath('//records/@morepages')[0] == 'true'
{ "repo_name": "laurenbarker/SHARE", "path": "share/harvesters/gov_scitech.py", "copies": "3", "size": "1181", "license": "apache-2.0", "hash": -2986430323986896400, "line_mean": 29.2820512821, "line_max": 88, "alpha_frac": 0.5436071126, "autogenerated": false, "ratio": 3.5572289156626504, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.560083602826265, "avg_score": null, "num_lines": null }
from furl import furl from lxml import etree from share import Harvester class DataOneHarvester(Harvester): url = 'https://cn.dataone.org/cn/v2/query/solr/' def do_harvest(self, start_date, end_date): end_date = end_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z' start_date = start_date.format('YYYY-MM-DDT00:00:00', formatter='alternative') + 'Z' url = furl(self.url).set(query_params={ 'q': 'dateModified:[{} TO {}]'.format(start_date, end_date), 'start': 0, 'rows': 1 }).url return self.fetch_records(url, start_date, end_date) def fetch_records(self, url, start_date, end_date): resp = self.requests.get(url) doc = etree.XML(resp.content) total_records = int(doc.xpath("//result/@numFound")[0]) records_processed = 0 while records_processed < total_records: response = self.requests.get(furl(url).set(query_params={ 'q': 'dateModified:[{} TO {}]'.format(start_date, end_date), 'start': records_processed, 'rows': 1000 }).url) docs = etree.XML(response.content).xpath('//doc') for doc in docs: doc_id = doc.xpath("str[@name='id']")[0].text doc = etree.tostring(doc) yield (doc_id, doc) records_processed += len(docs)
{ "repo_name": "zamattiac/SHARE", "path": "providers/org/dataone/harvester.py", "copies": "1", "size": "1435", "license": "apache-2.0", "hash": 2352647800505491000, "line_mean": 32.3720930233, "line_max": 92, "alpha_frac": 0.5602787456, "autogenerated": false, "ratio": 3.5344827586206895, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9592204685240664, "avg_score": 0.0005113637960051811, "num_lines": 43 }
from furl import furl
from osfoffline.settings import API_BASE, FILE_BASE

# todo: can make a api_url_for function that makes things potentially simpler...?

USERS = 'users'
NODES = 'nodes'
FILES = 'files'
APPLICATIONS = 'applications'
CHILDREN = 'children'
RESOURCES = 'resources'


def _ensure_trailing_slash(url):
    url = url.rstrip('/')
    return url + '/'


def api_url_for(endpoint_type, related_type=None, **kwargs):
    base = furl(API_BASE)
    files_base = furl(FILE_BASE)
    assert endpoint_type in [USERS, NODES, FILES, APPLICATIONS, RESOURCES]

    if endpoint_type == USERS:
        base.path.segments.extend(['v2', USERS])
        if 'user_id' in kwargs and kwargs['user_id'] is not None:
            base.path.segments.append(str(kwargs['user_id']))
        if related_type:
            assert related_type in [NODES]
            base.path.segments.append(related_type)
            user_nodes = _ensure_trailing_slash(base.url)
            user_nodes += '?filter[registration]=false'
            return user_nodes
    elif endpoint_type == NODES:
        base.path.segments.extend(['v2', NODES])
        if 'node_id' in kwargs and kwargs['node_id'] is not None:
            base.path.segments.append(str(kwargs['node_id']))
        if related_type:
            assert related_type in [FILES, CHILDREN]
            base.path.segments.append(related_type)
            if kwargs.get('provider') is not None and kwargs.get('file_id') is not None:
                base.path.segments.extend([kwargs['provider'], str(kwargs['file_id'])])
    elif endpoint_type == FILES:
        base.path.segments.extend(['v2', FILES])
        if 'file_id' in kwargs and kwargs['file_id'] is not None:
            base.path.segments.append(str(kwargs['file_id']))
    elif endpoint_type == RESOURCES:
        # /v1/resources/6/providers/osfstorage/21/?kind=folder&name=FUN_FOLDER HTTP/1.1" 200 -
        files_base.path.segments.extend(['v1', RESOURCES, str(kwargs['node_id']), 'providers', kwargs['provider']])
        if 'file_id' in kwargs and kwargs['file_id'] is not None:
            files_base.path.segments.append(str(kwargs['file_id']))
        return _ensure_trailing_slash(files_base.url)

    return _ensure_trailing_slash(base.url)
{ "repo_name": "chennan47/OSF-Offline", "path": "osfoffline/polling_osf_manager/api_url_builder.py", "copies": "1", "size": "2402", "license": "apache-2.0", "hash": 7844062129720972000, "line_mean": 39.7118644068, "line_max": 94, "alpha_frac": 0.5924229808, "autogenerated": false, "ratio": 3.6953846153846155, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9783966831001522, "avg_score": 0.0007681530366188024, "num_lines": 59 }
from furl import furl from share import Harvester class CrossRefHarvester(Harvester): url = 'https://api.crossref.org/v1/works' def do_harvest(self, start_date, end_date): start_date = start_date.date() end_date = end_date.date() return self.fetch_records(furl(self.url).set(query_params={ 'filter': 'from-update-date:{},until-update-date:{}'.format( start_date.isoformat(), end_date.isoformat() ), 'rows': 1000 }).url) def fetch_records(self, url): resp = self.requests.get(url) resp.raise_for_status() total = resp.json()['message']['total-results'] records = resp.json()['message']['items'] # return the first 1000 records for record in records: yield (record['DOI'], record) # make requests for the remaining records for i in range(1000, total, 1000): response = self.requests.get(furl(url).add(query_params={ 'offset': i }).url) response.raise_for_status() records = response.json()['message']['items'] for record in records: yield (record['DOI'], record)
{ "repo_name": "zamattiac/SHARE", "path": "providers/org/crossref/harvester.py", "copies": "1", "size": "1250", "license": "apache-2.0", "hash": -4282613957535031000, "line_mean": 30.25, "line_max": 72, "alpha_frac": 0.5552, "autogenerated": false, "ratio": 4.006410256410256, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5061610256410256, "avg_score": null, "num_lines": null }
from furl import furl import requests from requests import RequestException import json import logging from ..actions.results import FAILURE, SUCCESS from ..util import logException log = logging.getLogger(__name__) def get_slave(api, id_=None, name=None): if id_ and name: raise ValueError("Can't retrieve slave by id and name at the same time.") url = furl(api) if id_: url.path.add("slaves/%s" % id_) elif name: url.path.add("slaves/%s" % name) url.args["byname"] = 1 else: raise Exception() log.info("Making request to: %s", url) return requests.get(str(url)).json() def get_slave_id(api, name): return get_slave(api, name=name)['slaveid'] def update_slave(api, name, data): """ updates a slave's values in slavealloc. :param api: the api url for slavealloc :type api: str :param name: hostname of slave :type name: str :param data: values to be updated :type data: dict :rtype: tuple """ return_msg = "Updating slave %s in slavealloc..." % name id_ = get_slave_id(api, name=name) url = furl(api) url.path.add("slaves/%s" % id_) payload = json.dumps(data) try: response = requests.put(str(url), data=payload) except RequestException as e: logException(log.error, "Caught exception while updating slavealloc.") logException(log.error, "Exception message: %s" % e) return_msg += "Failed\nCaught exception while updating: %s" % (e,) return FAILURE, return_msg if response.status_code == 200: return_msg += "Success" return_code = SUCCESS else: return_msg += "Failed\n" return_msg += 'error response code: %s\n' % response.status_code return_msg += 'error response msg: %s' % response.reason return_code = FAILURE return return_code, return_msg def get_slaves(api, purposes=[], environs=[], pools=[], enabled=None): url = furl(api) url.path.add("slaves") url.args["purpose"] = purposes url.args["environment"] = environs url.args["pool"] = pools if enabled: url.args["enabled"] = int(enabled) log.info("Making request to: %s", url) return requests.get(str(url)).json() def get_master(api, id_): url = furl(api) url.path.add("masters/%s" % id_) return requests.get(str(url)).json()
{ "repo_name": "lundjordan/slaveapi", "path": "slaveapi/clients/slavealloc.py", "copies": "1", "size": "2392", "license": "mpl-2.0", "hash": -3830235741111883000, "line_mean": 26.1818181818, "line_max": 81, "alpha_frac": 0.622909699, "autogenerated": false, "ratio": 3.497076023391813, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4619985722391813, "avg_score": null, "num_lines": null }
from furl import furl


class GenericUrlBuilder(object):
    def __init__(self, worker_conf):
        """
        use a url template to create services URLs based on the
        container networking config
        :param worker_conf: dict containing the worker config
        :return:
        """
        self.service_url = dict()
        if 'formatting_string' in worker_conf:
            for url in worker_conf['formatting_string'].split(';'):
                self.service_url[url] = furl(url)

    def build(self, port_mapping):
        """
        :param port_mapping: a docker port mapping (container[][]),
        e.g. {u'15672/tcp': [{u'HostPort': u'7000', u'HostIp': u'192.168.59.103'}],
              u'5672/tcp': [{u'HostPort': u'7001', u'HostIp': u'192.168.59.103'}]}
        :return: list of urls
        """
        urls = list()
        for internal_port in port_mapping.keys():
            for index, unused in enumerate(port_mapping[internal_port]):
                endpoint = port_mapping[internal_port][index]
                port, proto = internal_port.split('/')
                tmp_url = None
                # if our internal url is part of a formatting template, use it
                for item in self.service_url.keys():
                    if int(port) == self.service_url[item].port:
                        tmp_url = self.service_url[item].copy()
                        tmp_url.port = int(endpoint['HostPort'])
                        tmp_url.host = endpoint['HostIp']
                        break
                # otherwise we create a url with http (most often the default?)
                if not tmp_url:
                    tmp_url = furl('http://{}:{}'.format(
                        endpoint['HostIp'], endpoint['HostPort']))
                urls.append(str(tmp_url))
        return urls
{ "repo_name": "BeneDicere/metahosting-worker", "path": "urlbuilders/__init__.py", "copies": "1", "size": "1815", "license": "bsd-3-clause", "hash": 9101087328737116000, "line_mean": 41.2093023256, "line_max": 78, "alpha_frac": 0.5245179063, "autogenerated": false, "ratio": 4.134396355353076, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5158914261653076, "avg_score": null, "num_lines": null }
from furl import furl def connection_should_use_tls(url): # Services such as RabbitMQ/MySQL running on Travis do not yet have TLS # certificates set up. We could try using TLS locally using self-signed certs, # but until Travis has support it's not overly useful. host = furl(url).host or url # The url passed is already just the hostname. return host not in ('0.0.0.0', 'localhost', 'mysql', 'redis', 'rabbitmq', '127.0.0.1') def get_tls_redis_url(redis_url): """ Returns the TLS version of a Heroku REDIS_URL string. Whilst Redis server (like memcached) doesn't natively support TLS, Heroku runs an stunnel daemon on their Redis instances, which can be connected to directly by Redis clients that support TLS (avoiding the need for stunnel on the client). The stunnel port is one higher than the Redis server port, and the informal `rediss://` scheme used to instruct clients to wrap the connection with TLS. Will convert 'redis://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8409' ...to: 'rediss://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8410?ssl_cert_reqs=none' See: https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel """ url = furl(redis_url) url.port += 1 url.scheme += 's' # Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x), # since for now Heroku Redis uses self-signed certificates: # https://bugzilla.mozilla.org/show_bug.cgi?id=1510000 url.args['ssl_cert_reqs'] = 'none' return str(url)
{ "repo_name": "KWierso/treeherder", "path": "treeherder/config/utils.py", "copies": "1", "size": "1593", "license": "mpl-2.0", "hash": 3193700137794375000, "line_mean": 44.5142857143, "line_max": 95, "alpha_frac": 0.7062146893, "autogenerated": false, "ratio": 3.455531453362256, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4661746142662256, "avg_score": null, "num_lines": null }
from fusedwind.plant_flow.vt import GenericWindTurbineVT, GenericWindTurbinePowerCurveVT, \ ExtendedWindTurbinePowerCurveVT, WeibullWindRoseVT, GenericWindRoseVT, GenericWindFarmTurbineLayout, WTPC from fusedwind.plant_flow.comp import WeibullWindRose from fusedwind.fused_helper import init_container from random import random from numpy import array, vstack, linspace, pi, floor import numpy as np from numpy import ndarray, array, loadtxt, log, zeros, cos, arccos, sin, nonzero, argsort, NaN, mean, ones, vstack, linspace, exp, arctan, arange from numpy import pi, sqrt, dot, diff wr_example = { 'wind_directions': [ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.], 'frequency_array': array([[ 5.98508681e-04, 1.29125597e-03, 1.40644367e-03, 1.50695738e-03, 1.59075031e-03, 1.65624153e-03, 1.70235893e-03, 1.72856490e-03, 1.73486257e-03, 1.72178180e-03, 1.69034515e-03, 1.64201552e-03, 1.57862816e-03, 1.50231057e-03, 1.41539443e-03, 1.32032425e-03, 1.21956703e-03, 1.11552739e-03, 1.01047187e-03, 9.06465283e-04, 8.05321315e-04, 7.08568401e-04, 6.17431098e-04, 5.32826166e-04, 4.55371884e-04, 3.85408522e-04, 3.23027505e-04, 2.68106646e-04, 2.20348820e-04, 1.79321659e-04, 1.44496131e-04, 1.15282311e-04, 9.10610673e-05, 7.12108611e-05, 5.51292806e-05, 4.22493037e-05, 3.20505858e-05, 2.40662917e-05, 1.78861305e-05, 1.31563231e-05, 9.57723654e-06, 6.89937639e-06, 4.91835204e-06, 3.46932973e-06, 2.42138393e-06, 1.67205154e-06, 1.14229776e-06, 7.72019618e-07, 5.16145719e-07, 1.88493405e-07], [ 5.37841890e-04, 1.17137781e-03, 1.29209566e-03, 1.40243194e-03, 1.50017830e-03, 1.58343336e-03, 1.65065464e-03, 1.70070122e-03, 1.73286418e-03, 1.74688276e-03, 1.74294495e-03, 1.72167214e-03, 1.68408831e-03, 1.63157510e-03, 1.56581497e-03, 1.48872509e-03, 1.40238526e-03, 1.30896313e-03, 1.21064049e-03, 1.10954356e-03, 1.00768041e-03, 9.06887902e-04, 8.08789626e-04, 7.14766123e-04, 6.25937453e-04, 5.43157779e-04, 4.67020940e-04, 3.97875490e-04, 3.35847385e-04, 2.80868263e-04, 2.32707260e-04, 1.91004359e-04, 1.55303494e-04, 1.25083907e-04, 9.97885749e-05, 7.88488978e-05, 6.17051693e-05, 4.78226658e-05, 3.67034680e-05, 2.78943290e-05, 2.09910581e-05, 1.56399791e-05, 1.15370570e-05, 8.42528041e-06, 6.09084297e-06, 4.35860280e-06, 3.08721638e-06, 2.16426127e-06, 1.50157853e-06, 5.64510272e-07], [ 7.34384828e-04, 1.59522107e-03, 1.75338058e-03, 1.89615822e-03, 2.02065184e-03, 2.12442644e-03, 2.20558187e-03, 2.26280528e-03, 2.29540446e-03, 2.30331951e-03, 2.28711186e-03, 2.24793044e-03, 2.18745677e-03, 2.10783128e-03, 2.01156471e-03, 1.90143894e-03, 1.78040205e-03, 1.65146286e-03, 1.51758962e-03, 1.38161747e-03, 1.24616827e-03, 1.11358564e-03, 9.85887069e-04, 8.64733665e-04, 7.51417342e-04, 6.46864186e-04, 5.51651985e-04, 4.66039404e-04, 3.90003912e-04, 3.23285473e-04, 2.65433096e-04, 2.15851592e-04, 1.73846289e-04, 1.38663902e-04, 1.09528303e-04, 8.56703870e-05, 6.63517639e-05, 5.08823379e-05, 3.86322096e-05, 2.90385320e-05, 2.16081112e-05, 1.59165946e-05, 1.16050941e-05, 8.37502495e-06, 5.98185836e-06, 4.22836268e-06, 2.95779353e-06, 2.04737008e-06, 1.40226805e-06, 5.21865933e-07], [ 8.33549350e-04, 1.84169114e-03, 2.06792348e-03, 2.28187986e-03, 2.47872489e-03, 2.65397760e-03, 2.80365465e-03, 2.92440538e-03, 3.01363035e-03, 3.06957564e-03, 3.09139654e-03, 3.07918593e-03, 3.03396438e-03, 2.95763173e-03, 2.85288197e-03, 2.72308551e-03, 2.57214521e-03, 2.40433379e-03, 2.22412177e-03, 2.03600497e-03, 1.84434078e-03, 1.65320138e-03, 1.46625052e-03, 1.28664892e-03, 1.11699093e-03, 9.59273093e-04, 
8.14893252e-04, 6.84676880e-04, 5.68926093e-04, 4.67485808e-04, 3.79821142e-04, 3.05100195e-04, 2.42276867e-04, 1.90169161e-04, 1.47529438e-04, 1.13104229e-04, 8.56822934e-05, 6.41306205e-05, 4.74189068e-05, 3.46336791e-05, 2.49836383e-05, 1.77980092e-05, 1.25197133e-05, 8.69506677e-06, 5.96149641e-06, 4.03449922e-06, 2.69478465e-06, 1.77626040e-06, 1.15527504e-06, 4.12261443e-07], [ 8.75649591e-04, 1.96089486e-03, 2.23844060e-03, 2.50798829e-03, 2.76299025e-03, 2.99704926e-03, 3.20413948e-03, 3.37883326e-03, 3.51652048e-03, 3.61360643e-03, 3.66767480e-03, 3.67760359e-03, 3.64362439e-03, 3.56731878e-03, 3.45154953e-03, 3.30032921e-03, 3.11863272e-03, 2.91216488e-03, 2.68709730e-03, 2.44979088e-03, 2.20652129e-03, 1.96322419e-03, 1.72527478e-03, 1.49731330e-03, 1.28312416e-03, 1.08557161e-03, 9.06590931e-04, 7.47229760e-04, 6.07731093e-04, 4.87647468e-04, 3.85974642e-04, 3.01293307e-04, 2.31908402e-04, 1.75977466e-04, 1.31621718e-04, 9.70160297e-05, 7.04562783e-05, 5.04046218e-05, 3.55147821e-05, 2.46404880e-05, 1.68307569e-05, 1.13157851e-05, 7.48694566e-06, 4.87388896e-06, 3.12111215e-06, 1.96570164e-06, 1.21733999e-06, 7.41145938e-07, 4.43511188e-07, 1.47913995e-07], [ 9.21833138e-04, 2.02972715e-03, 2.26792759e-03, 2.48924710e-03, 2.68828202e-03, 2.86016852e-03, 3.00075514e-03, 3.10675755e-03, 3.17588458e-03, 3.20692676e-03, 3.19980011e-03, 3.15554126e-03, 3.07625259e-03, 2.96499978e-03, 2.82566704e-03, 2.66277825e-03, 2.48129442e-03, 2.28639940e-03, 2.08328616e-03, 1.87695569e-03, 1.67203910e-03, 1.47265146e-03, 1.28228339e-03, 1.10373323e-03, 9.39079988e-04, 7.89694167e-04, 6.56281665e-04, 5.38953963e-04, 4.37317052e-04, 3.50571223e-04, 2.77614200e-04, 2.17141074e-04, 1.67735711e-04, 1.27949878e-04, 9.63677749e-05, 7.16551869e-05, 5.25936127e-05, 3.81007254e-05, 2.72391342e-05, 1.92157716e-05, 1.33743058e-05, 9.18285657e-06, 6.21901467e-06, 4.15380956e-06, 2.73587799e-06, 1.77670735e-06, 1.13749101e-06, 7.17855181e-07, 4.46505085e-07, 1.53770564e-07], [ 1.16901958e-03, 2.57123904e-03, 2.86920047e-03, 3.14535195e-03, 3.39300616e-03, 3.60617562e-03, 3.77978313e-03, 3.90984848e-03, 3.99363885e-03, 4.02977166e-03, 4.01826203e-03, 3.96051003e-03, 3.85922694e-03, 3.71830354e-03, 3.54262752e-03, 3.33786020e-03, 3.11018562e-03, 2.86604639e-03, 2.61188155e-03, 2.35388078e-03, 2.09776781e-03, 1.84862318e-03, 1.61075328e-03, 1.38760920e-03, 1.18175505e-03, 9.94882455e-04, 8.27865106e-04, 6.80845117e-04, 5.53342092e-04, 4.44375282e-04, 3.52589813e-04, 2.76379013e-04, 2.13996464e-04, 1.63653140e-04, 1.23596898e-04, 9.21732637e-05, 6.78679450e-05, 4.93326593e-05, 3.53966246e-05, 2.50665048e-05, 1.75177046e-05, 1.20797737e-05, 8.21835936e-06, 5.51572541e-06, 3.65138564e-06, 2.38394737e-06, 1.53484965e-06, 9.74342318e-07, 6.09789869e-07, 2.11057434e-07], [ 1.29167899e-03, 2.84708713e-03, 3.18980733e-03, 3.51603080e-03, 3.81940501e-03, 4.09401758e-03, 4.33455093e-03, 4.53642998e-03, 4.69595418e-03, 4.81040657e-03, 4.87813326e-03, 4.89858844e-03, 4.87234169e-03, 4.80104624e-03, 4.68736902e-03, 4.53488515e-03, 4.34794164e-03, 4.13149657e-03, 3.89094109e-03, 3.63191281e-03, 3.36010887e-03, 3.08110718e-03, 2.80020331e-03, 2.52226949e-03, 2.25164048e-03, 1.99202970e-03, 1.74647673e-03, 1.51732586e-03, 1.30623376e-03, 1.11420268e-03, 9.41634913e-04, 7.88403451e-04, 6.53933420e-04, 5.37289163e-04, 4.37262064e-04, 3.52454934e-04, 2.81359597e-04, 2.22425166e-04, 1.74115486e-04, 1.34954997e-04, 1.03563111e-04, 7.86777442e-05, 5.91691442e-05, 4.40454359e-05, 3.24514605e-05, 2.36624992e-05, 1.70743903e-05, 
1.21913870e-05, 8.61289935e-06, 3.28301641e-06], [ 1.38975021e-03, 3.05388147e-03, 3.41196792e-03, 3.75586418e-03, 4.08021930e-03, 4.38002322e-03, 4.65070152e-03, 4.88820790e-03, 5.08910961e-03, 5.25066120e-03, 5.37086331e-03, 5.44850321e-03, 5.48317510e-03, 5.47527889e-03, 5.42599698e-03, 5.33724976e-03, 5.21163111e-03, 5.05232625e-03, 4.86301505e-03, 4.64776411e-03, 4.41091185e-03, 4.15695061e-03, 3.89040996e-03, 3.61574527e-03, 3.33723508e-03, 3.05889041e-03, 2.78437835e-03, 2.51696181e-03, 2.25945622e-03, 2.01420338e-03, 1.78306198e-03, 1.56741358e-03, 1.36818226e-03, 1.18586592e-03, 1.02057680e-03, 8.72088629e-04, 7.39888098e-04, 6.23227994e-04, 5.21180074e-04, 4.32685696e-04, 3.56602728e-04, 2.91747592e-04, 2.36931694e-04, 1.90991816e-04, 1.52814395e-04, 1.21353871e-04, 9.56455150e-05, 7.48132906e-05, 5.80734310e-05, 2.38465774e-05], [ 1.17262387e-03, 2.60943924e-03, 2.96153237e-03, 3.30851045e-03, 3.64475920e-03, 3.96476457e-03, 4.26322159e-03, 4.53514933e-03, 4.77600674e-03, 4.98180415e-03, 5.14920535e-03, 5.27561545e-03, 5.35925035e-03, 5.39918426e-03, 5.39537250e-03, 5.34864811e-03, 5.26069168e-03, 5.13397512e-03, 4.97168137e-03, 4.77760292e-03, 4.55602326e-03, 4.31158587e-03, 4.04915610e-03, 3.77368147e-03, 3.49005569e-03, 3.20299167e-03, 2.91690796e-03, 2.63583236e-03, 2.36332540e-03, 2.10242534e-03, 1.85561537e-03, 1.62481230e-03, 1.41137541e-03, 1.21613315e-03, 1.03942478e-03, 8.81153559e-04, 7.40848127e-04, 6.17728441e-04, 5.10773070e-04, 4.18784878e-04, 3.40452671e-04, 2.74406873e-04, 2.19267908e-04, 1.73686495e-04, 1.36375601e-04, 1.06134229e-04, 8.18635783e-05, 6.25763984e-05, 4.74005026e-05, 1.90836763e-05], [ 8.24556184e-04, 1.83763158e-03, 2.08931937e-03, 2.33782881e-03, 2.57906571e-03, 2.80899131e-03, 3.02370408e-03, 3.21952653e-03, 3.39309331e-03, 3.54143638e-03, 3.66206360e-03, 3.75302687e-03, 3.81297659e-03, 3.84119948e-03, 3.83763784e-03, 3.80288855e-03, 3.73818169e-03, 3.64533906e-03, 3.52671394e-03, 3.38511469e-03, 3.22371493e-03, 3.04595417e-03, 2.85543299e-03, 2.65580696e-03, 2.45068367e-03, 2.24352684e-03, 2.03757097e-03, 1.83574943e-03, 1.64063812e-03, 1.45441579e-03, 1.27884152e-03, 1.11524876e-03, 9.64554676e-04, 8.27283012e-04, 7.03597966e-04, 5.93346558e-04, 4.96106649e-04, 4.11237894e-04, 3.37933106e-04, 2.75267810e-04, 2.22246152e-04, 1.77841801e-04, 1.41032921e-04, 1.10830721e-04, 8.63015293e-05, 6.65826278e-05, 5.08923720e-05, 3.85353060e-05, 2.89030828e-05, 1.15455611e-05], [ 7.32521276e-04, 1.58062566e-03, 1.72456794e-03, 1.85424825e-03, 1.96761259e-03, 2.06297576e-03, 2.13905365e-03, 2.19498595e-03, 2.23034790e-03, 2.24514978e-03, 2.23982416e-03, 2.21520098e-03, 2.17247154e-03, 2.11314284e-03, 2.03898398e-03, 1.95196698e-03, 1.85420428e-03, 1.74788524e-03, 1.63521428e-03, 1.51835254e-03, 1.39936521e-03, 1.28017592e-03, 1.16252948e-03, 1.04796352e-03, 9.37789483e-04, 8.33082624e-04, 7.34680586e-04, 6.43189695e-04, 5.58997846e-04, 4.82292734e-04, 4.13084095e-04, 3.51228587e-04, 2.96456022e-04, 2.48395784e-04, 2.06602397e-04, 1.70579417e-04, 1.39801019e-04, 1.13730832e-04, 9.18378043e-05, 7.36090050e-05, 5.85594467e-05, 4.62391216e-05, 3.62375226e-05, 2.81859826e-05, 2.17581928e-05, 1.66692633e-05, 1.26736799e-05, 9.56247777e-06, 7.15991731e-06, 2.85976559e-06]]), 'wind_speeds': [ 4. , 4.42857143, 4.85714286, 5.28571429, 5.71428571, 6.14285714, 6.57142857, 7. , 7.42857143, 7.85714286, 8.28571429, 8.71428571, 9.14285714, 9.57142857, 10. , 10.42857143, 10.85714286, 11.28571429, 11.71428571, 12.14285714, 12.57142857, 13. 
, 13.42857143, 13.85714286, 14.28571429, 14.71428571, 15.14285714, 15.57142857, 16. , 16.42857143, 16.85714286, 17.28571429, 17.71428571, 18.14285714, 18.57142857, 19. , 19.42857143, 19.85714286, 20.28571429, 20.71428571, 21.14285714, 21.57142857, 22. , 22.42857143, 22.85714286, 23.28571429, 23.71428571, 24.14285714, 24.57142857, 25. ]} def generate_a_valid_wt(D = 200*random()): wt_desc = GenericWindTurbineVT() wt_desc.rotor_diameter = D wt_desc.hub_height = D * (0.5 + random()) return wt_desc def generate_random_GenericWindTurbinePowerCurveVT(D=None): """ Generate a random turbine and power curve using the GenericWindTurbinePowerCurveVT class Parameters ---------- D float, default=random, (optional) The wind turbine rotor diameter Returns ------- wt_desc GenericWindTurbinePowerCurveVT A random wind turbine power curve and c_t curve variable tree """ if not D: D = 200*random() wt_desc = GenericWindTurbinePowerCurveVT() wt_desc.rotor_diameter = D wt_desc.hub_height = D * (0.5 + random()) wt_desc.cut_in_wind_speed = 2. + 4. * random() wt_desc.cut_out_wind_speed = 20. + 10. * random() rho = 1.225 rated_wind_speed = 8. + 4. * random() max_a = 0.333 * random() max_cp = 4 * max_a * (1 - max_a)**2. max_ct = 4 * max_a * (1 - max_a) A = 0.25 * pi * D**2. # Rotor area ideal_power = lambda ws: 0.5 * rho * A * max_cp * ws **3. real_power = lambda ws: ideal_power(ws) if ws < rated_wind_speed else ideal_power(rated_wind_speed) #a_ct = -sqrt(-c_t + 1)/2 + 1/2 ct_from_cp = lambda cp: min(0.89, cp * 2.) cp_from_power = lambda power, ws: power/(0.5 * rho * A * ws**3.) ct_from_power = lambda pws: ct_from_cp(cp_from_power(pws[0], pws[1])) N = 3+int(random() * 100) ws = linspace(wt_desc.cut_in_wind_speed, wt_desc.cut_out_wind_speed, N) wt_desc.power_curve = vstack([ws, map(real_power, ws)]).T wt_desc.c_t_curve = vstack([ws, map(ct_from_power, zip(wt_desc.power_curve[:,1],ws))]).T wt_desc.power_rating = ideal_power(rated_wind_speed) wt_desc.rated_wind_speed = rated_wind_speed wt_desc.air_density = rho wt_desc.test_consistency() return wt_desc def generate_random_WTPC(D=None, **kwargs): wt = generate_random_GenericWindTurbinePowerCurveVT(D) inputs = {k: getattr(wt, k) for k in wt.list_vars()} inputs['position'] = np.random.rand(2) inputs['name'] = 'wt' # Replacing the parameters by the inputs of the function for k, v in kwargs.iteritems(): inputs[k] = v return WTPC(**inputs) def generate_random_wt_positions(D=None, nwt=None, min_D=None): """ Generate a random layout of wind turbines Parameters ---------- D float, default=random, (optional) The wind turbine rotor diameter nwt int, default=random, (optional) The number of turbines in the layout min_D float, default=random, (optional) The minimum of rotor diameter between turbines Returns ------ wt_positions ndarray [nwt, 2] The (x,y) position of the turbines Example ------- >>> generate_random_wt_positions(D=82., nwt=5, min_D=7.) array([[ 0. , 0. ], [ -1.74906289, -30.4628047 ], [ -5.88793386, -7.11251901], [-35.53857721, 32.42632383], [ 15.25619612, 0.79847237], [ 1.20034923, -10.73881476]]) """ if not D: D = 200*random() if not nwt: nwt = int(30*random()) if not min_D: min_D = 3. + 5.*random() Nnorm = lambda x: sqrt(x[0]**2. + x[1]**2.) 
min_dist = lambda pos_arr, pos: min(map(Nnorm, pos_arr - pos)) def random_path(wt_positions): N = floor(random()*len(wt_positions)) #print N x0, y0 = wt_positions[N,:] count = 0 while min_dist(wt_positions, array([x0,y0])) < min_D and count < 20: x0, y0 = x0 + (0.5-random()) * D, y0 + (0.5-random()) * D count +=1 return vstack([wt_positions, [x0, y0]]) wt_positions = array([[0.,0.]]) for i in range(nwt-1): wt_positions = random_path(wt_positions) return wt_positions def generate_random_GenericWindRoseVT(): """ Generate a random GenericWindRoseVT object Parameters ---------- N/A Returns ------- wind_rose GenericWindRoseVT A wind rose variable tree """ weibull_array = np.array([[ 0.00000000e+00, 3.59673400e-02, 9.22422800e+00, 2.38867200e+00], [ 3.00000000e+01, 3.94977300e-02, 9.86435600e+00, 2.44726600e+00], [ 6.00000000e+01, 5.17838000e-02, 9.65220200e+00, 2.41992200e+00], [ 9.00000000e+01, 6.99794900e-02, 9.98217800e+00, 2.58789100e+00], [ 1.20000000e+02, 8.36383000e-02, 1.00946000e+01, 2.74804700e+00], [ 1.50000000e+02, 6.43412500e-02, 9.64369000e+00, 2.59179700e+00], [ 1.80000000e+02, 8.64220000e-02, 9.63377500e+00, 2.58007800e+00], [ 2.10000000e+02, 1.17690000e-01, 1.05678600e+01, 2.54492200e+00], [ 2.40000000e+02, 1.51555100e-01, 1.14525200e+01, 2.46679700e+00], [ 2.70000000e+02, 1.47361100e-01, 1.17423700e+01, 2.60351600e+00], [ 3.00000000e+02, 1.00109800e-01, 1.16923200e+01, 2.62304700e+00], [ 3.30000000e+02, 5.16542400e-02, 1.01385800e+01, 2.32226600e+00]]) ### -> This is going to be really slow because of interpolation nwd, nws = 4+int(random()*360), 4+int(random()*25) wd = linspace(0., 360., nwd)[:-1].tolist() ws = linspace(3., 30., nws).tolist() #wind_rose_array = array([[wd_, random(), random()*15., random()*4.] for wd_ in wd]) #wind_rose_array *= [1., 1./wind_rose_array[:,1].sum(), 1., 1.] #return WeibullWindRose()(wind_directions= wd, # wind_speeds=ws, # wind_rose_array=wind_rose_array).wind_rose # gwr = GenericWindRoseVT(wind_directions=wd, wind_speeds=ws, weibull_array=weibull_array) return gwr # def generate_random_wt_layout(D=None, nwt=None): # """ # Generate a random GenericWindFarmTurbineLayout of wind turbines # # Parameters # ---------- # D float, default=random, (optional) # The wind turbine rotor diameter # # nwt int, default=random, (optional) # The number of turbines in the layout # # Returns # ------- # wt_layout GenericWindFarmTurbineLayout # A random wind turbine layout # # """ # if D is None: # D = 200*random() # if nwt is None: # nwt = int(30*random()) # wt_layout = GenericWindFarmTurbineLayout() # wt_layout.wt_positions = generate_random_wt_positions(D=D, nwt=nwt) # wt_layout.wt_list = [generate_random_GenericWindTurbinePowerCurveVT(D) for i in range(nwt)] # wt_layout.wt_wind_roses = [generate_random_GenericWindRoseVT() for i in range(nwt)] # wt_layout.wt_names = ['wt_%d'%(i) for i in range(nwt)] # wt_layout.test_consistency() # return wt_layout def generate_random_wt_layout(D=None, nwt=None): """ Parameters ---------- D float, default=random, (optional) The wind turbine rotor diameter nwt int, default=random, (optional) The number of turbines in the layout Returns ------- a random GenericWindFarmTurbineLayout instance """ if not D: D = 200*random() if not nwt: nwt = int(30*random()) wt_positions = generate_random_wt_positions(D=D, nwt=nwt) return GenericWindFarmTurbineLayout([generate_random_WTPC( name='wt%d'%(n), position=wt_positions[n,:], wind_rose=generate_random_GenericWindRoseVT()) for n in range(nwt)])
{ "repo_name": "mrosemeier/fusedwind", "path": "src/fusedwind/plant_flow/generate_fake_vt.py", "copies": "2", "size": "20215", "license": "apache-2.0", "hash": 2903791268329052000, "line_mean": 79.2222222222, "line_max": 898, "alpha_frac": 0.6234479347, "autogenerated": false, "ratio": 2.175059177964278, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3798507112664278, "avg_score": null, "num_lines": null }
from fuse import FUSE from splitviewfuse import SplitViewFuseBase from splitviewfuse.filehandlecontainers.VirtualFileSegmentFileHandleContainer import VirtualFileSegmentFileHandleContainer from splitviewfuse.SegmentUtils import SegmentUtils from math import ceil import os import sys from splitviewfuse.SplitViewFuseBase import ArgumentParserError from argparse import ArgumentTypeError class SplitViewFuse(SplitViewFuseBase.SplitViewFuseBase): def __init__(self, root, maxSegmentSize, loglevel, logfile): super(SplitViewFuse, self).__init__(root, maxSegmentSize, VirtualFileSegmentFileHandleContainer(maxSegmentSize), loglevel, logfile) def _SplitViewFuseBase__processReadDirEntry(self, absRootPath, entry): dirContent = list() absRootPathEntry = os.path.join(absRootPath, entry) # split large files if not os.path.isdir(absRootPathEntry) and os.path.exists(absRootPathEntry): fileSize = os.path.getsize(absRootPathEntry) if fileSize > self.maxFileSize: numberOfParts = int(ceil(fileSize / float(self.maxFileSize))) for i in range(0, numberOfParts): dirContent.append(SegmentUtils.joinSegmentPath(entry, i)) return dirContent # return not splitted entry dirContent.append(entry) return dirContent def main(): try: args = SplitViewFuseBase.parseArguments(sys.argv, 'Filesystem that splits files into segments of given size. The size is specified in the mount options.') _ = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize'], args.mountOptions['loglevel'], args.mountOptions['logfile']), args.dir, **args.mountOptions['other']) #fuse = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize']), args.dir, nothreads=True, foreground=True) except ArgumentParserError as e: print('Error during command line parsing: {0}'.format(str(e))) sys.exit(1) except ArgumentTypeError as e: print('Error during command line parsing: {0}'.format(str(e))) sys.exit(1) if __name__ == '__main__': main()
{ "repo_name": "seiferma/splitviewfuse", "path": "splitviewfuse/SplitViewFuse.py", "copies": "1", "size": "2167", "license": "mit", "hash": 4770115239005306000, "line_mean": 42.34, "line_max": 179, "alpha_frac": 0.710198431, "autogenerated": false, "ratio": 4.012962962962963, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5223161393962963, "avg_score": null, "num_lines": null }
from fuse import FUSE
from splitviewfuse import SplitViewFuseBase
from splitviewfuse.SegmentUtils import SegmentUtils
from splitviewfuse.filehandlecontainers.VirtualRegularFileFileHandleContainer import VirtualRegularFileFileHandleContainer
import sys
from splitviewfuse.SplitViewFuseBase import ArgumentParserError
from argparse import ArgumentTypeError

class UnionViewFuse(SplitViewFuseBase.SplitViewFuseBase):

    def __init__(self, root, maxSegmentSize, loglevel, logfile):
        super(UnionViewFuse, self).__init__(root, maxSegmentSize, VirtualRegularFileFileHandleContainer(maxSegmentSize), loglevel, logfile)

    def _SplitViewFuseBase__processReadDirEntry(self, absRootPath, entry):
        dirContent = list()
        segmentFreeEntry, segmentNumber = SegmentUtils.splitSegmentPath(entry)
        if segmentNumber is None or segmentNumber == 0:
            dirContent.append(segmentFreeEntry)
        return dirContent

def main():
    try:
        args = SplitViewFuseBase.parseArguments(sys.argv, 'Filesystem that merges segmented files into complete ones. The size is specified in the mount options.')
        _ = FUSE(UnionViewFuse(args.device, args.mountOptions['segmentsize'], args.mountOptions['loglevel'], args.mountOptions['logfile']), args.dir, **args.mountOptions['other'])
        #fuse = FUSE(UnionViewFuse(args.device, args.mountOptions['segmentsize']), args.dir, nothreads=True, foreground=True)
    except ArgumentParserError as e:
        print('Error during command line parsing: {0}'.format(str(e)))
        sys.exit(1)
    except ArgumentTypeError as e:
        print('Error during command line parsing: {0}'.format(str(e)))
        sys.exit(1)

if __name__ == '__main__':
    main()
{ "repo_name": "seiferma/splitviewfuse", "path": "splitviewfuse/UnionViewFuse.py", "copies": "1", "size": "1725", "license": "mit", "hash": 1581236719795439600, "line_mean": 45.6216216216, "line_max": 179, "alpha_frac": 0.7449275362, "autogenerated": false, "ratio": 3.9930555555555554, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5237983091755555, "avg_score": null, "num_lines": null }
from fuse.utils import validators as validator_functions import six import fuse class InvalidInputError(Exception): pass def prompt( text='', description='', validators=None, options=None, default=None, pre_validation_hook=None, post_validation_hook=None): """ Present an informative message to the user and return input from user """ # Prompt confirmation on defaults confirm = fuse.settings['confirm'] # Set pre/post validation hook to return-input lambdas if None if pre_validation_hook is None: pre_validation_hook = lambda v: v if post_validation_hook is None: post_validation_hook = lambda v: v # Context for prompt template context = { 'text': text, 'default': default, 'options': options, 'description': description, 'invalid_input_message': None, } # Replace validator descriptors with actual functions if validators is None: validators = [] validators = [getattr(validator_functions, validator) for validator in validators] # If options is a list with elements, create a validator on the # fly and replace all other potential validators. if options is not None: option_validator = lambda v: v in [identifier for identifier, _ in options] option_validator.__doc__ = "Value must be an integer in [1-{options_length}]".format(options_length=len(options)) validators = [option_validator] else: # Force None and empty string ('') to fail validators.insert(0, validator_functions.not_none) def read_input(): # Return default value if confirm is False and default is not None if confirm is False and default is not None: return default user_message = fuse.render(context, 'prompt.j2', out=None).strip() # Set input function (python 3's input is equal to python 2's raw_input) return (raw_input if six.PY2 else input)(user_message) def map_input(user_input): # Assumption: If a default value exists, it is already mapped if user_input in (default, '', None): return default if options: try: option_index = int(user_input) - 1 if option_index >= 0: user_input = options[option_index][0] else: user_input = None except (TypeError, IndexError, ValueError): user_input = None return user_input def validate_input(user_input): user_input = pre_validation_hook(user_input) for validator in validators: if not validator(user_input): context['invalid_input_message'] = validator.__doc__.strip() confirm = True raise InvalidInputError return post_validation_hook(user_input) while True: try: # Read user_input = read_input() fuse.log.info("got user input `{user_input}`".format(user_input=user_input)) # Map user_input = map_input(user_input) fuse.log.info("user input converted to `{user_input}` (by input map)".format( user_input=user_input, map_input=map_input)) # Validate user_input = validate_input(user_input) fuse.log.info("user input converted to `{user_input}` (by input validation)".format( user_input=user_input)) # Reset confirm to default behavior confirm = fuse.settings['confirm'] return user_input except InvalidInputError: # Make sure user is prompted if a default was invalid confirm = True
{ "repo_name": "aholmback/fuse", "path": "fuse/utils/prompting.py", "copies": "1", "size": "3797", "license": "mit", "hash": -7561556827300764000, "line_mean": 32.6017699115, "line_max": 121, "alpha_frac": 0.6010007901, "autogenerated": false, "ratio": 4.4670588235294115, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.011278378302447925, "num_lines": 113 }
from fusion_classifier import * import sys import os.path hello_message = "Please enter command: 1 for dataset prediction using pre-built classifiers, 2 for k-fold evaluation on training set built online:, exit to close\n" predict_message = "Please enter filename of dataset to predict (enter 0 to go back)\n" k_fold_message = "Please enter filename of dataset to build classifier (enter 0 to go back)\n" secret_message = "Welcome to the secret train mode, you should know what to do\n" def driver(args): while True: i = raw_input(hello_message) if i not in ['1', '2', '3', 'exit']: continue if i == '1': #predict mode while True: fname = raw_input(predict_message) if fname == '0': break if os.path.isfile(fname): fusion_predict(fname) continue if i == '2': #k-fold mode while True: fname = raw_input(k_fold_message) if fname == '0': break if os.path.isfile(fname): perform_k_fold(fname) continue if i == '3': #train mode while True: fname = raw_input(secret_message) if fname == '0': break if os.path.isfile(fname): train(fname) continue if i == 'exit': print "Bye" sys.exit(1) if __name__ == '__main__': driver(sys.argv)
{ "repo_name": "chenzeyu/demographic_prediction", "path": "driver.py", "copies": "1", "size": "1333", "license": "mit", "hash": -2789357019739103700, "line_mean": 26.7916666667, "line_max": 164, "alpha_frac": 0.6001500375, "autogenerated": false, "ratio": 3.136470588235294, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4236620625735294, "avg_score": null, "num_lines": null }
from futclient import FutClient import futfunctions, time transfer_money = True money_left = 0 min_transfer_amount = 10000 accounts = [ ["andreasw@gmail.com",'Bryrmeg111','Trondheim'], ["fut1.bryrmeg@neverbox.com","Bryrmeg111","Trondheim"], ["fut2.bryrmeg@neverbox.com","Bryrmeg111","Trondheim"], ["fut3.bryrmeg@neverbox.com","Bryrmeg111","Trondheim"], ['fut4.bryrmeg@neverbox.com','Bryrmeg111','Trondheim'] ] while True: for acc in accounts: client = FutClient(acc[0],acc[1],acc[2]) print "%s - coins: %s - tradepile: %s"%(acc[0],client.coins(),client.tradepile()[1]) if client.coins() > 0: client.relist_cards() # transfer money if transfer_money and acc[0] != "andreasw@gmail.com" and client.coins()>money_left+min_transfer_amount: client2 = FutClient(accounts[0][0],accounts[0][1],accounts[0][2]) auctions = client2.search_auctions( card_type='player',pos="LB",lev='bronze', leag=350,max_bin=200) if client2.buy_item(auctions[-1].tradeID,auctions[-1].BIN): print "Bought player with main account!" itemid = auctions[-1].itemID bin_price = futfunctions.lower_price(client.coins()-money_left) bid_price = futfunctions.lower_price(bin_price) client2.move_card(itemid) if client2.post_trade(auctions[-1].itemID,bid_price,bin_price): print "Listed player!" time.sleep(4) auctions = client.search_auctions( card_type='player',pos="LB",lev='bronze', leag=350,min_bin=bid_price,max_bin=bin_price,nat=183) for card in auctions: if card.itemID == itemid: if client.buy_item(card.tradeID,card.BIN): print "Bought item from main account!" time.sleep(1) client.discard(card.itemID) print "Sleeping for 1 hr" time.sleep(60*60)
{ "repo_name": "bryrmeg/14", "path": "relist_cards.py", "copies": "1", "size": "2243", "license": "apache-2.0", "hash": 7946267875210976000, "line_mean": 43.86, "line_max": 115, "alpha_frac": 0.532322782, "autogenerated": false, "ratio": 3.521193092621664, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4553515874621664, "avg_score": null, "num_lines": null }
from futclient import FutClient import sqlite3 import datetime, time import futfunctions import pickle import multiprocessing #httplib.HTTPConnection.debuglevel = 1 def watch_auctionhouse(search): print "searching for player: "+str(search) page = 0 count = 50 new = client.search_auctions(card_type="player",lev="gold", count=count,page=page,team=search[0], nat=search[1]) auctions = [] for card in new: #add_tradeid_to_watch(card.tradeID,card.expires) if int(card.resourceID) == int(search[2]): auctions.append(card) last_trade_id = 0 while True and len(new) > (count-1) and new[-1].expires < 3600 \ and new[-1].tradeID != last_trade_id: page += 1 last_trade_id = new[-1].tradeID new = client.search_auctions(card_type="player",lev="gold", count=count,page=page, team=search[0],nat=search[1]) for card in new: if int(card.resourceID) == int(search[2]): auctions.append(card) return auctions def get_trade_info(tradeid): card = client.get_trade_info(tradeid) if card: return card return None def add_player_to_db(card): t = (str(card.tradeID),) # Checking if value already is in the database r = c.execute('SELECT tradeid FROM players WHERE tradeid=?',t) if r.fetchone(): print "found duplicate in add_player_to_db" # Value is in the database. No need to do anything more. return False #card = client.get_trade_info(tradeid) if (not isinstance(card,bool)) \ and isinstance(card.tradeState, unicode) \ and card.tradeState == "closed": try: c.execute( 'insert into players values\ (?, ?, ?, ?, ?, ?, ?,?, ?, ?,?,?,?)', (card.resourceID, card.tradeID, card.name, card.nationality, card.team, card.league, card.rating, datetime.date.today(), datetime.datetime.now(), card.currentBid, card.BIN, card.position, card.playstyle) ) c.commit() print "added: "+str(card.tradeID) except sqlite3.IntegrityError: # This statement should never be printed as we have # checked for existing values earlier print "couldn't add " +str(card.tradeID) +" twice" pass def add_tradeid_to_watch(card): expires = int(card.expires) + int(time.time()) try: c.execute('insert into tradeids values(?, ?)', (card.tradeID, expires)) c.commit() #print "added to db" except sqlite3.IntegrityError: #print "couldn't add " +str(card.tradeID) +" twice" pass if __name__ == "__main__": c = sqlite3.connect("auctions.db") c.text_factory = str start_time = 0 while True: search = [] players_to_watch = pickle.load( open( "players_to_watch.p", "rb" ) ) for key in players_to_watch: search.append([int(players_to_watch[key]['team']),int(players_to_watch[key]['nat']),int(players_to_watch[key]['rid'])]) client = FutClient('fut1.bryrmeg@neverbox.com','Bryrmeg11','Trondheim') if True and (start_time == 0 or int(time.time())-start_time > 3600): start_time = int(time.time()) pool = multiprocessing.Pool(processes=25) auctions = pool.map(watch_auctionhouse,search) pool.close() for auction in auctions: for card in auction: if card.resourceID in players_to_watch: add_tradeid_to_watch(card) # Fetch tradeids that have expired to examine the result of #that auction t = (str(int(time.time())+60),) tradeids = [] for row in c.execute('SELECT * FROM tradeids WHERE expires<=?',t): tradeids.append(int(row[0])) c.execute('DELETE FROM tradeids WHERE expires<=?',t) c.commit() print "number of auction to check:" + str(len(tradeids)) pool = multiprocessing.Pool(processes=20) cards = pool.map(get_trade_info,tradeids) for card in cards: if card: add_player_to_db(card) # Processing data auctions = [] pos = set() chem = set() resid = set() #t = ('2000',) for row in c.execute( "SELECT * FROM players 
where ds >= date('now', '-4 day')"): pos.add(row[-2]) chem.add(row[-1]) resid.add(row[0]) auctions.append(row) # Building dictionary data = {} auctions_dict = {} for p in pos: data[p] = {} for ch in chem: data[p].update({ch:[]}) for auction in auctions: auctions_dict[auction[0]] = {} for p in pos: auctions_dict[auction[0]][p] = {} for ch in chem: auctions_dict[auction[0]][p][ch] = [] for auction in auctions: auctions_dict[auction[0]][auction[-2]][auction[-1]].\ append(auction[-4]) for key in auctions_dict: for p in pos: for ch in chem: if len(auctions_dict[key][p][ch]) > 30: auctions_dict[key][p][ch] = futfunctions\ .fut_round(sum(auctions_dict[key][p][ch])\ /float( len(auctions_dict[key][p][ch]))) else: auctions_dict[key][p][ch] = None # want to remove players with to wrong BIN min_bin = 2000 max_bin = 4000 print "starting removing players" for key in auctions_dict: for p in pos: for ch in chem: if auctions_dict[key][p][ch] and\ (int(auctions_dict[key][p][ch]) <= min_bin or int(auctions_dict[key][p][ch]) >= max_bin): if key in players_to_watch: del players_to_watch[key] print "Number of players to watch" print len(players_to_watch) pickle.dump(players_to_watch,open('players_to_watch.p','wb')) pickle.dump(auctions_dict, open( "checked_players.p", "wb" )) # sleep and repeat sleep_time = 60 if sleep_time > 0: print "sleeping for: "+str(sleep_time) +" seconds" time.sleep(sleep_time)
{ "repo_name": "bryrmeg/14", "path": "watchauctionhouse.py", "copies": "1", "size": "6739", "license": "apache-2.0", "hash": -7189756795058180000, "line_mean": 33.7371134021, "line_max": 131, "alpha_frac": 0.5215907405, "autogenerated": false, "ratio": 3.826802952867689, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48483936933676886, "avg_score": null, "num_lines": null }
from .future_base import FutureBase, CancelledError from threading import Lock import functools class FutureBaseExt(FutureBase): """ABC for Future combination functions.""" @classmethod def successful(cls, result=None, *, clb_executor=None): """Returns successfully completed future. Args: result: value to complete future with. clb_executor: default Executor to use for running callbacks (default - Synchronous). """ f = cls._new(clb_executor=clb_executor) f.set_result(result) return f @classmethod def failed(cls, exception, *, clb_executor=None): """Returns failed future. Args: exception: Exception to set to future. clb_executor: default Executor to use for running callbacks (default - Synchronous). """ f = cls._new(clb_executor=clb_executor) f.set_exception(exception) return f @classmethod def completed(cls, fun, *args, clb_executor=None, **kwargs): """Returns successful or failed future set from provided function.""" f = cls._new(clb_executor=clb_executor) try: f.set_result(fun(*args, **kwargs)) except Exception as ex: f.set_exception(ex) return f def complete(self, fun, *args, **kwargs): try: self.set_result(fun(*args, **kwargs)) except Exception as ex: self.set_exception(ex) def recover(self, fun_ex_or_value, *, executor=None): """Returns future that will contain result of original if it completes successfully, or set from result of provided function in case of failure. New future inherits default callback executor from original future. Propagates exceptions from function as well as cancellation. Args: fun_ex_or_value: function that accepts Exception parameter or just value to use in error case. executor: Executor to use when performing call to function. """ f = self._new() def on_done_recover(fut): if fut.cancelled(): f.cancel() if fut.exception() is None: f.set_result(fut.result()) elif callable(fun_ex_or_value): f.complete(fun_ex_or_value, fut.exception()) else: f.set_result(fun_ex_or_value) def backprop_cancel(fut): if fut.cancelled(): self.cancel() self.add_done_callback(on_done_recover, executor=executor) f.add_done_callback(backprop_cancel) return f def map(self, fun_res, *, executor=None): """Returns future which will be set from result of applying provided function to original future value. New future inherits default callback executor from original future. Propagates exceptions from function as well as cancellation. Args: fun_res: function that accepts original result and returns new value. executor: Executor to use when performing call to function (default - Synchronous). """ assert callable(fun_res), "Future.map expects callable" f = self._new() def on_done_map(fut): if fut.cancelled(): f.cancel() elif fut.exception() is None: f.complete(fun_res, fut.result()) else: f.set_exception(fut.exception()) def backprop_cancel(fut): if fut.cancelled(): self.cancel() self.add_done_callback(on_done_map, executor=executor) f.add_done_callback(backprop_cancel) return f def then(self, future_fun, *, executor=None): """Returns future which represents two futures chained one after another. Failures are propagated from first future, from second future and from callback function. Cancellation is propagated both ways. Args: future_fun: either function that returns future to be chained after successful completion of first one, or Future instance directly. executor: Executor to use when performing call to function (default - Synchronous). 
""" assert callable(future_fun) or isinstance(future_fun, FutureBase), "Future.then expects callable or Future" f = self._new() def on_done_start_next(fut): if fut.cancelled(): f.cancel() elif fut.exception() is None: try: f2_raw = future_fun if isinstance(future_fun, FutureBase) else future_fun() f2 = self.convert(f2_raw) self.compatible([self, f2]) f2.add_done_callback(f.set_from) except Exception as ex: f.set_exception(ex) else: f.set_exception(fut.exception()) def backprop_cancel(fut): if fut.cancelled(): self.cancel() self.add_done_callback(on_done_start_next, executor=executor) f.add_done_callback(backprop_cancel) return f def fallback(self, future_fun, *, executor=None): """Returns future that will contain result of original if it completes successfully, or will be set from future returned from provided function in case of failure. Provided function is called only if original future fails. Cancellation is propagated both ways. Args: future_fun: either function that returns future to be used for fallback, or Future instance directly. executor: Executor to use when performing call to function. """ assert callable(future_fun) or isinstance(future_fun, FutureBase), "Future.fallback expects callable or Future" f = self._new() def on_done_start_fallback(fut): if fut.cancelled(): f.cancel() elif fut.exception() is not None: try: f2_raw = future_fun if isinstance(future_fun, FutureBase) else future_fun() f2 = self.convert(f2_raw) self.compatible([self, f2]) def backprop_cancel_fallback(fut): if fut.cancelled(): f2.cancel() f2.add_done_callback(f.set_from) f.add_done_callback(backprop_cancel_fallback) except Exception as ex: f.set_exception(ex) else: f.set_result(fut.result()) def backprop_cancel_orig(fut): if fut.cancelled(): self.cancel() self.add_done_callback(on_done_start_fallback, executor=executor) f.add_done_callback(backprop_cancel_orig) return f @classmethod def gather(cls, futures, *, return_exceptions=False, clb_executor=None): """Return a future aggregating results from the given futures. If all futures are completed successfully, the returned future’s result is the list of results (in the order of the original sequence, not necessarily in the order of future completion). If return_exceptions is True, exceptions in the tasks are treated the same as successful results, and gathered in the result list; otherwise, the first raised exception will be immediately propagated to the returned future. Cancellation: if the outer Future is cancelled, all children that have not completed yet are also cancelled. If any child is cancelled, this is treated as if it raised CancelledError – the outer Future is not cancelled in this case (this is to prevent the cancellation of one child to cause other children to be cancelled). Args: futures: list of futures to combine. return_exceptions: treat exceptions as successful results clb_executor: default executor to use when running new future's callbacks. 
""" if not futures: return cls.successful([], clb_executor=clb_executor) futures = list(map(cls.convert, futures)) cls.compatible(futures) f = cls._new(clb_executor=clb_executor) lock = Lock() results = [None] * len(futures) left = len(futures) def done(i, fut): nonlocal left exc = CancelledError() if fut.cancelled() else fut.exception() if exc is not None and not return_exceptions: f.set_exception(exc) else: with lock: results[i] = exc if exc is not None else fut.result() left -= 1 if not left: f.set_result(results) def backprop_cancel(fut): if fut.cancelled(): for fi in futures: fi.cancel() for i, fi in enumerate(futures): fi.add_done_callback(functools.partial(done, i)) f.add_done_callback(backprop_cancel) return f @classmethod def first(cls, futures, *, clb_executor=None): """Returns future which will be set from result of first future to complete, both successfully or with failure. Cancellation is propagated both ways - if aggregate future is cancelled it will cancel all child futures. Args: futures: list of futures to combine. clb_executor: default executor to use when running new future's callbacks. """ if not futures: raise TypeError("Future.first() got empty sequence") futures = list(map(cls.convert, futures)) cls.compatible(futures) f = cls._new(clb_executor=clb_executor) for fi in futures: fi.add_done_callback(f.try_set_from) def backprop_cancel(fut): if fut.cancelled(): for fi in futures: fi.cancel() f.add_done_callback(backprop_cancel) return f @classmethod def first_successful(cls, futures, *, clb_executor=None): """Returns future which will be set from result of first future to complete successfully, last detected error will be set in case when all of the provided future fail. In case of cancelling aggregate future all child futures will be cancelled. Only cancellation of all child future triggers cancellation of aggregate future. Args: futures: list of futures to combine. clb_executor: default executor to use when running new future's callbacks. """ if not futures: raise TypeError("Future.first_successful() got empty sequence") futures = list(map(cls.convert, futures)) cls.compatible(futures) f = cls._new(clb_executor=clb_executor) lock = Lock() left = len(futures) def on_done(fut): nonlocal left if not fut.cancelled() and fut.exception() is None: f.try_set_result(fut.result()) else: with lock: left -= 1 if not left: f.set_from(fut) for fi in futures: fi.add_done_callback(on_done) def backprop_cancel(fut): if fut.cancelled(): for fi in futures: fi.cancel() f.add_done_callback(backprop_cancel) return f @classmethod def reduce(cls, futures, fun, initial, *, executor=None, clb_executor=None): """Returns future which will be set with reduced result of all provided futures. In case of any failure future will be failed with first exception to occur. Cancellation: if the outer Future is cancelled, all children that have not completed yet are also cancelled. If any child is cancelled, this is treated as if it raised CancelledError – the outer Future is not cancelled in this case (this is to prevent the cancellation of one child to cause other children to be cancelled). Args: futures: list of futures to combine. fun: reduce-compatible function. executor: Executor to use when performing call to function. clb_executor: default executor to use when running new future's callbacks. 
""" return cls \ .gather(futures, clb_executor=clb_executor) \ .map(lambda results: functools.reduce(fun, results, initial), executor=executor) @classmethod def _new(cls, other=None, *, clb_executor=None): executor = clb_executor or (other._executor if other else None) return cls(clb_executor=executor) @classmethod def convert(cls, future): """Performs future type conversion. It either makes sure that passed future is safe to use with current future type, or raises TypeError indicating incompatibility. Override this method in leaf future classes to enable compatibility between different Future implementations. """ if not isinstance(future, cls): raise TypeError("{} is not compatible with {}" .format(_typename(cls), _typename(type(future)))) return future @classmethod def compatible(cls, futures): pass def _typename(cls): return cls.__module__ + '.' + cls.__name__
{ "repo_name": "mikhtonyuk/rxpython", "path": "concurrent/futures/cooperative/future_extensions.py", "copies": "1", "size": "13648", "license": "mit", "hash": 7055560170999626000, "line_mean": 36.2732240437, "line_max": 119, "alpha_frac": 0.5977129453, "autogenerated": false, "ratio": 4.56406825025092, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0011504996193240325, "num_lines": 366 }
from future.builtins import dict from future.builtins import int #!/usr/bin/env python # -*- coding: utf-8 -*- ''' timeparse.py (c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014 Implements a single function, `timeparse`, which can parse various kinds of time expressions. ''' # MIT LICENSE # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re #YEARS = r'(?P<years>\d+)\s*(?:ys?|yrs?.?|years?)' #MONTHS = r'(?P<months>\d+)\s*(?:mos?.?|mths?.?|months?)' WEEKS = r'(?P<weeks>[\d.]+)\s*(?:w|wks?|weeks?)' DAYS = r'(?P<days>[\d.]+)\s*(?:d|dys?|days?)' HOURS = r'(?P<hours>[\d.]+)\s*(?:h|hrs?|hours?)' MINS = r'(?P<mins>[\d.]+)\s*(?:m|(mins?)|(minutes?))' SECS = r'(?P<secs>[\d.]+)\s*(?:s|secs?|seconds?)' SEPARATORS = r'[,/]' SECCLOCK = r':(?P<secs>\d{2}(?:\.\d+)?)' MINCLOCK = r'(?P<mins>\d{1,2}):(?P<secs>\d{2}(?:\.\d+)?)' HOURCLOCK = r'(?P<hours>\d+):(?P<mins>\d{2}):(?P<secs>\d{2}(?:\.\d+)?)' DAYCLOCK = (r'(?P<days>\d+):(?P<hours>\d{2}):' r'(?P<mins>\d{2}):(?P<secs>\d{2}(?:\.\d+)?)') OPT = lambda x: r'(?:{x})?'.format(x=x, SEPARATORS=SEPARATORS) OPTSEP = lambda x: r'(?:{x}\s*(?:{SEPARATORS}\s*)?)?'.format( x=x, SEPARATORS=SEPARATORS) TIMEFORMATS = [ r'{WEEKS}\s*{DAYS}\s*{HOURS}\s*{MINS}\s*{SECS}'.format( #YEARS=OPTSEP(YEARS), #MONTHS=OPTSEP(MONTHS), WEEKS=OPTSEP(WEEKS), DAYS=OPTSEP(DAYS), HOURS=OPTSEP(HOURS), MINS=OPTSEP(MINS), SECS=OPT(SECS)), r'{MINCLOCK}'.format( MINCLOCK=MINCLOCK), r'{WEEKS}\s*{DAYS}\s*{HOURCLOCK}'.format( WEEKS=OPTSEP(WEEKS), DAYS=OPTSEP(DAYS), HOURCLOCK=HOURCLOCK), r'{DAYCLOCK}'.format( DAYCLOCK=DAYCLOCK), r'{SECCLOCK}'.format( SECCLOCK=SECCLOCK), ] MULTIPLIERS = dict([ #('years', 60 * 60 * 24 * 365), #('months', 60 * 60 * 24 * 30), ('weeks', 60 * 60 * 24 * 7), ('days', 60 * 60 * 24), ('hours', 60 * 60), ('mins', 60), ('secs', 1) ]) def _interpret_as_minutes(sval, mdict): """ Times like "1:22" are ambiguous; do they represent minutes and seconds or hours and minutes? By default, timeparse assumes the latter. Call this function after parsing out a dictionary to change that assumption. >>> import pprint >>> pprint.pprint(_interpret_as_minutes('1:24', {'secs': '24', 'mins': '1'})) {'hours': '1', 'mins': '24'} """ if ( sval.count(':') == 1 and '.' 
not in sval and (('hours' not in mdict) or (mdict['hours'] is None)) and (('days' not in mdict) or (mdict['days'] is None)) and (('weeks' not in mdict) or (mdict['weeks'] is None)) ): mdict['hours'] = mdict['mins'] mdict['mins'] = mdict['secs'] mdict.pop('secs') pass return mdict def timeparse(sval, granularity='seconds'): ''' Parse a time expression, returning it as a number of seconds. If possible, the return value will be an `int`; if this is not possible, the return will be a `float`. Returns `None` if a time expression cannot be parsed from the given string. Arguments: - `sval`: the string value to parse >>> timeparse('1:24') 84 >>> timeparse(':22') 22 >>> timeparse('1 minute, 24 secs') 84 >>> timeparse('1m24s') 84 >>> timeparse('1.2 minutes') 72 >>> timeparse('1.2 seconds') 1.2 If granularity is specified as ``minutes``, then ambiguous digits following a colon will be interpreted as minutes; otherwise they are considered seconds. >>> timeparse('1:30') 90 >>> timeparse('1:30', granularity='minutes') 5400 ''' for timefmt in TIMEFORMATS: match = re.match(r'\s*' + timefmt + r'\s*$', sval, re.I) if match and match.group(0).strip(): mdict = match.groupdict() if granularity == 'minutes': mdict = _interpret_as_minutes(sval, mdict) # if all of the fields are integer numbers if all(v.isdigit() for v in list(mdict.values()) if v): return sum([MULTIPLIERS[k] * int(v, 10) for (k, v) in list(mdict.items()) if v is not None]) # if SECS is an integer number elif ('secs' not in mdict or mdict['secs'] is None or mdict['secs'].isdigit()): # we will return an integer return ( int(sum([MULTIPLIERS[k] * float(v) for (k, v) in list(mdict.items()) if k != 'secs' and v is not None])) + (int(mdict['secs'], 10) if mdict['secs'] else 0)) else: # SECS is a float, we will return a float return sum([MULTIPLIERS[k] * float(v) for (k, v) in list(mdict.items()) if v is not None])
{ "repo_name": "fnkr/POSS", "path": "utils/pytimeparse/timeparse.py", "copies": "1", "size": "6010", "license": "mit", "hash": 8641730387871877000, "line_mean": 36.0987654321, "line_max": 86, "alpha_frac": 0.5675540765, "autogenerated": false, "ratio": 3.353794642857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9375125808492386, "avg_score": 0.009244582172951448, "num_lines": 162 }
from future.builtins import int from django.contrib.admin.widgets import AdminTextareaWidget from django.forms.widgets import Widget from django.template.loader import render_to_string from django.utils.safestring import mark_safe from django.forms.widgets import flatatt from django.utils.html import escape from fluent_contents.models import get_parent_language_code from fluent_contents.models.managers import get_parent_active_language_choices from fluent_utils.django_compat import smart_text class PlaceholderFieldWidget(Widget): """ The widget to render a :class:`fluent_contents.models.PlaceholderField`. It outputs a ``<div>`` element which operates as placeholder content area. The client-side editor will use that area to display the admin interfaces of the :class:`fluent_contents.models.ContentItem` models. """ class Media: js = ( 'fluent_contents/admin/cp_admin.js', 'fluent_contents/admin/cp_data.js', 'fluent_contents/admin/cp_plugins.js', ) css = { 'screen': ( 'fluent_contents/admin/cp_admin.css', ), } def __init__(self, attrs=None, slot=None, parent_object=None, plugins=None): super(PlaceholderFieldWidget, self).__init__(attrs) self.slot = slot self._plugins = plugins self.parent_object = parent_object def value_from_datadict(self, data, files, name): # This returns the field value from the form POST fields. # Currently returns a dummy value, so the PlaceholderFieldDescriptor() can detect it. return "-DUMMY-" def render(self, name, value, attrs=None): """ Render the placeholder field. """ other_instance_languages = None if value and value != "-DUMMY-": if get_parent_language_code(self.parent_object): # Parent is a multilingual object, provide information # for the copy dialog. other_instance_languages = get_parent_active_language_choices( self.parent_object, exclude_current=True) context = { 'cp_plugin_list': list(self.plugins), 'placeholder_id': '', 'placeholder_slot': self.slot, 'other_instance_languages': other_instance_languages, } return mark_safe(render_to_string('admin/fluent_contents/placeholderfield/widget.html', context)) @property def plugins(self): """ Get the set of plugins that this widget should display. """ from fluent_contents import extensions # Avoid circular reference because __init__.py imports subfolders too if self._plugins is None: return extensions.plugin_pool.get_plugins() else: return extensions.plugin_pool.get_plugins_by_name(*self._plugins) class WysiwygWidget(AdminTextareaWidget): """ WYSIWYG widget """ def __init__(self, attrs=None): defaults = {'rows': 4} if attrs: defaults.update(attrs) super(WysiwygWidget, self).__init__(attrs) def render(self, name, value, attrs=None): value = smart_text(value or u'') final_attrs = self.build_attrs(attrs, name=name) if 'class' in final_attrs: final_attrs['class'] += ' cp-wysiwyg-widget' else: final_attrs['class'] = 'cp-wysiwyg-widget' return mark_safe(u'<textarea{0}>{1}</textarea>'.format(flatatt(final_attrs), escape(value)))
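
# Usage sketch (illustrative; the plugin name is a placeholder): both widgets
# can also be instantiated directly when building custom admin forms.
#
#   placeholder = PlaceholderFieldWidget(slot='main', plugins=['TextPlugin'])
#   wysiwyg = WysiwygWidget(attrs={'rows': 10})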
{ "repo_name": "jpotterm/django-fluent-contents", "path": "fluent_contents/forms/widgets.py", "copies": "1", "size": "3564", "license": "apache-2.0", "hash": -8959640338434849000, "line_mean": 35, "line_max": 136, "alpha_frac": 0.6346801347, "autogenerated": false, "ratio": 4.068493150684931, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0018342517714090296, "num_lines": 99 }
from future_builtins import map, filter import os import hashlib import sqlite3 import weakref import datetime from config import config class NotFound(Exception): pass class ForeignKeyNotMapped(Exception): pass class Duplicate(Exception): pass class Contradictory(Exception): pass tables = [] def register(cls): tables.append(cls) return cls def password_hash(tutor_id, password): salt = config['password_salt'] assert len(salt) >= 20, 'Salt is not long enough' return hashlib.sha512('%s|%d|%s' % (salt, tutor_id, password)).hexdigest() def get_conn(): # pragma: no cover conn = sqlite3.connect(config['database']) return conn def _model(nb_keys): class Model(object): @staticmethod def make_id(args): return tuple(args[0:nb_keys]) def __init__(self, *args): id_ = self.make_id(args) if id_ in self._instances: assert self._instances[id_] is self else: self._instances[id_] = self self._attributes = dict(zip(self._fields, args)) def __repr__(self): return '<enseigner.model.%s(%s)>' % (self.__class__.__name__, ', '.join(['%s=%r' % (x, getattr(self, x)) for x in self._fields])) def __getattr__(self, name): if name in self._attributes: return self._attributes[name] else: raise AttributeError('%r has not attribute %r' % (self, name)) @classmethod def _check_exists(cls, cls2, key): try: cls2.get(key) except NotFound: raise ForeignKeyNotMapped() @classmethod def _get_or_create(cls, data): if not data: raise NotFound() id_ = cls.make_id(data) if id_ in cls._instances: return cls._instances[id_] else: instance = cls(*data) cls._instances[id_] = instance return instance @classmethod def _fetch_many(cls, request, args=()): conn = get_conn() c = conn.cursor() try: c.execute(request, args) r = c.fetchall() finally: c.close() return r @classmethod def _get_many(cls, request, args=()): r = cls._fetch_many(request, args) return {cls._instances.get(cls.make_id(x), None) or cls(*x) for x in r} Model.__name__ = 'Model(%d)' % nb_keys return Model class SingleKeyModel(_model(1)): @classmethod def make_id(self, args): return args[0] @classmethod def _insert_one(cls, cols, args): conn = get_conn() c = conn.cursor() try: c.execute('INSERT INTO %s (%s) VALUES (%s)' % (cls._table, cols, ', '.join('?'*len(args))), args) r = c.lastrowid except sqlite3.IntegrityError: raise Duplicate() else: conn.commit() finally: c.close() return cls(r, *args) @classmethod def _insert_many(cls, cols, l): conn = get_conn() c = conn.cursor() r = [] try: for args in l: c.execute('INSERT INTO %s (%s) VALUES (%s);' % \ (cls._table, cols, ', '.join('?'*len(args))), args) r.append((c.lastrowid, args)) except sqlite3.IntegrityError: conn.rollback() raise Duplicate() else: conn.commit() finally: c.close() return list(cls(x, *args) for (x, args) in r) DoubleKeyModel = _model(2) TripleKeyModel = _model(3) @register class Tutor(SingleKeyModel): _table = 'tutors' _create_table = '''CREATE TABLE tutors ( tutor_id INTEGER PRIMARY KEY, tutor_email TEXT UNIQUE, tutor_name TEXT, tutor_password_hash TEXT, tutor_phone_number TEXT, tutor_is_admin BOOLEAN, tutor_is_active BOOLEAN, tutor_comment TEXT )''' _instances = weakref.WeakValueDictionary() _fields = ('uid', 'email', 'name', 'password_hash', 'phone_number', 'is_admin', 'is_active', 'comment') @classmethod def create(cls, email, name, password=None, phone_number=None, is_admin=False, is_active=True, comment=None): t = cls._insert_one('''tutor_email, tutor_name, tutor_password_hash, tutor_phone_number, tutor_is_admin, tutor_is_active, tutor_comment''', (email, name, None, phone_number, is_admin, is_active, 
comment)) if password: t.password_hash = password_hash(t.uid, password) conn = get_conn() c = conn.cursor() try: c.execute('''UPDATE tutors SET tutor_password_hash=? WHERE tutor_id=?''', (t.password_hash, t.uid)) conn.commit() finally: c.close() return t @classmethod def get(cls, email_or_id): conn = get_conn() c = conn.cursor() try: if isinstance(email_or_id, (str, unicode)): c.execute('''SELECT * FROM tutors WHERE tutor_email=?''', (email_or_id,)) elif isinstance(email_or_id, int): c.execute('''SELECT * FROM tutors WHERE tutor_id=?''', (email_or_id,)) else: raise ValueError('email_or_id should be str or int, not %r' % email_or_id) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all(cls): return cls._get_many('''SELECT * FROM tutors''') @classmethod def all_active(cls): return cls._get_many('''SELECT * FROM tutors WHERE tutor_is_active=1''') @classmethod def check_password(cls, tutor_email, password): conn = get_conn() c = conn.cursor() c.execute('SELECT tutor_id FROM tutors WHERE tutor_email=?', (tutor_email,)) r = c.fetchone() if not r: return None tutor_id = r[0] try: c.execute('''SELECT * FROM tutors WHERE tutor_id=? AND tutor_password_hash=?''', (tutor_id, password_hash(tutor_id, password))) r = c.fetchone() finally: c.close() if r: return cls._instances.get(r[0], None) or cls(*r) else: return None @register class Student(SingleKeyModel): _table = 'students' _create_table = '''CREATE TABLE students ( student_id INTEGER PRIMARY KEY, student_emails TEXT, student_name TEXT UNIQUE, student_phone_number TEXT, student_is_active BOOLEAN, student_blacklisted BOOLEAN, student_comment TEXT )''' _instances = weakref.WeakValueDictionary() _fields = ('uid', 'emails', 'name', 'is_active', 'blacklisted', 'comment') @classmethod def create(cls, emails, name, phone_number=None, is_active=True, blacklisted=False, comment=None): return cls._insert_one('''student_emails, student_name, student_phone_number, student_is_active, student_blacklisted, student_comment''', (emails, name, phone_number, is_active, blacklisted, comment)) @classmethod def get(cls, uid): conn = get_conn() c = conn.cursor() try: if not isinstance(uid, int): raise ValueError('id should be or int, not %r' % uid) c.execute('''SELECT * FROM students WHERE student_id=?''', (uid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all(cls): return cls._get_many('''SELECT * FROM students''') @classmethod def all_active_not_blacklisted(cls): return cls._get_many('''SELECT * FROM students WHERE student_is_active=1 AND student_blacklisted=0''') @register class Session(SingleKeyModel): _table = 'sessions' _create_table = '''CREATE TABLE sessions ( session_id INTEGER PRIMARY KEY, session_date DATETIME, session_managers TEXT, session_form_comment_students TEXT, session_form_comment_tutors TEXT, session_emailed_students BOOLEAN, session_emailed_tutors BOOLEAN, session_is_open BOOLEAN )''' _instances = weakref.WeakValueDictionary() _fields = ('sid', 'date', 'managers', 'session_form_comment_students', 'session_form_comment_tutors', 'emailed_students', 'emailed_tutors', 'is_open') @property def date(self): return datetime.datetime.strptime(self._attributes['date'], '%Y-%m-%d %H:%M:%S') @classmethod def create(cls, date, managers, form_comment_students='', form_comment_tutors='', emailed_students=False, emailed_tutors=False, is_open=True): return cls._insert_one('''session_date, session_managers, session_form_comment_students, session_form_comment_tutors, session_emailed_students, 
session_emailed_tutors, session_is_open''', (date, managers, form_comment_students, form_comment_tutors, emailed_students, emailed_tutors, is_open)) @classmethod def get(cls, sid): assert isinstance(sid, int) conn = get_conn() c = conn.cursor() try: c.execute('''SELECT * FROM sessions WHERE session_id=?''', (sid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all(cls): return cls._get_many('''SELECT * FROM sessions''') @property def nb_students(self): conn = get_conn() c = conn.cursor() try: c.execute('''SELECT COUNT() FROM student_registrations WHERE session_id=?''', (self.sid,)) r = c.fetchone()[0] finally: c.close() return r @property def nb_tutors(self): conn = get_conn() c = conn.cursor() try: c.execute('''SELECT COUNT() FROM tutor_registrations WHERE session_id=?''', (self.sid,)) r = c.fetchone()[0] finally: c.close() return r def set_emailed_tutors(self): conn = get_conn() conn.execute('''UPDATE sessions SET session_emailed_tutors=1 WHERE session_id=?''', (self.sid,)) conn.commit() self.emailed_tutors = 1 def set_emailed_students(self): conn = get_conn() conn.execute('''UPDATE sessions SET session_emailed_students=1 WHERE session_id=?''', (self.sid,)) conn.commit() self.emailed_students = 1 @register class TutorRegistration(SingleKeyModel): _table = 'tutor_registrations' _create_table = '''CREATE TABLE tutor_registrations ( treg_id INTEGER PRIMARY KEY, session_id INTEGER, treg_tutor_id INTEGER, treg_group_size INTEGER, treg_comment TEXT, FOREIGN KEY (session_id) REFERENCES sessions(session_id), FOREIGN KEY (treg_tutor_id) REFERENCES tutors(tutor_id), UNIQUE (session_id, treg_tutor_id) )''' _instances = weakref.WeakValueDictionary() _fields = ('trid', 'sid', 'uid', 'group_size', 'comment') @classmethod def create(cls, session, tutor, *args): if isinstance(session, Session): session = session.sid else: cls._check_exists(Session, session) if isinstance(tutor, Tutor): tutor = tutor.uid else: cls._check_exists(Tutor, tutor) return cls._insert_one('''session_id, treg_tutor_id, treg_group_size, treg_comment''', (session, tutor) + args) @classmethod def all_in_session(cls, session): if isinstance(session, Session): session = session.sid assert isinstance(session, int), session return cls._get_many('''SELECT * FROM tutor_registrations WHERE session_id=?''', (session,)) @classmethod def get(cls, trid): conn = get_conn() c = conn.cursor() try: c.execute('''SELECT * FROM tutor_registrations WHERE treg_id=?''', (trid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def find(cls, session, tutor): if isinstance(session, Session): session = session.sid else: cls._check_exists(Session, session) if isinstance(tutor, Tutor): tutor = tutor.uid else: cls._check_exists(Tutor, tutor) r = cls._get_many('''SELECT * FROM tutor_registrations WHERE session_id=? AND treg_tutor_id=?''', (session, tutor)) r = list(r) if not r: raise NotFound() else: assert len(r) == 1 return r[0] def update(self, group_size, comment): conn = get_conn() conn.execute('''UPDATE tutor_registrations SET treg_group_size=?, treg_comment=? 
WHERE treg_id=?''', (group_size, comment, self.trid,)) conn.commit() self.group_size = group_size self.comment = comment @register class Subject(SingleKeyModel): _table = 'subjects' _create_table = '''CREATE TABLE subjects ( subject_id INTEGER PRIMARY KEY, subject_name TEXT, subject_is_exceptional BOOLEAN, subject_color TEXT )''' _instances = weakref.WeakValueDictionary() _fields = ('sid', 'name', 'color', 'is_exceptional') @classmethod def create(cls, name, is_exceptional=False, color='#000000'): return cls._insert_one('''subject_name, subject_is_exceptional, subject_color''', (name, is_exceptional, color)) @classmethod def get(cls, sid): conn = get_conn() c = conn.cursor() try: if not isinstance(sid, int): raise ValueError('id should be or int, not %r' % sid) c.execute('''SELECT * FROM subjects WHERE subject_id=?''', (sid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all_permanent(cls): return cls._get_many('''SELECT * FROM subjects WHERE subject_is_exceptional=0''') @classmethod def all(cls): return cls._get_many('''SELECT * FROM subjects''') @register class SessionSubject(SingleKeyModel): _table = 'session_subjects' _create_table = '''CREATE TABLE session_subjects ( ss_id INTEGER PRIMARY KEY, session_id INTEGER, subject_id INTEGER, ss_is_open INTEGER, FOREIGN KEY (session_id) REFERENCES sessions(session_id), FOREIGN KEY (subject_id) REFERENCES subjects(subject_id), UNIQUE (session_id, subject_id) )''' _instances = weakref.WeakValueDictionary() _fields = ('ssid', 'seid', 'suid', 'is_open') @classmethod def create_for_session(cls, session, subjects): if isinstance(session, Session): session = session.sid else: cls._check_exists(Session, session) for subject in subjects: if not isinstance(subject, Subject): cls._check_exists(Subject, subject) subjects = (s.sid if isinstance(s, Subject) else s for s in subjects) l = list(map(lambda x:(session, x, True), subjects)) return cls._insert_many('session_id, subject_id, ss_is_open', l) @classmethod def all_subjects_for_session(cls, session): if isinstance(session, Session): session = session.sid else: cls._check_exists(Session, session) return Subject._get_many('''SELECT * FROM subjects LEFT JOIN session_subjects USING (subject_id) WHERE session_id IS NULL OR session_id=?''', (session,)) @register class TutorRegistrationSubject(SingleKeyModel): _table = 'tutor_registrations_subject' _create_table = '''CREATE TABLE tutor_registrations_subject ( tregs_id INTEGER PRIMARY KEY, tregs_treg_id INTEGER, tregs_subject_id INTEGER, tregs_preference INTEGER, FOREIGN KEY (tregs_treg_id) REFERENCES tutor_registrations(treg_id), FOREIGN KEY (tregs_subject_id) REFERENCES subjects(subject_id), UNIQUE (tregs_treg_id, tregs_subject_id) )''' _instances = weakref.WeakValueDictionary() _fields = ('id', 'trid', 'sid', 'preference') @classmethod def create(cls, treg, subject, pref): if isinstance(treg, TutorRegistration): treg = treg.trid else: cls._check_exists(TutorRegistration, treg) if isinstance(subject, Subject): subject = subject.sid else: cls._check_exists(Subject, subject) return cls._insert_one('''tregs_treg_id, tregs_subject_id, tregs_preference''', (treg, subject, pref)) @classmethod def all_of_treg(cls, treg): if isinstance(treg, TutorRegistration): treg = treg.trid assert isinstance(treg, int), treg return cls._get_many('''SELECT * FROM tutor_registrations_subject WHERE tregs_treg_id=?''', (treg,)) @classmethod def set_for_treg(cls, treg, l): if isinstance(treg, TutorRegistration): treg = treg.trid else: 
cls._check_exists(TutorRegistration, treg) assert isinstance(treg, int), treg assert hasattr(l, '__iter__'), l conn = get_conn() c = conn.cursor() try: c.execute('''DELETE FROM tutor_registrations_subject WHERE tregs_treg_id=?''', (treg,)) for (subject, preference) in l: if isinstance(subject, Subject): subject = subject.sid else: cls._check_exists(Subject, subject) c.execute('''INSERT INTO tutor_registrations_subject (tregs_treg_id, tregs_subject_id, tregs_preference) VALUES (?, ?, ?);''', (treg, subject, preference)) except sqlite3.IntegrityError: conn.rollback() raise Duplicate() else: conn.commit() finally: c.close() @register class StudentRegistration(SingleKeyModel): _table = 'student_registrations' _create_table = '''CREATE TABLE student_registrations ( sreg_id INTEGER PRIMARY KEY, session_id INTEGER, student_id INTEGER, subject_id INTEGER, sreg_friends INTEGER, sreg_comment TEXT, FOREIGN KEY (session_id) REFERENCES sessions(session_id), FOREIGN KEY (student_id) REFERENCES students(student_id), FOREIGN KEY (subject_id) REFERENCES subjects(subject_id), UNIQUE (session_id, student_id) )''' _fields = ('srid', 'seid', 'stid', 'suid', 'friends', 'comment') @classmethod def create(cls, session, student, subject, *args): if isinstance(session, Session): session = session.sid else: cls._check_exists(Session, session) if isinstance(student, Student): student = student.uid else: cls._check_exists(Student, student) if isinstance(subject, Subject): subject = subject.sid else: cls._check_exists(Subject, subject) assert isinstance(session, int), session assert isinstance(student, int), student assert isinstance(subject, int), subject return cls._insert_one('''session_id, student_id, subject_id, sreg_friends, sreg_comment''', (session, student, subject) + args) @classmethod def get(cls, sid): conn = get_conn() c = conn.cursor() try: if not isinstance(sid, int): raise ValueError('id should be or int, not %r' % sid) c.execute('''SELECT * FROM student_registrations WHERE sreg_id=?''', (sid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all_in_session(cls, session): if isinstance(session, Session): session = session.sid assert isinstance(session, int), session return cls._get_many('''SELECT * FROM student_registrations WHERE session_id=?''', (session,)) @classmethod def find(cls, session, student): if isinstance(session, Session): session = session.sid else: assert isinstance(session, int), session cls._check_exists(Session, session) if isinstance(student, Student): student = student.uid else: assert isinstance(student, int), student cls._check_exists(Student, student) r = cls._get_many('''SELECT * FROM student_registrations WHERE session_id=? AND student_id=?''', (session, student)) r = list(r) if not r: raise NotFound() else: assert len(r) == 1, r return r[0] def update(self, subject, friends, comment): if isinstance(subject, Subject): subject = subject.sid else: cls._check_exists(Subject, subject) assert isinstance(friends, int), friends assert isinstance(comment, str), comment assert isinstance(subject, int), subject conn = get_conn() conn.execute('''UPDATE student_registrations SET subject_id=?, sreg_friends=?, sreg_comment=? 
WHERE sreg_id=?''', (subject, friends, comment, self.srid,)) conn.commit() self.subject = subject self.friends = friends self.comment = comment @register class Mail(SingleKeyModel): _table = 'mails' _create_table = '''CREATE TABLE mails ( mail_id INTEGER PRIMARY KEY, mail_recipient TEXT, mail_subject TEXT, mail_content TEXT, mail_sent BOOLEAN )''' _instances = weakref.WeakValueDictionary() _fields = ('mid', 'recipient', 'subject', 'content', 'sent') @classmethod def create(cls, recipient, content, sent=False): return cls._insert_one('''mail_recipient, mail_subject, mail_content, mail_sent''', (recipient, content, sent)) @classmethod def create_many(cls, rows): rows = [(x[0], x[1], x[2], x[3] if len(x) > 3 else False) for x in rows] return cls._insert_many('''mail_recipient, mail_subject, mail_content, mail_sent''', rows) @classmethod def get(cls, mid): conn = get_conn() c = conn.cursor() try: c.execute('''SELECT * FROM mails WHERE mail_id=?''', (mid,)) r = c.fetchone() finally: c.close() return cls._get_or_create(r) @classmethod def all_unsent(cls): return cls._get_many('''SELECT * FROM mails WHERE mail_sent=0''') def set_sent(self): conn = get_conn() conn.execute('''UPDATE mails SET mail_sent=1 WHERE mail_id=?''', (self.mid,)) conn.commit() self.sent = True
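
# Usage sketch (illustrative; assumes config['database'] points at an
# initialised SQLite file and config['password_salt'] is set):
#
#   tutor = Tutor.create('jane@example.org', 'Jane', password='s3cret')
#   session = Session.create('2015-03-02 18:00:00', 'Jane')
#   TutorRegistration.create(session, tutor, 3, 'prefers algebra')
#   Tutor.check_password('jane@example.org', 's3cret')   # -> Tutor instance or None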
{ "repo_name": "ProgVal/site-enseigner", "path": "enseigner/model.py", "copies": "1", "size": "25426", "license": "mit", "hash": 9072340917064165000, "line_mean": 32.5435356201, "line_max": 113, "alpha_frac": 0.5201368678, "autogenerated": false, "ratio": 4.2482873851294904, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0023332698058555453, "num_lines": 758 }
from future.builtins import ( # noqa bytes, dict, int, list, object, range, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip) import base64 import copy from Crypto import Random from Crypto.Cipher import PKCS1_v1_5 from Crypto.Hash import SHA from Crypto.PublicKey import RSA from encryption.store.inmemory import InMemoryProvider DEFAULT_STORE = InMemoryProvider() DEFAULT_PROFILE = 'default' DEFAULT_CHUNK_SIZE = 256 def _encrypt_value(key, value, chunk_size=DEFAULT_CHUNK_SIZE): def _encrypt_chunk(key, value): return base64.urlsafe_b64encode(key.encrypt(value)) if value is not None: value_enc = value + SHA.new(value).digest() value = None while len(value_enc) > 0: if value is not None: value += '\n' else: value = '' if len(value_enc) > chunk_size: chunk = value_enc[:chunk_size] value = value + _encrypt_chunk(key, chunk) value_enc = value_enc[chunk_size:] else: value = value + _encrypt_chunk(key, value_enc) value_enc = '' return value return None def _decrypt_value(key, enc_value): dsize = SHA.digest_size sentinel = Random.new().read(15 + dsize) split_val = enc_value.splitlines() decrypted_val = '' for line in split_val: line_decrypted = key.decrypt( base64.urlsafe_b64decode(line.encode('utf-8')), sentinel) decrypted_val = decrypted_val + line_decrypted clear_val = decrypted_val[:-dsize] return clear_val def _create_enc_key(profile, store=None): store = store or DEFAULT_STORE key_data = store.load(profile) if key_data: return PKCS1_v1_5.new(RSA.importKey(key_data)) else: return None def _create_dec_key(profile, store=None, passphrase=None): store = store or DEFAULT_STORE key_data = store.load(profile, public=False) if key_data: return PKCS1_v1_5.new(RSA.importKey(key_data, passphrase=passphrase)) else: return None def encrypt(value, profile=DEFAULT_PROFILE, store=DEFAULT_STORE, chunk_size=DEFAULT_CHUNK_SIZE, key=None): """ Encrypts the given value using given profile and key store. :param value: String value that needs to be encrypted :keyword profile: Profile(Key Id) to be used for encryption :type profile: str :keyword store: Store to be used for encryption :type store: encryption.store.base.AbstractProvider :keyword key: PKCS1_v1_5 key :type key: Crypto.Cipher.PKCS1_v1_5.PKCS115_Cipher :return: Base64 Encrypted Encrypted value. :rtype: str or dict """ key = key or _create_enc_key(profile, store) if key: return _encrypt_value(key, value, chunk_size=chunk_size) else: # No encryption support return value def decrypt(value, profile=DEFAULT_PROFILE, store=DEFAULT_STORE, passphrase=None, key=None): """ Decrypts the given value using given profile and key store. Optionally the store can be protected by passphrase :param value: Base64 encrypted string or dictionary containg encrypted str. :type value: str or dict :keyword profile: Profile(Key Id) to be used for decryption :type profile: str :param store: Store to be used for decryption :type store: encryption.store.base.AbstractProvider :param passphrase: Passphrase for the private key. If None, key is un-protected :type passphrase: str :return: Decrypted String :rtype: str """ key = key or _create_dec_key(profile, store, passphrase=passphrase) if key: return _decrypt_value(key, value) else: # No decryption support return value def encrypt_obj(value, profile=DEFAULT_PROFILE, store=DEFAULT_STORE, chunk_size=DEFAULT_CHUNK_SIZE, key=None): """ Encrypts the given value using given profile and key store. 
:param value: value that needs to be encrypted :keyword profile: Profile(Key Id) to be used for encryption :type profile: str :keyword store: Store to be used for encryption :type store: encryption.store.base.AbstractProvider :keyword key: PKCS1_v1_5 key :type key: Crypto.Cipher.PKCS1_v1_5.PKCS115_Cipher :return: base64 encrypted values """ def recurse(val): return encrypt_obj(val, profile=profile, store=store, chunk_size=chunk_size, key=key) if hasattr(value, 'items'): if 'value' in value: if value.get('encrypted', False): return value else: copied_val = copy.deepcopy(value) copied_val['encrypted'] = True copied_val['value'] = \ encrypt(value['value'], profile=profile, store=store, chunk_size=chunk_size, key=key) return copied_val else: return { k: recurse(v) for k, v in value.items() } elif isinstance(value, (list, set, tuple)): return [recurse(v) for v in value] else: return { 'value': encrypt(str(value).encode('utf-8'), profile=profile, store=store, chunk_size=chunk_size, key=key), 'encrypted': True } def decrypt_obj(value, profile=DEFAULT_PROFILE, store=DEFAULT_STORE, passphrase=None, key=None): """ Decrypts the given value using given profile and key store. Optionally the store can be protected by passphrase :param value: Base64 encrypted string or dictionary containg encrypted str. :type value: str or dict :keyword profile: Profile(Key Id) to be used for decryption :type profile: str :param store: Store to be used for decryption :type store: encryption.store.base.AbstractProvider :param passphrase: Passphrase for the private key. If None, key is un-protected :type passphrase: str :return: Decrypted object """ def recurse(val): return decrypt_obj(val, profile=profile, store=store, passphrase=passphrase, key=key) if hasattr(value, 'items'): if 'value' in value: if value.get('encrypted', False) and \ isinstance(value['value'], (basestring,)): return decrypt(value['value'], profile=profile, store=store, passphrase=passphrase, key=key) else: return value['value'] else: return { k: recurse(v) for k, v in value.items() } elif isinstance(value, (list, set, tuple)): return [recurse(v) for v in value] else: return copy.deepcopy(value)
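
# Usage sketch (illustrative; assumes an RSA key pair is registered for the
# 'default' profile in the configured store; without one, these functions
# simply pass values through unchanged):
#
#   token = encrypt(b'db-password')
#   decrypt(token)                                      # -> b'db-password'
#   blob = encrypt_obj({'db': {'password': 'hunter2'}})
#   decrypt_obj(blob)                                   # -> {'db': {'password': 'hunter2'}}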
{ "repo_name": "totem/totem-encrypt", "path": "encryption/security.py", "copies": "1", "size": "6927", "license": "mit", "hash": -2991469735678229000, "line_mean": 32.9558823529, "line_max": 79, "alpha_frac": 0.6133968529, "autogenerated": false, "ratio": 4.001733102253033, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 204 }
from future.builtins import ( # noqa bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, filter, map, zip) import re __author__ = 'sukrit' """ Defines all the filters used for jinja templates (config) """ USE_TESTS = ('starting_with', 'matching', ) def apply_conditions(env): """ Applies filters on jinja env. :param env: Jinja environment :return: """ for name in USE_TESTS: env.tests[name] = globals()[name] return env def starting_with(value, prefix): """ Filter to check if value starts with prefix :param value: Input source :type value: str :param prefix: :return: True if matches. False otherwise :rtype: bool """ return str(value).startswith(str(prefix)) def matching(value, pattern, casesensitive=True): """ Filter that performs a regex match :param value: Input source :type value: str :param pattern: Regex Pattern to be matched :return: True if matches. False otherwise :rtype: bool """ flags = re.I if not casesensitive else 0 return re.match(str(pattern), str(value), flags) is not None
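
# Usage sketch (illustrative; assumes a jinja2 Environment is available):
#
#   from jinja2 import Environment
#   env = apply_conditions(Environment())
#   tpl = env.from_string("{{ 'web' if name is starting_with('web') else 'other' }}")
#   tpl.render(name='webserver-01')    # -> 'web'
#   tpl.render(name='db-01')           # -> 'other'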
{ "repo_name": "totem/cluster-orchestrator", "path": "orchestrator/jinja/conditions.py", "copies": "1", "size": "1183", "license": "mit", "hash": 3910350358223700500, "line_mean": 21.75, "line_max": 64, "alpha_frac": 0.6424344886, "autogenerated": false, "ratio": 3.7555555555555555, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4897990044155555, "avg_score": null, "num_lines": null }
from future.builtins import object import psutil import os import time import signal import shlex import gevent try: import subprocess32 as subprocess except: import subprocess from .context import log class Process(object): """ The parent class of Worker, Agent and Supervisor """ exitcode = 0 def install_signal_handlers(self): """ Handle events like Ctrl-C from the command line. """ self.graceful_stop = False def request_shutdown_now(): self.shutdown_now() def request_shutdown_graceful(): # Second time CTRL-C, shutdown now if self.graceful_stop: self.shutdown_now() else: self.graceful_stop = True self.shutdown_graceful() # First time CTRL-C, try to shutdown gracefully gevent.signal_handler(signal.SIGINT, request_shutdown_graceful) # User (or Heroku) requests a stop now, just mark tasks as interrupted. gevent.signal_handler(signal.SIGTERM, request_shutdown_now) class ProcessPool(object): """ Manages a pool of processes """ def __init__(self, watch_interval=1, extra_env=None): self.processes = [] self.desired_commands = [] self.greenlet_watch = None self.watch_interval = watch_interval self.stopping = False self.extra_env = extra_env def set_commands(self, commands, timeout=None): """ Sets the processes' desired commands for this pool and manages diff to reach that state """ self.desired_commands = commands target_commands = list(self.desired_commands) for process in list(self.processes): found = False for i in range(len(target_commands)): if process["command"] == target_commands[i]: target_commands.pop(i) found = True break if not found: self.stop_process(process, timeout) # What is left are the commands to add # TODO: we should only do this once memory conditions allow for command in target_commands: self.spawn(command) def spawn(self, command): """ Spawns a new process and adds it to the pool """ # process_name # output # time before starting (wait for port?) # start_new_session=True : avoid sending parent signals to child env = dict(os.environ) env["MRQ_IS_SUBPROCESS"] = "1" env.update(self.extra_env or {}) # Extract env variables from shell commands. parts = shlex.split(command) for p in list(parts): if "=" in p: env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:] parts.pop(0) else: break p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd()) self.processes.append({ "subprocess": p, "pid": p.pid, "command": command, "psutil": psutil.Process(pid=p.pid) }) def start(self): self.greenlet_watch = gevent.spawn(self.watch) self.greenlet_watch.start() def wait(self): """ Waits for the pool to be fully stopped """ while True: if not self.greenlet_watch: break if self.stopping: gevent.sleep(0.1) else: gevent.sleep(1) def watch(self): while True: self.watch_processes() gevent.sleep(self.watch_interval) def watch_processes(self): """ Manages the status of all the known processes """ for process in list(self.processes): self.watch_process(process) # Cleanup processes self.processes = [p for p in self.processes if not p.get("dead")] if self.stopping and len(self.processes) == 0: self.stop_watch() def watch_process(self, process): """ Manages the status of a single process """ status = process["psutil"].status() # TODO: how to avoid zombies? 
# print process["pid"], status if process.get("terminate"): if status in ("zombie", "dead"): process["dead"] = True elif process.get("terminate_at"): if time.time() > (process["terminate_at"] + 5): log.warning("Process %s had to be sent SIGKILL" % (process["pid"], )) process["subprocess"].send_signal(signal.SIGKILL) elif time.time() > process["terminate_at"]: log.warning("Process %s had to be sent SIGTERM" % (process["pid"], )) process["subprocess"].send_signal(signal.SIGTERM) else: if status in ("zombie", "dead"): # Restart a new process right away (TODO: sleep a bit? max retries?) process["dead"] = True self.spawn(process["command"]) elif status not in ("running", "sleeping"): log.warning("Process %s was in status %s" % (process["pid"], status)) # process["subprocess"].returncode in (0, 2, 3) def stop(self, timeout=None): """ Initiates a graceful stop of the processes """ self.stopping = True for process in list(self.processes): self.stop_process(process, timeout=timeout) def stop_process(self, process, timeout=None): """ Initiates a graceful stop of one process """ process["terminate"] = True if timeout is not None: process["terminate_at"] = time.time() + timeout process["subprocess"].send_signal(signal.SIGINT) def terminate(self): """ Terminates the processes right now with a SIGTERM """ for process in list(self.processes): process["subprocess"].send_signal(signal.SIGTERM) self.stop_watch() def kill(self): """ Kills the processes right now with a SIGKILL """ for process in list(self.processes): process["subprocess"].send_signal(signal.SIGKILL) self.stop_watch() def stop_watch(self): """ Stops the periodic watch greenlet, thus the pool itself """ if self.greenlet_watch: self.greenlet_watch.kill(block=False) self.greenlet_watch = None
{ "repo_name": "pricingassistant/mrq", "path": "mrq/processes.py", "copies": "1", "size": "6460", "license": "mit", "hash": 6388811664770033000, "line_mean": 29.9090909091, "line_max": 103, "alpha_frac": 0.5679566563, "autogenerated": false, "ratio": 4.315297261189045, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5383253917489045, "avg_score": null, "num_lines": null }
from future.builtins import range from .utils import memoize from . import context def redis_key(name, *args): prefix = context.get_current_config()["redis_prefix"] if name == "known_subqueues": return "%s:ksq:%s" % (prefix, args[0].root_id) elif name == "queue": return "%s:q:%s" % (prefix, args[0].id) elif name == "started_jobs": return "%s:s:started" % prefix elif name == "paused_queues": return "%s:s:paused" % prefix elif name == "notify": return "%s:notify:%s" % (prefix, args[0].root_id) @memoize def redis_zaddbyscore(): """ Increments multiple keys in a sorted set & returns them """ return context.connections.redis.register_script(""" local zset = KEYS[1] local min = ARGV[1] local max = ARGV[2] local offset = ARGV[3] local count = ARGV[4] local score = ARGV[5] local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count) for i, member in pairs(data) do redis.call('zadd', zset, score, member) end return data """) @memoize def redis_zpopbyscore(): """ Pops multiple keys by score """ return context.connections.redis.register_script(""" local zset = KEYS[1] local min = ARGV[1] local max = ARGV[2] local offset = ARGV[3] local count = ARGV[4] local data = redis.call('zrangebyscore', zset, min, max, 'LIMIT', offset, count) if #data > 0 then redis.call('zremrangebyrank', zset, 0, #data - 1) end return data """) @memoize def redis_lpopsafe(): """ Safe version of LPOP that also adds the key in a "started" zset """ return context.connections.redis.register_script(""" local key = KEYS[1] local zset_started = KEYS[2] local count = ARGV[1] local now = ARGV[2] local left = ARGV[3] local data = {} local current = nil for i=1, count do if left == '1' then current = redis.call('lpop', key) else current = redis.call('rpop', key) end if current == false then return data end data[i] = current redis.call('zadd', zset_started, now, current) end return data """) def redis_group_command(command, cnt, redis_key): with context.connections.redis.pipeline(transaction=False) as pipe: for _ in range(cnt): getattr(pipe, command)(redis_key) return [x for x in pipe.execute() if x]
{ "repo_name": "pricingassistant/mrq", "path": "mrq/redishelpers.py", "copies": "1", "size": "2251", "license": "mit", "hash": 3281085889448571400, "line_mean": 22.6947368421, "line_max": 80, "alpha_frac": 0.655708574, "autogenerated": false, "ratio": 3.0542740841248306, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42099826581248306, "avg_score": null, "num_lines": null }
from future.builtins import range import schema from featureforge import generate from featureforge.feature import make_feature EQ = 'EQ' APPROX = 'APPROX' IN = 'IN' RAISES = 'RAISES' EPSILON = 0.01 def _raise_predicate(spec, data, exception): try: spec(data) except exception: return True return False _PREDICATES = { EQ: lambda f, d, v: f(d) == v, APPROX: lambda f, d, v: abs(f(d) - v) < EPSILON, IN: lambda f, d, v: f(d) in v, RAISES: _raise_predicate } _EXPLAIN_PREDICATE_FAIL = { EQ: 'is not equal', APPROX: 'is not approx', IN: 'is not in', RAISES: 'not raised' } class FeatureFixtureCheckMixin(object): """ This class is a TestCase mixin that provides some assertions to test features. In most cases, you shouldn't use this directly but BaseFeatureFixture instead """ def assert_feature_passes_fixture(self, feature_spec, fixture): """ Check that the given feature (function or Feature instance) passes all the conditions given in the fixture `fixture` is a dictionary where each key/value pair describes a simple example for the feature. The key should be a string (which will be reported in case of failure, so you know which case failed), and the value is a tuple (input, predicate, value). The `input` is the value that will be passed as argument as a feature. The predicate and the value give the condition, and should be one of the following: * (input, EQ, value) checks that feature(input) == value * (input, APPROX, value) checks that feature(input) == value approximately the error allowed is given by the constant EPSILON in this module * (input, IN, values) checks that feature(input) in values * (input, RAISES, eclass) checks that feature(input) raises an exception of eclass type. Note that input/output validation always raise an exception that subclasses ValueError """ failures = [] feature_spec = make_feature(feature_spec) for label, (data_point, predicate, value) in fixture.items(): if not _PREDICATES[predicate](feature_spec, data_point, value): msg = '%s failed, %s %s %s' % ( label, feature_spec(data_point), _EXPLAIN_PREDICATE_FAIL[predicate], value) failures.append(msg) self.assertFalse(failures, msg='; '.join(failures)) def assert_passes_fuzz(self, feature_spec, tries=1000): """ Generates tries data points for the feature (which should have an input schema which allows generation) randomly, and applies those to the feature. It checks that the evaluation proceeds without raising exceptions and that it produces valid outputs according to the output schema. """ feature_spec = make_feature(feature_spec) for i in range(tries): data_point = generate.generate(feature_spec.input_schema) try: feature = feature_spec(data_point) except Exception as e: self.fail("Error evaluating; input=%r error=%r" % (data_point, e)) try: feature_spec.output_schema.validate(feature) except schema.SchemaError: self.fail("Invalid output schema; input=%r output=%r" % (data_point, feature)) class BaseFeatureFixture(FeatureFixtureCheckMixin): """ Inheriting this class together with unittest.TestCase allows you to quickly build test cases for features. Your subclass should define two class attributes: `feature` should be a function or a Feature() instance `fixture` has a list of cases to test; check the documentation of `assert_feature_passes_fixture` for more details. The class defined by this will validate all features in the fixture. It will also subject the feature to fuzzy testing if the input schema allows it. It's also possible to add additional tests to the testcase. 
If you want to have more control about how the fixture is applied or skip fuzzy testing, take a look at the FeatureFixtureCheckMixin. """ feature = None # Needs to be defined on subclasses def test_fixtures(self): self.assert_feature_passes_fixture(self.feature, self.fixtures) def test_fuzz(self): self.assert_passes_fuzz(self.feature) ### EXAMPLE ### if __name__ == "__main__": from featureforge.feature import input_schema, output_schema import unittest @input_schema(str) @output_schema(int, lambda n: n >= 0) def length(data_point): return len(data_point) # This is an example on how to use assertions directly class TestLength(unittest.TestCase, FeatureFixtureCheckMixin): def test_f(self): fixture = dict( test_eq=('hello', EQ, 5), test_approx=('world!', APPROX, 6.00001), test_in=('hello', IN, (5, 6, 1)), test_raise=(None, RAISES, ValueError), ) self.assert_feature_passes_fixture(length, fixture) def test_fuzz(self): self.assert_passes_fuzz(length) class TestLength2(unittest.TestCase, BaseFeatureFixture): feature = length fixtures = dict( test_eq=('hello', EQ, 5), test_approx=('world!', APPROX, 6.00001), test_in=('hello', IN, (5, 6, 1)), test_raise=(None, RAISES, ValueError), ) unittest.main()
{ "repo_name": "machinalis/featureforge", "path": "featureforge/validate.py", "copies": "2", "size": "5729", "license": "bsd-3-clause", "hash": -8647725017365993, "line_mean": 33.5120481928, "line_max": 78, "alpha_frac": 0.6220980974, "autogenerated": false, "ratio": 4.184806428049671, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5806904525449671, "avg_score": null, "num_lines": null }
from future.builtins import str from django.template import Library, Node from django.template.base import TemplateSyntaxError from fluent_contents.admin.contentitems import BaseContentItemInline from tag_parser import template_tag, parse_as_var, parse_token_kwargs register = Library() @register.filter def only_content_item_inlines(inlines): return [i for i in inlines if isinstance(i, BaseContentItemInline)] @register.filter def only_content_item_formsets(formsets): return [f for f in formsets if isinstance(f.opts, BaseContentItemInline)] @register.filter def has_no_visible_fields(inline_admin_form): # fieldset = admin Fieldset object. for name, options in inline_admin_form.fieldsets: for name_slot in options.get('fields', ()): # Lines can include (field, field) if not isinstance(name_slot, (list, tuple)): name_slot = [name_slot] for name in name_slot: if not inline_admin_form.form.fields[name].widget.is_hidden: return False return True @register.filter def group_plugins_into_categories(plugins): """ Return all plugins, grouped by category. The structure is a {"Categorynane": [list of plugin classes]} """ if not plugins: return {} plugins = sorted(plugins, key=lambda p: p.verbose_name) categories = {} for plugin in plugins: title = str(plugin.category or u"") # enforce resolving ugettext_lazy proxies. if title not in categories: categories[title] = [] categories[title].append(plugin) return categories @register.filter def plugin_categories_to_list(plugin_categories): if not plugin_categories: return [] categories_list = list(plugin_categories.items()) categories_list.sort(key=lambda item: item[0]) # sort category names return categories_list @register.filter def plugin_categories_to_choices(categories): """ Return a tuple of plugin model choices, suitable for a select field. Each tuple is a ("TypeName", "Title") value. """ choices = [] for category, items in categories.items(): if items: plugin_tuples = tuple((plugin.type_name, plugin.verbose_name) for plugin in items) if category: choices.append((category, plugin_tuples)) else: choices += plugin_tuples choices.sort(key=lambda item: item[0]) return choices @template_tag(register, 'getfirstof') class GetFirstOfNode(Node): def __init__(self, filters, var_name): self.filters = filters # list of FilterExpression nodes. self.var_name = var_name def render(self, context): value = None for filterexpr in self.filters: # The ignore_failures argument is the most important, otherwise # the value is converted to the TEMPLATE_STRING_IF_INVALID which happens with the with block. value = filterexpr.resolve(context, ignore_failures=True) if value is not None: break context[self.var_name] = value return '' @classmethod def parse(cls, parser, token): """ Parse the node: {% getfirstof val1 val2 as val3 %} parser: a Parser class. token: a Token class. """ bits, var_name = parse_as_var(parser, token) tag_name, choices, _ = parse_token_kwargs(parser, bits, allowed_kwargs=()) if var_name is None: raise TemplateSyntaxError("Expected syntax: {{% {0} val1 val2 as val %}}".format(tag_name)) return cls(choices, var_name)
{ "repo_name": "jpotterm/django-fluent-contents", "path": "fluent_contents/templatetags/placeholder_admin_tags.py", "copies": "2", "size": "3669", "license": "apache-2.0", "hash": -7787792319328036000, "line_mean": 31.1842105263, "line_max": 105, "alpha_frac": 0.6462251295, "autogenerated": false, "ratio": 4.085746102449889, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0009807984317148654, "num_lines": 114 }
from future.builtins import str from future.utils import python_2_unicode_compatible from django.core.exceptions import ValidationError from django.db import models from django.utils.translation import ugettext_lazy as _ from micawber import ProviderException from fluent_contents.models.db import ContentItem from fluent_contents.plugins.oembeditem.fields import OEmbedUrlField from fluent_contents.plugins.oembeditem import backend @python_2_unicode_compatible class AbstractOEmbedItem(ContentItem): """ The base class for an OEmbedItem, This allows to create custom models easily. .. versionadded:: 1.0 """ TYPE_PHOTO = 'photo' TYPE_VIDEO = 'video' TYPE_RICH = 'rich' # HTML TYPE_LINK = 'link' # Fetch parameters embed_url = OEmbedUrlField(_("URL to embed")) embed_max_width = models.PositiveIntegerField(_("Max width"), blank=True, null=True) embed_max_height = models.PositiveIntegerField(_("Max height"), blank=True, null=True) # The cached response: type = models.CharField(editable=False, max_length=20, null=True, blank=True) url = models.URLField(editable=False, null=True, blank=True) title = models.CharField(editable=False, max_length=512, null=True, blank=True) description = models.TextField(editable=False, null=True, blank=True) author_name = models.CharField(editable=False, max_length=255, null=True, blank=True) author_url = models.URLField(editable=False, null=True, blank=True) provider_name = models.CharField(editable=False, max_length=255, null=True, blank=True) provider_url = models.URLField(editable=False, null=True, blank=True) thumbnail_url = models.URLField(editable=False, null=True, blank=True) thumbnail_height = models.IntegerField(editable=False, null=True, blank=True) thumbnail_width = models.IntegerField(editable=False, null=True, blank=True) height = models.IntegerField(editable=False, null=True, blank=True) width = models.IntegerField(editable=False, null=True, blank=True) html = models.TextField(editable=False, null=True, blank=True) class Meta: abstract = True verbose_name = _("Online media") verbose_name_plural = _("Online media") def __str__(self): return self.title or self.embed_url def __init__(self, *args, **kwargs): super(AbstractOEmbedItem, self).__init__(*args, **kwargs) self._old_embed_url = self.embed_url self._old_embed_max_width = self.embed_max_width self._old_embed_max_height = self.embed_max_height def save(self, *args, **kwargs): self.update_oembed_data() # if clean() did not run, still update the oembed super(AbstractOEmbedItem, self).save(*args, **kwargs) def clean(self): # Avoid getting server errors when the URL is not valid. try: self.update_oembed_data() except ProviderException as e: raise ValidationError(str(e)) def update_oembed_data(self, force=False, **backend_params): """ Update the OEmbeddata if needed. .. versionadded:: 1.0 Added force and backend_params parameters. """ if force or self._input_changed(): # Fetch new embed code params = self.get_oembed_params(self.embed_url) params.update(backend_params) response = backend.get_oembed_data(self.embed_url, **params) # Save it self.store_response(response) # Track field changes self._old_embed_url = self.embed_url self._old_embed_max_width = self.embed_max_width self._old_embed_max_height = self.embed_max_height def get_oembed_params(self, embed_url): """ .. versionadded:: 1.0 Allow to define the parameters that are passed to the backend to fetch the current URL. 
""" return { 'max_width': self.embed_max_width, 'max_height': self.embed_max_height, } def _input_changed(self): return not self.type \ or self._old_embed_url != self.embed_url \ or self._old_embed_max_width != self.embed_max_width \ or self._old_embed_max_height != self.embed_max_height def store_response(self, response): # Store the OEmbed response # Unused: cache_age # Security considerations: URLs are checked by Django for http:// or ftp:// KEYS = ( 'type', 'title', 'description', 'author_name', 'author_url', 'provider_url', 'provider_name', 'thumbnail_width', 'thumbnail_height', 'thumbnail_url', 'height', 'width', 'html', 'url' ) for key in KEYS: if key in response: setattr(self, key, response[key]) class OEmbedItem(AbstractOEmbedItem): """ Embedded media via OEmbed """ class Meta: verbose_name = _("Online media") verbose_name_plural = _("Online media")
{ "repo_name": "jpotterm/django-fluent-contents", "path": "fluent_contents/plugins/oembeditem/models.py", "copies": "2", "size": "5011", "license": "apache-2.0", "hash": 7428879500956266000, "line_mean": 34.2887323944, "line_max": 105, "alpha_frac": 0.6465775294, "autogenerated": false, "ratio": 3.8546153846153848, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5501192914015385, "avg_score": null, "num_lines": null }
from future.builtins import str from future.utils import python_2_unicode_compatible from django.db import models from django.utils.translation import ugettext_lazy as _ from fluent_contents.extensions import PluginImageField, PluginUrlField from fluent_contents.models.db import ContentItem from . import appsettings @python_2_unicode_compatible class PictureItem(ContentItem): """ Display a picture """ ALIGN_LEFT = 'left' ALIGN_CENTER = 'center' ALIGN_RIGHT = 'right' ALIGN_CHOICES = ( (ALIGN_LEFT, _("Left")), (ALIGN_CENTER, _("Center")), (ALIGN_RIGHT, _("Right")), ) image = PluginImageField(_("Image"), upload_to=appsettings.FLUENT_PICTURE_UPLOAD_TO) caption = models.TextField(_("Caption"), blank=True) align = models.CharField(_("Align"), max_length=10, choices=ALIGN_CHOICES, blank=True) url = PluginUrlField(_("URL"), blank=True) in_new_window = models.BooleanField(_("Open in a new window"), default=False, blank=True) class Meta: verbose_name = _("Picture") verbose_name_plural = _("Pictures") def __str__(self): return self.caption or str(self.image) @property def align_class(self): """ The CSS class for alignment. """ if self.align == self.ALIGN_LEFT: return 'align-left' elif self.align == self.ALIGN_CENTER: return 'align-center' elif self.align == self.ALIGN_RIGHT: return 'align-right' else: return ''
{ "repo_name": "ixc/django-fluent-contents", "path": "fluent_contents/plugins/picture/models.py", "copies": "2", "size": "1548", "license": "apache-2.0", "hash": -886735987077330000, "line_mean": 30.5918367347, "line_max": 93, "alpha_frac": 0.6324289406, "autogenerated": false, "ratio": 3.909090909090909, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.554151984969091, "avg_score": null, "num_lines": null }
from future.builtins import str
from mrq.queue import Queue
from mrq.task import Task
from mrq.job import Job
from mrq.context import log, connections, run_task, get_current_config
import datetime
import time


class RequeueInterruptedJobs(Task):

    """ Requeue jobs that were marked as status=interrupt when a worker got a SIGTERM. """

    max_concurrency = 1

    def run(self, params):

        return run_task("mrq.basetasks.utils.JobAction", {
            "status": "interrupt",
            "action": "requeue_retry"
        })


class RequeueRetryJobs(Task):

    """ Requeue jobs that were marked as retry. """

    max_concurrency = 1

    def run(self, params):

        return run_task("mrq.basetasks.utils.JobAction", {
            "status": "retry",
            "dateretry": {"$lte": datetime.datetime.utcnow()},
            "action": "requeue_retry"
        })


class RequeueStartedJobs(Task):

    """ Requeue jobs that were marked as status=started and never finished.

        That may be because the worker got a SIGKILL or was terminated abruptly.
        The timeout parameter of this task is in addition to the task's own timeout.
    """

    max_concurrency = 1

    def run(self, params):

        additional_timeout = params.get("timeout", 300)

        stats = {
            "requeued": 0,
            "started": 0
        }

        # There shouldn't be that much "started" jobs so we can quite safely
        # iterate over them.

        fields = {
            "_id": 1,
            "datestarted": 1,
            "queue": 1,
            "path": 1,
            "retry_count": 1,
            "worker": 1,
            "status": 1
        }
        for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {"status": "started"}, projection=fields):

            job = Job(job_data["_id"])
            job.set_data(job_data)
            stats["started"] += 1

            expire_date = datetime.datetime.utcnow(
            ) - datetime.timedelta(seconds=job.timeout + additional_timeout)

            requeue = job_data["datestarted"] < expire_date

            if not requeue:

                # Check that the supposedly running worker still exists
                requeue = not connections.mongodb_jobs.mrq_workers.find_one(
                    {"_id": job_data["worker"]}, projection={"_id": 1})

            if requeue:
                log.debug("Requeueing job %s" % job.id)
                job.requeue()
                stats["requeued"] += 1

        return stats
{ "repo_name": "pricingassistant/mrq", "path": "mrq/basetasks/cleaning.py", "copies": "1", "size": "2460", "license": "mit", "hash": -7576118595735490000, "line_mean": 27.9411764706, "line_max": 105, "alpha_frac": 0.5792682927, "autogenerated": false, "ratio": 4.026186579378069, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5105454872078069, "avg_score": null, "num_lines": null }
from future.builtins import str, object from future.utils import with_metaclass import inspect from sira.modelling.utils import class_getter class NoDefaultException(Exception): """ Thrown when a :py:class:`_Base` is created without providing values for one or more :py:class:`Element`s which do not have default values. Note that users should never be instantiating or subclassing :py:class:`_Base` directly. One should extend a class returned by :py:func:`generate_element_base`, which returns a class which extends :py:class:`_Base`. """ pass class ValidationError(Exception): """ Thrown when validation of some item fails. Some examples of when this may occur are: - A value for an :py:class:`Element` is provided which is not an an instance of the type specified for the Element (which is specified via argument *cls* to :py:meth:`Element.__init__`). - One of the validators provided for an element (see the agument *validators* to :py:class:`Element.__init__`) fails or raises an exception of this type. """ pass # class AlreadySavedException(Exception): # """ # Raised if an attempt is made to save a 'Document' which has previously been # saved. # """ # pass class DisallowedElementException(ValueError): """ Raised if an an attempt is made to define an element with a disallowed name. Disallowed names are specified by :py:attr:StructuralMeta.DISALLOWED_FIELDS. """ pass class MultipleBasesOfTypeBaseError(ValueError): """ Raised if an attempt is made to define a class which inherits from multiple classes (``c``) for which ``issubclass(type(c), StructuralMeta)`` is *True*. The reason to dissalow multiple inheritance of said classes is to conform to the structure of XML, where an element can only have one parent. This may not turn out to be an issue as other interpretations of class hierachies in the context of XML may be sensible/feasible... but the semantics and practicalities would need to be considered so stop this for now and see how we go. """ pass class Info(str): """ Strings that provide 'metadata' on classes. At present, this is only used to identify immutable strings on a class when they are displayed. """ pass class Element(object): """ Represents an element of a model. If a model were represented in a relational database, this would be analogous to a field in a table. """ @staticmethod def NO_DEFAULT(): """ A callable that can be used to signal that an Element has no default value. Simply raises a :py:exception:`NoDefaultException`. """ raise NoDefaultException() def __init__(self, cls, description, default=None, validators=None): self.cls = cls self.description = Info(description) self._default = default self.validators = validators @property def default(self): if self._default is False: raise NoDefaultException() return self._default() if callable(self._default) else self._default class StructuralMeta(type): """ Metaclass for structural """ #: Names of :py:class:`Element`s that cannot defined on any class ``c`` for #: which ``issubclass(type(c), StructuralMeta)`` is *True*. These are names #: of elements which are used internally and for the sake of the performance #: of attribute lookup, are banned for other use. 
DISALLOWED_FIELDS = [ 'class', 'predecessor', '_predecessor', '_id', '_value', '_attributes'] def __new__(mcs, name, bases, dct): # check that only one base is instance of _Base if len([base for base in bases if issubclass(type(base), StructuralMeta)]) > 1: raise MultipleBasesOfTypeBaseError('Invalid bases in class {}'.format(name)) def extract_params_of_type(clazz): # extract the parameters params = {} for k in list(dct.keys()): if isinstance(dct[k], clazz): params[k] = dct.pop(k) # cannot have a parameter with name class, as this messes with # serialisation for field in StructuralMeta.DISALLOWED_FIELDS: if field in params: raise DisallowedElementException( 'class {} cannot have Element with name "{}"'.format(name, field)) return params dct['__params__'] = extract_params_of_type(Element) # create a json description of the class json_desc = {} for k, v in list(dct['__params__'].items()): # TODO: put validators in here json_desc[k] = {'class': v.cls} for k, v in list(extract_params_of_type(Info).items()): json_desc[k] = { 'class': 'Info', 'value': str(v)} dct['__json_desc__'] = json_desc return super(StructuralMeta, mcs).__new__(mcs, name, bases, dct) def __init__(cls, name, bases, dct): # We do this here as I prefer to get the module from the class. Not sure # if it matters in practice, but it feels better. cls_module contains # the module in which this class is defined and we know that the types # declared for the Elements of a class are accessible in that module. cls_module = inspect.getmodule(cls).__name__ cls.__json_desc__['class'] = '.'.join([cls_module, name]) for param in list(cls.__params__.values()): param.cls_module = cls_module for k, v in list(cls.__json_desc__.items()): if k == 'class': continue try: ecls = class_getter([cls_module, v['class']]) if hasattr(ecls, '__json_desc__'): cls.__json_desc__[k] = ecls.__json_desc__ else: v['class'] = '.'.join([ecls.__module__, ecls.__name__]) if isinstance(ecls, Element): try: default = v.default except NoDefaultException: pass else: # default = jsonify(default) if default: cls.__json_desc__[k]['default'] = default except: v['class'] = '.'.join(['__builtin__', v['class']]) super(StructuralMeta, cls).__init__(name, bases, dct) class Base(metaclass=StructuralMeta): """ Base class for all 'model' classes. **This should never be used by clients** and serves as a base class for dynamically generated classes returned by :py:func:``, which are designed for use by clients. """ def __init__(self, **kwargs): self._predecessor = kwargs.pop('predecessor', None) if self._predecessor is None: # then we provide default values for each element for k, v in list(self.__params__.items()): if k not in kwargs: try: kwargs[k] = v.default except NoDefaultException: raise ValueError('Must provide value for {}'.format(k)) for k, v in list(kwargs.items()): setattr(self, k, v)
{ "repo_name": "GeoscienceAustralia/sifra", "path": "sira/modelling/structural.py", "copies": "1", "size": "7502", "license": "apache-2.0", "hash": -3591056139664210400, "line_mean": 34.3867924528, "line_max": 90, "alpha_frac": 0.5881098374, "autogenerated": false, "ratio": 4.418138987043581, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5506248824443581, "avg_score": null, "num_lines": null }
from future_builtins import zip from operator import itemgetter from collections import defaultdict from heapdict import heapdict def identity(value): """ Identity mapping of single argument. """ return value def make_graph(source, edge_factory=identity): """ Make graph out of list of edges. :param source: list of tuples in form of (from, to, *payload) :param edge_factory: factory for edge attributes :returns: adjacency list Example: >>> make_graph([(1, 2, 10), (2, 3, 15), (1, 3, 30)]) defaultdict(<type 'list'>, {1: [(2, 10), (3, 30)], 2: [(3, 15)]}) >>> from collections import namedtuple >>> EdgeAttrs = namedtuple('EdgeAttrs', 'time distance mode') >>> make_graph([(1, 2, 10, 0.4, 'WALK'), (2, 3, 30, 15, 'BUS'), (1, 3, 15, 30, 'TRAIN')], edge_factory=EdgeAttrs) defaultdict(<type 'list'>, {1: [(2, EdgeAttrs(time=10, distance=0.4, mode='WALK')), (3, EdgeAttrs(time=15, distance=30, mode='TRAIN'))], 2: [(3, EdgeAttrs(time=30, distance=15, mode='BUS'))]}) """ graph = defaultdict(list) for edge in source: left, right = edge[:2] payload = edge_factory(*edge[2:]) graph[left].append((right, payload)) return graph def reverse_graph(source): """ Reverse direction of graph. :param source: adjacency list of graph Example: >>> reverse_graph(make_graph([(1, 2, 10), (2, 3, 15), (1, 3, 30)])) defaultdict(<type 'list'>, {2: [(1, 10)], 3: [(1, 30), (2, 15)]}) """ reversed = defaultdict(list) for left, edges in source.items(): for right, payload in edges: reversed[right].append((left, payload)) return reversed def dijkstra_kernel(graph, start, previous, cost_fn): """ Generator for dijkstra search. Each iteration yields visited node id and cost to reach it from start node. :param graph: adjacency list of graph :param start: start node id :param previous: dictionary for storing settled nodes :param cost_fn: cost function applied for each edge, must be stateless """ queue = heapdict({start: 0.0}) previous.update({start: (None, 0.0, None)}) while queue: left, cost = queue.popitem() yield left, cost for right, payload in graph.get(left, []): alt_cost = cost + cost_fn(payload) # if there was no cost associated or cost is lower if right not in previous or alt_cost < previous[right][1]: queue[right] = alt_cost previous[right] = (left, alt_cost, payload) def backtrack(previous, start): """ Collect edges visited by dijkstra kernel. :param previous: dictionary of settled nodes :param start: node id to start backtracking from :returns: reversed list of edges Example: >>> list(backtrack({3: (2, 25, 15), 2: (1, 10, 10), 1: (None, 0.0, None)}, 3)) [(2, 3, 15), (1, 2, 10)] """ left = start while True: right, _, payload = previous[left] if right is None: break yield right, left, payload left = right def just_ids(source): """ Convert dijkstra result to contain only node ids. :param source: (cost, edges) tuple returned by dijkstra search Example: >>> just_ids((25.0, [(1, 2, 10.0), (2, 3, 15.0)])) (25.0, [1, 2, 3]) """ cost, edges = source if not len(edges): return (cost, []) result = [left for left, right, _ in edges] result.append(right) return (cost, result) def dijkstra(graph, start, end, cost_fn=identity): """ Dijkstra search on directed graph. 
    :param graph: adjacency list of graph
    :param start: node to start search at
    :param end: final node
    :param cost_fn: cost function applied on edges

    Example:
    >>> graph = make_graph([(1, 2, 10.0), (2, 3, 15.0), (1, 3, 30.0)])
    >>> dijkstra(graph, 1, 3)
    (25.0, [(1, 2, 10.0), (2, 3, 15.0)])
    """

    previous = {}

    # keep visiting nodes till search is finished
    for id, cost in dijkstra_kernel(graph, start, previous, cost_fn):
        if id == end:
            break

    # if end node was reached
    if id == end:
        edges = list(backtrack(previous, id))
        edges.reverse()

        return (cost, edges)


def bidirect_dijkstra(graph, start, end, bwd_graph=None, cost_fn=identity):
    """
    Bidirectional dijkstra search on directed graph.

    Search from both start and end of the graph reducing search space substantially.

    :param graph: adjacency list of graph
    :param start: node to start search at
    :param end: final node
    :param bwd_graph: reversed graph for backward search
    :param cost_fn: cost function applied on edges

    Example:
    >>> graph = make_graph([(1, 2, 10.0), (2, 3, 15.0), (1, 3, 30.0)])
    >>> bidirect_dijkstra(graph, 1, 3)
    (25.0, [(1, 2, 10.0), (2, 3, 15.0)])
    """

    fwd_previous = {}
    bwd_previous = {}

    # build the reversed graph only when the caller did not supply one
    if bwd_graph is None:
        bwd_graph = reverse_graph(graph)

    fwd_kernel = dijkstra_kernel(graph, start, fwd_previous, cost_fn)
    bwd_kernel = dijkstra_kernel(bwd_graph, end, bwd_previous, cost_fn)

    intersection = None
    cost = float('inf')

    # helper for finding intersection candidate
    def check_intersects(id, previous):
        if id in previous:
            alt_cost = fwd_previous[id][1] + bwd_previous[id][1]
            if alt_cost < cost:
                return (id, alt_cost)

        return (None, float('inf'))

    for fwd, bwd in zip(fwd_kernel, bwd_kernel):
        # check if there is better intersection
        intersection, cost = min((intersection, cost),
                                 check_intersects(fwd[0], bwd_previous),
                                 check_intersects(bwd[0], fwd_previous),
                                 key=itemgetter(1))

        # stop searching if current path is not improved
        if cost < fwd[1] + bwd[1]:
            break

    # if shortest path was found
    if intersection is not None:
        edges = list(backtrack(fwd_previous, intersection))
        edges.reverse()
        edges.extend((right, left, payload) for left, right, payload in backtrack(bwd_previous, intersection))

        return (cost, edges)
{ "repo_name": "shaxbee/py-computer-science", "path": "dijkstra.py", "copies": "1", "size": "6294", "license": "mit", "hash": 22632005985939016, "line_mean": 31.112244898, "line_max": 200, "alpha_frac": 0.5861137591, "autogenerated": false, "ratio": 3.648695652173913, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4734809411273913, "avg_score": null, "num_lines": null }
from future_builtins import zip from django.core.exceptions import FieldError from django.db import transaction from django.db.backends.util import truncate_name from django.db.models.query_utils import select_related_descend from django.db.models.sql.constants import * from django.db.models.sql.datastructures import EmptyResultSet from django.db.models.sql.expressions import SQLEvaluator from django.db.models.sql.query import get_order_dir, Query from django.db.utils import DatabaseError class SQLCompiler(object): def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {} def pre_sql_setup(self): """ Does any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. # TODO: after the query has been executed, the altered state should be # cleaned. We are not using a clone() of the query here. """ if not self.query.tables: self.query.join((None, self.query.model._meta.db_table, None, None)) if (not self.query.select and self.query.default_cols and not self.query.included_inherited_models): self.query.setup_inherited_models() if self.query.select_related and not self.query.related_select_cols: self.fill_related_selections() def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def as_sql(self, with_limits=True, with_col_aliases=False): """ Creates the SQL for this query. Returns the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ if with_limits and self.query.low_mark == self.query.high_mark: return '', () self.pre_sql_setup() # After executing the query, we must get rid of any joins the query # setup created. So, take note of alias counts before the query ran. # However we do not want to get rid of stuff done in pre_sql_setup(), # as the pre_sql_setup will modify query state in a way that forbids # another run of it. self.refcounts_before = self.query.alias_refcount.copy() out_cols = self.get_columns(with_col_aliases) ordering, ordering_group_by = self.get_ordering() distinct_fields = self.get_distinct() # This must come after 'select', 'ordering' and 'distinct' -- see # docstring of get_from_clause() for details. 
from_, f_params = self.get_from_clause() qn = self.quote_name_unless_alias where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection) having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection) params = [] for val in self.query.extra_select.itervalues(): params.extend(val[1]) result = ['SELECT'] if self.query.distinct: result.append(self.connection.ops.distinct_sql(distinct_fields)) result.append(', '.join(out_cols + self.query.ordering_aliases)) result.append('FROM') result.extend(from_) params.extend(f_params) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping, gb_params = self.get_grouping() if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) not implemented.") if ordering: # If the backend can't group by PK (i.e., any database # other than MySQL), then any fields mentioned in the # ordering clause needs to be in the group by clause. if not self.connection.features.allows_group_by_pk: for col, col_params in ordering_group_by: if col not in grouping: grouping.append(str(col)) gb_params.extend(col_params) else: ordering = self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) params.extend(gb_params) if having: result.append('HAVING %s' % having) params.extend(h_params) if ordering: result.append('ORDER BY %s' % ', '.join(ordering)) if with_limits: if self.query.high_mark is not None: result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark)) if self.query.low_mark: if self.query.high_mark is None: val = self.connection.ops.no_limit_value() if val: result.append('LIMIT %d' % val) result.append('OFFSET %d' % self.query.low_mark) if self.query.select_for_update and self.connection.features.has_select_for_update: # If we've been asked for a NOWAIT query but the backend does not support it, # raise a DatabaseError otherwise we could get an unexpected deadlock. nowait = self.query.select_for_update_nowait if nowait and not self.connection.features.has_select_for_update_nowait: raise DatabaseError('NOWAIT is not supported on this database backend.') result.append(self.connection.ops.for_update_sql(nowait=nowait)) # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(self.refcounts_before) return ' '.join(result), tuple(params) def as_nested_sql(self): """ Perform the same functionality as the as_sql() method, returning an SQL string and parameters. However, the alias prefixes are bumped beforehand (in a copy -- the current query isn't changed), and any ordering is removed if the query is unsliced. Used when nesting this query inside another. """ obj = self.query.clone() if obj.low_mark == 0 and obj.high_mark is None: # If there is no slicing in use, then we can safely drop all ordering obj.clear_ordering(True) obj.bump_prefix() return obj.get_compiler(connection=self.connection).as_sql() def get_columns(self, with_aliases=False): """ Returns the list of columns to use in the select statement. If no columns have been specified, returns all columns relating to fields in the model. If 'with_aliases' is true, any column names that are duplicated (without the table names) are given unique aliases. This is needed in some cases to avoid ambiguity with nested queries. 
""" qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()] aliases = set(self.query.extra_select.keys()) if with_aliases: col_aliases = aliases.copy() else: col_aliases = set() if self.query.select: only_load = self.deferred_to_columns() for col in self.query.select: if isinstance(col, (list, tuple)): alias, column = col table = self.query.alias_map[alias].table_name if table in only_load and column not in only_load[table]: continue r = '%s.%s' % (qn(alias), qn(column)) if with_aliases: if col[1] in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append('%s AS %s' % (r, qn2(col[1]))) aliases.add(r) col_aliases.add(col[1]) else: result.append(r) aliases.add(r) col_aliases.add(col[1]) else: result.append(col.as_sql(qn, self.connection)) if hasattr(col, 'alias'): aliases.add(col.alias) col_aliases.add(col.alias) elif self.query.default_cols: cols, new_aliases = self.get_default_columns(with_aliases, col_aliases) result.extend(cols) aliases.update(new_aliases) max_name_length = self.connection.ops.max_name_length() result.extend([ '%s%s' % ( aggregate.as_sql(qn, self.connection), alias is not None and ' AS %s' % qn(truncate_name(alias, max_name_length)) or '' ) for alias, aggregate in self.query.aggregate_select.items() ]) for table, col in self.query.related_select_cols: r = '%s.%s' % (qn(table), qn(col)) if with_aliases and col in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append(r) aliases.add(r) col_aliases.add(col) self._select_aliases = aliases return result def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: opts = self.query.model._meta # Skip all proxy to the root proxied model opts = opts.concrete_model._meta qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name aliases = set() only_load = self.deferred_to_columns() if start_alias: seen = {None: start_alias} for field, model in opts.get_fields_with_model(): if local_only and model is not None: continue if start_alias: try: alias = seen[model] except KeyError: link_field = opts.get_ancestor_link(model) alias = self.query.join((start_alias, model._meta.db_table, link_field.column, model._meta.pk.column)) seen[model] = alias else: # If we're starting from the base model of the queryset, the # aliases will have already been set up in pre_sql_setup(), so # we can save time here. 
alias = self.query.included_inherited_models[model] table = self.query.alias_map[alias].table_name if table in only_load and field.column not in only_load[table]: continue if as_pairs: result.append((alias, field.column)) aliases.add(alias) continue if with_aliases and field.column in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s.%s AS %s' % (qn(alias), qn2(field.column), c_alias)) col_aliases.add(c_alias) aliases.add(c_alias) else: r = '%s.%s' % (qn(alias), qn2(field.column)) result.append(r) aliases.add(r) if with_aliases: col_aliases.add(field.column) return result, aliases def get_distinct(self): """ Returns a quoted list of fields to use in DISTINCT ON part of the query. Note that this method can alter the tables in the query, and thus it must be called before get_from_clause(). """ qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = [] opts = self.query.model._meta for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) field, col, alias, _, _ = self._setup_joins(parts, opts, None) col, alias = self._final_join_removal(col, alias) result.append("%s.%s" % (qn(alias), qn2(col))) return result def get_ordering(self): """ Returns a tuple containing a list representing the SQL elements in the "order by" clause, and the list of SQL elements that need to be added to the GROUP BY clause as a result of the ordering. Also sets the ordering_aliases attribute on this instance to a list of extra aliases needed in the select. Determining the ordering SQL can change the tables we need to include, so this should be run *before* get_from_clause(). """ if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by else: ordering = (self.query.order_by or self.query.model._meta.ordering or []) qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name distinct = self.query.distinct select_aliases = self._select_aliases result = [] group_by = [] ordering_aliases = [] if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] # It's possible, due to model inheritance, that normal usage might try # to include the same field more than once in the ordering. We track # the table/column pairs we use and discard any after the first use. processed_pairs = set() for field in ordering: if field == '?': result.append(self.connection.ops.random_function_sql()) continue if isinstance(field, int): if field < 0: order = desc field = -field else: order = asc result.append('%s %s' % (field, order)) group_by.append((field, [])) continue col, order = get_order_dir(field, asc) if col in self.query.aggregate_select: result.append('%s %s' % (qn(col), order)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) if (table, col) not in processed_pairs: elt = '%s.%s' % (qn(table), col) processed_pairs.add((table, col)) if not distinct or elt in select_aliases: result.append('%s %s' % (elt, order)) group_by.append((elt, [])) elif get_order_dir(field)[0] not in self.query.extra_select: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. 
for table, col, order in self.find_ordering_name(field, self.query.model._meta, default_order=asc): if (table, col) not in processed_pairs: elt = '%s.%s' % (qn(table), qn2(col)) processed_pairs.add((table, col)) if distinct and elt not in select_aliases: ordering_aliases.append(elt) result.append('%s %s' % (elt, order)) group_by.append((elt, [])) else: elt = qn2(col) if distinct and col not in select_aliases: ordering_aliases.append(elt) result.append('%s %s' % (elt, order)) group_by.append(self.query.extra_select[col]) self.query.ordering_aliases = ordering_aliases return result, group_by def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Returns the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) pieces = name.split(LOOKUP_SEP) field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model. if field.rel and len(joins) > 1 and opts.ordering: # Firstly, avoid infinite loops. if not already_seen: already_seen = set() join_tuple = tuple([self.query.alias_map[j].table_name for j in joins]) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results col, alias = self._final_join_removal(col, alias) return [(alias, col, order)] def _setup_joins(self, pieces, opts, alias): """ A helper method for get_ordering and get_distinct. This method will call query.setup_joins, handle refcounts and then promote the joins. Note that get_ordering and get_distinct must produce same target columns on same input, as the prefixes of get_ordering and get_distinct must match. Executing SQL where this is not true is an error. """ if not alias: alias = self.query.get_initial_alias() field, target, opts, joins, _, _ = self.query.setup_joins(pieces, opts, alias, False) # We will later on need to promote those joins that were added to the # query afresh above. joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2] alias = joins[-1] col = target.column if not field.rel: # To avoid inadvertent trimming of a necessary alias, use the # refcount to show that we are referencing a non-relation field on # the model. self.query.ref_alias(alias) # Must use left outer joins for nullable fields and their relations. # Ordering or distinct must not affect the returned set, and INNER # JOINS for nullable fields could do this. if joins_to_promote: self.query.promote_alias_chain(joins_to_promote, self.query.alias_map[joins_to_promote[0]].join_type == self.query.LOUTER) return field, col, alias, joins, opts def _final_join_removal(self, col, alias): """ A helper method for get_distinct and get_ordering. This method will trim extra not-needed joins from the tail of the join chain. This is very similar to what is done in trim_joins, but we will trim LEFT JOINS here. It would be a good idea to consolidate this method and query.trim_joins(). 
""" if alias: while 1: join = self.query.alias_map[alias] if col != join.rhs_join_col: break self.query.unref_alias(alias) alias = join.lhs_alias col = join.lhs_join_col return col, alias def get_from_clause(self): """ Returns a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Sub-classes, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables we need. This means the select columns, ordering and distinct must be done first. """ result = [] qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name first = True for alias in self.query.tables: if not self.query.alias_refcount[alias]: continue try: name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue alias_str = (alias != name and ' %s' % alias or '') if join_type and not first: result.append('%s %s%s ON (%s.%s = %s.%s)' % (join_type, qn(name), alias_str, qn(lhs), qn2(lhs_col), qn(alias), qn2(col))) else: connector = not first and ', ' or '' result.append('%s%s%s' % (connector, qn(name), alias_str)) first = False for t in self.query.extra_tables: alias, unused = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # calls increments the refcount, so an alias refcount of one means # this is the only reference. if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: connector = not first and ', ' or '' result.append('%s%s' % (connector, qn(alias))) first = False return result, [] def get_grouping(self): """ Returns a tuple representing the SQL elements in the "group by" clause. """ qn = self.quote_name_unless_alias result, params = [], [] if self.query.group_by is not None: if (len(self.query.model._meta.fields) == len(self.query.select) and self.connection.features.allows_group_by_pk): self.query.group_by = [ (self.query.model._meta.db_table, self.query.model._meta.pk.column) ] group_by = self.query.group_by or [] extra_selects = [] for extra_select, extra_params in self.query.extra_select.itervalues(): extra_selects.append(extra_select) params.extend(extra_params) cols = (group_by + self.query.select + self.query.related_select_cols + extra_selects) seen = set() for col in cols: if col in seen: continue seen.add(col) if isinstance(col, (list, tuple)): result.append('%s.%s' % (qn(col[0]), qn(col[1]))) elif hasattr(col, 'as_sql'): result.append(col.as_sql(qn, self.connection)) else: result.append('(%s)' % str(col)) return result, params def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, used=None, requested=None, restricted=None, nullable=None, dupe_set=None, avoid_set=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ if not restricted and self.query.max_depth and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. 
return if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() self.query.related_select_cols = [] self.query.related_select_fields = [] if not used: used = set() if dupe_set is None: dupe_set = set() if avoid_set is None: avoid_set = set() orig_dupe_set = dupe_set # Setup for the case when only particular related fields should be # included in the related selection. if requested is None: if isinstance(self.query.select_related, dict): requested = self.query.select_related restricted = True else: restricted = False for f, model in opts.get_fields_with_model(): if not select_related_descend(f, restricted, requested): continue # The "avoid" set is aliases we want to avoid just for this # particular branch of the recursion. They aren't permanently # forbidden from reuse in the related selection tables (which is # what "used" specifies). avoid = avoid_set.copy() dupe_set = orig_dupe_set.copy() table = f.rel.to._meta.db_table promote = nullable or f.null if model: int_opts = opts alias = root_alias alias_chain = [] for int_model in opts.get_base_chain(model): # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not int_opts.parents[int_model]: int_opts = int_model._meta continue lhs_col = int_opts.parents[int_model].column dedupe = lhs_col in opts.duplicate_targets if dedupe: avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col), ())) dupe_set.add((opts, lhs_col)) int_opts = int_model._meta alias = self.query.join((alias, int_opts.db_table, lhs_col, int_opts.pk.column), exclusions=used, promote=promote) alias_chain.append(alias) for (dupe_opts, dupe_col) in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) if self.query.alias_map[root_alias].join_type == self.query.LOUTER: self.query.promote_alias_chain(alias_chain, True) else: alias = root_alias dedupe = f.column in opts.duplicate_targets if dupe_set or dedupe: avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ())) if dedupe: dupe_set.add((opts, f.column)) alias = self.query.join((alias, table, f.column, f.rel.get_related_field().column), exclusions=used.union(avoid), promote=promote) used.add(alias) columns, aliases = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta, as_pairs=True) self.query.related_select_cols.extend(columns) if self.query.alias_map[alias].join_type == self.query.LOUTER: self.query.promote_alias_chain(aliases, True) self.query.related_select_fields.extend(f.rel.to._meta.fields) if restricted: next = requested.get(f.name, {}) else: next = False new_nullable = f.null or promote for dupe_opts, dupe_col in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1, used, next, restricted, new_nullable, dupe_set, avoid) if restricted: related_fields = [ (o.field, o.model) for o in opts.get_all_related_objects() if o.field.unique ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, reverse=True): continue # The "avoid" set is aliases we want to avoid just for this # particular branch of the recursion. They aren't permanently # forbidden from reuse in the related selection tables (which is # what "used" specifies). 
avoid = avoid_set.copy() dupe_set = orig_dupe_set.copy() table = model._meta.db_table int_opts = opts alias = root_alias alias_chain = [] chain = opts.get_base_chain(f.rel.to) if chain is not None: for int_model in chain: # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not int_opts.parents[int_model]: int_opts = int_model._meta continue lhs_col = int_opts.parents[int_model].column dedupe = lhs_col in opts.duplicate_targets if dedupe: avoid.update((self.query.dupe_avoidance.get(id(opts), lhs_col), ())) dupe_set.add((opts, lhs_col)) int_opts = int_model._meta alias = self.query.join( (alias, int_opts.db_table, lhs_col, int_opts.pk.column), exclusions=used, promote=True, reuse=used ) alias_chain.append(alias) for dupe_opts, dupe_col in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) dedupe = f.column in opts.duplicate_targets if dupe_set or dedupe: avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ())) if dedupe: dupe_set.add((opts, f.column)) alias = self.query.join( (alias, table, f.rel.get_related_field().column, f.column), exclusions=used.union(avoid), promote=True ) used.add(alias) columns, aliases = self.get_default_columns(start_alias=alias, opts=model._meta, as_pairs=True, local_only=True) self.query.related_select_cols.extend(columns) self.query.related_select_fields.extend(model._meta.fields) next = requested.get(f.related_query_name(), {}) new_nullable = f.null or None self.fill_related_selections(model._meta, table, cur_depth+1, used, next, restricted, new_nullable) def deferred_to_columns(self): """ Converts the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Returns the dictionary. """ columns = {} self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb) return columns def results_iter(self): """ Returns an iterator over the results from executing this query. """ resolve_columns = hasattr(self, 'resolve_columns') fields = None has_aggregate_select = bool(self.query.aggregate_select) # Set transaction dirty if we're using SELECT FOR UPDATE to ensure # a subsequent commit/rollback is executed, so any database locks # are released. if self.query.select_for_update and transaction.is_managed(self.using): transaction.set_dirty(self.using) for rows in self.execute_sql(MULTI): for row in rows: if resolve_columns: if fields is None: # We only set this up here because # related_select_fields isn't populated until # execute_sql() has been called. if self.query.select_fields: fields = self.query.select_fields + self.query.related_select_fields else: fields = self.query.model._meta.fields # If the field was deferred, exclude it from being passed # into `resolve_columns` because it wasn't selected. 
only_load = self.deferred_to_columns() if only_load: db_table = self.query.model._meta.db_table fields = [f for f in fields if db_table in only_load and f.column in only_load[db_table]] row = self.resolve_columns(row, fields) if has_aggregate_select: aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select) aggregate_end = aggregate_start + len(self.query.aggregate_select) row = tuple(row[:aggregate_start]) + tuple([ self.query.resolve_aggregate(value, aggregate, self.connection) for (alias, aggregate), value in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end]) ]) + tuple(row[aggregate_end:]) yield row def execute_sql(self, result_type=MULTI): """ Run the query against the database and returns the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return empty_iter() else: return cursor = self.connection.cursor() cursor.execute(sql, params) if not result_type: return cursor if result_type == SINGLE: if self.query.ordering_aliases: return cursor.fetchone()[:-len(self.query.ordering_aliases)] return cursor.fetchone() # The MULTI case. if self.query.ordering_aliases: result = order_modified_iter(cursor, len(self.query.ordering_aliases), self.connection.features.empty_fetchmany_value) else: result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), self.connection.features.empty_fetchmany_value) if not self.connection.features.can_use_chunked_reads: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. return list(result) return result class SQLInsertCompiler(SQLCompiler): def placeholder(self, field, val): if field is None: # A field value of None means the value is raw. return val elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. return field.get_placeholder(val, self.connection) else: # Return the common case for the placeholder return '%s' def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.model._meta result = ['INSERT INTO %s' % qn(opts.db_table)] has_fields = bool(self.query.fields) fields = self.query.fields if has_fields else [opts.pk] result.append('(%s)' % ', '.join([qn(f.column) for f in fields])) if has_fields: params = values = [ [ f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection) for f in fields ] for obj in self.query.objs ] else: values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs] params = [[]] fields = [None] can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and not self.return_id and self.connection.features.has_bulk_insert) if can_bulk: placeholders = [["%s"] * len(fields)] else: placeholders = [ [self.placeholder(field, v) for field, v in zip(fields, val)] for val in values ] if self.return_id and self.connection.features.can_return_id_from_insert: params = params[0] col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) result.append("VALUES (%s)" % ", ".join(placeholders[0])) r_fmt, r_params = self.connection.ops.return_insert_id() result.append(r_fmt % col) params += r_params return [(" ".join(result), tuple(params))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, len(values))) return [(" ".join(result), tuple([v for val in values for v in val]))] else: return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholders, params) ] def execute_sql(self, return_id=False): assert not (return_id and len(self.query.objs) != 1) self.return_id = return_id cursor = self.connection.cursor() for sql, params in self.as_sql(): cursor.execute(sql, params) if not (return_id and cursor): return if self.connection.features.can_return_id_from_insert: return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id(cursor, self.query.model._meta.db_table, self.query.model._meta.pk.column) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Creates the SQL for this query. Returns the SQL string and list of parameters. """ assert len(self.query.tables) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.tables[0])] where, params = self.query.where.as_sql(qn=qn, connection=self.connection) result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Creates the SQL for this query. Returns the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () table = self.query.tables[0] qn = self.quote_name_unless_alias result = ['UPDATE %s' % qn(table)] result.append('SET') values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'prepare_database_save'): val = val.prepare_database_save(field) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. 
if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self.connection) else: placeholder = '%s' if hasattr(val, 'evaluate'): val = SQLEvaluator(val, self.query, allow_joins=False) name = field.column if hasattr(val, 'as_sql'): sql, params = val.as_sql(qn, self.connection) values.append('%s = %s' % (qn(name), sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) if not values: return '', () result.append(', '.join(values)) where, params = self.query.where.as_sql(qn=qn, connection=self.connection) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Returns the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) rows = cursor and cursor.rowcount or 0 is_empty = cursor is None del cursor for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ self.query.select_related = False self.query.clear_ordering(True) super(SQLUpdateCompiler, self).pre_sql_setup() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return # We need to use a sub-select in the where clause to filter on things # from other tables. query = self.query.clone(klass=Query) query.bump_prefix() query.extra = {} query.select = [] query.add_fields([query.model._meta.pk.name]) # Recheck the count - it is possible that fiddling with the select # fields above removes tables from the query. Refs #18304. count = query.count_active_tables() if not self.query.related_updates and count == 1: return must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend([r[0] for r in rows]) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) for alias in self.query.tables[1:]: self.query.alias_refcount[alias] = 0 class SQLAggregateCompiler(SQLCompiler): def as_sql(self, qn=None): """ Creates the SQL for this query. Returns the SQL string and list of parameters. 
""" if qn is None: qn = self.quote_name_unless_alias sql = ('SELECT %s FROM (%s) subquery' % ( ', '.join([ aggregate.as_sql(qn, self.connection) for aggregate in self.query.aggregate_select.values() ]), self.query.subquery) ) params = self.query.sub_params return (sql, params) class SQLDateCompiler(SQLCompiler): def results_iter(self): """ Returns an iterator over the results from executing this query. """ resolve_columns = hasattr(self, 'resolve_columns') if resolve_columns: from django.db.models.fields import DateTimeField fields = [DateTimeField()] else: from django.db.backends.util import typecast_timestamp needs_string_cast = self.connection.features.needs_datetime_string_cast offset = len(self.query.extra_select) for rows in self.execute_sql(MULTI): for row in rows: date = row[offset] if resolve_columns: date = self.resolve_columns(row, fields)[offset] elif needs_string_cast: date = typecast_timestamp(str(date)) yield date def empty_iter(): """ Returns an iterator containing no results. """ yield next(iter([])) def order_modified_iter(cursor, trim, sentinel): """ Yields blocks of rows from a cursor. We use this iterator in the special case when extra output columns have been added to support ordering requirements. We must trim those extra columns before anything else can use the results, since they're only needed to make the SQL valid. """ for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), sentinel): yield [r[:-trim] for r in rows]
{ "repo_name": "cobalys/django", "path": "django/db/models/sql/compiler.py", "copies": "2", "size": "48514", "license": "bsd-3-clause", "hash": -4059514072778123000, "line_mean": 42.8247515808, "line_max": 136, "alpha_frac": 0.5527270479, "autogenerated": false, "ratio": 4.314273010226767, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001189646255329693, "num_lines": 1107 }
from future_builtins import zip from django.db.backends.util import truncate_name, typecast_timestamp from django.db.models.sql import compiler from django.db.models.sql.constants import MULTI SQLCompiler = compiler.SQLCompiler class GeoSQLCompiler(compiler.SQLCompiler): def get_columns(self, with_aliases=False): """ Return the list of columns to use in the select statement. If no columns have been specified, returns all columns relating to fields in the model. If 'with_aliases' is true, any column names that are duplicated (without the table names) are given unique aliases. This is needed in some cases to avoid ambiguitity with nested queries. This routine is overridden from Query to handle customized selection of geometry columns. """ qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()] aliases = set(self.query.extra_select.keys()) if with_aliases: col_aliases = aliases.copy() else: col_aliases = set() if self.query.select: only_load = self.deferred_to_columns() # This loop customized for GeoQuery. for col, field in zip(self.query.select, self.query.select_fields): if isinstance(col, (list, tuple)): alias, column = col table = self.query.alias_map[alias].table_name if table in only_load and column not in only_load[table]: continue r = self.get_field_select(field, alias, column) if with_aliases: if col[1] in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append('%s AS %s' % (r, qn2(col[1]))) aliases.add(r) col_aliases.add(col[1]) else: result.append(r) aliases.add(r) col_aliases.add(col[1]) else: result.append(col.as_sql(qn, self.connection)) if hasattr(col, 'alias'): aliases.add(col.alias) col_aliases.add(col.alias) elif self.query.default_cols: cols, new_aliases = self.get_default_columns(with_aliases, col_aliases) result.extend(cols) aliases.update(new_aliases) max_name_length = self.connection.ops.max_name_length() result.extend([ '%s%s' % ( self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection), alias is not None and ' AS %s' % qn(truncate_name(alias, max_name_length)) or '' ) for alias, aggregate in self.query.aggregate_select.items() ]) # This loop customized for GeoQuery. for (table, col), field in zip(self.query.related_select_cols, self.query.related_select_fields): r = self.get_field_select(field, table, col) if with_aliases and col in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (r, c_alias)) aliases.add(c_alias) col_aliases.add(c_alias) else: result.append(r) aliases.add(r) col_aliases.add(col) self._select_aliases = aliases return result def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). 
This routine is overridden from Query to handle customized selection of geometry columns. """ result = [] if opts is None: opts = self.query.model._meta aliases = set() only_load = self.deferred_to_columns() # Skip all proxy to the root proxied model proxied_model = opts.concrete_model if start_alias: seen = {None: start_alias} for field, model in opts.get_fields_with_model(): if local_only and model is not None: continue if start_alias: try: alias = seen[model] except KeyError: if model is proxied_model: alias = start_alias else: link_field = opts.get_ancestor_link(model) alias = self.query.join((start_alias, model._meta.db_table, link_field.column, model._meta.pk.column)) seen[model] = alias else: # If we're starting from the base model of the queryset, the # aliases will have already been set up in pre_sql_setup(), so # we can save time here. alias = self.query.included_inherited_models[model] table = self.query.alias_map[alias].table_name if table in only_load and field.column not in only_load[table]: continue if as_pairs: result.append((alias, field.column)) aliases.add(alias) continue # This part of the function is customized for GeoQuery. We # see if there was any custom selection specified in the # dictionary, and set up the selection format appropriately. field_sel = self.get_field_select(field, alias) if with_aliases and field.column in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (field_sel, c_alias)) col_aliases.add(c_alias) aliases.add(c_alias) else: r = field_sel result.append(r) aliases.add(r) if with_aliases: col_aliases.add(field.column) return result, aliases def resolve_columns(self, row, fields=()): """ This routine is necessary so that distances and geometries returned from extra selection SQL get resolved appropriately into Python objects. """ values = [] aliases = self.query.extra_select.keys() # Have to set a starting row number offset that is used for # determining the correct starting row index -- needed for # doing pagination with Oracle. rn_offset = 0 if self.connection.ops.oracle: if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1 index_start = rn_offset + len(aliases) # Converting any extra selection values (e.g., geometries and # distance objects added by GeoQuerySet methods). values = [self.query.convert_values(v, self.query.extra_select_fields.get(a, None), self.connection) for v, a in zip(row[rn_offset:index_start], aliases)] if self.connection.ops.oracle or getattr(self.query, 'geo_values', False): # We resolve the rest of the columns if we're on Oracle or if # the `geo_values` attribute is defined. for value, field in map(None, row[index_start:], fields): values.append(self.query.convert_values(value, field, self.connection)) else: values.extend(row[index_start:]) return tuple(values) #### Routines unique to GeoQuery #### def get_extra_select_format(self, alias): sel_fmt = '%s' if hasattr(self.query, 'custom_select') and alias in self.query.custom_select: sel_fmt = sel_fmt % self.query.custom_select[alias] return sel_fmt def get_field_select(self, field, alias=None, column=None): """ Returns the SELECT SQL string for the given field. Figures out if any custom selection SQL is needed for the column The `alias` keyword may be used to manually specify the database table where the column exists, if not in the model associated with this `GeoQuery`. Similarly, `column` may be used to specify the exact column name, rather than using the `column` attribute on `field`. 
""" sel_fmt = self.get_select_format(field) if field in self.query.custom_select: field_sel = sel_fmt % self.query.custom_select[field] else: field_sel = sel_fmt % self._field_column(field, alias, column) return field_sel def get_select_format(self, fld): """ Returns the selection format string, depending on the requirements of the spatial backend. For example, Oracle and MySQL require custom selection formats in order to retrieve geometries in OGC WKT. For all other fields a simple '%s' format string is returned. """ if self.connection.ops.select and hasattr(fld, 'geom_type'): # This allows operations to be done on fields in the SELECT, # overriding their values -- used by the Oracle and MySQL # spatial backends to get database values as WKT, and by the # `transform` method. sel_fmt = self.connection.ops.select # Because WKT doesn't contain spatial reference information, # the SRID is prefixed to the returned WKT to ensure that the # transformed geometries have an SRID different than that of the # field -- this is only used by `transform` for Oracle and # SpatiaLite backends. if self.query.transformed_srid and ( self.connection.ops.oracle or self.connection.ops.spatialite ): sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt) else: sel_fmt = '%s' return sel_fmt # Private API utilities, subject to change. def _field_column(self, field, table_alias=None, column=None): """ Helper function that returns the database column for the given field. The table and column are returned (quoted) in the proper format, e.g., `"geoapp_city"."point"`. If `table_alias` is not specified, the database table associated with the model of this `GeoQuery` will be used. If `column` is specified, it will be used instead of the value in `field.column`. """ if table_alias is None: table_alias = self.query.model._meta.db_table return "%s.%s" % (self.quote_name_unless_alias(table_alias), self.connection.ops.quote_name(column or field.column)) class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler): pass class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler): pass class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler): pass class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler): pass class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler): """ This is overridden for GeoDjango to properly cast date columns, since `GeoQuery.resolve_columns` is used for spatial values. See #14648, #16757. """ def results_iter(self): if self.connection.ops.oracle: from django.db.models.fields import DateTimeField fields = [DateTimeField()] else: needs_string_cast = self.connection.features.needs_datetime_string_cast offset = len(self.query.extra_select) for rows in self.execute_sql(MULTI): for row in rows: date = row[offset] if self.connection.ops.oracle: date = self.resolve_columns(row, fields)[offset] elif needs_string_cast: date = typecast_timestamp(str(date)) yield date
{ "repo_name": "cobalys/django", "path": "django/contrib/gis/db/models/sql/compiler.py", "copies": "11", "size": "13144", "license": "bsd-3-clause", "hash": 8495881159486737000, "line_mean": 43.7074829932, "line_max": 105, "alpha_frac": 0.5747108947, "autogenerated": false, "ratio": 4.422611036339165, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
from FutureEvent import FutureEvent import hbase_data import redis_data import hashlib import datetime import calendar from FesException import FesException STORAGE_CUTOFF_MINUTES = 15 def generate_hash(id_): return hashlib.sha224(id_).hexdigest() def add(id_, expiration, payload): now = datetime.datetime.utcnow() if expiration <= calendar.timegm(now.utctimetuple()): raise FesException("Expiration must be in the future.") id_hash = generate_hash(id_) storage_cutoff_time = now + datetime.timedelta(minutes=STORAGE_CUTOFF_MINUTES) # check for an existing hbase record event = hbase_data.read_event(id_hash) #if the new expiration is within 15 minutes if expiration <= calendar.timegm(storage_cutoff_time.utctimetuple()): #delete hbase records if event is not None: hbase_data.delete_all(id_hash, event.expiration) #add to redis redis_data.add(id_hash, expiration, payload) else: #delete hbase index if event is not None: hbase_data.delete_from_expiration_index(id_hash, event.expiration) _move_event_to_hbase(id_hash, expiration, payload) return FutureEvent(id_hash, payload, expiration) def update_expiration(id_, expiration): now = datetime.datetime.utcnow() if expiration <= calendar.timegm(now.utctimetuple()): raise FesException("Expiration must be in the future.") id_hash = generate_hash(id_) storage_cutoff_time = now + datetime.timedelta(minutes=STORAGE_CUTOFF_MINUTES) #redis is faster, so check it first payload = redis_data.get_event_payload(id_hash) #it's already in redis if payload is not None: #the new expiration is within 15 minutes, just update redis expiration if expiration <= calendar.timegm(storage_cutoff_time.utctimetuple()): redis_data.update_expiration(id_hash, expiration) else: _move_event_to_hbase(id_hash, expiration, payload) return #retrieve event from hbase event = hbase_data.read_event(id_hash) #it's in hbase if event is not None: #the new expiration is not within 15 minutes, just update hbase expiration if expiration > calendar.timegm(storage_cutoff_time.utctimetuple()): hbase_data.delete_from_expiration_index(id_hash, event.expiration) hbase_data.add(id_hash, expiration, event.payload) else: move_event_to_redis(id_hash, expiration, event) return raise FesException("Event " + id_ + " not found.") def update_event_payload(id_, payload): id_hash = generate_hash(id_) #redis is faster, so check it first result = redis_data.get_event_payload(id_hash) if result is not None: redis_data.update_event(id_hash, payload) return future_event = hbase_data.read_event(id_hash) if future_event is not None: hbase_data.write_event(id_hash, future_event.expiration, payload) return raise FesException("No event found for id " + id_) def delete(id_): id_hash = generate_hash(id_) #delete from redis first event = redis_data.get_and_delete(id_hash) #if it wasn't in redis, delete from hbase if event is None: #retrieve expiration so we can delete the index entry event = hbase_data.read_event(id_hash) if event is not None: hbase_data.delete_all(id_hash, event.expiration) def _move_event_to_hbase(id_hash, expiration, payload): if payload is None: raise FesException("Error copying event " + id_hash + " to hbase. Data not found.") redis_data.delete(id_hash) hbase_data.add(id_hash, expiration, payload) def move_event_to_redis(id_hash, expiration, event): if event is None: raise FesException("Error copying event " + id_hash + " to redis. Data not found.") if expiration is None: expiration = event.expiration redis_data.add(id_hash, expiration, event.payload) hbase_data.delete_all(id_hash, event.expiration)
{ "repo_name": "erprice/fes", "path": "src/fes_controller.py", "copies": "1", "size": "4063", "license": "apache-2.0", "hash": -613819152939802400, "line_mean": 31.246031746, "line_max": 91, "alpha_frac": 0.6736401674, "autogenerated": false, "ratio": 3.707116788321168, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48807569557211683, "avg_score": null, "num_lines": null }
from .__future__gks import GKS
from .reg_gng import GNGregressor
from .reg_inn import ISOINNregressor
from numpy import array, zeros, argmin, sum

class npGNGregressor:
    age_max = 200
    nn_lambda = 20
    model = []
    reg = []
    GNG = True
    globus = False

    def __init__(self, age_max_ = 200, nn_lambda_ = 30, GNG = True, globus = False, randomize = False):
        self.age_max = age_max_
        self.nn_lambda = nn_lambda_
        self.GNG = GNG
        self.globus = globus
        if randomize:
            from random import randint as rand
            self.age_max = rand(100,500)
            self.nn_lambda = rand(10,60)

    def fit(self, X, y, sample_weight = None):
        if self.GNG:
            self.model = GNGregressor(age_max = self.age_max, nn_lambda = self.nn_lambda)
        else:
            self.model = ISOINNregressor(age_max = self.age_max, nn_lambda = self.nn_lambda, del_noise = False)
        self.model.fit(X, y)
        points = self.model.nodes
        populations = array(self.model.counts)
        if sample_weight is not None:
            # Joint (feature, target) vectors are needed to find each sample's
            # winning node, since the model nodes live in that joint space.
            train = [array(list(X[i]) + [y[i]]) for i in range(len(y))]
            populations = zeros(len(points))
            for i in range(len(sample_weight)):
                distances = sum((array(train[i] - points))**2, axis = -1)
                winner_index = argmin(distances)
                populations[winner_index] += sample_weight[i]
            populations = array(populations)
        variances = self.model.standard_deviation ** 0.5
        self.reg = GKS(points, populations, variances, 1, self.globus)
        return 0

    def predict(self, X):
        return self.reg.responses(X)

    def get_params(self, deep = False):
        return {}

    def set_params(self, random_state = False):
        return 0

if __name__ == '__main__':
#    GR = npGNGregressor( GNG=True, globus = True)
#    GR.fit(array([[1,2],[3,2],[4,2],[1,8]]),array([4,3,2,1]), sample_weight = [0.1, 0.3, 0.5, 0.1])
#    print GR.predict(array([[1,2],[3,2],[4,2],[1,8]]))
    from xzyutil.csv_reader import csv_reader
    from sklearn.ensemble import ExtraTreesRegressor as ET
    r1 = csv_reader('/Users/xzy/work/history/NNregression/4train.csv')
    r2 = csv_reader('/Users/xzy/work/history/NNregression/4test.csv')
    #data = array([[1,2,3],[2,3,4],[3,4,5],[5,6,7]])
    train_data, y = r1.down_sample_seperate(1)
    test_data, labels = r2.down_sample_seperate(1)
    print(len(train_data))
#    from sklearn.ensemble import AdaBoostRegressor as ABR
#
#    models = ABR(npGNGregressor(age_max_ = 200,nn_lambda_ = 40, GNG = True, globus = True))
    models = npGNGregressor(age_max_ = 200,nn_lambda_ = 60, GNG = True, globus = False)
#    models = ET()
    models.fit(train_data, y)
    pred_labels = models.predict(test_data)
    from sklearn.metrics import mean_squared_error
    print(mean_squared_error(labels, pred_labels))
{ "repo_name": "sbxzy/pygks", "path": "pygks/np_gngr.py", "copies": "1", "size": "3080", "license": "bsd-3-clause", "hash": -215199155265025600, "line_mean": 34.4022988506, "line_max": 109, "alpha_frac": 0.5853896104, "autogenerated": false, "ratio": 3.1269035532994924, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42122931636994926, "avg_score": null, "num_lines": null }
# `from __future__` has to be the very first thing in a module # otherwise a syntax error is raised from __future__ import annotations # type: ignore # noqa # Python 3.6 linters complain from dataclasses import dataclass, fields from enum import Enum from pytest import raises from omegaconf import OmegaConf, ValidationError class Height(Enum): SHORT = 0 TALL = 1 @dataclass class SimpleTypes: num: int = 10 pi: float = 3.1415 is_awesome: bool = True height: "Height" = Height.SHORT # test forward ref description: str = "text" def simple_types_class() -> None: # confirm that the type annotations are in fact stored as strings # i.e., that the `from future` import worked num_field = fields(SimpleTypes)[0] assert isinstance(num_field.type, str) assert num_field.type == "int" conf = OmegaConf.structured(SimpleTypes) assert conf.num == 10 assert conf.pi == 3.1415 assert conf.is_awesome is True assert conf.height == Height.SHORT assert conf.description == "text" def conversions() -> None: conf: SimpleTypes = OmegaConf.structured(SimpleTypes) conf.num = 20 conf.num = "20" # type: ignore assert conf.num == 20 with raises(ValidationError): # ValidationError: "one" cannot be converted to an integer conf.num = "one" # type: ignore
{ "repo_name": "omry/omegaconf", "path": "tests/examples/dataclass_postponed_annotations.py", "copies": "1", "size": "1363", "license": "bsd-3-clause", "hash": 458954972230041150, "line_mean": 25.7254901961, "line_max": 88, "alpha_frac": 0.6771826853, "autogenerated": false, "ratio": 3.7240437158469946, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49012264011469947, "avg_score": null, "num_lines": null }
from __future__ import absolute_import, absolute_import, unicode_literals import collections import gzip import json import logging import os import re import sys import tempfile import time import mopidy from mopidy import compat, local, models from mopidy.local import search, storage, translator from mopidy.utils import encoding logger = logging.getLogger(__name__) # TODO: move to load and dump in models? def load_library(json_file): if not os.path.isfile(json_file): logger.info( 'No local library metadata cache found at %s. Please run ' '`mopidy local scan` to index your local music library. ' 'If you do not have a local music collection, you can disable the ' 'local backend to hide this message.', json_file) return {} try: with gzip.open(json_file, 'rb') as fp: return json.load(fp, object_hook=models.model_json_decoder) except (IOError, ValueError) as error: logger.warning( 'Loading JSON local library failed: %s', encoding.locale_decode(error)) return {} def write_library(json_file, data): data['version'] = mopidy.__version__ directory, basename = os.path.split(json_file) # TODO: cleanup directory/basename.* files. tmp = tempfile.NamedTemporaryFile( prefix=basename + '.', dir=directory, delete=False) try: with gzip.GzipFile(fileobj=tmp, mode='wb') as fp: json.dump(data, fp, cls=models.ModelJSONEncoder, indent=2, separators=(',', ': ')) os.rename(tmp.name, json_file) finally: if os.path.exists(tmp.name): os.remove(tmp.name) class _BrowseCache(object): encoding = sys.getfilesystemencoding() splitpath_re = re.compile(r'([^/]+)') def __init__(self, uris): self._cache = { local.Library.ROOT_DIRECTORY_URI: collections.OrderedDict()} for track_uri in uris: path = translator.local_track_uri_to_path(track_uri, b'/') parts = self.splitpath_re.findall( path.decode(self.encoding, 'replace')) track_ref = models.Ref.track(uri=track_uri, name=parts.pop()) # Look for our parents backwards as this is faster than having to # do a complete search for each add. parent_uri = None child = None for i in reversed(range(len(parts))): directory = '/'.join(parts[:i+1]) uri = translator.path_to_local_directory_uri(directory) # First dir we process is our parent if not parent_uri: parent_uri = uri # We found ourselves and we exist, done. if uri in self._cache: if child: self._cache[uri][child.uri] = child break # Initialize ourselves, store child if present, and add # ourselves as child for next loop. self._cache[uri] = collections.OrderedDict() if child: self._cache[uri][child.uri] = child child = models.Ref.directory(uri=uri, name=parts[i]) else: # Loop completed, so final child needs to be added to root. if child: self._cache[ local.Library.ROOT_DIRECTORY_URI][child.uri] = child # If no parent was set we belong in the root. if not parent_uri: parent_uri = local.Library.ROOT_DIRECTORY_URI self._cache[parent_uri][track_uri] = track_ref def lookup(self, uri): return self._cache.get(uri, {}).values() # TODO: make this available to other code? 
class DebugTimer(object): def __init__(self, msg): self.msg = msg self.start = None def __enter__(self): self.start = time.time() def __exit__(self, exc_type, exc_value, traceback): duration = (time.time() - self.start) * 1000 logger.debug('%s: %dms', self.msg, duration) class JsonLibrary(local.Library): name = 'json' def __init__(self, config): self._tracks = {} self._browse_cache = None self._media_dir = config['local']['media_dir'] self._json_file = os.path.join( config['local']['data_dir'], b'library.json.gz') storage.check_dirs_and_files(config) def browse(self, uri): if not self._browse_cache: return [] return self._browse_cache.lookup(uri) def load(self): logger.debug('Loading library: %s', self._json_file) with DebugTimer('Loading tracks'): library = load_library(self._json_file) self._tracks = dict((t.uri, t) for t in library.get('tracks', [])) with DebugTimer('Building browse cache'): self._browse_cache = _BrowseCache(sorted(self._tracks.keys())) return len(self._tracks) def lookup(self, uri): try: return [self._tracks[uri]] except KeyError: return [] def search(self, query=None, limit=100, offset=0, uris=None, exact=False): tracks = self._tracks.values() # TODO: pass limit and offset into search helpers if exact: return search.find_exact(tracks, query=query, uris=uris) else: return search.search(tracks, query=query, uris=uris) def begin(self): return compat.itervalues(self._tracks) def add(self, track): self._tracks[track.uri] = track def remove(self, uri): self._tracks.pop(uri, None) def close(self): write_library(self._json_file, {'tracks': self._tracks.values()}) def clear(self): try: os.remove(self._json_file) return True except OSError: return False
{ "repo_name": "jcass77/mopidy", "path": "mopidy/local/json.py", "copies": "2", "size": "5971", "license": "apache-2.0", "hash": 6176580732223044000, "line_mean": 31.6284153005, "line_max": 79, "alpha_frac": 0.5732708089, "autogenerated": false, "ratio": 4.08133971291866, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.565461052181866, "avg_score": null, "num_lines": null }
from __future__ import absolute_import, absolute_import, unicode_literals import collections import gzip import json import logging import os import re import sys import tempfile import mopidy from mopidy import compat, local, models from mopidy.internal import encoding, timer from mopidy.local import search, storage, translator logger = logging.getLogger(__name__) # TODO: move to load and dump in models? def load_library(json_file): if not os.path.isfile(json_file): logger.info( 'No local library metadata cache found at %s. Please run ' '`mopidy local scan` to index your local music library. ' 'If you do not have a local music collection, you can disable the ' 'local backend to hide this message.', json_file) return {} try: with gzip.open(json_file, 'rb') as fp: return json.load(fp, object_hook=models.model_json_decoder) except (IOError, ValueError) as error: logger.warning( 'Loading JSON local library failed: %s', encoding.locale_decode(error)) return {} def write_library(json_file, data): data['version'] = mopidy.__version__ directory, basename = os.path.split(json_file) # TODO: cleanup directory/basename.* files. tmp = tempfile.NamedTemporaryFile( prefix=basename + '.', dir=directory, delete=False) try: with gzip.GzipFile(fileobj=tmp, mode='wb') as fp: json.dump(data, fp, cls=models.ModelJSONEncoder, indent=2, separators=(',', ': ')) os.rename(tmp.name, json_file) finally: if os.path.exists(tmp.name): os.remove(tmp.name) class _BrowseCache(object): encoding = sys.getfilesystemencoding() splitpath_re = re.compile(r'([^/]+)') def __init__(self, uris): self._cache = { local.Library.ROOT_DIRECTORY_URI: collections.OrderedDict()} for track_uri in uris: path = translator.local_track_uri_to_path(track_uri, b'/') parts = self.splitpath_re.findall( path.decode(self.encoding, 'replace')) track_ref = models.Ref.track(uri=track_uri, name=parts.pop()) # Look for our parents backwards as this is faster than having to # do a complete search for each add. parent_uri = None child = None for i in reversed(range(len(parts))): directory = '/'.join(parts[:i + 1]) uri = translator.path_to_local_directory_uri(directory) # First dir we process is our parent if not parent_uri: parent_uri = uri # We found ourselves and we exist, done. if uri in self._cache: if child: self._cache[uri][child.uri] = child break # Initialize ourselves, store child if present, and add # ourselves as child for next loop. self._cache[uri] = collections.OrderedDict() if child: self._cache[uri][child.uri] = child child = models.Ref.directory(uri=uri, name=parts[i]) else: # Loop completed, so final child needs to be added to root. if child: self._cache[ local.Library.ROOT_DIRECTORY_URI][child.uri] = child # If no parent was set we belong in the root. 
if not parent_uri: parent_uri = local.Library.ROOT_DIRECTORY_URI self._cache[parent_uri][track_uri] = track_ref def lookup(self, uri): return self._cache.get(uri, {}).values() class JsonLibrary(local.Library): name = 'json' def __init__(self, config): self._tracks = {} self._browse_cache = None self._media_dir = config['local']['media_dir'] self._json_file = os.path.join( config['local']['data_dir'], b'library.json.gz') storage.check_dirs_and_files(config) def browse(self, uri): if not self._browse_cache: return [] return self._browse_cache.lookup(uri) def load(self): logger.debug('Loading library: %s', self._json_file) with timer.time_logger('Loading tracks'): library = load_library(self._json_file) self._tracks = dict((t.uri, t) for t in library.get('tracks', [])) with timer.time_logger('Building browse cache'): self._browse_cache = _BrowseCache(sorted(self._tracks.keys())) return len(self._tracks) def lookup(self, uri): try: return [self._tracks[uri]] except KeyError: return [] def get_distinct(self, field, query=None): if field == 'track': def distinct(track): return {track.name} elif field == 'artist': def distinct(track): return {a.name for a in track.artists} elif field == 'albumartist': def distinct(track): album = track.album or models.Album() return {a.name for a in album.artists} elif field == 'album': def distinct(track): album = track.album or models.Album() return {album.name} elif field == 'composer': def distinct(track): return {a.name for a in track.composers} elif field == 'performer': def distinct(track): return {a.name for a in track.performers} elif field == 'date': def distinct(track): return {track.date} elif field == 'genre': def distinct(track): return {track.genre} else: return set() distinct_result = set() search_result = search.search(self._tracks.values(), query, limit=None) for track in search_result.tracks: distinct_result.update(distinct(track)) return distinct_result def search(self, query=None, limit=100, offset=0, uris=None, exact=False): tracks = self._tracks.values() if exact: return search.find_exact( tracks, query=query, limit=limit, offset=offset, uris=uris) else: return search.search( tracks, query=query, limit=limit, offset=offset, uris=uris) def begin(self): return compat.itervalues(self._tracks) def add(self, track): self._tracks[track.uri] = track def remove(self, uri): self._tracks.pop(uri, None) def close(self): write_library(self._json_file, {'tracks': self._tracks.values()}) def clear(self): try: os.remove(self._json_file) return True except OSError: return False
{ "repo_name": "rawdlite/mopidy", "path": "mopidy/local/json.py", "copies": "1", "size": "6973", "license": "apache-2.0", "hash": 8765051258541639000, "line_mean": 33.0146341463, "line_max": 79, "alpha_frac": 0.5605908504, "autogenerated": false, "ratio": 4.205669481302775, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 205 }
from __future__ import absolute_import #added# from djangojy.db.backends import BaseDatabaseIntrospection from com.ziclix.python.sql.zxJDBC import ProgrammingError, OperationalError from .MySQLdb_2.constants import FIELD_TYPE import re foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)") class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = { FIELD_TYPE.BLOB: 'TextField', FIELD_TYPE.CHAR: 'CharField', FIELD_TYPE.DECIMAL: 'DecimalField', FIELD_TYPE.NEWDECIMAL: 'DecimalField', FIELD_TYPE.DATE: 'DateField', FIELD_TYPE.DATETIME: 'DateTimeField', FIELD_TYPE.DOUBLE: 'FloatField', FIELD_TYPE.FLOAT: 'FloatField', FIELD_TYPE.INT24: 'IntegerField', FIELD_TYPE.LONG: 'IntegerField', FIELD_TYPE.LONGLONG: 'BigIntegerField', FIELD_TYPE.SHORT: 'IntegerField', FIELD_TYPE.STRING: 'CharField', FIELD_TYPE.TIMESTAMP: 'DateTimeField', FIELD_TYPE.TINY: 'IntegerField', FIELD_TYPE.TINY_BLOB: 'TextField', FIELD_TYPE.MEDIUM_BLOB: 'TextField', FIELD_TYPE.LONG_BLOB: 'TextField', FIELD_TYPE.VAR_STRING: 'CharField', } def get_table_list(self, cursor): "Returns a list of table names in the current database." cursor.execute("SHOW TABLES") return [row[0] for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return cursor.description def _name_to_index(self, cursor, table_name): """ Returns a dictionary of {field_name: field_index} for the given table. Indexes are 0-based. """ return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))]) def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. """ my_field_dict = self._name_to_index(cursor, table_name) constraints = self.get_key_columns(cursor, table_name) relations = {} for my_fieldname, other_table, other_field in constraints: other_field_index = self._name_to_index(cursor, other_table)[other_field] my_field_index = my_field_dict[my_fieldname] relations[my_field_index] = (other_field_index, other_table) return relations def get_key_columns(self, cursor, table_name): """ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ key_columns = [] try: cursor.execute(""" SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""", [table_name]) key_columns.extend(cursor.fetchall()) except (ProgrammingError, OperationalError): # Fall back to "SHOW CREATE TABLE", for previous MySQL versions. # Go through all constraints and save the equal matches. 
cursor.execute("SHOW CREATE TABLE %s" % self.connection.ops.quote_name(table_name)) for row in cursor.fetchall(): pos = 0 while True: match = foreign_key_re.search(row[1], pos) if match == None: break pos = match.end() key_columns.append(match.groups()) return key_columns def get_primary_key_column(self, cursor, table_name): """ Returns the name of the primary key column for the given table """ for column in self.get_indexes(cursor, table_name).iteritems(): if column[1]['primary_key']: return column[0] return None def get_indexes(self, cursor, table_name): """ Returns a dictionary of fieldname -> infodict for the given table, where each infodict is in the format: {'primary_key': boolean representing whether it's the primary key, 'unique': boolean representing whether it's a unique index} """ cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) indexes = {} for row in cursor.fetchall(): indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])} return indexes
{ "repo_name": "rotoudjimaye/django-jy", "path": "src/main/python/djangojy/db/backends/mysql/introspection.py", "copies": "1", "size": "4972", "license": "bsd-3-clause", "hash": 4680904097527990000, "line_mean": 43, "line_max": 113, "alpha_frac": 0.6063958166, "autogenerated": false, "ratio": 4.1433333333333335, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5249729149933333, "avg_score": null, "num_lines": null }
from __future__ import absolute_import as _abs import ctypes as _ctypes import numpy as np from nnvm import symbol from nnvm._base import c_str, check_call, _LIB, c_array, nn_uint SessionHandle = _ctypes.c_void_p nn_float = _ctypes.c_float def _get_numpy(cptr, dtype, shape): if dtype != 0: raise ValueError("only float32 is supported so far") size = 1 for s in shape: size *= s if size != 0 and shape: dbuffer = (nn_float * size).from_address(_ctypes.addressof(cptr.contents)) return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape).copy() else: return None class Session(object): def __init__(self, config='cpu'): handle = SessionHandle() check_call(_LIB.NNSessionCreate(_ctypes.byref(handle), c_str(config))) self.handle = handle def __del__(self): check_call(_LIB.NNSessionClose(self.handle)) def run(self, fetch, feed_dict=None): if isinstance(fetch, list): fetch = symbol.Group(fetch) feed_dict = feed_dict if feed_dict else {} feed_placeholders = [] feed_dptr = [] feed_dtype = [] feed_shape_csr_ptr = [0] feed_shape_data = [] src_list = [] for k, v in feed_dict.items(): assert isinstance(k, symbol.Symbol) assert isinstance(v, np.ndarray) feed_placeholders.append(k.handle) # only convert to float32 for now source_array = np.ascontiguousarray(v, dtype=np.float32) # leep src_list alive for the period src_list.append(source_array) feed_dptr.append(source_array.ctypes.data_as(_ctypes.c_void_p)) feed_dtype.append(0) feed_shape_data.extend(source_array.shape) feed_shape_csr_ptr.append(len(feed_shape_data)) out_size = nn_uint() out_dptr = _ctypes.POINTER(_ctypes.POINTER(nn_float))() out_dtype = _ctypes.POINTER(nn_uint)() out_shape_ndim = _ctypes.POINTER(nn_uint)() out_shape_data = _ctypes.POINTER(_ctypes.POINTER(nn_uint))() check_call(_LIB.NNSessionRun( self.handle, fetch.handle, nn_uint(len(src_list)), c_array(_ctypes.c_void_p, feed_placeholders), c_array(_ctypes.c_void_p, feed_dptr), c_array(nn_uint, feed_dtype), c_array(nn_uint, feed_shape_csr_ptr), c_array(nn_uint, feed_shape_data), _ctypes.byref(out_size), _ctypes.byref(out_dptr), _ctypes.byref(out_dtype), _ctypes.byref(out_shape_ndim), _ctypes.byref(out_shape_data))) ret = [] for i in range(out_size.value): shape = tuple(out_shape_data[i][:out_shape_ndim[i]]) ret.append(_get_numpy(out_dptr[i], out_dtype[i], shape)) return ret[0] if len(ret) == 1 else ret
{ "repo_name": "tqchen/tinyflow", "path": "python/tinyflow/_session.py", "copies": "2", "size": "2899", "license": "apache-2.0", "hash": 2214621274525478400, "line_mean": 36.6493506494, "line_max": 82, "alpha_frac": 0.5857192135, "autogenerated": false, "ratio": 3.4553039332538735, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5041023146753874, "avg_score": null, "num_lines": null }
from __future__ import absolute_import as _abs

import json

from nnvm import symbol, graph


def infer_variable_shapes(net, feed_dict):
    """Infer the shape of all variables in the net.

    Parameters
    ----------
    net : tf.Symbol
        The symbolic network containing all the variables.

    feed_dict : dict
        dict of placeholder to known shape

    Returns
    -------
    Generator of (var, vname, vshape)
       Enables enumeration of variables in the net with corresponding name and shape.
    """
    g = graph.create(net)
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnode_row_ptr = jgraph["node_row_ptr"]
    jnodes = jgraph["nodes"]
    shape = [[]] * jnode_row_ptr[-1]
    nindex = {n['name']: i for i, n in enumerate(jnodes)}

    for k, v in feed_dict.items():
        node_name = k.attr("name")
        shape[jnode_row_ptr[nindex[node_name]]] = v
    g._set_json_attr("shape", shape, "list_shape")
    g = g.apply("InferShape")
    shape = g.json_attr("shape")
    for v in net.list_input_variables():
        vname = v.attr("name")
        vshape = shape[jnode_row_ptr[nindex[vname]]]
        if len(vshape) == 0:
            raise ValueError("not sufficient information in feed_dict")
        yield (v, vname, vshape)
{ "repo_name": "ZihengJiang/tinyflow", "path": "python/tinyflow/_util.py", "copies": "2", "size": "1278", "license": "apache-2.0", "hash": -3166209214087562000, "line_mean": 30.95, "line_max": 82, "alpha_frac": 0.6126760563, "autogenerated": false, "ratio": 3.5013698630136987, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5114045919313699, "avg_score": null, "num_lines": null }
from __future__ import absolute_import as _abs import os import sys if sys.version_info[0] == 3: import builtins as __builtin__ else: import __builtin__ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) if hasattr(__builtin__, "NNVM_BASE_PATH"): assert __builtin__.NNVM_BASE_PATH == curr_path else: __builtin__.NNVM_BASE_PATH = curr_path if hasattr(__builtin__, "NNVM_LIBRARY_NAME"): assert __builtin__.NNVM_LIBRARY_NAME == curr_path else: __builtin__.NNVM_LIBRARY_NAME = "libtinyflow" import ctypes as _ctypes from nnvm.name import NameManager from nnvm._base import c_str, check_call, _LIB from nnvm import symbol, graph from nnvm import _symbol_internal __all__ = ["float32", "placeholder", "Variable", "group", "initialize_all_variables", "gradients"] # data type table float32 = 0 # global list of all variable initializers _all_variable_inits = [] def Variable(init=None, name=None): name = NameManager.current.get(name, 'variable') v = symbol.Variable(name) if init is not None: if not isinstance(init, symbol.Symbol): raise TypeError("Expect initialization expression to be Symbol") _all_variable_inits.append(symbol.assign(v, init)) return v def initialize_all_variables(): global _all_variable_inits init_op = group(*_all_variable_inits) _all_variable_inits = [] return init_op def placeholder(dtype, shape=None, name=None): v = symbol.placeholder(name=name, dtype=dtype) return v def group(*inputs): x = _symbol_internal._nop() x._add_control_deps(symbol.Group(inputs)) return x def gradients(ys, xs, grad_ys=None): if isinstance(ys, list): ys = symbol.Group(ys) g = graph.create(ys) g._set_symbol_list_attr('grad_ys', ys) g._set_symbol_list_attr('grad_xs', xs) ny = len(ys.list_output_names()) if grad_ys is None: grad_ys = [symbol.ones_like(ys[i]) for i in range(ny)] g._set_symbol_list_attr('grad_ys_out_grad', grad_ys) sym = g.apply('Gradient').symbol nx = len(xs) if isinstance(xs, list) else len(xs.list_output_names()) ret = [sym[i] for i in range(nx)] return ret
{ "repo_name": "ZihengJiang/tinyflow", "path": "python/tinyflow/_base.py", "copies": "2", "size": "2201", "license": "apache-2.0", "hash": -5832916703788814000, "line_mean": 26.5125, "line_max": 76, "alpha_frac": 0.6583371195, "autogenerated": false, "ratio": 3.2415316642120766, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9890346396857541, "avg_score": 0.0019044773709071823, "num_lines": 80 }
from __future__ import absolute_import as _absolute_import import operator #! /usr/bin/env python ''' Copyright 2015 Tim Nonner Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' def plot(scenario,img_filename=None,resource_height=1.0,show_task_labels=True, color_prec_groups=False,hide_tasks=[],hide_resources=[],task_colors=dict(),fig_size=(15,5), vertical_text=False) : """ Plot the given solved scenario using matplotlib Args: scenario: scenario to plot msg: 0 means no feedback (default) during computation, 1 means feedback """ try : import matplotlib if img_filename is not None: matplotlib.use('Agg') import matplotlib.patches as patches, matplotlib.pyplot as plt except : raise Exception('ERROR: matplotlib is not installed') import random S = scenario # trivial connected components implementation to avoid # having to import other packages just for that def get_connected_components(edges) : comps = dict() for v,u in edges : if v not in comps and u not in comps : comps[v] = v comps[u] = v elif v in comps and u not in comps : comps[u] = comps[v] elif v not in comps and u in comps : comps[v] = comps[u] elif v in comps and u in comps and comps[v] != comps[u] : old_comp = comps[u] for w in comps : if comps[w] == old_comp : comps[w] = comps[v] # replace component identifiers by integers startting with 0 values = list(comps.values()) comps = { T : values.index(comps[T]) for T in comps } return comps tasks = [ T for T in S.tasks() if T not in hide_tasks ] # get connected components dict for coloring # each task is mapping to an integer number which corresponds # to its connected component edges = [ (T,T) for T in tasks ] if color_prec_groups : edges += [ (T,T_) for P in set(S.precs_lax()) | set(S.precs_tight()) \ for T in P.tasks() for T_ in P.tasks() \ if T in tasks and T_ in tasks ] comps = get_connected_components(edges) # color map colors = ['#7EA7D8','#A1D372','#EB4845','#7BCDC8','#FFF79A'] #pastel colors #colors = ['red','green','blue','yellow','orange','black','purple'] #basic colors colors += [ [ random.random() for i in range(3) ] for x in range(len(S.tasks())) ] #random colors color_map = { T : colors[comps[T]] for T in comps } # replace colors with fixed task colors for T in task_colors : color_map[T] = task_colors[T] hide_tasks_str = [ T for T in hide_tasks ] for T in scenario.tasks(): if hasattr(T,'plot_color'): if T['plot_color'] is not None: color_map[T] = T['plot_color'] else: hide_tasks_str.append(T) solution = S.solution() solution = [ (T,R,x,y) for (T,R,x,y) in solution if T not in hide_tasks_str ] #tasks of zero length are not plotted fig, ax = plt.subplots(1, 1, figsize=fig_size) resource_sizes_count = 0 visible_resources = [ R for R in S.resources() if R not in hide_resources ] if not visible_resources: raise Exception('ERROR: no resources to plot') total_resource_sizes = sum([ R.size for R 
in visible_resources ]) R_ticks = list() for R in visible_resources : if R.size is not None : resource_size = R.size else : resource_size = 1.0 R_ticks += [str(R.name)]*int(resource_size) # compute the levels of the tasks on one resource task_levels = dict() # get solution on resource sorted according to start time R_solution = [ (T,R_,x,y) for (T,R_,x,y) in solution if R_ == R ] R_solution = sorted(R_solution, key=lambda x : x[2]) # iteratively fill all levels on resource, start with empty fill level_fill = { i : 0 for i in range(int(resource_size)) } for T,R_,x,y in R_solution : sorted_levels = sorted(level_fill.items(), key = operator.itemgetter(1, 0)) # get the maximum resource requirement coeff = max([ RA[R] for RA in T.resources_req if R_ in RA ]) min_levels = [ level for level,fill in sorted_levels[:coeff] ] task_levels[T] = min_levels for level in min_levels : level_fill[level] += T.length # plot solution for (T,R,x,x_) in R_solution : for level in task_levels[T] : y = (total_resource_sizes-1-(resource_sizes_count+level))*resource_height ax.add_patch( patches.Rectangle( (x, y), # (x,y) max(x_-x,0.5), # width resource_height, # height color = color_map[T], alpha=0.6 ) ) if show_task_labels : if vertical_text: text_rotation = 90 y_ = y+0.9*resource_height else: text_rotation = 0 y_ = y+0.1*resource_height plt.text(x,y_,str(T.name),fontsize=14,color='black',rotation=text_rotation) resource_sizes_count += resource_size # format graph plt.title(str(S.name)) plt.yticks([ resource_height*x + resource_height/2.0 for x in range(len(R_ticks)) ],R_ticks[::-1]) plt.ylim(0,resource_sizes_count*resource_height)#resource_height*len(resources)) plt.xlim(0,max([ x_ for (I,R,x,x_) in solution if R in visible_resources ])) if img_filename is not None: fig.figsize=(1,1) plt.savefig(img_filename,dpi=200,bbox_inches='tight') else : plt.show()
{ "repo_name": "timnon/pyschedule", "path": "src/pyschedule/plotters/matplotlib.py", "copies": "1", "size": "5819", "license": "apache-2.0", "hash": 8876163126938412000, "line_mean": 34.9197530864, "line_max": 116, "alpha_frac": 0.6712493556, "autogenerated": false, "ratio": 3.138619201725998, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9048889372573291, "avg_score": 0.052195836950541315, "num_lines": 162 }
from __future__ import absolute_import as _


def array_shape_reveal(a):
    r"""
    Compute the shape of lazy arrays.

    Dask arrays might have unknown dimensions for performance reasons. This
    function computes those dimensions.
    """
    from numpy import any, isnan
    import dask.array as da

    if any(isnan(a.shape)):
        # Rebuild the dask array with known chunks.
        return da.Array(a.__dask_graph__(), a.name, _get_chunks(a), a.dtype)
    return a


def _get_shape_helper(a):
    from numpy import asarray

    s = asarray(a.shape, dtype=int)
    return s[len(s) * (None,) + (slice(None),)]


def _get_all_chunk_shapes(a):
    # Map every block to its own shape, appending one new axis that holds the
    # per-dimension sizes.
    return a.map_blocks(
        _get_shape_helper,
        dtype=int,
        chunks=tuple(len(c) * (1,) for c in a.chunks) + ((a.ndim,),),
        new_axis=a.ndim,
    )


def _get_chunks(a):
    # Collect, for each dimension, the concrete size of every chunk.
    cs = _get_all_chunk_shapes(a)
    c = []
    for i in range(a.ndim):
        s = a.ndim * [0] + [i]
        s[i] = slice(None)
        s = tuple(s)
        c.append(tuple(cs[s]))
    return tuple(c)
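# Hedged usage sketch (not part of the original module): boolean masking gives a
# dask array whose shape and chunk sizes are unknown (NaN), which is exactly the
# case array_shape_reveal resolves. Requires dask to be installed; the toy data
# is an assumption for illustration.
import dask.array as da

x = da.arange(10, chunks=5)
masked = x[x > 2]                      # shape and chunks now contain NaN placeholders
revealed = array_shape_reveal(masked)  # chunks are rebuilt with concrete sizes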
{ "repo_name": "Horta/limix", "path": "limix/_bits/dask.py", "copies": "1", "size": "1063", "license": "apache-2.0", "hash": 4960833297201923000, "line_mean": 21.6170212766, "line_max": 76, "alpha_frac": 0.5813734713, "autogenerated": false, "ratio": 3.182634730538922, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4264008201838922, "avg_score": null, "num_lines": null }
from __future__ import absolute_import as _ def set_coord(x, dim, values): r""" Assign a new coordinate or subset an existing one. """ if dim not in x.coords: return x.assign_coords(**{dim: list(values)}) return x.loc[{dim: x.get_index(dim).isin(values)}] def take(x, indices, dim): r""" Subset a data array on an arbitrary dimension. """ sl = [slice(None)] * x.ndim axis = next(i for i, d in enumerate(x.dims) if d == dim) sl[axis] = indices return x[tuple(sl)] def in_coords_dim(arr, k): return k in arr.coords or k in arr.dims def hint_aware_sel(x, **kwargs): from .._data import is_dim_hint, is_dim_name, dim_name_to_hint, dim_hint_to_name for k in kwargs.keys(): if in_coords_dim(x, k): continue if is_dim_name(k) or is_dim_hint(k): if in_coords_dim(x, dim_name_to_hint(k)): new_k = dim_name_to_hint(k) if new_k not in kwargs: kwargs[new_k] = kwargs[k] del kwargs[k] elif in_coords_dim(x, dim_hint_to_name(k)): new_k = dim_hint_to_name(k) if new_k not in kwargs: kwargs[new_k] = kwargs[k] del kwargs[k] return x.sel(**kwargs) def query(data, expr): from io import StringIO from tokenize import generate_tokens, OP, NAME tokens = list(generate_tokens(StringIO(expr).readline)) final_expr = "" last = None for t in tokens: if t.type == NAME: is_boolean = last is not None is_boolean = is_boolean and not (last.type == OP and _is_comp(last.string)) is_boolean = is_boolean and _is_boolean(t.string) if is_boolean: final_expr += _cast_boolean(t.string) else: final_expr += 'data["{}"]'.format(t.string) elif t.type == OP and _is_comp(t.string): final_expr += " {} ".format(t.string) else: final_expr += t.string last = t return eval("data.where(" + final_expr + ", drop=True)") def _is_comp(v): return v in set(["<", ">", "<=", ">=", "==", "!="]) def _is_boolean(v): return v.lower() in set(["and", "or", "not"]) def _cast_boolean(v): d = {"and": " & ", "or": " | ", "not": "~"} return d[v.lower()]
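# Hedged usage sketch (not part of the original module): `take` subsets along a
# named dimension and `query` evaluates a small comparison expression against a
# variable or coordinate, translating it into a `where(..., drop=True)` call.
# The toy DataArray and the coordinate name "pos" are assumptions for
# illustration.
import numpy as np
import xarray as xr

arr = xr.DataArray(np.arange(6.0), dims="pos", coords={"pos": [1, 2, 3, 4, 5, 6]})
subset = take(arr, [0, 2, 4], dim="pos")   # keep the first, third, and fifth entries
filtered = query(arr, "pos < 4")           # drop every position with coordinate >= 4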
{ "repo_name": "Horta/limix", "path": "limix/_bits/xarray.py", "copies": "1", "size": "2366", "license": "apache-2.0", "hash": -7909663682638015000, "line_mean": 28.2098765432, "line_max": 87, "alpha_frac": 0.5278951817, "autogenerated": false, "ratio": 3.286111111111111, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43140062928111106, "avg_score": null, "num_lines": null }
from __future__ import absolute_import  # Avoid importing `importlib` from this package.
from importlib import import_module

from django.utils.version import get_docs_version


def deconstructible(*args, **kwargs):
    """
    Class decorator that allows the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    if not args:
        return decorator
    return decorator(*args, **kwargs)
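# Hedged usage sketch (not part of the original module): a decorated class can
# report the import path and constructor arguments needed to rebuild it, which
# is what the migrations serializer relies on for values such as validators.
# The Multiple class below is a hypothetical example, not Django code.
@deconstructible
class Multiple(object):
    def __init__(self, factor):
        self.factor = factor

obj = Multiple(3)
path, args, kwargs = obj.deconstruct()   # ('<this module>.Multiple', (3,), {})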
{ "repo_name": "Sonicbids/django", "path": "django/utils/deconstruct.py", "copies": "8", "size": "2136", "license": "bsd-3-clause", "hash": -5372766799979252000, "line_mean": 35.8275862069, "line_max": 96, "alpha_frac": 0.5472846442, "autogenerated": false, "ratio": 4.821670428893905, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9368955073093906, "avg_score": null, "num_lines": null }
from __future__ import absolute_import  # Avoid importing `importlib` from this package.
from importlib import import_module


def deconstructible(*args, **kwargs):
    """
    Class decorator that allows the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values"
                    % (name, module_name))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    if not args:
        return decorator
    return decorator(*args, **kwargs)
{ "repo_name": "joebowen/movement_validation_cloud", "path": "djangodev/lib/python2.7/site-packages/django/utils/deconstruct.py", "copies": "70", "size": "2066", "license": "mit", "hash": 1522609695663373600, "line_mean": 35.8928571429, "line_max": 97, "alpha_frac": 0.5396902227, "autogenerated": false, "ratio": 4.884160756501182, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
from __future__ import absolute_import # Avoid importing `importlib` from this package. from contextlib import contextmanager import copy import imp from importlib import import_module import os import sys from django.core.exceptions import ImproperlyConfigured from django.utils import six def import_by_path(dotted_path, error_prefix=''): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImproperlyConfigured if something goes wrong. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: raise ImproperlyConfigured("%s%s doesn't look like a module path" % ( error_prefix, dotted_path)) try: module = import_module(module_path) except ImportError as e: msg = '%sError importing module %s: "%s"' % ( error_prefix, module_path, e) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) try: attr = getattr(module, class_name) except AttributeError: raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % ( error_prefix, module_path, class_name)) return attr @contextmanager def import_lock(): """ Context manager that aquires the import lock. """ imp.acquire_lock() try: yield finally: imp.release_lock() def autodiscover_modules(*args, **kwargs): """ Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. """ from django.apps import apps register_to = kwargs.get('register_to') for app_config in apps.get_app_configs(): # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) for module_to_search in args: import_module('%s.%s' % (app_config.name, module_to_search)) except: # Reset the model registry to the state before the last import as # this import will have to reoccur on the next request and this # could raise NotRegistered and AlreadyRegistered exceptions # (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have an admin module, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: continue # Else see if the finder knows of a loader. elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. 
for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
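# Hedged usage sketch (not part of the original module): import_by_path turns a
# dotted path (typically read from a settings value) into the object it names,
# raising ImproperlyConfigured with the given prefix when lookup fails. The
# dotted paths below are assumptions chosen only to show the call shape.
from django.utils.module_loading import import_by_path

send_mail = import_by_path('django.core.mail.send_mail')
backend_cls = import_by_path('django.core.mail.backends.console.EmailBackend',
                             error_prefix='EMAIL_BACKEND: ')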
{ "repo_name": "Beeblio/django", "path": "django/utils/module_loading.py", "copies": "1", "size": "5451", "license": "bsd-3-clause", "hash": 6342406540572824000, "line_mean": 35.34, "line_max": 93, "alpha_frac": 0.5663181068, "autogenerated": false, "ratio": 4.76486013986014, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.583117824666014, "avg_score": null, "num_lines": null }
from __future__ import absolute_import # Avoid importing `importlib` from this package. import copy from importlib import import_module import os import sys import warnings from django.core.exceptions import ImproperlyConfigured from django.utils import six from django.utils.deprecation import RemovedInDjango19Warning def import_string(dotted_path): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: msg = "%s doesn't look like a module path" % dotted_path six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) module = import_module(module_path) try: return getattr(module, class_name) except AttributeError: msg = 'Module "%s" does not define a "%s" attribute/class' % ( dotted_path, class_name) six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) def import_by_path(dotted_path, error_prefix=''): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImproperlyConfigured if something goes wrong. """ warnings.warn( 'import_by_path() has been deprecated. Use import_string() instead.', RemovedInDjango19Warning, stacklevel=2) try: attr = import_string(dotted_path) except ImportError as e: msg = '%sError importing module %s: "%s"' % ( error_prefix, dotted_path, e) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) return attr def autodiscover_modules(*args, **kwargs): """ Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. """ from django.apps import apps register_to = kwargs.get('register_to') for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module('%s.%s' % (app_config.name, module_to_search)) except: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise if sys.version_info[:2] >= (3, 3): if sys.version_info[:2] >= (3, 4): from importlib.util import find_spec as importlib_find else: from importlib import find_loader as importlib_find def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" try: package_name = package.__name__ package_path = package.__path__ except AttributeError: # package isn't a package. return False full_module_name = package_name + '.' + module_name return importlib_find(full_module_name, package_path) is not None else: import imp def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. 
return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: continue # Else see if the finder knows of a loader. elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
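# Hedged usage sketch (not part of the original module): import_string is the
# newer replacement for import_by_path and raises a plain ImportError on
# failure, so callers decide how to report the problem. The dotted path is an
# assumption used only for illustration.
from django.utils.module_loading import import_string

ValidationError = import_string('django.core.exceptions.ValidationError')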
{ "repo_name": "edevil/django", "path": "django/utils/module_loading.py", "copies": "21", "size": "6701", "license": "bsd-3-clause", "hash": -2585728956471038500, "line_mean": 37.7341040462, "line_max": 88, "alpha_frac": 0.5500671542, "autogenerated": false, "ratio": 4.859318346627991, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
from __future__ import absolute_import # Avoid importing `importlib` from this package. import copy from importlib import import_module import os import sys from django.utils import six def import_string(dotted_path): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: msg = "%s doesn't look like a module path" % dotted_path six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) module = import_module(module_path) try: return getattr(module, class_name) except AttributeError: msg = 'Module "%s" does not define a "%s" attribute/class' % ( dotted_path, class_name) six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) def autodiscover_modules(*args, **kwargs): """ Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. """ from django.apps import apps register_to = kwargs.get('register_to') for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module('%s.%s' % (app_config.name, module_to_search)) except: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise if sys.version_info[:2] >= (3, 3): if sys.version_info[:2] >= (3, 4): from importlib.util import find_spec as importlib_find else: from importlib import find_loader as importlib_find def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" try: package_name = package.__name__ package_path = package.__path__ except AttributeError: # package isn't a package. return False full_module_name = package_name + '.' + module_name return importlib_find(full_module_name, package_path) is not None else: import imp def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: continue # Else see if the finder knows of a loader. 
elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
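# Hedged usage sketch (not part of the original module): module_has_submodule
# answers "does this package contain that submodule?" without importing it,
# which is how autodiscover_modules decides whether an ImportError should be
# re-raised. The package/submodule pair is an assumption for illustration.
import django.contrib
from django.utils.module_loading import module_has_submodule

module_has_submodule(django.contrib, 'admin')         # True in a stock install
module_has_submodule(django.contrib, 'no_such_app')   # False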
{ "repo_name": "Sonicbids/django", "path": "django/utils/module_loading.py", "copies": "3", "size": "5894", "license": "bsd-3-clause", "hash": -7014068295624375000, "line_mean": 37.7763157895, "line_max": 88, "alpha_frac": 0.5318968442, "autogenerated": false, "ratio": 4.9075770191507075, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6939473863350708, "avg_score": null, "num_lines": null }
from __future__ import absolute_import # Avoid importing `importlib` from this package. import copy import imp from importlib import import_module import os import sys from django.core.exceptions import ImproperlyConfigured from django.utils import six def import_by_path(dotted_path, error_prefix=''): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImproperlyConfigured if something goes wrong. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: raise ImproperlyConfigured("%s%s doesn't look like a module path" % ( error_prefix, dotted_path)) try: module = import_module(module_path) except ImportError as e: msg = '%sError importing module %s: "%s"' % ( error_prefix, module_path, e) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) try: attr = getattr(module, class_name) except AttributeError: raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % ( error_prefix, module_path, class_name)) return attr def autodiscover_modules(*args, **kwargs): """ Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. """ from django.conf import settings register_to = kwargs.get('register_to') for app in settings.INSTALLED_APPS: mod = import_module(app) # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) for module_to_search in args: import_module('%s.%s' % (app, module_to_search)) except: # Reset the model registry to the state before the last import as # this import will have to reoccur on the next request and this # could raise NotRegistered and AlreadyRegistered exceptions # (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have an admin module, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(mod, module_to_search): raise def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: continue # Else see if the finder knows of a loader. elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. 
continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
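# Hedged usage sketch (not part of the original module): roughly how the admin
# app drives autodiscover_modules, passing its site object as the registry to
# roll back if an app's admin module fails to import. A fully configured Django
# project is assumed; this call is not taken from this file.
from django.contrib import admin
from django.utils.module_loading import autodiscover_modules

autodiscover_modules('admin', register_to=admin.site)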
{ "repo_name": "ericholscher/django", "path": "django/utils/module_loading.py", "copies": "9", "size": "5229", "license": "bsd-3-clause", "hash": 2660980560382559000, "line_mean": 36.8913043478, "line_max": 93, "alpha_frac": 0.5628227194, "autogenerated": false, "ratio": 4.797247706422018, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9860070425822018, "avg_score": null, "num_lines": null }
from __future__ import absolute_import # Avoid importing `importlib` from this package. import decimal import datetime from importlib import import_module import unicodedata from django.conf import settings from django.utils import dateformat, numberformat, datetime_safe from django.utils.encoding import force_str from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils import six from django.utils.translation import get_language, to_locale, check_for_language # format_cache is a mapping from (format_type, lang) to the format string. # By using the cache, it is possible to avoid running get_format_modules # repeatedly. _format_cache = {} _format_modules_cache = {} ISO_INPUT_FORMATS = { 'DATE_INPUT_FORMATS': ('%Y-%m-%d',), 'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M:%S.%f', '%H:%M'), 'DATETIME_INPUT_FORMATS': ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M', '%Y-%m-%d' ), } def reset_format_cache(): """Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed. """ global _format_cache, _format_modules_cache _format_cache = {} _format_modules_cache = {} def iter_format_modules(lang, format_module_path=None): """ Does the heavy lifting of finding format modules. """ if check_for_language(lang): format_locations = ['django.conf.locale.%s'] if format_module_path: format_locations.append(format_module_path + '.%s') format_locations.reverse() locale = to_locale(lang) locales = [locale] if '_' in locale: locales.append(locale.split('_')[0]) for location in format_locations: for loc in locales: try: yield import_module('%s.formats' % (location % loc)) except ImportError: pass def get_format_modules(lang=None, reverse=False): """ Returns a list of the format modules found """ if lang is None: lang = get_language() modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))) if reverse: return list(reversed(modules)) return modules def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ format_type = force_str(format_type) if use_l10n or (use_l10n is None and settings.USE_L10N): if lang is None: lang = get_language() cache_key = (format_type, lang) try: cached = _format_cache[cache_key] if cached is not None: return cached else: # Return the general setting by default return getattr(settings, format_type) except KeyError: for module in get_format_modules(lang): try: val = getattr(module, format_type) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: if isinstance(val, tuple): val = list(val) val.append(iso_input) _format_cache[cache_key] = val return val except AttributeError: pass _format_cache[cache_key] = None return getattr(settings, format_type) get_format_lazy = lazy(get_format, six.text_type, list, tuple) def date_format(value, format=None, use_l10n=None): """ Formats a datetime.date or datetime.datetime object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) def time_format(value, format=None, use_l10n=None): """ Formats a datetime.time object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False): """ Formats a numeric value using localization settings If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ if use_l10n or (use_l10n is None and settings.USE_L10N): lang = get_language() else: lang = None return numberformat.format( value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping ) def localize(value, use_l10n=None): """ Checks if value is a localizable type (date, number...) and returns it formatted as a string using current locale format. If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ if isinstance(value, bool): return mark_safe(six.text_type(value)) elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.datetime): return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n) elif isinstance(value, datetime.date): return date_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.time): return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n) else: return value def localize_input(value, default=None): """ Checks if an input value is a localizable type and returns it formatted with the appropriate formatting string of the current locale. """ if isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value) elif isinstance(value, datetime.datetime): value = datetime_safe.new_datetime(value) format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.date): value = datetime_safe.new_date(value) format = force_str(default or get_format('DATE_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.time): format = force_str(default or get_format('TIME_INPUT_FORMATS')[0]) return value.strftime(format) return value def sanitize_separators(value): """ Sanitizes a value according to the current decimal and thousand separator setting. Used with form field input. """ if settings.USE_L10N and isinstance(value, six.string_types): parts = [] decimal_separator = get_format('DECIMAL_SEPARATOR') if decimal_separator in value: value, decimals = value.split(decimal_separator, 1) parts.append(decimals) if settings.USE_THOUSAND_SEPARATOR: thousand_sep = get_format('THOUSAND_SEPARATOR') for replacement in set([ thousand_sep, unicodedata.normalize('NFKD', thousand_sep)]): value = value.replace(replacement, '') parts.append(value) value = '.'.join(reversed(parts)) return value
{ "repo_name": "ZhaoCJ/django", "path": "django/utils/formats.py", "copies": "5", "size": "8044", "license": "bsd-3-clause", "hash": -8841340743281124000, "line_mean": 37.1232227488, "line_max": 114, "alpha_frac": 0.6347588265, "autogenerated": false, "ratio": 3.8451242829827916, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0015296030443727627, "num_lines": 211 }
from __future__ import absolute_import # Avoid importing `importlib` from this package. import imp from importlib import import_module import os import sys from django.core.exceptions import ImproperlyConfigured from django.utils import six def import_by_path(dotted_path, error_prefix=''): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImproperlyConfigured if something goes wrong. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: raise ImproperlyConfigured("%s%s doesn't look like a module path" % ( error_prefix, dotted_path)) try: module = import_module(module_path) except ImportError as e: msg = '%sError importing module %s: "%s"' % ( error_prefix, module_path, e) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) try: attr = getattr(module, class_name) except AttributeError: raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % ( error_prefix, module_path, class_name)) return attr def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: continue # Else see if the finder knows of a loader. elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
{ "repo_name": "denisenkom/django", "path": "django/utils/module_loading.py", "copies": "2", "size": "3749", "license": "bsd-3-clause", "hash": 6843443665863941000, "line_mean": 36.49, "line_max": 93, "alpha_frac": 0.538010136, "autogenerated": false, "ratio": 4.900653594771242, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0003421993194086588, "num_lines": 100 }