function: string, lengths 11 to 56k
repo_name: string, lengths 5 to 60
features: sequence
def get_readme():
    with open('README.md') as file:
        return file.read()
tylerbutler/engineer
[ 23, 3, 23, 1, 1331695786 ]
def empty_ops(request): return Filter()
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def single_ops(request): return Filter({'roll': 27})
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_getitem(self, empty_ops, single_ops):
    with pytest.raises(KeyError):
        empty_ops['roll']
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_setitem(self, empty_ops):
    assert repr(empty_ops) == "Filter([])"
    empty_ops['meaning'] = 42
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_delitem(self, empty_ops, single_ops):
    with pytest.raises(KeyError):
        del empty_ops['roll']
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_length(self, empty_ops, single_ops):
    assert len(empty_ops) == 0
    assert len(single_ops) == 1
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_keys(self, empty_ops, single_ops):
    assert list(empty_ops.keys()) == []
    assert list(single_ops.keys()) == ['roll']
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_items(self, empty_ops, single_ops):
    assert list(empty_ops.items()) == []
    assert list(single_ops.items()) == [('roll', 27)]
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_values(self, empty_ops, single_ops):
    assert list(empty_ops.values()) == []
    assert list(single_ops.values()) == [27]
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_contains(self, single_ops):
    assert 'foo' not in single_ops
    assert 'roll' in single_ops
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_equality_inequality(self, empty_ops, single_ops):
    assert empty_ops == {}
    assert empty_ops != {'roll': 27}
    assert single_ops != {}
    assert single_ops == {'roll': 27}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_get(self, single_ops):
    assert single_ops.get('foo') is None
    assert single_ops.get('foo', 42) == 42
    assert single_ops.get('roll') == 27
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_clear(self, single_ops):
    assert len(single_ops.operations) == 1
    single_ops.clear()
    assert len(single_ops.operations) == 0
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_pop(self, single_ops):
    assert len(single_ops.operations) == 1
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_popitem(self, single_ops):
    assert len(single_ops.operations) == 1
    assert single_ops.popitem() == ('roll', 27)
    assert len(single_ops.operations) == 0
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_update(self, empty_ops, single_ops):
    assert len(empty_ops.operations) == 0
    empty_ops.update(name="Bob Dole")
    assert len(empty_ops.operations) == 1
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_setdefault(self, empty_ops):
    assert len(empty_ops.operations) == 0
    empty_ops.setdefault('fnord', 42)
    assert len(empty_ops.operations) == 1
    assert empty_ops.operations['fnord'] == 42
    empty_ops.setdefault('fnord', 27)
    assert len(empty_ops.operations) == 1
    assert empty_ops.operations['fnord'] == 42
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_ops_shallow_copy(self, single_ops):
    assert single_ops.operations == single_ops.copy().operations
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_operations_and_clean_merge(self):
    comb = Filter({'roll': 27}) & Filter({'foo': 42})
    assert comb.as_query == {'roll': 27, 'foo': 42}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_operations_and_operator_overlap(self):
    comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': {'$lte': 42}})
    assert comb.as_query == {'roll': {'$gte': 27, '$lte': 42}}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_paradoxical_condition(self):
    comb = Filter({'roll': 27}) & Filter({'roll': {'$lte': 42}})
    assert comb.as_query == {'roll': {'$eq': 27, '$lte': 42}}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_operations_or_clean_merge(self):
    comb = Filter({'roll': 27}) | Filter({'foo': 42})
    assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}]}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def test_operations_hard_and(self):
    comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'$and': [{'c': 3}]})
    assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
marrow/mongo
[ 20, 3, 20, 20, 1455762934 ]
def __init__(self, client, config, serializer, deserializer) -> None:
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response):
    if cls:
        return cls(pipeline_response, None, {})
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response):
    deserialized = self._deserialize('LoadBalancer', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list_all(
    self,
    **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def prepare_request(next_link=None):
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if not next_link:
        # Construct URL
        url = self.list_all.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
    else:
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list(
    self,
    resource_group_name: str,
    **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def prepare_request(next_link=None):
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if not next_link:
        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
    else:
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def main():
    # 1. How many prime numbers are there?
    # Hint: Check page 322
    message = "Iymdi ah rv urxxeqfi fjdjqv gu gzuqw clunijh."  # Encrypted with key "PRIMES"
    #print(decryptMessage(blank, blank)) # Fill in the blanks

    # 2. What are integers that are not prime called?
    # Hint: Check page 323
    message = "Vbmggpcw wlvx njr bhv pctqh emi psyzxf czxtrwdxr fhaugrd."  # Encrypted with key "NOTCALLEDEVENS"
    #print(decryptMessage(blank, blank)) # Fill in the blanks

    # 3. What are two algorithms for finding prime numbers?
    # Hint: Check page 323
    # Encrypted with key "ALGORITHMS"
    message = "Tsk hyzxl mdgzxwkpfz gkeo ob kpbz ngov gfv: bkpmd dtbwjqhu, eaegk cw Mkhfgsenseml, hzv Rlhwe-Ubsxwr."
    #print(decryptMessage(blank, blank)) # Fill in the blanks
JoseALermaIII/python-tutorials
[ 2, 3, 2, 10, 1475898535 ]
def set_calculator(eos_mod, kind, kind_opts):
    assert kind in kind_opts, (
        kind + ' is not a valid thermal calculator. ' +
        'You must select one of: ' + str(kind_opts))

    eos_mod._kind = kind

    if kind == 'GammaPowLaw':
        calc = _GammaPowLaw(eos_mod)
    elif kind == 'GammaShiftPowLaw':
        calc = _GammaShiftPowLaw(eos_mod)
    elif kind == 'GammaFiniteStrain':
        calc = _GammaFiniteStrain(eos_mod)
    else:
        raise NotImplementedError(kind + ' is not a valid GammaEos Calculator.')

    eos_mod._add_calculator(calc, calc_type='gamma')
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def __init__(self, kind='GammaPowLaw', natom=1, model_state={}):
    self._pre_init(natom=natom)

    set_calculator(self, kind, self._kind_opts)

    ref_compress_state = 'P0'
    ref_thermal_state = 'T0'
    ref_energy_type = 'E0'
    refstate.set_calculator(self, ref_compress_state=ref_compress_state,
                            ref_thermal_state=ref_thermal_state,
                            ref_energy_type=ref_energy_type)

    # self._set_ref_state()
    self._post_init(model_state=model_state)
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _set_ref_state(self):
    calc = self.calculators['gamma']
    path_const = calc.path_const

    if path_const == 'S':
        param_ref_names = []
        param_ref_units = []
        param_ref_defaults = []
        param_ref_scales = []
    else:
        raise NotImplementedError(
            'path_const ' + path_const + ' is not valid for ThermalEos.')

    self._path_const = calc.path_const
    self._param_ref_names = param_ref_names
    self._param_ref_units = param_ref_units
    self._param_ref_defaults = param_ref_defaults
    self._param_ref_scales = param_ref_scales
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def gamma_deriv(self, V_a):
    gamma_deriv_a = self.calculators['gamma']._calc_gamma_deriv(V_a)
    return gamma_deriv_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def __init__(self, eos_mod):
    self._eos_mod = eos_mod
    self._init_params()
    self._path_const = 'S'
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def path_const( self ): return self._path_const
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _init_params(self):
    """Initialize list of calculator parameter names."""
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_gamma(self, V_a): pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_gamma_deriv(self, V_a): pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_temp(self, V_a, T0=None): pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
    scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True)
    scale = scale_a[paramkey_a == paramname][0]
    # print 'scale: ' + np.str(scale)

    # if (paramname is 'E0') and (fname is 'energy'):
    #     return np.ones(V_a.shape)
    try:
        fun = getattr(self, fname)
        # Note that self is implicitly included
        val0_a = fun(V_a)
    except:
        assert False, 'That is not a valid function name ' + \
            '(e.g. it should be press or energy)'

    try:
        param = core.get_params([paramname])[0]
        dparam = scale*dxfrac
        # print 'param: ' + np.str(param)
        # print 'dparam: ' + np.str(dparam)
    except:
        assert False, 'This is not a valid parameter name'

    # set param value in eos_d dict
    core.set_params([paramname,], [param+dparam,])

    # Note that self is implicitly included
    dval_a = fun(V_a) - val0_a

    # reset param to original value
    core.set_params([paramname], [param])

    deriv_a = dval_a/dxfrac
    return deriv_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def __init__(self, eos_mod):
    super(_GammaPowLaw, self).__init__(eos_mod)
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_gamma(self, V_a):
    V0, gamma0, q = self.eos_mod.get_param_values(
        param_names=['V0','gamma0','q'])
    gamma_a = gamma0*(V_a/V0)**q
    return gamma_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_temp(self, V_a, T0=None):
    if T0 is None:
        T0 = self.eos_mod.refstate.ref_temp()
    # T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
    gamma0, q = self.eos_mod.get_param_values(
        param_names=['gamma0','q'])
    gamma_a = self._calc_gamma(V_a)
    T_a = T0*np.exp(-(gamma_a - gamma0)/q)
    return T_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def __init__(self, eos_mod):
    super(_GammaShiftPowLaw, self).__init__(eos_mod)
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_gamma(self, V_a):
    V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
        param_names=['V0','gamma0','gamma_inf','beta'])
    gamma_a = gamma_inf + (gamma0-gamma_inf)*(V_a/V0)**beta
    return gamma_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_temp(self, V_a, T0=None):
    T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
    V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
        param_names=['V0','gamma0','gamma_inf','beta'])
    gamma_a = self._calc_gamma(V_a)
    x = V_a/V0
    T_a = T0*x**(-gamma_inf)*np.exp((gamma0-gamma_inf)/beta*(1-x**beta))
    return T_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def __init__(self, eos_mod):
    super(_GammaFiniteStrain, self).__init__(eos_mod)
    pass
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_strain_coefs(self):
    V0, gamma0, gammap0 = self.eos_mod.get_param_values(
        param_names=['V0','gamma0','gammap0'])
    a1 = 6*gamma0
    a2 = -12*gamma0 + 36*gamma0**2 - 18*gammap0
    return a1, a2
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_gamma(self, V_a):
    a1, a2 = self._calc_strain_coefs()
    fstr_a = self._calc_fstrain(V_a)
    gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
    return gamma_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def _calc_temp(self, V_a, T0=None):
    if T0 is None:
        T0 = self.eos_mod.refstate.ref_temp()
    a1, a2 = self._calc_strain_coefs()
    fstr_a = self._calc_fstrain(V_a)
    T_a = T0*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
    return T_a
aswolf/xmeos
[ 1, 1, 1, 1, 1465498574 ]
def paginate_items(items, size=100):
    '''paginate_items will return a list of lists,
    each of a particular max size
    '''
    groups = []
    for idx in range(0, len(items), size):
        group = items[idx:idx+size]
        groups.append(group)
    return groups
radinformatics/som-tools
[ 7, 7, 7, 12, 1485049255 ]
def __init__(self, title=None, subtitle=None, translated_title=None):  # noqa: E501
    """WorkTitleV30Rc2 - a model defined in Swagger"""  # noqa: E501
    self._title = None
    self._subtitle = None
    self._translated_title = None
    self.discriminator = None
    if title is not None:
        self.title = title
    if subtitle is not None:
        self.subtitle = subtitle
    if translated_title is not None:
        self.translated_title = translated_title
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def title(self):
    """Gets the title of this WorkTitleV30Rc2.  # noqa: E501

    :return: The title of this WorkTitleV30Rc2.  # noqa: E501
    :rtype: TitleV30Rc2
    """
    return self._title
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def title(self, title):
    """Sets the title of this WorkTitleV30Rc2.

    :param title: The title of this WorkTitleV30Rc2.  # noqa: E501
    :type: TitleV30Rc2
    """
    self._title = title
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def subtitle(self):
    """Gets the subtitle of this WorkTitleV30Rc2.  # noqa: E501

    :return: The subtitle of this WorkTitleV30Rc2.  # noqa: E501
    :rtype: SubtitleV30Rc2
    """
    return self._subtitle
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def subtitle(self, subtitle):
    """Sets the subtitle of this WorkTitleV30Rc2.

    :param subtitle: The subtitle of this WorkTitleV30Rc2.  # noqa: E501
    :type: SubtitleV30Rc2
    """
    self._subtitle = subtitle
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def translated_title(self):
    """Gets the translated_title of this WorkTitleV30Rc2.  # noqa: E501

    :return: The translated_title of this WorkTitleV30Rc2.  # noqa: E501
    :rtype: TranslatedTitleV30Rc2
    """
    return self._translated_title
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def translated_title(self, translated_title):
    """Sets the translated_title of this WorkTitleV30Rc2.

    :param translated_title: The translated_title of this WorkTitleV30Rc2.  # noqa: E501
    :type: TranslatedTitleV30Rc2
    """
    self._translated_title = translated_title
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, WorkTitleV30Rc2):
        return False

    return self.__dict__ == other.__dict__
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map)
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def getregentry():
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map)
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def getregentry():
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map)
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0]
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def getregentry():
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
ArcherSys/ArcherSys
[ 3, 2, 3, 16, 1412356452 ]
def index():
    return "View Generator Service is Active!"
BioGRID/ORCA
[ 1, 1, 1, 5, 1477719854 ]
def view():
    executor.submit(runTask)
    return ""
BioGRID/ORCA
[ 1, 1, 1, 5, 1477719854 ]
def runTask():
    cron = CRONTask.CRONTask()
    cron.run()
    cron.killPID()
    sys.exit(0)
BioGRID/ORCA
[ 1, 1, 1, 5, 1477719854 ]
def get_proteins_for_db(fastafn, fastadelim, genefield):
    """Runs through fasta file and returns proteins accession nrs, sequences
    and evidence levels for storage in lookup DB. Duplicate accessions in
    fasta are accepted and removed by keeping only the last one.
    """
    records = {acc: (rec, get_record_type(rec)) for acc, rec in SeqIO.index(fastafn, 'fasta').items()}
    proteins = ((x,) for x in records.keys())
    sequences = ((acc, str(rec.seq)) for acc, (rec, rtype) in records.items())
    desc = ((acc, get_description(rec, rtype)) for acc, (rec, rtype) in records.items() if rtype)
    evid = ((acc, get_uniprot_evidence_level(rec, rtype)) for acc, (rec, rtype) in records.items())
    ensgs = [(get_ensg(rec), acc) for acc, (rec, rtype) in records.items() if rtype == 'ensembl']

    def sym_out():
        symbols = ((get_symbol(rec, rtype, fastadelim, genefield), acc)
                   for acc, (rec, rtype) in records.items() if rtype)
        othergene = ((get_other_gene(rec, fastadelim, genefield), acc)
                     for acc, (rec, rtype) in records.items()
                     if not rtype and fastadelim and fastadelim in rec.description)
        yield from symbols
        yield from othergene

    return proteins, sequences, desc, evid, ensgs, [x for x in sym_out()]
glormph/msstitch
[ 5, 4, 5, 2, 1383224653 ]
def get_record_type(record):
    dmod = get_decoy_mod_string(record.id)
    test_name = record.id
    if dmod is not None:
        test_name = record.id.replace(dmod, '')
    if test_name.split('|')[0] in ['sp', 'tr']:
        return 'swiss'
    elif test_name[:3] == 'ENS':
        return 'ensembl'
    else:
        return False
glormph/msstitch
[ 5, 4, 5, 2, 1383224653 ]
def get_description(record, rectype):
    if rectype == 'ensembl':
        desc_spl = [x.split(':') for x in record.description.split()]
        try:
            descix = [ix for ix, x in enumerate(desc_spl) if x[0] == 'description'][0]
        except IndexError:
            return 'NA'
        desc = ' '.join([':'.join(x) for x in desc_spl[descix:]])[12:]
        return desc
    elif rectype == 'swiss':
        desc = []
        for part in record.description.split()[1:]:
            if len(part.split('=')) > 1:
                break
            desc.append(part)
        return ' '.join(desc)
glormph/msstitch
[ 5, 4, 5, 2, 1383224653 ]
def get_genes_pickfdr(fastafn, outputtype, fastadelim, genefield):
    """Called by protein FDR module for both ENSG and e.g. Uniprot"""
    for rec in parse_fasta(fastafn):
        rtype = get_record_type(rec)
        if rtype == 'ensembl' and outputtype == 'ensg':
            yield get_ensg(rec)
        elif outputtype == 'genename':
            yield get_symbol(rec, rtype, fastadelim, genefield)
glormph/msstitch
[ 5, 4, 5, 2, 1383224653 ]
def get_symbol(record, rectype, fastadelim, genefield):
    if rectype == 'ensembl':
        fields = [x.split(':') for x in record.description.split()]
        sym = [x[1] for x in fields if x[0] == 'gene_symbol' and len(x) == 2]
    elif rectype == 'swiss':
        fields = [x.split('=') for x in record.description.split()]
        sym = [x[1] for x in fields if x[0] == 'GN' and len(x) == 2]
    elif fastadelim and fastadelim in record.description and genefield:
        return record.description.split(fastadelim)[genefield]
    else:
        return 'NA'
    try:
        return sym[0]
    except IndexError:
        return 'NA'
glormph/msstitch
[ 5, 4, 5, 2, 1383224653 ]
def test_bleuscore():
    # dataset with two sentences
    sentences = ["a quick brown fox jumped",
                 "the rain in spain falls mainly on the plains"]
    references = [["a fast brown fox jumped",
                   "a quick brown fox vaulted",
                   "a rapid fox of brown color jumped",
                   "the dog is running on the grass"],
                  ["the precipitation in spain falls on the plains",
                   "spanish rain falls for the most part on the plains",
                   "the rain in spain falls in the plains most of the time",
                   "it is raining today"]]

    # reference scores for the given set of reference sentences
    bleu_score_references = [92.9, 88.0, 81.5, 67.1]  # bleu1, bleu2, bleu3, bleu4

    # compute scores
    bleu_metric = BLEUScore()
    bleu_metric(sentences, references)

    # check against references
    for score, reference in zip(bleu_metric.bleu_n, bleu_score_references):
        assert round(score, 1) == reference
matthijsvk/multimodalSR
[ 51, 19, 51, 1, 1490812066 ]
def __init__(self, params_to_estimate, M, prior):
    '''
    Parent class for all PID interfaces.
    Arguments:
    * `params_to_estimate` : List of parameter names to be estimated
    * `M` : The bioscrape Model object to use for inference
    * `prior` : A dictionary specifying prior distribution.
      Two built-in prior functions are `uniform_prior` and `gaussian_prior`.
      Each prior has its own syntax for accepting the distribution parameters in the dictionary.
      New priors may be added. The suggested format for prior dictionaries:
      prior_dict = {'parameter_name': ['prior_name', prior_distribution_parameters]}
      For built-in uniform prior, use {'parameter_name':['uniform', lower_bound, upper_bound]}
      For built-in gaussian prior, use {'parameter_name':['gaussian', mean, standard_deviation, probability threshold]}

    New PID interfaces can be added by creating child classes of PIDInterface class,
    as shown for built-in PID interfaces: `StochasticInference` and `DeterministicInference`
    '''
    self.params_to_estimate = params_to_estimate
    self.M = M
    self.prior = prior
    return
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def check_prior(self, params_dict):
    '''
    To add new prior functions: simply add a new function similar to ones that exist and then call it here.
    '''
    lp = 0.0
    for key, value in params_dict.items():
        if 'positive' in self.prior[key] and value < 0:
            return np.inf
        prior_type = self.prior[key][0]
        if prior_type == 'uniform':
            lp += self.uniform_prior(key, value)
        elif prior_type == 'gaussian':
            lp += self.gaussian_prior(key, value)
        elif prior_type == 'exponential':
            lp += self.exponential_prior(key, value)
        elif prior_type == 'gamma':
            lp += self.gamma_prior(key, value)
        elif prior_type == 'log-uniform':
            lp += self.log_uniform_prior(key, value)
        elif prior_type == 'log-gaussian':
            lp += self.log_gaussian_prior(key, value)
        elif prior_type == 'beta':
            lp += self.beta_prior(key, value)
        elif prior_type == 'custom':
            # The last element in the prior dictionary must be a callable function
            # The callable function should have the following signature:
            # Arguments: param_name (str), param_value (float)
            # Returns: log prior probability (float or numpy inf)
            custom_fuction = self.prior[key][-1]
            lp += custom_fuction(key, value)
        else:
            raise ValueError('Prior type undefined.')
    return lp
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def gaussian_prior(self, param_name, param_value):
    '''
    Check if given param_value is valid according to the prior distribution.
    Returns the log prior probability or np.Inf if the param_value is invalid.
    '''
    prior_dict = self.prior
    if prior_dict is None:
        raise ValueError('No prior found')
    mu = prior_dict[param_name][1]
    sigma = prior_dict[param_name][2]
    if sigma < 0:
        raise ValueError('The standard deviation must be positive.')
    # Using probability density function for normal distribution
    # Using scipy.stats.norm has overhead that affects speed up to 2x
    prob = 1/(np.sqrt(2*np.pi) * sigma) * np.exp(-0.5*(param_value - mu)**2/sigma**2)
    if prob < 0:
        warnings.warn('Probability less than 0 while checking Gaussian prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
        return np.inf
    else:
        return np.log(prob)
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def gamma_prior(self, param_name, param_value):
    '''
    Check if given param_value is valid according to the prior distribution.
    Returns the log prior probability or np.inf if the param_value is invalid.
    '''
    prior_dict = self.prior
    if prior_dict is None:
        raise ValueError('No prior found')
    alpha = prior_dict[param_name][1]
    beta = prior_dict[param_name][2]
    from scipy.special import gamma
    prob = (beta**alpha)/gamma(alpha) * param_value**(alpha - 1) * np.exp(-1*beta*param_value)
    if prob < 0:
        warnings.warn('Probability less than 0 while checking Gamma prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
        return np.inf
    else:
        return np.log(prob)
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def log_uniform_prior(self, param_name, param_value):
    '''
    Check if given param_value is valid according to the prior distribution.
    Returns the log prior probability or np.inf if the param_value is invalid.
    '''
    prior_dict = self.prior
    if prior_dict is None:
        raise ValueError('No prior found')
    lower_bound = prior_dict[param_name][1]
    upper_bound = prior_dict[param_name][2]
    if lower_bound < 0 or upper_bound < 0:
        raise ValueError('Upper and lower bounds for log-uniform prior must be positive.')
    if param_value > upper_bound or param_value < lower_bound:
        return np.inf
    prob = 1/(param_value*(np.log(upper_bound) - np.log(lower_bound)))
    if prob < 0:
        warnings.warn('Probability less than 0 while checking Log-Uniform prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
        return np.inf
    else:
        return np.log(prob)
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def __init__(self, params_to_estimate, M, prior):
    self.LL_stoch = None
    self.dataStoch = None
    super().__init__(params_to_estimate, M, prior)
    return
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def get_likelihood_function(self, params):
    # Set params here and return the likelihood object.
    if self.LL_stoch is None:
        raise RuntimeError("Must call StochasticInference.setup_likelihood_function before using StochasticInference.get_likelihood_function.")

    # Set params
    params_dict = {}
    for key, p in zip(self.params_to_estimate, params):
        params_dict[key] = p
    self.LL_stoch.set_init_params(params_dict)

    # Prior
    lp = self.check_prior(params_dict)
    if not np.isfinite(lp):
        return -np.inf

    LL_stoch_cost = self.LL_stoch.py_log_likelihood()
    ln_prob = lp + LL_stoch_cost
    return ln_prob
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def __init__(self, params_to_estimate, M, prior):
    self.LL_det = None
    self.dataDet = None
    super().__init__(params_to_estimate, M, prior)
    return
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def get_likelihood_function(self, params):
    if self.LL_det is None:
        raise RuntimeError("Must call DeterministicInference.setup_likelihood_function before using DeterministicInference.get_likelihood_function.")
    # this part is the only part that is called repeatedly
    params_dict = {}
    for key, p in zip(self.params_to_estimate, params):
        params_dict[key] = p
    self.LL_det.set_init_params(params_dict)

    # Check prior
    lp = 0
    lp = self.check_prior(params_dict)
    if not np.isfinite(lp):
        return -np.inf

    # apply cost function
    LL_det_cost = self.LL_det.py_log_likelihood()
    ln_prob = lp + LL_det_cost
    return ln_prob
ananswam/bioscrape
[ 14, 13, 14, 2, 1484639073 ]
def __init__(self, application):
    """
    Default constructor
    """
    super().__init__()
    self.application = application
    self.splash_screen = None

    self.qt_app = QApplication([])
    # self.qt_app.setOverrideCursor(QCursor(Qt.BlankCursor))
tuturto/pyherc
[ 43, 2, 43, 69, 1327858418 ]
def show_main_window(self):
    """
    Show main window
    """
    main_window = MainWindow(self.application,
                             self.application.surface_manager,
                             self.qt_app,
                             None,
                             Qt.FramelessWindowHint,
                             StartGameController(self.application.level_generator_factory,
                                                 self.application.creature_generator,
                                                 self.application.item_generator,
                                                 self.application.config.start_level))

    self.splash_screen.finish(main_window)
    main_window.show_new_game()
    self.qt_app.exec_()
tuturto/pyherc
[ 43, 2, 43, 69, 1327858418 ]
def __init__(self, application, surface_manager, qt_app, parent, flags, controller):
    """
    Default constructor
    """
    super().__init__(parent, flags)

    self.application = application
    self.surface_manager = surface_manager
    self.qt_app = qt_app
    self.controller = controller

    self.__set_layout()
tuturto/pyherc
[ 43, 2, 43, 69, 1327858418 ]
def show_new_game(self):
    """
    Show new game dialog
    """
    app = self.application

    start_dialog = StartGameWidget(generator=app.player_generator,
                                   config=self.application.config.controls,
                                   parent=self,
                                   application=self.application,
                                   surface_manager=self.surface_manager,
                                   flags=Qt.Dialog | Qt.CustomizeWindowHint)

    result = start_dialog.exec_()

    if result == QDialog.Accepted:
        player = start_dialog.player_character

        intro_text = self.controller.setup_world(self.application.world, player)

        player.register_for_updates(self.map_window.hit_points_widget)
        self.map_window.hit_points_widget.show_hit_points(player)
        self.map_window.hit_points_widget.show_spirit_points(player)
        self.map_window.message_widget.text_edit.setText(intro_text)

        self.__show_map_window()
tuturto/pyherc
[ 43, 2, 43, 69, 1327858418 ]
def __show_message_window(self, character):
    """
    Show message display

    :param character: character which events to display
    :type character: Character
    """
    messages_display = EventMessageDockWidget(self, character)

    self.addDockWidget(Qt.BottomDockWidgetArea, messages_display)
tuturto/pyherc
[ 43, 2, 43, 69, 1327858418 ]
def test_create_queryset_with_host_string():
    """
    Create a queryset with a host given as a string
    """
    # When create a queryset
    t = QuerySet("localhost", index="bar")

    # And I have records
    response = {
        "took": 1,
        "hits": {
            "total": 1,
            "max_score": 1,
            "hits": [
                {
                    "_index": "bar",
                    "_type": "baz",
                    "_id": "1",
                    "_score": 10,
                    "_source": {
                        "foo": "bar"
                    },
                    "sort": [
                        1395687078000
                    ]
                }
            ]
        }
    }
    httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
                           body=json.dumps(response),
                           content_type="application/json")

    # When I run a query
    results = t[0:1]

    # Then I see the response.
    len(results).should.equal(1)
Yipit/pyeqs
[ 207, 7, 207, 5, 1395330543 ]
def test_create_queryset_with_host_dict():
    """
    Create a queryset with a host given as a dict
    """
    # When create a queryset
    connection_info = {"host": "localhost", "port": 8080}
    t = QuerySet(connection_info, index="bar")

    # And I have records
    good_response = {
        "took": 1,
        "hits": {
            "total": 1,
            "max_score": 1,
            "hits": [
                {
                    "_index": "bar",
                    "_type": "baz",
                    "_id": "1",
                    "_score": 10,
                    "_source": {
                        "foo": "bar"
                    },
                    "sort": [
                        1395687078000
                    ]
                }
            ]
        }
    }

    bad_response = {
        "took": 1,
        "hits": {
            "total": 0,
            "max_score": None,
            "hits": []
        }
    }

    httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
                           body=json.dumps(bad_response),
                           content_type="application/json")
    httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search",
                           body=json.dumps(good_response),
                           content_type="application/json")

    # When I run a query
    results = t[0:1]

    # Then I see the response.
    len(results).should.equal(1)
    results[0]["_source"]["foo"].should.equal("bar")
Yipit/pyeqs
[ 207, 7, 207, 5, 1395330543 ]