Dataset columns (as reported by the dataset viewer):
    query       string   (lengths 9 to 9.05k)
    document    string   (lengths 10 to 222k)
    negatives   list     (lengths 19 to 20)
    metadata    dict
Replace all occurrences of y with x (Exercise 3.29)
def subst(x, y, S):
    if core.first(S) == y:
        if len(S) > 1:
            return core.prefix(x, subst(x, y, core.rest(S)))
        else:
            return [x]
    else:
        if len(S) > 1:
            return core.prefix(core.first(S), subst(x, y, core.rest(S)))
        else:
            return S
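The `core` helpers and `cat` referenced by these documents are not part of the dataset row, so the snippet below uses assumed list-based stand-ins (head, tail, cons, concatenation) purely to illustrate the expected behaviour of subst; the real module may differ.

from types import SimpleNamespace

# Hypothetical stand-ins for the helpers the exercise code relies on (assumed semantics).
core = SimpleNamespace(
    first=lambda S: S[0],                           # head of the sequence
    second=lambda S: S[1] if len(S) > 1 else None,  # second element, if any
    rest=lambda S: S[1:],                           # tail of the sequence
    prefix=lambda x, S: [x] + S,                    # cons x onto the front of S
)

def cat(A, B):
    return A + B                                    # sequence concatenation, also assumed

# With the subst definition above in scope, every 'a' is replaced by 'b':
assert subst('b', 'a', ['a', 'x', 'a']) == ['b', 'x', 'b']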
[ "def strReplace( x, idx1, idx2, y):\n\n b0 = x[0:idx1]\n b1 = y\n b2 = x[idx2:]\n b = b0+b1+b2\n return str(b)", "def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))", "def test_replace_with_marker(self):\n self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],\n marker='.',\n site=self.site),\n 'Ayyy.B')\n self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],\n marker='.',\n site=self.site),\n 'AxyxB.')", "def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S", "def replace(x, **kwargs):\n if hasattr(x, '_replace'):\n result = x._replace(**kwargs)\n else:\n result = _default_replace(x, kwargs)\n return result", "def test_replace_source_reference(self):\n # Don't use a valid reference number in the original string,\n # in case it tries to apply that as a reference.\n self.assertEqual(textlib.replaceExcept(r'\\42', r'^(.*)$', r'X\\1X',\n [], site=self.site),\n r'X\\42X')\n self.assertEqual(textlib.replaceExcept(\n r'\\g<bar>', r'^(?P<foo>.*)$', r'X\\g<foo>X', [], site=self.site),\n r'X\\g<bar>X')", "def replace(what: Expression, repl: Expression, target_input: Expression) -> Expression:\n target = copy.deepcopy(target_input)\n return replace_without_copy(what, repl, target)", "def test_replace_with_count(self):\n self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],\n site=self.site),\n 'y [[y]] y y')\n self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],\n site=self.site, count=5),\n 'y [[y]] y y')\n self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],\n site=self.site, count=2),\n 'y [[y]] x x')\n self.assertEqual(textlib.replaceExcept(\n 'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),\n 'y [[x]] y x')", "def replacer(string, bad, good):\n\n for (b, g) in zip(bad, good):\n string = string.replace(b, g)\n return string", "def substitute(func1, func2, arg=None):\n\n try:\n regex = re.compile(r'[x y]{}'.format(arg))\n obj = re.search(regex, func2)\n s = func2[obj.start():obj.end()]\n\n except AttributeError:\n pass\n finally:\n if arg is None or (obj is None):\n return None\n else:\n return func2.replace(s, func1)", "def replaceAll(self, replaceWord): #$NON-NLS-1$\r", "def replace(self, replaceWord): #$NON-NLS-1$\r", "def unify(x,y,s):\n if s is None:\n return None\n elif x==y:\n return s\n elif isVariable(x):\n return unify_var(x,y,s)\n elif isVariable(y):\n return unify_var(y,x,s)\n elif type(x) == list and type(y) == list:\n return unify(x[1:],y[1:], unify(x[0],y[0],s))\n else:\n return None", "def replace(expression, replace = None):\n\n if not isinstance(replace, dict): \n return expression;\n \n #parse regular expression \n pattern = sre.parse(expression);\n\n #group patterns\n gd = pattern.pattern.groupdict\n gdi = {y:x for x,y in gd.items()};\n \n gkeys = gdi.keys();\n rkeys = replace.keys();\n \n newpattern = [];\n for p in pattern:\n if p[0] == sre.SUBPATTERN:\n sid = p[1][0];\n if sid is not None:\n if sid in rkeys:\n newpattern += [(ire.TEXT, replace[sid])];\n elif sid in gkeys:\n gn = gdi[sid];\n if gn in rkeys:\n newpattern += [(ire.TEXT, replace[gn])];\n else:\n newpattern.append(p);\n else:\n newpattern.append(p);\n else:\n newpattern.append(p);\n else:\n newpattern.append(p);\n \n pattern.data = newpattern;\n return 
ire.patternToExpression(pattern);", "def replace_var():\n nonlocal orchestra\n nonlocal element_index\n nonlocal var\n while var in orchestra[element_index]:\n orchestra[element_index] =\\\n orchestra[element_index].replace(var, Vars.get_var(var[1]))", "def replace_(expression):\n original = ['x', '÷', '^', 'π', 'e', 'sin⁻¹(', 'cos⁻¹(', 'tan⁻¹(', '!', \"√\"]\n replaced = ['*', '/', '**', str(math.pi), str(math.e), 'asin(', 'acos(', 'atan(', 'factorial(', \"square_root(\"]\n for original_, replaced_ in zip(original, replaced):\n new_text = expression.replace(original_, replaced_)\n expression = new_text\n \n # Adding required parenthesis\n if expression.count('(') > expression.count(')'):\n expression = expression + ')'\n \n # Removing Redundant parenthesis\n while expression.count('(') < expression.count(')'):\n expl = list(expression)\n expl.remove(')')\n expression = ''.join(expl)\n return expression", "def transform_y(self, y):\n raise NotImplementedError()", "def _replace(variables, match):\r\n expression = match.group(1)\r\n\r\n # Look-up chars and functions for the specified operator\r\n prefix_char, separator_char, split_fn, escape_fn, format_fn = operator_map.get(\r\n expression[0], defaults)\r\n\r\n replacements = []\r\n for key, modify_fn, explode in split_fn(expression):\r\n if key in variables:\r\n variable = modify_fn(variables[key])\r\n replacement = format_fn(explode, separator_char, escape_fn, key, variable)\r\n replacements.append(replacement)\r\n return prefix_char + separator_char.join(replacements)", "def replaces(word):\r\n\r\n\tletters = \"ㅂㅈㄷㄱㅅㅁㄴㅇㄹㅎㅋㅌㅊㅍㅃㅉㄸㄲㅆㅕㅑㅐㅒㅔㅖㅗㅛㅓㅏㅣㅠㅜㅡ\"\r\n\treplacesret = []\r\n\tfor i in range(len(word)+1):\r\n\t\tleft = word[:i]\r\n\t\tright = word[i:]\r\n\t\tfor l in letters:\r\n\t\t\tif right:\r\n\t\t\t\treplacesret.append(left + l + right[1:])\r\n\treturn(replacesret)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace all occurrences of y in S with the elements of sequence T (Exercise 3.31)
def lsubst(T, y, S):
    if core.first(S) == y:
        if len(S) > 1:
            return cat(T, lsubst(T, y, core.rest(S)))
        else:
            return T
    else:
        if len(S) > 1:
            return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))
        else:
            return S
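Using the same assumed stand-ins for core and cat sketched under Exercise 3.29, each matching element expands into the whole sequence T:

# Every 'a' is replaced by the two elements of T = ['p', 'q'] (assumed core/cat stubs):
assert lsubst(['p', 'q'], 'a', ['a', 'x', 'a']) == ['p', 'q', 'x', 'p', 'q']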
[ "def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S", "def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))", "def indsubst(x, i, S):\n if not S:\n return [x]\n elif i == 0:\n return core.prefix(x, core.rest(S))\n elif i > 0:\n return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))", "def unify(x,y,s):\n if s is None:\n return None\n elif x==y:\n return s\n elif isVariable(x):\n return unify_var(x,y,s)\n elif isVariable(y):\n return unify_var(y,x,s)\n elif type(x) == list and type(y) == list:\n return unify(x[1:],y[1:], unify(x[0],y[0],s))\n else:\n return None", "def subpair(X, Y, S):\n if not X or not Y or not S:\n return S\n else:\n return subpair(core.rest(X), core.rest(Y), subst(core.first(Y), core.first(X), S))", "def unify(x, y, s):\n if s is None:\n return None\n elif x == y:\n return s\n elif is_variable(x):\n return unify_var(x, y, s)\n elif is_variable(y):\n return unify_var(y, x, s)\n elif isinstance(x, Expr) and isinstance(y, Expr):\n return unify(x.args, y.args, unify(x.op, y.op, s))\n elif isterm(x) or isterm(y) or not x or not y:\n return utils.if_(x == y, s, None)\n elif utils.is_sequence(x) and utils.is_sequence(y) and len(x) == len(y):\n return unify(x[1:], y[1:], unify(x[0], y[0], s))\n else:\n return None", "def vowelswaps(word):\n vowels = set('aeiouy')\n word = list(word)\n # ['h','i'] becomes ['h', ['a', 'e', 'i', 'o', 'u', 'y']]\n for idx, l in enumerate(word):\n if type(l) == list:\n pass # dont mess with the reductions\n elif l in vowels:\n word[idx] = list(vowels) # if l is a vowel, replace with all possible vowels\n\n # ['h',['i','ii','iii']] becomes 'hi','hii','hiii'\n for p in product(*word):\n yield ''.join(p)", "def seqreverseaux(S, T):\n if not S:\n return T\n else:\n return seqreverseaux(core.rest(S), core.prefix(core.first(S), T))", "def test_timeseries_replace(self):\n\n ts = self.ts_short.clone()\n ts.tseries = ts.tseries ** 2\n\n ts_new = self.ts.replace(ts)\n\n self.assertEqual(ts_new.tseries[0], 0)\n self.assertEqual(ts_new.tseries[1], 1)\n self.assertEqual(ts_new.tseries[2], 4)\n self.assertEqual(ts_new.tseries[3], 9)\n self.assertEqual(ts_new.tseries[4], 16)\n self.assertEqual(ts_new.tseries[5], 5)\n self.assertEqual(ts_new.tseries[6], 6)\n self.assertEqual(ts_new.tseries[7], 7)\n self.assertEqual(ts_new.tseries[8], 8)\n self.assertEqual(ts_new.tseries[9], 9)", "def transform_y(self, y):\n raise NotImplementedError()", "def preprocess_s(s):\n y = defaultdict(list)\n for i, c in enumerate(s):\n y[c].append(i)\n return y", "def reduce_sop(s,on):\n ontt = tt(on)\n N = len(s[0])\n stt = s_stt(s)\n J = range(len(s))\n J.reverse()\n pp=list(s)\n for j in J:\n ttj = stt[j]&ontt\n ttj = ttj & ~sttx(stt,j)\n stt[j]=ttj\n pp[j]=min_cube(ttj)\n return pp", "def replacer(wordlist: Sequence[Sequence[str]]):\n for word in wordlist:\n for i in range(len(word)):\n try:\n word[i] = letters[word[i]]\n except KeyError:\n continue\n return wordlist", "def replace(values: Sequence[T], idx: int, value: T) -> Sequence[T]:\n xs = list(values)\n xs[idx] = value\n\n return type(values)(xs)", "def strReplace( x, idx1, idx2, y):\n\n b0 = x[0:idx1]\n b1 = y\n b2 = x[idx2:]\n b = b0+b1+b2\n return str(b)", "def replacer(string, bad, good):\n\n for (b, g) in zip(bad, good):\n string 
= string.replace(b, g)\n return string", "def s_stt(s): # sop_tv\n res = []\n for j in range(len(s)):\n res = res + [ttcube(s[j])]\n return res", "def _external_substitution(seq, trajectory):\n # Assign proper type\n seq = ETC.cast(seq)\n\n # Initialize ETC to 0\n etc = 0\n\n # Iterate over the given substitution table and substitute\n for step in trajectory[1:]: # Skip first entry, not a substitution step\n\n pair = step.get(\"window\")\n\n # Substitute only if the sequence is atleast 2 symbols long\n if len(seq) > 1 and _check_pair(tuple(pair), seq):\n\n # Cython function call\n seq = ETC.cast(core.substitute_pairs(seq, pair, max(seq) + 1))\n etc += 1\n\n # If sequence has been fully compressed, stop\n else:\n break\n\n # Return both etc as well as the sequence, whatever is left of it\n return etc, seq", "def swap(sequence, i, j):\n temp = sequence[i]; sequence[i] = sequence[j]; sequence[j] = temp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the difference reduction of a sequence (Exercise 3.39)
def seqdif(S):
    if not S:
        return 0
    elif core.second(S):
        return core.first(S) - core.second(S) + seqdif(core.rest(core.rest(S)))
    else:
        return core.first(S)
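Assuming core.second returns the second element (or something falsy when there is none), the reduction alternates subtraction and addition over consecutive pairs; note that the truthiness test also means a literal 0 in the second position of a pair would be treated as missing.

# 9 - 4 + 7 - 2 = 10; a trailing unpaired element is simply added:
assert seqdif([9, 4, 7, 2]) == 10
assert seqdif([10, 3, 2]) == 9      # 10 - 3 + 2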
[ "def diff(seq):\r\n return standardize([i - j for i, j in zip(seq[1:], seq[:-1])])", "def backward_differences(T):\n\tnumOfTimes = len(T)\n\t#the number of steps in the method\n\tm = numOfTimes - 1\n\t#generate the initial differences, which\n\t#is just the standard basis.\n\tD = np.array([ [np.float64((i+1)==(numOfTimes-j)) for i in range(numOfTimes)] for j in range(numOfTimes)])\n\tdifferences = np.zeros_like(D)\n\tdifferences[0] = D[0]\n\t\n\t\n\tfor q in range(1,numOfTimes):\n\t\tfor j in range(numOfTimes - q):\n\t\t\tD[j] = first_difference([T[m-j],T[m-j-q]],[D[j],D[j+1]])\n\t\t\tdifferences[q] = D[0]\n\treturn differences", "def subtract(*arg):\n #first element will be the initial value of substraction, since we substruct from it\n subtraction = arg[0]\n #choosing the range from the second element till the end\n #this is what we are going to gradually substruct from the first element\n for i in arg[1:]:\n subtraction -= i\n return subtraction", "def diff(a, i=1):\n n = len(a)\n return a[i:]-a[:n-i]", "def logdiff(seq):\r\n #seq = [[np.abs(np.log(np.abs(i + 1)) - np.log(np.abs(j + 1))) for i, j in zip(seq[1:], seq[:-1])]]\r\n seq = standardize([np.log(np.abs(i - j) + 1.) for i, j in zip(seq[1:], seq[:-1])])\r\n return seq", "def diff(*seqs, **kwargs): # real signature unknown; restored from __doc__\n pass", "def deletions(seq):\n for i in range(len(seq)):\n seq_del = seq[:i] + seq[i+1:]\n yield seq_del", "def deltas(xs):\n if len(xs) < 2:\n return []\n else:\n return [xs[1] - xs[0]] + deltas(xs[1:])", "def antiparallel(sequence):\n if not sequence:\n return sequence\n \n return complement(sequence[::-1])", "def dif(a, b):\n return a - b", "def gcd_seq(seq):\n return reduce(gcd, seq)", "def difference(num):\n return square_of_sum(num) - sum_of_squares(num)", "def logpercdiff(seq):\r\n return [np.log((np.abs(i - j) * (i + j) / 2.) + 1.) for i, j in zip(seq[1:], seq[:-1])]", "def testSubtractNothing(self):\n fasta1 = \"\\n\".join(\n [\n \">one\",\n \"agtcagtcagtc\",\n \">two\",\n \"acctg\",\n \">three\",\n \"atgggtc\",\n \">four\",\n \"atggctattgaactgtatct\",\n ]\n )\n fasta2 = \"\\n\".join(\n [\n \">five\",\n \"agtcagtcagtc\",\n \">six\",\n \"acctg\",\n ]\n )\n\n result = list(fastaSubtract([StringIO(fasta1), StringIO(fasta2)]))\n self.assertEqual(\n [\"four\", \"one\", \"three\", \"two\"], sorted([seq.id for seq in result])\n )", "def difference(iterable: Iterable[numbers.Number]) -> Iterator[numbers.Number]:\n return (x2 - x1 for x1, x2 in _common.pairwise(iterable))", "def diff(training_data: List[int], testing_data: List[int]):\n\n diff_training_data = []\n\n for i in range(len(training_data) - 1):\n diff_value = training_data[i+1]-training_data[i]\n diff_training_data.append(diff_value)\n\n return diff_training_data, testing_data", "def map_dif(S):\n if not S:\n return []\n else:\n return core.prefix(seqdif(core.first(S)), map_dif(core.rest(S)))", "def differences(a: list[int], b: list[int]) -> int:\n result: int = 0\n short: list[int]\n if len(a) >= len(b):\n result = len(a) - len(b)\n short = b\n else:\n result = len(b) - len(a)\n short = a\n\n i: int = 0\n while i < len(short):\n if a[i] != b[i]:\n result += 1\n i += 1\n\n return result", "def rm_four_every_other(seq):\n return seq[4:-4:2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform the substitution pairs of sequence P on sequence S (Exercise 3.41)
def sublis(P, S):
    if not S or not P:
        return S
    else:
        return sublis(core.rest(P), subst(core.second(core.first(P)), core.first(core.first(P)), S))
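Each pair in P is read as (old, new): subst replaces the pair's first element with its second, one pair at a time. With the assumed list-based stubs:

# 'a' -> 1 and 'b' -> 2 are applied left to right:
assert sublis([['a', 1], ['b', 2]], ['a', 'b', 'c']) == [1, 2, 'c']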
[ "def subpair(X, Y, S):\n if not X or not Y or not S:\n return S\n else:\n return subpair(core.rest(X), core.rest(Y), subst(core.first(Y), core.first(X), S))", "def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S", "def prime_to_S_part(self,S):\n a = self\n for p in S:\n n = a.valuation(p)\n a = a*p**(-n)\n return a", "def indsubst(x, i, S):\n if not S:\n return [x]\n elif i == 0:\n return core.prefix(x, core.rest(S))\n elif i > 0:\n return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))", "def _external_substitution(seq, trajectory):\n # Assign proper type\n seq = ETC.cast(seq)\n\n # Initialize ETC to 0\n etc = 0\n\n # Iterate over the given substitution table and substitute\n for step in trajectory[1:]: # Skip first entry, not a substitution step\n\n pair = step.get(\"window\")\n\n # Substitute only if the sequence is atleast 2 symbols long\n if len(seq) > 1 and _check_pair(tuple(pair), seq):\n\n # Cython function call\n seq = ETC.cast(core.substitute_pairs(seq, pair, max(seq) + 1))\n etc += 1\n\n # If sequence has been fully compressed, stop\n else:\n break\n\n # Return both etc as well as the sequence, whatever is left of it\n return etc, seq", "def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S", "def reduce_sop(s,on):\n ontt = tt(on)\n N = len(s[0])\n stt = s_stt(s)\n J = range(len(s))\n J.reverse()\n pp=list(s)\n for j in J:\n ttj = stt[j]&ontt\n ttj = ttj & ~sttx(stt,j)\n stt[j]=ttj\n pp[j]=min_cube(ttj)\n return pp", "def make_subs_in_locus(locus, p):\n for i, codon in enumerate(locus.codons):\n old_seq = codon.seq\n make_sub_from_p(codon, p)\n new_seq = codon.seq\n if old_seq != new_seq:\n locus.history.append([old_seq, new_seq, i])", "def permute(s, i):\n p = tuple(range(len(s)))\n for j in i.split(\",\"):\n if j.startswith(\"s\"):\n n = int(j[1:])\n s = s[-n:] + s[:-n]\n p = p[-n:] + p[:-n]\n else:\n a, b = tuple(int(i) for i in j[1:].split(\"/\"))\n if j.startswith(\"p\"):\n a, b = p.index(a), p.index(b)\n if a < b:\n s = s[:a] + s[b:b + 1] + s[a + 1:b] + s[a:a + 1] + s[b + 1:]\n p = p[:a] + p[b:b + 1] + p[a + 1:b] + p[a:a + 1] + p[b + 1:]\n else:\n s = s[:b] + s[a:a + 1] + s[b + 1:a] + s[b:b + 1] + s[a + 1:]\n p = p[:b] + p[a:a + 1] + p[b + 1:a] + p[b:b + 1] + p[a + 1:]\n # print(s, j)\n return s", "def simpleslp1(word):\n def sub1(m):\n a = m.group(1)\n return a.lower()\n \n regex1 = '([AIUFXEOMHKGNCJWQTDPBLVSZ])'\n word1 = re.sub(regex1,sub1,word)\n regex2 = r'(.)\\1'\n def sub2(m):\n a = m.group(0) # xx\n return a[0] # x\n \n word2 = re.sub(regex2,sub2,word1)\n var = transcoder.transcoder_processString(word2,'slp1','simpleslp1lo')\n #if word != word2:\n # if word.startswith('kar'):\n # print('dbg:',word,word1,word2,var)\n ans = [var]\n #if not re.search(r'(ar|ri|ru)\n # sometimes an 'ar' slp1 might also be slp1 vowel 'f'.\n # probably when NOT followed by a vowel\n # (i.e. 
at end or followed by consonant)\n regex3 = r'(ar)([^aiufeo]|$)'\n def sub3(m):\n return 'r' + m.group(2)\n word3 = re.sub(regex3,sub3,var)\n #if True and (word3 != var):\n # print('dbg:',word,word1,word2,word3,var)\n if word3 != var:\n ans.append(word3)\n # sometimes, ri should be interpreted as 'f'\n # when (a) at beginning or not preceded by a vowel or followed by vowel\n regex4 = r'(^|[^aiufeo])ri([^aiufeo]|$)'\n def sub4(m):\n return m.group(1) + 'r' + m.group(2) # drop r in ri\n word4 = re.sub(regex4,sub4,word3)\n if word4 != word3:\n ans.append(word4)\n if True:\n print('dbg:',word,word1,word2,var,word3,word4)\n return ans", "def swap(sequence, i, j):\n temp = sequence[i]; sequence[i] = sequence[j]; sequence[j] = temp", "def simplify(self):\n #c = 0\n simp_sentences = []\n for s in self.sentences:\n\n #print \"Original: \" + s\n \n simp_sentences.append(self.transformation(s, ''))\n\n ## for demonstration purposes only. remove the prints later\n #print \"Simplified: \",\n #print simp_sentences[c]\n #c+=1\n\n #print \n return simp_sentences", "def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))", "def SmallParsimony_Backtracking(Tree, S, P, seq, i):\n \n # find best scoring base at root. \n # put that base as last element in array-> [bases]\n # initiates backwalking array [bases]\n \n best = float('inf')\n root = 2*n-2\n bases = [False for _ in range(2*n-1)]\n for k in S[root].keys(): \n if S[root][k] < best: \n best = S[root][k]\n bases[root] = k \n \n # Visit all nodes down from root to all parents of leaves.\n # update the bases for son, daughter from Pointers[node][base] \n # add the base for the current node to ancestor sequence\n \n for v in range(2*n-2, n-1, -1): \n k = bases[v]\n seq[v] += k \n [son, daughter] = Tree[v] \n bases[son] = P[v][k][0]\n bases[daughter] = P[v][k][1]\n\n return seq", "def snps(self):\n data = self.tokenize(self.data)\n\n self.allele1_aligned = [] # Will be joined into string later.\n self.allele2_aligned = []\n snps = []\n\n idx = 0 # Current index on the SNP sequence.\n pos = 0 # Position relative to the first allele\n while idx < len(data):\n c = data[idx]\n if c in 'ACGTN':\n self.allele1_aligned.append(c)\n self.allele2_aligned.append(c)\n elif c == '[':\n self.allele1_aligned.append(data[idx+1])\n self.allele2_aligned.append(data[idx+3])\n if data[idx+1] == '-':\n # Insertion SNP\n descriptor = f'.{pos}ins{data[idx+3]}'\n snp = Snp(descriptor)\n elif data[idx+3] == '-':\n # Deletion SNP\n descriptor = f'.{pos}del'\n snp = Snp(descriptor)\n snp.ref_nucleotide = data[idx+1]\n else:\n # Substitution SNP\n descriptor = f'.{pos}{data[idx+1]}>{data[idx+3]}'\n snp = Snp(descriptor)\n \n\n snps.append(snp)\n\n # Place the idx on the ']' so the next increment reads the next token.\n idx += 4\n else:\n raise StarpError(('Invalid characters in Snp Sequence. 
The accepted alphabet '\n 'is {A, C, G, T, -, /, [, ]}.'))\n\n idx += 1\n pos += 1\n\n self.allele1_aligned = ''.join(self.allele1_aligned)\n self.allele2_aligned = ''.join(self.allele2_aligned)\n self.allele1 = self.allele1_aligned.replace('-', '')\n self.allele2 = self.allele2_aligned.replace('-', '')\n\n return snps", "def seqreverseaux(S, T):\n if not S:\n return T\n else:\n return seqreverseaux(core.rest(S), core.prefix(core.first(S), T))", "def sturms_sequence(p: Polynomial):\n # setting up the Sturm's sequence.\n sturms_seq = []\n sturms_seq.append(p)\n sturms_seq.append(p.deriv())\n\n # filling the Sturm's sequence list.\n f = -Polynomial(poly.polydiv(sturms_seq[-2].coef, sturms_seq[-1].coef)[1])\n while f.degree() != 0 or f.coef[0] != 0:\n sturms_seq.append(f.copy())\n f = -Polynomial(poly.polydiv(\n sturms_seq[-2].coef, sturms_seq[-1].coef)[1])\n\n return sturms_seq", "def slide_through(seq: str,\n slen: int,\n overlap: int) -> t.Iterable[t.Tuple[str, int, int]]:\n assert slen > 0, \"sub-sequence length must large than zero.\"\n assert overlap >= 0, 'overlap length must large or equal to zero.'\n assert overlap < slen, 'overlap length must less than sub-seq length.'\n step = slen - overlap\n tlen = len(seq)\n s = 0\n while s + slen <= tlen:\n e = s + slen\n sub = seq[s:e]\n yield sub, s, e\n s += step", "def reversecompliment(sequence):\n \n # IUPAC complimentary nucleotides. Dictionary used for speed, includes\n # entries for UPPER and lower case.\n IUPAC_NUC = {\"A\": \"T\", \"a\": \"t\", # A -> T\n \"C\": \"G\", \"c\": \"g\", # C -> G\n \"G\": \"C\", \"g\": \"c\", # G -> T\n \"T\": \"A\", \"t\": \"a\", # T -> A\n \"R\": \"Y\", \"r\": \"y\", # A or G -> T or C\n \"Y\": \"R\", \"y\": \"r\", # C or T -> G or A\n \"S\": \"S\", \"s\": \"s\", # G or C -> G or C\n \"W\": \"W\", \"w\": \"w\", # A or T -> A or T\n \"K\": \"M\", \"k\": \"m\", # G or T -> A or C\n \"M\": \"K\", \"m\": \"k\", # A or C -> G or T\n \"B\": \"V\", \"b\": \"v\", # C or G or T -> G or C or A\n \"V\": \"B\", \"v\": \"b\", # G or C or A -> C or G or T\n \"D\": \"H\", \"d\": \"h\", # A or G or T -> T or C or A\n \"H\": \"D\", \"h\": \"d\", # T or C or A -> A or G or T\n \"N\": \"N\", \"n\": \"n\", # any base\n \"-\": \"-\"} # gap\n revcomp = []\n # compliment the sequence\n for base in sequence:\n # get the complimentary code, if one does not exist add 'x'\n revcomp.append(IUPAC_NUC.get(base, \"x\"))\n # reverse it\n revcomp.reverse()\n # return as a string rather than a list\n return ''.join(revcomp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform pairwise substitution of positional keys in X with positional values in Y on sequence S (Exercise 3.42)
def subpair(X, Y, S):
    if not X or not Y or not S:
        return S
    else:
        return subpair(core.rest(X), core.rest(Y), subst(core.first(Y), core.first(X), S))
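Here the keys come from X and the replacement values from the matching positions in Y, so the effect is a positional rename; with the assumed stubs:

# X[i] is replaced by Y[i] throughout S:
assert subpair(['a', 'b'], [1, 2], ['a', 'b', 'c']) == [1, 2, 'c']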
[ "def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S", "def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))", "def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S", "def compose(s2, s1):\n\n for key, value in s1.items():\n s1[key] = substitute(s2, value)\n\n temp, s = {}, {}\n temp.update(s2)\n temp.update(s1)\n\n s = {k: v for k, v in temp.items() if k != v}\n return s", "def indsubst(x, i, S):\n if not S:\n return [x]\n elif i == 0:\n return core.prefix(x, core.rest(S))\n elif i > 0:\n return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))", "def replace_pair(pair, vocab, indices):\n first, second = pair\n pair_str = ''.join(pair)\n pair_str = pair_str.replace('\\\\', '\\\\\\\\')\n changes = []\n pattern = re.compile(r'(?<!\\S)' + re.escape(first + ' ' + second) + r'(?!\\S)')\n if sys.version_info < (3, 0):\n iterator = indices[pair].iteritems()\n else:\n iterator = indices[pair].items()\n for j, freq in iterator:\n if freq < 1:\n continue\n word, freq = vocab[j]\n new_word = ' '.join(word)\n new_word = pattern.sub(pair_str, new_word)\n new_word_tpl = tuple(new_word.split())\n\n if len(new_word_tpl) - 2 * new_word_tpl.count('==') - 1 == 0:\n # If magic then allow to merge morphemes\n new_word_tpl = tuple(filter(lambda t: t != '==', new_word_tpl))\n\n vocab[j] = (new_word_tpl, freq)\n changes.append((j, new_word_tpl, word, freq))\n\n return changes", "def _external_substitution(seq, trajectory):\n # Assign proper type\n seq = ETC.cast(seq)\n\n # Initialize ETC to 0\n etc = 0\n\n # Iterate over the given substitution table and substitute\n for step in trajectory[1:]: # Skip first entry, not a substitution step\n\n pair = step.get(\"window\")\n\n # Substitute only if the sequence is atleast 2 symbols long\n if len(seq) > 1 and _check_pair(tuple(pair), seq):\n\n # Cython function call\n seq = ETC.cast(core.substitute_pairs(seq, pair, max(seq) + 1))\n etc += 1\n\n # If sequence has been fully compressed, stop\n else:\n break\n\n # Return both etc as well as the sequence, whatever is left of it\n return etc, seq", "def unify(x,y,s):\n if s is None:\n return None\n elif x==y:\n return s\n elif isVariable(x):\n return unify_var(x,y,s)\n elif isVariable(y):\n return unify_var(y,x,s)\n elif type(x) == list and type(y) == list:\n return unify(x[1:],y[1:], unify(x[0],y[0],s))\n else:\n return None", "def InvertMapping(x_to_ys):\n y_to_xs = {}\n for x, ys in x_to_ys.items():\n for y in ys:\n y_to_xs.setdefault(y, []).append(x)\n return y_to_xs", "def unify(x, y, s):\n if s is None:\n return None\n elif x == y:\n return s\n elif is_variable(x):\n return unify_var(x, y, s)\n elif is_variable(y):\n return unify_var(y, x, s)\n elif isinstance(x, Expr) and isinstance(y, Expr):\n return unify(x.args, y.args, unify(x.op, y.op, s))\n elif isterm(x) or isterm(y) or not x or not y:\n return utils.if_(x == y, s, None)\n elif utils.is_sequence(x) and utils.is_sequence(y) and len(x) == len(y):\n return unify(x[1:], y[1:], unify(x[0], y[0], s))\n else:\n return None", "def unify(u, v, s):\n s = s if isinstance(s, Substitution) else 
Substitution(s)\n u = s.walk(u)\n v = s.walk(v)\n if u == v:\n return s\n if isinstance(u, Var):\n return s.assoc(u, v)\n if isinstance(v, Var):\n return s.assoc(v, u)\n if isinstance(u, tuple) and isinstance(v, tuple):\n if len(u) != len(v):\n return False\n for uu, vv in zip(u, v): # avoiding recursion\n s = unify(uu, vv, s)\n if s == False: # (instead of a Substitution object.)\n break\n return s\n return False", "def swap_pos_x_y(self, str, sx, sy):\n x,y = self.integerize(sx), self.integerize(sy)\n l = list(str)\n l[x],l[y] = str[y],str[x]\n return ''.join(l)", "def __modifyPosTag(self, first, second):\n swap = []\n swap = self.__indexKeyWord[first]\n self.__indexKeyWord[first] = self.__indexKeyWord[second]\n self.__indexKeyWord[second] = swap\n return None # should raise NotImplementedError()", "def _move_keys(d1, d2, keys):\n for k in keys:\n # split the key if it is a tuple\n k1, k2 = (k, k) if isinstance(k, str) else k\n if k1 in d1:\n d2[k2] = d1.pop(k1)", "def edit_distance(s1, s2):\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in xrange(-1, lenstr1 + 1):\n d[(i, -1)] = i + 1\n for j in xrange(-1, lenstr2 + 1):\n d[(-1, j)] = j + 1\n\n for i in xrange(lenstr1):\n for j in xrange(lenstr2):\n if s1[i] == s2[j]:\n cost = 0\n else:\n cost = 1\n d[(i, j)] = min(\n d[(i - 1, j)] + 1, # deletion\n d[(i, j - 1)] + 1, # insertion\n d[(i - 1, j - 1)] + cost, # substitution\n )\n if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:\n d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition\n\n return d[lenstr1 - 1, lenstr2 - 1]", "def __translate(kvps: Iterator[Tuple[str, Any]], lookup: Mapping[str, str]) -> Iterator[Tuple[str, Any]]:\n yield from ((lookup.get(key, key), value) for key, value in kvps if key != '_type')", "def test_mapping_inversion_complete() -> None:\n # GIVEN\n n = 4\n K2L = create_K2L(n)\n L2K = sut.create_L2K(n)\n\n k = [K2L[i] for i in range(n ** 2)]\n maps = {L(i, j): L2K[i][j] for i, j in product(range(n), repeat=2)}\n\n # WHEN\n k_inv = [e.subs(maps) for e in k]\n\n # THEN\n for i in range(n ** 2):\n assert expand(k_inv[i]) == K(i)", "def align(s0, s1, key=None, pad=None):\n\n if key is None:\n m0, m1 = s0, s1\n else:\n m0, m1 = map(key, s0), map(key, s1)\n\n mm0 = set(m0)\n if len(mm0) < len(m0) or len(set(m1)) < len(m1):\n raise TypeError, 'some argument has repeated values'\n\n order = dict((v, i) for i, v in enumerate(m0))\n\n p = filter(lambda ix: ix[1] in order, enumerate(m1))\n q = [pr[0] for pr in sorted(p, key=lambda iv: order[iv[1]])]\n r = range(len(m1))\n for i, pr in enumerate(p):\n r[pr[0]] = q[i]\n\n t0 = zip(m0, s0)\n t1 = [(m1[i], s1[i]) for i in r]\n\n del p, q, r\n\n if not callable(pad):\n blank = pad\n pad = lambda x: blank\n\n u0, u1 = [], []\n i = j = 0\n lim0, lim1 = len(t0), len(t1)\n null = object()\n\n while i < lim0 or j < lim1:\n vv, v = t0[i] if i < lim0 else (null, pad(t1[j][1]))\n ww, w = t1[j] if j < lim1 else (null, pad(t0[i][1]))\n\n di = dj = 1\n if vv != ww:\n if ww not in mm0 and j < lim1:\n v = pad(w)\n di = 0\n else:\n w = pad(v)\n dj = 0\n\n i += di\n j += dj\n u0.append(v)\n u1.append(w)\n\n return tuple(u0), tuple(u1)", "def do_assignment_replacements(topology, assignments):\n if u.is_str(assignments):\n assignments = segment_assignments_to_dict(assignments) \n else:\n assignments = assignments.copy()\n \n keys = assignments.keys() \n ret =[]\n for s in topology:\n sl = s.upper()\n if sl in keys: \n ret.append(assignments[sl].pop(0))\n else:\n ret.append(s)\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the difference mapping of a sequence (Exercise 3.54)
def map_dif(S):
    if not S:
        return []
    else:
        return core.prefix(seqdif(core.first(S)), map_dif(core.rest(S)))
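map_dif simply maps seqdif (Exercise 3.39) over a sequence of sequences; with the assumed stubs:

# Each inner sequence collapses to its difference reduction:
assert map_dif([[9, 4, 7, 2], [10, 3, 2]]) == [10, 9]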
[ "def diff(seq):\r\n return standardize([i - j for i, j in zip(seq[1:], seq[:-1])])", "def interSequence(seq_maps):\n seq_map = {}\n \n return seq_map", "def seqdif(S):\n if not S:\n return 0\n elif core.second(S):\n return core.first(S) - core.second(S) + seqdif(core.rest(core.rest(S)))\n else:\n return core.first(S)", "def pass_down(L,L1,map):\n## print len(L),len(L1),len(map),max(map)\n## print sumsize(L)\n## print sumsize(L1) \n for j in range(len(map)):\n if L[j] == -1:\n continue\n assert L1[map[j]] == -1 or L1[map[j]] == L[j], 'L1=%d, L = %d'%(L1[map[j]],L[j]) \n L1[map[j]] = max(L[j],L1[map[j]])\n return L1", "def trans_i_v(seq1, seq2):\n \n purine = ['A', 'G']\n pyrimidine = ['C', 'T']\n \n transition = 0\n transversion = 0\n \n for base in range(len(seq1)):\n if seq1[base] != seq2[base]:\n if seq1[base] in purine and seq2[base] in purine:\n transition += 1\n elif seq1[base] in pyrimidine and seq2[base] in pyrimidine:\n transition += 1\n else:\n transversion += 1\n \n print('%0.11f' % (transition / transversion))", "def seq_to_edit_distance(seq_dict):\n\n from Bio import pairwise2\n #dim = len(list(seq_dict.keys()))\n dim = len(seq_dict.keys())\n d = np.zeros((dim, dim), dtype=np.int)\n s = list(seq_dict.values())\n\n for i in range(dim):\n print(i)\n sys.stdout.flush()\n\n for j in range(dim):\n if i == j:\n d[i,j] = 0 # put 0\n elif i > j:\n d[i,j] = d[j,i] # put d[i][j] = d[j][i]\n else:\n score = pairwise2.align.globalms(s[i], s[j], 0, -1, -1, -1, score_only = True) # calc d[i][j]\n d[i,j] = -score\n return d", "def logdiff(seq):\r\n #seq = [[np.abs(np.log(np.abs(i + 1)) - np.log(np.abs(j + 1))) for i, j in zip(seq[1:], seq[:-1])]]\r\n seq = standardize([np.log(np.abs(i - j) + 1.) for i, j in zip(seq[1:], seq[:-1])])\r\n return seq", "def code_phase_difference(dset: \"Dataset\") -> Tuple[Dict[str, Any], Dict[str, Any]]:\n code_phase_1 = dict(val = np.zeros(dset.num_obs))\n code_phase_2 = dict(val = np.zeros(dset.num_obs))\n\n for sys in dset.unique(\"system\"):\n \n # TODO: This is not correct for single-frequency solutions. \n if len(dset.meta[\"obstypes\"][sys]) < 4:\n raise ValueError(f\"Dual-frequency code and phase observations are needed for code-phase difference.\")\n \n idx = dset.filter(system=sys)\n \n # Get observations for the 1st and 2nd frequency\n #\n # NOTE: The GNSS observation types defined in meta variable 'obstypes' has a defined order, which is determined\n # by the given observation types for each GNSS and the priority list.\n #\n code1 = dset.meta[\"obstypes\"][sys][0]\n code2 = dset.meta[\"obstypes\"][sys][1]\n phase1 = dset.meta[\"obstypes\"][sys][2]\n phase2 = dset.meta[\"obstypes\"][sys][3]\n code_phase_1.setdefault(\"sys_obs\", dict()).update({sys: [code1, phase1]})\n code_phase_2.setdefault(\"sys_obs\", dict()).update({sys: [code2, phase2]})\n\n code_phase_1[\"val\"][idx] = dset.obs[code1][idx] - dset.obs[phase1][idx]\n code_phase_2[\"val\"][idx] = dset.obs[code1][idx] - dset.obs[phase1][idx]\n\n return code_phase_1, code_phase_2", "def relativeSequence(wireSequence, initialSequence, lapNumber):\n return (wireSequence + (lapNumber * (2**32))) - initialSequence", "def logpercdiff(seq):\r\n return [np.log((np.abs(i - j) * (i + j) / 2.) + 1.) 
for i, j in zip(seq[1:], seq[:-1])]", "def diff(*seqs, **kwargs): # real signature unknown; restored from __doc__\n pass", "def all_diff_combinate(self, events):\n t1 = events\n t2 = self.spike_times\n m1 = numpy.tile(t1[:,numpy.newaxis] , (1,t2.size) )\n m2 = numpy.tile(t2[numpy.newaxis,:] , (t1.size,1) )\n m = m2-m1\n m = m.reshape(m.size) \n return m", "def backward_differences(T):\n\tnumOfTimes = len(T)\n\t#the number of steps in the method\n\tm = numOfTimes - 1\n\t#generate the initial differences, which\n\t#is just the standard basis.\n\tD = np.array([ [np.float64((i+1)==(numOfTimes-j)) for i in range(numOfTimes)] for j in range(numOfTimes)])\n\tdifferences = np.zeros_like(D)\n\tdifferences[0] = D[0]\n\t\n\t\n\tfor q in range(1,numOfTimes):\n\t\tfor j in range(numOfTimes - q):\n\t\t\tD[j] = first_difference([T[m-j],T[m-j-q]],[D[j],D[j+1]])\n\t\t\tdifferences[q] = D[0]\n\treturn differences", "def diff_map(imsatpath, imuavpath, w=0):\n if w == 0 or w % 2 == 0:\n print(\"use this function like below: \\n\"\n \"diff_map(imsatpath, imuavpath, w)\\n need param w of search box,\"\n \" its must be odd like 1, 3, 5, 7, 9,.etc.\")\n sys.exit(1)\n imsatarr, imuavarr = np.moveaxis(Image.img2array(imsatpath), 0, 2), np.moveaxis(Image.img2array(imuavpath), 0, 2)\n print(imsatarr.shape, imuavarr.shape)\n imsat_desc, imuav_desc = descriptor(imsatarr), descriptor(imuavarr)\n print(\"descriptors of IMUAV and IMSAT have generated successed, now compute difference maps!\")\n diffarr = np.zeros((imuavarr.shape[0], imuavarr.shape[1]), dtype='f')\n for i in range(imuav_desc.shape[0]):\n for j in range(imuav_desc.shape[1]):\n xmin, xmax = int(j-(w-1)/2), int(j+(w-1)/2+1)\n ymin, ymax = int(i-(w-1)/2), int(i+(w-1)/2+1)\n distsset = []\n for x in range(xmin, xmax):\n for y in range(ymin, ymax):\n if imuav_desc.shape[1] > x > 0 and 0 < y < imuav_desc.shape[0]:\n distsset.append(np.linalg.norm(imsat_desc[x, y] - imuav_desc[i, j])) #norm1 of vector/matrix\n diffarr[i, j] = min(distsset)\n np.save(os.path.join(Config.data, \"diffmap.npy\"), diffarr)\n return diffarr", "def calcDistanceDeviationsGivenMapping(mg1, mg2, atomMap):\n\t#assert mg1 and mg2 contain the same number of each type of atom (and obviously the same total number of atoms)\n\n\t#generate distance mappings\n\t(hetMap1, homMap1, hetMapType1, homMapType1)=mg1.getDistanceMappings()\n\t(hetMap2, homMap2, hetMapType2, homMapType2)=mg2.getDistanceMappings()\n\n\tdistDevAbs = {}\n\tdistDevRel = {}\n\t#first, go through the hetMaps\n\tfor i in hetMap1:\n\t\td1 = hetMap1[i]\n\t\td2 = hetMap2[(atomMap[i[0]],atomMap[i[1]])]\n\t\tdistDevAbs[i]=d1-d2\n\t\tdistDevRel[i]=(d1-d2)/min(d1,d2)\n\t#second, go through the homMaps\n\tfor i in homMap1:\n\t\td1 = homMap1[i]\n\t\tj0 = atomMap[i[0]]\n\t\tj1 = atomMap[i[1]]\n\t\tif j0 < j1:\n\t\t\td2 = homMap2[(j0,j1)]\n\t\telse:\n\t\t\td2 = homMap2[(j1,j0)]\n\t\tdistDevAbs[i]=d1-d2\n\t\tdistDevRel[i]=(d1-d2)/min(d1,d2)\n\n\treturn distDevAbs, distDevRel", "def CalculateTransition(ProteinSequence,AAProperty,AAPName):\r\n\t\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tCTD=TProteinSequence\r\n\tResult[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)\r\n\treturn Result", "def get_difference_operator(self):\n if self.differ_cache:\n return self.differ_cache\n\n D = 
{tuple([0]*self.s_):1}\n\n for xi in self.Xi_:\n Dp = {}\n for v in D:\n p = tuple(vector(v) + vector(xi))\n Dp[p] = -D[v]\n\n for v in D:\n if v in Dp:\n Dp[v] += D[v]\n else:\n Dp[v] = D[v]\n D = Dp\n\n self.differ_cache = [(vector(k)-self.c_xi, D[k]) for k in D if D[k] != 0]\n return self.differ_cache", "def map_(value: float, start1: float, stop1: float, start2: float, stop2: float) -> float:\n return (value - start1) / (stop1 - start1) * (stop2 - start2) + start2", "def get_mapping( ref, alt ):\n\n cig_ops = \"MIDNSHP=X\"\n\n ref_len = len( ref )\n alt_len = len( alt )\n\n # Substitutions?\n if ref_len == alt_len:\n return 0, alt, [ [ cig_ops.find( \"M\" ), ref_len ] ]\n\n # Deletions?\n alt_in_ref_index = ref.find( alt )\n if alt_in_ref_index != -1:\n return alt_in_ref_index, ref[ alt_in_ref_index + 1: ], [ [ cig_ops.find( \"D\" ), ref_len - alt_len ] ]\n\n # Insertions?\n ref_in_alt_index = alt.find( ref )\n if ref_in_alt_index != -1:\n return ref_in_alt_index, alt[ ref_in_alt_index + 1: ], [ [ cig_ops.find( \"I\" ), alt_len - ref_len ] ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a subsequence of S from elements m to n (Exercise 3.63)
def subseq(S, m, n):
    if m < 0:
        return subseq(S, len(S) + m, n)
    elif n < 0:
        return subseq(S, m, len(S) + n)
    elif m > len(S):
        return subseq(S, m - len(S), n)
    elif n > len(S):
        return subseq(S, m, n - len(S))
    if m == n:
        return []
    elif m > n:
        if n == 0:
            return cat(subseq(core.rest(S), m - 1, 0), [core.first(S)])
        else:
            return subseq(core.rest(S), m - 1, n - 1)
    elif m < n:
        if m == 0:
            return core.prefix(core.first(S), subseq(core.rest(S), 0, n - 1))
        else:
            return subseq(core.rest(S), m - 1, n - 1)
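With the assumed stubs, the common case m < n behaves like the half-open slice S[m:n], and negative or out-of-range indices are first wrapped around the sequence length:

assert subseq(['a', 'b', 'c', 'd'], 1, 3) == ['b', 'c']        # like S[1:3]
assert subseq(['a', 'b', 'c', 'd'], 0, -1) == ['a', 'b', 'c']  # -1 wraps to len(S) - 1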
[ "def subseqs(s):\n if len(s)==0:\n return [[]]\n else:\n sub=subseqs(s[1:])\n return insert_into_all(s[0],sub)+sub", "def _slice_a_subsequence(self, seq):\n n = len(seq)\n if n == 1:\n return seq\n else:\n start = random_pick(n)\n length = random_pick(range(1, n))\n end = start + length\n if end < n:\n sub_seq = seq[start:end]\n else:\n sub_seq = seq[start:n] + seq[0:end - n - 1]\n\n return sub_seq", "def subs(s, count):\r\n return [s[i:(i + count)] for i in range(len(s) - count + 1)]", "def getSubstrings(a, n):\n\n # Get substrings from string\n substrings = set()\n for i in range(0, len(a) - n + 1):\n substrings.add(a[i:i + n])\n\n return substrings", "def sliced (s, n):\n result = [s[0+i:n+i] for i in range(0, len(s), n)]\n # if last sliced lenght less than n, then add zeros to last sliced until the lenght equal with n\n if len(result[-1]) < n:\n less = n-len(result[-1])\n zeros = ''\n for i in range(less):\n zeros = zeros + '0'\n result[-1] = result[-1]+zeros\n return result", "def choose_m_of_n(alist, m):\r\n\r\n if m < 1 or m > len(alist):\r\n return []\r\n elif len(alist) == m:\r\n return [alist]\r\n elif m == 1:\r\n return [[struc] for struc in alist]\r\n else:\r\n element = alist[-1]\r\n rest = alist[:-1]\r\n without_element = choose_m_of_n(rest, m)\r\n with_element = [sublist + [element] for sublist in choose_m_of_n(rest, m-1)]\r\n result = with_element + without_element\r\n return result", "def split_in_substrings(s, n):\n\n output = []\n\n # for each line in s\n for line_in_s in s:\n # split the line in substrings of size n\n for i in range(len(line_in_s) - n + 1):\n output.append(line_in_s[i:i + n])\n\n return output", "def front_times(s, n):\n length = len(s) if len(s) < 3 else 3\n return s[:length] * n", "def substrings(a, b, n):\n\n # TODO\n la = len(a)\n lb = len(b)\n sub_a = []\n sub_b = []\n sub = []\n\n for i in range(la-n+1):\n sub_a.append(a[i:i+n])\n\n for j in range(lb-n+1):\n sub_b.append(b[j:j+n])\n\n for k in sub_a:\n if k in sub_b:\n sub.append(k)\n\n sub = set(sub)\n\n return sub", "def find_subsequence( s, t ):\r\n snext = 0\r\n indices = []\r\n for i in range(len(t)):\r\n for j in range(snext,len(s)):\r\n if t[i] == s[j]:\r\n indices.append( j+1 ) # 1-based!!\r\n snext = j + 1 # start next scan on s on next character\r\n break\r\n return utils.list2string( indices )", "def subsets(S: list, k: int) -> list:\n if k>len(S) or k<0:\n raise Exception('must have 0<=k<=|S|, got k : ',k,'and |S|',len(S))\n T = subset_helper(S, k)\n assert len(T)==binomial(len(S),k)\n return T\n\n # is S has size k, then return [S]\n # ow. 
for every element e in S\n #find all subsets of size k with e\n #find all subsets of size k without e\n #return union of these", "def split_every(seq, n):\n return [seq[i:i+n] for i in range(0, len(seq), n)]", "def slices(n, max_len: int = None) -> iter:\n return chain(\n ((0, 0), ), # Empty word\n (\n (i, j)\n for i in range(n)\n for j in range(i + 1, min(n, i + max_len) + 1 if max_len else n + 1)\n )\n )", "def extract_sub_sequences(sequence, window_size):\n if window_size <= 0:\n return \"Window size must be a positive integer\"\n if window_size > len(sequence):\n return \"Window size is larger than sequence length\"\n result = []\n nr_windows = len(sequence) - window_size + 1\n for i in range(nr_windows):\n sub_sequence = sequence[i:i + window_size]\n result.append(sub_sequence)\n return result", "def substrings(s, minlength=30):\n maxsize = current = len(s)\n result = []\n while current >= minlength:\n result.extend([s[start:start+current] \n for start in range(maxsize-current+1)])\n # range(5) is [0,1,2,3,4]\n current -= 1\n return set(result) # set() removes duplicates", "def sublis(P, S):\n if not S or not P:\n return S\n else:\n return sublis(core.rest(P), subst(core.second(core.first(P)), core.first(core.first(P)), S))", "def take(n, seq):\n seq = iter(seq)\n return (next(seq) for i in range(n))", "def make_seq_list(n, sessions_list):\n seq_list = []\n for seq in sessions_list:\n if len(seq) >= n:\n for m in range(len(seq)-n+1):\n seq_list += [tuple(seq[m:m+n])]\n\n return seq_list", "def slide_through(seq: str,\n slen: int,\n overlap: int) -> t.Iterable[t.Tuple[str, int, int]]:\n assert slen > 0, \"sub-sequence length must large than zero.\"\n assert overlap >= 0, 'overlap length must large or equal to zero.'\n assert overlap < slen, 'overlap length must less than sub-seq length.'\n step = slen - overlap\n tlen = len(seq)\n s = 0\n while s + slen <= tlen:\n e = s + slen\n sub = seq[s:e]\n yield sub, s, e\n s += step" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Distribute left function (Exercise 3.69)
def seqdistleft(x, S):
    if not S:
        return []
    else:
        return core.prefix(core.prefix(x, core.prefix(core.first(S), [])), seqdistleft(x, core.rest(S)))
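Distribute-left pairs x with every element of S, yielding a sequence of two-element sequences; with the assumed stubs:

assert seqdistleft('a', [1, 2, 3]) == [['a', 1], ['a', 2], ['a', 3]]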
[ "def rw_generator(N: int, x_0 = 0, p_left=0.5):\n steps = [x_0] + [ 1 if (i>p_left) else -1 for i in np.random.random(N-1)]\n return np.add.accumulate(steps)", "def _do_action_left(state):\n\n height, width = state.shape\n reward = 0\n\n for row in range(height):\n # Always the rightmost tile in the current row that was already moved\n merge_candidate = -1\n merged = np.zeros((4,), dtype=np.bool)\n\n for col in range(4):\n if state[row, col] == 0:\n continue\n\n if (merge_candidate != -1 and\n not merged[merge_candidate] and\n state[row, merge_candidate] == state[row, col]):\n # Merge tile with merge_candidate\n state[row, col] = 0\n merged[merge_candidate] = True\n state[row, merge_candidate] += 1\n reward += 2 ** state[row, merge_candidate]\n\n else:\n # Move tile to the left\n merge_candidate += 1\n if col != merge_candidate:\n state[row, merge_candidate] = state[row, col]\n state[row, col] = 0\n\n return reward", "def is_left(p0, p1, p2):\r\n # cast the answer to an int so it can be used directly from sort()\r\n return int((p1.x - p0.x)*(p2.y-p0.y) - (p2.x-p0.x)*(p1.y-p0.y))", "def _left_normal_form_perm_(self):\n n = self.parent().strands()\n delta = 0\n Delta = Permutation([n-i for i in range(n)])\n l = self.Tietze()\n if l==():\n return (0,)\n form = []\n for i in l:\n if i>0:\n form.append(Permutation((i, i+1)))\n else:\n delta = delta+1\n form = [Delta*a*Delta for a in form]\n form.append(Delta*Permutation((-i, -i+1)))\n i = j = 0\n while j<len(form):\n while i<len(form)-j-1:\n e = form[i].idescents(from_zero=False)\n s = form[i + 1].descents(from_zero=False)\n S = set(s).difference(set(e))\n while S:\n a = list(S)[0]\n form[i] = form[i] * Permutation((a, a+1))\n form[i + 1] = Permutation((a, a+1))*form[i+1]\n e = form[i].idescents(from_zero=False)\n s = form[i + 1].descents(from_zero=False)\n S = set(s).difference(set(e))\n if form[i+1].length()==0:\n form.pop(i+1)\n i = 0\n else:\n i += 1\n j += 1\n i = 0\n form = [a for a in form if a.length()]\n while form!=[] and form[0]==Delta:\n form.pop(0)\n delta = delta-1\n return tuple([-delta]+form)", "def compute_possible_left_sides():\n # 362880 entries: \n perms = list(itertools.permutations(['1','2','3','4','5','6','7','8','9']))\n\n # fixing top-left entry to 1 leaves 8! 
= 40320 possible left sides\n fix_1 = [x for x in perms if x[0] == '1']\n \n # Since the top-left 3x3 block can not contain 2 and 3 more\n # than once, we can further prune the possible left sides:\n exclude_23 = [x for x in fix_1 if (x[1] not in ['2', '3'] and x[2] not in ['2', '3'])]\n return exclude_23", "def multLeft(self, *args):\n return _coin.SbDPMatrix_multLeft(self, *args)", "def compute_elldash(p,N,k0,n):\n\n return ModularForms(N,k0 + n*(p-1)).sturm_bound()", "def move_left(self, p):\r\n\t i = 0\r\n\t j = 0\t\r\n\t while i < 9:\r\n\t\t while j < 10:\r\n\t\t\t p[j][i] = p[j][i+1]\t\r\n\t\t\t j = j + 1\r\n\t\t j = 0\t\t\t\r\n\t\t i = i + 1\t\r\n\t\t\r\n\t for i in range(10):\r\n\t\t p[i][9] = 0 # Put zero on the left column of probability table\r", "def redistribute(self, idx: int) -> bool:\n node = self.pointers[idx]\n if self.has_left_sibling(idx) and self.pointers[idx - 1].is_plenty():\n print('FIX BY BORROW FROM LEFT SIBLING')\n left_sibling = self.pointers[idx - 1]\n print('LEFT SIBLING BEFORE BORROW: {}'.format(left_sibling))\n if node.is_leaf():\n # max key of the left sibling, redistribute to be the min key of the node\n moving_key = left_sibling.keys.pop()\n moving_payload = left_sibling.payload.pop()\n node.keys.insert(0, moving_key)\n node.payload.insert(0, moving_payload)\n self.keys[idx - 1] = moving_key # min key from the right sub-tree\n else:\n new_key = node.get_min_key()\n moving_child = left_sibling.pointers.pop()\n left_sibling.keys.pop() # simply remove the right most key\n node.pointers.insert(0, moving_child)\n node.keys.insert(0, new_key)\n self.keys[idx - 1] = node.get_min_key() # update since node gets a new left most leaf\n\n print('LEFT SIBLING AFTER BORROW: {}'.format(left_sibling))\n print('UNDERFLOW NODE AFTER BORROW: {}'.format(self.pointers[idx]))\n return True\n\n elif self.has_right_sibling(idx) and self.pointers[idx + 1].is_plenty():\n right_sibling = self.pointers[idx + 1]\n print('FIX BY BORROW FROM RIGHT')\n print('RIGHT SIBLING BEFORE BORROW: {}'.format(right_sibling))\n if node.is_leaf():\n new_key = right_sibling.keys.pop(0) # min key of the right sibling\n node.keys.append(new_key)\n moving_payload = right_sibling.payload.pop(0)\n node.payload.append(moving_payload)\n self.keys[idx] = right_sibling.get_min_key()\n else:\n moving_child = right_sibling.pointers.pop(0)\n right_sibling.keys.pop(0)\n node.pointers.append(moving_child)\n node.keys.append(moving_child.get_min_key())\n # right_sibling.keys[0] = right_sibling.pointers[1].get_min_key()\n self.keys[idx] = right_sibling.get_min_key()\n\n print('RIGHT SIBLING AFTER BORROW: {}'.format(right_sibling))\n print('UNDERFLOW NODE AFTER BORROW: {}'.format(self.pointers[idx]))\n return True\n\n else: # redistribution fails, try merge with siblings.\n return False", "def my_insort_left(a, x, lo=0, hi=None, keyfunc=lambda v: v):\r\n x_key = keyfunc(x)\r\n\r\n if lo < 0:\r\n raise ValueError('lo must be non-negative')\r\n if hi is None:\r\n hi = len(a)\r\n while lo < hi:\r\n mid = (lo+hi)//2\r\n if keyfunc(a[mid]) < x_key: lo = mid+1\r\n else: hi = mid\r\n a.insert(lo, x)", "def getBinLefts( self ):\n return [ self.binShift + bin * self.binSize for bin, count in sorted( self.bin2count.items() ) ]", "def leftFactorize(self):\n\n\t\tnewProds=[]\n\t\tfor P in self.ProdsJoined:\n\t\t\tAlpha=Grammar.findLongestFactorOnLeft(P.Right)\n\t\t\tif Alpha!='':\n\t\t\t\tGammas=[]\n\t\t\t\tBetas=[]\n\t\t\t\tfor prod in P.Right:\n\t\t\t\t\tif Alpha in 
prod:\n\t\t\t\t\t\tnewTerm=prod.replace(Alpha,'',1)\n\t\t\t\t\t\tif newTerm=='':\n\t\t\t\t\t\t\tif 'ε' not in Betas:\n\t\t\t\t\t\t\t\tBetas.append('ε')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tBetas.append(newTerm)\n\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tGammas.append(prod)\n\t\t\t\tP.Right=[Alpha+'<'+P.Left+'`'+'>']+Gammas\n\t\t\t\tnewP=Production('<'+P.Left+'`'+'>',Betas) \n\t\t\t\tnewProds.append(newP)\n\t\tself.ProdsJoined.extend(newProds)\n\t\tfor V in newProds:\n\t\t\tself.VN.append(V.Left)\n\t\t# self.Productions.extend(newProds)\n\t\t# self.productionsJoin()\n\t\t# self.findTermAndNotTerm()", "def _bisect_left_func(func, x, lo, hi):\n\n while lo < hi:\n mid = (lo + hi) // 2\n if func(mid) < x:\n lo = mid + 1\n else:\n hi = mid\n\n return lo", "def left_normal_form(self):\n lnfp = self._left_normal_form_perm_()\n a = lnfp[0]\n l = lnfp[1:]\n n = self.strands()\n delta = Permutation([n-i for i in range(n)])\n P = self.parent()\n return tuple( [P._permutation_braid(delta) ** a] +\n [P._permutation_braid(i) for i in l] )", "def isLeft(*args):\n return _almathinternal.isLeft(*args)", "def leftWing(s, obj):\n\n lift = s.lift(obj)/2 # Two wings so divide by 2\n return lift.scale(-s.x + 1)", "def reversals(series, left=False, right=False):\n series = iter(series)\n\n x_last, x = next(series), next(series)\n d_last = x - x_last\n\n if left:\n yield x_last\n for x_next in series:\n if x_next == x:\n continue\n d_next = x_next - x\n if d_last * d_next < 0:\n yield x\n x_last, x = x, x_next\n d_last = d_next\n if right:\n yield x_next", "def smallest_multiple(N):", "def rotate_i64_left(n, k):\n mask = (1 << 64) - 1\n result = ((n << (k % 64)) & mask) | ((n & mask) >> ((64 - k) % 64))\n if result > mask:\n result -= 1 << 64\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverse sequence S onto sequence T
def seqreverseaux(S, T):
    if not S:
        return T
    else:
        return seqreverseaux(core.rest(S), core.prefix(core.first(S), T))
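This is the classic accumulator-style reverse: the head of S is consed onto T until S is exhausted, so T doubles as the initial (already-reversed) suffix of the result. With the assumed stubs:

assert seqreverseaux([1, 2, 3], []) == [3, 2, 1]
assert seqreverseaux([1, 2], ['x']) == [2, 1, 'x']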
[ "def reversed(seq):\n\n l=list(seq)\n l.reverse()\n return l", "def elements_reversed(seq):\n return seq[::-1]", "def reverse(s):\n r = \"\".join(reversed(s))\n\n return r", "def sg_reverse_seq(tensor, opt):\n # default sequence dimension\n opt += tf.sg_opt(dim=1)\n seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(dims=opt.dim)\n return tf.reverse_sequence(tensor, seq_len, opt.dim, name=opt.name)", "def reverse(L):\r\n return L[::-1]", "def reverse(self, s: List[str], l: int, r: int) -> None:\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "def reverse_rlist_iterative(s):\n \"*** YOUR CODE HERE ***\"\n newlist = rlist(first(s), empty_rlist)\n while rest(s) != empty_rlist:\n s = rest(s)\n newlist = rlist(first(s), newlist)\n return newlist", "def reverse_utterances(self, seq):\n reversed_seq = numpy.copy(seq)\n for idx in range(seq.shape[1]):\n eos_indices = numpy.where(seq[:, idx] == self.eos_sym)[0]\n prev_eos_index = -1\n for eos_index in eos_indices:\n reversed_seq[(prev_eos_index+1):eos_index, idx] = (reversed_seq[(prev_eos_index+1):eos_index, idx])[::-1]\n prev_eos_index = eos_index\n\n return reversed_seq", "def reverse(self):\n self.flips.reverse()\n for e in self.flips:\n self.permute(e, False)\n self.flips = []", "def string_reverse(s):\n if len(s) == 0:\n return s\n else:\n return string_reverse(s[1:]) + s[0]", "def reverse(self, in_place=False):\n pass", "def last_n_reversed(sequence, n):\r\n sequence = sequence[::-1]\r\n new_sequence = sequence[:n]\r\n\r\n return new_sequence", "def reverse_complement(sequence, strand_type='RNA'):\n if strand_type == 'RNA':\n sequence = convert_to_RNA(sequence)\n tempseq = sequence.replace('A', 'x')\n tempseq = tempseq.replace('U', 'A')\n tempseq = tempseq.replace('x', 'U')\n tempseq = tempseq.replace('G', 'x')\n tempseq = tempseq.replace('C', 'G')\n tempseq = tempseq.replace('x', 'C')\n sequence = tempseq[::-1]\n if strand_type == 'DNA':\n sequence = convert_to_DNA(sequence)\n tempseq = sequence.replace('A', 'x')\n tempseq = tempseq.replace('T', 'A')\n tempseq = tempseq.replace('x', 'T')\n tempseq = tempseq.replace('G', 'x')\n tempseq = tempseq.replace('C', 'G')\n tempseq = tempseq.replace('x', 'C')\n sequence = tempseq[::-1]\n return sequence", "def example_reverse(string):\n print(f'Task reverse started')\n time.sleep(5)\n print('Task slept')\n\n return string[::-1]", "def _reverse(self):\n o = self.copy()\n # Clear ok reversed flag\n o._reversed = not o._reversed\n\n if o.bits == 8:\n # No need for reversing\n return o.copy()\n\n if o.is_top:\n # A TOP is still a TOP after reversing\n si = o.copy()\n return si\n\n else:\n if not o.is_integer:\n # We really don't want to do that... 
but well, sometimes it just happens...\n logger.warning(\"Reversing a real strided-interval %s is bad\", self)\n\n # Reversing an integer is easy\n rounded_bits = ((o.bits + 7) // 8) * 8\n list_bytes = []\n si = None\n\n for i in range(0, rounded_bits, 8):\n b = o._unrev_extract(min(i + 7, o.bits - 1), i)\n list_bytes.append(b)\n\n for b in list_bytes:\n si = b if si is None else si.concat(b)\n si.uninitialized = self.uninitialized\n si._reversed = o._reversed\n return si", "def reverseSlice(s, size):\n if s.step > 0 or s.step is None:\n return s\n\n i = s.start\n j = s.stop\n k = s.step\n if i is None:\n i = size - 1\n elif i < 0:\n i = i % size\n if j is None:\n j = -1\n elif -size - 1 < j < 0:\n j = j % size\n\n if i < -size or j < -size - 1:\n raise RuntimeError(\"Invalid slice %s\" % repr(s))\n\n k = -k\n pk = (int((j - i + k) / k)) * k + i\n j = i + 1\n i = pk % size\n\n# if j==size:\n# j = None\n return slice(i, j, k)", "def rev(s):\n if s is not Link.empty:\n yield from rev(s.rest)\n yield s.first", "def reverse(inlist, pos, length):\n inlist = deque(inlist)\n inlist.rotate(-pos)\n original = list(inlist)\n rever = original[0:length]\n rever.reverse()\n for position in range(0, length):\n original[position] = rever[position]\n inlist = deque(original)\n inlist.rotate(pos)\n return inlist", "def reverse_streamlines(sft: StatefulTractogram, reverse_ids: np.ndarray = None):\n if reverse_ids is None:\n reverse_ids = range(len(sft.streamlines))\n\n new_streamlines = [s[::-1] if i in reverse_ids else s for i, s in\n enumerate(sft.streamlines)]\n new_data_per_point = copy.deepcopy(sft.data_per_point)\n for key in sft.data_per_point:\n new_data_per_point[key] = [d[::-1] if i in reverse_ids else d for i, d\n in enumerate(new_data_per_point[key])]\n\n new_sft = StatefulTractogram.from_sft(\n new_streamlines, sft, data_per_point=new_data_per_point,\n data_per_streamline=sft.data_per_streamline)\n\n return new_sft" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a new instance. An instance contains the list of genes and a mapping from each gene to its position in the list.
def __init__(self, genes, sort=True):
    if sort == True:
        self.genes = sorted(genes)
    else:
        self.genes = genes

    self.ind = {k: i for i, k in enumerate(self.genes)}
    self.rev_ind = {i: k for i, k in enumerate(self.genes)}
    self.n = len(genes)
    self.ind_size = (self.n * (self.n - 1)) // 2
    self.i0_inds = [((2 * self.n - i_0 - 1) * i_0) // 2 for i_0 in range(self.n)]
[ "def __init__(self, genome):\n self.genome = []", "def __init__(self):\n self.genome = []\n self.fitness = 0", "def __init__(self, genes):\n \"\"\"Конструктор Bot - созданная Пользователем \"последовательность генов\" для отдельного бота\"\"\"\n\n self._genes = genes[:]", "def __init__(self, genotype, target):\n self.genotype = list(genotype)\n self.target = target", "def init_gene():\n \n gene_details = dict(chr = '', exons = [], gene_info = {}, id = '', is_alt_spliced = 0, name = '', source = '', start = '', stop = '', strand = '', transcripts = [])\n return gene_details", "def __init__(self, *gene_sizes, mode=\"real\", initial_genomes=\"random\", genomes=20):\n\n self.gene_sizes = gene_sizes\n self.number_of_genomes = genomes\n self.mode = mode\n\n if initial_genomes == \"random\":\n self.genomes = random_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n elif initial_genomes == \"zeros\":\n self.genomes = zero_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n elif isinstance(initial_genomes, type(lambda: 0)):\n self.genomes = initial_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n else:\n raise TypeError(\"initial_genomes must be 'zeros', 'random', or a function, not \" + str(initial_genomes))", "def __setUpGA(self):\n\t\tself.genome = G1DList.G1DList(self.numNeurons)\n\t\tself.genome.setParams(rangemin=-3.0, rangemax=3.0)\n\t\tself.genome.initializator.set(Initializators.G1DListInitializatorReal)\n\t\tself.genome.mutator.set(Mutators.G1DListMutatorRealGaussian)\n\t\tself.genome.crossover.set(Crossovers.G1DListCrossoverTwoPoint)\n\t\tself.genome.evaluator.set(self.scoreEvalFunc)\n\t\tself.ga = GSimpleGA.GSimpleGA(self.genome)\n\t\tself.ga.setMutationRate(0.05)\n\t\tself.ga.selector.set(Selectors.GRouletteWheel)\n\t\tself.ga.setElitism(False)\n\t\tself.ga.setPopulationSize(self.pandaNumber)\n\t\tself.ga.initialize()", "def __init__(self, name):\n self.name = name\n self.chromosomes = list()", "def __init__(self, chromosomes, sizes):\n sizeL = [long(x) for x in sizes]\n if len(chromosomes) != len(sizes):\n logging.error(\"list of chromosomes and lengths have to be the same length\")\n logging.error(chromosomes)\n logging.error(sizes)\n sys.exit(1)\n self.chromosomes = chromosomes\n self.__size = dict(zip(chromosomes, sizeL))\n self.__order = dict(zip(chromosomes, range(len(chromosomes))))\n self.__offsets = dict(zip(chromosomes, \n [sum(sizeL[0:i]) for i in range(len(sizeL))]))\n self.__offset_list = sorted(self.__offsets.items(), key = lambda x: x[1])", "def __init__(self, mapping=None):\n self.storage = {}\n\n if mapping is not None:\n for k, v in mapping.items():\n self.__setitem__(k, v)", "def setGenes(self, geneSets=None):\n # A consolidated master set containing all Gene objects\n self.genes = Genes.GeneSet()\n # A list of sets of genes, each set a potential cause of the reaction\n self.geneSets = []\n if geneSets is not None:\n # Make sure all the Gene objects are represented in the master set,\n # and that genes mentioned multiple times are represented by the same Gene object.\n for subSet in geneSets:\n self.geneSets.append(self.genes.recastSet(subSet))", "def init_value(self):\n genome = []\n for i in range(self.get_size()):\n gene = randint(0,1000)%2\n genome.append(gene)\n return genome", "def __init__(self):\n self._map = dict()", "def __init__(self, sex_genes=(X_CHROMOSOME, X_CHROMOSOME), mag_genes=(NMAG_CHROMOSOME, MAG_CHROMOSOME)):\n object.__init__(self)\n\n self.__sex_genes = sex_genes\n self.__mag_genes = mag_genes\n self.__age = 
0\n self.__children = 0\n self.__alive = True\n\n self.__DefineGender()\n self.__DefineMageStatus()\n self.__DefineMaxAge()", "def __initialize_current_generation(self):\n self.generation = []\n for chromosome in self.starting_gen:\n self.__add_chromosome(chromosome)\n self.__trim_generation(self.trim_first)\n self.generation_number = 1", "def __init__(self):\r\n self.individual = dict()\r\n self.family = dict()", "def __init__(self, topics: List[int]):\n base_topics = np.array(sorted(set(topics)))\n topics = base_topics.copy().reshape(-1, 1)\n self.mappings_ = np.hstack([topics.copy(), topics.copy()]).tolist()", "def create_population(self):\n global maxid\n self.population= []\n #.....0th individual is the initial guess if there is\n ind= Individual(0,self.ngene,self.murate,self.func,self.args)\n genes=[]\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)\n #.....other individuals whose genes are randomly distributed\n for i in range(self.nindv-1):\n ind= Individual(i+1,self.ngene,self.murate,self.func,self.args)\n maxid= i+1\n genes= []\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n #.....randomize by mutating with high rate\n g.mutate(0.25)\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)", "def __init__(self, submissions: list):\n self.submissions_generator = (Submission(submission) for submission in submissions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal function: gets a location, calculates the indexes of the genes, and returns the genes as a tuple.
def _calc_pair_genes(self, ind, return_inds=False):
    i_0 = bisect(self.i0_inds, ind) - 1
    i_1 = ind - self.i0_inds[i_0] + i_0 + 1
    if return_inds:
        return (i_0, i_1)
    return (self.get_genes_solo(i_0), self.get_genes_solo(i_1))
[ "def findGeneIndex(featureGenes, genes):\n indices = []\n missing = []\n for gene in featureGenes:\n if gene in genes:\n indices.append(genes.index(gene))\n else:\n missing.append(gene)\n return indices", "def accession_indices(geno_hdf, mac, locus_indices):\n # Pull out the genotypes of those loci for all 1135 accessions.\n selected_snps = np.array([geno_hdf['snps'][i] for i in locus_indices])\n # It is possible that the minor allele could be coded as a zero. If so, flip it to a one.\n loci_to_flip = mac[locus_indices] > 900\n selected_snps[loci_to_flip] = 1- selected_snps[loci_to_flip]\n # Index positions for accessions with the minor allele at each SNP. \n accession_indices = [np.where(i == 1)[0] for i in selected_snps]\n\n return accession_indices", "def getGenotypes(self,sample_idx=None,idx_start=None,idx_end=None,chrom=None,pos_start=None,pos_end=None,center=True,unit=True,impute_missing=False,snp_idx=None,windowsize=0):\n #position based matching?\n if (idx_start is None) and (idx_end is None) and ((pos_start is not None) & (pos_end is not None)):\n idx_start,idx_end=self.getGenoIndex(chrom=chrom,pos_start=pos_start,pos_end=pos_end,windowsize=windowsize)\n #index based matching?\n if (idx_start is not None) & (idx_end is not None):\n X = self.geno_matrix[:,idx_start:idx_end]\n elif snp_idx is not None:\n X = self.geno_matrix[:,snp_idx]\n else:\n X = self.geno_matrix[:,:]\n if sample_idx is not None:\n X=X[sample_idx]\n if impute_missing:\n X = du.imputeMissing(X,center=center,unit=unit)\n return X", "def test_mousegenes_get(self):\n pass", "def gen2ind(genotype):\r\n # For example, gen2ind([1,1,1,1,1,1,1,1]) = 255\r\n i = 0\r\n index = 0\r\n mg = len(genotype)\r\n while i < mg:\r\n index += genotype[i] * (2 ** (mg - i - 1))\r\n i += 1\r\n return int(index)", "def get_expression_info(gexp_file, process=None, delim=',', quote='\"', dump_file=None):\n if dump_file is not None and os.path.exists(dump_file):\n\tgexp, gene_to_idx, cell_line_to_idx = cPickle.load(open(dump_file))\n\treturn gexp, gene_to_idx, cell_line_to_idx\n #gene_to_values = {}\n f = open(gexp_file)\n reader = csv.reader(f, delimiter=delim, quotechar=quote)\n header = reader.next()\n #print len(header), header\n header = header[1:]\n cell_line_to_idx = dict([ (cell_line, i) for i, cell_line in enumerate(header) ])\n gene_to_idx = {}\n values_arr = []\n for i, row in enumerate(reader):\n\tgene = row[0]\n\tvalues = map(float, row[1:])\n\t#gene_to_values[gene] = values\n\tgene_to_idx[gene] = i\n\tvalues_arr.append(values)\n f.close()\n gexp = numpy.array(values_arr)\n if process is not None:\n\tif \"log2\" in process:\n\t gexp = numpy.log2(gexp)\n\tif \"z\" in process:\n\t gexp = (gexp - gexp.mean(axis=1)[:, numpy.newaxis]) / gexp.std(axis=1, ddof=1)[:, numpy.newaxis]\n\tif \"abs\" in process:\n\t gexp = numpy.abs(gexp)\n\t#print gexp.shape, gexp_norm.shape\n\t#print gexp[0,0], gexp_norm[0,0]\n\t#return gene_to_values, cell_line_to_idx\n if dump_file is not None:\n\tvalues = gexp, gene_to_idx, cell_line_to_idx\n\tcPickle.dump(values, open(dump_file, 'w')) \n return gexp, gene_to_idx, cell_line_to_idx", "def getGenoIndex(self,pos_start=None,pos_end=None,windowsize=0):\n if (pos_start is not None) & (pos_end is not None):\n assert pos_start[0]==pos_end[0], \"getGenoIndex only supports selection on a single chromosome\"\n I = self.position[\"chrom\"]==pos_start[0]\n I = I & (self.postion[\"pos\"]>=(pos_start[1]-windowsize)) & (self.position[\"pos\"]<(pos_end[1]+windowsize))\n I = sp.nonzero(I)[0]\n idx_start = I.min()\n 
idx_end = I.max()\n elif (chrom is not None):\n I = self.position[\"chrom\"]==chrom\n\n idx_start = I.min()\n idx_end = I.max()\n else:\n idx_start=None\n idx_end=None\n return idx_start,idx_end", "def get_first_genotype_index(self):\n return self.first_genotype_idx", "def _get_genotypes(self, gemini_variant, individual_objs):\n individuals = []\n for ind in individual_objs:\n index = ind.ind_index\n individuals.append(Genotype(\n sample_id=ind.ind_id,\n genotype=gemini_variant['gts'][index],\n case_id=ind.case_id,\n phenotype=ind.phenotype,\n ref_depth=gemini_variant['gt_ref_depths'][index],\n alt_depth=gemini_variant['gt_alt_depths'][index],\n depth=gemini_variant['gt_depths'][index],\n genotype_quality=gemini_variant['gt_quals'][index]\n ))\n\n return individuals", "def co_loc(sample,bedfile):\n s = bedfile[bedfile['sample']==sample]\n locs=[]\n parents = s['donor'].unique()\n for index,row in s.iterrows():\n locs.append([row['chr'],int(row['start']),int(row['end']),row['donor']])\n return locs,parents", "def gene_finder(self):\n\t\tseq = self.dna_seq\n\t\tstop_code = 'AAAAAAAAAATTTTTTTTTT' \n\t\tgenes = seq.split(stop_code.lower())\n\t\tgenes = [Sequence(gene) for gene in genes]\n\t\treturn(genes)", "def find_matches(samples, geno_db, unique):\n mapper = {}\n matches = {}\n for hash, offset in samples:\n mapper[hash] = offset\n for h in mapper.keys():\n for g in geno_db:\n if h in geno_db[g]:\n offset = geno_db[g][h]\n if g not in matches:\n matches[g] = [] \n matches[g].append((offset - mapper[h], offset, mapper[h])) \n diff_counter = {}\n largest = 0\n largest_count = 0\n geno_id = []\n for gid in matches:\n for tup in matches[gid]:\n diff_exact, offset, fan_time = tup\n diff = round(diff_exact/200) #round after exact matching to reference but before attempting to find consistent offsets on both strands\n if diff not in diff_counter:\n diff_counter[diff] = {}\n if gid not in diff_counter[diff]:\n diff_counter[diff][gid] = 0\n diff_counter[diff][gid] += 1\n if diff_counter[diff][gid] > largest_count:\n largest = diff\n largest_count = diff_counter[diff][gid]\n geno_id = [gid]\n elif diff_counter[diff][gid] == largest_count:\n geno_id.append(gid)\n if unique and len(geno_id) >1: \n return ([], -1, {})\n return (geno_id, largest_count, diff_counter)", "def get_genesets():\n return (\n parse_gmt_file(KEGG_GENESETS),\n parse_gmt_file(REACTOME_GENESETS),\n parse_gmt_file(WIKIPATHWAYS_GENESETS),\n )", "def buildLabelCoords( self, geo_ids):\n data = []\n row = []\n col = []\n for i, val in enumerate(geo_ids):\n gv = GEOvector(val, self.dictionary, self._conn, stopwords=self.stopwords)\n self.addVecToCoord(gv,i, data, row, col)\n return (data, row, col)", "def details(self):\n details = super().details()\n ret = query.response(self.db, \"SELECT chr,start,end FROM genes WHERE \" + self.where_sql())\n if ret.nrow() > 0: \n coords = ret.row(0)\n details['location'] = coords[0] + \":\" + str(coords[1]) + \"-\" + str(coords[2])\n #ret = query.singleton_response(self.db, \"SELECT COUNT(DISTINCT cpg) FROM cpgs WHERE \"+self.where_sql())\n ## the line above is correct but very slow so we created a table to store number of CpG sites per gene\n ret = query.singleton_response(self.db, \"SELECT nsites FROM gene_details WHERE \" + self.where_sql())\n details['CpG sites'] = ret.value()\n return details", "def __getitem__(self, index):\n return (self.grid.loc(index), self.values[index])", "def __init__(self, genes, sort=True):\n if sort == True:\n self.genes = sorted(genes)\n else:\n self.genes = genes\n\n 
self.ind = {k: i for i, k in enumerate(self.genes)}\n self.rev_ind = {i: k for i, k in enumerate(self.genes)}\n self.n = len(genes)\n self.ind_size = (self.n * (self.n - 1)) // 2\n self.i0_inds = [((2 * self.n - i_0 - 1) * i_0) //\n 2 for i_0 in range(self.n)]", "def indicesByPdgId(self,pdgIds,useAbs=True,indices=None):\n result = [ ]\n if type(pdgIds)==type(0):\n pdgIds_ = [ pdgIds ]\n else:\n pdgIds_ = pdgIds\n parts = self.genParts\n if indices!=None:\n parts = [ self.genParts[i] for i in indices ]\n for mp in parts:\n id = mp.particle.pdgId()\n if useAbs:\n id = abs(id)\n if id in pdgIds_:\n result.append(mp.index)\n return result", "def get_gene_index(self, query):\n if len(query) > 4 and query[:4] == 'ENSG':\n return np.nonzero(query == self.gene_ids)[0]\n else:\n return np.nonzero(query == self.gene_names)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function converts the multiclass labels into binary labels: healthy and malaria.
def convert_multiclass_lbl_to_binary_lbl(multiclass_labels):
    binary_label = []
    for tempLabel in multiclass_labels:
        if tempLabel == "healthy":
            binary_label.append("healthy")
        else:
            binary_label.append("malaria")
    return binary_label
[ "def binary_labels(labels, in_class, column=\"inregister\"):\n\n labels[column] = labels[column].apply(lambda paper: 1 if paper == in_class else 0)\n\n return labels", "def _convert_labels(self, y_user):\n print self._user2classif_lut\n return [self._user2classif_lut[x] for x in y_user]", "def convert_to_binary_labels(y):\n return y.isnull().map(lambda x: 0 if x else 1).values", "def label_to_onehot(labels):\n onehot_labels = []\n for label in labels:\n if label == 0:\n onehot_labels.append([1, 0])\n else:\n onehot_labels.append([0, 1])\n onehot_labels = np.array(onehot_labels)\n return onehot_labels", "def _onehot_labels(self, labels):\n labels_full = np.zeros((self.num_classes, self.num_rois))\n for idx, l in enumerate(labels):\n labels_full[int(l), idx] = 1\n return labels_full", "def convert_strings_to_one_hot(Y, classes):\n return label_binarize(Y, classes=classes)", "def convert_labels_to_binary_vector(labels):\n labels = labels.reshape((1, len(labels)))\n binary_mat = np.zeros((labels.size, labels.max()+1))\n binary_mat[np.arange(labels.size), labels] = 1\n return binary_mat", "def preprocess_labels(y, y_name=None, index=None, verbose=0):\n if y_name is None:\n y_name = \"y\"\n\n # Make sure that y is a series with correct index\n y = assure_pandas_series(y, index=index)\n\n # Warn if not binary labels\n if len(y.unique()) != 2:\n if verbose > 0:\n warnings.warn(\n f\"The labels in {y_name} contain {y.unique()} unique values. The features in probatus support\"\n f\" binary classification models, thus, the feature might not work correctly.\"\n )\n return y", "def to6classes(labels):\n res =np.zeros_like(labels, dtype=int)\n res[labels==1] = 1 #Ground\n res[labels==2] = 2 #buildings\n res[labels==3] = 3 #poles\n res[labels==4] = 3 #poles\n # res[labels==5] = 0 #trashcan\n # res[labels==6] = 0 #barriers\n res[labels==7] = 4 #Ground\n res[labels==8] = 5 #Ground\n res[labels==9] = 6 #Ground\n return res", "def string2binary(target_class_label='Active'):\n return lambda label: None if target_class_label == 'None' else 0 if target_class_label != label else 1", "def get_labels(self):\n return [\"0\",\"1\"]", "def encode_label(labels):\n y = np.zeros(len(labels))\n for i, l in np.ndenumerate(labels):\n if l == 'realDonaldTrump':\n y[i] = 1\n else:\n y[i] = 0\n return y", "def labels_to_one_hot(labels):\n new_labels = np.zeros((labels.shape[0], n_classes))\n new_labels[range(labels.shape[0]), labels] = np.ones(labels.shape)\n return new_labels", "def global_one_hot_labels(self, classes):\n all_labels = self.unique_labels()\n return [int(c in all_labels) for c in classes]", "def labels_to_one_hot(labels):\n new_labels = np.zeros((labels.shape[0], 100))\n new_labels[range(labels.shape[0]), labels] = np.ones(labels.shape)\n return new_labels", "def map_lbls_to_b(self, train_lbls):\r\n\r\n unique_lbls = self.unq_\r\n\r\n b = np.zeros((unique_lbls.shape[0], train_lbls.shape[0]))\r\n\r\n for l in range(0, train_lbls.shape[0]):\r\n for u in range(0, unique_lbls.shape[0]):\r\n if train_lbls[l] == unique_lbls[u]:\r\n b[u, l] = 1\r\n break\r\n\r\n return b", "def label_encoding(self):\n attributes_labels = np.concatenate((self.train_data.attributes.values,\n self.validation_data.attributes.values,\n self.test_data.attributes.values),\n axis=None)\n attributes_yt = self.mlb.fit_transform(attributes_labels)\n return {\n 'tr_act': torch.tensor(self.le.fit_transform(self.train_data.action.values), device=self.device),\n 'tr_att': torch.tensor(attributes_yt[0:len(self.train_data.attributes.values)], 
device=self.device),\n 'vd_act': torch.tensor(self.le.fit_transform(self.validation_data.action.values), device=self.device),\n 'vd_att': torch.tensor(attributes_yt[len(self.train_data.attributes.values): len(self.train_data.attributes.values) + len(\n self.validation_data.attributes.values)], device=self.device),\n 'tst_act': torch.tensor(self.le.fit_transform(self.test_data.action.values), device=self.device),\n 'tst_att': torch.tensor(attributes_yt[len(self.train_data.attributes.values) + len(self.validation_data.attributes.values):],\n device=self.device)\n }", "def convertMatsToLabel(labelsMat):\n labels = []\n labelsList = [\"entailment\", \"contradiction\", \"neutral\"]\n numSamples, _ = labelsMat.shape\n for idx in range(numSamples):\n sample = labelsMat[idx, :]\n label = labelsList[np.where(sample == 1.)[0][0]]\n labels.append(label)\n\n return labels", "def unify_labels(self, labels, labels_unique, code):\n condition = (labels == 1)\n index = np.where(condition)\n labels_unique[index] = code\n\n return labels_unique" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current filename without path (str).
def userFriendlyCurrentFile(self):
    if self.currentFile:
        return strippedName(self.currentFile)
    else:
        return ""
[ "def current_name():\n return os.path.basename(os.getcwd())", "def current_filename(self):\n return self.dr.fileName()", "def filename(self):\n fname = self.raw_filename\n if not isinstance(fname, text_type):\n fname = fname.decode('utf8', 'ignore')\n fname = normalize('NFKD', fname)\n fname = fname.encode('ASCII', 'ignore').decode('ASCII')\n fname = os.path.basename(fname.replace('\\\\', os.path.sep))\n fname = re.sub(r'[^a-zA-Z0-9-_.\\s]', '', fname).strip()\n fname = re.sub(r'[-\\s]+', '-', fname).strip('.-')\n return fname[:255] or 'empty'", "def getCurFileName(self) -> \"char const *\":\n return _coin.SoInput_getCurFileName(self)", "def get_filename(self) -> str:\n return self._filename", "def file_name(self) -> str:\n return pulumi.get(self, \"file_name\")", "def file_name(self):\n ret = self._get_attr(\"fileName\")\n return ret", "def filename(self):\n if self.type == 'literal':\n return self._message.filename\n return ''", "def filename_core (apath):\n if (apath is None): # sanity check\n return ''\n return os.path.basename(os.path.splitext(apath)[0])", "def workflow_filename():\n stacks = inspect.stack()\n frame = inspect.stack()[len(stacks) - 1]\n full_path = frame[0].f_code.co_filename\n filename, _ = os.path.splitext(os.path.basename(full_path))\n filename = argo_safe_name(filename)\n return filename", "def get_filename(self):\n return self.source.get_filename()", "def pathName(self, filename: str) -> str:\n x = self\n theDir = x.baseDirName()\n return g.finalize_join(theDir, filename) if theDir else ''", "def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)", "def getFilename(self) -> \"char const *\":\n return _coin.ScXMLDocument_getFilename(self)", "def get_filename(self):\r\n self.filename = self.history_lines[0].split('=')[1].rstrip()", "def filename(self):\n return f\"{self.sha}{self.extension}\"", "def GetFileName(self) -> \"char const *\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_GetFileName(self)", "def _get_filename(self) -> \"std::string\" :\n return _core.FileDialog__get_filename(self)", "def get_default_filename(cls) -> str:\n return cls.__open('default_filename')", "def cctFileName(self):\n p = os.path.basename(self.cctFilePath())\n return p" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a |QAction| that is mapped via methodName to the method to call.
def createMappedAction(self, icon, text, parent, shortcut, methodName):
    if icon is not None:
        action = QtGui.QAction(icon, text, parent, shortcut=shortcut,
                               triggered=self._actionMapper.map)
    else:
        action = QtGui.QAction(text, parent, shortcut=shortcut,
                               triggered=self._actionMapper.map)
    self._actionMapper.setMapping(action, methodName)
    return action
[ "def create_action(self, action_name, action_config):\n action_type = action_config['type']\n clz = Actions.get_action_class(action_type)\n action = clz()\n action.set_info(self.name, action_name, self.config)\n return action", "def addAction(self, *_args) -> Union[None, QAction]:\n if len(_args) == 1:\n super().addAction(_args[0])\n else:\n a = QAction(_args[1], _args[0], self)\n super().addAction(a)\n popMode = QToolButton.InstantPopup if len(_args) < 3 else _args[2]\n if self.mItems:\n button: SARibbonToolButton = self.mItems[-1].widget\n button.setPopupMode(popMode)\n return a", "def createAction(self, name, slot=None, shortcut=None, image=None, enabled=True):\n #logging.debug('Application: createAction() - ' + name)\n if image:\n image0 = QPixmap()\n image0.load(\":/resources/\" + image + \".svg\")\n action = QAction(QIcon(image0), name, self._window)\n else:\n action = QAction(name, self._window)\n action.setEnabled(enabled)\n if slot:\n self.connect(action, SIGNAL(\"triggered()\"), slot)\n if shortcut:\n if isinstance(shortcut, list):\n action.setShortcuts(shortcut)\n else:\n action.setShortcut(shortcut)\n return action", "def new_action(self, name):\n if not self.sub_actor:\n self.sub_actor = Actor(self)\n return self.sub_actor.new_action(name)", "def menuButton(workbench, base, cmd, btn, actions):\n menu = QtGui.QMenu(mw)\n menuList.append(menu)\n try:\n uid = cmd.split(\"CP_Menu_\", 1)[1]\n except IndexError:\n uid = \"No_UID\"\n menu.setObjectName(uid)\n g = cpc.findGroup(base, uid)\n if g and uid != \"No_UID\":\n commands = g.GetString(\"commands\")\n if commands:\n commands = commands.split(\",\")\n else:\n commands = []\n for cmd in commands:\n if cmd.startswith(\"CP_Menu\") or cmd.startswith(\"CP_Spacer\"):\n pass\n elif cmd == \"CP_Separator\":\n menu.addSeparator()\n elif cmd in actions:\n menu.addAction(actions[cmd])\n else:\n a = QtGui.QAction(menu)\n a.setEnabled(False)\n a.setText(cmd)\n a.setIcon(QtGui.QIcon(\":/icons/freecad\"))\n a.setToolTip(\"Command \" + cmd + \" is currently not available\")\n menu.addAction(a)\n # Set default action\n try:\n btn.setDefaultAction(menu.actions()[0])\n menu.setDefaultAction(menu.actions()[0])\n except IndexError:\n pass\n\n default = g.GetString(\"Default\")\n for a in menu.actions():\n if a.objectName() == default:\n btn.setDefaultAction(a)\n menu.setDefaultAction(a)\n\n # Add expand action\n data = \",\".join([workbench, uid, str(1)])\n\n e = QtGui.QAction(menu)\n e.setText(\"Expand\")\n e.setIcon(QtGui.QIcon(path + \"CommandPanelExpand.svg\"))\n e.setToolTip(\"Expand menu\")\n e.setData(data)\n\n menu.addSeparator()\n menu.addAction(e)\n\n mapperExpandCollapse.setMapping(e, data)\n e.triggered.connect(mapperExpandCollapse.map)\n\n mapperShow.setMapping(menu, data)\n menu.aboutToShow.connect(mapperShow.map)\n\n return menu", "def add_action(self, key, label, callback, tip=None, icon=None, \n shortcut=None, userdata=None, selection_callback=None): \n #TODO: Update doc to reflect final decision on userdata\n if icon is None:\n ac = QAction(tr(label), self)\n else:\n if not isinstance(icon, QIcon):\n if isinstance(icon, basestring) and not os.path.isfile(icon):\n sugg = os.path.dirname(__file__) + '/../images/' + icon\n if os.path.isfile(sugg):\n icon = sugg\n icon = QIcon(icon)\n ac = QAction(icon, tr(label), self)\n if shortcut is not None:\n ac.setShortcuts(shortcut)\n if tip is not None:\n ac.setStatusTip(tr(tip))\n if userdata is not None:\n ac.setData(userdata)\n if userdata is None:\n self.connect(ac, 
SIGNAL('triggered()'), callback)\n else:\n def callback_udwrap():\n callback(userdata)\n self.connect(ac, SIGNAL('triggered()'), callback_udwrap)\n self.actions[key] = ac\n if selection_callback is not None:\n self._action_selection_cbs[key] = selection_callback\n ac.setEnabled(False)\n return ac", "def moreCreateActions(self):\n # self.addLibrariesAction = QtHelper.createAction(self, \"&Add Libraries\", self.addLibraries, \n # icon = QIcon(\":/libraries.png\"), tip = 'Create a new set of library' )\n self.addLibraryAction = QtHelper.createAction(self, \"&Add Library\", self.addLibrary, \n icon = QIcon(\":/libraries.png\"), tip = 'Create new library' )\n # self.checkLibrariesAction = QtHelper.createAction(self, \"&Check\", self.checkSyntaxLibraries, \n # icon = QIcon(\":/libraries-check.png\"), tip = 'Check syntax of all libraries' )\n # self.setAsDefaultAction = QtHelper.createAction(self, \"&Set as Extra\", self.setLibraryAsDefault, \n # icon = None, tip = 'Set library as Extra' )\n # self.setAsGenericAction = QtHelper.createAction(self, \"&Set as Generic\", self.setLibraryAsGeneric, \n # icon = None, tip = 'Set library as Generic' )", "def call(self, action_name, container, instances=None, map_name=None, **kwargs):\n method_name = '{0}_actions'.format(action_name)\n action_method = getattr(self.get_policy(), method_name)\n if callable(action_method):\n return action_method(map_name or self._default_map, container, instances=instances, **kwargs)\n raise ValueError(\"The selected policy does not provide a method '{0}' for generating actions.\".format(method_name))", "def create_action(**args):\n kind = args['kind']\n if kind == NO_ACTION:\n return Action()\n elif kind == SLICE_CHANGED:\n return SliceChangedAction(args['val'])\n\n elif kind == ARCH_CP_CHANGED:\n return ArchCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == ARCH_CP_ADDED:\n return ArchCpAddedAction(args['cp'], args['index'])\n elif kind == ARCH_CP_REMOVED:\n return ArchCpRemovedAction(args['index'])\n\n elif kind == LEFT_CANAL_CP_CHANGED:\n return LeftCanalCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == RIGHT_CANAL_CP_CHANGED:\n return RightCanalCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == LEFT_CANAL_CP_ADDED:\n return LeftCanalCpAddedAction(args['cp'], args['index'])\n elif kind == RIGHT_CANAL_CP_ADDED:\n return RightCanalCpAddedAction(args['cp'], args['index'])\n elif kind == LEFT_CANAL_CP_REMOVED:\n return LeftCanalCpRemovedAction(args['index'])\n elif kind == RIGHT_CANAL_CP_REMOVED:\n return RightCanalCpRemovedAction(args['index'])\n\n elif kind == SIDE_VOLUME_CP_ADDED:\n return SideVolumeCpAddedAction(args['cp'], args['index'], args['pos'])\n elif kind == SIDE_VOLUME_CP_REMOVED:\n return SideVolumeCpRemovedAction(args['index'], args['pos'])\n elif kind == SIDE_VOLUME_CP_CHANGED:\n return SideVolumeCpChangedAction(args['curr'], args['prev'], args['index'], args['pos'])\n elif kind == SIDE_VOLUME_SPLINE_EXTRACTED:\n return SideVolumeSplineExtractedAction(args['pos'], args['from_pos'])\n elif kind == SIDE_VOLUME_SPLINE_RESET:\n return SideVolumeSplineResetAction(args['pos'])\n\n elif kind == TILTED_PLANES_ANNOTATION:\n return TiltedPlanesAnnotationAction()\n elif kind == DEFAULT_PLANES_ANNOTATION:\n return DefaultPlanesAnnotationAction()\n\n else:\n raise ValueError(\"kind not recognized\")", "def createMenu(self):\r\n self.menuFile = self.menuBar().addMenu(\"&File\")\r\n self.menuFile.addAction(self.actionQuit)\r\n 
self.menuFile.addAction(self.actionImportFile)\r\n self.menuFile.addAction(self.actionExportFile)\r\n\r\n self.menuContacts = self.menuBar().addMenu(\"&Contact\")\r\n self.menuContacts.addAction(self.actionNewContact)\r\n self.menuContacts.addAction(self.actionModContact)\r\n self.menuContacts.addAction(self.actionDelContact)\r\n self.menuContacts.addAction(self.actionDisplay)\r\n\r\n self.menuHelp = self.menuBar().addMenu(\"&?\")\r\n self.menuHelp.addAction(self.actionAbout)", "def create_default_actions(self):\n self.plugin_manager.create_actions()", "def add_action(self, action):\r\n self._plugins_menu.addAction(action)", "def add_action(self, name, func, \n duration=0.0, delay=0.0, update=False, loop=False, cleanup=None):\n\n self.action_list[name] = {\n 'func': func,\n 'duration': float(duration),\n 'delay': float(delay),\n 'update': bool(update),\n 'loop': bool(loop),\n 'cleanup': cleanup,\n 'elapsed': 0.0,\n 'started': False\n }", "def from_callable(func):\n return Command(\n name=Command.extract_name(func),\n usage=Command.extract_usage(func),\n brief=Command.extract_brief(func),\n description=Command.extract_description(func),\n )", "def _create_command_menu(self):\n f1 = urwid.Button('Jump', on_press=self.button_show_jump)\n f2 = urwid.Button('Sell', on_press=self.button_show_sell)\n f3 = urwid.Button('Buy', on_press=self.button_show_buy)\n f4 = urwid.Button('Upgrade', on_press=self.button_show_equip)\n f5 = urwid.Button('Galaxy', on_press=self.button_show_galaxy)\n f6 = urwid.Button('Locals', on_press=self.button_show_locals)\n f7 = urwid.Button('System', on_press=self.button_show_planet_info)\n f8 = urwid.Button('Market', on_press=self.button_show_market)\n f9 = urwid.Button('Status', on_press=self.button_show_status)\n f0 = urwid.Button('Cargo', on_press=self.button_show_cargo)\n buttons = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f0]\n buttons = (urwid.AttrMap(b, 'button') for b in buttons)\n menu = urwid.Columns(buttons)\n menu.focus_position = 8\n return menu", "def _create_action_features(self, action):\n return self.actions[action]", "def test_QToolBar_functions(qtbot):\n toolbar = QtWidgets.QToolBar()\n toolbar.addAction(\"QtPy with a shortcut\", QtGui.QKeySequence.UnknownKey)\n toolbar.addAction(\n QtGui.QIcon(),\n \"QtPy with an icon and a shortcut\",\n QtGui.QKeySequence.UnknownKey,\n )", "def create_action_chains(self):\n return ActionChains(self._selenium_web_driver())", "def action(func):\n assert_not_multiple_decorators(func, \"actions\")\n DecoratorBasedIdentifier.decorated_items[\"actions\"].add(func)\n return func", "def _context_menu_make(self, pos):\n menu = QtGui.QMenu(self)\n\n self.cut_action = menu.addAction('Cut', self.cut)\n self.cut_action.setEnabled(self.can_cut())\n self.cut_action.setShortcut(QtGui.QKeySequence.Cut)\n\n self.copy_action = menu.addAction('Copy', self.copy)\n self.copy_action.setEnabled(self.can_copy())\n self.copy_action.setShortcut(QtGui.QKeySequence.Copy)\n\n self.paste_action = menu.addAction('Paste', self.paste)\n self.paste_action.setEnabled(self.can_paste())\n self.paste_action.setShortcut(QtGui.QKeySequence.Paste)\n\n anchor = self._control.anchorAt(pos)\n if anchor:\n menu.addSeparator()\n self.copy_link_action = menu.addAction(\n 'Copy Link Address', lambda: self.copy_anchor(anchor=anchor))\n self.open_link_action = menu.addAction(\n 'Open Link', lambda: self.open_anchor(anchor=anchor))\n\n menu.addSeparator()\n menu.addAction(self.select_all_action)\n\n menu.addSeparator()\n menu.addAction(self.export_action)\n 
menu.addAction(self.print_action)\n\n return menu" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the Window menu.
def updateWindowMenu(self):
    self._windowMenu.clear()
    self._windowMenu.addAction(self._closeAct)
    self._windowMenu.addAction(self._closeAllAct)
    self._windowMenu.addSeparator()
    self._windowMenu.addAction(self._tileAct)
    self._windowMenu.addAction(self._cascadeAct)
    self._windowMenu.addSeparator()
    self._windowMenu.addAction(self._nextAct)
    self._windowMenu.addAction(self._previousAct)
    self._windowMenu.addAction(self._separatorAct)

    windows = self._mdiArea.subWindowList()
    self._separatorAct.setVisible(len(windows) != 0)

    for i, window in enumerate(windows):
        child = window.widget()

        text = "%d %s" % (i + 1, child.userFriendlyCurrentFile)
        if i < 9:
            text = '&' + text

        action = self._windowMenu.addAction(text)
        action.setCheckable(True)
        action.setChecked(child == self.activeMdiChild)
        action.triggered.connect(self._windowMapper.map)
        self._windowMapper.setMapping(action, window)
[ "def updateMenuAndWindowTitle(self):\n self.updateMenu()\n self.updateWindowTitle()", "def updateMenu(self):\n #logging.debug('Application: updateMenu()')\n if self.mainWindow().startupScreen():\n self.updateStartupScreen()\n # Recent files\n num_recent_files = min(len(self._recentFiles), self.MAX_VISIBLE_RECENT_FILES)\n for i in range(0, num_recent_files):\n filename = self._recentFiles[i]\n self._recentFilesMenuActions[i].setText(os.path.basename(filename))\n self._recentFilesMenuActions[i].setToolTip(filename)\n self._recentFilesMenuActions[i].setStatusTip(filename)\n self._recentFilesMenuActions[i].setData(QVariant(filename))\n self._recentFilesMenuActions[i].setVisible(True)\n \n for i in range(num_recent_files, self.MAX_VISIBLE_RECENT_FILES):\n self._recentFilesMenuActions[i].setVisible(False)\n \n if num_recent_files == 0:\n self._fileMenuItems['clearRecentFilesAction'].setEnabled(False)\n self._fileMenuItems['clearMissingRecentFilesAction'].setEnabled(False)\n else:\n self._fileMenuItems['clearRecentFilesAction'].setEnabled(True)\n self._fileMenuItems['clearMissingRecentFilesAction'].setEnabled(True)\n \n # Enabled / disable menu entries depending on number of open files\n at_least_one_flag = False\n at_least_two_flag = False\n if len(self.tabControllers()) > 1:\n at_least_one_flag = True\n at_least_two_flag = True\n elif len(self.tabControllers()) > 0:\n at_least_one_flag = True\n \n self._fileMenuItems['saveFileAction'].setEnabled(at_least_one_flag)\n self._fileMenuItems['saveFileAsAction'].setEnabled(at_least_one_flag)\n self._fileMenuItems['reloadFileAction'].setEnabled(at_least_one_flag)\n self._fileMenuItems['closeFileAction'].setEnabled(at_least_one_flag)\n \n self._fileMenuItems['saveAllFilesAction'].setEnabled(at_least_two_flag)\n self._fileMenuItems['closeAllAction'].setEnabled(at_least_two_flag)\n \n try:\n if at_least_one_flag:\n if not self.currentTabController().isEditable():\n self._fileMenuItems['saveFileAction'].setEnabled(False)\n self._fileMenuItems['saveFileAsAction'].setEnabled(False)\n if not self.currentTabController().isModified():\n self._fileMenuItems['saveFileAction'].setEnabled(False)\n \n # Copy / Cut / Paste\n copy_paste_enabled_flag = at_least_one_flag and self.currentTabController().isCopyPasteEnabled()\n self._editMenuItems['cutAction'].setEnabled(copy_paste_enabled_flag)\n self._editMenuItems['copyAction'].setEnabled(copy_paste_enabled_flag)\n self._editMenuItems['pasteAction'].setEnabled(copy_paste_enabled_flag)\n \n self._editMenuItems['selectAllAction'].setVisible(self.currentTabController().allowSelectAll())\n \n self._editMenuItems['findAction'].setEnabled(at_least_one_flag and self.currentTabController().isFindEnabled())\n \n # Undo / Redo\n undo_supported_flag = at_least_one_flag and self.currentTabController().supportsUndo()\n self._editMenuItems[\"undoAction\"].setEnabled(undo_supported_flag)\n self._editMenuItems[\"undoAction\"].setVisible(undo_supported_flag)\n self._editMenuItems[\"redoAction\"].setEnabled(undo_supported_flag)\n self._editMenuItems[\"redoAction\"].setVisible(undo_supported_flag)\n self.showPluginToolBar(self._undoToolBar, undo_supported_flag)\n \n if undo_supported_flag:\n undo_events = self.currentTabController().undoEvents()\n num_undo_events = min(len(undo_events), self.MAX_VISIBLE_UNDO_EVENTS)\n self._editMenuItems[\"undoAction\"].setEnabled(num_undo_events > 0)\n if num_undo_events > 1:\n self._editMenuItems[\"undoAction\"].setMenu(self._undoActionsMenu)\n else:\n 
self._editMenuItems[\"undoAction\"].setMenu(None)\n for i in range(0, num_undo_events):\n undo_event = undo_events[num_undo_events - i - 1] # iterate backwards\n self._undoMenuActions[i].setText(undo_event.LABEL)\n self._undoMenuActions[i].setToolTip(undo_event.description())\n self._undoMenuActions[i].setStatusTip(undo_event.description())\n self._undoMenuActions[i].setData(QVariant(i+1))\n self._undoMenuActions[i].setVisible(True)\n for i in range(num_undo_events, self.MAX_VISIBLE_UNDO_EVENTS):\n self._undoMenuActions[i].setVisible(False)\n \n redo_events = self.currentTabController().redoEvents()\n num_redo_events = min(len(redo_events), self.MAX_VISIBLE_UNDO_EVENTS)\n self._editMenuItems[\"redoAction\"].setEnabled(num_redo_events > 0)\n if num_redo_events > 1:\n self._editMenuItems[\"redoAction\"].setMenu(self._redoActionsMenu)\n else:\n self._editMenuItems[\"redoAction\"].setMenu(None)\n for i in range(0, num_redo_events):\n redo_event = redo_events[num_redo_events - i - 1] # iterate backwards\n self._redoMenuActions[i].setText(redo_event.LABEL)\n self._redoMenuActions[i].setToolTip(redo_event.description())\n self._redoMenuActions[i].setStatusTip(redo_event.description())\n self._redoMenuActions[i].setData(QVariant(i+1))\n self._redoMenuActions[i].setVisible(True)\n for i in range(num_redo_events, self.MAX_VISIBLE_UNDO_EVENTS):\n self._redoMenuActions[i].setVisible(False)\n \n except NoCurrentTabControllerException:\n pass", "def update(self):\n self.UI.update()", "def refresh_menu(self):\n if self.plugins:\n if self.current_view == DUMMY_NAME:\n new_view = self.get_views()[0]\n self.drop_menu.set_menu(new_view, *self.get_views())\n self.update_options_view(new_view)\n else:\n self.drop_menu.set_menu(self.current_view, *self.get_views())\n self.update_options_view(self.current_view)", "def menu(self):\n self.parent.switch_screen(\"Menu\")", "def onMenuShow(n):\n global currentMenu\n currentMenu = n", "def update(self):\n self.tk_gui.update()", "def display_menu(self) -> None:\n self.quit_button.display()\n self.restart_button.display()\n self.undo_move_button.display()\n self.save_log_button.display()", "def _setupMenues(self):\n\n\n self._menues[\"file\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&File',self._menues[\"file\"])\n\n\n\n\n\n\n\n self._actions[\"exit-faraday\"].addTo(self._menues[\"file\"]);\n self.menuBar().insertSeparator()\n\n\n self._menues[\"shell\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&Shell',self._menues[\"shell\"])\n self._actions[\"new_shell\"].addTo(self._menues[\"shell\"]);\n self._actions[\"close_shell\"].addTo(self._menues[\"shell\"]);\n self._actions[\"maximize-shell\"].addTo(self._menues[\"shell\"]);\n\n self.menuBar().insertSeparator()\n\n self._menues[\"edit\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&Edit',self._menues[\"edit\"])\n self._menues[\"edit\"].insertItem('&Copy', self._copy)\n self._menues[\"edit\"].insertItem('&Paste', self._paste)\n\n self._actions[\"repo-config\"].addTo(self._menues[\"edit\"]);\n\n self.menuBar().insertSeparator()\n\n\n self._menues[\"workspace\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&Workspace',self._menues[\"workspace\"])\n # self._actions[\"open-workspace\"].addTo(self._menues[\"workspace\"])\n self._actions[\"create-workspace\"].addTo(self._menues[\"workspace\"])\n\n\n\n self.menuBar().insertSeparator()\n\n\n self._menues[\"tools\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&Tools',self._menues[\"tools\"])\n 
self._actions[\"visualization\"].addTo(self._menues[\"tools\"]);\n\n self._actions[\"plugin\"].addTo(self._menues[\"tools\"]);\n self._actions[\"screenshot\"].addTo(self._menues[\"tools\"]);\n\n self.menuBar().insertSeparator()\n\n\n self._menues[\"view\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&View',self._menues[\"view\"])\n self._actions[\"toggle-hosttree\"].addTo(self._menues[\"view\"]);\n self._actions[\"toggle-logconsole\"].addTo(self._menues[\"view\"]);\n self._actions[\"maximize-shell\"].addTo(self._menues[\"view\"]);\n\n self.menuBar().insertSeparator()\n\n\n self._menues[\"help\"] = qt.QPopupMenu(self)\n self.menuBar().insertItem('&Help',self._menues[\"help\"])\n self._menues[\"help\"].insertItem('&About', self._showAboutDialog)\n self._actions[\"documentation\"].addTo(self._menues[\"help\"]);", "def _initMenu(self):\n #--- Menu Project ---#\n self.mi_newProject.setShortcut(\"Ctrl+Shift+N\")\n self.mi_newProject.triggered.connect(self.on_miNewProject)\n self.mi_loadProject.setShortcut(\"Ctrl+Shift+L\")\n self.mi_loadProject.triggered.connect(self.on_miLoadProject)\n #--- Menu Settings ---#\n self.mi_toolSettings.setShortcut(\"Ctrl+Shift+T\")\n self.mi_toolSettings.triggered.connect(self.on_miToolSettings)\n self.mi_projectSettings.setShortcut(\"Ctrl+Shift+P\")\n self.mi_projectSettings.triggered.connect(self.on_miProjectSettings)\n #--- Menu Help ---#\n #- Log Level\n for level in self.log.levels:\n menuItem = self.m_logLevel.addAction(level)\n menuItem.setCheckable(True)\n menuItem.triggered.connect(partial(self.on_miLogLevel, level))\n self.on_miLogLevel(self.log.level)\n #- Style\n for style in pQt.Style().styles:\n menuItem = self.m_style.addAction(style)\n menuItem.setCheckable(True)\n menuItem.triggered.connect(partial(self.on_miStyle, style))\n self.on_miStyle('darkGrey')", "def _add_menus(self):\r\n self.menu_bar.Append(self.mfile, \"&File\")\r\n self.menu_bar.Append(self.medit, \"&Edit\")\r\n self.menu_bar.Append(self.mview, \"&View\")", "def refresh_current_menu():\n debug_msg(\"Refreshing current menu.\")\n current_menu = get_current_menu()\n if current_menu:\n current_menu.refresh()\n return True\n else:\n return False", "def changeToMenu(self, menuname):\n\n if (menuname == \"MainMenu\"):\n self.db.setCurrentMenu(\"MainMenu\")\n self.centralWidget.setCurrentWidget(self.mainMenuWidget)\n\n elif (menuname == \"SelectAudioMenu\"):\n self.db.setCurrentMenu(\"SelectAudioMenu\")\n self.selectAudioMenuWidget = SelectAudioMenu(self)\n self.centralWidget.addWidget(self.selectAudioMenuWidget)\n self.centralWidget.setCurrentWidget(self.selectAudioMenuWidget)\n\n elif (menuname == \"PlayAudioMenu\"):\n self.db.setCurrentMenu(\"PlayAudioMenu\")\n self.playAudioMenuWidget = PlayAudioMenu(self)\n #Observer pattern register\n self.audioController.register(self.playAudioMenuWidget)\n self.centralWidget.addWidget(self.playAudioMenuWidget)\n self.centralWidget.setCurrentWidget(self.playAudioMenuWidget)\n\n elif (menuname == \"PlayRadioMenu\"):\n self.db.setCurrentMenu(\"PlayRadioMenu\")\n self.playRadioMenuWidget = PlayRadioMenu(self)\n # Observer pattern register\n self.audioController.register(self.playRadioMenuWidget)\n self.centralWidget.addWidget(self.playRadioMenuWidget)\n self.centralWidget.setCurrentWidget(self.playRadioMenuWidget)", "def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()", "def menu_bar(self):\n\n mv_logger.debug('MainView.menubar')\n\n color = [\"red\", \"green\", \"blue\", \"yellow\", \"cyan\", \"magenta\", 
\"white\", \"black\"]\n fonts = ['times 8', 'times 10', 'times 12', 'times 14', 'times 16', 'times 18', \n 'times 20', 'times 24', 'times 26', 'times 28', 'times 36', 'times 48']\n\n self.menubar = tk.Menu(self)\n self.dropdown_menu = tk.Menu(self.menubar, tearoff=0)\n self.font_color = tk.Menu(self.menubar)\n self.font_menu = tk.Menu(self.menubar)\n\n for c in color:\n self.dropdown_menu.add_command(label=c, command=lambda c=c: self.change_window('bg', c))\n self.font_color.add_command(label=c, command=lambda c=c: self.change_window('fg', c))\n\n\n for f in fonts:\n self.font_menu.add_command(label=f, command=lambda f=f: self.change_window('font', f))\n\n self.menubar.add_cascade(label='bg color', menu=self.dropdown_menu)\n self.menubar.add_cascade(label='Font size', menu=self.font_menu)\n self.menubar.add_cascade(label='Font color', menu=self.font_color)\n\n self.master.config(menu=self.menubar)", "def setMenu(self, menu):\n self.__menu = menu\n self.update()", "def _add_menu(self):\n self.menu_bar.set_menu()\n self.menu_bar.add_menu_action(\"Add\", self._show_create_library_dialog)\n self.menu_bar.add_menu_action(\"Exit\", self.close)", "def update(self):\n logging.info(\"Updating main window\")\n # uuid / save version\n self.ui.uuid_label.setText(self.player.get_uuid())\n self.ui.ver_label.setText(self.player.get_header())\n # name\n self.ui.name.setText(self.player.get_name())\n # race\n self.ui.race.setCurrentText(self.player.get_race(pretty=True))\n # pixels\n try:\n self.ui.pixels.setValue(self.player.get_pixels())\n except TypeError:\n logging.exception(\"Unable to set pixels widget\")\n # description\n self.ui.description.setPlainText(self.player.get_description())\n # gender\n getattr(self.ui, self.player.get_gender()).toggle()\n # game mode\n game_mode = self.player.get_game_mode()\n try:\n self.ui.game_mode.setCurrentText(self.assets.player().mode_types[game_mode])\n except KeyError:\n logging.exception(\"No game mode set on player\")\n\n # stats\n self.update_stat(\"health\")\n self.update_stat(\"energy\")\n\n # quests\n # TODO: re-enable when quests are supported again\n # can_edit_quests = \"quests\" in self.player.entity\n can_edit_quests = False\n self.ui.quests_button.setEnabled(can_edit_quests)\n\n # ship\n can_edit_ship = (\"shipUpgrades\" in self.player.entity and\n \"aiState\" in self.player.entity)\n self.ui.ship_button.setEnabled(can_edit_ship)\n\n # items\n total = 0\n progress = QProgressDialog(\"Updating item slots...\",\n None, 0, 11, self.window)\n\n progress.setWindowTitle(\"Updating...\")\n progress.setWindowModality(QtCore.Qt.ApplicationModal)\n progress.forceShow()\n progress.setValue(total)\n\n # equipment\n equip_bags = \"head\", \"chest\", \"legs\", \"back\"\n for bag in equip_bags:\n logging.debug(\"Updating %s\", bag)\n items = []\n for x in getattr(self.player, \"get_\" + bag)():\n if x is not None:\n items.append(ItemWidget(x[\"content\"], self.assets))\n else:\n items.append(ItemWidget(None, self.assets))\n\n getattr(self.ui, bag).setItem(0, 0, items[0])\n getattr(self.ui, bag).setItem(0, 1, items[1])\n total += 1\n progress.setValue(total)\n\n for bag in \"wieldable\", \"main_bag\", \"tile_bag\", \"object_bag\", \"action_bar\", \"essentials\", \"mouse\":\n self.update_bag(bag)\n total += 1\n progress.setValue(total)\n\n self.update_player_preview()", "def _add_menu_items(self):\r\n self.mfile.AppendItem(self.mf_close)\r\n self.mfile.AppendItem(self.mf_exit)\r\n\r\n self.medit.AppendItem(self.me_redraw)\r\n self.medit.AppendItem(self.me_pref)\r\n 
self.medit.AppendSeparator()\r\n self.medit.AppendItem(self.me_run)\r\n\r\n self.mview.AppendItem(self.mv_zoomfit)\r\n self.mview.AppendSeparator()\r\n\r\n self.mopts.AppendItem(self.mo_limits)\r\n self.mopts.AppendItem(self.mo_emails)", "def _fillFileMenu(self):\n logging.debug('Application: _fillFileMenu()')\n self._fileMenuItems = {}\n if not self._window.fileMenu().isEmpty():\n self._window.fileMenu().clear()\n \n # New\n newFileActions = []\n for plugin in self._plugins:\n newFileActions += plugin.getNewFileActions()\n \n if len(newFileActions) == 1:\n newFileActions[0].setShortcut('Ctrl+N')\n \n self._window.fileMenu().addActions(newFileActions) \n \n # Open\n openFileAction = self.createAction('&Open File', self.openFileDialog, 'Ctrl+O', \"fileopen\")\n self._window.fileMenu().addAction(openFileAction)\n self._window.fileToolBar().addAction(openFileAction)\n\n # Reload\n self._fileMenuItems['reloadFileAction'] = self.createAction('&Reload File', self.reloadFile, ['Ctrl+R', 'F5'], \"reload\")\n self._window.fileMenu().addAction(self._fileMenuItems['reloadFileAction'])\n #self._window.fileToolBar().addAction(self._fileMenuItems['reloadFileAction'])\n \n # Recent files\n if not hasattr(self, 'recentFilesMenu'):\n self._recentFilesMenu = QMenu('&Recent Files', self._window)\n self._recentFilesMenuActions = []\n for i in range(0, self.MAX_VISIBLE_RECENT_FILES):\n action = self.createAction(\"recent file \" + str(i), self.openRecentFileSlot)\n action.setVisible(False)\n self._recentFilesMenu.addAction(action) \n self._recentFilesMenuActions.append(action)\n self._recentFilesMenu.addSeparator()\n self._fileMenuItems['clearMissingRecentFilesAction'] = self.createAction(\"Clear missing files\", self.clearMissingRecentFiles)\n self._recentFilesMenu.addAction(self._fileMenuItems['clearMissingRecentFilesAction'])\n self._fileMenuItems['clearRecentFilesAction'] = self.createAction(\"Clear list\", self.clearRecentFiles)\n self._recentFilesMenu.addAction(self._fileMenuItems['clearRecentFilesAction'])\n \n self._window.fileMenu().addMenu(self._recentFilesMenu)\n\n self._window.fileMenu().addSeparator()\n \n # Close\n self._fileMenuItems['closeFileAction'] = self.createAction('&Close', self.closeFile, 'Ctrl+W', \"closefile\")\n self._window.fileMenu().addAction(self._fileMenuItems['closeFileAction'])\n \n # Close all\n self._fileMenuItems['closeAllAction'] = self.createAction('Close All', self.closeAllFiles, 'Ctrl+Shift+W', \"closefileall\") \n self._window.fileMenu().addAction(self._fileMenuItems['closeAllAction'])\n \n self._window.fileMenu().addSeparator()\n \n # Save\n self._fileMenuItems['saveFileAction'] = self.createAction('&Save', self.saveFile, 'Ctrl+S', \"filesave\") \n self._window.fileMenu().addAction(self._fileMenuItems['saveFileAction'])\n self._window.fileToolBar().addAction(self._fileMenuItems['saveFileAction'])\n \n # Save as\n self._fileMenuItems['saveFileAsAction'] = self.createAction('Save As...', self.saveFileAsDialog, 'Ctrl+Shift+S', image=\"filesaveas\") \n self._window.fileMenu().addAction(self._fileMenuItems['saveFileAsAction'])\n \n # Save all\n self._fileMenuItems['saveAllFilesAction'] = self.createAction('Save &All', self.saveAllFiles, \"Ctrl+Alt+S\", \"filesaveall\") \n self._window.fileMenu().addAction(self._fileMenuItems['saveAllFilesAction'])\n \n self._window.fileMenu().addSeparator()\n \n #editPreferencesAction = self.createAction('Preferences',self.editPreferences)\n #self._window.fileMenu().addAction(editPreferencesAction)\n # Exit\n exit = 
self.createAction('&Exit', self.exit, \"Ctrl+Q\", \"exit\") \n self._window.fileMenu().addAction(exit)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create status bar label.
def createStatusBarLabel(self, stretch=0):
    label = QtGui.QLabel()
    label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
    label.setLineWidth(2)
    self.statusBar().addWidget(label, stretch)
    return label
[ "def createStatusBar(self):\n # intialize status bar\n self.statusBar().showMessage('')\n self.status_mouse_pos = QLabel('')\n self.statusBar().addPermanentWidget(self.status_mouse_pos)\n # connect the scene pos changed to a function formatting self.status_mouse_pos\n self.nodeEditor.view.scenePosChanged.connect(self.onScenePosChanged)", "def _createStatusBar(self):\n self.statusbar = self.statusBar()\n self.statusbar.showMessage(\"Ready\", 3000)", "def createStatusBar(self):\n self.myStatusBar = QtWidgets.QStatusBar()\n\n rangestring = 'database data ranges from '\n rangestring += QtCore.QDate(self.appStatus.databaseRange_timestring[0]).toString(QtCore.Qt.DefaultLocaleShortDate)\n rangestring += ' to '\n rangestring += QtCore.QDate(self.appStatus.databaseRange_timestring[1]).toString(QtCore.Qt.DefaultLocaleShortDate)\n databaserangeinfo = QtWidgets.QLabel(rangestring)\n self.myStatusBar.addPermanentWidget(databaserangeinfo)\n\n self.setStatusBar(self.myStatusBar)\n\n # TODO show progress: percentage labeled data", "def status_bar(self):\n\n self.statusbar = self.CreateStatusBar()\n # Two sections of the status bar.\n # 0 | Updates and status messages.\n # 1 | Current open file name.\n self.statusbar.SetFieldsCount(2)\n # Ratio: 2 parts first section, 1 part second section, for size.\n self.statusbar.SetStatusWidths([-2, -1])\n self.statusbar.SetStatusText('Welcome to tmpNote.', 0)\n self.statusbar.SetStatusText('No open file.', 1)\n self.statusbar.Show()", "def statusbar(i,N,**kwargs):\n import time\n StartTime = kwargs.get(\"StartTime\",False)\n Title = kwargs.get(\"Title\",'')\n\n assert type(i)==int, \"i must be an int\"\n assert type(N)==int, \"N must be an int\"\n assert N>i, \"N must be greater than i\"\n assert N>0, \"N must be a positive integer\"\n assert i>=0, \"i must not be negative (can be zero)\"\n assert type(Title) == str, \"Title should be a string\"\n assert len(Title) <= 25, \"Title should be less than 25 characters\"\n if Title != '': Title = ' '*(25-len(Title)) + Title + ': '\n statusbar = Title +'[' + '\\u25a0'*int((i+1)/(N/50)) + '\\u25a1'*(50-int((i+1)/(N/50))) + '] '\n if StartTime != False:\n print(statusbar + '{0:1.1f}'.format((i+1)/N*100) + '% complete, ' + '{0:1.1f}'.format(time.time() - StartTime) + 'sec \\r', end='')\n else:\n print(statusbar + '{0:1.1f}'.format((i+1)/N*100) + '% complete \\r',end = '')", "def _make_label(self):\n label_text = \"{}: \".format(self.options_file.name)\n label = Label(self, text=label_text, justify=LEFT)\n label.grid(row=self.row, column=0, sticky=EW)", "def create_label(master=None, text=None):\n\treturn tk.Label(master=master, text=text, fg='#212121', bg='#CFD8DC')", "def createStatusBarIcon(self):\n icon = E5ClickableLabel()\n icon.setPixmap(\n UI.PixmapCache.getPixmap(\"tabManager.png\").scaled(16, 16))\n icon.setToolTip(self.tr(\"Show Tab Manager\"))\n icon.clicked.connect(lambda: self.raiseTabManager(icon))\n \n return icon", "def createStatusSection(self, master):\r\n self.statusFrame = ctl.frame(master, tk.X, 0, 0)\r\n self.statusFrame.pack(fill=tk.X, side=tk.BOTTOM, anchor=tk.S)\r\n # Add label into the layout\r\n self.style.layout('text.Horizontal.TProgressbar',\r\n [('Horizontal.Progressbar.trough',\r\n {\r\n 'children': [('Horizontal.Progressbar.pbar',\r\n {'side': 'left', 'sticky': 's'})],\r\n 'sticky': 'swe'\r\n }),\r\n ('Horizontal.Progressbar.label', {'sticky': 'we'})])\r\n self.style.configure('text.Horizontal.TProgressbar', font=ctl.FONT_REGULAR)\r\n self.progressBar = 
tk.ttk.Progressbar(master=self.statusFrame,\r\n orient=tk.HORIZONTAL,\r\n style='text.Horizontal.TProgressbar',\r\n length=101,\r\n mode='determinate',\r\n value=0,\r\n maximum=101)\r\n self.progressBar.pack(fill=tk.X)\r\n self.updateStatus('Open an image using the magnifying button and click Play')", "def statusbar(i,N,**kwargs):\n\timport time\n\tfrom scipy import interpolate\n\timport numpy as np\n\tStartTime = kwargs.get(\"StartTime\",False)\n\tTitle = kwargs.get(\"Title\",'')\n\tglobal time_array\n\tglobal TimeLeft\n\tassert type(i)==int, \"i must be an int\"\n\tassert type(N)==int, \"N must be an int\"\n\tassert N>i, \"N must be greater than i\"\n\tassert N>0, \"N must be a positive integer\"\n\tassert i>=0, \"i must not be negative (can be zero)\"\n\tassert type(Title) == str, \"Title should be a string\"\n\tassert len(Title) <= 22, \"Title should be less than 25 characters\"\n\tif Title != '' : Title = ' '*(22-len(Title)) + Title + ' : '\n\tstatusbar = Title +'[' + '\\u25a0'*int((i+1)/(N/50)) + '\\u25a1'*(50-int((i+1)/(N/50))) + '] '\n\tTimeBreak = abs\n\tif StartTime != False:\n\t\tif i==0:\n\t\t\ttime_array = []\n\t\t\tTimeLeft = '--'\n\t\telif i==int(0.02*N):\n\t\t\ttime_array.append(time.time()-StartTime)\n\t\t\tTimeLeft = '{0:1.1f}'.format(time_array[-1]*(N/(i+1)))\n\t\telif i%int(0.02*N)==0:\n\t\t\ttime_array.append(time.time()-StartTime)\n\t\t\tTimeLeft = '{0:1.1f}'.format(float(interpolate.interp1d(np.arange(len(time_array)),time_array,fill_value='extrapolate')(49))-time_array[-1])\n\t\tprint(statusbar + '{0:1.1f}'.format((i+1)/N*100) + '% complete, ' + '{0:1.1f}'.format(time.time() - StartTime) \\\n\t\t\t+ 'sec, (est. ' + TimeLeft,' sec left)\t\t\\r', end='')\n\telse:\n\t\tprint(statusbar + '{0:1.1f}'.format((i+1)/N*100) + '% complete \\r',end = '')", "def status_label(incident):\n opts = {Incident.OPEN: 'warning', Incident.RESOLVED: 'success'}\n return mark_safe(\n '<span class=\"label label-%s\"> %s </span>'\n % (opts[incident.status], incident.get_status_display().upper())\n )", "def OnStatusBarUpdate(self, message):\n #print message\n self.statusBar_main.SetStatusText(message.data)", "def show_message(self, msg):\n self.statusbar.SetLabel(msg)", "def _createRightLabel(self):\n self.label_R = Label(self.frame_in_text_R, text=self.current_seq_len, bg=\"#f5feff\")\n self.label_R.pack(padx=60, side=RIGHT)\n return self.label_R", "def label(x,y,w,h,text,fontAlign=\"left\"):\n ob = EdmObject(\"Static Text\")\n ob.setDimensions(w,h)\n ob.setPosition(x,y)\n ob[\"font\"]=quoteString(\"arial-medium-r-10.0\")\n ob[\"fgColor\"]=ob.Colour[\"Black\"]\n ob[\"useDisplayBg\"] = True\n ob[\"value\"] = quoteListString(text) \n ob[\"fontAlign\"] = quoteString(fontAlign) \n return ob", "def _set_status_message(self, value, fg=\"Black\"):\n self.__log.call(value, fg=fg)\n self._status_label.config(text=value)\n _styled(self._status_label, foreground=fg)", "def configureWelcomeLabel(self, parent):\n welcome_label = Label(parent, text = \"Welcome to the Herbarium!\", bg = 'orchid')\n welcome_label.config(font = (\"georgia\", 20, \"italic bold\"))\n welcome_label.pack()\n return welcome_label", "def updateStatus(self, text):\r\n self.style.configure('text.Horizontal.TProgressbar', text=' ' + text)\r\n self.progressTextFormat = ' ' + text + ' {0}%'\r\n print(text)", "def _update_status_bar(self, message):\n self.window().status_bar = message", "def update_label():\n \n # add code here to update the label_var variable (which is displayed in our label)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle synchronized subwindow panning.
def toggleSynchPan(self):
    if self._synchPanAct.isChecked():
        self.synchPan(self.activeMdiChild)
[ "def toggle_pan(self, pan_off):\n if pan_off:\n self.pan_image = False\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Pan Image'], False)\n self.view.canvas.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n self.view.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n else:\n self.pan_image = True", "def toggleSingleBeamPlot(self):\n if self.sb_dock.isVisible(): self.sb_dock.hide()\n else: self.sb_dock.show()", "def toggle_zoom(self):\r\n #TODO! zooming\r\n logging.debug('toggle \"single shot\" zoom')\r\n #aktiviraj zoomiranje\r\n self.zoomSelector.set_active(True)\r\n #ako postoji span selector, disable radi konfilikta sa ljevim klikom\r\n if self.spanSelector != None:\r\n self.spanSelector.visible = False", "def toggleMultiBeamPlot(self):\n if self.mb_dock.isVisible(): self.mb_dock.hide()\n else: self.mb_dock.show()", "def toggle_zoom(self, zoom_off):\n if zoom_off:\n self.zoom = False\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Zoom In'], False)\n self.view.canvas.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n self.view.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n self.view.toggle_selector.set_active(False)\n else:\n self.zoom = True", "def toggle_snap_to_grid():\r\n pass", "def change_pan_direction(self):\n self.neck_pan_delta *= -1", "def on_pan_image_menu(self, event):\n if not self.pan_image:\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Pan Image'], True)\n else:\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Pan Image'], False)\n self.on_pan_image(event)", "def toggle_snap_to_pivot():\r\n pass", "def toggleAxis(origin=bool, view=bool):\n pass", "def _setSubmode(self, mid):\n self._current_submode = mode = self._modes[mid]\n mode.qa.setChecked(True)\n # hide submodes if any\n for mm in self._all_submodes:\n mm.qa.setShortcuts([])\n # set F4 shortcut to next submode\n if len(self._valid_submodes) > 1:\n for i, mm in enumerate(self._valid_submodes):\n if mm is mode:\n self._valid_submodes[(i + 1) % len(self._valid_submodes)].qa.setShortcut(Qt.Key_F4)\n break\n self.setMouseMode.emit(mode)", "def toggle_scroll(self):\n if self.scroll == 'Man Scroll':\n self.scroll = 'Auto Scroll'\n sys.stdout.autoscroll = False\n else:\n self.scroll = 'Man Scroll'\n sys.stdout.autoscroll = True \n\n self.b_scroll.config(text=self.scroll)", "def ChangeGestureMode(self,parameter):\r\n if (self.gestureButton.GetActive() == True):\r\n self.ada.Notify('gestureModeStarted',True)\r\n self.gestureButton.SetLabel('Deactivate')\r\n else:\r\n self.ada.Notify('gestureModeStarted',False)\r\n self.gestureButton.SetLabel('Activate')", "def unzoom(self):\n\n self.button_zoom.setText(\"zoom\")\n self.zooming = False\n self.select_zooming = False\n self.zoom_point = QPoint()\n\n self.button_zoom.clicked.connect(self.zoom_image)\n\n self.load_current_frame()\n self.update()", "def pan_gesture(self, dx, dy):\n return False", "def toggle_zoom_to_selection(self):\n selected = self.scene().selectedItems()\n if selected and 'follow_selection' != self.zoom_mode:\n # Show the selection\n self.zoom_mode = 'follow_selection'\n self.zoom_to_items(selected)\n elif 'whole_scene' != self.zoom_mode:\n # Either no selection and/or currently in 'fixed' or\n # 'follow_selection' - show the whole image\n self.zoom_home()\n else:\n # Apply a mild fixed zoom\n self.zoom_mode = 'fixed'\n self.new_relative_zoom(4.0)", "def do_pan_view(self, dx, dy):\n auto = self.autoReplot()\n self.setAutoReplot(False)\n axes_to_update = self.get_axes_to_update(dx, dy)\n axis_ids_vertical = (self.get_axis_id(\"left\"), 
self.get_axis_id(\"right\"))\n\n for (x1, x0, _start, _width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n delta = x1 - x0\n vmin = self.invTransform(axis_id, i_lbound - delta)\n vmax = self.invTransform(axis_id, i_hbound - delta)\n # patch for not zooming into \"negative space\" ;) :\n if axis_id in axis_ids_vertical:\n vmin = 0\n if vmax < 0:\n vmax = -vmax\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)", "def toggle_minimize(self) -> None:", "def toggle_control_panel(self):\n\n control_panel = self.parent.sim_tab.control_panel\n hbox = self.parent.sim_tab.hbox\n\n if self.cp.IsChecked():\n hbox.Show(control_panel)\n hbox.Layout()\n else:\n hbox.Hide(control_panel)\n hbox.Layout()", "def toggle_fullscreen(self) -> None:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle synchronized subwindow zooming.
def toggleSynchZoom(self):
    if self._synchZoomAct.isChecked():
        self.synchZoom(self.activeMdiChild)
[ "def toggle_zoom(self):\r\n #TODO! zooming\r\n logging.debug('toggle \"single shot\" zoom')\r\n #aktiviraj zoomiranje\r\n self.zoomSelector.set_active(True)\r\n #ako postoji span selector, disable radi konfilikta sa ljevim klikom\r\n if self.spanSelector != None:\r\n self.spanSelector.visible = False", "def toggle_zoom(self, zoom_off):\n if zoom_off:\n self.zoom = False\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Zoom In'], False)\n self.view.canvas.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n self.view.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n self.view.toggle_selector.set_active(False)\n else:\n self.zoom = True", "def zoom_image(self):\n\n self.select_zooming = True\n self.button_zoom.setText(\"Unzoom\")\n self.button_zoom.clicked.connect(self.unzoom)", "def unzoom(self):\n\n self.button_zoom.setText(\"zoom\")\n self.zooming = False\n self.select_zooming = False\n self.zoom_point = QPoint()\n\n self.button_zoom.clicked.connect(self.zoom_image)\n\n self.load_current_frame()\n self.update()", "def toggle_zoom_to_selection(self):\n selected = self.scene().selectedItems()\n if selected and 'follow_selection' != self.zoom_mode:\n # Show the selection\n self.zoom_mode = 'follow_selection'\n self.zoom_to_items(selected)\n elif 'whole_scene' != self.zoom_mode:\n # Either no selection and/or currently in 'fixed' or\n # 'follow_selection' - show the whole image\n self.zoom_home()\n else:\n # Apply a mild fixed zoom\n self.zoom_mode = 'fixed'\n self.new_relative_zoom(4.0)", "def zoom_out_action(self) -> None:\n self.grview.scale(0.8, 0.8)", "def zoom(self, mode):\n\n if mode == \"out\":\n self.scale -= 0.1\n elif mode == \"in\":\n self.scale += 0.1\n else:\n self.scale = 1\n\n self.scale = round(self.scale, 1)\n self.update_window()", "def zoomIn(self):\n interval_size_removed = (self.mainCursor.max - self.mainCursor.min) / self.ZOOM_STEP\n\n # update the new visible interval\n if self.mainCursor.max - interval_size_removed > self.mainCursor.min + interval_size_removed:\n self.mainCursor.max -= interval_size_removed\n self.mainCursor.min += interval_size_removed\n\n self.graph()", "def zoomTo(self):\n if self.isZooming():\n # If we are already in the process of zooming we don't want to\n # initiate another zoom.\n return\n elif self._zoomMouseOver is None:\n # The mouse pointer is not over any zoomable.\n return\n else:\n self._zoomToNodePath(self._zoomMouseOver.np)\n self.focus = self._zoomMouseOver\n messager.send('zooming to znode',self._zoomMouseOver)", "def toggleSingleBeamPlot(self):\n if self.sb_dock.isVisible(): self.sb_dock.hide()\n else: self.sb_dock.show()", "def can_zoom(self): \r\n return False", "def set_zoom(self,zoom):\n self.image.set_zoom(zoom)", "def updateZoomRegion(self, oscilogram_update=True):\n # to avoid multiple recursive calls at this method because\n # the signal raised by the zoom region widgets of osc and spec widgets\n if self.zoom_update_in_progress:\n return\n\n # avoid multiples recursion calls when update the zoom regions in widgets\n self.zoom_update_in_progress = True\n\n osc_min_x, osc_max_x = self.axesOscilogram.gui_user_tool.zoomRegion.getRegion()\n spec_min_x, spec_max_x = self.axesSpecgram.gui_user_tool.zoomRegion.getRegion()\n\n # translation of spectrograms coords into oscilogram coords\n spec_min_x, spec_max_x = self.from_spec_to_osc(spec_min_x), self.from_spec_to_osc(spec_max_x)\n\n min_x_spec = self.from_osc_to_spec(osc_max_x)\n max_x_spec = self.from_osc_to_spec(osc_min_x)\n\n if abs(osc_max_x - spec_max_x) > 1 or abs(osc_min_x - 
spec_min_x) > 1:\n if oscilogram_update and (min_x_spec != spec_min_x or max_x_spec != spec_max_x):\n self.axesSpecgram.gui_user_tool.zoomRegion.setRegion([min_x_spec, max_x_spec])\n self.signalIntervalSelected.emit(osc_min_x, osc_max_x)\n\n elif not oscilogram_update and (spec_min_x != osc_min_x or spec_max_x != osc_max_x):\n self.axesOscilogram.gui_user_tool.zoomRegion.setRegion([spec_min_x, spec_max_x])\n self.signalIntervalSelected.emit(spec_min_x, spec_max_x)\n\n self.zoom_update_in_progress = False", "def toggleMultiBeamPlot(self):\n if self.mb_dock.isVisible(): self.mb_dock.hide()\n else: self.mb_dock.show()", "def zoomOut(self):\n interval_size_added = max(1, (self.mainCursor.max - self.mainCursor.min) / self.ZOOM_STEP)\n\n # update the max interval limit\n if (self.mainCursor.max + interval_size_added) < self.signal.length-1:\n self.mainCursor.max += interval_size_added\n else:\n self.mainCursor.max = self.signal.length-1\n\n # update the min interval limit\n if self.mainCursor.min - interval_size_added >= 0:\n self.mainCursor.min -= interval_size_added\n else:\n self.mainCursor.min = 0\n\n self.graph()", "def updateZoomRegionsLimits(self):\n if self.selectedTool != Tools.ZoomTool:\n return\n\n min_limit, max_limit = self.mainCursor.min, self.mainCursor.max\n # set the limits of the zoom regions to the length of the signal\n self.axesOscilogram.gui_user_tool.zoomRegion.setBounds((min_limit, max_limit))\n\n rgn = self.axesOscilogram.gui_user_tool.zoomRegion.getRegion()\n # if the zoom region is the complete interval of visualization\n # clear the zoom tool region\n if rgn == (min_limit, max_limit):\n self.axesOscilogram.gui_user_tool.zoomRegion.setRegion((min_limit, min_limit))\n\n # do not update the spectrogram cursors directly because the oscilogram\n # zoom region setBounds raise the signal of changed if have to and then\n # the spec zoom region would be updated by the connections made\n self.updateSpecZoomRegion()", "def reset_zoom_and_center(self):\n self._send_to_ztv('reset-zoom-and-center')", "def zoom_in(self, x, y):\n\n pass", "def zoom_home(self):\n debug_print('BoxesView.zoom_home')\n self.zoom_mode = 'whole_scene'\n self.fitInView(self.scene().sceneRect(), Qt.KeepAspectRatio)", "def _autozoom_in(self, lvl, p1lat, p1lon, p2lat, p2lon):\n if ( ( self._visible_marker(p1lat, p1lon)\n and self._visible_marker(p2lat, p2lon) )\n and lvl < 18 ):\n lvl += 1\n self.osm.set_zoom(lvl)\n GLib.timeout_add(int(50), self._autozoom_in, lvl,\n p1lat, p1lon, p2lat, p2lon)\n else:\n GLib.timeout_add(int(50), self._autozoom_out, lvl,\n p1lat, p1lon, p2lat, p2lon)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Activate current subwindow's System Menu.
def activateSubwindowSystemMenu(self):
    activeSubWindow = self._mdiArea.activeSubWindow()
    if activeSubWindow:
        activeSubWindow.showSystemMenu()
[ "def menu(self):\n self.parent.switch_screen(\"Menu\")", "def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()", "def show():\n\tif checkOpen()==0:\n\t\tnotify(\"File explorer not open. Say \\\"start\\\" to open a new file explorer or \\\"exit\\\" to end this session.\")\n\t\tprint \"File explorer not open\"\n\t\treturn #window not open\n\tsubprocess.call([\"xdotool\", \"windowactivate\", str(use[0])])", "def win_activate(title, text=u''):\r\n if not isinstance(title, basestring):\r\n _audll.AU3_WinActivateByHandle(title)\r\n else:\r\n _audll.AU3_WinActivate(title, text)", "def updateMenuAndWindowTitle(self):\n self.updateMenu()\n self.updateWindowTitle()", "def initVellsContextMenu (self):\n # skip if no main window\n if not self._mainwin:\n return;\n self.log_switch_set = False\n if self._menu is None:\n self._menu = QMenu(self._mainwin);\n# self.connect(self._menu,Qt.SIGNAL(\"activated(int)\"),self.update_vells_display);\n self._menu.aboutToShow.connect(self.addVellsMenu)\n self.add_basic_menu_items()", "def home(self):\n self.window.show_view(Menu())", "def accessoriesMenu():\n pref = QtGui.QAction(mw)\n pref.setText(\"Command panel\")\n pref.setObjectName(\"CommandPanel\")\n pref.triggered.connect(onPreferences)\n try:\n import AccessoriesMenu\n AccessoriesMenu.addItem(\"CommandPanel\")\n except ImportError:\n a = mw.findChild(QtGui.QAction, \"AccessoriesMenu\")\n if a:\n a.menu().addAction(pref)\n else:\n mb = mw.menuBar()\n action = QtGui.QAction(mw)\n action.setObjectName(\"AccessoriesMenu\")\n action.setIconText(\"Accessories\")\n menu = QtGui.QMenu()\n action.setMenu(menu)\n menu.addAction(pref)\n\n def addMenu():\n \"\"\"Add accessories menu to the menu bar.\"\"\"\n mb.addAction(action)\n action.setVisible(True)\n\n addMenu()\n mw.workbenchActivated.connect(addMenu)", "def click_menu(self):\n pass", "def open_admin_side_menu(self):\n self.click_on_element_by_css(adpl.ADMIN_SIDE_NAVIGATION_MENU)", "def user32_SetMenu(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"hMenu\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def onMenuShow(n):\n global currentMenu\n currentMenu = n", "def change_focus(window):\n xdotool('windowactivate', window)", "def open(self):\n sublime.active_window().show_quick_panel(self.items, self.select, sublime.MONOSPACE_FONT)", "def attach_sysutil_menu(menu_bar,screen_def): \n utilities_menu = Menu(menu_bar) \n menu_bar.add_cascade(label=\"Utilities\", menu=utilities_menu) \n\n #--------------------------------------------------------------\n # Create the Tablespace dropdown menu \n #\n tablespaces_menu = Menu(utilities_menu)\n utilities_menu.add_cascade(label=\"Tablespaces\" ,menu = tablespaces_menu)\n # Add menu items to the Tablespaces menu \n tablespaces_menu.add_command(label=\"Dba_Tablespaces\",\n command=lambda :\n display_window(screens.get_screen_def('DBA_Tablespaces')))\n attach_tblspc_util_menu(tablespaces_menu, screen_def)\n #--------------------------------------------------------------\n # Create the DML locks dropdown menu \n #\n DML_locks_menu = Menu(utilities_menu)\n utilities_menu.add_cascade(label=\"Locks\" , menu = DML_locks_menu)\n DML_locks_menu.add_command(label=\"DML Locks\",\n command=lambda :\n display_window(screens.get_screen_def('DML_Locks')))\n DML_locks_menu.add_command(label=\"Blocking Locks\",\n command=lambda :\n display_window(screens.get_screen_def('BlockingLocks')))\n \n # Add the DBA Registry selection to the Utilities 
menu\n utilities_menu.add_command(label=\"DBA Registry\",\n command=lambda :\n display_window(screens.get_screen_def('DBA_Registry')))\n\n #--------------------------------------------------------------\n # Create the Events dropdown menu \n #\n events_menu = Menu(utilities_menu)\n utilities_menu.add_cascade(label=\"Events\" , menu = events_menu)\n events_menu.add_command(label=\"All System Events\",\n command=lambda :\n display_window(screens.get_screen_def('SysEvents'))) \n events_menu.add_command(label=\"System Events Percentages\",\n command=lambda :\n display_window(screens.get_screen_def('SysEventsPercentages')))\n \n #--------------------------------------------------------------\n # Create the Logins dropdown menu \n #\n logins_menu = Menu(utilities_menu)\n utilities_menu.add_cascade(label=\"Logins\" , menu =logins_menu) \n logins_menu.add_command(label=\"Failed Logins\",\n command=lambda :\n display_window(screens.get_screen_def('FailedLogins'))) \n logins_menu.add_command(label=\"Invalid Logins\",\n command=lambda :\n display_window(screens.get_screen_def('InvalidLogins')))\n \n #--------------------------------------------------------------\n # Create the Alert Log dropdown menu \n # \n alert_log_menu = Menu(utilities_menu)\n utilities_menu.add_cascade(label=\"Alert Log\" , menu =alert_log_menu)\n # The first parameter passed to the display_alert_log function is the\n # screen_def['name'] of either the alert messages or alert errors screen.\n alert_log_menu.add_command(label=\"Messages\",\n command=lambda :\n display_alert_log('AlertLogMsgs',screen_def)) \n alert_log_menu.add_command(label=\"Errors\",\n command=lambda :\n display_alert_log('AlertLogErrors',screen_def))", "def mode_start(self, **kwargs):\n self.add_mode_event_handler(\"show_mainmenu\", self.show_menu)", "def return_to_main_menu(self):\n self.manager.return_to_main_menu()", "def _install_menu():\n from ..tools import (\n creator,\n publish,\n workfiles,\n loader,\n sceneinventory\n )\n\n # Create menu\n menubar = nuke.menu(\"Nuke\")\n menu = menubar.addMenu(api.Session[\"AVALON_LABEL\"])\n\n label = \"{0}, {1}\".format(\n api.Session[\"AVALON_ASSET\"], api.Session[\"AVALON_TASK\"]\n )\n context_action = menu.addCommand(label)\n context_action.setEnabled(False)\n\n menu.addSeparator()\n menu.addCommand(\"Create...\",\n lambda: creator.show(parent=get_main_window()))\n menu.addCommand(\"Load...\",\n lambda: loader.show(parent=get_main_window(),\n use_context=True))\n menu.addCommand(\"Publish...\",\n lambda: publish.show(parent=get_main_window()))\n menu.addCommand(\"Manage...\",\n lambda: sceneinventory.show(parent=get_main_window()))\n\n menu.addSeparator()\n menu.addCommand(\"Work Files...\",\n lambda: workfiles.show(\n os.environ[\"AVALON_WORKDIR\"],\n parent=get_main_window())\n )\n\n menu.addSeparator()\n menu.addCommand(\"Reset Frame Range\", command.reset_frame_range)\n menu.addCommand(\"Reset Resolution\", command.reset_resolution)\n\n # add reload pipeline only in debug mode\n if bool(os.getenv(\"NUKE_DEBUG\")):\n menu.addSeparator()\n menu.addCommand(\"Reload Pipeline\", reload_pipeline)", "def attach_tblspc_util_menu(menu_base, screen_def): \n menu_base.add_command(label=\"Free Space\",\n command=lambda :\n display_window(screens.get_screen_def('FreeSpace')))", "def init_sub_menu2():\n global process, process_conn\n if process2 is not None:\n process = process2\n p = get_active_window()\n if p is not None:\n process_conn = p\n elif current_menu == \"card configuration\":\n process_conn = 
Application().connect(path = process)\n else:\n log.debug(\"init_sub_menu2 not defined for %s menu.\" % current_menu)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a recent file.
def openRecentFile(self, filename):
    self.loadFile(filename)
[ "def open_recent_file(self):\n action = self.sender()\n if action:\n self.load_file(action.data())", "def openRecentFileSlot(self):\n filename = self.sender().data().toString()\n logging.debug('Application: openRecentFileSlot() - ' + filename)\n self.openFile(filename)", "def __openRecent(self, act):\n filename = act.data()\n if filename:\n self.__startProc(\n \"eric6.py\",\n filename)", "def on_open(self, event):\r\n xml_path = utils.open_file()\r\n\r\n if xml_path:\r\n self.last_opened_file = xml_path\r\n self.open_xml_file(xml_path)", "def open(self, token):\n if token[0:1] == '/':\n token = token[1:]\n linkfile=path.join(self.rootdir, token)\n revsdir = \"%s-revs\" % linkfile\n mkdirs.mkdirs(revsdir)\n timestamp = strftime(\"%Y%m%d-%H%M%S\", localtime(time()))\n counter = 1\n while path.exists(path.join(revsdir, timestamp)):\n counter += 1\n timestamp = \"%s-%d\" % (timestamp, counter)\n extension = \"-edit%d\" % (int(random.random() * 10000))\n outfilename = path.join(revsdir, \"%s-%s\" % (timestamp, extension))\n args = (linkfile, timestamp, extension, revsdir)\n return RevisionedStorage.TempRevFile(outfilename, self, args)", "def addRecentFile(self, filename):\n logging.debug('Application: addRecentFile() - ' + filename)\n if isinstance(filename, QString):\n filename = str(filename) # Make sure filename is a python string not a QString\n leftCount = self.MAX_RECENT_FILES - 1\n if filename in self._recentFiles:\n del self._recentFiles[self._recentFiles.index(filename)]\n self._recentFiles = [filename] + self._recentFiles[:leftCount]\n self._saveIni()", "def open_file(filename):\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])", "def openFileInPath(self, path):\n try:\n if os.path.exists(path):\n os.startfile(path)\n except:\n print(traceback.format_exc())", "def open_file_in_new_tab(self, file_path):\n try:\n ActionSystem.new_file(file_path)\n except Exception as e:\n LogSystem.error(e)", "def open(self):\n subprocess.call(\"explorer \" + self._path)", "def open(self):\n filename = filedialog.askopenfilename()\n if filename == '':\n return None\n return filename", "def open(filename, mode=\"r\"):\n return GFile(filename, mode)", "def open_file_when_exists(self, filename):\n if self.wait_for_file_to_appear(filename):\n return open(filename, \"rU\")\n return None", "def reopen(self):\n\n #print(\"Reopening\", self.file, \"...\", end=\"\")\n\n # if we don't have an opened file already then try to open it now\n if not self.fh or self.fh.closed:\n try:\n self.open(self.file, start_pos=\"head\");\n except IOError:\n return False\n return True\n\n # save current values\n fh = self.fh\n pos = self.pos\n cur = self.stat\n \n # reopen same file\n try:\n self.open(self.file, \"head\")\n except IOError as e:\n #print(\"FILE DOES NOT EXIST\")\n return False\n \n new = self.stat\n #print(new.st_ino, ' == ', cur.st_ino)\n if (\n (self.reopen_check == 'inode' and new.st_ino == cur.st_ino)\n or\n (self.reopen_check == 'time' and new.st_mtime <= floor(self.last_time) and new.st_size == pos)\n ):\n #print(\"FILE NOT CHANGED\")\n # file appears to be the same or older than our last read\n #self.last_time = new.st_mtime\n self.fh = fh\n self.pos = pos\n self.stat = cur\n return False\n\n #print(\"NEW FILE\")\n return True", "def open_file(f):\n\tglobal global_file\n\tglobal_file = open(f)", "def recent_handler( directory, num_recents, files_info):\n\n\t# Get all folder and files in 
diectory\n\tall_entities = os.listdir(directory)\n\t\n\tall_files = []\n\t\n\tfor file in all_entities:\n\t\n\t\tfullpath = os.path.join(directory,file)\n\t\t# Only include files\n\t\tif os.path.isfile( fullpath ):\n\t\t\tall_files.append( fullpath )\n\n\t# Sort files by their last modified date\n\trecent_files = sorted(all_files, key=os.path.getmtime)[-num_recents:]\n\n\trecent_folder = os.path.join(directory,\"Recent-{}\".format(num_recents))\n\t# Create a recent folder\n\tos.mkdir(recent_folder)\n\n\tfor recent_file in recent_files:\n\t\tfile = os.path.basename(recent_file)\n\t\tnew_path = os.path.join(recent_folder,file)\n\t\tos.rename( recent_file , new_path )\n\n\tfiles_info['Recent'] = recent_files", "def open_file(file_name):\n nuke.scriptClose(ignoreUnsavedChanges=True)\n nuke.scriptOpen(file_name)", "def openFile (self):\n # print \"hello OPENFILE !!!\" \n if len(self.files_selected) == 1:\n try:\n fname = self.files_selected.pop(0)\n os.system('open ' + fname) # self.files_selected.pop(0))\n except:\n self.mprint (\"Could not open file %s for some reason!\" % (fname))\n elif len(self.files_selected) > 1:\n self.mprint(\"Cannot open multiple files. Please select ONLY one\")", "def __showRecentFilesMenu(self):\n self.recentFiles = []\n self.rsettings.sync()\n self.__loadRecentFiles()\n \n self.recentFilesMenu.clear()\n \n idx = 1\n for rf in self.recentFiles:\n if idx < 10:\n formatStr = '&{0:d}. {1}'\n else:\n formatStr = '{0:d}. {1}'\n act = self.recentFilesMenu.addAction(\n formatStr.format(\n idx, Utilities.compactPath(rf, self.maxMenuFilePathLen)))\n act.setData(rf)\n idx += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle subwindow scrollbar visibility.
def toggleScrollbars(self):
    checked = self._showScrollbarsAct.isChecked()
    windows = self._mdiArea.subWindowList()
    for window in windows:
        child = window.widget()
        child.enableScrollBars(checked)
[ "def notebook_visible_toggle_action(self):\n\n self.notebook.Show(not self.notebook.IsShown())\n self.viewmenu.Check(406, self.notebook.IsShown())\n self.SendSizeEvent()", "def toggleSingleBeamPlot(self):\n if self.sb_dock.isVisible(): self.sb_dock.hide()\n else: self.sb_dock.show()", "def toggleMultiBeamPlot(self):\n if self.mb_dock.isVisible(): self.mb_dock.hide()\n else: self.mb_dock.show()", "def check_scrollbar(self, scrollbar, offset):\r\n\r\n if scrollbar.visible and offset >= 0:\r\n scrollbar.visible = False\r\n scrollbar.grid_remove()\r\n elif not scrollbar.visible and offset < 0:\r\n scrollbar.visible = True\r\n scrollbar.grid()", "def status_bar_toggle_action(self):\n\n self.statusbar.Show(not self.statusbar.IsShown())\n self.SendSizeEvent()", "def toggle_scroll(self):\n if self.scroll == 'Man Scroll':\n self.scroll = 'Auto Scroll'\n sys.stdout.autoscroll = False\n else:\n self.scroll = 'Man Scroll'\n sys.stdout.autoscroll = True \n\n self.b_scroll.config(text=self.scroll)", "def __toggleWindow(self, w):\n if w.isHidden():\n w.show()\n else:\n w.hide()", "def toggle_fullscreen(self) -> None:", "def full_screen(self):\r\n global win_event\r\n win_event = NORMAL\r\n self.master.full_screen_toggle()", "def toggleWindowVisibility(string):\n pass", "def toggle_control_panel(self):\n\n control_panel = self.parent.sim_tab.control_panel\n hbox = self.parent.sim_tab.hbox\n\n if self.cp.IsChecked():\n hbox.Show(control_panel)\n hbox.Layout()\n else:\n hbox.Hide(control_panel)\n hbox.Layout()", "def toggle_visibility(self):\n\n if self.actor.GetVisibility():\n self.actor.VisibilityOff()\n\n else:\n self.actor.VisibilityOn()", "def change_toolbar_visibility(self):\r\n if self.toolbar.isVisible():\r\n self.toolbar.hide()\r\n else:\r\n self.toolbar.show()", "def __toggleCPlugins(self):\n self.__toggleWindow(self.cpluginsDock)", "def togglePyConsole(self, item, user_ns):\n if item.get_active():\n if not hasattr(self, 'pythonWindow'):\n self.pythonWindow = gtk.Window()\n S = gtk.ScrolledWindow()\n S.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n V = IPythonView(user_ns=user_ns)\n V.modify_font(pango.FontDescription(\"luxi mono 8\"))\n V.set_wrap_mode(gtk.WRAP_CHAR)\n S.add(V)\n self.pythonWindow.add(S)\n self.pythonWindow.show_all()\n self.pythonWindow.set_size_request(750, 550)\n self.pythonWindow.set_resizable(True)\n def onDestroy(*args):\n item.set_active(False)\n del self.pythonWindow\n self.pythonWindow.connect(\"destroy\", onDestroy)\n else:\n if hasattr(self, 'pythonWindow'):\n self.pythonWindow.destroy()", "def ToggleToolBar(self, event):\n if self.menu_show_tool_bar.IsChecked():\n self.toolbar.Show()\n else:\n self.toolbar.Hide()", "def changeWindow(self, isVisible = None, isEnabled = None):\r\n if self.mainWidget:\r\n\r\n # Re-build list of run widgets and their children, as new children appear after clicks\r\n runWidgets = list(self.runWidgets)\r\n for w in self.runWidgets:\r\n runWidgets.extend(w.findChildren(QtCore.QObject))\r\n\r\n # Search for widget objects in main window\r\n for w in self.mainWidget.findChildren(QtGui.QWidget):\r\n if w not in runWidgets:\r\n if isVisible is not None and hasattr(w, \"setVisible\"): # and type(w) is not QtGui.QFrame:\r\n w.setVisible(isVisible)\r\n if isEnabled is not None and hasattr(w, \"setEnabled\"):\r\n w.setEnabled(isEnabled)\r\n\r\n # Search for spacers in main window\r\n #for s in self.mainWidget.findChildren(QtCore.QObject):\r\n # print \">spacer>\", type(s)\r", "def set_scrollbars(self):\n try:\n if len(self.row_labels) < 
5:\n show_horizontal = wx.SHOW_SB_NEVER\n else:\n show_horizontal = wx.SHOW_SB_DEFAULT\n self.ShowScrollbars(show_horizontal, wx.SHOW_SB_DEFAULT)\n except AttributeError:\n pass", "def user32_ShowScrollBar(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"wBar\", \"bShow\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _hide_status_bar(self):\n\n self._status_frame_outer.place_forget()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle status bar visibility.
def toggleStatusbar(self):
    self.statusBar().setVisible(self._showStatusbarAct.isChecked())
[ "def status_bar_toggle_action(self):\n\n self.statusbar.Show(not self.statusbar.IsShown())\n self.SendSizeEvent()", "def _hide_status_bar(self):\n\n self._status_frame_outer.place_forget()", "def toggleWindowVisibility(string):\n pass", "def toggle_visibility(self):\n\n if self.actor.GetVisibility():\n self.actor.VisibilityOff()\n\n else:\n self.actor.VisibilityOn()", "def hide_status(self):\r\n self.hide()\r\n self._searchWidget._checkSensitive.setCheckState(Qt.Unchecked)\r\n self._searchWidget._checkWholeWord.setCheckState(Qt.Unchecked)\r\n self._searchWidget.setVisible(False)\r\n self._replaceWidget.setVisible(False)\r\n self._fileSystemOpener.setVisible(False)\r\n main_container = IDE.get_service(\"main_container\")\r\n widget = None\r\n if main_container:\r\n widget = main_container.get_current_widget()\r\n if widget:\r\n widget.setFocus()", "def notebook_visible_toggle_action(self):\n\n self.notebook.Show(not self.notebook.IsShown())\n self.viewmenu.Check(406, self.notebook.IsShown())\n self.SendSizeEvent()", "def change_toolbar_visibility(self):\r\n if self.toolbar.isVisible():\r\n self.toolbar.hide()\r\n else:\r\n self.toolbar.show()", "def __toggleWindow(self, w):\n if w.isHidden():\n w.show()\n else:\n w.hide()", "def toggle_fullscreen(self) -> None:", "def hide_status_icon(self):\n self._icon_visible = False\n self.repaint()\n self.update()", "def initStatusBar(self):\n\n self.statusBar()", "def workspace_switcher_toggle():\n workspace_switcher.hide() if workspace_switcher.showing else workspace_switcher.show()", "def _createStatusBar(self):\n self.statusbar = self.statusBar()\n self.statusbar.showMessage(\"Ready\", 3000)", "def ToggleToolBar(self, event):\n if self.menu_show_tool_bar.IsChecked():\n self.toolbar.Show()\n else:\n self.toolbar.Hide()", "def _update_status_bar(self, message):\n self.window().status_bar = message", "def toggle_fullscreen(context: tcod.context.Context) -> None:\n if not context.sdl_window_p:\n return\n fullscreen = tcod.lib.SDL_GetWindowFlags(context.sdl_window_p) & (\n tcod.lib.SDL_WINDOW_FULLSCREEN | tcod.lib.SDL_WINDOW_FULLSCREEN_DESKTOP\n )\n tcod.lib.SDL_SetWindowFullscreen(\n context.sdl_window_p,\n 0 if fullscreen else tcod.lib.SDL_WINDOW_FULLSCREEN_DESKTOP,\n )", "def initStatusBar(self, configs=None):\r\n self.statusbar = self.CreateStatusBar()", "def status_bar(self):\n\n self.statusbar = self.CreateStatusBar()\n # Two sections of the status bar.\n # 0 | Updates and status messages.\n # 1 | Current open file name.\n self.statusbar.SetFieldsCount(2)\n # Ratio: 2 parts first section, 1 part second section, for size.\n self.statusbar.SetStatusWidths([-2, -1])\n self.statusbar.SetStatusText('Welcome to tmpNote.', 0)\n self.statusbar.SetStatusText('No open file.', 1)\n self.statusbar.Show()", "def dark_toggle(self, event):\n if self.dark_status.get() == True:\n self.dark_status.set(False)\n else:\n self.dark_status.set(True)\n\n self.update_image(0)", "def toggle_fullscreen(self, event):\n self.fullScreenState = not self.fullScreenState\n self.addwin.attributes(\"-fullscreen\", self.fullScreenState)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle |QMdiSubWindow| activated signal.
def subWindowActivated(self, window):
    self.updateStatusBar()
[ "def setActiveWin(self, window):\n self.activeWindow = window\n self.controlActivated.emit(self)\n self.updateCommandsAvail()\n filterTextDialog = globalref.mainControl.filterTextDialog\n if filterTextDialog and filterTextDialog.isVisible():\n filterTextDialog.updateAvail('', True)\n filterConditionDialog = globalref.mainControl.filterConditionDialog\n if filterConditionDialog and filterConditionDialog.isVisible():\n filterConditionDialog.updateFilterControls()", "def active(self):\n self._state = window_states.ActiveWindowState(self)\n self.action_after_active()", "def __activated(self, reason):\n if (\n reason == QSystemTrayIcon.Context or\n reason == QSystemTrayIcon.MiddleClick\n ):\n self.__showContextMenu()\n elif reason == QSystemTrayIcon.DoubleClick:\n self.__startEric()", "def on_activate_item(self, event):\n print('in on_activate_item')\n if event.Index == 0:\n # self.add_item()\n self.master.initialize_new_event()", "def _bidsapp_fired(self):\n print_blue(\"[Open BIDS App Window]\")\n bids_layout = BIDSLayout(self.project_info.base_directory)\n subjects = bids_layout.get_subjects()\n\n anat_config = os.path.join(\n self.project_info.base_directory, \"code/\", \"ref_anatomical_config.json\"\n )\n dmri_config = os.path.join(\n self.project_info.base_directory, \"code/\", \"ref_diffusion_config.json\"\n )\n fmri_config = os.path.join(\n self.project_info.base_directory, \"code/\", \"ref_fMRI_config.json\"\n )\n eeg_config = os.path.join(\n self.project_info.base_directory, \"code/\", \"ref_EEG_config.json\"\n )\n\n self.bidsapp_ui = cmp.bidsappmanager.gui.bidsapp.BIDSAppInterfaceWindow(\n project_info=self.project_info,\n bids_root=self.project_info.base_directory,\n subjects=sorted(subjects),\n anat_config=anat_config,\n dmri_config=dmri_config,\n fmri_config=fmri_config,\n eeg_config=eeg_config\n )\n self.bidsapp_ui.configure_traits()", "def raiseTabManager(self, icon):\n window = None\n if isinstance(icon, E5ClickableLabel):\n window = icon.window()\n elif isinstance(icon, QAction):\n window = icon.parentWidget()\n \n if window is not None:\n titleBarHeight = self.style().pixelMetric(QStyle.PM_TitleBarHeight)\n \n y = max(0, window.frameGeometry().top() + titleBarHeight + 1)\n \n desktop = e5App().desktop()\n desktopGeometry = desktop.availableGeometry(self)\n windowFrameGeometry = window.frameGeometry()\n if (desktopGeometry.width() - windowFrameGeometry.right() - 1 >\n self.frameGeometry().width()):\n x = windowFrameGeometry.right() + 1\n else:\n x = windowFrameGeometry.x() - 1 - self.frameGeometry().width()\n \n newGeo = QRect(x, y, self.width(), window.height())\n self.setGeometry(newGeo)\n \n self.activateWindow()\n self.showNormal()\n self.raise_()", "def _connectSignals(self):\n logging.debug('Application: _connectSignals()')\n self.connect(self._window.tabWidget(), SIGNAL(\"currentChanged(int)\"), self.tabChanged)\n self.connect(self._window, SIGNAL(\"windowActivated()\"), self.tabChanged)\n self.connect(self._window.tabWidget(), SIGNAL(\"tabCloseRequested(int)\"), self.tabCloseRequest)", "def _notify_active_cb(self, widget, event):\n self._clock.active = self.props.active\n if self.props.active:\n self._inhibit_suspend()\n else:\n self._allow_suspend()", "def activate(self):\n\n # check log_win to determine, if windows are already created\n if self.wins.log_win is not None:\n self.wins.input_win.state.active = True\n self.wins.input_win.redraw()\n self.wins.log_win.state.active = False\n self.wins.log_win.redraw()\n self.clear_notifications()\n return", "def 
OnBusinessProcessSelectionChanged(self, eii):\n self._activeSheet = eii.ExtensionObject().ActiveSheet()\n bp = self._GetBusinessProcess(eii)\n if bp != self.BusinessProcess():\n self._SetBusinessProcess(bp)\n self.OnSelectedItemChanged()", "def _select_main_window(self):\n self.driver.switch_to.window(self.main_window_handle)", "def showWindow(self, sender):", "def focusSignal(self, focus):\r\n\r\n self.__widget.emit(QtCore.SIGNAL(\"focusSignal\"), focus)", "def on_minimize(self, _widget, event):\n if not self.application.app_hidden and event.new_window_state & Gdk.WindowState.ICONIFIED:\n self.application.window.iconify()", "def focusInEvent(self, event):\n super().focusInEvent(event)\n self.gotFocus.emit()", "def quit_subframe(self, event):\n subframe = event.GetEventObject().GetParent()\n if isinstance(subframe, wx.Panel):\n subframe = subframe.GetParent()\n self.Enable()\n subframe.Destroy()", "def _accessibility_notifications_callback(notification, element):\n if notification in ['AXWindowMiniaturized', 'AXWindowCreated']:\n WindowManager().reflow()\n logging.debug('Notification <%s> for application <%s>.', notification, element['AXTitle'])", "def handle_become_visible(self):\n log.debug(\"Handling a become_visible request\")\n\n def became_visible(window):\n \"\"\"Callback when window becomes visible.\"\"\"\n\n # Notifying Display Manager that the window has loaded.\n etree = self._encapsulate_request(self._generate_msg_finished_loading())\n self.zmq_request_queue.put_nowait(etree)\n\n # Calling method for subclass.\n self.do_became_visible()\n\n self.get_window().become_visible(became_visible)", "def activate():\n ActivityListener()", "def glyphWindowOpenCB(self, info):\n glyphWindow = info[\"window\"]\n self.guideStatus.addViewToWindow(glyphWindow)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Switch MDI subwindow layout direction.
def switchLayoutDirection(self):
    if self.layoutDirection() == QtCore.Qt.LeftToRight:
        QtGui.qApp.setLayoutDirection(QtCore.Qt.RightToLeft)
    else:
        QtGui.qApp.setLayoutDirection(QtCore.Qt.LeftToRight)
[ "def set_single_layout(window, box1, box2):\n\truntime_info[\"double_layout\"] = False\n\n\tbox2.place_forget()\n\tbox1.place(height=window.winfo_height(), width=window.winfo_width())", "def switchToOutlinerPerspLayout():\n\n pass", "def layoutAll(self, warp=False):\r\n if self.screen and len(self.windows):\r\n with self.disableMask(xcb.xproto.EventMask.EnterWindow):\r\n normal = [x for x in self.windows if not x.floating]\r\n floating = [\r\n x for x in self.windows\r\n if x.floating and not x.minimized\r\n ]\r\n screen = self.screen.get_rect()\r\n if normal:\r\n self.layout.layout(normal, screen)\r\n if floating:\r\n self.floating_layout.layout(floating, screen)\r\n if self.currentWindow and \\\r\n self.screen == self.qtile.currentScreen:\r\n self.currentWindow.focus(warp)", "def layoutAll(self, warp=False):\n if self.screen and len(self.windows):\n with self.disableMask(xcb.xproto.EventMask.EnterWindow):\n normal = [x for x in self.windows if not x.floating]\n floating = [x for x in self.windows\n if x.floating and not x.minimized]\n screen = self.screen.get_rect()\n if normal:\n self.layout.layout(normal, screen)\n if floating:\n self.floating_layout.layout(floating, screen)\n if (self.currentWindow and\n self.screen == self.qtile.currentScreen):\n self.currentWindow.focus(warp)", "def layout_changed(self):\n\n manager = self.manager\n if manager is not None:\n manager.layout_changed()\n parent = self.parent\n if parent is not None:\n parent.layout_changed()", "def Expand( self, flag=True ):\n for win in self._windows:\n win.Show( flag )\n self.hidden = not flag\n \n # Make sure to update the button to reflect the current state\n if hasattr( self, 'but' ):\n self.but.SetState( not self.hidden )\n \n # Layout has changed, make sure to refresh the entire tree.\n self.grid.RecurseLayout( self.grid.panel )\n self.grid.FitInside()", "def on(self):\n dacq_fixwin(self.fwnum, self.x, self.y, self.size, self.vbias)", "def _ch_d(self):\n self._toggle_dimming(active=False)\n self.active = self._dumpwin()", "def cmd_prevlayout(self, group=None):\r\n if group:\r\n group = self.groupMap.get(group)\r\n else:\r\n group = self.currentGroup\r\n group.prevLayout()", "def set_double_layout(window, box1, box2):\n\truntime_info[\"double_layout\"] = True\n\n\tw_width = window.winfo_width()\n\tw_height = window.winfo_height()\n\n\tbox1.place(height=(w_height // 2), width=w_width)\n\tbox2.place(height=(w_height // 2), width=w_width, y=(w_height // 2))", "def setup_main_window(self):\n\n layout_data = QtWidgets.QHBoxLayout()\n layout_data.addWidget(self.diffraction_space_widget,1)\n layout_data.addWidget(self.real_space_widget,1)\n\n layout_data_and_control = QtWidgets.QHBoxLayout()\n layout_data_and_control.addWidget(self.control_widget,0)\n layout_data_and_control.addLayout(layout_data,1)\n layout_data_and_control.setSpacing(0)\n layout_data_and_control.setContentsMargins(0,0,0,0)\n\n self.main_window.setLayout(layout_data_and_control)\n\n self.main_window.setGeometry(0,0,3600,1600)\n self.console_widget.setGeometry(0,1800,1600,250)\n self.main_window.show()\n self.main_window.raise_()\n self.console_widget.show()\n self.console_widget.raise_()\n return self.main_window", "def cmd_prevlayout(self, group=None):\n if group:\n group = self.groupMap.get(group)\n else:\n group = self.currentGroup\n group.prevLayout()", "def cmd_toggle_split(self):\r\n self.currentStack.toggleSplit()\r\n self.group.layoutAll()", "def _setSubmode(self, mid):\n self._current_submode = mode = self._modes[mid]\n 
mode.qa.setChecked(True)\n # hide submodes if any\n for mm in self._all_submodes:\n mm.qa.setShortcuts([])\n # set F4 shortcut to next submode\n if len(self._valid_submodes) > 1:\n for i, mm in enumerate(self._valid_submodes):\n if mm is mode:\n self._valid_submodes[(i + 1) % len(self._valid_submodes)].qa.setShortcut(Qt.Key_F4)\n break\n self.setMouseMode.emit(mode)", "def layout(self, wname, ltree):\n wobj = self.getwidget(wname)\n assert isinstance(ltree, list)\n lobj = self.getobj(ltree)\n assert isinstance(lobj, Layout)\n wobj.setLayout(lobj) #qt", "def cmd_nextlayout(self, group=None):\r\n if group:\r\n group = self.groupMap.get(group)\r\n else:\r\n group = self.currentGroup\r\n group.nextLayout()", "def moveToGroup(self, group):\r\n if self.currentWindow and group:\r\n self.addGroup(group)\r\n self.currentWindow.togroup(group)", "def toggle_control_panel(self):\n\n control_panel = self.parent.sim_tab.control_panel\n hbox = self.parent.sim_tab.hbox\n\n if self.cp.IsChecked():\n hbox.Show(control_panel)\n hbox.Layout()\n else:\n hbox.Hide(control_panel)\n hbox.Layout()", "def adjust_content(window, box1, box2):\n\tif runtime_info[\"double_layout\"]:\n\t\tset_double_layout(window, box1, box2)\n\telse:\n\t\tset_single_layout(window, box1, box2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synch panning of all subwindows to the same as fromViewer.
def synchPan(self, fromViewer):
    assert isinstance(fromViewer, MdiChild)
    if not fromViewer:
        return
    if self._handlingScrollChangedSignal:
        return
    self._handlingScrollChangedSignal = True
    newState = fromViewer.scrollState
    changedWindow = fromViewer.parent()
    windows = self._mdiArea.subWindowList()
    for window in windows:
        if window != changedWindow:
            window.widget().scrollState = newState
    self._handlingScrollChangedSignal = False
[ "def update(self):\n\t\tfor p in self.panes:\n\t\t\tp.update()", "def layoutAll(self, warp=False):\r\n if self.screen and len(self.windows):\r\n with self.disableMask(xcb.xproto.EventMask.EnterWindow):\r\n normal = [x for x in self.windows if not x.floating]\r\n floating = [\r\n x for x in self.windows\r\n if x.floating and not x.minimized\r\n ]\r\n screen = self.screen.get_rect()\r\n if normal:\r\n self.layout.layout(normal, screen)\r\n if floating:\r\n self.floating_layout.layout(floating, screen)\r\n if self.currentWindow and \\\r\n self.screen == self.qtile.currentScreen:\r\n self.currentWindow.focus(warp)", "def layoutAll(self, warp=False):\n if self.screen and len(self.windows):\n with self.disableMask(xcb.xproto.EventMask.EnterWindow):\n normal = [x for x in self.windows if not x.floating]\n floating = [x for x in self.windows\n if x.floating and not x.minimized]\n screen = self.screen.get_rect()\n if normal:\n self.layout.layout(normal, screen)\n if floating:\n self.floating_layout.layout(floating, screen)\n if (self.currentWindow and\n self.screen == self.qtile.currentScreen):\n self.currentWindow.focus(warp)", "def paned_window(self, *args, **kwargs) -> Iterator[tw.PanedWindow]:\n\n with self.widget(tw.PanedWindow, *args, **kwargs) as w:\n yield w", "def grid_place_window():\n mg.reset_to_current_window()", "def onScroll(self, event):\r\n\t\r\n\t\tfor i in range(self.nSubPlots):\r\n\t\t\tsubPlot = self.selectSubPlot(i)\t\t\r\n\t\t\txmin, xmax = subPlot.get_xlim()\r\n\t\t\tdx = xmax - xmin\r\n\t\t\tcx = (xmax+xmin)/2\r\n\t\t\tif event.button == 'down':\r\n\t\t\t\tdx *= 1.1\r\n\t\t\telse:\r\n\t\t\t\tdx /= 1.1\r\n\t\t\t_xmin = cx - dx/2\r\n\t\t\t_xmax = cx + dx/2\t\r\n\t\t\tsubPlot.set_xlim(_xmin, _xmax)\r\n\t\tevent.canvas.draw()", "def _set_main_window(self, window):\n old_main = self._window_ref()\n self._window_ref = ref(window)\n if old_main is not None:\n old_main.dock_manager = None\n curr_panes = self._panes\n self._panes = []\n for pane in curr_panes:\n self.add_pane(pane)", "def set_all_windows(self, action=None):\n for mw in self.mw[:self._a] + self.rw[:len(self.rw_inds)]:\n for i in range(len(self.bin_actions)):\n mw.bin_actions[i].setChecked(self.bin_actions[i].isChecked())\n mw.set_bins()\n for i in range(len(self.fit_methods)):\n mw.fit_methods[i].setChecked(self.fit_methods[i].isChecked())", "def changeWindow(self, isVisible = None, isEnabled = None):\r\n if self.mainWidget:\r\n\r\n # Re-build list of run widgets and their children, as new children appear after clicks\r\n runWidgets = list(self.runWidgets)\r\n for w in self.runWidgets:\r\n runWidgets.extend(w.findChildren(QtCore.QObject))\r\n\r\n # Search for widget objects in main window\r\n for w in self.mainWidget.findChildren(QtGui.QWidget):\r\n if w not in runWidgets:\r\n if isVisible is not None and hasattr(w, \"setVisible\"): # and type(w) is not QtGui.QFrame:\r\n w.setVisible(isVisible)\r\n if isEnabled is not None and hasattr(w, \"setEnabled\"):\r\n w.setEnabled(isEnabled)\r\n\r\n # Search for spacers in main window\r\n #for s in self.mainWidget.findChildren(QtCore.QObject):\r\n # print \">spacer>\", type(s)\r", "def __groupByWindow(self):\n windows = self.__mw.mainWindows()\n \n self.__isRefreshing = True\n \n winCount = 0\n for mainWin in windows:\n winCount += 1\n winItem = self.__createEmptyItem()\n winItem.setText(0, self.tr(\"Window {0}\").format(winCount))\n winItem.setToolTip(0, self.tr(\"Double click to switch\"))\n if mainWin == self.__mw:\n font = winItem.font(0)\n font.setBold(True)\n 
winItem.setFont(0, font)\n winItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n for browser in mainWin.tabWidget().browsers():\n if self.__page == browser.page():\n self.__page = None\n continue\n \n tabItem = self.__createEmptyItem(winItem)\n if browser == mainWin.tabWidget().currentBrowser():\n font = tabItem.font(0)\n font.setBold(True)\n tabItem.setFont(0, font)\n if not browser.isLoading():\n tabItem.setIcon(0, browser.icon())\n else:\n tabItem.setIcon(0, UI.PixmapCache.getIcon(\"loading.png\"))\n tabItem.setText(0, browser.title())\n tabItem.setToolTip(0, browser.title())\n \n tabItem.setData(0, TabManagerWidget.WebBrowserRole, browser)\n tabItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n self.__makeWebBrowserViewConnections(browser)", "def resizeWindows(sender, data):\n\n mainWindowWidth = get_main_window_size()[0]\n mainWindowHeight = get_main_window_size()[1]\n\n heightTopPanel = int(mainWindowHeight * 0.05)\n heightMiddlePanels = int(mainWindowHeight * 0.70)\n heightArchivePanel = int(mainWindowHeight * 0.6)\n heightBottomPanels = int(mainWindowHeight * 0.09)\n\n widthMiddlePanels = int(mainWindowWidth * 0.32)\n widthTopPanel = int(mainWindowWidth * 0.97)\n widthArchivePanel = int(mainWindowWidth * 0.97)\n\n yPosTopPanel = int(mainWindowHeight * 0.03)\n yPosMiddlePanels = int(mainWindowHeight * 0.15)\n yPosArchivePanel = int(mainWindowHeight * 0.1)\n yposBottomPanels = int(mainWindowHeight * 0.85)\n\n xPosTopPanel = int(mainWindowWidth * 0.009)\n xPosLeftPanel = int(mainWindowWidth * 0.008)\n xPosMiddlePanel = int(mainWindowWidth * 0.0073 + xPosLeftPanel + widthMiddlePanels)\n xPosRightPanel = int(mainWindowWidth * 0.0073 + xPosMiddlePanel + widthMiddlePanels)\n xPosArchivePanel = int(mainWindowWidth * 0.009)\n\n # Assigning dimensions to various windows\n\n # Top Panel\n set_window_pos(\"Top Panel\", x=xPosTopPanel, y=yPosTopPanel)\n set_item_width(\"Top Panel\", width=widthTopPanel)\n set_item_height(\"Top Panel\", height=heightTopPanel)\n\n # To Do Panel\n set_window_pos(\"To Do\", x=xPosLeftPanel, y=yPosMiddlePanels)\n set_item_width(\"To Do\", width=widthMiddlePanels)\n set_item_height(\"To Do\", height=heightMiddlePanels)\n\n # In Progress Panel\n set_window_pos(\"In Progress\", x=xPosMiddlePanel, y=yPosMiddlePanels)\n set_item_width(\"In Progress\", width=widthMiddlePanels)\n set_item_height(\"In Progress\", height=heightMiddlePanels)\n\n # Done Panel\n set_window_pos(\"Done\", x=xPosRightPanel, y=yPosMiddlePanels)\n set_item_width(\"Done\", width=widthMiddlePanels)\n set_item_height(\"Done\", height=heightMiddlePanels)\n\n # Archives Panel\n set_window_pos(\"Archives\", x=xPosArchivePanel, y=yPosArchivePanel)\n set_item_width(\"Archives\", width=widthArchivePanel)\n set_item_height(\"Archives\", height=heightArchivePanel)\n\n # To Do Status Panel\n set_window_pos(\"Status To Do\", x=xPosLeftPanel, y=yposBottomPanels)\n set_item_width(\"Status To Do\", width=widthMiddlePanels)\n set_item_height(\"Status To Do\", height=heightBottomPanels)\n\n # In Progress Status Panel\n set_window_pos(\"Status In Progress\", x=xPosMiddlePanel, y=yposBottomPanels)\n set_item_width(\"Status In Progress\", width=widthMiddlePanels)\n set_item_height(\"Status In Progress\", height=heightBottomPanels)\n\n # Done Status Panel\n set_window_pos(\"Status Done\", x=xPosRightPanel, y=yposBottomPanels)\n set_item_width(\"Status Done\", width=widthMiddlePanels)\n set_item_height(\"Status Done\", height=heightBottomPanels)", "def handle_scene(self, publish_cb, scene):\n with 
self.lock:\n windows = scene.get('windows', [])\n route_viewports = route_touch_to_viewports(windows, route_touch_key=MIRROR_TOUCH_CONFIG_KEY)\n self.route_viewports = route_viewports\n\n if len(route_viewports) == 0:\n route_viewports = self.default_viewports\n\n publish_cb(frozenset(route_viewports))", "def windowEvent(self, *args, **kwargs):\n super().windowEvent(*args, **kwargs)\n\n for win, cam, pixel2d in self.forcedAspectWins:\n aspectRatio = self.getAspectRatio(win)\n cam.node().getLens().setAspectRatio(aspectRatio)\n\n # Fix pixel2d scale for new window size\n # Temporary hasattr for old Pandas\n if not hasattr(win, 'getSbsLeftXSize'):\n pixel2d.setScale(2.0 / win.getXSize(), 1.0, 2.0 / win.getYSize())\n else:\n pixel2d.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())", "def _clipchanged(self):\n if not self._parent or not self._onscreen_wid:\n return\n self._zapclip()\n self._buttonschanged()\n self._zapregions()", "def refresh(self):\n\n for win in self.get_window():\n win.refresh()\n self.scr.refresh()", "def zoomIn(self):\n interval_size_removed = (self.mainCursor.max - self.mainCursor.min) / self.ZOOM_STEP\n\n # update the new visible interval\n if self.mainCursor.max - interval_size_removed > self.mainCursor.min + interval_size_removed:\n self.mainCursor.max -= interval_size_removed\n self.mainCursor.min += interval_size_removed\n\n self.graph()", "def focus(self, win, warp):\n if self.qtile._drag:\n # don't change focus while dragging windows\n return\n if win and not win in self.windows:\n return\n if win:\n self.currentWindow = win\n if win.floating:\n for l in self.layouts:\n l.blur()\n self.floating_layout.focus(win)\n else:\n self.floating_layout.blur()\n for l in self.layouts:\n l.focus(win)\n else:\n self.currentWindow = None\n hook.fire(\"focus_change\")\n # !!! note that warp isn't hooked up now\n self.layoutAll(warp)", "def seekDock(self):\r\n self.demo(1)", "def focus(self, win, warp):\r\n if self.qtile._drag:\r\n # don't change focus while dragging windows\r\n return\r\n if win:\r\n if not win in self.windows:\r\n return\r\n else:\r\n self.currentWindow = win\r\n if win.floating:\r\n for l in self.layouts:\r\n l.blur()\r\n self.floating_layout.focus(win)\r\n else:\r\n self.floating_layout.blur()\r\n for l in self.layouts:\r\n l.focus(win)\r\n else:\r\n self.currentWindow = None\r\n hook.fire(\"focus_change\")\r\n # !!! note that warp isn't hooked up now\r\n self.layoutAll(warp)", "def refresh_all(self):\n\t\tself.stat_win.refresh()\n\t\tself.input_win.refresh()\n\t\tself.time_win.refresh()\n\t\tself.main_win.refresh()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synch zoom of all subwindows to the same as fromViewer.
def synchZoom(self, fromViewer):
    if not fromViewer:
        return
    newZoomFactor = fromViewer.zoomFactor
    changedWindow = fromViewer.parent()
    windows = self._mdiArea.subWindowList()
    for window in windows:
        if window != changedWindow:
            window.widget().zoomFactor = newZoomFactor
[ "def updateZoomRegionsLimits(self):\n if self.selectedTool != Tools.ZoomTool:\n return\n\n min_limit, max_limit = self.mainCursor.min, self.mainCursor.max\n # set the limits of the zoom regions to the length of the signal\n self.axesOscilogram.gui_user_tool.zoomRegion.setBounds((min_limit, max_limit))\n\n rgn = self.axesOscilogram.gui_user_tool.zoomRegion.getRegion()\n # if the zoom region is the complete interval of visualization\n # clear the zoom tool region\n if rgn == (min_limit, max_limit):\n self.axesOscilogram.gui_user_tool.zoomRegion.setRegion((min_limit, min_limit))\n\n # do not update the spectrogram cursors directly because the oscilogram\n # zoom region setBounds raise the signal of changed if have to and then\n # the spec zoom region would be updated by the connections made\n self.updateSpecZoomRegion()", "def zoomIn(self):\n interval_size_removed = (self.mainCursor.max - self.mainCursor.min) / self.ZOOM_STEP\n\n # update the new visible interval\n if self.mainCursor.max - interval_size_removed > self.mainCursor.min + interval_size_removed:\n self.mainCursor.max -= interval_size_removed\n self.mainCursor.min += interval_size_removed\n\n self.graph()", "def zoomTo(self):\n if self.isZooming():\n # If we are already in the process of zooming we don't want to\n # initiate another zoom.\n return\n elif self._zoomMouseOver is None:\n # The mouse pointer is not over any zoomable.\n return\n else:\n self._zoomToNodePath(self._zoomMouseOver.np)\n self.focus = self._zoomMouseOver\n messager.send('zooming to znode',self._zoomMouseOver)", "def updateZoomRegion(self, oscilogram_update=True):\n # to avoid multiple recursive calls at this method because\n # the signal raised by the zoom region widgets of osc and spec widgets\n if self.zoom_update_in_progress:\n return\n\n # avoid multiples recursion calls when update the zoom regions in widgets\n self.zoom_update_in_progress = True\n\n osc_min_x, osc_max_x = self.axesOscilogram.gui_user_tool.zoomRegion.getRegion()\n spec_min_x, spec_max_x = self.axesSpecgram.gui_user_tool.zoomRegion.getRegion()\n\n # translation of spectrograms coords into oscilogram coords\n spec_min_x, spec_max_x = self.from_spec_to_osc(spec_min_x), self.from_spec_to_osc(spec_max_x)\n\n min_x_spec = self.from_osc_to_spec(osc_max_x)\n max_x_spec = self.from_osc_to_spec(osc_min_x)\n\n if abs(osc_max_x - spec_max_x) > 1 or abs(osc_min_x - spec_min_x) > 1:\n if oscilogram_update and (min_x_spec != spec_min_x or max_x_spec != spec_max_x):\n self.axesSpecgram.gui_user_tool.zoomRegion.setRegion([min_x_spec, max_x_spec])\n self.signalIntervalSelected.emit(osc_min_x, osc_max_x)\n\n elif not oscilogram_update and (spec_min_x != osc_min_x or spec_max_x != osc_max_x):\n self.axesOscilogram.gui_user_tool.zoomRegion.setRegion([spec_min_x, spec_max_x])\n self.signalIntervalSelected.emit(spec_min_x, spec_max_x)\n\n self.zoom_update_in_progress = False", "def zoom_home(self):\n debug_print('BoxesView.zoom_home')\n self.zoom_mode = 'whole_scene'\n self.fitInView(self.scene().sceneRect(), Qt.KeepAspectRatio)", "def zoom_image(self):\n\n self.select_zooming = True\n self.button_zoom.setText(\"Unzoom\")\n self.button_zoom.clicked.connect(self.unzoom)", "def do_zoom_view(self, dx, dy, lock_aspect_ratio=False):\n # See guiqwt/events.py where dx and dy are defined like this:\n # dx = (pos.x(), self.last.x(), self.start.x(), rct.width())\n # dy = (pos.y(), self.last.y(), self.start.y(), rct.height())\n # where:\n # * self.last is the mouse position seen during last event\n # * self.start is 
the first mouse position (here, this is the\n # coordinate of the point which is at the center of the zoomed area)\n # * rct is the plot rect contents\n # * pos is the current mouse cursor position\n auto = self.autoReplot()\n self.setAutoReplot(False)\n dx = (-1,) + dx # adding direction to tuple dx\n dy = (1,) + dy # adding direction to tuple dy\n if lock_aspect_ratio:\n direction, x1, x0, start, width = dx\n F = 1 + 3 * direction * float(x1 - x0) / width\n axes_to_update = self.get_axes_to_update(dx, dy)\n\n axis_ids_vertical = (self.get_axis_id(\"left\"), self.get_axis_id(\"right\"))\n\n for (direction, x1, x0, start, width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n if not lock_aspect_ratio:\n F = 1 + 3 * direction * float(x1 - x0) / width\n if F * (hbound - lbound) == 0:\n continue\n if self.get_axis_scale(axis_id) == 'lin':\n orig = self.invTransform(axis_id, start)\n vmin = orig - F * (orig - lbound)\n vmax = orig + F * (hbound - orig)\n else: # log scale\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n imin = start - F * (start - i_lbound)\n imax = start + F * (i_hbound - start)\n vmin = self.invTransform(axis_id, imin)\n vmax = self.invTransform(axis_id, imax)\n\n # patch for not zooming into \"negative space\" ;) :\n if axis_id in axis_ids_vertical:\n vmin = 0\n if vmax < 0:\n vmax = -vmax\n\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)", "def updateViewer(self):\n if not self.hasImage():\n return\n if len(self.zoomStack) and self.sceneRect().contains(self.zoomStack[-1]):\n self.fitInView(self.zoomStack[-1], self.aspectRatioMode) # Show zoomed rect\n else:\n self.zoomStack = [] # Clear the zoom stack (in case we got here because of an invalid zoom).\n self.fitInView(self.sceneRect(), self.aspectRatioMode) # Show entire image (use current aspect ratio mode).", "def zoom(self, mode):\n\n if mode == \"out\":\n self.scale -= 0.1\n elif mode == \"in\":\n self.scale += 0.1\n else:\n self.scale = 1\n\n self.scale = round(self.scale, 1)\n self.update_window()", "def windowEvent(self, *args, **kwargs):\n super().windowEvent(*args, **kwargs)\n\n for win, cam, pixel2d in self.forcedAspectWins:\n aspectRatio = self.getAspectRatio(win)\n cam.node().getLens().setAspectRatio(aspectRatio)\n\n # Fix pixel2d scale for new window size\n # Temporary hasattr for old Pandas\n if not hasattr(win, 'getSbsLeftXSize'):\n pixel2d.setScale(2.0 / win.getXSize(), 1.0, 2.0 / win.getYSize())\n else:\n pixel2d.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())", "def toggle_zoom(self):\r\n #TODO! 
zooming\r\n logging.debug('toggle \"single shot\" zoom')\r\n #aktiviraj zoomiranje\r\n self.zoomSelector.set_active(True)\r\n #ako postoji span selector, disable radi konfilikta sa ljevim klikom\r\n if self.spanSelector != None:\r\n self.spanSelector.visible = False", "def unzoom(self):\n\n self.button_zoom.setText(\"zoom\")\n self.zooming = False\n self.select_zooming = False\n self.zoom_point = QPoint()\n\n self.button_zoom.clicked.connect(self.zoom_image)\n\n self.load_current_frame()\n self.update()", "def zoom_out_action(self) -> None:\n self.grview.scale(0.8, 0.8)", "def _autozoom_in(self, lvl, p1lat, p1lon, p2lat, p2lon):\n if ( ( self._visible_marker(p1lat, p1lon)\n and self._visible_marker(p2lat, p2lon) )\n and lvl < 18 ):\n lvl += 1\n self.osm.set_zoom(lvl)\n GLib.timeout_add(int(50), self._autozoom_in, lvl,\n p1lat, p1lon, p2lat, p2lon)\n else:\n GLib.timeout_add(int(50), self._autozoom_out, lvl,\n p1lat, p1lon, p2lat, p2lon)", "def zoomOut(self):\n interval_size_added = max(1, (self.mainCursor.max - self.mainCursor.min) / self.ZOOM_STEP)\n\n # update the max interval limit\n if (self.mainCursor.max + interval_size_added) < self.signal.length-1:\n self.mainCursor.max += interval_size_added\n else:\n self.mainCursor.max = self.signal.length-1\n\n # update the min interval limit\n if self.mainCursor.min - interval_size_added >= 0:\n self.mainCursor.min -= interval_size_added\n else:\n self.mainCursor.min = 0\n\n self.graph()", "def resetZoom(self):\n self.xmin, self.ymin, self.xmax, self.ymax = self.defaultZoom()", "def zoom_to_items(self, items):\n united = unite_rects(i.sceneBoundingRect() for i in items)\n if 'whole_scene' == self.zoom_mode:\n debug_print('Ensuring [{0}] items visible'.format(len(items)))\n self.ensureVisible(united)\n self.viewport_changed.emit(self.normalised_scene_rect())\n else:\n debug_print('Showing [{0}] items'.format(len(items)))\n # Add some padding around the selection\n padding = 20\n if 'follow_selection' == self.zoom_mode:\n # Update zoom\n united.adjust(-padding, -padding, 2 * padding, 2 * padding)\n self.fitInView(united, Qt.KeepAspectRatio)\n\n if self.absolute_zoom > self.MAXIMUM_ZOOM:\n # new_absolute_zoom() emits viewport_changed\n self.new_absolute_zoom(self.MAXIMUM_ZOOM)\n else:\n self.viewport_changed.emit(self.normalised_scene_rect())\n else:\n # zoom_mode == fixed\n self.ensureVisible(united, xMargin=padding, yMargin=padding)", "def _autozoom(self):\n level_start = self.osm.props.zoom\n p1lat, p1lon = self.begin_selection.get_degrees()\n p2lat, p2lon = self.end_selection.get_degrees()\n lat = p1lat + ( p2lat - p1lat ) / 2\n lon = p1lon + ( p2lon - p1lon ) / 2\n # We center the map on the center of the region\n self.osm.set_center(lat, lon)\n self.save_center(lat, lon)\n p1lat = self.begin_selection.rlat\n p1lon = self.begin_selection.rlon\n p2lat = self.end_selection.rlat\n p2lon = self.end_selection.rlon\n # We zoom in until at least one marker missing.\n GLib.timeout_add(int(50), self._autozoom_in, level_start,\n p1lat, p1lon, p2lat, p2lon)", "def toggle_zoom_to_selection(self):\n selected = self.scene().selectedItems()\n if selected and 'follow_selection' != self.zoom_mode:\n # Show the selection\n self.zoom_mode = 'follow_selection'\n self.zoom_to_items(selected)\n elif 'whole_scene' != self.zoom_mode:\n # Either no selection and/or currently in 'fixed' or\n # 'follow_selection' - show the whole image\n self.zoom_home()\n else:\n # Apply a mild fixed zoom\n self.zoom_mode = 'fixed'\n self.new_relative_zoom(4.0)", "def rect_zoom(self, 
eclick, erelease):\r\n msg = 'rect_zoom called. eclick={0} , erelease={1}'.format(str(eclick), str(erelease))\r\n logging.debug(msg)\r\n if eclick.xdata != erelease.xdata and eclick.ydata != erelease.ydata:\r\n x = sorted([eclick.xdata, erelease.xdata])\r\n y = sorted([eclick.ydata, erelease.ydata])\r\n paket = {'x':x,\r\n 'y':y,\r\n 'tip':self.konfig.TIP}\r\n msg = 'zoom value emit - data={0}'.format(str(paket))\r\n logging.debug(msg)\r\n self.emit(QtCore.SIGNAL('add_zoom_level(PyQt_PyObject)'), paket)\r\n #TODO! disable zoom\r\n self.zoomSelector.set_active(False)\r\n #enable spanSelector\r\n if self.spanSelector != None:\r\n self.spanSelector.visible = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save dialog state, position & size.
def saveDialogState(self, dialog, groupName): assert isinstance(dialog, QtGui.QDialog) settings = QtCore.QSettings() settings.beginGroup(groupName) settings.setValue('state', dialog.saveState()) settings.setValue('geometry', dialog.saveGeometry()) settings.setValue('filter', dialog.selectedNameFilter()) settings.endGroup()
[ "def save_ui_geometry(self):\n if self.viewmanager.MDI_ON:\n Configuration.setSetting(\"PlayerSizes\", self.saveState())\n Configuration.setSetting(\"MainWindowSize\", self.size())\n Configuration.setSetting(\"MainWindowPosition\", self.pos())\n\n else:\n Configuration.setSetting(\"PlayerSizesFloating\", self.saveState())\n Configuration.setSetting(\"MainWindowSizeFloating\", self.size())\n Configuration.setSetting(\"MainWindowPositionFloating\", self.pos())", "def save_position(self):\n\n # Get position label.\n label = self.gui.posLabel.text()\n\n # Check that the label is unique\n if self.gui.posSelect.findText(label) == -1:\n\n # Generate position dictionary.\n position = {}\n position[\"XS\"] = self.PV_XSPOS_ABS.get()\n position[\"YS\"] = self.PV_YSPOS_ABS.get()\n position[\"ZS\"] = self.PV_ZSPOS_ABS.get()\n position[\"XO\"] = self.PV_XOPOS_ABS.get()\n position[\"YO\"] = self.PV_YOPOS_ABS.get()\n position[\"ZO\"] = self.PV_ZOPOS_ABS.get()\n\n # Save position into dynamic memory.\n self.gui.savedPos[label] = position\n\n # Update the position select drop down menu.\n self.gui.posSelect.insertItem(1, label)\n\n # Update the saved positions file.\n save_pos_config(path=\"saved_positions.json\",\n data=self.gui.savedPos)\n\n # Print output statement.\n self.append_text(f\"Position saved: {label}\")\n\n else:\n # Print output statement.\n self.append_text(\"ERROR: Position label already exists, change the position label and try again.\",\n QColor(255, 0, 0))", "def _save_state(self):\n self.initial_input = self.input_widget.toPlainText()\n self.initial_output = self.output_widget.toPlainText()", "def _save_button_clicked(self):\n\n fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self,\"Save File\",UWBsim.BASE_DIR,\"All Files (*);;YAML files (*.yaml)\")\n \n yaml_dump = {}\n for i in range(len(self.anchor_positions)):\n key = str(i)\n yaml_dump[key] = {}\n yaml_dump[key]['x'] = str(self.anchorLineEdits[i][0].text())\n yaml_dump[key]['y'] = str(self.anchorLineEdits[i][1].text())\n yaml_dump[key]['z'] = str(self.anchorLineEdits[i][2].text())\n\n if not fileName.endswith('.yaml'):\n fileName = fileName + '.yaml'\n \n with open(fileName, 'w') as f:\n yaml.safe_dump(yaml_dump, f)", "def saveGraphicState(self):\n gState = dict(\n font=self._font,\n fontSize=self._fontSize,\n fill=self._fill,\n stroke=self._stroke,\n strokeWidth=self._strokeWidth,\n ox=self._ox,\n oy=self._oy,\n rotate=self._rotate,\n hyphenation=self._hyphenation,\n openTypeFeatures=self._openTypeFeatures,\n )\n self._gState.append(gState)", "def _save(self):\n settings = self._settings\n self.data_format = settings.data_format\n self.byte_order = settings.byte_order\n self.restore = True", "def save_game(self):\n filename = filedialog.asksaveasfilename()\n if filename:\n self._filename = filename\n if self._filename:\n fd = open(self._filename, 'w')\n fd.write(f'{self._model.get_game()}\\n{self._model.get_pokemon_locations()}\\n{self._statusbar.sec}\\n'\n f'{self._statusbar.min}')\n fd.close()", "def save_window_state(self, recursive: bool = False):\n settings = core.Settings()\n name = self.objectName()\n logger.debug(f\"Saving window state for {self.windowTitle()!r}...\")\n settings[f\"{name}.geometry\"] = self.saveGeometry()\n settings[f\"{name}.state\"] = self.saveState()\n if recursive:\n for window in self.find_children(MainWindow, recursive=True):\n if window.objectName():\n window.save_window_state()", "def showSave(self) -> \"adsk::core::DialogResults\" :\n return _core.FileDialog_showSave(self)", "def 
saveOrderAs(self):\n saveFile = QtGui.QFileDialog.getSaveFileName\n configFile = saveFile(parent=self,\n caption=\"Save the image order\")\n if not configFile:\n # User hit 'Cancel'\n return\n \n self._currentOrder = configFile\n self.saveOrder()", "def save_geometry_settings(width, height):\n\tsettings = setting_handler.load_settings()\n\tsettings[\"width\"] = str(width)\n\tsettings[\"height\"] = str(height)\n\tsetting_handler.save_settings(settings)", "def saveViewportSettings():\n pass", "def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n # check for modified files and ask if needed to save or just save depending on the preference\n if settings['options.edit.automatic_save_preference'] != 2:\n for edits, name in ((self.grammars, 'Grammar'), (self.transformers, 'Transformer'), (self.contents, 'Content')):\n for i in range(len(edits)):\n edit = edits[i]\n if edit.is_modified():\n if settings['options.edit.automatic_save_preference'] == 1:\n save = True\n else:\n answer = QtWidgets.QMessageBox.warning(self, 'Unsaved modification', '{} tab {} contains unsaved modifications. Save?'.format(name, i+1), QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)\n save = answer == QtWidgets.QMessageBox.Ok\n if save:\n edit.save()\n\n # update settings\n settings['window.size.width'] = self.width()\n settings['window.size.height'] = self.height()\n settings['window.splitter.columns.size'] = self.column_splitter.sizes()\n settings['window.splitter.left.size'] = self.left_splitter.sizes()\n\n settings['grammar.files'] = [x.file for x in self.grammars]\n settings['transformer.files'] = [x.file for x in self.transformers]\n settings['content.files'] = [x.file for x in self.contents]\n\n settings['grammar.active_tab'] = self.grammar_tabs.currentIndex()\n settings['transformer.active_tab'] = self.transformer_tabs.currentIndex()\n settings['content.active_tab'] = self.content_tabs.currentIndex()\n\n # save setting\n text = json.dumps(settings, indent=1, sort_keys=True)\n common.write_text(settings_file, text)\n\n event.accept()", "def __OnSaveConfig(self, event):\n\t\tdialog = wx.FileDialog(parent=self.__frame, style=wx.FD_SAVE)\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tself.__config.Save(filename=dialog.GetPath())\n\t\tdialog.Destroy()", "def onFlagSaveButton(self,event):\n # Check whether DB still available\n self.checkDB('minimal')\n\n currentlen = len(self.flaglist)\n\n #print (\"FlagSave\", self.flaglist)\n\n self.changeStatusbar(\"Saving flags ...\")\n dlg = StreamSaveFlagDialog(None, title='Save Flags', db = self.db, flaglist=self.flaglist,\n last_dir=self.last_dir)\n if dlg.ShowModal() == wx.ID_OK:\n #flaglist = dlg.flaglist\n pass\n\n #self.flaglist = []\n self.changeStatusbar(\"Flaglist saved and reset - Ready\")", "def openSaveDialog(self):\n\n fileDialog = QFileDialog(self)\n fileDialog.setDirectory(os.getcwd())\n #\n #self.connect(fileDialog, QtCore.SIGNAL('filesSelected(QStringList)'),\n # self.saveConfiguration)\n #\n fileDialog.exec_()", "def save_poi(self):\n # TODO: put POIs to the global data dictionary\n if not self.Main.simple_pos: # empty position, no gps connected yet\n appuifw.note(u\"No GPS\", 'error')\n return\n \n simple_pos = self.Main.simple_pos\n # Default name is gps timestamp (UTC) with timezone info (time.altzone)\n ts = unicode(time.strftime(u\"%H:%M:%SZ \", time.localtime(simple_pos[\"gpstime\"])))\n simple_pos[\"text\"] = appuifw.query(u\"Name\", \"text\", ts)\n if simple_pos[\"text\"] is not None: # user did not press Cancel\n 
self.Main.data[\"pois_private\"].append(simple_pos)\n else: # user pressed cancel -> no POI\n pass\n #pos[\"text\"] = u\"\" # empty text", "def save(self):\n\n state_path = Path('~', STATE_DIRECTORY, STATE_FILENAME).expanduser()\n state_path.parent.mkdir(parents=True, exist_ok=True)\n\n with open(state_path, 'wb') as f:\n pickle.dump(self.data, f, protocol=pickle.HIGHEST_PROTOCOL)", "def _save_symbols_state(self):\r\n #filename = self.actualSymbols[0]\r\n #TODO: persist self.collapsedItems[filename] in QSettings\r\n pass", "def save_state(self):\n assert self.file is not None, 'set_up_save() has not been run'\n\n for p in self.particle_list:\n string = ''.join([str(v) + ','\n for v in (self.t_curr, p.id, p.x[0], p.x[1],\n p.v[0], p.v[1], p.P,\n p.rho, p.bound)]) + '\\n'\n self.file.write(string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restore dialog state, position & size.
def restoreDialogState(self, dialog, groupName): assert isinstance(dialog, QtGui.QDialog) settings = QtCore.QSettings() settings.beginGroup(groupName) dialog.restoreState(settings.value('state')) dialog.restoreGeometry(settings.value('geometry')) dialog.selectNameFilter(settings.value('filter', "")) settings.endGroup()
[ "def restore_state(self):\n self._restore_input()\n self._restore_output()", "def restore_main_window(self, e):\n self.frame.max_from_tray(e)", "def restore(self):\n\t\tself.label[\"text\"] = \"ABRA CADABRA, Hello there!!!!!\"\n\t\tself.clearBtn[\"state\"] = \"normal\"\n\t\tself.restoreBtn[\"state\"] = \"disabled\"", "def _restore(self):\n\n # check restore\n if not self.restore:\n return\n\n # restore\n settings = self._settings\n settings.data_format = self.data_format\n settings.byte_order = self.byte_order\n self.restore = False", "def restoreContent(self):\n ...", "def restoreDisplaySettings(self, settings):\n if settings is None:\n return\n self.resize(settings['win-size'])\n # Note that restoring the position doesn't always work (issues with\n # windows that are placed partially outside the screen)\n self.move(settings['win-pos'])\n # Restore each viewer-specific display only if the number of viewers\n # stayed the same:\n num_inspectors = len(self._inspectors)\n if num_inspectors == settings['num-inspectors']:\n for idx in range(num_inspectors):\n self._inspectors[idx].restoreDisplaySettings(settings['inspection-widgets'][idx])\n self.update()", "def restore_field(self):\r\n self.__field = self.__backup_field\r\n self.__player_pos = self.__backup_pos", "def do_revert(view):\r\n pos = view.viewport_position()\r\n view.run_command('revert')\r\n sublime.set_timeout(lambda: reposition_view(view, pos), 50) # must delay\r", "def restore_memento(self, memento: Memento) -> None:\n self.__state = memento.get_state()", "def restore_state(self):\n if self:\n self.pop()\n else:\n log.warning(\"Can't reset empty state\")", "def restore_settings(self):\n\n #Se ejecuta la función correspondiente de cada Frame.\n self.__sharing_function_frame.restore_settings()\n self.__algorithm_frame.restore_settings()", "def btnRestoreClicked(self):\n pyzo.resetConfig()\n shutil.copyfile(self.backup_file, self.conf_file)\n pyzo.main.restart()", "def restore(self, *args, **kwargs):\n raise NotImplementedError()", "def _check_restore_pane_sizes(self):\n \n window = self.winfo_toplevel()\n if hasattr(window, \"initializing\") and window.initializing:\n return\n \n try:\n self._restoring_pane_sizes = True\n if len(self.panes()) > 1:\n self._set_pane_size(\"last\", self.last_pane_size)\n if len(self.panes()) > 2:\n self._set_pane_size(\"first\", self.first_pane_size)\n finally:\n self._restoring_pane_sizes = False", "def _reset_state_wrapper(self):\n self._reset_state_impl()\n self._is_adapted = False", "def back_to_home_gui(self):\n self.forget_non_home_gui()\n self.seeds_path.set(\"\")\n self.initilize_gui()", "def restore_all(self):\n for r in self.saved:\n self.restore(r)", "def on_reset_clicked(self):\n if self.option_control.option_window.confirm_operation('Please Confirm', 'The reset process will clear all '\n 'settings and restore initial '\n 'settings, confirm to continue?'):\n try:\n self.option_control.option_data.reset_all()\n self.option_control.option_data.save_config_to_local()\n self.option_control.fill_data()\n self.reset_state()\n except Exception as ex:\n self.error_signal.emit('Reset Failed', ex.__str__())", "def transf_reset_callback(self, event, tag, state):\n self.imgMask.hide()\n self.imgTransform.cancel()\n\n #self.imgTransform.reset()\n self.origImgPanel.set_image(self.board.src_image)\n self.update_board(True)\n\n self.origImgPanel.buttons['edge'].state = False\n self.origImgPanel.buttons['reset'].disabled = True\n return False", "def back( self ):\n super( ProbesScreen, self 
).back()\n\n #self._current_option = self._current_option - 1\n #if self._current_option < 0:\n #self._current_option = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update recent file list setting.
def updateRecentFileSettings(self, filename, delete=False): settings = QtCore.QSettings() files = list(settings.value(SETTING_RECENTFILELIST, [])) try: files.remove(filename) except ValueError: pass if not delete: files.insert(0, filename) del files[MDIImageViewerWindow.MaxRecentFiles:] settings.setValue(SETTING_RECENTFILELIST, files)
[ "def load_files_from_settings(self):\n files = self.settings.value('recent_files_list', [[]])[0]\n self.load_files(files)", "def __loadRecentFiles(self):\n rf = self.rsettings.value(Globals.recentNameFiles)\n if rf is not None:\n for f in rf:\n if QFileInfo(f).exists():\n self.recentFiles.append(f)", "def update_filelistdialog(self):\n if self.filelist_dlg is not None:\n self.filelist_dlg.synchronize(self.get_stack_index())", "def clearRecentFiles(self): \n self._recentFiles = []\n self._saveIni()\n self.updateMenu()", "def update_pull(self):\n \n file_path = os.path.join(self.script_dir,'pull list.json') \n if not os.path.isfile(file_path)or os.path.getsize(file_path) == 0 :\n with open(file_path,'w') as out:\n json.dump(self.pull_list,out)\n else:\n with open(file_path) as infile:\n data = json.load(infile)\n data.update(self.pull_list)\n\n with open(file_path,'w') as out:\n json.dump(self.pull_list,out)", "def addRecentFile(self, filename):\n logging.debug('Application: addRecentFile() - ' + filename)\n if isinstance(filename, QString):\n filename = str(filename) # Make sure filename is a python string not a QString\n leftCount = self.MAX_RECENT_FILES - 1\n if filename in self._recentFiles:\n del self._recentFiles[self._recentFiles.index(filename)]\n self._recentFiles = [filename] + self._recentFiles[:leftCount]\n self._saveIni()", "def _fileRefresh(self, item):", "def add_file(self, filename):\n self.filenames.insert(0, filename)\n del self.filenames[self.max_no_files:]\n self.filenames = list(dict.fromkeys(self.filenames))\n self.settings.setValue('recent_files_list', [self.filenames])\n self.update_actions()", "def __showRecentFilesMenu(self):\n self.recentFiles = []\n self.rsettings.sync()\n self.__loadRecentFiles()\n \n self.recentFilesMenu.clear()\n \n idx = 1\n for rf in self.recentFiles:\n if idx < 10:\n formatStr = '&{0:d}. {1}'\n else:\n formatStr = '{0:d}. {1}'\n act = self.recentFilesMenu.addAction(\n formatStr.format(\n idx, Utilities.compactPath(rf, self.maxMenuFilePathLen)))\n act.setData(rf)\n idx += 1", "def __saveHistory(self):\n url = self.vcsUrlPicker.text()\n vcsUrlHistory = self.vcsUrlPicker.getPathItems()\n if url not in vcsUrlHistory:\n vcsUrlHistory.insert(0, url)\n \n # max. 
list sizes is hard coded to 20 entries\n newVcsUrlHistory = [url for url in vcsUrlHistory if url]\n if len(newVcsUrlHistory) > 20:\n newVcsUrlHistory = newVcsUrlHistory[:20]\n \n self.__vcs.getPlugin().setPreferences(\n \"RepositoryUrlHistory\", newVcsUrlHistory)", "def SetLastFile(self, filename, selection):\n self.config.set(\"main\",\"lastfile\",filename)\n self.config.set(\"main\",\"selection\",selection)", "def merge_file_lists(self):\n\n for old, new in self.build['merge']:\n self.file_lists[new] = FileList.merge([\n self.file_lists[old],\n self.file_lists[new]])", "def save_file_list(list):\n if Settings.FILE_LIST:\n with open(Settings.FILE_LIST, mode=\"w\") as f:\n for fl in list: \n f.write(fl + \"\\n\")", "def __loadRecentMultiProjects(self):\n rmp = self.rsettings.value(Globals.recentNameMultiProject)\n if rmp is not None:\n for f in rmp:\n if QFileInfo(f).exists():\n self.recentMultiProjects.append(f)", "def _directoryRefresh(self, item):", "def load_files(self, file_list):\n self.filenames = file_list[:self.max_no_files]\n self.update_actions()", "def clear_recent_files(self):\n if self.dcc:\n from anima.recent import RecentFileManager\n\n rfm = RecentFileManager()\n rfm[self.dcc.name] = []\n rfm.save()", "def __setitem__(self, *args):\r\n return _osgDB.stdFilePathList___setitem__(self, *args)", "def update_files():\r\n set_to_file(Crawler.queue, Crawler.queueFile)\r\n set_to_file(Crawler.crawled, Crawler.crawledFile)\r\n external_to_file(Crawler.external, Crawler.externalFile)", "def on_sources_list_modified(self):\n self.reload_sourceslist()\n self.show_distro()\n self.show_isv_sources()\n self.show_cdrom_sources()\n self.button_revert.set_sensitive(True)\n self.modified_sourceslist = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run MDI Image Viewer application.
def main(): import sys app = QtGui.QApplication(sys.argv) QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat) app.setOrganizationName(COMPANY) app.setOrganizationDomain(DOMAIN) app.setApplicationName(APPNAME) app.setWindowIcon(QtGui.QIcon(":/icon.png")) mainWin = MDIImageViewerWindow() mainWin.setWindowTitle(APPNAME) mainWin.show() sys.exit(app.exec_())
[ "def open_image(self):\r\n image_viewer = {self.__LINUX_SYS: self.__LINUX_IMG_VWR,\r\n self.__WINDOWS_SYS: self.__WINDOWS_IMG_VWR,\r\n self.__APPLE_SYS: self.__APPLE_IMG_VWR}[self._SYS_PLTFRM]\r\n try:\r\n subprocess.run([image_viewer, self.__RCVD_IMG])\r\n except FileNotFoundError:\r\n pass", "def runIjandShow(image_name):\n if os.path.isfile(image_name):\n subp.Popen([IJShow.ijpath, image_name],\n shell=False, stdin=None, stdout=None, stderr=None, close_fds=True)\n else:\n logging.warning('Specified file ' + image_name + \" does not exist\")", "def main_show():\n variant = sys.argv[2]\n if variant == 'original':\n obj = view.Original()\n cmap=None\n elif variant == 'aligned':\n obj = view.Aligned()\n cmap=glumpy.colormap.Grey\n elif variant == 'funneled':\n obj = view.Funneled()\n cmap=None\n else:\n raise ValueError(variant)\n\n glumpy_viewer(\n img_array=obj.image_pixels,\n arrays_to_print=[obj.image_pixels],\n cmap=cmap,\n window_shape=(250, 250),\n )", "def launch_DataViewer():\n app = QApplication([sys.argv[0]])\n \n screen_rect = app.desktop().screenGeometry()\n width, height = screen_rect.width(), screen_rect.height()\n \n \n \n form = DataViewer(screen_res = [width, height])\n form.show()\n app.exec_()", "def _runMeshViewer(self):\n self._hideWindow()\n wiz = MeshViewer()\n wiz.run()\n cv2.destroyAllWindows()\n self._showWindow()", "def main():\n photomosaic_generator = PhotomosaicGenerator.PhotomosaicGenerator()\n app = QtWidgets.QApplication(sys.argv)\n app.setQuitOnLastWindowClosed(False)\n gui = GUI.Window(photomosaic_generator)\n sys.exit(app.exec_())", "def run_gui(colmap_executable_path, image_dir_path, colmap_database_path, colmap_sparse_plaintext_path):\n args = [\n colmap_executable_path,\n 'gui',\n '--database_path', colmap_database_path,\n '--import_path', colmap_sparse_plaintext_path,\n '--image_path', image_dir_path,\n '--Render.min_track_len', '1',\n '--Render.max_error', '1000',\n '--Render.image_connections', '1'\n ]\n\n subprocess.run(args)", "def show_monitor_img(img):\n show_img = 'eog --fullscreen ' + img + ' &'\n #time.sleep(0.1)\n os.system(show_img)", "def _LaunchViewerProcess(filename, log_file):\n launch_base_command = []\n if platform.system() == \"Darwin\": # MacOSX\n launch_base_command = [\"open\", \"-W\", \"-a\", \"Skim\"]\n elif platform.system() == \"Linux\":\n launch_base_command = [\"evince\"]\n\n return subprocess.Popen(launch_base_command +\n [filename], stdout=log_file, stderr=log_file)", "def launch_data_processing_UI(self):\r\n self.window = QtWidgets.QMainWindow()\r\n # self.ui = CustomApplicationLauncherWindow(current_working_directory)\r\n self.ui = CustomApplicationLauncherWindow(\r\n self.names, self.current_working_directory, self.model_name)\r\n self.ui.setupUi(self.window)\r\n self.window.show()\r\n self.thread.stop() # stop webcam\r\n self.automatic_photos_taker.hide() # hide window\r", "def launch_single(self) -> None:\n print('launching single extraction gui')\n self.master.title('Python Data Extractor - Single')\n\n destroy_child_widgets(self.central_widget)\n SingleAppGui(self.central_widget)", "def run(self):\n files = []\n image_controls = []\n for folder in self.app_config[\"folders\"]:\n for filename in os.listdir(folder):\n if os.path.splitext(filename)[1].lower() in ['.jpg', '.jpeg', '.png', '.bmp']:\n files += [os.path.join(folder, filename)]\n\n for i,filename in enumerate(files):\n image_control = self.create_control(\"image\", \"image_\" + str(i))\n image_control.filename = filename\n image_control.x = 0\n 
image_control.y = 0\n image_control.width = self.offscreen_canvas.width\n image_control.height = self.offscreen_canvas.height\n image_control.enabled = False\n image_controls += [image_control]\n\n current_indx = 0\n\n while len(image_controls) > 0 and not self.stop_event.is_set():\n image_controls[current_indx].enabled = True\n\n # update the display buffer with image data from the controls\n self.update()\n\n # redraw the display\n self.draw()\n\n # display for a delay\n if self.stop_event.wait(self.app_config[\"delay\"]):\n break\n\n # go on to the next picture\n image_controls[current_indx].enabled = False\n\n current_indx += 1\n current_indx %= len(image_controls)", "def main():\n # Set start method for multiprocess to spawn for all platforms\n multiprocessing.set_start_method('spawn')\n\n logging_conf_file = pkg_resources.resource_filename(\n __name__, 'cfg/aecgviewer_aecg_logging.conf')\n\n options = argparse.ArgumentParser()\n options.add_argument(\"-i\", \"--indexfile\", type=str,\n help='Study index file to be loaded')\n options.add_argument(\"-l\", \"--logconffile\", type=str,\n help=f'logging configuration file (default: '\n f'{logging_conf_file})',\n default=logging_conf_file)\n args = options.parse_args()\n\n # Initialize logging\n print(f\"Loading logging configuration from: {args.logconffile}\")\n logging.config.fileConfig(args.logconffile)\n logger = logging.getLogger(\"aecgviewer\")\n for h in logger.root.handlers:\n if isinstance(h, logging.FileHandler):\n print(f\"Logging to {h.baseFilename} file with a \"\n f\"{type(h).__name__}\")\n logger.info(\"aECG viewer version %s loaded\",\n aecgviewer.__version__)\n\n app = QApplication(sys.argv)\n window = MainWindow(args.indexfile)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)\n sizePolicy.setHeightForWidth(False)\n window.setSizePolicy(sizePolicy)\n window.show()\n\n sys.exit(app.exec_())", "def main():\n # Import a image\n original_mt = SimpleImage('images/mt-rainier.jpg')\n # Show the original image\n original_mt.show()\n reflected = reflect('images/mt-rainier.jpg')\n # Show the vertically mirrored image\n reflected.show()", "def launchImageEditor(filename, editImageFile=\"string\", viewImageFile=\"string\"):\n pass", "def main():\n app = qt.QApplication([])\n widget = IPythonDockWidget()\n widget.show()\n app.exec()", "def displayImage(image):\n # Create a window of the correct dimensions\n width = image.getWidth()\n height = image.getHeight()\n imageWindow = cImage.ImageWin(\"Image Viewer\", width, height)\n\n # Display the image in this window\n image.draw(imageWindow)\n\n # Wait for a mouse click to close the window\n imageWindow.exitOnClick()", "def imshow(*images):\r\n\r\n print('Showing %d image(s)...' % (len(images)))\r\n\r\n for i, img in enumerate(images):\r\n print('Image %d of shape %s, dtype %s.' 
% (i + 1, img.shape, img.dtype))\r\n print(img, '\\n')\r\n cv2.imshow('Image ' + str(i + 1), img)\r\n print('Press Q to close image(s)...')\r\n cv2.waitKey(0)", "def consoleUI():\n import argparse\n parser = argparse.ArgumentParser(description=\"Annotation viewer for visual side by side comparisons\")\n # Required arguments\n parser.add_argument(\"queryfile\", help=\"Path to query file\", type=argparse.FileType('r'))\n parser.add_argument(\"referencefile\", help=\"Path to reference file\", type=argparse.FileType('r'))\n # Optional Arguments\n parser.add_argument(\"-l\",\"--line\", help=\"Goto line\", nargs=1, type=int)\n parser.add_argument(\"-z\",\"--zoom\", help=\"Zoom level (bp)\", nargs=1, type=int)\n parser.add_argument(\"-f\",\"--features\", help=\"Nearby feature display size\", nargs=1, type=int)\n # Parse\n args = parser.parse_args()\n\n # Setup variables\n line = 0\n if args.line:\n line = args.line[0]\n zoom = 10000\n if args.zoom:\n zoom = int((args.zoom[0])/2) # Divide the value by two as view size is the size either side\n features = 5\n if args.features:\n features = args.features[0]+1\n\n # Start program\n controlPane(args.queryfile, args.referencefile, line, zoom, features)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The data in this frame as an `~astropy.coordinates.MoonLocation` class.
def moon_location(self): from .moon import MoonLocation cart = self.represent_as(CartesianRepresentation) return MoonLocation(x=cart.x, y=cart.y, z=cart.z)
[ "def GetLocationMatrix(self):\n\t\treturn self._LocationMatrix", "def location(self):\n return (self._moment.get(\"latitude\"), self._moment.get(\"longitude\"))", "def get_latlon():\r\n\t\tmagtag.url = get_data_source_url(api=\"forecast5\", location=secrets[\"openweather_location\"])\r\n\t\tmagtag.json_path = [\"city\"]\r\n\t\traw_data = magtag.fetch()\r\n\t\treturn raw_data[\"coord\"][\"lat\"], raw_data[\"coord\"][\"lon\"]", "def get_lat_lon_values():\n refcube = xr.open_dataset(settings.ACCESS_G_PATH + settings.access_g_filename('20190101'))\n return refcube.lat.values, refcube.lon.values", "def location_field_indexing(self):\n return {\n 'lat':self.latitude,\n 'lon':self.longitude\n }", "def get_lat(city: Record) -> np.ndarray:\n return city.geometry.y", "def moonPosition(datetime):\n j = pyEphem.Moon()\n j.compute(datetime)\n\n ra, dec = j.ra, j.dec\n return ra, dec", "def location_details(self):\n return self._location_details", "def geo_data(self):\n return self._geo_data", "def artistLocationGeo(self):\n try:\n lat = float(self.locationGraph.objects(self.locationURI, self.latPredicate).next())\n lon = float(self.locationGraph.objects(self.locationURI, self.longPredicate).next())\n print \"Latitude is\", lat\n print \"Longitude is\", lon\n return lat, lon\n except StopIteration: # If generator is empty\n print \"No geodata!\"\n except AttributeError: # If locationURI hasn't been defined\n print \"LocationURI not defined!\"", "def longitude(self) -> float:\n return self.raw_data[\"lon\"]", "def __parse_location(row):\n loc = Location(\n row['street_address'],\n row['city'],\n row['state'],\n row['zipcode'],\n row['latitude'],\n row['longitude'],\n row['id'],\n row['store_id']\n )\n return loc", "def convert_coords(data):\n data['Latitude'].dropna(inplace=True)\n data['Longitude'].dropna(inplace=True)\n\n data['Latitude'] = data['Latitude'].astype(\n float)\n data['Longitude'] = data['Longitude'].astype(\n float)\n\n data['dec_deg_latlon'] = data[[\n 'Latitude', 'Longitude']].values.tolist()\n\n # convert decimal degrees to utm and make new columns for UTM Northing and Easting\n data['utm_latlon'] = [utm.from_latlon(\n e[0], e[1]) for e in data['dec_deg_latlon']]\n\n data['utm_E'] = [e[0] for e in data['utm_latlon']]\n data['utm_N'] = [e[1] for e in data['utm_latlon']]\n\n xyz = pd.DataFrame()\n xyz['r'] = 6378137 + data['Elevation']\n\n xyz['x'] = xyz['r'] * \\\n np.cos(data['Latitude'].apply(deg2rad)) * \\\n np.cos(data['Longitude'].apply(deg2rad))\n xyz['y'] = xyz['r'] * \\\n np.cos(data['Latitude'].apply(deg2rad)) * \\\n np.sin(data['Longitude'].apply(deg2rad))\n xyz['z'] = xyz['r'] * \\\n np.sin(data['Latitude'].apply(deg2rad)) * (1 - 1 / 298.257223563)\n\n data['xyz_coords'] = xyz[['x', 'y', 'z']].values.tolist()\n\n return data", "def lon(self):\n if self.repr == 'cartesian':\n self.to_spherical_coords()\n return switch_frame(self.frame, lambda c: c.ra.value, lambda c: c.az.value, lambda c: c.lon.value,\n lambda c: c.l.value, lambda c: c.sgl.value, self.__coord)", "def get_location_dataframe(matrix_dataframe):\n\n df_location = matrix_dataframe.loc[:, ['Value.M41', 'Value.M42', 'Value.M43']]\n df_location.columns = ['X', 'Y', 'Z']\n return df_location", "def location(self) -> LyricLocation:\n return self.coordinator.data.locations_dict[self._location.locationID]", "def get_location(cls) -> Location:\n latitude, longitude, datetime_ = GPSD._get_coordinates()\n return Location(latitude, longitude, datetime_)", "def earth_location(self):\n return None", "def longitude(self):\n 
return self.__longitude", "def longitude(self) -> float:\r\n lon = self.__get_mower_attributes()[\"positions\"][0][\"longitude\"]\r\n return lon" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a list without duplicates that is the intersection of s1 and s2
def intersect(s1, s2): # build a list containing common elements tmp = [] for e1 in s1: for e2 in s2: if e1 == e2: tmp.append(e1) break # drop the duplicates result = [] for e in tmp: if e not in result: result.append(e) return result
[ "def intersection(a,b):\n return [x for x in a if x in a and x in b]", "def intersection(a,b):\n return \"\".join(sorted(set(c for c in a+b)))", "def list_intersection(a, b):\n return [item for item in a if item in b]", "def intersect(seq1, seq2):\n ret = []\n for elem in seq1:\n if elem in seq2:\n ret.append(elem)\n return ret", "def common_elements(set_1, set_2):\n unique_ele = set()\n ret_lst = []\n\n for ele in set_1:\n unique_ele.add(ele)\n for ele2 in set_2:\n if ele2 in unique_ele:\n ret_lst.append(ele2)\n return ret_lst", "def intersect(list1, list2):\r\n intersection = []\r\n for element in list1:\r\n if element in list2:\r\n intersection.append(element)\r\n return intersection", "def get_differt_entries(list1, list2):\n return [a for a in list1 + list2 if (a not in list1) or (a not in list2)]", "def intersection(self, other_set):\n new_set = Set()\n\n o_set = other_set.hash_set.keys()\n\n for element in o_set:\n if self.contains(element):\n new_set.add(element)\n\n return new_set", "def intersection(*args):\n if not args:\n return []\n\n # remove duplicates from first list whilst preserving order\n base = list(OrderedDict.fromkeys(args[0]))\n\n if len(args) == 1:\n return base\n else:\n others = set(args[1]).intersection(*args[2:])\n return [e for e in base if e in others]", "def filter_list(seq1, seq2):\n seq2 = set(seq2)\n return [item for item in seq1 if item not in seq2]", "def get_existing_kics(arr1, arr2):\n list_kics = set(arr1).intersection(arr2)\n logger.info(\"done: %d kics\" % len(list_kics))\n return list(list_kics)", "def ordered_intersect(a: Iterable, b: Iterable) -> OrderedSet:\n assert not isinstance(a, str) # treat string as atomic, not iterable\n assert not isinstance(b, str) # treat string as atomic, not iterable\n b = set(b)\n return OrderedSet([v for v in a if v in b])", "def intersect(list1, list2):\n \n intersection = []\n append = intersection.append\n idx1 = 0\n idx2 = 0\n \n while idx1 < len(list1) and idx2 < len(list2):\n if list1[idx1] < list2[idx2]:\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n idx2 += 1\n else:\n append(list1[idx1])\n idx1 += 1\n idx2 += 1\n \n return intersection", "def intersection(l1, l2):\n l1.sort()\n l2.sort()\n i = j = 0\n inter = []\n while i < len(l1) and j < len(l2):\n if l1[i] < l2[j]:\n i += 1\n elif l1[i] > l2[j]:\n j += 1\n else:\n inter.append(l1[i])\n i += 1\n j += 1\n return inter", "def get_not_in_list_one(list1, list2):\n return [x for x in list1 if x not in set(list2)]", "def compute_overlap(a,b):\n return list((Counter(a) & Counter(b)).elements())", "def intersection(self, other_set):\n new_set = TreeSet()\n for item in other_set:\n if self.contains(item):\n new_set.add(item)\n return new_set", "def intersection(self, other):\n return self.context.parallelize(\n list(set(self.toLocalIterator()) & set(other.toLocalIterator()))\n )", "def multiplex_intersect(sets1, sets2):\n return list(map(set.intersection,sets1, sets2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to import_modules the forest plant records into the survey
def import_fauna(survey, species_list, infile, format, verbose=None): start = time.time() now = datetime.datetime.now() info_msg = ' Started the import_modules of fauna information {date}'. \ format(date=now.strftime("%Y-%m-%d %H:%M")) logging.info(info_msg) try: version = tools_lib.get_git_tag().strip() except: version = '' info_msg = ' ForestEye Collect Importer {version}'.format(version=version) logging.info(info_msg) if format == '2010': import_fauna_2010.import_fauna_2010(survey, species_list, infile) elif format == '2015': import_fauna_2015.import_fauna_2015(survey, species_list, infile) else: warn_message = "The file format {format} is not supported".format(format=format) logging.warn(warn_message) info_msg = "The import_modules took {time:.2} seconds".format(time=time.time() - start) logging.info(info_msg) now = datetime.datetime.now() info_msg = ' Finished the import_modules of fauna information dataset {date}'. \ format(date=now.strftime("%Y-%m-%d %H:%M")) logging.info(info_msg)
[ "def db_imports():\n import_energy_data()", "def load_modules(self, cfg):\n \n if self.did_load_modules:\n return \n \n print('Loading Superglueflow with learned weights')\n #load trian flow\n weights = torch.load(cfg[\"trianflow\"].pretrained)\n self.trianFlow.load_state_dict(weights['model_state_dict'])\n\n #load superpoint\n #superglue matcher loads superoint and superglue in their resepctive __init__ functions\n \n self.did_load_modules = True\n\n pass", "def run_import():\n db_engine = get_db_engine()\n\n with open(\"DRUGIDS\") as fp:\n drug_ids = [line.strip() for line in fp]\n\n # Scrape the site, and pull the data we need\n # This would be the \"Source\" in a DAG pipline, I.e. the first node\n logger.info(\"Scraping the Drugbank Site\")\n drug_metadata = pull_drugs(drug_ids)\n\n # Check the Database against the \"action\" and \"alt_identifier\" types\n # we observe from the site, and the one's we already have in the database.\n # Insert / Update accordingly. After the update, we can then insert\n # data with references to these tables.\n # This is simply a transformation (T in ETL), again another node / step\n # in the pipeline.\n logger.info(\"Equalizing Type IDs\")\n equalize_type_ids(db_engine, drug_metadata)\n\n # Transform the Metadata dicts into lists of tuples, 1 list per relation\n # so we can bulk insert accordingly\n # The sink in the graph.\n logger.info(\"Transforming data to tuples for insertion\")\n db_rows_to_insert: Dict = transform_to_db_rows(db_engine, drug_metadata)\n\n # Insert the rows.\n logger.info(\"Inserting Data\")\n write_rows_to_db(db_engine, db_rows_to_insert)", "def import_models(model_names):\n for t in model_names:\n Logger.instance().debug(\"TableInputOutputInformation.import_models: importing \" + str(t))\n importlib.import_module(t)", "def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)", "def test_import_data(self):\n clear_collections()\n fdr = \"files\"\n prod_f = \"products.csv\"\n cust_f = \"customers.csv\"\n rent_f = \"rentals.csv\"\n records, errors = import_data(fdr, prod_f, cust_f, rent_f)\n\n self.assertEqual(records, (7, 4, 6))\n self.assertEqual(errors, (0, 0, 0))", "def load_fixtures():\n load_model('data/grs_schemas.json', GrsSchema)\n load_model('data/tiles.json', Tile)\n load_model('data/composite_functions.json', CompositeFunctionSchema)\n load_collections('data/collections.json')\n load_items('data/items.json')\n\n db.session.commit()", "def load_preproc_data_german(protected_attributes=None):\n def custom_preprocessing(df):\n \"\"\" Custom pre-processing for German Credit Data\n \"\"\"\n\n def group_credit_hist(x):\n if x in ['A30', 'A31', 'A32']:\n return 'None/Paid'\n elif x == 'A33':\n return 'Delay'\n elif x == 'A34':\n return 'Other'\n else:\n return 'NA'\n\n def group_employ(x):\n if x == 'A71':\n return 'Unemployed'\n elif x in ['A72', 'A73']:\n return '1-4 years'\n elif x in ['A74', 'A75']:\n return '4+ years'\n else:\n return 'NA'\n\n def group_savings(x):\n if x in ['A61', 'A62']:\n return '<500'\n elif x in ['A63', 'A64']:\n return '500+'\n elif x == 'A65':\n return 'Unknown/None'\n else:\n return 'NA'\n\n def group_status(x):\n if x in ['A11', 'A12']:\n return '<200'\n elif x in ['A13']:\n return '200+'\n elif x == 'A14':\n return 'None'\n else:\n return 'NA'\n\n status_map = {'A91': 1.0, 'A93': 1.0, 'A94': 1.0,\n 'A92': 0.0, 'A95': 0.0}\n df['sex'] = df['personal_status'].replace(status_map)\n\n\n # group credit history, savings, and employment\n df['credit_history'] = 
df['credit_history'].apply(lambda x: group_credit_hist(x))\n df['savings'] = df['savings'].apply(lambda x: group_savings(x))\n df['employment'] = df['employment'].apply(lambda x: group_employ(x))\n df['age'] = df['age'].apply(lambda x: np.float(x >= 26))\n df['status'] = df['status'].apply(lambda x: group_status(x))\n\n return df\n\n # Feature partitions\n XD_features = ['credit_history', 'savings', 'employment', 'sex', 'age']\n D_features = ['sex', 'age'] if protected_attributes is None else protected_attributes\n Y_features = ['credit']\n X_features = list(set(XD_features)-set(D_features))\n categorical_features = ['credit_history', 'savings', 'employment']\n\n # privileged classes\n all_privileged_classes = {\"sex\": [1.0],\n \"age\": [1.0]}\n\n # protected attribute maps\n all_protected_attribute_maps = {\"sex\": {1.0: 'Male', 0.0: 'Female'},\n \"age\": {1.0: 'Old', 0.0: 'Young'}}\n\n return GermanDataset(\n label_name=Y_features[0],\n favorable_classes=[1],\n protected_attribute_names=D_features,\n privileged_classes=[all_privileged_classes[x] for x in D_features],\n instance_weights_name=None,\n categorical_features=categorical_features,\n features_to_keep=X_features+Y_features+D_features,\n metadata={ 'label_maps': [{1.0: 'Good Credit', 2.0: 'Bad Credit'}],\n 'protected_attribute_maps': [all_protected_attribute_maps[x]\n for x in D_features]},\n custom_preprocessing=custom_preprocessing)", "def test_callsites_importfrom(setup_importfrom):\n assert setup_importfrom.assign_call_find() == {('pruebas', 'Clase1_1', 'Clase1_2', 'firstn'): [4]}", "def load_inputs(mod, switch_data, inputs_dir):\n\n # TODO: maybe move these columns to a storage_gen_info file to avoid the weird index\n # reading and avoid having to create these extra columns for all projects;\n # Alternatively, say that these values are specified for _all_ projects (maybe with None\n # as default) and then define STORAGE_GENS as the subset of projects for which\n # gen_storage_efficiency has been specified, then require valid settings for all\n # STORAGE_GENS.\n switch_data.load_aug(\n filename=os.path.join(inputs_dir, \"gen_info.csv\"),\n optional_params=[\n \"gen_store_to_release_ratio\",\n \"gen_storage_energy_to_power_ratio\",\n \"gen_storage_max_cycles_per_year\",\n ],\n param=(\n mod.gen_storage_efficiency,\n mod.gen_store_to_release_ratio,\n mod.gen_storage_energy_to_power_ratio,\n mod.gen_storage_max_cycles_per_year,\n ),\n )\n # Base the set of storage projects on storage efficiency being specified.\n # TODO: define this in a more normal way\n switch_data.data()[\"STORAGE_GENS\"] = {\n None: list(switch_data.data(name=\"gen_storage_efficiency\").keys())\n }\n switch_data.load_aug(\n filename=os.path.join(inputs_dir, \"gen_build_costs.csv\"),\n param=(mod.gen_storage_energy_overnight_cost),\n )\n switch_data.load_aug(\n optional=True,\n filename=os.path.join(inputs_dir, \"gen_build_predetermined.csv\"),\n param=(mod.build_gen_energy_predetermined,),\n )", "def loadModules(self):\n\n connection = sqlite3.connect( self.path )\n cursor = connection.cursor()\n\n if not os.path.exists(self.path):\n # Datenbank exitstiert nicht, wird nun angelegt\n print('Datenbank modules.db nicht vorhanden - Datenbank wird angelegt')\n\n sql = 'CREATE TABLE modules('\\\n 'questName STRING, rightAnswer STRING, falseAnswer1 STRING, falseAnswer2 STRING, falseAnswer3 STRING, richtig INTEGER, falsch INTEGER '\\\n ')'\n cursor.execute(sql)\n\n print('Anlegen der Datenbank modules.db mit ' + sql + ' abgeschlossen')\n \n connection.commit()\n 
connection.close()", "def import_survey(self,sImportData,sImportDataType,sNewSurveyName=None,DestSurveyID=None):\n params = self.__format_params(locals().copy())\n method = \"import_survey\"\n r = self.call_rpc(method,params)\n return r.json()['result']", "def Nu_importAllRefs() :\n\tsysPath = 'O:/studioTools/maya/python/tool/rig/nuTools/pipeline'\n\tif not sysPath in sys.path : \n\t\tsys.path.append(sysPath)\n\n\timport pipeTools\n\treload(pipeTools)\n\t\n\n\tpipeTools.importAllRefs()", "def importing(request):\n try:\n # create a directory to store the model files\n model_files_dir = \"model_files\"\n os.mkdir(model_files_dir)\n \n zip_file = zipfile.ZipFile(request.FILES['model_path'], mode='r')\n for file in zip_file.namelist():\n # extract sas files\n if str(file) == 'PATHSCORE.spk':\n inner_zip = io.BytesIO(zip_file.read(file))\n zip2 = zipfile.ZipFile(inner_zip)\n for file2 in zip2.namelist():\n if str(file2) == 'SASSCORE.spk':\n score_spk = io.BytesIO(zip2.read(file2))\n zip3 = zipfile.ZipFile(score_spk)\n for i in zip3.namelist():\n zip3.extract(i, model_files_dir)\n \n # extract mining result files\n if str(file) == 'MININGRESULT.spk':\n inner_zip = io.BytesIO(zip_file.read(file))\n zip2 = zipfile.ZipFile(inner_zip)\n for i in zip2.namelist():\n zip2.extract(i, model_files_dir)\n \n # Save the model files to database\n model_uuid = uuid.uuid1() # id to specify the model\n files = os.listdir(model_files_dir) \n for f in files:\n with open(model_files_dir + '/' + f, 'r') as s:\n data = s.read()\n model_name = str(request.FILES['model_path'])\n entry = Model_Main(model_Id=model_uuid, model_Name=model_name[:model_name.find('.spk')], file_Name= str(f), model_File=data)\n entry.save()\n \n transaction.commit() # commit the memory result to database \n \n finally:\n shutil.rmtree(model_files_dir)\n \n return HttpResponse('The model was imported successfully.')", "def post_import(self):", "def load_fixtures():\n for model, fixturefile in model_fixtures:\n fabutils.manage_py('syncdata %s' % (PROJECT_ROOT / fixturefile,))", "def setup_modules(self):\n if self.modules_do_not_exist():\n modules = self.create_modules_from_config(self.config['modules'])\n self.db.session.add_all(modules)\n self.db.session.commit()", "def import_collection(self, filepath, options):\n verbosity = options['verbosity']\n strict = options['strict']\n\n logger.info(f'Importing {filepath}')\n with open(filepath, 'r') as handle:\n fhirjs = json.load(handle)\n bundle = Bundle(fhirjs, strict=strict)\n\n for entry in fhirjs[\"entry\"]: # TODO: Is there something more elegant than mucking through a JSON structure?\n res = entry[\"resource\"] # TODO: Is there something more elegant than mucking through a JSON structure?\n res_type = res[\"resourceType\"]\n\n # OC: Creating spans from the tracer of the execution context will divide the overall execution into small, measurable chunks\n tracer = execution_context.get_opencensus_tracer()\n with tracer.span(name=f'Import {res_type}'):\n logger.debug(f'Resource: {res}')\n\n if res_type == \"Patient\":\n self.import_patient(res, options)\n\n if res_type == \"Condition\":\n self.import_condition(res, options)\n\n if res_type == \"Observation\":\n self.import_observation(res, options)", "def importRBFs(filePath):\n data = __importData(filePath)\n if data is None:\n return\n for k, v in data.iteritems():\n rbfModule = RBF_MODULES[v[\"rbfType\"]]\n rbfModule.createRBFFromInfo({k: v})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the Selenium server
def startSelenium(self): os.chdir(r"../server/") args = self.buildServerStartCommand() self.startSelenium.config(state="disabled") self.seleniumServer = Popen(args) os.chdir(r"../scripts/") # Crude wait to give the server time to start time.sleep(5) self.serverStatus(Event())
[ "def start_selenium_server():\n\n seleniumserver_path = find_selenium_server()\n if not seleniumserver_path:\n print('The file \"standalone-server-standalone-x.x.x.jar\" not found.')\n return\n\n cmd = ['java', '-jar', seleniumserver_path]\n subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_CONSOLE)", "def run_selenium():\n froid.run_selenium()", "def setUpModule():\n global WEBDRIVER_SERVER_URL\n global WEBDRIVER_PROCESS\n if not WEBDRIVER_SERVER_URL:\n WEBDRIVER_SERVER_URL = 'http://localhost:%d' % WEBDRIVER_PORT\n WEBDRIVER_PROCESS = subprocess.Popen([WEBDRIVER_EXE,\n '--port=%d' % WEBDRIVER_PORT])\n time.sleep(3)", "def launch_app(self):\n self._selenium_web_driver().launch_app()", "def _start_webdriver_instance(self, url: str) -> WebDriver:\n pass", "def launch_browser(self):\n self.driver = webdriver.Chrome()", "def run_locally(self):\n print('Running for browser: ' + self.driver_name)\n return self.trigger_pytest(self.driver_name)", "def run_server(server):\n\n server.start()\n server.wait()", "def start(self):\n self.log('Server started...')\n self.httpd.serve_forever()", "def runDriver(driver_path):\n print(\"Running driver...\") \n driver = webdriver.Chrome(driver_path)\n return driver", "def set_selenium_instances(self):\n self.selenium.set_instances(\n self.cfg,\n self.leader_starter_instance.arango_importer,\n self.leader_starter_instance.arango_restore,\n self.leader_starter_instance.all_instances[0],\n )", "def __init__(self):\n options = Options()\n options.add_argument('-headless')\n self.path = \"C:\\\\Users\\\\weimaoquan\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe\"\n self.browser = webdriver.Chrome(executable_path=self.path, options=options)\n self.browser.implicitly_wait(3)\n self.login()", "def start(self):\n server_host = Constants.RPI_IP\n server_port = Constants.WEB_SERVER_PORT # random.randint(10000, 60000)\n new_loop = asyncio.new_event_loop()\n start_server = websockets.serve(self.__send_data, server_host, server_port, loop=new_loop)\n t = threading.Thread(target=self.__start_loop, args=(new_loop, start_server))\n t.start()\n print(\"Server launched\")\n time.sleep(2)", "def _selenium_start(*args, **kwargs):\n try:\n subprocess.Popen = partial(subprocess.Popen,\n preexec_fn=setpgrp)\n VideoCall._selenium_start_orig(*args, **kwargs)\n finally:\n subprocess.Popen = VideoCall._subprocess_popen_orig", "def set_selenium_remote_session(self, selenium_url=''):\n if self.aborting:\n return self\n\n if self.use_firefox:\n self.browser = webdriver.Remote(\n command_executor=selenium_url,\n desired_capabilities=DesiredCapabilities.FIREFOX)\n else:\n self.browser = webdriver.Remote(\n command_executor=selenium_url,\n desired_capabilities=DesiredCapabilities.CHROME)\n\n self.logger.info('Session started - %s'\n % (datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n return self", "def __start_browsermob():\n subprocess.Popen([config.browsermob_dir + \"/bin/browsermob-proxy\"])\n i = 3\n while i > 0:\n print(\"waiting \" + str(i))\n time.sleep(1)\n i -= 1", "def driver(request):\n print(\"\\nstart browser for test..\")\n browser_name = request.config.getoption(\"browser_name\")\n if browser_name == \"chrome\":\n options = Options()\n options.add_argument('--no-sandbox')\n # options.add_argument(\"--headless\")\n options.add_argument(\"window-size=1920,1080\")\n options.add_argument(\"--incognito\")\n driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)\n driver.get(url=config.CROP_MONITORING_URL)\n 
time.sleep(2)\n\n elif browser_name == \"firefox\":\n options = FirefoxOptions()\n options.add_argument('--no-sandbox')\n # options.add_argument(\"--headless\")\n options.add_argument(\"window-size=1920,1080\")\n options.add_argument(\"--incognito\")\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options)\n driver.get(url=config.CROP_MONITORING_URL)\n time.sleep(2)\n\n else:\n print(f\"Browser <browser_name> is still not implemented\")\n yield driver\n print(\"\\nquit browser..\")\n driver.quit()", "def tests_setup(request):\n logging.info(\"Initializing the Selenium Driver\")\n driver = webdriver.Firefox()\n driver.maximize_window()\n\n # use the same driver for all the test cases\n request.cls.driver = driver\n\n login_to_kubeflow(driver)\n\n # Run the test cases\n yield driver\n # After all the test cases where run\n\n logging.info(\"Closing the Selenium Driver\")\n driver.close()", "def run_on_browserstack(self):\n test_status = 0\n # If password not provided in command line look ad server configuration file\n if self.bs_username is None:\n credentials = self.tc.gid('browserstack').split(':')\n self.bs_username = credentials[0]\n self.bs_password = credentials[1]\n if bs_api.wait_for_free_sessions((self.bs_username, self.bs_password),\n self.tc.gid('session_waiting_time'), self.tc.gid('session_waiting_delay')):\n # load browserstack variables from configuration files\n if self.env_type == 'versioned':\n if self.test_type == 'smoke':\n self.bs_config.read(self.bs_config_file_smoke)\n elif self.test_mobile == 'yes':\n self.bs_config.read(self.bs_config_file_mobile)\n else:\n self.bs_config.read(self.bs_config_file)\n for config_section in self.bs_config.sections():\n print('Running combination: ' + config_section)\n test_status = self.trigger_pytest(config_section)\n\n # load browserstack variables from OS environment variable\n elif self.env_type == 'direct':\n config_list = json.loads(str(os.environ['BROWSERSTACK']))\n for config_section in config_list['test_suite']:\n print('Running combination: ' + str(config_section))\n test_status = self.trigger_pytest(config_section)\n return test_status", "def run(self, daemon=False):\n self.run_browser(daemon)\n self.run_register(daemon)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop the Selenium server
def stopSelenium(self): if self.seleniumServer != 0: self.seleniumServer.terminate() self.seleniumServer = 0 self.serverStatus(Event()) return if sys.platform[:5] == "linux": result = os.popen("ps x").readlines() for line in result: if "selenium" in line and "java" in line: pid = line.split()[0] os.system("kill %s" % pid) print "Stopping process %s started with command %s" % (pid, line) self.serverStatus(Event()) return
[ "def stop():\n driver.quit()\n result = status", "def stop_webserver():\r\n _webserver_do('stop')", "def stop_server():\n\n executeCmd(\"./bin/fteproxy --quiet --mode server --stop\")\n\n time.sleep(1)", "def stop_server(self):\n response = requests.post(self._build_url(\"stop\"))\n return response", "def kill_driver(self):\n # Logout hasn't been implemented yet\n self.driver.quit()", "def shutdownChrome(driver):\n driver.close()\n driver.quit()", "def stop_client():\n\n executeCmd(\"./bin/fteproxy --quiet --mode client --stop\")\n\n time.sleep(1)", "def close(self):\n print(\"Closing down SeleniumRunner...\")\n self.driver.quit()", "def stop_server():\r\n os.system(\"kill -9 $(ps -ef|grep node |awk '$0 !~/grep/ {print $2}' |tr -s '\\n' ' ')\")\r\n os.system(\"kill -9 $(ps -ef|grep adb |awk '$0 !~/grep/ {print $2}' |tr -s '\\n' ' ')\")", "def close_webdriver(self):\n self._gateway.terminateCrawler()\n self._gateway.close(False, True)\n self._gateway.shutdown(True)\n self._subProcess.terminate()", "def stop_smbserver(self):\n self.server_process.terminate()\n sleep(0.5)", "def stop(self):\n self.logger.debug('Server - td-agent-bit - stop call.')\n self.change_service_status(\"stop\")", "def stop_server(self):\n\n # Set red flag\n self.red_flag = True\n # Set server flag off\n self.server_flag = False\n \n # Only used in abnormal conditions or to interrupt the crawl\n for worker in self.workers:\n worker.stop()\n\n print 'Crawler process =>',self.id,'stopped.'\n \n # log.info(\"Crawl Process =>\",self.id,\"stopped.\")", "def destroy(self):\n #print 'start destroy'\n #if self.started:\n #print \"Detroying this client %s\" % self.selenium\n self.selenium_mgr.stop_client(self.selenium)\n self.selenium_mgr.destroy_client(self.selenium)\n self.started = False", "def stop_xvc_server(self):\n self.serverThread.stop()\n self.serverThread = None", "def tearDownClass(cls):\n # type: () -> None\n cls.test_server.stop()\n\n # wait a bit for server to shutdown\n time.sleep(1)", "def stop(self):\n if self.notebook_server_exists():\n self.log.info(\"Stopping notebook server...\")\n if sys.platform == 'win32':\n self._notebook_server.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n self._notebook_server.terminate()\n\n for i in range(10):\n retcode = self._notebook_server.poll()\n if retcode is not None:\n self._notebook_server_exists = False\n break\n time.sleep(0.1)\n\n if retcode is None:\n self.log.critical(\"Couldn't shutdown notebook server, force killing it\")\n self._notebook_server.kill()\n\n self._notebook_server.wait()", "def server_stop():\n # since dev_appserver2, we need to kill 2 processes..\n run = \"\"\"\n psgrep dev_appserver.py | awk '{print $2}' | xargs kill -9\n psgrep _python_runtime.py | awk '{print $2}' | xargs kill -9\n \"\"\"\n\n # std: kill pid file..\n daemon.kill(opts.proj.dirs.gae.dev_appserver_pid)", "def stop(self):\n try:\n self._testpmd.send('stop')\n self._testpmd.wait('Done.', 5)\n self._testpmd.send('quit', 2)\n self._testpmd.kill()\n except pexpect.EOF:\n pass\n dpdk.cleanup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the timeline of a user. If since_id is set, only tweets newer than since_id are fetched instead of the whole timeline
def _get_all_timeline(self, screen_name, since_id=None): if since_id is not None: data = self._twitter_instance.statuses.user_timeline( screen_name=screen_name, count=200, trim_user=True, since_id=since_id) else: data = self._twitter_instance.statuses.user_timeline( screen_name=screen_name, count=200, trim_user=True) while len(data) >= 200: print("For user {0} we are at {1} tweets".format(screen_name, str(len(data)))) last_id = data[-1]["id"] if since_id is not None: _ = self._twitter_instance.statuses.user_timeline( screen_name=screen_name, count=200, trim_user=True, max_id=last_id, since_id=since_id) else: _ = self._twitter_instance.statuses.user_timeline( screen_name=screen_name, count=200, trim_user=True, max_id=last_id) if len(_) == 1: break data += _ return data
[ "def get_timeline(username, since_id=None, count=0):\n twitter = OAuth1Session(client_key=settings.CLIENT_KEY, client_secret=settings.CLIENT_SECRET,\n resource_owner_key=settings.ACCESS_TOKEN_KEY,\n resource_owner_secret=settings.ACCESS_TOKEN_SECRET)\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'\n params = {\n 'screen_name': username,\n }\n if since_id:\n params.update(since_id=since_id)\n if count:\n params.update(count=count)\n r = twitter.get(url, params=params)\n return r.json()", "def list_timeline(user, list_id, owner_id, since_id, count):\n try:\n t = TwitterUser(user.access_token, user.access_token_secret)\n return t.get_list_timeline(list_id, owner_id, since_id, count)\n except twitter.TwitterError as e:\n from random import choice, randint\n if e.message[0]['code'] == 88:\n user = list(User.objects(access_token_active = True).skip(randint(0,1)).limit(10))\n user = choice(user)\n return list_timeline(user, list_id, owner_id, since_id, count)\n else:\n raise e", "def get_tweets_for(user, ntweets=200, max_id=None, since_id=None):\r\n params = {}\r\n if max_id:\r\n params['max_id'] = max_id\r\n if since_id:\r\n params['since_id'] = since_id\r\n user_tweets, iters = [], 0\r\n while len(user_tweets) < ntweets and iters < MAX_REQUESTS_PER_15_MIN:\r\n nrequested = min(200, ntweets - len(user_tweets))\r\n tweets = twitter_api.statuses.user_timeline(screen_name=user,\r\n count=nrequested, include_rts=0, **params)\r\n user_tweets.extend(tweets)\r\n iters += 1\r\n if len(tweets) == 0:\r\n ## got no results: maybe hit limit, or ran out of tweets, or error\r\n break\r\n params['max_id'] = tweets[-1]['id']\r\n return user_tweets", "def get_timeline(self, model, user, last_days=None):\r\n key = make_key('timeline', user.pk)\r\n timeline = cache.get(key)\r\n if timeline is None:\r\n query = self._get_query(model, user, last_days)\r\n timeline = self.filter(query).values_list('object_id', flat=True)\r\n cache.set(key, timeline)\r\n return timeline", "def get_tweets(self, account, number=MAX_TWEETS, since_id=None, max_id=None):\n import twitter\n\n all_tweets = []\n while number > 0:\n try:\n tweets = self.api.GetUserTimeline(\n screen_name=account,\n include_rts=False,\n exclude_replies=True,\n count=min(number, CHUNK_SIZE),\n since_id=since_id,\n max_id=max_id,\n )\n except twitter.TwitterError as e:\n raise plugin.PluginError(f'Unable to fetch timeline {account} for {e}')\n\n if not tweets:\n break\n\n all_tweets += tweets\n number -= len(tweets)\n max_id = tweets[-1].id - 1\n\n return all_tweets", "def get_tweets_for_user(self, user_id):\n tweets = [tweet for tweet in self.tweets if tweet.user.id == user_id]\n # print(tweets)\n return tweets", "def get_posts_timeline(self, user, timeline):\r\n key = make_key('posts_timeline', user.pk)\r\n posts_timeline = cache.get(key)\r\n if posts_timeline is None:\r\n posts_timeline = self.filter(pk__in=timeline).order_by('-created')\r\n cache.set(key, posts_timeline)\r\n return posts_timeline", "def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets", "def timeline():\n if not g.user:\n return redirect(url_for('public_timeline'))\n users = [u.username for u in g.user.followers] or []\n users.append(g.user.username)\n messages = Message.objects.filter(author__in=users)\\\n .order_by('-pub_date').limit(PER_PAGE)\n return render_template('timeline.html', messages=messages)", "def timeline():\n from app.api.data.friend import get_friends\n\n username = 
get_jwt_identity()\n # Check if user exists.\n if not users.exists(username=username):\n return bad_json_response('user not found')\n\n # Get the user's own posts.\n posts_array = get_posts(username)\n\n # Get the user's friends.\n friends = get_friends(username)\n\n for i in range(len(friends)):\n try:\n friend = friends[i]['username']\n friend_address = get_user_ip(friend)\n # Get the posts of the friend.\n response = requests.get(\n friend_address + '/api/user/posts',\n params={\n 'username': friend\n },\n headers=request.headers\n ).json()\n if response['success']:\n posts = response['data']['posts']\n posts_array = posts_array + posts\n except BaseException:\n continue\n\n posts_array = sorted(\n posts_array,\n key=lambda k: datetime.datetime.strptime(k['creation_date'],\n '%Y-%m-%d %H:%M:%S'),\n reverse=True\n )\n\n return good_json_response({\n 'posts': posts_array\n })", "def getNewsFeed(self, userId):\r\n import heapq\r\n import itertools\r\n tweet=heapq.merge(*(self.tweets[u] for u in self.followees[userId] | {userId} ))\r\n return [t for _, t in itertools.islice(tweet,10)]", "def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())", "def user_timeline(request, *args, **kwargs):\r\n log_entries = LogEntry.objects.by_user(request.user)\r\n f = LogEntryFilter(request.GET, queryset=log_entries)\r\n\r\n return render_to_response(\"txcommon/user_timeline.html\", {\r\n 'f': f,\r\n 'actionlog': f.qs\r\n }, context_instance = RequestContext(request))", "def get_tweets_from_timeline(self):\n tweets = []\n for status in tweepy.Cursor(self.api.home_timeline).items(200):\n tweets.append(status)\n return tweets", "def get_trump_tweets(nreqs=180, max_id=None, since_id=None):\r\n params = {}\r\n if max_id:\r\n params['max_id'] = max_id\r\n if since_id:\r\n params['since_id'] = since_id\r\n user_tweets, iters = [], 0\r\n nreqs = min(180,nreqs)\r\n while iters < nreqs:\r\n tweets = twitter_api.statuses.user_timeline(screen_name='realDonaldTrump',\r\n count=200, include_rts=0, **params)\r\n user_tweets.extend(tweets)\r\n iters += 1\r\n if len(tweets) == 0:\r\n ## got no results: maybe hit limit, or ran out of tweets, or error\r\n break\r\n params['max_id'] = tweets[-1]['id']\r\n return (user_tweets,max_id)", "def getNewsFeed(self, userId):\n lst = self.followees[userId]\n lst.add(userId)\n allTweets=[]\n for fellow in lst:\n for x in self.tweets[fellow]:\n allTweets.append(x)\n allTweets.sort(key=lambda x:x[1],reverse=True)\n # print(allTweets)\n return [x[0] for x in allTweets[:10]]", "def get_last(self, user, encoding=None):\n count = 1\n tweets = self.api.user_timeline(user, encoding, count=count)\n tweet = tweets[0]\n t = Tweet(tweet.created_at, tweet.source, user, tweet.text, tweet.id)\n return t", "def filter(self, user_id=None, count=20, tweet_timeline=None):\n try:\n timeline = None\n if tweet_timeline is not None:\n timeline = tweet_timeline\n else:\n timeline = self.get_tweets(user_id=user_id, count=count)\n\n if timeline is None:\n raise Exception(\"Error: Could not fetch the tweets\")\n\n filtered_tweets = {}\n for tweets in timeline:\n tweet = tweets._json\n if (\n tweet[\"retweet_count\"] > self.min_retweet\n and tweet[\"retweet_count\"] < self.max_retweet\n ):\n temp_tweet = {\n \"text\": tweet[\"text\"],\n \"retweet_count\": tweet[\"retweet_count\"],\n }\n 
filtered_tweets[tweet[\"id\"]] = temp_tweet\n\n print(\n \"Status: Filtered {} tweets with the given criteria\".format(\n len(filtered_tweets)\n )\n )\n return filtered_tweets\n\n except Exception as e:\n print(str(e))\n sys.exit(0)", "def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute a sketch matrix of an input matrix. Note that l must be smaller than m
def fd_sketch(mat, l): # number of columns m = mat.shape[1] # Input error handling if l >= m: raise ValueError('Error: ell must be smaller than m') if l >= mat.shape[0]: raise ValueError('Error: ell must not be greater than n') def svd_sketch(mat_b): mat_u, vec_sigma, mat_vt = ln.svd(mat_b, full_matrices=False) # obtain squared singular value for threshold squared_sv_center = vec_sigma[l-1] ** 2 # update sigma to shrink the row norms sigma_tilda = [(0.0 if d < 0.0 else math.sqrt(d)) for d in (vec_sigma ** 2 - squared_sv_center)] # update matrix B where at least half rows are all zero return np.dot(np.diagflat(sigma_tilda), mat_vt) # initialize output matrix B mat_b = np.zeros([2 * l, m]) # compute zero valued row list zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis = 1)])[0].tolist() # repeat inserting each row of matrix A for i in range(0, mat.shape[0]): # insert a row into matrix B #print "Zero row ", zero_rows[0] mat_b[zero_rows[0], :] = mat[i, :] # remove zero valued row from the list zero_rows.remove(zero_rows[0]) # if there is no more zero valued row if len(zero_rows) == 0: mat_b = svd_sketch(mat_b) # update the zero valued row list zero_rows = np.nonzero([round(s, 7) == 0 for s in np.sum(mat_b, axis = 1)])[0].tolist() # sketch last rows, and return l-sized sketch mat_b = svd_sketch(mat_b) return mat_b[:l, :]
[ "def lowrank_matricize(self):\n\n\t\tU, l = self.U, self.lmbda\n\t\tdim = self.ndim\n\t\t\n\t\tulst, vlst = [], []\n\t\tL = np.diag(l)\n\t\t\n\t\tfor n in range(dim):\n\t\t\tlst = list(range(n)) + list(range(n + 1, dim))\n\n\t\t\tutemp = [U[l] for l in lst]\n\t\t\tmat = khatrirao(tuple(utemp), reverse = True).conj().T\n\n\t\t\t\n\t\t\tulst.append(U[n])\n\t\t\tvlst.append(dot(L,mat))\n\n\t\treturn ulst, vlst", "def pm_mat_from_function(f, l):\n a = np.zeros((l,l), dtype = np.int32)\n for i in range(l): \n for j in range(l): \n a[i,j] = 1 - 2 * (f(i,j) & 1)\n return a", "def pm_diag_from_function(f, l):\n a = np.zeros((l,l), dtype = np.int32)\n for i in range(l): \n a[i,i] = 1 - 2 * (f(i) & 1)\n return a", "def _matrix_chain(m, s, p, i, j):\n if m[i][j] > MIN: # if we have already calculated this subproblem\n return m[i][j] # return the calculated result\n if i == j:\n m[i][j] = 0\n else:\n for k in range(i, j):\n q = _matrix_chain(m, s, p, i, k) + _matrix_chain(m, s, p, k + 1, j) + p[i - 1] * p[k] * p[j]\n if q > m[i][j]:\n m[i][j] = q\n s[i][j] = k # in addition remember the index of splitting\n return m[i][j]", "def hadamard_matrix(lg_n):\n n = 1 << lg_n\n f = lambda i, j: bitparity(i & j) \n return pm_mat_from_function(f, n)", "def powermat(X, p):\n l = len(X)\n m = 2\n result = initmat(X)\n \n Y = power2(X)\n \n while m < p:\n for i in range(l):\n for j in range(l):\n for k in range(l):\n result[i][j] += Y[i][k] * X[k][j]\n \n Y, result = swapmat(Y, result)\n m = m + 1\n \n return Y", "def parity_hadamard_matrix(lg_n):\n n = 1 << lg_n\n return pm_mat_from_function(scal_prod_parity, n)", "def change_state(self, l):\n if len(l) != 9:\n print ('length of the list should be 9')\n self.matrix = []\n for i in range(3):\n self.matrix.append(l[i*3:i*3+3])", "def disc_mask(l):\n sz = 2 * l + 1\n m = np.zeros((sz,sz))\n x = np.linspace(-l,l,2*l+1)/l\n x = np.expand_dims(x, 1)\n m = x**2\n m = m + m.T\n m = m<1\n m = np.expand_dims(m, 2)\n return m", "def optimal_linear_transform_for_l_sdp(p, d, E, l):\n\n k, v = p.shape\n n = k * (k+1)/2\n m = E.shape[0]\n A = zeros((m, n), 'd')\n\n diff = asarray(p[:, E[:, 0]] - p[:, E[:, 1]]).T\n i = 0\n for r in xrange(k):\n for c in xrange(r + 1):\n if r == c:\n A[:, i] = diff[:, r] * diff[:, r]\n else:\n A[:, i] = 2 * diff[:, r] * diff[:, c]\n i = i + 1\n\n # Minimize ||A x - l|| subjec to constraint symmatrix(x) is\n # semi-positive definite. 
This is equivalent to the following\n # semi-definite optimization:\n #\n # Minimize t\n # Subject to constraint\n # (1) t >= |A x - l|^2\n # (2) symmatrix(x) semi-pos definite\n #\n # Constraint (1):\n # <=> (Ax-l)^T (Ax-l) - t <= 0\n # <=> | I Ax-l| >= 0\n # | (Ax-l)^T t |\n #\n # Constraint (2):\n # <=> unpack x into a symmetric matrix S and S >= 0\n\n cvxopt.coneprog.options['DSDP_Monitor'] = 10\n cvxopt.coneprog.options['DSDP_GapTolerance'] = 1e-4\n cvxopt.coneprog.options['DSDP_MaxIts'] = 200\n \n rs, cs, vs = [], [], []\n for i in xrange(n):\n r = range(m) + [m] * m\n c = [m] * m + range(m)\n v = list(A[:,i].ravel())\n v = v + v\n for j in xrange(m*2):\n cs.append(i)\n rs.append(c[j] * (m+1) + r[j])\n vs.append(-v[j])\n cs.append(n)\n rs.append(m*(m+1)+m)\n vs.append(-1.0)\n G0 = spmatrix(vs, rs, cs, ((m+1)*(m+1), n+1))\n\n r = range(m) + range(m) + [m]*m\n c = range(m) + [m]*m + range(m)\n v = [1.0]*m + list(-l.A.ravel()) + list(-l.A.ravel())\n h0 = matrix(spmatrix(v, r, c, (m+1,m+1)))\n\n rs, cs, vs = [], [], []\n i = 0\n for r in xrange(k):\n for c in xrange(r + 1):\n if r == c:\n vs.append(-1.0)\n rs.append(c * k + r)\n cs.append(i)\n else:\n vs.extend([-1.0, -1.0])\n rs.extend([c *k + r, r *k + c])\n cs.extend([i, i])\n i = i + 1\n G1 = spmatrix(vs, rs, cs, (k*k, n+1))\n\n # Use (I * EPS) because the DSDP solver seems to enforce strict\n # inequality constraints, i.e., S < 0 instead of S <= 0.\n \n c = matrix([[0.0]*n + [1.0]])\n \n if S.SDP_USE_DSDP:\n h1 = matrix(spmatrix([S.EPS]*k,range(k),range(k),(k,k)))\n sol = solvers.sdp(c, Gs=[G0,G1], hs=[h0,h1], solver=\"dsdp\")\n else:\n h1 = matrix(spmatrix([0]*k,range(k),range(k),(k,k)))\n sol = solvers.sdp(c, Gs=[G0,G1], hs=[h0,h1])\n \n x = sol['x']\n\n s = zeros((k, k))\n i = 0\n for r in xrange(k):\n for c in xrange(r + 1):\n if r == c:\n s[r, c] = x[i]\n else:\n s[r, c] = s[c, r] = x[i]\n i = i + 1\n\n # find M s.t transpose(M) * M = rank_restricted(s, d)\n e, v = eig(s) # we have s = v * diag(e) * v.T\n e, v = sqrt(abs(e)), v.real\n order = range(k)\n order.sort(key = lambda i: -e[i]) # e[order] is sorted in descreasing value\n print_info(\"Transformation matrix eigenvalues: %s\" % str(e[order]))\n print_info(\"Transformation matrix id: %s\" % str(order))\n ret = asmatrix(dot(diag(e[order[:d]]), v.T[order[:d],:])), asmatrix(dot(diag(e[order]), v.T[order,:]))\n\n return ret", "def matrix_chain(d):\n n = len(d) - 1\n N = [[0] * n for i in range(n)]\n for b in range(1, n):\n for i in range(n-b):\n j = i + b\n N[i][j] = min(N[i][k]+N[k+1][j]*d[i]*d[k+1]*d[j+1] for k in range(i,j))\n return N", "def triangular_form(self):\r\n pivot_row = 0\r\n pivot_col = 0\r\n size = self.__len__()\r\n\r\n copy_of_matrix = []\r\n for line in self.matrix_value:\r\n new_line = []\r\n for values in line:\r\n new_line.append(values)\r\n copy_of_matrix.append(new_line)\r\n\r\n while pivot_row < size[0] and pivot_col < size[1]:\r\n line_max = 0\r\n val_max = 0\r\n for new_pivot in range(pivot_row, size[0]):\r\n if abs(copy_of_matrix[new_pivot][pivot_col]) > val_max:\r\n line_max = new_pivot\r\n\r\n if copy_of_matrix[line_max][pivot_col] == 0:\r\n pivot_col += 1\r\n else:\r\n swap = copy_of_matrix[pivot_row]\r\n copy_of_matrix[pivot_row] = copy_of_matrix[new_pivot]\r\n copy_of_matrix[new_pivot] = swap\r\n for rows in range(pivot_row + 1, size[0]):\r\n coefficient = copy_of_matrix[rows][pivot_col] / copy_of_matrix[pivot_row][pivot_col]\r\n copy_of_matrix[rows][pivot_col] = 0\r\n for col in range(pivot_col + 1, size[1]):\r\n copy_of_matrix[rows][col] = 
copy_of_matrix[rows][col] - \\\r\n copy_of_matrix[pivot_row][col] * coefficient\r\n\r\n pivot_row += 1\r\n pivot_col += 1\r\n to_return = Matrix()\r\n to_return.list_2dimension_convert(copy_of_matrix)\r\n return to_return", "def __matrix_element_fill__(self, k, l):\n up_left_val = self.mt[k][l] + (self.same if self.seq_a[k+1] == self.seq_b[l+1] else self.diff)\n left_val = self.mt[k+1][l] + self.gap\n up_val = self.mt[k][l+1] + self.gap\n\n values = numpy.array([up_left_val, left_val, up_val])\n arg_max = numpy.argmax(values)\n\n self.mt[k+1][l+1] = values[arg_max]\n\n self.mt_paths[k+1][l+1] = list(numpy.transpose((values[arg_max] == values).nonzero()).flatten())", "def lll_reduction(int_mat):\n exec_str = '/compute_lll.py'\n inp_args = {}\n inp_args['mat'] = int_mat\n lll_int_mat = rpl.call_sage_math(exec_str, inp_args)\n return lll_int_mat", "def precomputedForM(self, m):\n print(\"TODO\")", "def inverse(M) :\n if len(M) != len(M[0]) :\n print(\"A matriz deve ser quadrada!\")\n else :\n \"\"\"##################################\"\"\"\n W = []\n for x in range(len(M)) :\n t = []\n for y in range(len(M[0])) :\n t.append(M[x][y])\n W.append(t)\n \"\"\"##################################\"\"\"\n\n singular = False\n terminado = False\n\n linhas = len(M)\n colunas = len(M[0])\n \n while singular == False and terminado == False:\n \n printMatrix(W)\n print(\"\")\n print(\"Adicionando a Matriz Identidade à direita da matriz original: \")\n \n for i in range(len(W)) :\n t = []\n for j in range(len(W[i])) :\n if i == j :\n t.append(1)\n else :\n t.append(0)\n W[i].extend(t)\n\n printMatrix(W)\n print(\"\")\n \n for count in range(linhas) :\n perm(W)\n \n \"\"\" Busca pelo pivô da linha correspondente \"\"\"\n pivotColumn = None\n for x in range(colunas) :\n if W[count][x] != 0 :\n pivotColumn = x\n break\n \"\"\"#########################################\"\"\"\n\n \"\"\" Subtração das linhas seguintes \"\"\"\n for i in range(count + 1, linhas) :\n\n if pivotColumn == None :\n singular = True\n break #Linha nula\n \n if W[count][pivotColumn] != 0 :\n m = W[i][pivotColumn]/float(W[count][pivotColumn])\n if m != 0 :\n W[i] = sumVectors(multiplyVectorScalar(-m, W[count]), W[i])\n print(\"Linha \" + str(i + 1) + \" menos \" + str(m) + \" vezes a linha \" + str(count + 1) + \": \")\n printMatrix(W)\n print(\"\")\n \n for count in range(linhas - 1, -1, -1) :\n \n \"\"\" Busca pelo pivô da linha correspondente \"\"\"\n pivotColumn = None\n for x in range(colunas) :\n if W[count][x] != 0 :\n pivotColumn = x\n break\n \"\"\"#########################################\"\"\"\n \n if pivotColumn != None : #Se possuir pivot...\n if W[count][pivotColumn] != 1 :\n print(\"Dividindo a linha \" + str(count + 1) + \" por \" + str(W[count][pivotColumn]) + \".\")\n W[count] = divideVectorScalar(W[count][pivotColumn], W[count])\n printMatrix(W)\n print(\"\")\n \n \"\"\" Subtração das linhas seguintes \"\"\"\n for i in range(count - 1, -1, -1) :\n \n if pivotColumn == None :\n singular = True\n break #Linha nula -> pass\n \n if W[count][pivotColumn] != 0 :\n m = W[i][pivotColumn]/float(W[count][pivotColumn])\n if m != 0 :\n W[i] = sumVectors(multiplyVectorScalar(-m, W[count]), W[i])\n print(\"Linha \" + str(i + 1) + \" menos \" + str(m) + \" vezes a linha \" + str(count + 1) + \": \")\n printMatrix(W)\n print(\"\")\n\n terminado = True \n \"#################################\"\"\"\n \n if singular :\n print(\"Ops... A matriz é singular .'. 
não possui inversa.\")\n else :\n I = []\n for k in range(len(M)) :\n index = int(len(W[k])/2)\n I.append(W[k][index:])\n print(\"A inversa é : \")\n printMatrix(I)\n print(\"\")\n return I", "def _make_L_matrix(points):\n\n n = len(points)\n K = _U(_interpoint_distances(points))\n P = np.ones((n, 3))\n P[:,1:] = points\n O = np.zeros((3, 3))\n # Construct L matrix from constituent blocks\n L = np.asarray(np.bmat([[K, P], [P.transpose(), O]]))\n\n return L", "def solve_L(L, b):\n m, k = b.shape\n x = np.zeros((m,k))\n x[0,:] = b[0,:] / L[0,0]\n for i in range(1,m):\n x[i,:] = (b[i,:] - L[i,:i] @ x[:i,:]) / L[i,i] \n return x", "def __reducedMatrix(self, A, n_tresh = 5):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the squared Frobenius norm of a matrix
def squaredFrobeniusNorm(mat): return ln.norm(mat, ord = 'fro') ** 2
[ "def frobenius_norm(mat):\n return tf.linalg.trace(tf.matmul(mat, tf.transpose(mat)))", "def forbenius_norm(orig_matrix):\n\n col_Fnorms, row_Fnorms = ([], [])\n\n norm_of_matrix = 0\n no_rows = len(orig_matrix)\n no_cols = len(orig_matrix[0])\n\n\n for i in range(no_rows):\n for j in range(no_cols):\n norm_of_matrix += math.pow(orig_matrix[i][j],2)\n\n for i in range(no_rows):\n row_sum = 0\n for j in range(no_cols):\n row_sum += math.pow(orig_matrix[i][j],2)\n row_Fnorms.append((row_sum/ norm_of_matrix, i))\n \n for i in range(no_cols):\n col_sum = 0\n for j in range(no_rows):\n col_sum += math.pow(orig_matrix[j][i],2)\n col_Fnorms.append((col_sum/ norm_of_matrix, i))\n\n row_Fnorms.sort(reverse= True)\n col_Fnorms.sort(reverse= True)\n\n return (row_Fnorms, col_Fnorms)", "def frobenius_norm(self):\n # check if W and H exist\n if hasattr(self, 'H') and hasattr(self, 'W'):\n if scipy.sparse.issparse(self.data):\n tmp = self.data[:, :] - (self.W * self.H)\n tmp = tmp.multiply(tmp).sum()\n err = np.sqrt(tmp)\n else:\n err = np.sqrt(np.sum((self.data[:, :] - np.dot(self.W, self.H)) ** 2))\n else:\n err = None\n\n return err", "def f_norm(M):\n m, n = np.shape(M)\n M = np.reshape(M, m*n)\n return np.sqrt(np.sum(M**2))", "def frobenius_norm(self, complement=False):\n\n if complement:\n S = self.comp_S\n else:\n S = self.S\n\n # check if W and H exist\n if hasattr(self,'H') and hasattr(self,'W'):\n if scipy.sparse.issparse(self.data):\n tmp = S * ( self.data[:,:] - (self.W * self.H) )\n tmp = tmp.multiply(tmp).sum()\n err = np.sqrt(tmp)\n else:\n err = np.sqrt(np.sum((S * (self.data[:, :] - np.dot(self.W, self.H))) ** 2))\n else:\n err = None\n\n return err", "def norm(self) -> float:\n return np.sqrt(self.inner_product(self).real)", "def norm(self) -> ScalarFunction:\n a = sympy.Integer(0)\n for i in self._vec:\n a += i._f ** 2\n return ScalarFunction(sympy.sqrt(a))", "def norm_sqr(self):\n\n value = self.norm()\n return value * value", "def l2_norm(x):\n return np.linalg.norm(x)", "def l2_norm(x):\n return np.sqrt(np.dot(x.T, x))", "def norm(self) -> ScalarFunction:\n raise NotImplementedError()", "def norm2(v):\n # return (v.T @ v) ** (0.5)\n return math.sqrt(sum(x*x for x in v))", "def normr(W):\n\tW1=tf.reduce_sum(W*W,1,keep_dims=True)**0.5\n\treturn W/W1", "def normalize(F):\n return F / (np.linalg.norm(F) * np.sign(F[2, 0]))", "def get_rsq(geno_mat):\n return scipy.spatial.distance.pdist(geno_mat.T, lambda x, y: scipy.stats.pearsonr(x, y)[0])**2", "def sqnorm(tensor, dim=1):\n\n return unrooted_norm(tensor, p=2, dim=dim)", "def _norm(x: torch.Tensor) ->torch.Tensor:\n return torch.abs(x[..., 0]) ** 2 + torch.abs(x[..., 1]) ** 2", "def numpy_l2norm2(x):\n if x.dtype is not np.float64:\n x = x.astype(np.float64)\n x = x.reshape(-1)\n return np.inner(x, x)", "def euclidean_norm(self) -> float:\n return self._euclidean_norm" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies some necessary attributes from the original function into the decorated function.
def copy_decorator_attrs(original_func, decorated_obj): decorator_name = "to_static" decorated_obj.__name__ = original_func.__name__ decorated_obj._decorator_name = decorator_name decorated_obj.__wrapped__ = original_func decorated_obj.__doc__ = original_func.__doc__ if hasattr(original_func, "__module__"): decorated_obj.__module__ = original_func.__module__ return decorated_obj
[ "def copyprops(original_fn, decorated_fn):\n if hasattr(original_fn, '_wsgiwapi_props'):\n decorated_fn._wsgiwapi_props = original_fn._wsgiwapi_props\n if hasattr(original_fn, '__doc__'):\n decorated_fn.__doc__ = original_fn.__doc__", "def include_original(dec):\n def meta_decorator(method):\n \"\"\"Yo dawg, I heard you like decorators...\"\"\"\n # pylint: disable=protected-access\n decorator = dec(method)\n decorator._original = method\n return decorator\n return meta_decorator", "def _decorate_once(fn):\n if hasattr(fn, '_wsgiwapi_props'):\n props = fn._wsgiwapi_props\n if props.get('decorated', False) == True:\n return fn, props\n props = {'decorated': True}\n\n # Note: the following wrapper function just checks that the properties on\n # the callable passed to application match those set here. I think this\n # will always be true unless a later applied decorator has failed to copy\n # the properties.\n\n # It is tempting to remove this check, and just set the properties on the\n # original callable object, but there is a potential security issue in\n # doing so: if a later applied decorator _has_ failed to copy the\n # properties, this would lead to decorators getting lost, which could mean\n # that code which looks like it is validating parameters is actually\n # failing to do the validation.\n\n # Perhaps the best fix would be to make parameters unavailable unless\n # they've been validated.\n\n # FIXME - review this.\n def res(*args, **kwargs):\n # Check that the decorator has not been applied and then the properties\n # have been lost (probably by a second decorator which doesn't copy the\n # properties being applied).\n if isinstance(args[0], Request):\n request = args[0]\n else:\n request = args[1]\n if request._handler_props is not props:\n raise RuntimeError(\"Handler properties do not match decorated properties. Probably missing call to wsgiwapi.copyprops.\")\n return fn(*args, **kwargs)\n res.__doc__ = fn.__doc__\n res.__name__ = fn.__name__\n res.__dict__.update(fn.__dict__)\n res._wsgiwapi_props = props\n return res, props", "def _set_args(func, *args, **kwargs):\n\n def wrapped():\n return func(*args, **kwargs)\n\n wrapped.args = args\n wrapped.kwargs = kwargs\n wrapped.__name__ = func.__name__\n\n return wrapped", "def passthrough_decorator(f):\n return f", "def _update_function(self, old_function, new_function):\n \n # fix me: Does this handle closures correctly? 
Can we?\n # readonly: func_closure, func_globals\n old_function.func_code = new_function.func_code\n old_function.func_defaults = new_function.func_defaults\n old_function.func_dict = new_function.func_dict\n old_function.func_doc = new_function.func_doc", "def dont_decorate(func):\n func.__dont_decorate__ = True\n return func", "def decorator_with_args(decorator_to_enhance):\n\n # We use the same trick we did to pass arguments\n def decorator_maker(*args, **kwargs):\n\n # We create on the fly a decorator that accepts only a function\n # but keeps the passed arguments from the maker.\n def decorator_wrapper(func):\n\n # We return the result of the original decorator, which, after all,\n # IS JUST AN ORDINARY FUNCTION (which returns a function).\n # Only pitfall: the decorator must have this specific signature or it won't work:\n return decorator_to_enhance(func, *args, **kwargs)\n\n return decorator_wrapper\n\n return decorator_maker", "def decorate(self, func):\n if not callable(func):\n raise TypeError('Cannot decorate a non callable object \"{}\"'\n .format(func))\n self.decorated = func", "def replace(old, new):\n # decorate the original function\n def decorate(func):\n # do the replacement using the args of the decorator\n def do_replace(*args, **kwargs):\n if args[0] == old:\n args = (new,)\n # call the decorated function\n return func(*args, **kwargs)\n return do_replace\n return decorate", "def copy_raw_func_only(func):\n new_code = types.CodeType(func.func_code.co_argcount, \\\n func.func_code.co_nlocals, \\\n func.func_code.co_stacksize, \\\n func.func_code.co_flags, \\\n func.func_code.co_code, \\\n func.func_code.co_consts, \\\n func.func_code.co_names, \\\n func.func_code.co_varnames, \\\n func.func_code.co_filename, \\\n func.func_code.co_name, \\\n func.func_code.co_firstlineno, \\\n func.func_code.co_lnotab)\n\n return types.FunctionType(new_code, func.func_globals, func.func_name,\n func.func_defaults, func.func_closure)", "def _copy_signature(src, dst, add_keywords={}, remove_arg0=[], **extra_attrs):\n from decorator import FunctionMaker\n \n actual_func = src\n if inspect.isclass(src): # classes\n actual_func, remove_arg0 = src.__init__, ['self']\n elif not inspect.ismethod(src): # normal functions\n # A member function is NOT a member function before it becomes a\n # bound or unbound method, i.e., still inside the class definition.\n pass # src.__dict__ will be copied over by FunctionMaker.\n elif src.__self__ is None: # unbound methods\n assert not src.__dict__ and src.__dict__ is src.im_func.__dict__\n else: # bound methods\n assert not src.__dict__ and src.__dict__ is src.im_func.__dict__\n remove_arg0 = ['self', 'cls']\n signature, defaults = _modify_argspec(\n actual_func, remove_arg0=remove_arg0, add_keywords=add_keywords)\n fmaker = FunctionMaker(src, signature=signature, defaults=defaults)\n \n # If <newfunc>.func_code.co_filename and func_code.co_firstlineno were\n # not read-only, we should change them to the values from ``src``.\n return fmaker.make(\n 'def %(name)s(%(signature)s): return _NeW_fUnC_(%(signature)s)',\n dict(_NeW_fUnC_=dst), **extra_attrs)", "def _link_decorator(source_fn, dest_fn):\n\n source_values = copy.deepcopy(getattr(source_fn, '__garcon__', dict()))\n\n if hasattr(dest_fn, '__garcon__'):\n source_values.update(dest_fn.__garcon__)\n\n setattr(dest_fn, '__garcon__', source_values)", "def _undecorate(func: Callable) -> Callable:\n\n while hasattr(func, \"__wrapped__\"):\n func = func.__wrapped__\n\n return func", "def ApplyToResult( func 
):\n\n @simple_decorator\n def wrap( f ):\n def new_function(*args, **kw):\n return func( f( *args, **kw ) )\n return new_function\n \n return wrap", "def with_decorators(self, fn):\r\n return apply_decorators(fn, self.decorators)", "def patch_func(module, function_name, replacement):\n original = getattr(module, function_name)\n setattr(module, function_name, replacement)\n try:\n yield\n finally:\n setattr(module, function_name, original)", "def decorated(python_func):\n\n nonlocal enable_fallback\n if enable_fallback is None:\n flag = os.environ.get(\"ENABLE_FALL_BACK\", None)\n if flag == \"True\":\n enable_fallback = True\n else: # None or True\n enable_fallback = False\n\n StaticClass = StaticFunctionClass = {\n True: SymbolicStaticFunction,\n False: ASTStaticFunction,\n }[enable_fallback]\n\n # Step 1. unwrap the function if it is already decorated.\n _, python_func = unwrap_decorators(python_func)\n\n # Step 2. copy some attributes from original python function.\n static_layer = copy_decorator_attrs(\n original_func=python_func,\n decorated_obj=StaticClass(\n function=python_func,\n input_spec=input_spec,\n build_strategy=build_strategy,\n property=property,\n backend=backend,\n ),\n )\n\n return static_layer", "def with_attrs(**kwargs):\n def decorator(fun):\n for k, v in kwargs.items():\n setattr(fun, k, v)\n\n return fun\n\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds modules whose code is ignored by dynamic-to-static transcription. Builtin modules ignored by default are collections, pdb, copy, inspect, re, numpy, logging, and six
def ignore_module(modules: list[Any]): add_ignore_module(modules)
[ "def make_exclude():\n # Simple utility to make IPython paths more readably, we need a lot of\n # these below\n ipjoin = lambda *paths: pjoin('IPython', *paths)\n\n exclusions = [ipjoin('external'),\n ipjoin('quarantine'),\n ipjoin('deathrow'),\n # This guy is probably attic material\n ipjoin('testing', 'mkdoctests'),\n # Testing inputhook will need a lot of thought, to figure out\n # how to have tests that don't lock up with the gui event\n # loops in the picture\n ipjoin('lib', 'inputhook'),\n # Config files aren't really importable stand-alone\n ipjoin('config', 'profile'),\n # The notebook 'static' directory contains JS, css and other\n # files for web serving. Occasionally projects may put a .py\n # file in there (MathJax ships a conf.py), so we might as\n # well play it safe and skip the whole thing.\n ipjoin('html', 'static'),\n ipjoin('html', 'fabfile'),\n ]\n if not have['sqlite3']:\n exclusions.append(ipjoin('core', 'tests', 'test_history'))\n exclusions.append(ipjoin('core', 'history'))\n if not have['wx']:\n exclusions.append(ipjoin('lib', 'inputhookwx'))\n \n if 'IPython.kernel.inprocess' not in sys.argv:\n exclusions.append(ipjoin('kernel', 'inprocess'))\n \n # FIXME: temporarily disable autoreload tests, as they can produce\n # spurious failures in subsequent tests (cythonmagic).\n exclusions.append(ipjoin('extensions', 'autoreload'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_autoreload'))\n\n # We do this unconditionally, so that the test suite doesn't import\n # gtk, changing the default encoding and masking some unicode bugs.\n exclusions.append(ipjoin('lib', 'inputhookgtk'))\n exclusions.append(ipjoin('kernel', 'zmq', 'gui', 'gtkembed'))\n\n #Also done unconditionally, exclude nbconvert directories containing\n #config files used to test. 
Executing the config files with iptest would\n #cause an exception.\n exclusions.append(ipjoin('nbconvert', 'tests', 'files'))\n exclusions.append(ipjoin('nbconvert', 'exporters', 'tests', 'files'))\n\n # These have to be skipped on win32 because the use echo, rm, cd, etc.\n # See ticket https://github.com/ipython/ipython/issues/87\n if sys.platform == 'win32':\n exclusions.append(ipjoin('testing', 'plugin', 'test_exampleip'))\n exclusions.append(ipjoin('testing', 'plugin', 'dtexample'))\n\n if not have['pexpect']:\n exclusions.extend([ipjoin('lib', 'irunner'),\n ipjoin('lib', 'tests', 'test_irunner'),\n ipjoin('terminal', 'console'),\n ])\n\n if not have['zmq']:\n exclusions.append(ipjoin('lib', 'kernel'))\n exclusions.append(ipjoin('kernel'))\n exclusions.append(ipjoin('qt'))\n exclusions.append(ipjoin('html'))\n exclusions.append(ipjoin('consoleapp.py'))\n exclusions.append(ipjoin('terminal', 'console'))\n exclusions.append(ipjoin('parallel'))\n elif not have['qt'] or not have['pygments']:\n exclusions.append(ipjoin('qt'))\n\n if not have['pymongo']:\n exclusions.append(ipjoin('parallel', 'controller', 'mongodb'))\n exclusions.append(ipjoin('parallel', 'tests', 'test_mongodb'))\n\n if not have['matplotlib']:\n exclusions.extend([ipjoin('core', 'pylabtools'),\n ipjoin('core', 'tests', 'test_pylabtools'),\n ipjoin('kernel', 'zmq', 'pylab'),\n ])\n\n if not have['cython']:\n exclusions.extend([ipjoin('extensions', 'cythonmagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_cythonmagic')])\n\n if not have['oct2py']:\n exclusions.extend([ipjoin('extensions', 'octavemagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_octavemagic')])\n\n if not have['tornado']:\n exclusions.append(ipjoin('html'))\n\n if not have['jinja2']:\n exclusions.append(ipjoin('html', 'notebookapp'))\n\n if not have['rpy2'] or not have['numpy']:\n exclusions.append(ipjoin('extensions', 'rmagic'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_rmagic'))\n\n if not have['azure']:\n exclusions.append(ipjoin('html', 'services', 'notebooks', 'azurenbmanager'))\n\n if not all((have['pygments'], have['jinja2'], have['sphinx'])):\n exclusions.append(ipjoin('nbconvert'))\n\n # This is needed for the reg-exp to match on win32 in the ipdoctest plugin.\n if sys.platform == 'win32':\n exclusions = [s.replace('\\\\','\\\\\\\\') for s in exclusions]\n \n # check for any exclusions that don't seem to exist:\n parent, _ = os.path.split(get_ipython_package_dir())\n for exclusion in exclusions:\n if exclusion.endswith(('deathrow', 'quarantine')):\n # ignore deathrow/quarantine, which exist in dev, but not install\n continue\n fullpath = pjoin(parent, exclusion)\n if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):\n warn(\"Excluding nonexistent file: %r\" % exclusion)\n\n return exclusions", "def disablemoddepends(self):\n pass", "def remove_ignored_modules(self, ignore_modules=None):\n if ignore_modules is None:\n ignore_modules = []\n elif not isinstance(ignore_modules, list):\n ignore_modules = [ignore_modules]\n\n ignored_modules = ignore_modules + DEFAULT_IGNORED_MODULES\n\n # Remove from module dict\n for ignore_mod in map(str.lower, ignored_modules):\n self.modules.pop(ignore_mod, None)\n # Remove from 'used' modules\n for module in self.modules.values():\n with suppress(ValueError):\n module.uses.remove(ignore_mod)\n\n # Remove from 'used' files\n for source_file in self.files.values():\n with suppress(ValueError):\n source_file.uses.remove(ignore_mod)", "def 
no_cythonize(extensions, **_ignore):\n # https://tinyurl.com/y4aavzq5 ->\n # https://cython.readthedocs.io/en/latest/src/userguide/\n # source_files_and_compilation.html#distributing-cython-modules\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in (\".pyx\", \".py\"):\n ext = {\"c++\": \".cpp\"}.get(extension.language, '.c')\n sfile = path + {\"c++\": \".cpp\"}.get(extension.language, '.c')\n sources.append(sfile)\n extension.sources[:] = sources\n return extensions", "def ignore_extensions(self, *extensions):\n for ext in extensions:\n self.add_ignore_patterns('*%s' % ext)", "def getIgnoredOutputModules(self):\n\n if hasattr(self.data.output, 'ignoredModules'):\n return self.data.output.ignoredModules\n return []", "def enablemoddepends(self):\n pass", "def setIgnoredOutputModules(self, moduleList):\n\n self.data.output.ignoredModules = moduleList\n return", "def process_module(self, node):\n \n ignore = ['__future__', 'collections', 'random', 'six', 'cPickle', 'scipy', 'hashlib', \n 'io', 'contextlib', 'unittest', 'types', 'h5py', 'inspect', 'tarfile', 'yaml', \n 'copy', 'marshal', 'requests', 'functools', 'gzip', 're', 'Queue', 'queue', \n 'os', 'pickle', 'importlib', 'mock', 'threading', 'codecs', 'tempfile', 'time', \n 'binascii', 'pydot', 'zipfile', 'json', 'shutil', 'abc', 'sys', 'csv', 'cntk',\n 'warnings', 'numpy', 'skimage', 'multiprocessing', 'distutils', 'tensorflow', \n 'theano', 'keras_applications', \"keras_preprocessing\"]\n \n \n comment = False\n with node.stream() as stream:\n for (lineno, line) in enumerate(stream):\n line = line.decode(\"utf-8\").strip()\n #Ingore lines withing multi line comments\n if '\\\"\\\"\\\"' in line:\n comment = not comment\n #Empty line or comment line\n if line == \"\" or comment == True or '#' in line:\n continue\n else:\n split_line = line.split()\n #Import\n if split_line[0] == 'import':\n module_split = split_line[1].split('.')\n #Check if module is an ignored library\n if module_split[0] in ignore:\n continue\n else:\n pass \n \n #ImportFrom\n elif split_line[0] == 'from' and len(split_line) >= 3:\n #Check if module is an ignored library or line doesnt contain import\n if split_line[1] in ignore or split_line[2] != 'import':\n continue\n #Check if import is absolute or relative\n elif split_line[1].startswith('.'):\n self.add_message('W0042', line=lineno)\n else:\n module_split = split_line[1].split('.')\n if module_split[0] in ignore:\n continue\n else:\n pass\n else:\n continue", "def _hide_submodules(\n module_name, *, remove_self=True, ignore=tuple(), hide_folder=tuple()\n):\n import os\n\n module = sys.modules[module_name]\n module_path = module.__path__[0]\n\n for file in os.listdir(module_path):\n if file.endswith(\".py\") and not file == \"__init__.py\":\n mod_name = file[:-3]\n elif file in hide_folder:\n mod_name = file\n else:\n mod_name = None\n\n if mod_name is not None:\n if (\n hasattr(module, mod_name)\n and mod_name[0] != \"_\"\n and mod_name not in ignore\n ):\n new_name = \"_\" + mod_name\n setattr(module, new_name, getattr(module, mod_name))\n delattr(module, mod_name)\n\n if remove_self and hasattr(module, \"_hide_submodules\"):\n delattr(module, \"_hide_submodules\")\n\n auto_export(module)", "def _additional_inserted_libs(self):\n return []", "def pep8mod_add_ignore(ignore_code):\r\n pep8mod.options.ignore.append(ignore_code)", "def _register_ignore_types():\n global ignore_types\n if ignore_types is not None:\n return # Only register 
once\n else:\n ignore_types = ()\n\n ignores = dask.config.get(\"jit-unspill-ignore\", \"cupy.ndarray\")\n ignores = ignores.split(\",\")\n\n toplevels = defaultdict(set)\n for path in ignores:\n if path:\n toplevel = path.split(\".\", maxsplit=1)[0].strip()\n toplevels[toplevel].add(path.strip())\n\n for toplevel, ignores in toplevels.items():\n\n def f(paths):\n global ignore_types\n ignore_types = ignore_types + tuple(pydoc.locate(p) for p in paths)\n\n dispatch.register_lazy(toplevel, partial(f, ignores))", "def filter_builtins(module):\n\n # Default builtin list \n built_in_list = ['__builtins__', '__doc__', '__file__', '__name__']\n \n # Append anything we \"know\" is \"special\"\n # Allows your libraries to have methods you will not try to exec.\n built_in_list.append('special_remove')\n\n # get the list of methods/functions from the module\n module_methods = dir(module) # Dir allows us to get back ALL methods on the module.\n\n for b in built_in_list:\n if b in module_methods:\n module_methods.remove(b)\n\n print(module_methods)\n return module_methods", "def _get_standard_modules():\n\n # the frozen application is not meant to create GUIs or to add\n # support for building and installing Python modules\n ignore_list = ['__main__', 'distutils', 'ensurepip', 'idlelib', 'lib2to3'\n 'test', 'tkinter', 'turtle']\n\n # some modules are platform specific and got a\n # RecursionError: maximum recursion depth exceeded\n # when running this script with PyInstaller 3.3 installed\n if loadlib.IS_WINDOWS:\n os_ignore_list = ['(Unix)', '(Linux)', '(Linux, FreeBSD)']\n elif loadlib.IS_LINUX:\n os_ignore_list = ['(Windows)']\n elif loadlib.IS_MAC:\n os_ignore_list = ['(Windows)', '(Linux)', '(Linux, FreeBSD)']\n else:\n os_ignore_list = []\n\n modules = []\n url = 'https://docs.python.org/{0}.{1}/py-modindex.html'.format(*sys.version_info)\n for s in urlopen(url).read().decode().split('#module-')[1:]:\n m = s.split('\"><code')\n add_module = True\n for x in os_ignore_list:\n if x in m[1]:\n ignore_list.append(m[0])\n add_module = False\n break\n if add_module:\n modules.append(m[0])\n\n included_modules, excluded_modules = [], []\n for module in modules:\n include_module = True\n for mod in ignore_list:\n if module.startswith(mod):\n excluded_modules.extend(['--exclude-module', module])\n include_module = False\n break\n if include_module:\n included_modules.extend(['--hidden-import', module])\n return included_modules + excluded_modules", "def ignoreAll(self): #$NON-NLS-1$\r", "def _removeIgnoredModules(self, moduleNameList):\n\t\ttoRemoveList = []\n\t\tfor fileName in moduleNameList:\n\n\t\t\tfixedFileName = fileName\n\t\t\tif os.path.sep != '/':\n\t\t\t\tfixedFileName = fileName.replace(os.path.sep, '/')\n\t\t\tfor ignoreName in self.ignore:\n\t\t\t\t#if ignoreName == os.path.basename(fileName):\n\t\t\t\tif ignoreName == fixedFileName:\n\t\t\t\t\ttoRemoveList.append(fileName)\n\t\tfor moduleName in toRemoveList:\n\t\t\tmoduleNameList.remove(moduleName)\n\t\treturn moduleNameList", "def test_remove_deps(monkeypatch):\n monkeypatch.setattr(sys, 'modules', dict(sys.modules))\n enabler._remove_deps()", "def monkey_patch_disable_normal_doctest():\n import sys\n from _pytest import doctest\n # Only perform the monkey patch if it is clear the xdoctest plugin is\n # wanted instead of the standard _pytest.doctest pluginn\n if '--doctest-modules' not in sys.argv:\n if '--xdoctest-modules' in sys.argv or '--xdoctest' in sys.argv or '--xdoc' in sys.argv:\n # overwriting the collect function will cripple 
_pytest.doctest and\n # prevent conflicts with this module.\n def pytest_collect_file(path, parent):\n return None\n # Not sure why, but _is_doctest seems to be called even when\n # pytest_collect_file is monkey patched out\n def _is_doctest(config, path, parent):\n return False\n doctest.pytest_collect_file = pytest_collect_file\n doctest._is_doctest = _is_doctest" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorates a Python function into an ASTStaticFunction object.
def decorated(python_func): nonlocal enable_fallback if enable_fallback is None: flag = os.environ.get("ENABLE_FALL_BACK", None) if flag == "True": enable_fallback = True else: # None or True enable_fallback = False StaticClass = StaticFunctionClass = { True: SymbolicStaticFunction, False: ASTStaticFunction, }[enable_fallback] # Step 1. unwrap the function if it is already decorated. _, python_func = unwrap_decorators(python_func) # Step 2. copy some attributes from original python function. static_layer = copy_decorator_attrs( original_func=python_func, decorated_obj=StaticClass( function=python_func, input_spec=input_spec, build_strategy=build_strategy, property=property, backend=backend, ), ) return static_layer
[ "def make_function_ast(src):\n return python.AstTree(ast.parse(src)).functions()[0]", "def transform(func):\n WalkoffTag.transform.tag(func)\n return func", "def wrap_python_function(cls, fn):\n def wrapped(sass_arg):\n # TODO enforce no units for trig?\n python_arg = sass_arg.value\n python_ret = fn(python_arg)\n sass_ret = cls(\n python_ret,\n unit_numer=sass_arg.unit_numer,\n unit_denom=sass_arg.unit_denom)\n return sass_ret\n\n return wrapped", "def fn(cls, at, argname, body):\n f = call(\n fn_val.code,\n [ast.Lambda(args=arglist([sanitize_identifier(argname)]),\n body=body.code)])\n return cls(t_fn(at, body.t), f)", "def createFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def process_python_function(self):\r\n exec(self.python_text)\r\n self.func = locals()[self.function_name]\r\n return self.func", "def decorate(func, caller, extras=()):\n evaldict = dict(_call_=caller, _func_=func)\n es = ''\n for i, extra in enumerate(extras):\n ex = '_e%d_' % i\n evaldict[ex] = extra\n es += ex + ', '\n\n if '3.5' <= sys.version < '3.6':\n # with Python 3.5 isgeneratorfunction returns True for all coroutines\n # however we know that it is NOT possible to have a generator\n # coroutine in python 3.5: PEP525 was not there yet\n generatorcaller = isgeneratorfunction(\n caller) and not iscoroutinefunction(caller)\n else:\n generatorcaller = isgeneratorfunction(caller)\n if generatorcaller:\n fun = FunctionMaker.create(\n func, \"for res in _call_(_func_, %s%%(shortsignature)s):\\n\"\n \" yield res\" % es, evaldict, __wrapped__=func)\n else:\n fun = FunctionMaker.create(\n func, \"return _call_(_func_, %s%%(shortsignature)s)\" % es,\n evaldict, __wrapped__=func)\n if hasattr(func, '__qualname__'):\n fun.__qualname__ = func.__qualname__\n return fun", "def VisitFunction(self, f):\n\n # flatten return value(s) from VisitSignature\n signatures = tuple(ex for s in f.signatures for ex in ExpandSignature(s)) # pylint: disable=g-complex-comprehension\n return f.Replace(signatures=signatures)", "def jitf(\n fun,\n *,\n filter_fn=None,\n filter_tree=None,\n static_argnums=None,\n static_argnames=None,\n donate_argnums=(),\n **jitkwargs\n):\n if isinstance(static_argnums, int):\n static_argnums = (static_argnums,)\n if static_argnames is not None:\n raise NotImplementedError(\n \"jitf does not yet support `static_argnames`. use static_argnums instead.\"\n )\n if donate_argnums != ():\n raise NotImplementedError(\"jitf does not ye support `donate_argnums`.\")\n validate_filters(\"jitf\", filter_fn, filter_tree)\n\n if static_argnums is None:\n len_static_argnums = 0\n else:\n len_static_argnums = len(static_argnums)\n\n @ft.wraps(fun)\n def f_wrapper(*args, **kwargs):\n if len(kwargs):\n raise NotImplementedError(\n \"jitf does not yet support keyword arguments. 
Use positional arguments instead.\"\n )\n\n if filter_tree is not None:\n if len(args) - len_static_argnums == 1:\n new_filter_tree = (filter_tree,)\n else:\n new_filter_tree = tuple(filter_tree)\n\n # Mark the arguments that have been explicitly declared static via `static_argnums`\n if static_argnums is not None:\n args = list(args)\n for index in static_argnums:\n args[index] = _UnPyTreeAble(args[index])\n if filter_tree is not None:\n new_filter_tree = list(new_filter_tree)\n for index in static_argnums:\n new_filter_tree.insert(index, _UnPyTreeAble(None))\n\n # Flatten everything else\n args_flat, args_treedef = jax.tree_flatten(args)\n if filter_tree is not None:\n filter_flat, flat_treedef = jax.tree_flatten(new_filter_tree)\n if flat_treedef != args_treedef:\n raise ValueError(\n \"The tree stucture for the filters and the arguments must be the same.\"\n )\n\n # Figure out static argnums with respect to this new flattened structure.\n new_static_argnums = []\n if filter_tree is None:\n # implies filter_fn is not None\n for i, arg in enumerate(args_flat):\n if isinstance(arg, _UnPyTreeAble) or not filter_fn(arg):\n new_static_argnums.append(i)\n else:\n for i, (arg, filter) in enumerate(zip(args_flat, filter_flat)):\n if not filter:\n new_static_argnums.append(i)\n new_static_argnums = tuple(new_static_argnums)\n if static_argnums is not None:\n args_flat = [\n arg.value if isinstance(arg, _UnPyTreeAble) else arg\n for arg in args_flat\n ]\n\n f_jitted = _jitf_cache(\n fun, args_treedef, static_argnums=new_static_argnums, **jitkwargs\n )\n return f_jitted(*args_flat)\n\n return f_wrapper", "def script(pyfunc):\n def wrapped_func(func, *args, **kwargs):\n from .util import _enter_hybrid_runtime, _restore_runtime, _is_tvm_arg_types\n if _is_tvm_arg_types(args):\n return parse(func, args)\n\n intersect = _enter_hybrid_runtime(func)\n value = func(*args, **kwargs)\n _restore_runtime(func, intersect)\n return value\n return decorate(pyfunc, wrapped_func)", "def test_inline_into_function():\n before_program = \"\"\"\n #[version = \"0.0.5\"]\n def @main() {\n let %x = 1 + 1;\n let %f = fn (%y: int) -> int {\n let %z = %y + %y;\n %x + %z\n };\n (%f(2), %f(3))\n }\n \"\"\"\n\n after_program = \"\"\"\n #[version = \"0.0.5\"]\n def @main() {\n let %x = 1 + 1;\n let %f = fn (%y: int) -> int {\n %x + (%y + %y)\n };\n (%f(2), %f(3))\n }\n \"\"\"\n\n optimize_and_check(\n before_program, after_program, transform.DeadCodeElimination(inline_once=True)\n )", "def snitch(func):\n return FunctionType(func.func_code, func.func_globals,\n 'test_' + func.func_name, closure=func.func_closure)", "def visit_create_function(element, compiler, **kw):\n func = element.function\n opt_or_replace = 'OR REPLACE' if element.or_replace else None\n volatility = func.volatility.upper()\n strictness = \"STRICT\" if func.strict else None\n leakproof = \"LEAKPROOF\" if func.leakproof else None\n quoted_definition = \"${quote_tag}$\\n{definition}\\n${quote_tag}$\".format(\n quote_tag=func.quote_tag, definition=func.definition)\n\n function_name = func.build_quoted_identifier(quoter=compiler.preparer.quote)\n return _join_tokens(\n \"CREATE\", opt_or_replace, \"FUNCTION\", function_name, \"RETURNS\",\n func.rtype, volatility, strictness, leakproof, \"LANGUAGE\", func.language,\n \"AS\", quoted_definition,\n )", "def copy_decorator_attrs(original_func, decorated_obj):\n decorator_name = \"to_static\"\n\n decorated_obj.__name__ = original_func.__name__\n decorated_obj._decorator_name = decorator_name\n 
decorated_obj.__wrapped__ = original_func\n decorated_obj.__doc__ = original_func.__doc__\n if hasattr(original_func, \"__module__\"):\n decorated_obj.__module__ = original_func.__module__\n\n return decorated_obj", "def traceable(fn_cls):\n fn_cls.is_traceable = True\n return fn_cls", "def compiled_function(self):\n try:\n return self._compiled_function\n except AttributeError:\n from .general import NameLookUp\n arg_defs = ', '.join(NameLookUp.pythonize_name(name) for name in self.used_variable_list)\n f = self._compiled_function = eval('lambda ' + arg_defs + ': ' + self.compiled, self.eval_globals)\n return f", "def filter_from_function():\n\n def decorate(f):\n params, _ = extract_params(f)\n return FilterOfFunction(params=params, f=f)\n\n return decorate", "def transform_function_decl(self, node):\n\n if node.result_type.kind in self._data_types[\"int\"]:\n ret_type = self._data_types[\"int\"][node.result_type.kind]\n elif node.result_type.kind in self._data_types[\"float\"]:\n ret_type = self._data_types[\"float\"][node.result_type.kind]\n elif node.result_type.kind in self._data_types[\"bool\"]:\n ret_type = self._data_types[\"bool\"][node.result_type.kind]\n elif node.result_type.kind in self._data_types[\"void\"]:\n ret_type = self._data_types[\"void\"][node.result_type.kind]\n else:\n raise NotImplementedError(\"Only void, bool, int \"\n \"and float are supported\")\n body = []\n param = []\n\n # Subsequent nodes will be the parameters for the function.\n for child in node.get_children():\n decl = self.transform(child)\n if child.kind == cin.CursorKind.PARM_DECL:\n param.append(decl)\n elif child.kind == cin.CursorKind.COMPOUND_STMT:\n for val in decl:\n body.append(val)\n else:\n body.append(decl)\n\n if body == []:\n function = FunctionPrototype(\n return_type = ret_type,\n name = node.spelling,\n parameters = param\n )\n else:\n function = FunctionDefinition(\n return_type = ret_type,\n name = node.spelling,\n parameters = param,\n body = body\n )\n return function", "def make_function(text):\n\n try:\n exec 'f = lambda x: ' + text\n 1+f(2.0) ## test to see if there are any errors in the definition\n except ZeroDivisionError: ## ignore zero division errors\n pass\n except:\n raise FunctionError()\n return f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a save pre-hook for `paddle.jit.save`. This hook will be executed before the `save` function is invoked. hook(layer, input_spec, configs) -> None
def _register_save_pre_hook(hook): global _save_pre_hooks_lock global _save_pre_hooks _save_pre_hooks_lock.acquire() if hook not in _save_pre_hooks: _save_pre_hooks.append(hook) _save_pre_hooks_lock.release() return HookRemoveHelper(hook)
[ "def save(layer, path, input_spec=None, **configs):\n\n # 1. input build & check\n prog_translator = ProgramTranslator()\n is_prim_infer = core._is_fwd_prim_enabled() and core._is_bwd_prim_enabled()\n if not prog_translator.enable_to_static:\n raise RuntimeError(\n \"The paddle.jit.save doesn't work when setting 'paddle.jit.enable_to_static' to False.\"\n )\n\n if not (\n isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer)\n ):\n raise TypeError(\n \"The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s.\"\n % type(layer)\n )\n elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):\n warnings.warn(\n 'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'\n )\n\n # NOTE(chenweihang): If the input layer be wrapped by DataParallel,\n # the args and kwargs of forward method will can't be parsed by\n # function_spec, so here we save DataParallel._layers instead\n # DataParallel it self\n # NOTE(chenweihang): using inner_layer, do not change input layer\n if isinstance(layer, paddle.DataParallel):\n inner_layer = layer._layers\n else:\n inner_layer = layer\n\n # path check\n file_prefix = os.path.basename(path)\n if file_prefix == \"\":\n raise ValueError(\n \"The input path MUST be format of dirname/file_prefix \"\n \"[dirname\\\\file_prefix in Windows system], but received \"\n \"file_prefix is empty string.\"\n )\n\n dirname = os.path.dirname(path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n\n # avoid change user given input_spec\n inner_input_spec = None\n if input_spec is not None:\n if isinstance(layer, Layer):\n for attr_func in dir(inner_layer):\n static_func = getattr(inner_layer, attr_func, None)\n if (\n isinstance(static_func, StaticFunction)\n and 'forward' != attr_func\n ):\n raise ValueError(\n \"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s.\"\n % type(input_spec)\n )\n\n if not isinstance(input_spec, (list, tuple)):\n raise TypeError(\n \"The input input_spec should be 'list', but received input_spec's type is %s.\"\n % type(input_spec)\n )\n inner_input_spec = []\n for var in paddle.utils.flatten(input_spec):\n if isinstance(var, paddle.static.InputSpec):\n inner_input_spec.append(var)\n elif isinstance(var, (core.eager.Tensor, Variable)):\n inner_input_spec.append(\n paddle.static.InputSpec.from_tensor(var)\n )\n else:\n # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.\n inner_input_spec.append(var)\n\n # parse configs\n configs = _parse_save_configs(configs)\n # whether outermost layer has pre/post hook, if does, we need also save\n # these operators in program.\n with_hook = configs.with_hook\n combine_params = configs.combine_params\n if combine_params:\n configs._program_only = True\n\n scope = core.Scope()\n extra_var_info = {}\n if isinstance(layer, Layer):\n functions = dir(inner_layer)\n if inner_layer._forward_pre_hooks or inner_layer._forward_post_hooks:\n with_hook = True\n else:\n # layer is function\n functions = [\n layer,\n ]\n\n combine_vars = {}\n property_vals = [] # (value, key)\n concrete_program = None\n for attr_func in functions:\n if isinstance(layer, Layer):\n static_func = get_ast_static_function(\n 
getattr(inner_layer, attr_func, None)\n )\n if isinstance(static_func, StaticFunction):\n if static_func.is_property:\n # property method to be exported\n immediate_val = static_func()\n property_vals.append(\n (\n immediate_val,\n layer.__class__.__name__ + '.' + attr_func,\n )\n )\n continue\n\n concrete_program = (\n static_func.concrete_program_specify_input_spec(\n inner_input_spec,\n with_hook=with_hook,\n is_prim_infer=is_prim_infer,\n )\n )\n elif 'forward' == attr_func:\n if configs.skip_forward:\n # do not jit.save forward function\n continue\n\n # transform in jit.save, if input_spec is incomplete, declarative will throw error\n # inner_input_spec is list[InputSpec], it should be packed with same structure\n # as original input_spec here.\n if inner_input_spec:\n inner_input_spec = paddle.utils.pack_sequence_as(\n input_spec, inner_input_spec\n )\n static_forward = to_static(\n inner_layer.forward,\n input_spec=inner_input_spec,\n enable_fallback=False,\n )\n concrete_program = (\n static_forward.concrete_program_specify_input_spec(\n with_hook=with_hook, is_prim_infer=is_prim_infer\n )\n )\n # the input_spec has been used in declarative, which is equal to\n # @to_static with input_spec and jit.save without input_spec,\n # avoid needless warning\n inner_input_spec = None\n else:\n continue\n else:\n # When layer is a function\n if isinstance(attr_func, StaticFunction):\n static_func = get_ast_static_function(attr_func)\n\n if static_func.is_property:\n # property method to be exported\n immediate_val = static_func()\n property_vals.append((immediate_val, static_func))\n continue\n\n concrete_program = (\n static_func.concrete_program_specify_input_spec(\n inner_input_spec, is_prim_infer=is_prim_infer\n )\n )\n else:\n static_func = get_ast_static_function(attr_func)\n if inner_input_spec:\n inner_input_spec = paddle.utils.pack_sequence_as(\n input_spec, inner_input_spec\n )\n static_function = to_static(\n static_func,\n input_spec=inner_input_spec,\n enable_fallback=False,\n )\n concrete_program = static_function.concrete_program\n\n if static_function._class_instance is None:\n warnings.warn(\n '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.format(\n layer\n )\n )\n\n # when save multi `StaticFunction`, all `StaticFunction` share params.\n dygraph_state_dict = None\n if isinstance(inner_layer, Layer):\n dygraph_state_dict = inner_layer.to_static_state_dict()\n elif isinstance(attr_func, StaticFunction):\n if static_func._class_instance:\n dygraph_state_dict = (\n static_func._class_instance.to_static_state_dict()\n )\n\n if dygraph_state_dict:\n # NOTE(chenweihang): we maintain the mapping of variable name to\n # structured name, the buffer variable (non-persistable)\n # saved to inference program may not need by dygraph Layer,\n # we only record the state_dict variable's structured name\n state_names_dict = {}\n state_var_dict = {}\n for structured_name, var in dygraph_state_dict.items():\n state_names_dict[var.name] = structured_name\n state_var_dict[var.name] = var\n\n # 3. 
share parameters from Layer to scope & record var info\n with dygraph.guard():\n for param_or_buffer in concrete_program.parameters:\n # share to scope\n if param_or_buffer.type == core.VarDesc.VarType.VOCAB:\n scr_tensor = param_or_buffer.value().get_map_tensor()\n tgt_var = scope.var(param_or_buffer.name)\n tgt_var.set_vocab(scr_tensor)\n else:\n param_or_buffer_tensor = scope.var(\n param_or_buffer.name\n ).get_tensor()\n # src_tensor = param_or_buffer.value().get_tensor()\n src_tensor = (\n state_var_dict[param_or_buffer.name]\n .value()\n .get_tensor()\n )\n param_or_buffer_tensor._share_data_with(src_tensor)\n # record var info\n if param_or_buffer.name not in extra_var_info:\n extra_info_dict = {}\n if param_or_buffer.name in state_names_dict:\n extra_info_dict['structured_name'] = state_names_dict[\n param_or_buffer.name\n ]\n extra_info_dict[\n 'stop_gradient'\n ] = param_or_buffer.stop_gradient\n if isinstance(param_or_buffer, EagerParamBase):\n extra_info_dict['trainable'] = param_or_buffer.trainable\n extra_var_info[param_or_buffer.name] = extra_info_dict\n\n # 4. build input & output of save_infernece_model\n # NOTE(chenweihang): [ Get input variables name ]\n # There are two cases, whether to prune the inputs or not\n # - not prune inputs (recommend):\n # - the len(input_spec) == len((concrete_program.inputs) - 1\n # - here can use concrete_program.inputs directly\n # - prune inputs:\n # - the input_spec length < len((concrete_program.inputs) - 1\n # - the input_spec's name should be in concrete_program.inputs\n input_var_names = _get_input_var_names(\n concrete_program.inputs, inner_input_spec\n )\n\n # NOTE(chenweihang): [ Get output variables ]\n # the rule is like [ Get input variables name ]. For output var,\n # we only support Tensor spec, and actually, we only need the\n # var name of output, and we don't recommended to use output_spec\n # print(concrete_program.main_program)\n # print(concrete_program.outputs, configs.output_spec)\n output_vars = _get_output_vars(\n concrete_program.outputs, configs.output_spec, with_hook\n )\n\n # 5. save inference model\n # construct new save_inference_model arguments\n model_path = dirname\n # NOTE(chenweihang): because prefix contains model and params filename,\n # so we don't support set model_filename & params_filename\n if 'forward' == attr_func or not isinstance(layer, Layer):\n model_filename = file_prefix + INFER_MODEL_SUFFIX\n params_filename = file_prefix + INFER_PARAMS_SUFFIX\n path_prefix = file_prefix\n else:\n model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX\n params_filename = (\n file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX\n )\n file_prefix = file_prefix + '.' 
+ attr_func\n file_prefix = os.path.join(model_path, file_prefix)\n with scope_guard(scope):\n input_vars = []\n for var in concrete_program.main_program.clone().list_vars():\n if var.name in input_var_names:\n input_vars.append(var)\n save_inference_model(\n path_prefix=file_prefix,\n feed_vars=input_vars,\n fetch_vars=output_vars,\n executor=Executor(_current_expected_place()),\n program=concrete_program.main_program.clone(),\n clip_extra=configs.clip_extra,\n )\n\n if combine_params:\n clone_main_program = concrete_program.main_program.clone()\n clone_main_program = clone_main_program._prune_with_input(\n input_var_names, output_vars\n )\n for block in clone_main_program.blocks:\n combine_vars.update(block.vars)\n\n # save shared params\n if combine_params:\n # sort vars by name\n combine_vars = sorted(combine_vars.items(), key=lambda item: item[0])\n ordered_vars = []\n for name, var in combine_vars:\n ordered_vars.append(var)\n\n params_filename = file_prefix + INFER_PARAMS_SUFFIX\n with scope_guard(scope):\n paddle.static.save_vars(\n Executor(_current_expected_place()),\n dirname=model_path,\n vars=list(\n filter(\n paddle.framework.io_utils.is_persistable, ordered_vars\n )\n ),\n filename=params_filename,\n )\n # save property\n property_save_path = os.path.join(\n os.path.normpath(model_path), file_prefix + INFER_PROPERTY_SUFFIX\n )\n _save_property(property_save_path, property_vals)\n\n # NOTE(chenweihang): [ Save extra variable info ]\n # save_inference_model will lose some important variable information, including:\n # - Variable name and correspondence (when saved variables as one file)\n # - Variable.stop_gradient information\n # - Which persistent variable are parameter and which are not\n # - Parameter.trainable information\n #\n # The lost information cannot be recovered when it is loaded again,\n # so if we want to perform fine-tune after loading, we may need to\n # configure redundant information to proceed.\n #\n # Due to compatibility issues, we cannot change the original storage structure,\n # but we can save these information in `jit.save` without changing the original\n # storage to improve user experience. 
So we save extra information into\n # file `***.pdiparams.info`\n\n # \"layer\" can only be Layer or function or StaticFunction.\n contain_parameter = False\n if concrete_program is not None:\n for var in concrete_program.main_program.list_vars():\n contain_parameter |= isinstance(var, Parameter)\n\n if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:\n with scope_guard(scope):\n extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX\n with open(extra_var_info_path, 'wb') as f:\n pickle.dump(extra_var_info, f, protocol=2)", "def defineHook (self, hook):\n self._hooks.append (hook)", "def preSceneSave(input):\n print '- running preSceneSave callback'\n reload(callbacks)\n callbacks.preSceneSaveCallback()", "def hook_save_checkpoint_path(self, x):\n self.save_checkpoint_path = x", "def register_checkpoint_hooks(cls):\n global DEFAULT_LOAD_HOOKS\n global DEFAULT_SAVE_HOOKS\n global DEFAULT_TRANSFER_HOOKS\n for name, method in cls.__dict__.items():\n if hasattr(method, \"_speechbrain_saver\"):\n DEFAULT_SAVE_HOOKS[cls] = method\n logger.debug(f\"Registered checkpoint save hook for {name}\")\n if hasattr(method, \"_speechbrain_loader\"):\n DEFAULT_LOAD_HOOKS[cls] = method\n logger.debug(f\"Registered checkpoint load hook for {name}\")\n if hasattr(method, \"_speechbrain_transfer\"):\n DEFAULT_TRANSFER_HOOKS[cls] = method\n logger.debug(f\"Registered parameter transfer hook for {name}\")\n return cls", "def register_hook(self, hook: str):\n if hook in self.hooks.keys():\n self.logger.warning(f\"{self.name}: hook {hook!r} is already declared.\")\n\n self.hooks[hook] = []", "def store_hook(self, hook: str, hook_function: HookFunction, route: Route):\n raise NotImplementedError", "def on_hook(self, hook: \"Hook\") -> None:\n try:\n if self.hooked is not None:\n func, args_gen = self.hooked[type(hook)]\n else:\n return\n except KeyError:\n return\n else:\n hook(func, args_gen)", "def register_hook(self, layer):\n layer.register_backward_hook(self.grad_hook)\n layer.register_forward_hook(self.feature_hook)", "def add_hook(self, config, hook, url):\n # TODO: Some input validation needed\n config[\"hooks\"][hook] = url\n print(\"Added hook {0}\".format(hook))\n save_config(confpath, config)", "def hook_pre_trained(self, x):\n self.pre_trained = x", "def add_hook(self, hook):\n h = hook.hash\n self.hooks[h] = hook", "def postSceneSave(input):\n print '- running postSceneSave callback'\n reload(callbacks)\n callbacks.postSceneSaveCallback()", "def register_hook(self, hook, function):\n if hook in self.hooks:\n self.hooks[hook].append(function)\n else:\n self.hooks[hook] = [ function ]", "def get_profiler_hook(model_dir, save_steps=1000, **kwargs):\n return tensorflow.estimator.ProfilerHook(save_steps=save_steps, output_dir=model_dir)", "def _save_final_ckpt(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n obj = None\n if kwargs.get('callbacks') and isinstance(kwargs.get('callbacks'), ModelCheckpoint):\n obj = kwargs.get('callbacks')\n if kwargs.get('callbacks') and isinstance(kwargs.get('callbacks'), list):\n for item in kwargs.get('callbacks'):\n if isinstance(item, ModelCheckpoint):\n obj = item\n if obj and obj._config and obj._config.exception_save:\n try:\n func(self, *args, **kwargs)\n except BaseException as e:\n # pylint: disable=W0212\n prefix = _chg_ckpt_file_name_if_same_exist(obj._directory, obj._exception_prefix, True)\n cur_ckpoint_file = prefix + \"-\" + str(self._current_epoch_num) + \"_\" \\\n + str(self._current_step_num) + \"_breakpoint.ckpt\"\n cur_file 
= os.path.join(obj._directory, cur_ckpoint_file)\n if \"epoch_num\" in obj._append_dict:\n obj._append_dict[\"epoch_num\"] = obj._append_epoch_num + self._current_epoch_num\n if \"step_num\" in obj._append_dict:\n obj._append_dict[\"step_num\"] = obj._append_step_num + self._current_step_num\n save_checkpoint(self._train_network, cur_file, obj._config.integrated_save, obj._config.async_save,\n obj._append_dict, obj._config.enc_key, obj._config.enc_mode)\n raise e\n else:\n func(self, *args, **kwargs)\n return wrapper", "def register_hook_from_cfg(self, hook_cfg):\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = mmcv.build_from_cfg(hook_cfg, HOOKS)\n self.register_hook(hook, priority=priority)", "def hook(func: Callable):\n parameters, return_annotation = _extract_params(func, extract_return=True)\n return Hook(str(func), parameters, return_annotation)", "def _register_hooks(self):\r\n def forward_hook(key):\r\n def forward_hook_(module, input, output):\r\n self.registered_hooks[key][0] = True\r\n # Save featuremaps\r\n if not isinstance(output, torch.Tensor):\r\n print(\"Cannot hook layer {} because its gradients are not in tensor format\".format(key))\r\n\r\n if not ENABLE_MODULE_HOOK:\r\n def _backward_hook(grad_out):\r\n self.registered_hooks[key][1] = True\r\n # Save the gradients correspond to the featuremaps\r\n self.grad_pool[key] = grad_out.detach()\r\n\r\n # Register backward hook directly to the output\r\n # Handle must be removed afterwards otherwise tensor is not freed\r\n if not self.registered_hooks[key][1]:\r\n _backward_handle = output.register_hook(_backward_hook)\r\n self.backward_handlers.append(_backward_handle)\r\n self.fmap_pool[key] = output.detach()\r\n\r\n return forward_hook_\r\n\r\n # This backward hook method looks prettier but is currently bugged in pytorch (04/25/2020)\r\n # Handle does not need to be removed, tensors are freed automatically\r\n def backward_hook(key):\r\n def backward_hook_(module, grad_in, grad_out):\r\n self.registered_hooks[key][1] = True\r\n # Save the gradients correspond to the featuremaps\r\n self.grad_pool[key] = grad_out[0].detach() # TODO: Still correct with batch size > 1?\r\n\r\n return backward_hook_\r\n\r\n self.remove_hook(forward=True, backward=True)\r\n for name, module in self.model.named_modules():\r\n if self.target_layers is None or name in self.target_layers:\r\n self.registered_hooks[name] = [False, False]\r\n self.forward_handlers.append(module.register_forward_hook(forward_hook(name)))\r\n if ENABLE_MODULE_HOOK:\r\n self.backward_handlers.append(module.register_backward_hook(backward_hook(name)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
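The positive document in the row above registers a pre-save hook under a module-level lock and returns a removal helper. Below is a minimal, self-contained sketch of that registry pattern; the names HookRemoveHelper and _run_save_pre_hooks are hypothetical stand-ins for whatever the surrounding framework actually provides, and the with-statement locking is a stylistic assumption rather than a copy of the original.

import threading

_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []


class HookRemoveHelper:
    """Handle that lets the caller unregister the hook it was created for."""

    def __init__(self, hook):
        self._hook = hook

    def remove(self):
        with _save_pre_hooks_lock:
            if self._hook in _save_pre_hooks:
                _save_pre_hooks.remove(self._hook)


def _register_save_pre_hook(hook):
    # Guard the shared registry so concurrent registrations stay consistent.
    with _save_pre_hooks_lock:
        if hook not in _save_pre_hooks:
            _save_pre_hooks.append(hook)
    return HookRemoveHelper(hook)


def _run_save_pre_hooks(layer, path):
    # A hypothetical save() entry point would call this before serializing.
    with _save_pre_hooks_lock:
        hooks = list(_save_pre_hooks)
    for hook in hooks:
        hook(layer, path)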
Saves the input Layer or function as a ``paddle.jit.TranslatedLayer`` format model, which can be used for inference or fine-tuning after loading. It saves the translated program and all related persistable variables of the input Layer to the given ``path``. ``path`` is the prefix of the saved objects: the translated program file uses the ``.pdmodel`` suffix, the persistable variables file uses the ``.pdiparams`` suffix, and some additional variable description information is saved to a file with the ``.pdiparams.info`` suffix; this additional information is used during fine-tuning.
def save(layer, path, input_spec=None, **configs): # 1. input build & check prog_translator = ProgramTranslator() is_prim_infer = core._is_fwd_prim_enabled() and core._is_bwd_prim_enabled() if not prog_translator.enable_to_static: raise RuntimeError( "The paddle.jit.save doesn't work when setting 'paddle.jit.enable_to_static' to False." ) if not ( isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer) ): raise TypeError( "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s." % type(layer) ) elif inspect.isfunction(layer) or isinstance(layer, StaticFunction): warnings.warn( 'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.' ) # NOTE(chenweihang): If the input layer be wrapped by DataParallel, # the args and kwargs of forward method will can't be parsed by # function_spec, so here we save DataParallel._layers instead # DataParallel it self # NOTE(chenweihang): using inner_layer, do not change input layer if isinstance(layer, paddle.DataParallel): inner_layer = layer._layers else: inner_layer = layer # path check file_prefix = os.path.basename(path) if file_prefix == "": raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but received " "file_prefix is empty string." ) dirname = os.path.dirname(path) if dirname and not os.path.exists(dirname): os.makedirs(dirname) # avoid change user given input_spec inner_input_spec = None if input_spec is not None: if isinstance(layer, Layer): for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) if ( isinstance(static_func, StaticFunction) and 'forward' != attr_func ): raise ValueError( "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s." % type(input_spec) ) if not isinstance(input_spec, (list, tuple)): raise TypeError( "The input input_spec should be 'list', but received input_spec's type is %s." % type(input_spec) ) inner_input_spec = [] for var in paddle.utils.flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.eager.Tensor, Variable)): inner_input_spec.append( paddle.static.InputSpec.from_tensor(var) ) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`. inner_input_spec.append(var) # parse configs configs = _parse_save_configs(configs) # whether outermost layer has pre/post hook, if does, we need also save # these operators in program. with_hook = configs.with_hook combine_params = configs.combine_params if combine_params: configs._program_only = True scope = core.Scope() extra_var_info = {} if isinstance(layer, Layer): functions = dir(inner_layer) if inner_layer._forward_pre_hooks or inner_layer._forward_post_hooks: with_hook = True else: # layer is function functions = [ layer, ] combine_vars = {} property_vals = [] # (value, key) concrete_program = None for attr_func in functions: if isinstance(layer, Layer): static_func = get_ast_static_function( getattr(inner_layer, attr_func, None) ) if isinstance(static_func, StaticFunction): if static_func.is_property: # property method to be exported immediate_val = static_func() property_vals.append( ( immediate_val, layer.__class__.__name__ + '.' 
+ attr_func, ) ) continue concrete_program = ( static_func.concrete_program_specify_input_spec( inner_input_spec, with_hook=with_hook, is_prim_infer=is_prim_infer, ) ) elif 'forward' == attr_func: if configs.skip_forward: # do not jit.save forward function continue # transform in jit.save, if input_spec is incomplete, declarative will throw error # inner_input_spec is list[InputSpec], it should be packed with same structure # as original input_spec here. if inner_input_spec: inner_input_spec = paddle.utils.pack_sequence_as( input_spec, inner_input_spec ) static_forward = to_static( inner_layer.forward, input_spec=inner_input_spec, enable_fallback=False, ) concrete_program = ( static_forward.concrete_program_specify_input_spec( with_hook=with_hook, is_prim_infer=is_prim_infer ) ) # the input_spec has been used in declarative, which is equal to # @to_static with input_spec and jit.save without input_spec, # avoid needless warning inner_input_spec = None else: continue else: # When layer is a function if isinstance(attr_func, StaticFunction): static_func = get_ast_static_function(attr_func) if static_func.is_property: # property method to be exported immediate_val = static_func() property_vals.append((immediate_val, static_func)) continue concrete_program = ( static_func.concrete_program_specify_input_spec( inner_input_spec, is_prim_infer=is_prim_infer ) ) else: static_func = get_ast_static_function(attr_func) if inner_input_spec: inner_input_spec = paddle.utils.pack_sequence_as( input_spec, inner_input_spec ) static_function = to_static( static_func, input_spec=inner_input_spec, enable_fallback=False, ) concrete_program = static_function.concrete_program if static_function._class_instance is None: warnings.warn( '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.format( layer ) ) # when save multi `StaticFunction`, all `StaticFunction` share params. dygraph_state_dict = None if isinstance(inner_layer, Layer): dygraph_state_dict = inner_layer.to_static_state_dict() elif isinstance(attr_func, StaticFunction): if static_func._class_instance: dygraph_state_dict = ( static_func._class_instance.to_static_state_dict() ) if dygraph_state_dict: # NOTE(chenweihang): we maintain the mapping of variable name to # structured name, the buffer variable (non-persistable) # saved to inference program may not need by dygraph Layer, # we only record the state_dict variable's structured name state_names_dict = {} state_var_dict = {} for structured_name, var in dygraph_state_dict.items(): state_names_dict[var.name] = structured_name state_var_dict[var.name] = var # 3. 
share parameters from Layer to scope & record var info with dygraph.guard(): for param_or_buffer in concrete_program.parameters: # share to scope if param_or_buffer.type == core.VarDesc.VarType.VOCAB: scr_tensor = param_or_buffer.value().get_map_tensor() tgt_var = scope.var(param_or_buffer.name) tgt_var.set_vocab(scr_tensor) else: param_or_buffer_tensor = scope.var( param_or_buffer.name ).get_tensor() # src_tensor = param_or_buffer.value().get_tensor() src_tensor = ( state_var_dict[param_or_buffer.name] .value() .get_tensor() ) param_or_buffer_tensor._share_data_with(src_tensor) # record var info if param_or_buffer.name not in extra_var_info: extra_info_dict = {} if param_or_buffer.name in state_names_dict: extra_info_dict['structured_name'] = state_names_dict[ param_or_buffer.name ] extra_info_dict[ 'stop_gradient' ] = param_or_buffer.stop_gradient if isinstance(param_or_buffer, EagerParamBase): extra_info_dict['trainable'] = param_or_buffer.trainable extra_var_info[param_or_buffer.name] = extra_info_dict # 4. build input & output of save_infernece_model # NOTE(chenweihang): [ Get input variables name ] # There are two cases, whether to prune the inputs or not # - not prune inputs (recommend): # - the len(input_spec) == len((concrete_program.inputs) - 1 # - here can use concrete_program.inputs directly # - prune inputs: # - the input_spec length < len((concrete_program.inputs) - 1 # - the input_spec's name should be in concrete_program.inputs input_var_names = _get_input_var_names( concrete_program.inputs, inner_input_spec ) # NOTE(chenweihang): [ Get output variables ] # the rule is like [ Get input variables name ]. For output var, # we only support Tensor spec, and actually, we only need the # var name of output, and we don't recommended to use output_spec # print(concrete_program.main_program) # print(concrete_program.outputs, configs.output_spec) output_vars = _get_output_vars( concrete_program.outputs, configs.output_spec, with_hook ) # 5. save inference model # construct new save_inference_model arguments model_path = dirname # NOTE(chenweihang): because prefix contains model and params filename, # so we don't support set model_filename & params_filename if 'forward' == attr_func or not isinstance(layer, Layer): model_filename = file_prefix + INFER_MODEL_SUFFIX params_filename = file_prefix + INFER_PARAMS_SUFFIX path_prefix = file_prefix else: model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX params_filename = ( file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX ) file_prefix = file_prefix + '.' 
+ attr_func file_prefix = os.path.join(model_path, file_prefix) with scope_guard(scope): input_vars = [] for var in concrete_program.main_program.clone().list_vars(): if var.name in input_var_names: input_vars.append(var) save_inference_model( path_prefix=file_prefix, feed_vars=input_vars, fetch_vars=output_vars, executor=Executor(_current_expected_place()), program=concrete_program.main_program.clone(), clip_extra=configs.clip_extra, ) if combine_params: clone_main_program = concrete_program.main_program.clone() clone_main_program = clone_main_program._prune_with_input( input_var_names, output_vars ) for block in clone_main_program.blocks: combine_vars.update(block.vars) # save shared params if combine_params: # sort vars by name combine_vars = sorted(combine_vars.items(), key=lambda item: item[0]) ordered_vars = [] for name, var in combine_vars: ordered_vars.append(var) params_filename = file_prefix + INFER_PARAMS_SUFFIX with scope_guard(scope): paddle.static.save_vars( Executor(_current_expected_place()), dirname=model_path, vars=list( filter( paddle.framework.io_utils.is_persistable, ordered_vars ) ), filename=params_filename, ) # save property property_save_path = os.path.join( os.path.normpath(model_path), file_prefix + INFER_PROPERTY_SUFFIX ) _save_property(property_save_path, property_vals) # NOTE(chenweihang): [ Save extra variable info ] # save_inference_model will lose some important variable information, including: # - Variable name and correspondence (when saved variables as one file) # - Variable.stop_gradient information # - Which persistent variable are parameter and which are not # - Parameter.trainable information # # The lost information cannot be recovered when it is loaded again, # so if we want to perform fine-tune after loading, we may need to # configure redundant information to proceed. # # Due to compatibility issues, we cannot change the original storage structure, # but we can save these information in `jit.save` without changing the original # storage to improve user experience. So we save extra information into # file `***.pdiparams.info` # "layer" can only be Layer or function or StaticFunction. contain_parameter = False if concrete_program is not None: for var in concrete_program.main_program.list_vars(): contain_parameter |= isinstance(var, Parameter) if (isinstance(layer, Layer) or contain_parameter) and extra_var_info: with scope_guard(scope): extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX with open(extra_var_info_path, 'wb') as f: pickle.dump(extra_var_info, f, protocol=2)
[ "def save(self, path):\n if path is None:\n return\n\n logger.info(\"Save model to {}\".format(path))\n self.model.save_pretrained(path)\n self.tokenizer.save_pretrained(path)", "def save_model(self, path):\r\n torch.save(self.model.state_dict(), path)", "def save(self, path):\n\n torch.save({'embeddings': self.embeddings, 'indices': self.data_index}, path)", "def save(self, path):\n if len(self.ts_allvars) > 0:\n path = self._tfObject__saver.save(tf.get_default_session(), path)\n return path", "def save(self, path: str = \"/tmp\"):\n pickle.dump(self, open(f\"{path}/_planet_{self.englishName.replace(' ', '')}.pickle\", \"wb\"))", "def save(self, path):", "def save(path: str) -> None:\n __scene.save_frame(path)", "def save_optimizer(optimizer, path):\n path = os.path.join(path, \"optimizer.pkl\")\n print(\"Saving the hyperparameters optimizer to {}\".format(path))\n with open(path, 'wb+') as f:\n pickle.dump(optimizer, f)", "def save_model(path, algorithm, params):\n if algorithm == 'pmi':\n data = {\n 'algorithm': 'pmi',\n 'pmi': params}\n else:\n data = {\n 'algorithm': 'phmm',\n 'em': params[0],\n 'gx': params[1],\n 'gy': params[2],\n 'trans': params[3]}\n\n try:\n with open(path, 'wb') as f:\n pickle.dump(data, f, protocol=3)\n except OSError:\n raise ModelError('Could not write model file: {}'.format(path))", "def save_pickle(self, path=None):\n\n path = path if path else ''\n\n if path is not None and path != '':\n if not os.path.isdir(path):\n print('Directory does not exist. Will try creating it...')\n os.mkdir(path)\n\n pickle.dump(self, open(path + 'state_' + self.name + '.pckl', 'wb'))", "def save(self, path):\n path = os.path.abspath(path)\n if not os.path.exists(path):\n os.mkdir(path)\n self.save_depository(os.path.join(path, 'depository'))\n self.save_libraries(os.path.join(path, 'libraries'))\n self.save_groups(os.path.join(path, 'groups'))", "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "def saveVoc(self, path):\n\n self.vocabulary.toFile(path)", "def _save_model(self, filepath: str, trainer, pl_module):\n ModelCheckpoint._save_model(self, filepath, trainer, pl_module)\n # if isinstance(pl_module.encoder, WrapperModel):\n # torch.save(pl_module.encoder.state_dict(), filepath)\n # else:\n # ModelCheckpoint._save_model(self, filepath, trainer, pl_module)", "def save_skos(self, path, language):\n serialize_subjects_to_skos(self.subjects, language, path)", "def save_experience_replay_to_file(self, path):\r\n\r\n try:\r\n pickle.dump(self.experience_replay_pool, open(path, \"wb\"))\r\n print 'saved model in %s' % (path,)\r\n except Exception, e:\r\n print 'Error: Writing model fails: %s' % (path,)\r\n print e", "def _save_model(self, model_path):\n # dictionary for saving model information\n dic = {}\n\n # collect all information in a dictionary\n for i, layer in enumerate(self.layers):\n dic[f\"layer_{i+1}\"] = {}\n dic[f\"layer_{i+1}\"][\"type\"] = layer.name\n dic[f\"layer_{i+1}\"][\"weight_shape\"] = layer.weights.shape\n #dic[f\"layer_{i+1}\"][\"bias_shape\"] = layer.bias.shape\n dic[f\"layer_{i+1}\"][\"weights\"] = layer.weights\n dic[f\"layer_{i+1}\"][\"bias\"] = layer.bias\n\n # if the folder is not yet created, do so\n Path(model_path).mkdir(exist_ok=True)\n\n # save the dictionary as a pickle\n save_as_pickle(dic, model_path + \"model.pickle\")", "def save(self, model_path):\n try:\n model = self.get_model()\n model.saveModel(model_path + \".bigdl\", model_path + \".bin\", True)\n except 
ValueError:\n invalidInputError(False,\n \"You should fit before calling save\")", "def save(self, epoch, file_path=\"output/bert_trained.model\"):\n output_path = file_path + \".ep%d\" % epoch\n torch.save(self.model.cpu(), output_path)\n self.model.to(self.device)\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n return output_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
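The row above pairs the ``paddle.jit.save`` docstring with its implementation. Here is a minimal usage sketch, assuming a Paddle 2.x installation and using only the public pieces the implementation itself touches (paddle.nn.Layer, paddle.static.InputSpec, paddle.jit.save/load); the layer shape and the 'example/net' prefix are arbitrary choices for illustration.

import paddle
from paddle.static import InputSpec


class Net(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(784, 10)

    def forward(self, x):
        return self.fc(x)


layer = Net()
# Declaring the input signature lets jit.save trace a static program
# without needing a concrete example tensor.
spec = [InputSpec(shape=[None, 784], dtype='float32', name='x')]

# Writes example/net.pdmodel, example/net.pdiparams and
# example/net.pdiparams.info next to the given prefix.
paddle.jit.save(layer, path='example/net', input_spec=spec)

# The saved artifacts can later be restored as a TranslatedLayer.
restored = paddle.jit.load('example/net')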
Returns the encoding type that matches Python's native strings.
def _get_native_encoding_type(self):
    if sys.maxunicode == 65535:
        return 'UTF16'
    else:
        return 'UTF32'
[ "def get_encoding(byte_string):\n return detect(byte_string)['encoding']", "def get_encoding(str):\n lookup = ('utf_8', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213',\n 'shift_jis', 'shift_jis_2004','shift_jisx0213',\n 'iso2022jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_3',\n 'iso2022_jp_ext','latin_1', 'ascii')\n for encoding in lookup:\n try:\n str = str.decode(encoding)\n return encoding\n except:\n pass\n return None", "def _nativeType(self):\n\t\treturn str", "def ensure_native_ascii_str(value):\n if isinstance(value, str):\n return value.encode(\"ascii\", \"replace\").decode()\n elif isinstance(value, bytes):\n return value.decode(\"ascii\", \"replace\")\n else:\n raise TypeError(\"Invalid type for string conversion: {}\".format(type(value)))", "def get_encoding_string(self):\n\t\treturn driver_h.FORMAT[self.encoding]", "def __get_encoding(self):\r\n if self.__encoding is not None:\r\n return self.__encoding\r\n return 'utf-8'", "def guess_encoding(text: bytes, default: Encoding=DEFAULT_ENCODING) -> Encoding:\n result = chardet.detect(text)\n return normalize_result(result, default=default)", "def ensure_native_ascii_str(value):\n if isinstance(value, str):\n return value\n elif isinstance(value, unicode): # noqa\n return value.encode(\"ascii\", \"replace\")\n else:\n raise TypeError(\"Invalid type for string conversion: {}\".format(type(value)))", "def get_data_encoding():", "def detect_source_encoding(source: bytes) -> str:\n\n lines = source.splitlines()[:2]\n for line in lines:\n m = Regex.CODING.match(line)\n if m is not None:\n return m.group(1).decode()\n\n return \"utf-8\"", "def getdefaultencoding():\n\tpass", "def _translate_type(type_name):\n if not isinstance(type_name, str):\n raise Exception('Type name must be a string')\n type_name = _sanitize_identifier(type_name)\n\n return _ASN1_BUILTIN_TYPES.get(type_name, type_name)", "def get_encoding_string(self):\n\t\treturn SpeechConfig.objects.get_subclass(id=self.config.id).get_encoding_string()", "def guess_output_encoding():\n # Apparently builds in Docker containers may have None as an encoding.\n # Fall back to ASCII. If this ever happens in a non-ASCII path, well,\n # there may be a more difficult decision to be made. 
We'll burn that\n # bridge when we get to it, as they almost say.\n return stdout.encoding or 'ascii'", "def getfilesystemencoding():\n\tpass", "def get_body_encoding(self):\r\n assert self.body_encoding <> SHORTEST\r\n if self.body_encoding == QP:\r\n return 'quoted-printable'\r\n elif self.body_encoding == BASE64:\r\n return 'base64'\r\n else:\r\n return encode_7or8bit", "def get_file_encoding(content):\r\n encoding = None\r\n try:\r\n lines_to_check = content.split(\"\\n\", 2)\r\n for index in range(2):\r\n if len(lines_to_check) > index:\r\n line_encoding = _search_coding_line(lines_to_check[index])\r\n if line_encoding:\r\n encoding = line_encoding\r\n break\r\n except UnicodeDecodeError as error:\r\n #add logger\r\n print(error)\r\n #if not encoding is set then use UTF-8 as default\r\n if encoding is None:\r\n encoding = \"UTF-8\"\r\n return encoding", "def text_encoding(self):\n\n return self._text_encoding", "def getctype(c):\n if isinstance(c, int):\n if c in BUILTIN_CTYPES:\n return BUILTIN_CTYPES[c]\n else:\n raise CrumbError('Invalid payload field size \"%d\"; must be the size of a built-in C integer type' % c)\n elif isinstance(c, float):\n if c in BUILTIN_CTYPES:\n return BUILTIN_CTYPES[c]\n else:\n raise CrumbError('Invalid float size \"%s\" in payload, must be 32.0 or 64.0' % c)\n else:\n enums = c.split(' ')\n for e in enums:\n if not isvalidcvar(e):\n raise CrumbError('\"%s\" is not a valid C/C++ symbol name.' % e)\n\n # Assuming no one will create an entry with more than 64k hand-made names.\n # Really, 256 is a bit ridiculous, but hey,who knows.\n if len(enums) <= 256:\n return 'uint8_t'\n elif len(enums) <= 64*1024:\n return 'uint16_t'\n else:\n raise CrumbError(\"Too many enums!!! Something went seriously wrong.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
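The encoding helper in the row above keys off sys.maxunicode, which equals 65535 only on narrow Unicode builds of older interpreters; on any Python 3.3+ build it is 0x10FFFF, so the function reports 'UTF32'. A standalone sketch of the same check, with the final print call added purely for illustration:

import sys


def get_native_encoding_type():
    # Narrow (pre-PEP 393) builds stored strings as UTF-16 code units;
    # modern CPython always reports sys.maxunicode == 0x10FFFF.
    if sys.maxunicode == 65535:
        return 'UTF16'
    return 'UTF32'


print(get_native_encoding_type())  # 'UTF32' on Python 3.3 and later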
Apply the Natural Language API to the document, and collect the detected entities.
def add_entities(self, filename, locale, document):
    # Apply the Natural Language API to the document.
    entities = self.nl_detect(document)
    self.extract_and_save_entity_info(entities, locale, filename)
[ "def detect_document(path):\n from google.cloud import vision\n import io\n #image1 = Image.open('C:\\\\Users\\\\Gabija\\\\Documents\\\\BGN_hackathon\\\\Handwriting\\\\p.jpg').convert('1')\n #image1.save('C:\\\\Users\\\\Gabija\\\\Documents\\\\BGN_hackathon\\\\Handwriting\\\\p2.jpg')\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n if len(response.full_text_annotation.pages) is 0:\n print(\"null*************************\")\n response = client.text_detection(image=image)\n texts = response.text_annotations\n\n print('Texts:')\n print(texts)\n for text in texts:\n print('\\n\"{}\"'.format(text.description))\n\n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in text.bounding_poly.vertices])\n\n print('bounds: {}'.format(','.join(vertices)))\n\n else:\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n print('\\nBlock confidence: {}\\n'.format(block.confidence))\n\n for paragraph in block.paragraphs:\n print('Paragraph confidence: {}'.format(\n paragraph.confidence))\n\n for word in paragraph.words:\n word_text = ''.join([\n symbol.text for symbol in word.symbols\n ])\n print('Word text: {} (confidence: {})'.format(\n word_text, word.confidence))\n\n for symbol in word.symbols:\n print('\\tSymbol: {} (confidence: {})'.format(\n symbol.text, symbol.confidence))", "def callNLPService(text):\n google_cloud_credentials = \"./assets/Interview_Voice_google_cloud_key.json\"\n nlp_service = get_google_nlp_service(google_cloud_credentials)\n client = nlp_service.documents()\n request1 = client.analyzeEntitySentiment(body={\n \"document\": {\n \"type\": \"PLAIN_TEXT\",\n \"content\": text,\n \"language\": \"en_IN\"\n }\n })\n try:\n response = request1.execute()\n except googleapiclient.errors.HttpError as e:\n raise RequestError(e)\n except URLError as e:\n raise RequestError(\"recognition connection failed: {0}\".format(e.reason))\n entities = response[\"entities\"]\n return entities", "def generate_candidates_for_doc(self, doc: ConllDocument) -> List[Dict]:\n self.get_kb()\n # The return variable. 
Stores the list of entities.\n entities = []\n\n # Inner function to append a label_dict to the entities list\n def add_entity(entity_span_s, entity_span_e, entity_tokens, entity_gt):\n entity_text = ' '.join(entity_tokens)\n entity_candidates = [\n c.entity_ for c in self.kb.get_candidates(entity_text)\n ]\n entity_span = [entity_span_s, entity_span_e]\n\n entities.append(\n {'Position': entity_span,\n 'GroundTruth': entity_gt,\n 'Candidates': entity_candidates}\n )\n\n # Helper variables for the iteration:\n # Tokens belonging to current entity\n collected_tokens = []\n # Tag of the current entity (the ground truth)\n current_entity_tag = None\n # Position of the first entity token in the document tokens list\n span_start = None\n\n # Enumerate the document's list of tokens\n for i_token, token in enumerate(doc.tokens):\n\n # If we are looking at the beginning of a named entity\n if token.true_label.startswith(\"Q\") or token.true_label == \"B\":\n\n # Check if we already have collected a named entity\n # This is the case when two named entities follow each other\n if len(collected_tokens) > 0:\n add_entity(span_start, i_token-1,\n collected_tokens, current_entity_tag)\n\n span_start = i_token\n collected_tokens = [token.text]\n current_entity_tag = token.true_label\n\n # If we are looking at the continuation of a named entity\n elif token.true_label == 'I':\n collected_tokens.append(token.text)\n\n # If we're not looking at a token in a named entity\n else:\n # If we have passed the end of a named entity\n if len(collected_tokens) > 0:\n add_entity(span_start, i_token-1,\n collected_tokens, current_entity_tag)\n\n collected_tokens = []\n\n # If the last tokens were a named entity\n if len(collected_tokens) > 0:\n add_entity(span_start, len(doc.tokens)-1,\n collected_tokens, current_entity_tag)\n\n return entities", "def docParse(self):\n text = self.text\n text = self.simplify(text)\n nlp = self.nlp\n full_doc = nlp(text)\n \n # Slit into sentences and find Simple sentences\n sent_doc_ls = list(sent for sent in full_doc.sents)\n spl_ls = self.simple_find(sent_doc_ls)\n doc_ls = list(nlp.pipe(spl_ls))\n\n print(\"Finding triples (Subject-Verb-Object) from your doc...\\n\")\n # Our triples will be (ent1, rel, ent2)\n triples = self.all_triples(doc_ls) \n return triples", "def classify_request(self):\n\n # Detects the response of the text\n try:\n response = self.client.analyze_entities(self.document, encoding_type='UTF32', )\n\n \"\"\"\n 0 = 'UNKNOWN'\n 1 = 'PERSON'\n 2 = 'LOCATION'\n 3 = 'ORGANIZATION'\n 4 = 'EVENT'\n 5 = 'WORK_OF_ART'\n 6 = 'CONSUMER_GOOD'\n 7 = 'OTHER'\n \"\"\"\n\n classified_text = [{}]\n\n for entity in response.entities:\n classified_text.append(entity)\n classified_text.pop(0)\n return classified_text\n except:\n print(\"Classification error\")", "def extract_entities_results_html(text, normalize):\n try:\n result = rester.get_ner_tags(\n text, concatenate=True, normalize=normalize\n )\n except MatScholarRestError:\n rester_error_txt = RESTER_ERROR_TEXT\n return common_rester_error_html(rester_error_txt)\n tagged_doc = result[\"tags\"]\n relevance = result[\"relevance\"]\n highlighted = highlight_entities_html(tagged_doc)\n\n # Add the warning\n if not relevance:\n warning_header_txt = \"Warning! Abstract not relevant.\"\n warning_body_txt = (\n \"Our classifier has flagged this document as not relevant to \"\n \"inorganic materials science. 
Expect lower than optimum \"\n \"performance.\"\n )\n warning = common_warning_html(\n warning_header_txt, warning_body_txt, \"is-fullwidth\"\n )\n else:\n warning = html.Div(\"\")\n\n # Update download link\n doc = {\"sentences\": []}\n for sent in tagged_doc:\n new_sent = []\n for token, tag in sent:\n new_sent.append({\"token\": token, \"tag\": tag})\n doc[\"sentences\"].append(new_sent)\n json_string = json.dumps(doc)\n json_string = \"data:text/csv;charset=utf-8,\" + urllib.parse.quote(\n json_string\n )\n download_link = html.A(\n \"Download entities as json\",\n id=\"entity-download-link\",\n href=json_string,\n download=\"tagged_docs.json\",\n target=\"_blank\",\n )\n download_container = html.Div(\n download_link, className=\"has-text-size-4 has-margin-top 10\"\n )\n\n label = html.Label(\"Extracted Entity Tags:\")\n label_container = html.Div(label, className=\"is-size-4 has-margin-top-30\")\n\n highlighted_container = html.Div(highlighted)\n\n label_label = html.Label(\"Labels:\")\n label_label_container = html.Div(\n label_label, className=\"is-size-4 has-margin-top-30\"\n )\n\n entity_colormap_key = copy.deepcopy(entity_color_map_extended)\n entities_keys = []\n for e, color in entity_colormap_key.items():\n # don't need the \"other\" label\n if e == \"other\":\n continue\n entity_key = html.Div(\n e, className=f\"is-size-4 msweb-is-{color}-txt has-text-weight-bold\"\n )\n entity_key_container = html.Div(\n entity_key, className=\"flex-column is-narrow has-margin-5 box\"\n )\n entities_keys.append(entity_key_container)\n\n entity_key_container = html.Div(\n entities_keys, className=\"columns is-multiline has-margin-5\"\n )\n\n results = html.Div(\n [\n warning,\n label_container,\n highlighted_container,\n label_label_container,\n entity_key_container,\n download_container,\n ]\n )\n return results", "def _analyze_document(self, document, train = False):\n # doc = structures.document.Document(document) +++++++++++++++++++\n # first normalize, then tokenize\n tokens = self._tokenizer.tokenize(self._normalize(document['Content']))\n\n for n in range(1, self.n + 1):\n for i in range(len(tokens) + 1 - n):\n ngram = self.stats.add_ngram(*tokens[i:i+n])\n ngram.value += 1", "def extract_entity(self,input_text):\r\n self.input_text = input_text\r\n tokenized = nltk.sent_tokenize(input_text)\r\n for i in tokenized:\r\n words = nltk.word_tokenize(i)\r\n tagged = nltk.pos_tag(words)\r\n n = []\r\n\r\n named_ent = nltk.ne_chunk(tagged)\r\n\r\n for chunk in named_ent:\r\n if hasattr(chunk, 'label'):\r\n chunk = chunk[0]\r\n (name,tag) = chunk\r\n if tag == 'NN':\r\n n.append(name)\r\n\r\n dictionary = [{\r\n 'text':input_text,\r\n 'entities':n\r\n }]\r\n \r\n namedEntities = mongo.db.namedEntities\r\n dictionary_add = {'name':dictionary}\r\n namedEntities.insert(dictionary_add)\r\n \r\n return named_ent", "def extract_entities(self, pages):\n\n selected_entity_types = [\"ORGANIZATION\", \"PERSON\", \"LOCATION\", \"DATE\"]\n\n final_entities = []\n for page in pages:\n #text = self.__get_clean_text_in_supported_language(page['Content'])\n\n text = page.get('Content')\n\n final_entities = self._call_comprehend(text)\n # detected_entities = comprehend.detect_entities(\n # Text=text,\n # LanguageCode=\"en\"\n # )\n\n # uncomment to see output of comprehend\n # print(detected_entities)\n\n # selected_entities = [x for x in detected_entities['Entities']\n # if x['Score'] > 0.9 and\n # x['Type'] in selected_entity_types]\n\n # for selected_entity in selected_entities:\n # clean_entity = {key: 
selected_entity[key]\n # for key in [\"Text\", \"Type\"]}\n # if clean_entity not in final_entities:\n # final_entities.append(clean_entity)\n\n return final_entities", "def get_text_entity_detection_data(request):\n request_data = json.loads(request.body)\n messages = request_data.get(\"messages\", [])\n bot_message = request_data.get(\"bot_message\")\n entities = request_data.get(\"entities\", {})\n target_language_script = request_data.get('language_script') or ENGLISH_LANG\n source_language = request_data.get('source_language') or ENGLISH_LANG\n\n data = []\n\n message_len = len(messages)\n\n if message_len == 1:\n\n # get first message\n message_str = messages[0]\n\n fallback_value_entities = {}\n text_value_entities = {}\n\n data.append({\"entities\": {}, \"language\": source_language})\n\n for each_entity, value in entities.items():\n ignore_message = value.get('ignore_message', False)\n\n if ignore_message:\n fallback_value_entities[each_entity] = value\n else:\n text_value_entities[each_entity] = value\n\n # get detection for text entities which has ignore_message flag\n if fallback_value_entities:\n output = get_output_for_fallback_entities(fallback_value_entities, source_language)\n data[0][\"entities\"].update(output)\n\n # get detection for text entities\n if text_value_entities:\n output = get_detection(message=message_str, entity_dict=text_value_entities,\n structured_value=None, bot_message=bot_message,\n language_script=source_language,\n target_language_script=target_language_script)\n data[0][\"entities\"].update(output[0])\n\n # check if more than one message\n elif len(messages) > 1:\n text_detection_result = get_detection(message=messages, entity_dict=entities,\n structured_value=None, bot_message=bot_message)\n\n data = [{\"entities\": x, \"language\": source_language} for x in text_detection_result]\n\n else:\n ner_logger.debug(\"No valid message provided\")\n raise KeyError(\"Message is required\")\n\n return data", "def ner_processing(nlp_model, files, target_dir=\"./\"):\n\n print(\"Running Named Entity Recognition on {} files\".format(len(files)), file=sys.stderr)\n\n all_entities, key_val_pairs = set(), []\n non_entity_caps = set() # used to store things that might have been missed by NER. Will get all things that are start of sentence caps, but those should all be pretty common so won't be picked up by thresholding anyway.\n #id2entity, id2string = [None], [args.unk]\n internal_whitespace = re.compile('(?!<=^)\\s+(?!=$)')\n\n for file in files:\n all_stories = []\n with open(file, \"r\") as infile:\n for line in infile:\n # string to entity stores an entity based on a string, entity ids is used to set ids by entity type\n string2entity, entity_ids, multi_word_ents = {}, Counter(), set()\n\n nlp_text = nlp_model(line)\n # find entities, assign entities with ids and make mappings, removing whitespace\n for entity in nlp_text.ents:\n ent_string = internal_whitespace.sub('_', entity.text).lower()\n # keep multi_word ones for later merging\n if '_' in ent_string:\n multi_word_ents.add(ent_string)\n\n if ent_string not in string2entity:\n # make a mapping where entity ids are local to each story. 
As stories are one per line\n string2entity[ent_string] = NEREntity(ent_string, entity.label_, entity_ids[entity.label_])\n entity_ids[entity.label_] += 1 # increment the id for each type\n\n entity.merge() # this merges entity tokens into one token if it spans multiple\n\n # TODO break out into sep function\n # if entities exist in multiword settings assume same entity and merge ids. This is to handle stuff like John Smith\n has_title = set() # this is to handle things like Mr. and Miss separately\n titles = {'mr', 'mrs', 'miss', 'sir', 'lady', 'lord', 'ms', 'dr', 'doctor',\n 'general', 'captain', 'father', 'count', 'countess', 'baron',\n 'baroness', 'king', 'queen', 'prince', 'princess', 'madam', 'earl'}\n\n # Clean up the mappings - this is basically cause NER isn't good enough and sometimes catches just a title\n for title in (titles & set(string2entity)):\n del string2entity[title]\n\n for ent_str in multi_word_ents:\n ent_tokens = ent_str.split('_')\n if ent_tokens[0] in titles:\n has_title.add(ent_str)\n continue\n for tok in ent_tokens:\n if tok in string2entity:\n if string2entity[tok].ent_type == string2entity[ent_str].ent_type:\n string2entity[tok].id = string2entity[ent_str].id\n for ent_str in has_title:\n ent_tokens = ent_str.split('_')\n for i in range(1, len(ent_tokens)):\n tok = ent_tokens[i]\n if tok in string2entity:\n if string2entity[tok].ent_type == string2entity[ent_str].ent_type:\n string2entity[tok].id = string2entity[ent_str].id\n\n # Heuristic, if a string is not an entity but is capitalized it might be an entity. The particularly effects entities that appear only at the start of sentences.\n non_entity_caps.update(set([internal_whitespace.sub('_', tok.text).lower() for tok in nlp_text\n if (tok.ent_type == 0 and tok.text.istitle())]))\n\n # make substitutions\n full_story = []\n for tok in nlp_text:\n new_tok = internal_whitespace.sub('_', tok.text)\n if new_tok.lower() in string2entity:\n new_tok = str(string2entity[new_tok.lower()])\n full_story.append(new_tok)\n #full_story = [internal_whitespace.sub('_', tok.text) for tok in nlp_text] # can add if tok.ent_type > 1 if want to only do this for entities\n all_stories.append(\" \".join(full_story))\n\n # store all entity replacements for later special characters\n all_entities.update(set([str(val) for val in string2entity.values()]))\n # and for writing out\n key_val_pairs.extend([\"{} {}\".format(key, str(val)) for key, val in\n sorted(string2entity.items())])\n\n with open(file, \"w\") as outfile:\n outfile.write(\"\\n\".join(all_stories))\n print(\"Finished 1 file\", file=sys.stderr)\n print(\"Done\", file=sys.stderr)\n\n # Print string 2 entity file\n string2entity_file = \"string2entity.txt\"\n print(\"Writing {} to target_directory (defaults are this one and inputdir)\".format(string2entity_file), file=sys.stderr)\n with open(target_dir+string2entity_file, \"w\") as outfile:\n #key_val_pairs = [\"{} {}\".format(key, str(val)) for key, val in sorted(string2entity.items())]\n outfile.write(\"\\n\".join(key_val_pairs))\n\n return all_entities, non_entity_caps", "def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n # tokens = nltk.word_tokenize(sentence)\n tokens = GetNounPhrases(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n 
self.text_sentences.append(tokens_sentences)", "def transfer_document_annotations(self, tcfg):\n # build tokens for all sentences\n self.build_sntlist_tokens()\n # todo, add linguistic features here, ...\n # align the document with its sentences for NER\n self.align_document_with_sentences(verbose=tcfg.verbose)\n self.transfer_document_entity_mentions(verbose=tcfg.verbose)\n return", "def extract_entities(self, text):\n results = self.fetch(self.base_url, text)\n return [_ for _ in self.process_results(results)]", "def wordcloud():\n \n loc = input('Enter facebook archive extracted location: ')\n if not os.path.isdir(loc):\n print(\"The provided location doesn't seem to be right\")\n exit(1)\n \n fname = loc+'/comments/comments.json'\n if not os.path.isfile(fname):\n print(\"The file posts_and_commments.json is not present at the entered location.\")\n exit(1)\n\n with open(fname) as f:\n base_data = json.load(f)\n \n final_text = None\n final_comments = None\n languages = []\n ctr=0\n \n if \"comments\" in base_data:\n data = base_data[\"comments\"]\n \n for ele in data:\n if 'data' in ele:\n ctext = ele[\"data\"][0][\"comment\"][\"comment\"]\n try:\n b = detect(ctext)\n if b not in languages:\n languages.append(b)\n except LD_EXC:\n ctr+=1\n if final_comments is None:\n final_comments =\"\" + ctext\n else:\n final_comments = final_comments + \" \" + ctext\n words = word_tokenize(ctext)\n for w in words:\n if final_text is None:\n final_text =\"\" + PS.stem(w)\n else:\n final_text = final_text + \" \" + PS.stem(w)\n else:\n print(\"No Comments found in data\")\n \n fname = loc+'/posts/your_posts_1.json'\n if not os.path.isfile(fname):\n print(\"The file your_posts.json is not present at the entered location.\")\n exit(1)\n \n with open(fname) as f:\n base_data = json.load(f)\n\n if \"status_updates\" in base_data:\n data = base_data[\"status_updates\"]\n \n for ele in data:\n if \"data\" in ele:\n if \"post\" in ele[\"data\"][0]:\n try:\n b = detect(ele[\"data\"][0][\"post\"])\n #if b not in languages:\n languages.append(b)\n except LD_EXC:\n ctr+=1\n words = word_tokenize(ele[\"data\"][0][\"post\"])\n for w in words:\n if final_text is None:\n final_text =\"\" + PS.stem(w)\n else:\n final_text = final_text + \" \" + PS.stem(w)\n \n print(\"Your Most Common Language: \")\n print(max(languages,key=languages.count))\n \n if final_text != \"\":\n mask = np.array(Image.open(MASK_LOC))\n wordcloud = WordCloud(background_color = \"white\", collocations=False, mask = mask, max_font_size=300, relative_scaling = 1.0,\n stopwords = set(STOPWORDS)\n ).generate(final_text)\n image_colors = ImageColorGenerator(mask)\n \n plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation=\"bilinear\")\n plt.axis(\"off\")\n print(\"WordCloud of Your Comments & Posts text generated.\")\n plt.show()\n else:\n print(\"No Comments and Posts Text Found\")\n\n \n #Friends Tagged\n \n flist = []\n fname = loc+'/friends/friends.json'\n if not os.path.isfile(fname):\n print(\"The file friends.json is not present at the entered location.\")\n exit(1)\n with open(fname) as f:\n base_data = json.load(f)\n base_data = base_data[\"friends\"]\n for ele in base_data:\n fwords = word_tokenize(ele[\"name\"])\n if fwords[0]!=\"Md\" and fwords[0]!=\"Kumar\":\n flist.append(fwords[0])\n else:\n flist.append(fwords[1])\n \n if final_comments!=\"\":\n friend_names = \"\"\n for sent in nltk.sent_tokenize(final_comments):\n for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent))):\n if hasattr(chunk, 'label'):\n 
if(chunk.label()[0]=='P'):\n if ''.join(c[0] for c in chunk.leaves()) in flist:\n friend_names = friend_names + \" \" + ' '.join(c[0] for c in chunk.leaves())\n\n wordcloud = WordCloud(background_color = \"white\", mask = mask,relative_scaling = 1.0,\n stopwords = set(STOPWORDS)\n ).generate(friend_names)\n\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n print(\"WordCloud of Your friends mostly tagged by you\")\n plt.show()\n else:\n print(\"No Comments and Posts Text Found\")", "def transform_doc(self, document):\n title, abstract = self._cleaned_document_words(document)\n features = {\n 'title':\n self._text_features(title, self.max_title_len),\n 'abstract':\n self._text_features(abstract, self.max_abstract_len),\n 'authors':\n [\n self.author_to_index[author] for author in document.authors\n if author in self.author_to_index\n ],\n 'venue':\n [self.venue_to_index.get(document.venue, 0)],\n 'keyphrases':\n [\n self.keyphrase_to_index[keyphrase]\n for keyphrase in document.key_phrases\n if keyphrase in self.keyphrase_to_index\n ]\n }\n\n return features", "def entity_sentiment_text(text):\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n document = types.Document(\n content=text.encode('utf-8'),\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detect and send native Python encoding to receive correct word offsets.\n encoding = enums.EncodingType.UTF32\n if sys.maxunicode == 65535:\n encoding = enums.EncodingType.UTF16\n\n result = client.analyze_entity_sentiment(document, encoding)\n for entity in result.entities:\n# print('Mentions: ')\n print(u'Name: \"{}\"'.format(entity.name))\n for mention in entity.mentions:\n# print(u' Begin Offset : {}'.format(mention.text.begin_offset))\n print(u' Content : {}'.format(mention.text.content))", "def fit_on_texts(self, texts):\n for text in texts:\n self.document_count += 1\n if self.char_level or isinstance(text, list):\n if self.lower:\n if isinstance(text, list):\n text = [text_elem.lower() for text_elem in text]\n else:\n text = text.lower()\n seq = text\n else:\n seq = self.text_to_word_sequence(text)\n for w in seq:\n if w in self.word_counts:\n self.word_counts[w] += 1\n else:\n self.word_counts[w] = 1\n for w in set(seq):\n # In how many documents each word occurs\n self.word_docs[w] += 1\n\n wcounts = list(self.word_counts.items())\n wcounts.sort(key=lambda x: x[1], reverse=True)\n # forcing the oov_token to index 1 if it exists\n if self.oov_token is None:\n sorted_voc = []\n else:\n sorted_voc = [self.oov_token]\n sorted_voc.extend(wc[0] for wc in wcounts)\n\n # note that index 0 is reserved, never assigned to an existing word\n self.word_index = dict(\n list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))\n\n self.index_word = dict((c, w) for w, c in self.word_index.items())\n\n for w, c in list(self.word_docs.items()):\n self.index_docs[self.word_index[w]] = c", "def topic_entities(doc):\n\n url = 'http://model.dbpedia-spotlight.org/en/annotate'\n only_place_filter = {\n 'policy': \"whitelist\",\n 'types': \"schema:Place\",\n 'coreferenceResolution': False\n }\n documents = topic_documents(doc)\n\n for index in range(len(documents)):\n document = documents[index]\n try:\n entities = dict()\n for e in spotlight.annotate(url, document, confidence=0.5, support=50):\n entities[e['surfaceForm']] = e['URI']\n\n except (spotlight.SpotlightException, HTTPError):\n entities = {}\n doc['topics'][index]['entities'] = list(entities.items())\n\n return doc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract information about an entity.
def extract_entity_info(self, entity): type = entity['type'] name = entity['name'].lower() metadata = entity['metadata'] salience = entity['salience'] wiki_url = metadata.get('wikipedia_url', None) return (type, name, salience, wiki_url)
[ "def entity_dict(entity):\n return {'id': entity.key().id(),\n 'url': entity.url,\n 'regex': entity.regex,\n 'phone': entity.phone,\n 'ctime': entity.ctime,\n 'mtime': entity.mtime,\n 'status': entity.status}", "def process_entity(self, entity):\n return entity", "def get_entities(request):\n logging.info('views.get_entities')\n return get_data_json('demo__definition', 'label,numeric', 'category=\\'EntityType\\'', None)", "def entity_tostring(self, entity):\n\n metadata = \", \".join(['\"%s\": \"%s\"' % (key, value) for\n key, value in entity.metadata.items()])\n\n mentions = \", \".join(['\"%s\"' % mention for mention in entity.mentions])\n\n return ('{name: \"%s\",'\n ' type: \"%s\",'\n ' metadata: {%s},'\n ' salience: %s,'\n ' mentions: [%s]}') % (\n entity.name,\n entity.type,\n metadata,\n entity.salience,\n mentions)", "async def _get_entity(self, entity_name: str) -> List[Dict]:\n logging.info(f\"Getting movie entity: {entity_name}\")\n request_url = f\"{self.base_url}/{entity_name}\"\n return await self.make_get_request(request_url)", "def parse_entites_card(self, card) -> list: #returns a list of list with entity, title and additional information\n entities = []\n nameCounter = 1\n for entity_id in card['entities']:\n if type(entity_id) == dict: \n entity_id = entity_id['entity'] \n if self.checkSupported(entity_id):\n if 'title' in card: \n title = self.truncate_name(f'{card[\"title\"]} {nameCounter}')\n nameCounter += 1\n else: \n title = None\n additional_information = {title: 'title', 'type': card['type']}\n entities.append([entity_id, title, additional_information])\n return entities", "def Entity(self) -> _n_0_t_1:", "def retrieve_entities(model):\n entity_names_and_titles = []\n for entity in sorted(model[\"entities\"].keys()):\n entity_names_and_titles.append(\n \"{}\\t{}\".format(entity, model[\"entities\"][entity][\"entity_title\"])\n )\n if model[\"entities\"][entity][\"is_main_entity\"] is True:\n main_entity = entity\n return entity_names_and_titles, main_entity", "def __get_data(self):\n ent = self.__entity_ref()\n return self.get_state_data(ent)", "def get_info(self, factory):\n return factory.get_info()", "def contact_details_for_entity(context):\n request = context['request']\n entity = entity_for_page(request.current_page)\n contacts = Membership.objects.filter(entity = entity, key_contact = True).order_by('membership_order')\n address = address_for_entity(entity)\n return {\n 'entity' : entity,\n 'address': address,\n 'contacts': contacts\n }", "def get_state_data(cls, entity):\n attrs = get_domain_class_attribute_iterator(type(entity))\n return dict([(attr,\n get_nested_attribute(entity, attr.entity_attr))\n for attr in attrs\n if not attr.entity_attr is None])", "def describe(entity=None):\n hlf.help(entity)", "def GetEntity(self, name):\n return self.generalEntities.get(name, None)", "def getBaseEntity(entity):\n if entity is None:\n return entity\n return dict([(k, v) for k, v in entity.items() if k in ['id', 'type']])", "def get_info(self) -> ProviderInfo:", "def prepare_entity(self, entity):\n pass", "def get_company_info():\n return _get(\"info\")", "def entity_number(self) -> int:\n return self.entity[1]", "def get_entry_info(entry):\n\n summary = get_entry_summary(entry)[0]\n plan = get_entry_plan(entry)[0]\n tasks = get_entry_tasks(entry)[0]\n completed_tasks = get_entry_completed_tasks(entry)[0]\n knowledges = get_entry_knowledge(entry)[0]\n failure_points = get_entry_failure_points(entry)[0]\n\n return EntryContent(summary, plan, tasks, 
completed_tasks, knowledges, failure_points, entry.time_created)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output some info about the entities by querying the generated sqlite3 database.
def output_entity_data(self): with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: # This query finds the number of times each entity name was # detected, in descending order by count, and returns information # about the first 15 names, including the files in which they were # found, their detected 'salience' and language (locale), and the # wikipedia urls (if any) associated with them. print('\n==============\nTop 15 most frequent entity names:') cursor = conn.cursor() results = cursor.execute( 'select name, count(name) as wc from entities ' 'group by name order by wc desc limit 15;') for item in results: cursor2 = conn.cursor() print(u'\n----Name: {} was found with count {}'.format(*item)) results2 = cursor2.execute( 'SELECT name, type, filename, locale, wiki_url, salience ' 'FROM entities WHERE name=?', (item[0],)) urls = set() for elt in results2: print(('Found in file {}, detected as type {}, with\n' ' locale {} and salience {}.').format( elt[2], elt[1], elt[3], elt[5])) if elt[4]: urls.add(elt[4]) if urls: print('url(s): {}'.format(urls)) # This query finds the number of times each wikipedia url was # detected, in descending order by count, and returns information # about the first 15 urls, including the files in which they were # found and the names and 'salience' with which they were # associated. print('\n==============\nTop 15 most frequent Wikipedia URLs:') c = conn.cursor() results = c.execute( 'select wiki_url, count(wiki_url) as wc from entities ' 'group by wiki_url order by wc desc limit 15;') for item in results: cursor2 = conn.cursor() print('\n----entity: {} was found with count {}'.format(*item)) results2 = cursor2.execute( 'SELECT name, type, filename, locale, salience ' 'FROM entities WHERE wiki_url=?', (item[0],)) names = set() salience = set() for elt in results2: print(('Found in file {}, detected as type {}, with\n' ' locale {}.').format(elt[2], elt[1], elt[3])) names.add(elt[0]) salience.add(elt[4]) print('names(s): {}'.format(names)) print('salience measure(s): {}'.format(salience))
[ "def info_database(self):\n for x in self.list_databases:\n print(\"%50s: %s\" %( x['definition'], x['entry_id']))", "def show_all():\n command = 'select * from entries'\n c = get_db()\n print c.execute(command).fetchall()", "def print_entities(self) -> None:\n for row in self.maze_data:\n for ent in row:\n print(ENTITY_NAME[row[ent]], end=\"\\t\")\n print(\"\\n\")", "def db_info_command(self, args: Namespace, extra_args: List[str], argv: List[str]) -> None:\n backend = setup_backend_db(args.config, args.repo)\n backend.create_engine()\n\n version = backend.get_db_version()\n min_version, max_version = backend.get_db_version_required()\n is_compat = backend.is_db_compatible()\n\n self.display(f\"redun :: version {redun.__version__}\")\n self.display(f\"config dir: {get_config_dir(args.config)}\")\n self.display()\n self.display(f\"db version: {version} '{version.description}'\")\n self.display(f\"CLI requires db versions: >={min_version},<{max_version.major + 1}\")\n self.display(f\"CLI compatible with db: {is_compat}\")", "def test_sqlitedb_get_entities():\n # Query entities\n sqlitedb = Database(SqliteDB(db_file))\n entities = sqlitedb.get_entities()\n\n assert len(entities) == 2, \"number of entities queried is not equal to 2\"", "def display(self):\n\n with self.Session.begin() as session:\n inspector = inspect(self.engine)\n schemas = inspector.get_schema_names()\n main = [{table_name: inspector.get_columns(table_name, schema=schema) for table_name in inspector.get_table_names(schema=schema)} for schema in schemas]\n for i in main[0]:\n print(i)\n display(pd.read_sql_table(i, session.bind))\n print(\"\\n\\n\")", "def showent(self):\n for e in self.entities:\n e.show()", "def __print_annotations__(self):\n select = \"SELECT * from classifications where project_id=\"+str(self.project_id)\n cur = self.postgres_session.cursor()\n cur.execute(select)\n\n for record in cur.fetchall():\n print record", "def info( self ) :\n\n print \"yi = %2d symbol = %s name = %s\" % ( self.yi, self.yiTags[2], self.yiTags[3] )\n print \"Database directory =\", self.database\n print \"Working directory =\", self.workDir\n print \"delWorkDirWhenDone =\", self.delWorkDirWhenDone", "def _show_contents(self):\n for database_name in self._mk.connection.database_names():\n print 'DATABASE: %s' % database_name\n db = self._mk.connection[database_name]\n for collection_name in db.collection_names():\n print 'COLLECTION: %s' % collection_name\n collection = db[collection_name]\n count = collection.count()\n if count == 1:\n print '%d document' % collection.count()\n else:\n print '%d documents' % collection.count()", "def main():\n cur, conn = setUpDatabase('music.db')\n set_up_tables(cur, conn)\n set_up_artist_id_table(cur, conn)\n fillup_hot_100_table(cur, conn)\n\n write_data_to_file(\"music_data.txt\", cur, conn)\n conn.close()", "def info():\n session = Session()\n\n total = session.query(Song).count()\n in_process = session.query(Song).filter(Song.file_location == '*').count()\n to_download = session.query(Song).filter(Song.file_location == None).count()\n print('{:6} total songs.'.format(total))\n print('{:6} in process.'.format(in_process))\n print('{:6} to download.'.format(to_download))", "def show(self):\n for t in self.get_tables_names():\n print t\n for c in self.get_table_column_names(t):\n print \" |_ {0}\".format(c)", "def view_student_t():\n\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n\n for e in c.execute('SELECT * FROM {}'.format(STUDENTTNAME)):\n yield e\n\n conn.commit()\n 
conn.close()", "def show_entries():\n db = get_db()\n cur = db.execute(\"Select * from entries\")\n entries = cur.fetchall()\n return render_template(\"show_entries.html\", entries=entries)", "def _show_contents_lengthy(self):\n for database_name in self._mk.connection.database_names():\n print 'DATABASE: %s\\n' % database_name\n db = self._mk.connection[database_name]\n for collection_name in db.collection_names():\n print 'COLLECTION: %s' % collection_name\n collection = db[collection_name]\n count = collection.count()\n curs = collection.find()\n for row in curs:\n for key in row:\n print key, ':', row[key]\n print '\\n'", "def inspect_database(detailed=False):\n from glob import glob\n\n for f in glob(config.paths.database + '*'):\n print(f'{f}')", "def show_all_vendors():\n\n with StoreDatabase() as db:\n rows = db.query('SELECT * FROM vendor')\n print()\n print(f\"{'vend_code': <10} {'vend_addr':^20} {'vend_name':^20}\")\n print('========================================================')\n for row in rows:\n print(f'{row[0]: <10} {str(row[1]):^20} {row[2]:^20}')", "def show(self, argv):\n if len(argv) < 2:\n self._print(\" Name:\", self._obj.sysName)\n self._print(\" ObjectID:\", get_oidname(self._obj.sysObjectID))\n self._print(\" Location:\", self._obj.sysLocation)\n self._print(\" Contact:\", self._obj.sysContact)\n self._print(\"Description:\", self._obj.sysDescr)\n else:\n item = argv[1]\n if item.startswith(\"scal\"):\n self._ui.print_list(self._obj.get_scalar_names())\n elif item.startswith(\"tab\"):\n self._ui.print_list(self._obj.get_table_names())\n elif item.startswith(\"not\"):\n self._ui.print_list(self._obj.get_notification_names())\n elif item.startswith(\"sysor\"):\n for sysor_entry in self._obj.getall(\"sysOR\"):\n self._print(sysor_entry.sysORDescr)\n try:\n self._print(get_oidname(sysor_entry.sysORID))\n except AttributeError:\n pass # XXX I think this is a bug workaround\n self._print(\"\\n\")\n elif item.startswith(\"int\"):\n tbl = self._obj.get_interface_table()\n self._print(tbl)\n elif item.startswith(\"all\"):\n tbl = self._obj.get_table(argv[2])\n self._print(tbl)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group a list into batches of size batch_size. >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2)) ((1, 2), (3, 4), (5))
def batch(list_to_batch, batch_size=BATCH_SIZE): for i in range(0, len(list_to_batch), batch_size): yield tuple(list_to_batch[i:i + batch_size])
[ "def do_batches(alist: List, batch_size: int) -> List:\n for i in range(0, len(alist), batch_size):\n if i + batch_size <= len(alist):\n yield alist[i:i + batch_size]\n else:\n yield alist[i:len(alist)]", "def batcher(iterator, batchsize):\n it = iter(iterator)\n while True:\n batch = tuple(itertools.islice(it, batchsize))\n if not batch:\n return\n yield batch", "def divide_into_batches(batch_count, data_batches, outputs_batches, data, outputs):\n l = len(data);\n batch_size = int(l / batch_count);\n for i in range(0, batch_count-1):\n data_batches.append(data[i * batch_size : (i + 1) * batch_size]);\n outputs_batches.append(outputs[i * batch_size : (i + 1) * batch_size]);\n\n data_batches.append(data[(batch_count - 1) * batch_size : l]);\n outputs_batches.append(outputs[(batch_count - 1) * batch_size : l]);", "def split_batches(data, batch_size):\n batch = []\n for row in data:\n batch.append(row)\n if len(batch) == batch_size:\n yield batch\n batch.clear()\n if len(batch) > 0:\n yield batch", "def __divide_batches(data, batch_size):\n data = np.stack(data, axis=0)\n return [data[i:i + batch_size] for i in range(0, int(data.shape[0]), batch_size - 1)]", "def prepare_list(self, list_images, size = 1000):\n batch = []\n list_batches = []\n counter = 0\n for item, img in enumerate(list_images):\n batch.append(img)\n counter += 1\n if counter >= size:\n counter = 0\n list_batches.append(batch)\n batch = []\n elif item == len(list_images) - 1:\n list_batches.append(batch)\n return list_batches", "def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):\n if batch_size_fn is None:\n def batch_size_fn(new, count, sofar):\n return count\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far >= batch_size:\n overflowed = 0\n if size_so_far > batch_size:\n overflowed += 1\n if batch_size_multiple > 1:\n overflowed += (\n (len(minibatch) - overflowed) % batch_size_multiple)\n if overflowed == 0:\n yield minibatch\n minibatch, size_so_far = [], 0\n else:\n if overflowed == len(minibatch):\n logger.warning(\n \"The batch will be filled until we reach %d,\"\n \"its size may exceed %d tokens\"\n % (batch_size_multiple, batch_size)\n )\n else:\n yield minibatch[:-overflowed]\n minibatch = minibatch[-overflowed:]\n size_so_far = 0\n for i, ex in enumerate(minibatch):\n size_so_far = batch_size_fn(ex, i + 1, size_so_far)\n if minibatch:\n yield minibatch", "def gen_batches(words, batch_size, window_size=5):\n\n n_batches = len(words)//batch_size\n if len(words) % batch_size != 0:\n n_batches += 1\n\n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield np.array(x), np.array(y)", "def shuffle_and_batch(items: List[T], batch_size: int,\n rng: Optional[random.Random] = None) \\\n -> Iterator[List[T]]:\n\n todo = list(range(len(items)))\n if rng is not None:\n rng.shuffle(todo)\n while todo:\n indices = todo[:batch_size]\n todo = todo[batch_size:]\n items_batch = [items[i] for i in indices]\n yield items_batch", "def getBatch(ITERABLE, COUNT=10):\n LENGTH = len(ITERABLE)\n # yield the next batch\n for NEXT in range(0, LENGTH, COUNT):\n yield ITERABLE[NEXT:min(NEXT + COUNT, LENGTH)]", "def _chunks(lst: List[T], chunk_size: int) -> Generator[List[T], None, None]:\n return (lst[i:i + 
chunk_size] for i in range(0, len(lst), chunk_size))", "def as_batches(parallel_data, batch_size, sequence_length):\n positions = []\n for i, data in enumerate(parallel_data):\n positions.extend([(i, start_pos)\n for start_pos in range(len(data) - sequence_length)])\n np.random.shuffle(positions)\n\n for i in range(math.ceil(len(positions) / batch_size)):\n batch = [\n parallel_data[index][start: start + sequence_length]\n for index, start in positions[i * batch_size: (i + 1) * batch_size]]\n yield batch", "def getBatches(data, batchSize):\n\t\n\trandom.shuffle(data)\n\n\tbatches = []\n\tfor i in range(len(data) // batchSize):\n\t\tbatches.extend(getSmallSetofBatch(data[i*batchSize : (i+1)*batchSize], batchSize))\n\n\treturn batches", "def split_batch(self, batch_size):\n random.shuffle(list(self.imgs))\n splits = list()\n for i in range(0, len(self), batch_size):\n upper_bound = min(len(self), i + batch_size)\n splits.append(set(list(self.imgs)[i:upper_bound]))\n return splits", "def get_batch(batch_size, data):\n s_index = 0\n e_index = batch_size\n if isinstance(data, np.ndarray):\n while e_index < len(data):\n batch = data[s_index: e_index]\n temp = e_index\n e_index = e_index + batch_size\n s_index = temp\n yield batch\n elif (isinstance(data, tuple) or isinstance(data, list)) \\\n and isinstance(data[0], np.ndarray):\n while e_index < len(data[0]):\n batch = []\n for one in data:\n batch.append(one[s_index: e_index])\n temp = e_index\n e_index = e_index + batch_size\n s_index = temp\n yield batch\n else:\n print(\"check data type !!!\")\n sys.exit(1)", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def prepare_batches(sequences, batch_size):\n n_sequences = len(sequences)\n for i in range(0, n_sequences, batch_size):\n batch = sequences[i:i+batch_size]\n\t#needs to be in sorted order for packing batches to work\n batch = sorted(batch, key = len, reverse=True)\n input_sequences, target_sequences = [], []\n\n for sequence in batch:\n input_sequences.append(sequence[:-1])\n target_sequences.append(sequence[1:])\n\n yield input_sequences, target_sequences", "def _split_into_groups(iterable, group_size):\n for g, group in itertools.groupby(\n enumerate(iterable),\n lambda items: items[0] // group_size\n ):\n yield [item for (i, item) in group]", "def batchify(x, batch_size=5, train_len=100):\n # Create extra dimension; as if making a list\n # of the `train_len`-long mini-sequences\n seq_ixs = np.arange(0, len(x), train_len)[:-1]\n batchified = np.stack([x[six:six + train_len] for six in seq_ixs])\n\n batch_ixs = np.arange(0, len(batchified), batch_size)[:-1]\n return [batchified[bix:batch_size] for bix in batch_ixs]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update All Running Statistics Table after raceday Parameter
def Update_Running_Stat(Dataset): for RARID, Race in Dataset.groupby('RARID'): Horse_ELO = [] HNAME_List = '('+str(Race['HNAME'].tolist())[1:-1]+')' JNAME_List = '('+str(Race['JNAME'].tolist())[1:-1]+')' SNAME_List = '('+str(Race['SNAME'].tolist())[1:-1]+')' Dist_ELO = 'HELO_'+str(Race.loc[:,'RADIS'].values[0]) Sur_ELO = 'HELO_' + Race.loc[:,'RATRA'].apply(lambda x : 'TURF' if x == 'T' else 'AW').values[0] GOG_ELO = 'HELO_' + Going_map[Race.loc[:,'RAGOG'].values[0].strip()] PFL_ELO = str(Race.loc[:,'RALOC'].values[0])+'_'+str(Race.loc[:,'RADIS'].values[0])+'_'\ +str(Race.loc[:,'RATRA'].values[0]) for Target in ['HELO',Dist_ELO,Sur_ELO,GOG_ELO,PFL_ELO]: Horse_ELO.append(Calculate_HELO(Target, Race, K = 128)) HELO_DF = reduce(lambda x, y: pd.merge(x, y, on = 'HNAME'), Horse_ELO) JELO_DF = Calculate_JELO(Race, K = 128) SELO_DF = Calculate_SELO(Race, K = 128) #Update HELO Score to Database HPrior_DF = Extraction_Database(""" Select * from RS_HORSE_ELO where HNAME in {HNAME_List} """.format(HNAME_List = HNAME_List)) HPrior_DF = HPrior_DF.loc[:,[i for i in HPrior_DF.columns if i not in HELO_DF.columns[1:]]] HELO_DF = HELO_DF.merge(HPrior_DF, how='left') General_Query_Database(""" DELETE FROM RS_HORSE_ELO where HNAME in {HNAME_List} """.format(HNAME_List = HNAME_List)) Load_Dataset_toDatabase('RS_HORSE_ELO',HELO_DF) #Update JELO Score to Database JPrior_DF = Extraction_Database(""" Select * from RS_JOCKEY_ELO where JNAME in {JNAME_List} """.format(JNAME_List = JNAME_List)) JPrior_DF = JPrior_DF.loc[:,[i for i in JPrior_DF.columns if i not in JELO_DF.columns[1:]]] JELO_DF = JELO_DF.merge(JPrior_DF, how='left') General_Query_Database(""" DELETE FROM RS_JOCKEY_ELO where JNAME in {JNAME_List} """.format(JNAME_List = JNAME_List)) Load_Dataset_toDatabase('RS_JOCKEY_ELO',JELO_DF) #Update SELO Score to Database SPrior_DF = Extraction_Database(""" Select * from RS_STABLE_ELO where SNAME in {SNAME_List} """.format(SNAME_List = SNAME_List)) SPrior_DF = SPrior_DF.loc[:,[i for i in SPrior_DF.columns if i not in SELO_DF.columns[1:]]] SELO_DF = SELO_DF.merge(SPrior_DF, how='left') General_Query_Database(""" DELETE FROM RS_STABLE_ELO where SNAME in {SNAME_List} """.format(SNAME_List = SNAME_List)) Load_Dataset_toDatabase('RS_STABLE_ELO',SELO_DF) return None
[ "def update(self, df: pd.DataFrame):\n for stat in self._statistics.values():\n stat.update(df)", "def update_all():\r\n \r\n # Delete everything in summary table\r\n q_string = \"\"\"\r\n\tTRUNCATE summary;\r\n \"\"\"\r\n try:\r\n cursor.execute(q_string)\r\n except:\r\n print(\"ERROR: Could not delete summary table data\")\r\n sys.exit()\r\n print(\"Summary table truncated.\")\r\n \r\n # Determine date range to use from query table in database\r\n q_string = \"\"\"\r\n SELECT max(qdate), min(qdate)\r\n FROM query;\r\n \"\"\"\r\n try:\r\n cursor.execute(q_string)\r\n result = cursor.fetchall()\r\n except:\r\n print(\"ERROR: Could not fetch dates from query table\")\r\n sys.exit()\r\n \r\n dates = pandas.date_range(start=result[0][1], end=result[0][0])\r\n date1 = datetime.datetime.strftime(result[0][0], '%Y-%m-%d')\r\n date2 = datetime.datetime.strftime(result[0][1], '%Y-%m-%d')\r\n \r\n # Pull in all new summary data using commit_summary() on a loop.\r\n print('Please be patient, populating summary table from {} to {}.'.format(date2, date1))\r\n \r\n for date in dates:\r\n try:\r\n pulldate = datetime.datetime.strftime(date, '%Y-%m-%d')\r\n commit_summary(pulldate)\r\n except:\r\n print(\"ERROR: Missing date {} from database.\".format(pulldate))\r\n pass\r\n \r\n print('Added summary table data for date range: {} to {}.'.format(date2, date1))\r\n \r\n # Update state and candidate table based on the most recent date from\r\n # query table in database.\r\n update_state_winner(date1)\r\n print('Updated state table with candidate winner using 7 day average.')\r\n update_candidate_delegates(date1)\r\n print('Updated average number of delegates per candidate over 7 days.')\r\n \r\n print('Complete.')", "def run_update(self, **kw):\n\n for task in self.preprocessed_task_list:\n qubit = [qb for qb in self.meas_objs if qb.name == task['qb']][0]\n T1 = self.analysis.proc_data_dict['analysis_params_dict'][\n qubit.name]['T1']\n qubit.set(f'T1{task[\"transition_name\"]}', T1)", "def _setDayStats(self, dayStatsDict, lastUpdate):\n\n _sod = dayStatsDict.timespan.start\n\n # Using the _connection as a context manager means that\n # in case of an error, all tables will get rolled back.\n with weedb.Transaction(self.connection) as _cursor:\n\n # For each stats type...\n for _stats_type in self.statsTypes:\n # ... get the stats tuple to be written to the database...\n _write_tuple = (_sod,) + dayStatsDict[_stats_type].getStatsTuple()\n # ... and an appropriate SQL command ...\n _sql_replace_str = _sql_replace_string_factory(self.schema, _stats_type)\n # ... and write to the database.\n _cursor.execute(_sql_replace_str,_write_tuple)\n \n # Set the unit system if it has not been set before. \n # To do this, first see if this file has ever been used:\n last_update = self._getLastUpdate(_cursor)\n if last_update is None:\n # File has never been used. Set the unit system:\n _cursor.execute(meta_replace_str, ('unit_system', str(int(dayStatsDict.unit_system))))\n else:\n # The file has been used. 
Make sure the new data uses\n # the same unit system as the database.\n unit_system = self._getStdUnitSystem(_cursor)\n if unit_system != dayStatsDict.unit_system:\n raise ValueError(\"stats: Data uses different unit system (0x%x) than stats file (0x%x)\" % (dayStatsDict.unit_system, unit_system))\n # Update the time of the last stats update:\n _cursor.execute(meta_replace_str, ('lastUpdate', str(int(lastUpdate))))", "def update_assessor():\n\n ct = pd.read_sql(\"select parid from combined_table\", engine)\n par = pd.read_sql(\"select parcelid from sca_parcels\", engine)\n missing_parid = ct[ct.parid.isin(par.parcelid) == False].parid.tolist()\n \n assessor = {'sca_asmt':['aprland','aprbldg', 'class', 'rtotapr'],\n 'sca_comintext':['extwall'],\n 'sca_dweldat':['rmbed', 'fixbath', 'sfla', 'extwall', 'yrblt'],\n 'sca_legdat':['subdiv'],\n 'sca_owndat':[['own1','own1'],\n ['ownadr','adrno'],\n ['owndir','adrdir'],\n ['ownstr','adrstr'],\n ['ownsuf','adrsuf'],\n ['cityname','cityname'],\n ['statecode','statecode'],\n ['ownzip','zip1']],\n 'sca_pardat': ['adrno', 'adradd', 'adrdir', 'adrstr', 'adrsuf',\n 'zip1', 'zoning'],\n 'sca_comdat': ['yrblt']}\n engine.execute((\"alter table combined_table \"\n \"drop column if exists geom;\"\n \"select addgeometrycolumn('combined_table', 'geom', \"\n \"2274, 'point', 2);\"\n \"update combined_table set geom = \"\n \"st_transform(st_setsrid(st_point(coord[1],coord[2]),\"\n \"4326), 2274);\"\n \"create index gix_combined_table on combined_table \"\n \"using gist (geom)\"))\n \n for tbl, cols in assessor.iteritems():\n #build strings to be used in set clause and column selection in subquery\n if tbl != 'sca_owndat':\n new_vals = ', '.join(\"{0} = {1}.{0}\".format(col, tbl) for col in cols)\n col_select = ', '.join(col for col in cols)\n else:\n new_vals = ', '.join(\"{0} = {1}.{2}\".format(col[0],\n tbl, col[1]) for col in cols)\n col_select = ', '.join(col[1] for col in cols)\n missing = \"', '\".join(par for par in missing_parid)\n update_vals = {\"new_vals\": new_vals,\n \"col_select\": col_select,\n \"table\": tbl,\n \"missing\": missing ,\n \"where_clause\": \n {\"existing_clause\": \"ct.parid = {}.parid\".format(\n tbl),\n \"missing_clause\": (\"ct.parid in ('{0}') and \"\n \"st_within(geom, {1}.wkb_geometry)\").format(\n missing, tbl)\n }}\n\n update = (\"update combined_table ct set load_date = current_date, \"\n \"{new_vals} from (select parid, wkb_geometry, {col_select} \"\n \"from {table}, sca_parcels where parcelid=parid) {table} \"\n \"where {where_clause}\")\n update_aggregate = (\"update combined_table ct \"\n \"set load_date = current_date, \"\n \"mdnyrblt = {table}.mdnyr, numbldgs = num \"\n \"from (select parid, count(parid) num, \"\n \"median(yrblt)::integer mdnyr, wkb_geometry \"\n \"from {table}, sca_parcels where \"\n \"parid = parcelid group by parid, wkb_geometry) \" \n \"{table} where {where_clause}\")\n #drop end of update string and add nested dictionary key to run each\n #where clause seperately\n engine.execute((update[:-1]+\"[existing_clause]}\").format(**update_vals))\n engine.execute((update[:-1]+\"[missing_clause]}\").format(**update_vals))\n if tbl == 'sca_comdat':\n engine.execute((update_aggregate[:-1]+\"[existing_clause]}\").format(\n **update_vals))\n engine.execute((update_aggregate[:-1]+\"[missing_clause]}\").format(\n **update_vals))\n\n \n engine.execute(\"alter table combined_table drop column geom\")", "def run_update(self, **kw):\n\n for task in self.preprocessed_task_list:\n qubit = [qb for qb in self.meas_objs 
if qb.name == task['qb']][0]\n pulse_par = self.analysis.proc_data_dict['analysis_params_dict'][\n qubit.name]['qscale']\n if self.analysis.pulse_par_name == 'motzoi':\n qubit.set(f'{task[\"transition_name_input\"]}_motzoi', pulse_par)\n else:\n qubit.set(f'{task[\"transition_name_input\"]}_env_mod_freq',\n pulse_par)", "def refresh_snapshots(self):\r\n dimension_procs = ['usp_RefreshStudentRaceSnapshot','usp_RefreshStudentGenderSnapshot','usp_RefreshStudentHispanicSnapshot']\r\n for proc in dimension_procs:\r\n self.load_data_from_staging(proc_name=proc)\r\n logging.info(\"Refreshed snapshot tables from staging.\")", "def run_update(self, **kw):\n\n for task in self.preprocessed_task_list:\n qubit = [qb for qb in self.meas_objs if qb.name == task['qb']][0]\n amp180 = self.analysis.proc_data_dict['analysis_params_dict'][\n qubit.name]['piPulse']\n qubit.set(f'{task[\"transition_name_input\"]}_amp180', amp180)\n qubit.set(f'{task[\"transition_name_input\"]}_amp90_scale', 0.5)", "def update_rank(ranks):\n\n query = \"UPDATE global_data SET \"\n for i in range(1, 30):\n query += \"rank_d\" + str(i) + \" = \" + \"rank_d\" + str(i+1)\n if i != 29: query += \", \"\n\n try:\n conn = sqlite3.connect(DB_PATH) \n cur = conn.cursor()\n cur.execute(query)\n cur.execute('UPDATE global_data SET rank_d30=NULL')\n\n for row in tqdm(ranks):\n cur.execute('UPDATE global_data SET rank_d30=? WHERE url=?', (row[1], row[0]))\n \n print(\"Successfully update ranks\")\n conn.commit()\n except sqlite3.Error as error:\n print(error)\n finally:\n if (conn): conn.close()", "def update_valuehistory(self):\n from storage.models import Activity\n from course.models import CourseGroup\n from stats.models import ValueHistory\n from datetime import datetime, timedelta, date\n from collections import Counter\n\n # Returns a range of dates between two provided dates\n def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\n debug_out = open(\"../../../home/pepijn/update.log\", \"a\")\n def debug(msg):\n if debug_out is not None:\n debug_out.write(\"[%s] %s \\n\" % (datetime.now().isoformat(), msg))\n print(\"[%s] %s\" % (datetime.now().isoformat(), msg))\n\n today = datetime.combine(date.today(), datetime.min.time())\n # Retrieve all live course groups (ie episodes) and update each group separately\n course_groups = CourseGroup.objects.filter(end_date__gte=today)\n for course_group in course_groups:\n debug(\"Updating group: %s. 
Last updated: %s\" % (course_group.label, course_group.last_updated))\n\n # get members\n group_members = course_group.members.all().values_list('identification', flat=True)\n debug(\"Number of students: %i\" % len(group_members))\n\n # If updated for the first time initialize update date\n if course_group.last_updated == None:\n course_group.last_updated = datetime.combine(course_group.start_date-timedelta(days=1), datetime.min.time())\n course_group.save()\n # Get all course dates that have been passed up to today and create valuehistories\n for course_day in daterange(datetime.combine(course_group.last_updated + timedelta(days=1), datetime.min.time()), today + timedelta(days=1)):\n debug(\"Updating course day: %s\" % course_day)\n time_range = [course_day, course_day+timedelta(days=1)]\n # All activity from before the start date is allocated to first course date\n if course_day == datetime.combine(course_group.start_date, datetime.min.time()):\n time_range[0] -= timedelta(days=7)\n # Retrieve relevant activity instances for this variable.\n ignored_objects = IgnoredObject.objects.all().values_list(\n 'object_id', flat=True)\n activity_chunk = Activity.objects.exclude(\n activity__in=ignored_objects).filter(\n course=self.course.url,\n # course=\"http://studiegids.uva.nl/5082INKI6Y/\",\n # https://studiegids.uva.nl/5082INKI6Y/\n type=self.types.all()[0],\n verb=self.verbs.all()[0],\n time__range=time_range)\n if len(activity_chunk) == 0:\n debug(\"No activities found for this day (%s)\" % course_day)\n continue\n\n debug('Total activities: %i' % len(activity_chunk))\n annotated_value_history = []\n updated_students = []\n if len(activity_chunk) > 0:\n # First update the valhistory for all students that have new activity\n value_history, last_consumed = self.calculate_values_from_activities(\n activity_chunk)\n\n # If no activity was consumed, stop.\n if last_consumed is None:\n continue\n \n for value_history_item in value_history:\n group = course_group\n\n # Determine the attached student and create if not existent\n student_id = value_history_item.student\n student, _created = Student.objects.get_or_create(\n identification=student_id, defaults={\"label\": student_id})\n\n group.members.add(student)\n value_history_item.group = group\n # Set course timestamp relative to start\n if course_day == course_group.start_date: \n value_history_item.course_datetime = (timezone.make_aware((datetime.combine(course_day, datetime.min.time()))) -\n timezone.make_aware(\n datetime.combine(group.start_date,\n datetime.min.time())))\n else:\n value_history_item.course_datetime = (\n value_history_item.datetime -\n timezone.make_aware(\n datetime.combine(group.start_date,\n datetime.min.time())))\n \n\n annotated_value_history.append(value_history_item)\n updated_students.append(student_id)\n\n # Update the variable's last consumed activity info if first time ever or if new info in available\n latest_act = activity_chunk.latest('time')\n if (self.last_consumed_activity_timestamp == None and self.last_consumed_activity_pk == 0) or latest_act.time > self.last_consumed_activity_timestamp:\n self.last_consumed_activity_timestamp = latest_act.time\n self.last_consumed_activity_pk = latest_act.pk\n self.save()\n\n # Next update the val history for all students that did not have new activity\n # Value remains unchanged but will regardlessly be added to allow quick lookups\n remaining_students = [stud for stud in group_members if stud not in updated_students]\n for student_id in remaining_students:\n student = 
Student.objects.get(identification=student_id)\n personal_history = ValueHistory.objects.filter(student=student_id, variable=self)\n # if no valuehistory is present we skip the student. We are not making up data.. Adding zeros is risky..\n if len(personal_history) > 0:\n value_history_item = personal_history.latest('datetime')\n if value_history_item:\n value_history_item.pk = None\n actual_course_datetime = (timezone.make_aware((datetime.combine(course_day, datetime.min.time()))) -\n timezone.make_aware(\n datetime.combine(course_group.start_date,\n datetime.min.time())))\n value_history_item.group = course_group\n value_history_item.course_datetime = actual_course_datetime\n value_history_item.datetime = course_day\n annotated_value_history.append(value_history_item)\n group_members = course_group.members.all().values_list('identification', flat=True)\n # Update the database by adding the new ValueHistory instances\n ValueHistory.objects.bulk_create(annotated_value_history)\n annotated_value_history = []", "def process_travel_stats(overwrite=True):\n rates = {}\n hh_field = \"HH\"\n jobs_field = \"TotalJobs\"\n\n # Apply per cap/job rates to analysis years\n for year in YEARS:\n # Get SE DATA\n year_gdb = make_path(CLEANED, f\"PMT_{year}.gdb\")\n taz_table = make_path(year_gdb, \"EconDemog_TAZ\")\n out_table = make_path(year_gdb, \"TripStats_TAZ\")\n taz_df = table_to_df(taz_table, keep_fields=\"*\")\n\n # Get OD reference\n model_year = prep_conf.NET_BY_YEAR[year][1]\n if model_year not in rates:\n # Calculate per cap/per job vmt rates\n skim_csv = make_path(CLEANED, \"SERPM\", f\"SERPM_OD_{model_year}.csv\")\n taz_ref_csv = make_path(CLEANED, f\"PMT_{model_year}.gdb\", \"EconDemog_TAZ\")\n taz_ref = PMT.table_to_df(taz_ref_csv, keep_fields=\"*\")\n trips_field = \"TRIPS\"\n auto_time_field = prep_conf.SKIM_IMP_FIELD + \"_AU\"\n #tran_time_field = prep_conf.SKIM_IMP_FIELD + \"_TR\"\n dist_field = \"DIST\"\n rates_df = p_help.taz_travel_stats(\n od_table=skim_csv,\n o_field=prep_conf.SKIM_O_FIELD,\n d_field=prep_conf.SKIM_D_FIELD,\n veh_trips_field=trips_field,\n auto_time_field=auto_time_field,\n dist_field=dist_field,\n taz_df=taz_ref,\n taz_id_field=prep_conf.TAZ_COMMON_KEY,\n hh_field=hh_field,\n jobs_field=jobs_field,\n )\n rates[model_year] = rates_df\n\n # Multiply rates by TAZ activity\n rates_df = rates[model_year]\n taz_fields = [prep_conf.TAZ_COMMON_KEY, hh_field, jobs_field]\n loaded_df = rates_df.merge(\n taz_df[taz_fields], how=\"inner\", on=prep_conf.TAZ_COMMON_KEY\n )\n loaded_df[\"__activity__\"] = loaded_df[[hh_field, jobs_field]].sum(axis=1)\n loaded_df[\"VMT_FROM\"] = loaded_df.VMT_PER_ACT_FROM * loaded_df.__activity__\n loaded_df[\"VMT_TO\"] = loaded_df.VMT_PER_ACT_TO * loaded_df.__activity__\n loaded_df[\"VMT_ALL\"] = loaded_df[[\"VMT_FROM\", \"VMT_TO\"]].mean(axis=1)\n\n # Export results\n loaded_df = loaded_df.drop(columns=[hh_field, jobs_field, \"__activity__\"])\n df_to_table(df=loaded_df, out_table=out_table, overwrite=overwrite)", "def Update_All_Analysis():\r\n conn = connect_db()\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM stock;')\r\n stocks = cur.fetchall()\r\n\r\n for stock in stocks:\r\n Add_Analysis(conn, cur, stock[0])\r\n cur.close()\r\n print('Update all analysis success')", "def update_table_live(n):\n df = pd.DataFrame(list(cass_session.execute('select * from all_predictions')))\n df['predicted_labels'] = df['prediction'].map(labels_dict)\n top_6 = df['Label'].value_counts()[0:6]\n top_6_labels = top_6.index.tolist()\n all_top_6_data = 
df[df['predicted_labels'].isin(top_6_labels)]\n correct_predictions = all_top_6_data[all_top_6_data['predicted_labels']==all_top_6_data['Label']]\n true_predictions_count = []\n for label in top_6_labels:\n true_predictions_count.append(correct_predictions['Label'].value_counts()[label])\n prediction_accuracy = [str(round(true_predictions_count[i]*100/top_6[i], 1))+'%' for i in range(6)]\n top_6_counts = top_6.tolist()\n prediction_accuracy.insert(0, 'Accuracy')\n top_6_counts.insert(0, 'Total')\n top_6_labels.insert(0, 'Traffic Class')\n df1 = pd.DataFrame([prediction_accuracy, top_6_counts], columns=top_6_labels)\n\n return dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in df1.columns], #['Traffic Class', 'Prediction Accuracy', 'Total Count']],\n data=df1.to_dict(\"rows\"),\n style_header={'backgroundColor': 'rgb(30, 30, 30)',\n 'font-size': '180%',\n 'fontWeight': 'bold',\n 'textAlign': 'center',\n 'color': 'rgb(127, 219, 255)'},\n style_cell={\n 'backgroundColor': 'rgb(50, 50, 50)',\n 'color': 'white',\n 'textAlign': 'center',\n 'font-size': '150%'\n }\n )", "def refreshTables(self):\n pass", "def update_visit_table(self):\n pid = self.disp_model[\"pid\"]\n self.visit_table_data = self.sql.query.visit_by_pid(pid=pid)\n print(\"update_visit_table table data\\n\\t%s\" % self.visit_table_data)\n generic_fill_table(self.visit_table, self.visit_table_data)", "def update_database(fn):\n fn = \"../data/weekly_updates/\"+fn\n data = fwf.read_data(fn)\n df = fwf.split_read_combine(data)\n df_2 = filter_df(df,2)\n #search and replace filing number\n delete_log(df_2)\n dump_df(df)\n return", "def _update_helper(self, df_series, profile):\n self._update_column_base_properties(profile)", "def test_tub_update_df(tub):\n tub.update_df()\n assert len(tub.df) == 128", "def MonthlyUpdate (self):\n self.monitor.UpdateIOPlist(self.name,self.Attribute)\n self.monitor.UpdateMDlist(self.name,self.Attribute)\n self.monitor.UpdateIOPTargetlist(self.name,self.Attribute)\n self.monitor.UpdateVFCountdown(self.name,self.params)\n self.monitor.UpdateSideEffect(self.name,self.params)\n self.monitor.UpdateOverallStatus(self.name,self.medicalRecords)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct the rules for parallel processing to generate.
def construct_parallel_rules(self): for jobno, region in enumerate(self.get_regions()): params = dict(self.snakemake.params) params.setdefault("args", {}).update({"intervals": [region.human_readable()]}) output = { key: "job_out.{jobno}.d/out/tmp_{jobno}.{ext}".format(jobno=jobno, ext=ext) for key, ext in self.key_ext.items() } vals = { "input_": repr( { key: self._abs_path(getattr(self.snakemake.input, key)) for key in ("vcf", "tbi", "bam") } ), "jobno": jobno, "params": repr(params), "output": repr(output), "wrapper_prefix": "file://" + self.wrapper_base_dir, "inner_wrapper": self.inner_wrapper, "resources": repr(self.res_converter(self.job_resources).to_res_dict()), } yield textwrap.dedent( r""" rule chunk_{jobno}: input: **{input_}, output: touch("job_out.{jobno}.d/.done"), **{output} params: **{params} wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/{inner_wrapper}' cluster_config['chunk_{jobno}'] = {resources} """ ).format(**vals).lstrip()
[ "def construct_parallel_rules(self):\n for jobno, region in enumerate(self.get_regions()):\n params = dict(self.snakemake.params)\n params.setdefault(\"args\", {}).update({\"intervals\": [region.human_readable()]})\n output = {\n key: \"job_out.{jobno}.d/out/tmp_{jobno}.{ext}\".format(jobno=jobno, ext=ext)\n for key, ext in self.key_ext.items()\n }\n vals = {\n \"input_bam\": repr(self.snakemake.input.normal_bam),\n \"jobno\": jobno,\n \"params\": repr(params),\n \"output\": repr(output),\n \"wrapper_prefix\": \"file://\" + self.wrapper_base_dir,\n \"inner_wrapper\": self.inner_wrapper,\n \"resources\": repr(self.res_converter(self.job_resources).to_res_dict()),\n }\n yield textwrap.dedent(\n r\"\"\"\n rule chunk_{jobno}:\n input:\n {input_bam},\n output:\n touch(\"job_out.{jobno}.d/.done\"),\n **{output}\n params:\n **{params}\n wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/{inner_wrapper}'\n\n cluster_config['chunk_{jobno}'] = {resources}\n \"\"\"\n ).format(**vals).lstrip()", "def build_general_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1a'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('General rules', rules_dict, rules_order)\n return rules", "def build_rules():\n rules = dict(general=build_general_rules(),\n goci=build_goci_rules(),\n meris=build_meris_rules(),\n modis=build_modis_rules(),\n seawifs=build_seawifs_rules(),\n viirs=build_viirs_rules())\n return rules", "def rule_generation(items, minconf):\n pass", "def create_parallel_2(self):\n\n # first obtain all parallel classes developing through Galois Field\n # but set of parallel classes are super set of real parallel classes at the moment\n self.create_parallel_1()\n\n real_classes = {} # i.e. 0 { 'a_': (1, 8, 15) , 'b0,1': (2, 3, 5), ... }\n remainder_triples = {} # i.e. a2 { 0: (3, 12, 16), 1: (4, 13, 17), ... 
}\n\n # split super set to get real parallel classes and remainder triples\n for class_key, class_blocks in self.classes.items():\n real_classes[class_key] = {}\n\n for block_key, block_triple in class_blocks.items():\n i = block_key[1]\n\n # 'a_i' blocks logic\n if block_key[0] == 'a':\n if i == '_' \\\n or self.t <= int(i) <= 2 * self.t - 1 \\\n or 3 * self.t <= int(i) <= 4 * self.t - 1 \\\n or 5 * self.t <= int(i) <= 6 * self.t - 1:\n real_classes[class_key][block_key] = block_triple\n else:\n remainder_triples.setdefault(block_key, {})[class_key] = block_triple\n\n # 'b_i' blocks logic\n elif block_key[0] == 'b':\n if 0 <= int(i) <= self.t - 1:\n real_classes[class_key][block_key] = block_triple\n else:\n remainder_triples.setdefault(block_key, {})[class_key] = block_triple\n\n self.classes = {}\n self.classes.update(real_classes)\n self.classes.update(remainder_triples)", "def buildCellRules(self, seed=None):\n\n random.seed(seed)\n self._rsCellRules()\n self._fsiCellRules()\n self._strD1CellRules()\n self._strD2CellRules()\n self._thCellRules()\n self._gpiCellRules()\n self._gpeCellRules()\n self._stnCellRules()", "def generate (self, method='crest', nprocs_per_job=1, nprocs=1) :\n Conformers.generate(self, method, nprocs_per_job, nprocs)", "def construct_rule(perm_prop,\n perm_bound,\n dag,\n max_rule_size,\n max_nonempty,\n max_rules,\n\n ignore_first=1,\n allow_overlap_in_first=True):\n\n\n main_perms = []\n for perm in Permutations(perm_bound):\n if perm_prop(perm):\n main_perms.append(tuple(perm))\n\n # pick the main permutation to work with, currently just chooses one of the\n # largest ones randomly\n # TODO: be more smart about picking the permutations to learn from (or use all of them)\n random.shuffle(main_perms)\n main_perms = main_perms[:50]\n # main_perm = [ Permutation([1,2,3,4,5,6]) ]\n\n rules = RuleSet(perm_prop, perm_bound)\n tried_rules = set()\n for n in range(1, max_rule_size[0] + 1):\n for m in range(1, max_rule_size[1] + 1):\n for xsep in choose(perm_bound - 1, n - 1):\n for ysep in choose(perm_bound - 1, m - 1):\n for main_perm in main_perms:\n\n arr = [ [ [] for j in range(m) ] for i in range(n) ]\n\n nonempty_cnt = 0\n ok = True\n for i in range(n):\n for j in range(m):\n for k in range(0 if j == 0 else ysep[j-1] + 1, (perm_bound - 1 if j == m - 1 else ysep[j]) + 1):\n if (0 if i == 0 else xsep[i-1] + 1) <= perm_bound - main_perm[k] <= (perm_bound - 1 if i == n - 1 else xsep[i]):\n arr[i][j].append(main_perm[k])\n\n if arr[i][j]:\n nonempty_cnt += 1\n if nonempty_cnt > max_nonempty:\n ok = False\n break\n\n if not ok:\n break\n\n if not ok:\n continue\n\n\n nonempty = []\n for i in range(n):\n for j in range(m):\n if arr[i][j]:\n arr[i][j] = Permutation.to_standard(arr[i][j])\n cur = []\n # for inp_prop, inp in dag.elements:\n for inp in dag.elements:\n if inp is None:\n continue\n\n if inp.contains(arr[i][j]):\n cur.append((i, j, inp))\n\n nonempty.append(cur)\n\n\n\n for poss in product(*nonempty):\n rule = GeneratingRule({ (i,j): inp for i, j, inp in poss })\n if rule in tried_rules:\n continue\n\n # print(rule)\n\n tried_rules.add(rule)\n rules.add_rule(rule)\n\n print('Found %d rules, %d of which are valid, %d of which are distinct' % (\n len(tried_rules),\n sum( len(v) for k, v in rules.rules.items() ),\n len(rules.rules),\n ))\n\n return rules.exact_cover(\n max_rules,\n ignore_first,\n allow_overlap_in_first,\n )", "def create_parallel_1(self):\n for i in range(self.q):\n self.create_blocks(i)\n self.classes[i] = self.blocks", "def rules(self, 
prob_function, args, labels = {}):\n\t\t# Create nodes for all positions between words\n\t\tnodes = [Node(i) for i in xrange(0, self.lengthS + 1)]\n\t\tspans = []\n\t\t\n\t\t# Construct the graph by creating the edges\n\t#\tprint 'finding spans'\n\t\tfor (i,j) in self.spans():\n\t\t\tnodes[i].link_to(nodes[j])\n\t\t\tspans.append((i,j))\n\t#\tprint 'finding rules'\n\t\tfor (i,j) in spans:\n\t\t\tfor path in nodes[i].paths_to(nodes[j]):\n\t\t\t\tif not path or len(path) == 2:\n\t\t\t\t\t# No rules possible, or path points to itself\n\t\t\t\t\tcontinue\n\t\t\t\t# set probability\n\t\t\t\trule = Rule((i, j), path, labels)\n\t\t\t\tprob = prob_function(rule,args)\n\t\t\t\tyield self.prune_production(rule, self.lex_dict)", "def build_rules(self):\n if self.compiler.unfold_plan is not None:\n plan = self.compiler.unfold_plan\n env = unfolding.plan_to_program(\n plan, self.context, self.datasource,\n self.relations, self.rules)\n else:\n env = {}\n for rule in self.rules:\n env_rule = env.get(rule.id, None)\n if env_rule is not None:\n for rec in env_rule:\n self.build_rule(rule, rec)\n else:\n self.build_rule(rule, {})\n z3c.register(self.context)\n logging.getLogger().debug(\"Compiled rules:\\n%s\", self.context)\n if self.compiler.project is not None:\n self.compiler.project.reconciliate(self.context)\n if cfg.CONF.smt2 is not None:\n with open(cfg.CONF.smt2, 'w') as fd:\n self.dump_primitive_tables(fd)\n primitives.dump_translations(fd)\n fd.write(str(self.context))", "def build_meris_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('MERIS rules', rules_dict, rules_order)\n return rules", "def build_modis_rules():\n rules_dict = {\n 'level 0': processing_rules.build_rule('level 0', ['nothing lower'],\n run_bottom_error, False),\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_modis_l1a, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'geo': processing_rules.build_rule('geo', ['level 1a'], run_modis_geo,\n False),\n 'l1aextract_modis': 
processing_rules.build_rule('l1aextract_modis',\n ['level 1a', 'geo'],\n run_l1aextract_modis,\n False),\n 'level 1b': processing_rules.build_rule('level 1b',\n ['level 1a', 'geo'],\n run_modis_l1b, False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b', 'geo'],\n run_l2gen, False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 0', 'level 1a', 'l1brsgen', 'l1mapgen', 'geo',\n 'l1aextract_modis', 'level 1b', 'l2gen', 'l2extract',\n 'l2bin', 'l2brsgen', 'l2mapgen', 'l3bin', 'l3mapgen',\n 'smigen']\n rules = processing_rules.RuleSet(\"MODIS Rules\", rules_dict, rules_order)\n return rules", "def rules(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for rule in extracted:\n self.rules.add(rule)", "def generate(rule, steps):\n # START OF YOUR CODE\n pass\n\n # END OF YOUR CODE", "def preprocessing():", "def precomputation(rulelist, enable_regex=False):\n\n # 1. get rule countability\n if RUNTIME_CONFIG['debug'] == True:\n print(\"Getting Dependencies and Countability\\n\")\n rulelist = get_dependencies_for_rules(rulelist)\n\n # special count for D1 Q\n rulelist = get_special_countability(rulelist)\n\n # 2. get feasibility\n rulelist = get_is_feasible(rulelist, enable_regex)\n\n # 3. get special feasibility / special_checking(HC)\n rulelist = get_special_invertibility(rulelist, enable_regex)\n\n #for r in rulelist:\n #print(\"{}\\n{}\".format(r.raw, r.feasibility))\n\n # 4. 
pipe data (with memorization)\n if RUNTIME_CONFIG['debug'] == True:\n print(\"Calling JtR/HC to Enumerate Uninvertible Rules\\n\")\n\n stime = perf_counter()\n if has_generated_data() == False:\n if RUNTIME_CONFIG['debug'] == True:\n print(\"Start Calling JtR/HC To Generate Data:\\n\")\n\n for i, r in enumerate(rulelist):\n if r.feasibility.is_invertible() and r.feasibility.is_countable(\n ): # If Both, Continue\n continue\n # only get a guess number\n elif r.feasibility.is_invertible() and r.feasibility.is_countable(\n ) == False: # Only invertible, get count only\n forward_a_rule_to_an_address_count_only(\n RUNTIME_CONFIG['wordlist_path']['name'], r,\n \"{}/count/rule{}.txt\".format(\n RUNTIME_CONFIG['preprocess_path'],\n i), RUNTIME_CONFIG['wordlist_path']['prefix'])\n # pipe both guesses and number\n else:\n forward_a_rule_to_an_address_and_forward_count(\n RUNTIME_CONFIG['wordlist_path']['name'], r,\n RUNTIME_CONFIG['preprocess_path'], i,\n RUNTIME_CONFIG['wordlist_path']['prefix'])\n\n store_generated_data_hash()\n\n else:\n if RUNTIME_CONFIG['debug'] == True:\n print(\"Already Has Data, Skipping Enumeration\\n\")\n\n data_generation_time = perf_counter() - stime\n if RUNTIME_CONFIG['debug'] == True:\n print(\"Data Generation (Enumeration) Time: {}\\n\".format(\n data_generation_time))\n\n return rulelist", "def _pre_processing(self, material_probs, convertion_rules):\n # To count items and stages.\n additional_items = {'30135': u'D32钢', '30125': u'双极纳米片', '30115': u'聚合剂'}\n exp_unit = 200*30.0/7400\n gold_unit = 0.004\n exp_worths = {'2001':exp_unit, '2002':exp_unit*2, '2003':exp_unit*5, '2004':exp_unit*10}\n gold_worths = {'3003':gold_unit*500}\n\n item_dct = {}\n stage_dct = {}\n for dct in material_probs['matrix']:\n item_dct[dct['item']['itemId']]=dct['item']['name']\n stage_dct[dct['stage']['code']]=dct['stage']['code']\n item_dct.update(additional_items)\n \n # To construct mapping from id to item names.\n item_array = []\n item_id_array = []\n for k,v in item_dct.items():\n try:\n float(k)\n item_array.append(v)\n item_id_array.append(k)\n except:\n pass\n self.item_array = np.array(item_array)\n self.item_id_array = np.array(item_id_array)\n self.item_dct_rv = {v:k for k,v in enumerate(item_array)}\n\n # To construct mapping from stage id to stage names and vice versa.\n stage_array = []\n for k,v in stage_dct.items():\n stage_array.append(v)\n self.stage_array = np.array(stage_array)\n self.stage_dct_rv = {v:k for k,v in enumerate(self.stage_array)}\n \n # To format dropping records into sparse probability matrix\n probs_matrix = np.zeros([len(stage_array), len(item_array)])\n cost_lst = np.zeros(len(stage_array))\n cost_exp_offset = np.zeros(len(stage_array))\n cost_gold_offset = np.zeros(len(stage_array))\n for dct in material_probs['matrix']:\n try:\n float(dct['item']['itemId'])\n probs_matrix[self.stage_dct_rv[dct['stage']['code']], self.item_dct_rv[dct['item']['name']]] = dct['quantity']/float(dct['times'])\n if cost_lst[self.stage_dct_rv[dct['stage']['code']]] == 0:\n cost_gold_offset[self.stage_dct_rv[dct['stage']['code']]] = - dct['stage']['apCost']*(12*gold_unit)\n cost_lst[self.stage_dct_rv[dct['stage']['code']]] = dct['stage']['apCost']\n except:\n pass\n\n try:\n cost_exp_offset[self.stage_dct_rv[dct['stage']['code']]] -= exp_worths[dct['item']['itemId']]*dct['quantity']/float(dct['times'])\n except:\n pass\n\n try:\n cost_gold_offset[self.stage_dct_rv[dct['stage']['code']]] -= gold_worths[dct['item']['itemId']]*dct['quantity']/float(dct['times'])\n except:\n 
pass\n\n # Hardcoding: extra gold farmed.\n cost_gold_offset[self.stage_dct_rv['S4-6']] -= 3228 * gold_unit\n cost_gold_offset[self.stage_dct_rv['S5-2']] -= 2484 * gold_unit\n\n # To build equavalence relationship from convert_rule_dct.\n self.convertions_dct = {}\n convertion_matrix = []\n convertion_outc_matrix = []\n convertion_cost_lst = []\n for rule in convertion_rules:\n convertion = np.zeros(len(self.item_array))\n convertion[self.item_dct_rv[rule['name']]] = 1\n\n comp_dct = {comp['name']:comp['count'] for comp in rule['costs']}\n self.convertions_dct[rule['name']] = comp_dct\n for iname in comp_dct:\n convertion[self.item_dct_rv[iname]] -= comp_dct[iname]\n convertion_matrix.append(copy.deepcopy(convertion))\n\n outc_dct = {outc['name']:outc['count'] for outc in rule['extraOutcome']}\n outc_wgh = {outc['name']:outc['weight'] for outc in rule['extraOutcome']}\n weight_sum = float(sum(outc_wgh.values()))\n for iname in outc_dct:\n convertion[self.item_dct_rv[iname]] += outc_dct[iname]*0.175*outc_wgh[iname]/weight_sum\n convertion_outc_matrix.append(convertion)\n \n convertion_cost_lst.append(rule['goldCost']*0.004)\n\n convertions_group = (np.array(convertion_matrix), np.array(convertion_outc_matrix), np.array(convertion_cost_lst))\n farms_group = (probs_matrix, cost_lst, cost_exp_offset, cost_gold_offset)\n \n return convertions_group, farms_group", "def build_goci_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('GOCI rules', rules_dict, rules_order)\n return rules" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates 'X of Y' style song names. May implement adjectives. If one adjective is chosen, another is highly likely.
def XofYGenerator(): adjective1 = "" adjective2 = "" noun1 = random.choice(nouns) noun2 = random.choice(nouns) # decide if it's going to be adjective-y if random.random() >= 0.625: if random.random() >= 0.5: adjective1 = random.choice(adjectives) if random.random() >= 0.25: adjective2 = random.choice(adjectives) else: adjective2 = random.choice(adjectives) if random.random() >= 0.25: adjective1 = random.choice(adjectives) return ' '.join([adjective1, noun1, 'of', adjective2, noun2])
[ "def generateSongName(style=None):\n\n if style:\n generator = generators[style]\n else:\n generator = random.choice(generators.values())\n\n song_title = generator()\n return string.capwords(song_title)", "def generate_lexicon(with_tone=False, with_erhua=False):\n syllables = OrderedDict()\n\n for C in [''] + INITIALS:\n for V in FINALS:\n for R in [''] if not with_erhua else ['', 'r']:\n for T in [''] if not with_tone else ['1', '2', '3', '4', '5']:\n result = rule(C, V, R, T)\n if result:\n syllables[result] = f'{C} {V}{R}{T}'\n return syllables", "def _generate_names(self):\n\n return [f\"Player {n + 1}\" for n in range(self._nplayers)]", "def gen_ambush_text(encounter):\n ls = [(x.name, x.nameplural) for x in encounter]\n ls.sort()\n ls2 = []\n num = 0\n name = \"\"\n for value in ls:\n if value[0] == name:\n num += 1\n else:\n if num == 1:\n ls2.append(\"a \" + FMT_ENEMY.format(value[0]))\n elif num > 1:\n ls2.append(\"{} {}\".format(num, FMT_ENEMY.format(value[1])))\n name = value[0]\n num = 1\n if num == 1:\n ls2.append(\"a \" + FMT_ENEMY.format(value[0]))\n elif num > 1:\n ls2.append(\"{} {}\".format(num, FMT_ENEMY.format(value[1])))\n return join_list_pretty(ls2)", "def create_soundex_representation(lyrics):\n\n\tsoundex_repr = ''\n\tfor word in lyrics.split():\n\t\ttry:\n\t\t\tsoundex_repr += phonetics.soundex(word) + ' '\n\t\texcept:\n\t\t\tword = re.sub(\"'\",\"\",word)\n\t\t\twords = re.sub(\"\\-\", \" \",word) # convert words a four-door to four door\n\t\t\tfor word in words.split():\n\t\t\t\ttry:\n\t\t\t\t\tsoundex_repr += phonetics.soundex(word) + ' '\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\treturn soundex_repr.rstrip()", "def buildSentence():\n\tsentenceThree = []\n\tsentenceFour = []\n\tfor i in range(3): #build sentence of length 3\n\t\tx = random.randint(0,5)\n\t\tif x == 0:\n\t\t\tsentenceThree.append(personWords[random.randint(0, len(personWords)-1)])\n\t\telif x == 1:\n\t\t\tsentenceThree.append(actionWords[random.randint(0, len(actionWords)-1)])\n\t\telif x == 2:\n\t\t\tsentenceThree.append(positionWords[random.randint(0, len(positionWords)-1)])\n\t\telif x == 3:\n\t\t\tsentenceThree.append(pronounWords[random.randint(0, len(pronounWords)-1)])\n\t\telif x == 4:\n\t\t\tsentenceThree.append(thingWords[random.randint(0, len(thingWords)-1)])\n\t\telse:\n\t\t\tsentenceThree.append(valueWords[random.randint(0, len(valueWords)-1)])\n\tfor i in range(4): #build sentence of length 4\n\t\tx = random.randint(0,5)\n\t\tif x == 0:\n\t\t\tsentenceFour.append(personWords[random.randint(0, len(personWords)-1)])\n\t\telif x == 1:\n\t\t\tsentenceFour.append(actionWords[random.randint(0, len(actionWords)-1)])\n\t\telif x == 2:\n\t\t\tsentenceFour.append(positionWords[random.randint(0, len(positionWords)-1)])\n\t\telif x == 3:\n\t\t\tsentenceFour.append(pronounWords[random.randint(0, len(pronounWords)-1)])\n\t\telif x == 4:\n\t\t\tsentenceFour.append(thingWords[random.randint(0, len(thingWords)-1)])\n\t\telse:\n\t\t\tsentenceFour.append(valueWords[random.randint(0, len(valueWords)-1)])\n\tif random.randint(0,1) == 0:\n\t\treturn \" \".join(sentenceThree)\n\telse:\n\t\treturn \" \".join(sentenceFour)", "def make_word(phonology, num_syllables, distro='zipf')->str:\n\n # Construct the word\n word = ''\n for i in range(num_syllables):\n word += make_syllable(phonology)\n return word", "def test_shorten_name():\n names = [(\"Parahippocampal Gyrus, anterior division\",\n \"Parahip G, ant\",\n \"ctx\"),\n (\"Middle Frontal Gyrus\", \"MFG\", \"ctx\"),\n (\"Right Hippocampus\", \"R Hippocampus\", 
\"sub\")]\n\n for orig, new, atlas in names:\n yield assert_equal, new, locator.shorten_name(orig, atlas)", "def get_jokes(number, flag=1):\n\n nouns = [\"автомобиль\", \"лес\", \"огонь\", \"город\", \"дом\"]\n adverbs = [\"сегодня\", \"вчера\", \"завтра\", \"позавчера\", \"ночью\"]\n adjectives = [\"веселый\", \"яркий\", \"зеленый\", \"утопичный\", \"мягкий\"]\n\n size = len(nouns)\n if number > size and not flag:\n return print(f\"Impossible to create phrases with non-repeated words. \"\n f\"Please, chose number less than {size + 1}.\")\n\n for i in range(number):\n idx_noun, idx_adv, idx_adj = r.randrange(size)\n\n noun = nouns[idx_noun]\n adverb = adverbs[idx_adv]\n adjective = adjectives[idx_adj]\n\n print(f'{noun} {adverb} {adjective}')\n\n if not flag and size:\n # replace selected element with the last element of the list\n # reduce size by one\n nouns[idx_noun] = nouns[size - 1]\n adverbs[idx_adv] = adverbs[size - 1]\n adjectives[idx_adj] = adjectives[size - 1]\n size -= 1", "def makePoem():\n adjFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\adj.txt\"\n advFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\adv.txt\"\n nounFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\noun.txt\"\n prepFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\prepositions.txt\"\n verbFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\verb.txt\"\n adj = getWord(adjFile, 3)\n adv = getWord(advFile, 1)\n noun = getWord(nounFile, 3)\n prep = getWord(prepFile, 2)\n verb = getWord(verbFile, 3)\n aan = aOrAn(adj)\n return aan[0] + ' ' + adj[0] \\\n + ' ' + noun[0] + '\\n\\n' \\\n + aan[0] + ' ' + adj[0] \\\n + ' ' + noun[0] \\\n + ' ' + verb[0] \\\n + ' ' + prep[0] \\\n + ' the ' + adj[1] \\\n + ' ' + noun[1] + '\\n' \\\n + adv[0] \\\n + ', the ' + noun[0] \\\n + ' ' + verb[1] + '\\n'\\\n + 'the ' + noun[1] \\\n + ' ' + verb[2] \\\n + ' ' + prep [1] \\\n + ' ' + aan[2].lower() + ' ' + adj[2] \\\n + ' ' + noun[2]", "def name_the_drink():\n # This allows me to apply the random function to create the drink name\n import random\n adjectives = ['Brave', 'Agreeable', 'Calm', 'Delightful',\n 'Eager', 'Faithful', 'Gentle', 'Happy', 'Jolly',\n 'Kind', 'Lively', 'Nice', 'Obedient', 'Proud',\n 'Relieved', 'Silly', 'Thankful', 'Victorious',\n 'Witty', 'Zealous', 'Angry', 'Bewildered', 'Clumsy',\n 'Defeated', 'Embarrassed', 'Fierce', 'Grumpy',\n 'Helpless', 'Itchy', 'Jealous', 'Lazy', 'Mysterious',\n 'Nervous', 'Obnoxious', 'Panicky', 'Repulsive',\n 'Scary', 'Thoughtless', 'Uptight', 'Worried'\n ]\n \n animals = ['Bali Cattle', 'Alpaca', 'Cat', 'Cattle', 'Chicken',\n 'Dog', 'Bactrian Camel', 'Canary', 'Dirty Camel', 'Duck',\n 'Goat', 'Goose', 'Guineafowl', 'Hedgehog', 'Pig', 'Pigeon',\n 'Rabbit', 'Silkmoth', 'Silver Fox', 'Turkey', 'Donkey',\n 'Fancy Mouse', 'Lab Rat', 'Ferret', 'Gayal', 'Goldfish'\n ]\n \n drink_name = \"Yaaarr, its name be {} {}! 
Drink up!\".format(str(random.choice(adjectives)),str(random.choice(animals)))\n print(\"\")\n print(drink_name)\n print(\"\")", "def get_name(num_syllables):\n name_list = get_data_file_as_lines(\"names.txt\")\n\n name = \"\"\n for syllable in range(1, num_syllables):\n number = randint(1, 100)\n syllable = name_list[number-1].strip()\n hyphen_chance = randint(1, 3)\n if syllable[-1:] is not \"'\" and hyphen_chance == 3:\n syllable += \"-\"\n if name[-1:] == \"-\" and syllable[:1] == \"'\":\n syllable = syllable.lstrip(\"'\")\n name += syllable\n\n return name.strip(\"-\").strip(\"'\").capitalize()", "def indefinite(noun):\r\n return f'an {noun}' if noun[0] in VOWELS else f'a {noun}'", "def generate(markovD, length, starterList):\n randTup = random.choice(starterList)\n for word in randTup:\n print(word, end=' ')\n for x in range(length):\n word = random.choice(markovD[randTup])\n print(word, end=' ')\n tempLst = []\n for x in range(1, len(randTup)):\n tempLst += [randTup[x]]\n tempLst += [word]\n\n randTup = tuple(tempLst)\n if word[-1] in [\".\", \"!\", \"?\"]:\n randTup = random.choice(starterList)\n\n print()", "def _generate_shortname(cls):\n return ''.join([cls.letters[random.randrange(0, cls.num_letters)] for idx in range(0, cls.SHORTNAME_LEN)])", "def generate_word(self) -> str:\n\n from random import choice, randint\n\n ret = choice(self.inits)\n count = randint(self.min, self.max)\n\n while len(ret) < count:\n try:\n arr = self.pairs[ret[-2:]]\n ret += choice(arr)[1:]\n except KeyError:\n ret = ret[:-2]\n\n if len(ret) < 2:\n ret = choice(self.inits)\n\n return ret.title()", "def generate_accessories(accessories, is_male):\n\n sentence = \"He\" if is_male else \"She\"\n sentence += \" is wearing\"\n\n def necktie_and_hat(attribute):\n \"\"\"\n Returns a grammatically correct sentence based on the attribute\n \"\"\"\n\n if attribute == \"necktie\" or attribute == \"hat\" or attribute == \"necklace\":\n return \"a \" + attribute\n return attribute\n\n if len(accessories) == 1:\n attribute = (\n accessories[0].lower()\n if accessories[0] == \"Eyeglasses\"\n else necktie_and_hat(accessories[0].lower().split(\"_\")[1])\n )\n return sentence + \" \" + attribute + \".\"\n\n for i, attribute in enumerate(accessories):\n attribute = (\n attribute.lower()\n if attribute == \"Eyeglasses\"\n else necktie_and_hat(attribute.lower().split(\"_\")[1])\n )\n\n if i == len(accessories) - 1:\n sentence = sentence[:-1]\n sentence += \" and \" + attribute + \".\"\n else:\n sentence += \" \" + attribute + \",\"\n\n return sentence", "def generate_slides(song: Song) -> str:\n return song.generate_slides()", "def humanize_names_list(names_list,\n possessive=POSSESSIVE,\n conjunction=CONJUNCTION):\n\n assert isinstance(names_list, (list, tuple))\n names = [possessive.format(name=name) for name in names_list[:]]\n if len(names) is 1:\n return names[0]\n elif len(names) > 1:\n return \"{0} {conjunction} {1}\".format(\", \".join(names[:-1]), names[-1],\n conjunction=conjunction)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a Mahavishnu John band name. Optional style argument can be one of 'one word', 'x of y', or 'x y'
def generateSongName(style=None): if style: generator = generators[style] else: generator = random.choice(generators.values()) song_title = generator() return string.capwords(song_title)
[ "def makeChanName(bcateg, taucateg):\n chname = 'XX XX'\n\n ## b part \n if bcateg == 'bb':\n chname = 'bb'\n\n # space\n chname += ' '\n\n # tau part\n if taucateg == 'MuTau' or taucateg == 'mutau':\n chname += '#mu#tau_{h}'\n if taucateg == 'ETau' or taucateg == 'etau':\n chname += 'e#tau_{h}'\n if taucateg == 'TauTau' or taucateg == 'tautau':\n chname += '#tau_{h}#tau_{h}'\n\n return chname", "def note_name(p):\r\n chroma = ['A', 'A$^\\\\sharp$', 'B', 'C', 'C$^\\\\sharp$', 'D', 'D$^\\\\sharp$', 'E', 'F', 'F$^\\\\sharp$', 'G',\r\n 'G$^\\\\sharp$']\r\n name = chroma[(p - 69) % 12] + str(p // 12 - 1)\r\n return name", "def _generate_title(setup):\n groom_str = \" Groomed\" if \"groomed\" in setup.region['name'] else ''\n return '%s %s%s %s' % (setup.jet_algo, setup.region['label'], groom_str, setup.angle.name)", "def short_name_creator(long_name):\n name = long_name.replace(' ', '')\n\n if 'UT' in name:\n long_name = long_name.split('UT')[0].rstrip()\n name = name.split('UT')[0]\n\n if '2MASS' in name:\n # If the user already passed a 2MASS short name (i.e., 2MASS XXXX+YYYY), just return it.\n if len(name) == 14 or len(name) == 16 or len(name) == 17: #TODO: Fix this, stupid. \n short_name = long_name\n\n # Otherwise, create the short name.\n else:\n if '+' in name:\n short_name = '2MASS ' + \\\n name.split('J')[1].split('+')[0][0:4]+'+' + \\\n name.split('J')[1].split('+')[1][0:4]\n elif '-' in name:\n short_name = '2MASS ' + \\\n name.split('J')[1].split('-')[0][0:4]+'-' + \\\n name.split('J')[1].split('-')[1][0:4]\n # If a name other than a 2MASS identifier was passed, just return the long name.\n else:\n print('')\n print('WARNING: long_name does not match 2MASS Jxxxxxxxx+xxxxxxx format, returning long_name.')\n short_name = long_name\n\n return short_name", "def pkg_format_name(s, missing=False):\n return str(C(s, fore=('red' if missing else 'magenta'), style='bright'))", "def _kaloom_nw_name(prefix, network_id):\n return prefix + network_id", "def title(file, name) :\n\n with open(file, \"w\") as p :\n p.write(\"strict graph G1{\\n\")\n p.write(name)\n p.write(\"[shape=plaintext]\\n\")\n p.write(\"edge[len=6;edgesep=10];\\n\")\n p.write(\" overlap=false;\\n\")", "def get_name(num_syllables):\n name_list = get_data_file_as_lines(\"names.txt\")\n\n name = \"\"\n for syllable in range(1, num_syllables):\n number = randint(1, 100)\n syllable = name_list[number-1].strip()\n hyphen_chance = randint(1, 3)\n if syllable[-1:] is not \"'\" and hyphen_chance == 3:\n syllable += \"-\"\n if name[-1:] == \"-\" and syllable[:1] == \"'\":\n syllable = syllable.lstrip(\"'\")\n name += syllable\n\n return name.strip(\"-\").strip(\"'\").capitalize()", "def get_name(n, l, m):\n return '%d%s%d' % (n, OrbLet[l] if l < len(OrbLet) else '_%d_' % l, m)", "def gen_street_name(start, end) -> str:\n start = [int(char) for char in str(start)]\n end = [int(char) for char in str(end)]\n string = \"\"\n for i in start:\n string += f\"{Grid.alphabet[i]}\"\n string += \"-\"\n for j in end:\n string += f\"{Grid.alphabet[j]}\"\n return string", "def create_name(self, index, county, city ):\n\n\n\n # Get locality(town or county), and remove state abbreviation.\n if county and city:\n locality = county + '_' + city\n return locality + '_' + str(index)\n #print locality\n elif county:\n locality = county\n return locality + '_' + str(index)\n\n elif city:\n locality = city\n return locality + '_' + str(index)\n else:\n print 'Missing data at row ' + str(index) + '.'", "def make_sheet_name(month, year):\n return 
u\"Звонки_%s_%s\" % (MONTH_NAMES[month-1], str(year))", "def getZ_A_suffix_andZAFromName( name ) :\n\n if( name == 'n' ) : return( 0, 1, '', 1 )\n if( name == 'gamma' ) : return( 0, 0, '', 0 )\n if( name[:18] == 'FissionProductENDL' ) :\n ZA = int( name[18:] )\n Z = ZA / 1000\n A = 1000 * Z - ZA\n return( Z, A, '', ZA )\n if( '__' in name ) : raise Exception ( \"Name = %s\" % name )\n naturalSuffix = ''\n if( '_' in name ) : # Isotope names can have level designator (e.g., 'O16_e3') and naturals are of the form 'S_natural' or 'S_natural_l'\n s = name.split( '_' ) # where S is element's symbol and l is level designator (e.g., 'Xe_natural' or 'Xe_natural_c').\n sZA, suffix = s[:2]\n if( len( s ) > 2 ) :\n if( ( len( s ) > 3 ) or ( suffix != 'natural' ) ) : raise Exception( 'Invalid name for endl ZA particle = %s' % name )\n naturalSuffix = s[2]\n else :\n sZA = name\n suffix = ''\n for i, c in enumerate( sZA ) :\n if( c.isdigit( ) ) : break\n if( not c.isdigit( ) ) : i += 1\n sZ, sA = sZA[:i], sZA[i:]\n Z = elementZFromSymbol( sZ )\n if( Z is None ) : raise Exception( 'No element symbol for particle named %s' % name )\n if( sA == '' ) :\n if( suffix == 'natural' ) : return( Z, 0, naturalSuffix, 1000 * Z )\n if( suffix == '' ) : return( Z, 0, '', 1000 * Z )\n raise Exception( 'No A for particle named %s' % name )\n elif( suffix == 'natural' ) :\n raise Exception( 'Natural element also has A defined for particle named %s' % name )\n else :\n try :\n A = int( sA )\n except :\n raise Exception( 'Could not convert A to an integer for particle named %s' % name )\n ZA = 1000 * Z + A\n return( Z, A, suffix, ZA )", "def make_dataset_header(data, file_format, aminoacids):\n if aminoacids:\n datatype = 'PROTEIN'\n else:\n datatype = 'DNA'\n\n if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:\n header = \"\"\"\n#NEXUS\n\nBEGIN DATA;\nDIMENSIONS NTAX={0} NCHAR={1};\nFORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;\nMATRIX\n\"\"\".format(data.number_taxa, data.number_chars, datatype)\n\n elif file_format == 'MEGA':\n return \"#MEGA\\n!TITLE title;\"\n\n else: # file_format: TNT\n if aminoacids:\n molecule_type = \"prot\"\n else:\n molecule_type = \"dna\"\n header = \"\"\"\nnstates {0};\nxread\n{1} {2}\"\"\".format(molecule_type, data.number_chars, data.number_taxa)\n\n return header.strip()", "def build_term_code(year_semester: str, abbr: str) -> str:\n if abbr != \"GV\" and abbr != \"QT\":\n return year_semester + \"1\"\n else:\n if abbr == \"GV\":\n return year_semester + \"2\"\n else:\n return year_semester + \"3\"", "def form_bag_name03(aipid, bagseq=0, mbver=\"0.3\"):\n return form_bag_name(aipid, bagseq, \"\", mbver,\n \"{aipid}.mbag{mbver}-{bagseq}\")", "def create_name(base_name, label):\n return base_name + SEPARATOR + label", "def gen_filename(year, month):\n\n month = str(month).zfill(2)\n year = str(year)\n\n return \"hre\" + year + month + \".nc\"", "def title(title, line='='):\n return '{title}\\n{pad:{line}<{width}}\\n'.format(title=title,\n pad='',\n line=line,\n width=len(title))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the unique user manager or create it.
def GetGlobalUserManager(*args, **kwargs): global global_user_manager if global_user_manager is None: global_user_manager = UserManager(*args, **kwargs) return global_user_manager
[ "def get_single_user():", "def get_or_create_user(github_user):\n try:\n return User.objects.get(login=github_user.login)\n except User.DoesNotExist:\n return create_user(github_user)", "def get_or_create_user(self, username, ldap_user):\n return User.objects.get_or_create(username=username)", "def get_manager( self, manager_name ):\n\n try:\n return self.managers[ manager_name ]\n except:\n return None", "def cli(ctx, user):\n return ctx.gi.users.get_user_creator(user)", "def user(self):\r\n try:\r\n return User.objects.get(username=self.username)\r\n except User.DoesNotExist:\r\n return None", "def get_user_model():\n return django_get_user_model()", "def default_user():\n user_id = -1\n try:\n user_id = get_user_model().objects.get(username='superuser').id\n except:\n pass\n\n return user_id", "def get_user(handle):\n return bbs.dbproxy.DBProxy('userbase')[handle]", "def get_user_right_manager(self) -> UserRightManager:\n return self.__user_right_manager", "def get_manager(self):\n return self.__manager", "def GetManager(*args, **kwargs):\n return _aui.AuiManager_GetManager(*args, **kwargs)", "def get_or_make_user(self, aga_id):\n while aga_id in self._pin_changes:\n aga_id = self._pin_changes[aga_id]\n if aga_id in self._users:\n return self._users[aga_id]\n else:\n user = User(aga_id=aga_id, email=uuid4(), fake=True)\n\n db.session.add(user)\n db.session.commit()\n\n player = Player(id=aga_id, name='', user_id=user.id, server_id=self.server_id, token=uuid4())\n db.session.add(player)\n\n self._users[aga_id] = user\n return user", "def AuiManager_GetManager(*args, **kwargs):\n return _aui.AuiManager_GetManager(*args, **kwargs)", "def get_manager():\n global _MANAGER\n if _MANAGER is None:\n _MANAGER = ResourceManager()\n return _MANAGER", "def _get_or_create_user(self, force_populate=False):\n save_user = False\n\n username = self.backend.ldap_to_django_username(self._username)\n\n (self._user, created) = self.backend.get_or_create_user(username, self)\n\n if created:\n self._user.set_unusable_password()\n save_user = True\n\n if(force_populate or ldap_settings.AUTH_LDAP_ALWAYS_UPDATE_USER or created):\n self._populate_user()\n self._populate_and_save_user_profile()\n\n #self._populate_profile_from_group_memberships()\n save_user = True\n\n if ldap_settings.AUTH_LDAP_MIRROR_GROUPS:\n self._mirror_groups()\n\n if save_user:\n self._user.save()\n\n self._user.ldap_user = self\n self._user.ldap_username = self._username", "def get_manager_id_locator(self):\n return self.managerId;", "def getmanager(self, user):\n\n url_encoded = urllib.parse.quote_plus(f'{user}')\n request_string = f\"{self.base_url}/users/{url_encoded}/manager\"\n header = {\n \"Content-type\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.access_token2\n }\n response = requests.get(request_string, headers=header)\n data = response.json()\n if \"error\" in data.keys():\n return \"Notfound\"\n else:\n return data['userPrincipalName']", "def unit_manager_uid(self):\n return self.uid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the user with this uid or None.
def GetUserByUid(self, uid): return self.userDict[uid] if uid in self.userDict else None
[ "def get(uid):\n if uid in User.users:\n return User.users[uid]\n return None", "def user(self):\r\n try:\r\n return User.objects.get(username=self.username)\r\n except User.DoesNotExist:\r\n return None", "def get_single_user():", "def get_user(self, nickname):\n \n return self.users[nickname][0]", "def user(self) -> Optional[str]:\n if self.logged_in():\n return self.username()\n return None", "def get_current_user(self):\n return self.graph.users.get(int(self.get_secure_cookie('eid')))", "def sqla_user(self):\r\n if self.user:\r\n return self.orm.query(models.User).get(self.user.id)", "def _get_user_id(self) -> str:\n return self._uid[0]", "def GetUser(self, guid):\n self.usrLock.acquire()\n \n user = None\n for candidate in self.users: #Match the user\n if candidate.guid == guid:\n user = candidate\n break\n \n self.usrLock.release()\n return user", "def get_associated_database_user(self):\n query = DatabaseUser.all(keys_only=True)\n query.filter(\"person =\", self)\n key = query.get()\n return (db.get(key))", "def get_user():\n user = None\n if 'userId' in session:\n user = User.query.get(session['userId'])\n return user", "def get_user(self, socket):\r\n\t\treturn self.users.get(socket, None)", "def user(self) -> Optional[dict]:\n return self._get('user')", "def getUser(self):\n users = User.objects.filter(username__iexact=self.trigramme)\n if users.count() >= 1:\n return users[0]", "def get(cls, uid_or_email):\n\n # Hack: UIDs will never contain the '@' symbol so if it does this is an\n # e-mail\n if '@' in uid_or_email:\n record = auth.get_user_by_email(uid_or_email)\n else:\n record = auth.get_user(uid_or_email)\n\n user = cls(record.uid)\n user._init_from_user_record(record)\n return user", "def get_user():\r\n return login_session.get('user', None)", "def get_user_by_id(self, user_id):\n if type(user_id) != unicode:\n return None\n try:\n return self._mk.User.find_one(ObjectId(user_id))\n except:\n return None", "def _get_user_id(self):\n return self._api_query_request('me')['id']", "def get_iuser(self, context):\n user = self.dbapi.iuser_get_one()\n return user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store the user dict to the pathToExistingUserFile file.
def StoreUserList(self): with open(self.pathToExistingUserFile, 'w') as o: self.logger.debug('Update existing user file') for user in self.userDict.values(): o.write('{};{};{}\n'.format(user.firstName, user.lastName, user.uid) )
[ "def add_user(self, dict_user: dict):\n try:\n with open(self.path, 'a') as file:\n file.write('\\n[{user_name}]\\n'.format(**dict_user))\n file.write('\\tpath = /home/{user_name}/\\n'.format(**dict_user))\n file.write('\\tvalid users = {user_name}\\n'.format(**dict_user))\n file.write('\\tread only = no\\n')\n except Exception as e:\n self.restore_backup()\n raise e\n else:\n self.check_integrity()\n click.secho(\n 'SUCCESS: Added user to smb.conf.', fg='green')", "def save( self ):\n if self._userSettings:\n\n from ToolBOSCore.Packages.CopyrightHeader import getCopyrightHeader\n\n content = getCopyrightHeader( 'python', 'User preferences' )\n\n for key, value in sorted( self._userSettings.items() ):\n value = repr( value ) # write object as Python code\n\n content += '%s = %s\\n\\n' % ( key, value )\n\n content += '\\n# EOF\\n'\n\n logging.debug( 'writing %s', self._userFile )\n FastScript.setFileContent( self._userFile, content )\n\n else:\n # delete entire file if there are no settings left\n\n logging.debug( 'deleting empty configfile' )\n FastScript.remove( self._userFile )", "def save_info(username,password):\n user_dict = {username : password}\n with open(\"accounts.json\", \"r+\") as file:\n f = json.load(file)\n f.update(user_dict)\n file.seek(0)\n json.dump(f,file)", "def __save_current_user_data(self):\n np.save(self.file_names['data'], self.data)\n np.save(self.file_names['users_list'], self.users_list)\n np.save(self.file_names['user_indexes'], self.user_indexes)", "def __save(self):\n\n write_file(path.join(path_wallet, self.__user + '_wallet.txt'),\n self.__user + '\\n'\n + self.__keys_filename)", "def write_username(user):\n with open(Launcher.USERNAME_FILE, 'wb') as username_file:\n pickle.dump(user, username_file)\n Launcher.USERANAME = user", "def store_user(steamname, steamid, discorduser=\"\"):\n\ttry:\n\t\tdata = { steamname : [steamid, discorduser] }\n\t\tif isfile(USERS_JSON) and getsize(USERS_JSON) > 0:\n\t\t\twith open(USERS_JSON, 'r+') as out_file:\n\t\t\t\tfile_data = json.load(out_file)\n\t\t\t\tfile_data.update(data)\n\t\t\t\tout_file.seek(0)\n\t\t\t\tout_file.truncate()\n\t\t\t\tout_file.write(json.dumps(file_data))\n\t\telse:\n\t\t\twith open(USERS_JSON, 'w') as out_file:\n\t\t\t\tjson.dump(data, out_file)\n\texcept Exception as e:\n\t\tprint(e)", "def _save_json_file(self, filename, data, is_analysation_dict=False):\n\t\tfile_path = os.path.join(self.users_dir, filename)\n\t\tif is_analysation_dict:\n\t\t\tif os.path.isfile(file_path):\n\t\t\t\t# file already exists\n\t\t\t\tsave_data = self._load_json_file(filename)\n\t\t\telse:\n\t\t\t\tsave_data = {}\n\t\t\tsave_data[self.model.name] = data\n\t\t\tdata = save_data\n\t\twith open(file_path, \"w\") as output_file:\n\t\t\tjson.dump(data, output_file)", "def _user_path():\n return os.path.join(private_dir.private_dir_path(settings.APP_NAME), 'user.json')", "def storeUserProfile(self, user_id, profile_json):\n profile_path = os.path.join(self.profile_folder, \"profile_\" + user_id + \".json\")\n fp = open(profile_path, \"w\")\n json.dump(profile_json[\"final_param\"], fp)\n return", "def create_user_dict(self, username, path = []):\n \n self.log.info(\"Creating user dictionary for user %r.\" % username)\n\n section_data = self.get_section_data(username, path)\n custom_data = self.get_custom_data()\n\n default_user_data = {\"admin\": False,\n \"sections\": [section_data],\n \"custom\": custom_data,\n \"root\": copy.deepcopy(path)}\n\n return default_user_data", "def update_data(current_user_data, 
filename):\n\n try:\n stored_data = read_data(filename)\n logger.info(\"Merging new data into stored\")\n\n for k, v in current_user_data.items():\n if existing := stored_data.get(k):\n stored_data[k] = list(set(existing) | set(v))\n else:\n stored_data[k] = v\n except:\n logger.info(\"No stored data found\")\n stored_data = current_user_data\n\n logger.info(\"Writing new data to file %s\", filename)\n\n with open(filename, 'w') as f:\n json.dump(stored_data, f)", "def _dir_from_users(self):\n for username in self._users.keys():\n path = self._path_to_db + \"/\"+username+\".json\"\n with open(path, \"w\") as fp:\n json.dump(self._users[username], fp, default=lambda o: o.__dict__)", "def _SaveAuthentication(self):\n auth_file = self._AuthFilePath()\n try:\n dir = os.path.dirname(auth_file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n fh = open(auth_file, 'w')\n fh.write(self._user_cookie)\n fh.close()\n except:\n logging.fatal('Failed to save authorization file %s', auth_file, exc_info=True)\n raise ScenarioLoginError('Error saving auth file for client %s.' % self.name)", "def add_user_and_password(cls, username, password):\n\n users = cls.users\n users[username] = hashpw(password.encode('utf-8'),\n gensalt()).decode('utf-8')\n try:\n with open(cls.cwd + \"/users.json\", \"w\") as outfile:\n json.dump(users, outfile, sort_keys=True, indent=4)\n except:\n logger.info('Unable to write new user file.')\n cls.users = users", "def load_user():\n old_dir = os.getcwd()\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n with open(\"../config/user.yaml\", \"r\") as stream:\n SETTINGS[\"user\"] = yaml.safe_load(stream)\n os.chdir(old_dir)", "def testMapUserFile(self):\n test_path = self._GetTestFilePath(['NTUSER.DAT'])\n self._SkipIfPathNotExists(test_path)\n\n win_registry = registry.WinRegistry(\n registry_file_reader=TestWinRegistryFileReader())\n\n registry_file = win_registry._OpenFile(test_path)\n\n win_registry = registry.WinRegistry()\n profile_path = '%SystemRoot%\\\\System32\\\\config\\\\systemprofile'\n win_registry.MapUserFile(profile_path, registry_file)", "def write_user_data(self, user_id, user_data):\n users = self.read(\"users\")\n users[user_id] = user_data\n self.write('users', users)", "def savedusername(username=\"\"):\n\n _userPath = os.path.expanduser(\"~/.minerva/\")\n _userFile = os.path.join(_userPath, 'user')\n\n if not os.path.exists(_userPath):\n os.makedirs(_userPath)\n\n if not os.path.exists(_userFile):\n open(_userFile, 'w').close()\n\n if username == \"\":\n userFile = open(_userFile, 'r+')\n username = userFile.readline().strip()\n userFile.close()\n else:\n userFile = open(_userFile, 'w+')\n userFile.write(username)\n userFile.close()\n\n return username" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns first n rows
def head(self, n=5): col = self.copy() col.query.setLIMIT(n) return col.toPandas()
[ "def top(self, n):\n ttbl = self.order_cols()\n return ttbl.select(range(n+1))", "def head(data, n=3):\n print(data[:n])", "def head(iterable, n ):\n from itertools import islice\n return list(islice(iterable, n))", "def head_table(table: str, cur: psycopg2.extensions.cursor, n: int = 5):\n try:\n cur.execute(f\"SELECT * FROM {table} LIMIT {n}\")\n return [e for e in cur.fetchall()]\n except psycopg2.Error as e:\n print(f\"Error: {e}\")", "def task_10_list_first_10_customers(cur):\n sql = '''SELECT * FROM Customers \n WHERE ROWNUM < 11'''\n cur.execute(sql)\n return cur.fetchall()", "def first(n, generator):\n return itertools.islice(generator, n)", "def tail(self, n=None):\n if n is None:\n n = dataiter.DEFAULT_PEEK_ROWS\n n = min(self.nrow, n)\n return self.slice(np.arange(self.nrow - n, self.nrow))", "def first_elements(my_list, n):\r\n return my_list[0:n]", "def sample(self, n=None):\n if n is None:\n n = dataiter.DEFAULT_PEEK_ROWS\n n = min(self.nrow, n)\n rows = np.random.choice(self.nrow, n, replace=False)\n return self.slice(np.sort(rows))", "def first_n_timepoints(df, labels, n=30):\n # Ids of those who have n entries\n at_least_n_id = df.loc[pd.IndexSlice[:, n], :].index.get_level_values('id').unique()\n\n # Then get the first n entries of those people\n df_first_n = df.loc[pd.IndexSlice[at_least_n_id, :n], :]\n\n # Do to the labels\n labels = labels.loc[df_first_n.index]\n\n return df_first_n, labels", "def row(m, n):\r\n return m[n]", "def displayHead(df: pyspark.sql.DataFrame, nrows: int = 5):\n return df.limit(nrows).toPandas()", "def head(self, n=5):\n return PandasDataset(\n self.spark_df.limit(n).toPandas(),\n expectation_suite=self.get_expectation_suite(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_catch_exceptions_kwargs=False,\n discard_include_config_kwargs=False,\n ),\n )", "def take(self, n):\n return Enumerable3(itertools.islice(self, 0, n, 1))", "def select_n_rows(self,table_name,number_of_rows):\n query = \"\"\"SELECT * FROM {}\"\"\".format(table_name)\n self._cursor.execute(query)\n row = self._cursor.fetchmany(number_of_rows)\n for index,record in enumerate(row):\n print(index,record,'\\n')", "def _top_row_iter(self, n):\n row = [-1]*n\n pos = 0\n while pos >= 0:\n if pos == n:\n yield row[:]\n pos -= 1\n continue\n # If it would create an invalid entry, backstep\n if ( pos > 0 and (row[pos] >= row[pos-1] \\\n or (self._strict and row[pos] == row[pos-1]-1)) ) \\\n or (self._k is not None and row[pos] >= self._k):\n row[pos] = -1\n pos -= 1\n continue\n row[pos] += 1\n pos += 1", "def first_n_packets(features, *, n_packets: int):\n n_features = features.shape[1]\n idx = np.r_[:n_packets, (n_features - N_META_FEATURES):n_features]\n return features[:, idx]", "def get_first_five_rows(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n\n rows = [\n r.split(ROW_DELIMITER)\n for r in import_file.cached_second_to_fifth_row.splitlines()\n ]\n\n return {\n 'status': 'success',\n 'first_five_rows': [\n dict(\n zip(import_file.first_row_columns, row)\n ) for row in rows\n ]\n }", "def Take(iterable, n):\n return itf.take(iterable, n)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the opcode, a list of values, and a list of locations.
def read(self) -> Tuple[int, List[int], List[int]]: # Read the current instruction. ins = self.mem[self.pc] self.pc += 1 # Determine the opcode. op = ins % 100 # Start two lists, for values and locations. vals: List[int] = [] locs: List[int] = [] # Read each used mode, then save a value and optional location. for i in range(ARITY[Op(op)]): mode = ins // pow(10, 2 + i) % 10 if mode == Mode.POS: vals.append(self.mem[self.mem[self.pc]]) locs.append(self.mem[self.pc]) elif mode == Mode.IMM: vals.append(self.mem[self.pc]) locs.append(-1) else: raise RuntimeError("encountered invalid mode") self.pc += 1 return op, vals, locs
[ "def _get_ops(self):\n\n arglist = []\n arg = self.arg\n\n if arg is None:\n op = [(self.opcode, 0)]\n else:\n while arg > 0xff:\n arg = arg >> (8 * len(arglist))\n arglist.append((self.EXTENDED_ARG, arg & 0xff))\n\n arglist = arglist[::-1]\n if len(arglist) > 3:\n # No more than 3 EXTENDED_ARG opcodes can precede\n # an opcode\n raise RuntimeError(\n f'argument {arg} for {dis.opname[opcode]} too large')\n\n if arglist:\n # The argument associated with the actual instruction\n # is the last one in the arglist\n arg = arglist.pop()[1]\n\n op = [(self.opcode, arg)]\n\n return arglist + op", "def getOperandIndices(self) -> List[int]:\n ...", "def parse_location(die):\n\n\n if LOC in die.attributes:\n loc = die.attributes[LOC]\n elif CVAL in die.attributes:\n return '$' + str(die.attributes[CVAL].value)\n else:\n return ''\n\n if loc.form != EXPR:\n print('Unrecognized location encoding:')\n print('\\t%s\\t%s' % (die.attributes[LOC].form, die.attributes[LOC].value))\n return '???'\n\n try:\n if hasattr(loc, 'value'):\n loc = loc.value\n\n # shitty hack\n if type(loc) is int:\n loc = [loc]\n\n if loc[0] == OP_CFA:\n if len(loc) > 1:\n # Indicates (signed) LEB128 offset from base pointer\n return get_leb128(loc[1:])\n else:\n # Not sure what this means, maybe just %rbp ?\n return '%rbp'\n\n if loc[0] >= OP_REG and loc[0] < OP_BREG:\n # Indicates in-register location\n\n # TODO: figure out size of operand and change register name accordingly\n result = regs[loc[0] - OP_REG]\n return '%' + result\n\n if loc[0] >= OP_BREG:\n if len(loc) > 1:\n # Get offset from register\n offset = get_leb128(loc[1:])\n else:\n offset = ''\n\n try:\n # Get register\n reg = regs[loc[0] - OP_BREG]\n\n return [offset, reg]\n except:\n return '???'\n\n except:\n print('Unable to resolve location: %s' % loc)\n try: print('\\t(decoded: %s)' % get_leb128(loc))\n except: pass\n raise", "def _get_ops_details(self):\n return [\n self._get_op_details(idx) for idx in range(self._interpreter.NumNodes())\n ]", "def get_values():\n fcts = {\"arccos\": \"acos\",\n \"arcsin\": \"asin\",\n \"arctan\": \"atan\",\n \"conj\": \"conjugate\",\n \"abs\": \"Abs\",\n \"int\": \"integrate\",\n \"des\": \"apart\"\n }\n\n operators = {}\n\n constants = {\"i\": \"I\",\n \"j\": \"J\",\n \"inf\": \"oo\",\n \"ipi\": \"I*pi\",\n \"e\": \"E\"}\n\n advanced = {\"Laplace\": lambda __wild_sym__:\n laplace_transform(parse_expr(str(__wild_sym__)), parse_expr(\"t\"),\n parse_expr(\"s\"), noconds=True),\n \"Linv\": lambda __wild_sym__:\n inverse_laplace_transform(parse_expr(str(__wild_sym__)), parse_expr(\"s\"),\n parse_expr(\"t\"), noconds=True),\n \"step\": lambda __wild_sym__: Heaviside(__wild_sym__),\n \"dirac\": lambda __wild_sym__: DiracDelta(__wild_sym__),\n \"sym\": lambda __wild_sym__:\n Symbol(str(__wild_sym__)),\n }\n advanced[\"L\"] = advanced[\"Laplace\"]\n\n return fcts, operators, constants, advanced", "def opcode_4(\n memory: list, position: int, param_mode1: int,\n) -> (list, int):\n if param_mode1 == 0:\n print(memory[memory[position + 1]])\n elif param_mode1 == 1:\n print(memory[position + 1])\n return memory, position + 2", "def parse_instruction(self, line):\n instruction, *args = line.strip().replace(',', '').split()\n return instruction, args", "def getCoordAction(self, data):\n row, col, action = None, None, None\n for d in data[1:]:\n if 'Row' in d[0]:\n row = d\n if 'Col' in d[0]:\n col = d\n if 'Action' in d[0]:\n action = d\n return row, col, action", "def opcode_1(\n memory: list, position: int, param_mode1: int, param_mode2: 
int, param_mode3: int\n) -> (list, int):\n if param_mode1 == 0:\n param1 = memory[memory[position + 1]]\n else:\n param1 = memory[position + 1]\n if param_mode2 == 0:\n param2 = memory[memory[position + 2]]\n else:\n param2 = memory[position + 2]\n\n memory[memory[position + 3]] = param1 + param2\n\n return memory, position + 4", "def parse_byte_and_args(self) -> Tuple[str, Any, int]: # TODO: code in the middle of that\n f = self.frame\n opoffset = f.f_lasti\n byteCode = f.f_code.co_code[opoffset] # type: int\n assert type(byteCode) == int\n\n f.f_lasti += 1\n byteName = dis.opname[byteCode]\n arg = None # type: Optional[bytes]\n arguments = []\n\n if byteCode >= dis.HAVE_ARGUMENT:\n arg, f.f_lasti = f.f_code.co_code[f.f_lasti:f.f_lasti + 2], f.f_lasti + 2\n assert type(arg) == bytes, type(arg)\n\n intArg = arg[0] + (arg[1] << 8)\n if byteCode in dis.hasconst:\n arg = f.f_code.co_consts[intArg]\n elif byteCode in dis.hasname:\n arg = f.f_code.co_names[intArg]\n elif byteCode in dis.hasjrel:\n arg = f.f_lasti + intArg\n elif byteCode in dis.hasjabs:\n arg = intArg\n elif byteCode in dis.haslocal:\n arg = f.f_code.co_varnames[intArg]\n else:\n arg = intArg\n arguments = [arg]\n\n assert type(byteName) == str, (byteName, type(byteName))\n # assert False, (arguments, type(arguments)) #TODO:object triples\n assert type(opoffset) == int, (opoffset, type(opoffset))\n\n return byteName, arguments, opoffset", "def stub_2regs(self, opcode, name, opts):\n data = [x.strip() for x in opts.split(',')]\n if len(data) == 2:\n reg1 = self.parse_reg(data[0])\n reg2 = self.parse_reg(data[1])\n\n self.code += chr(opcode)\n self.code += self.output_num(reg1, False)\n self.code += self.output_num(reg2, False)\n else:\n raise ParseError('Unsupported %s: %s @%s' % (name, opts, self.line))\n\n return (self.code, reg1, reg2)", "def get_locations(self) -> np.ndarray:\n return np.transpose(self.state[:, :, Boids.Attr.LOC])", "def determine_arg_locations(self, arg_types):\n arg_locs = []\n if self.has_option(\"wincc\"):\n # Windows calling convention:\n int_regs = [\n (registers.rcx, registers.ecx),\n (registers.rdx, registers.edx),\n (registers.r8, registers.r8d),\n (registers.r9, registers.r9d),\n ]\n float_regs = [\n (registers.xmm0_single, registers.xmm0),\n (registers.xmm1_single, registers.xmm1),\n (registers.xmm2_single, registers.xmm2),\n (registers.xmm3_single, registers.xmm3),\n ]\n else:\n # Sys V ABI calling convention:\n int_regs = [\n (registers.rdi, registers.edi),\n (registers.rsi, registers.esi),\n (registers.rdx, registers.edx),\n (registers.rcx, registers.ecx),\n (registers.r8, registers.r8d),\n (registers.r9, registers.r9d),\n ]\n float_regs = [\n (registers.xmm0_single, registers.xmm0),\n (registers.xmm1_single, registers.xmm1),\n (registers.xmm2_single, registers.xmm2),\n (registers.xmm3_single, registers.xmm3),\n (registers.xmm4_single, registers.xmm4),\n (registers.xmm5_single, registers.xmm5),\n (registers.xmm6_single, registers.xmm6),\n (registers.xmm7_single, registers.xmm7),\n ]\n\n offset = 16\n for arg_type in arg_types:\n # Determine register:\n if arg_type in [\n ir.i8,\n ir.i64,\n ir.u8,\n ir.u64,\n ir.i16,\n ir.u16,\n ir.i32,\n ir.u32, # TODO: maybe use eax and friends?\n ir.ptr,\n ]:\n if int_regs:\n if arg_type in [ir.i32, ir.u32]:\n reg = int_regs.pop(0)[1]\n else:\n reg = int_regs.pop(0)[0]\n\n if self.has_option(\"wincc\"):\n float_regs.pop(0)\n else:\n # We need stack location!\n # arg_size = self.info.get_size(arg_type)\n arg_size = 8 # All integers are passed in 8 byte 
memory\n reg = StackLocation(offset, arg_size)\n offset += arg_size\n elif arg_type in [ir.f32, ir.f64]:\n if float_regs:\n if arg_type is ir.f32:\n reg = float_regs.pop(0)[0]\n else:\n reg = float_regs.pop(0)[1]\n\n if self.has_option(\"wincc\"):\n int_regs.pop(0)\n else:\n # We need stack location!\n arg_size = self.info.get_size(arg_type)\n reg = StackLocation(offset, arg_size)\n offset += arg_size\n elif isinstance(arg_type, ir.BlobDataTyp):\n reg = StackLocation(offset, arg_type.size)\n offset += arg_type.size\n else: # pragma: no cover\n raise NotImplementedError(str(arg_type))\n arg_locs.append(reg)\n\n return arg_locs", "def _unpack_opargs(code):\n extended_arg = 0\n n = len(code)\n offset = i = 0\n while i < n:\n op = code[i]\n i += CODE_LEN\n if op >= HAVE_ARGUMENT:\n arg = code[i] | extended_arg\n for j in range(ARG_LEN):\n arg |= code[i + j] << (8 * j)\n i += ARG_LEN\n if op == EXTENDED_ARG:\n # This is a deviation from what dis does...\n # In python 3.11 it seems like EXTENDED_ARGs appear more often\n # and are also used as jump targets. So as to not have to do\n # \"book keeping\" for where EXTENDED_ARGs have been \"skipped\"\n # they are replaced with NOPs so as to provide a legal jump\n # target and also ensure that the bytecode offsets are correct.\n yield (offset, OPCODE_NOP, arg, i)\n extended_arg = arg << 8 * ARG_LEN\n offset = i\n continue\n else:\n arg = None\n i += NO_ARG_LEN\n\n extended_arg = 0\n yield (offset, op, arg, i)\n offset = i # Mark inst offset at first extended", "def get_indices(self, modes: str, *indices: int) -> Union[int, List[int]]:\n logger = logging.getLogger(__name__)\n output = []\n for mode, index in zip(reversed(modes), indices):\n\n logger.warning(\"Getting value %r: %d\", mode, index)\n if mode == \"0\":\n index = self[index]\n logger.warning(\" from position: %d\", index)\n elif mode == \"1\":\n pass\n elif mode == \"2\":\n index = self[index]+self.offset\n logger.warning(\" using relative base %d\", self.offset)\n logger.warning(\" from position: %d\", index)\n\n output.append(index)\n logger.warning(\" referencing value: %d\", self[index])\n\n if len(output) == 1:\n output = output[0]\n return output", "def _parse_one_instruction(cls, instr):\n opcode = Opcode(instr % 100)\n instr //= 100 # get rid of the opcode\n num_param = cls.NUM_PARAMS_OF_OPCODE[opcode]\n parameter_modes = []\n for i in range(num_param):\n parameter_modes.append(ParamMode(instr % 10))\n instr //= 10\n return opcode, parameter_modes", "def get_instructions ():\n try:\n instruction = sys.argv[1]\n try:\n target_id = int (sys.argv[2])\n return (instruction, target_id)\n except:\n return (instruction, None)\n except:\n print ('ERROR: I need instructions!')\n sys.exit()", "def get_instruction_tokens(self, _ea):\n\t\tif (_ea != BADADDR):\n\t\t\treturn filter(None, GetDisasm(_ea).split(\" \"))", "def build_location_array():\n\n # Gets location data based on input\n location_data = GEO_LOCATOR.geocode(address_input())\n\n return [location_data.address, location_data.latitude, location_data.longitude]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do these programs correctly compare inputs against values?
def test_input_comparisons() -> None: cases = [ ( # Using position mode, is the input equal to 8? [3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], [(-1, 0), (0, 0), (8, 1), (9, 0)], ), ( # Using position mode, is the input less than 8? [3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], [(-1, 1), (0, 1), (8, 0), (9, 0)], ), ( # Using immediate mode, is the input equal to 8? [3, 3, 1108, -1, 8, 3, 4, 3, 99], [(-1, 0), (0, 0), (8, 1), (9, 0)], ), ( # Using immediate mode, is the input less than 8? [3, 3, 1107, -1, 8, 3, 4, 3, 99], [(-1, 1), (0, 1), (8, 0), (9, 0)], ), ( # If less than 8, 999; if 8, 1000; if greater than 8, 1001. [ 3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0, 0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4, 20, 1105, 1, 46, 98, 99 ], [(-1, 999), (0, 999), (8, 1000), (9, 1001)], ), ] vm = VirtualMachine() for program, pairs in cases: for x, y in pairs: vm.load(program, x) assert next(vm.execute()) == y
[ "def check_inputs(args):\n check_fail = False\n check_fail = check_sample(args.base, args.bSample)\n check_fail = check_sample(args.comp, args.cSample)\n return check_fail", "def test_program5_1(self):\n _, outputs = self.run_program([3, 0, 4, 0, 99], [17])\n self.assertListEqual(outputs, [17])", "def input_valid(self, settings_to_test):\n return (True, \"ok\")\n #return (False, \"All arguments are assumed invalid until verified\")", "def validate_input(a,b,p): \n first_term = 4*(a**3) % p\n second_term = 27*(b**2) % p\n result = ( first_term + second_term ) % p\n\n return result != 0", "def test_program9_1(self):\n program = [109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99]\n _, outputs = self.run_program(program)\n self.assertListEqual(program, outputs)", "def testComparison(self):\n good_code= \"+++++\"\n bad_code = \"++++\"\n good_org = organism.Organism(good_code)\n bad_org = organism.Organism(bad_code)\n target = [5,0,0,0,0,0,0,0,0,0]\n good_org.evaluate(target)\n bad_org.evaluate(target)\n self.assert_(good_org > bad_org)", "def test_inputs(self):\n\n assert False", "def test_good_input():\n # ------------------------------------------------------------------\n # We supplied tests for this function.\n # ------------------------------------------------------------------\n print()\n print('--------------------------------------------------')\n print('Testing the good_input function:')\n print('--------------------------------------------------')\n\n good_input()", "def ask_and_evaluate(self):\n answer = raw_input(self.question + \" > \")\n if answer == self.correct_answer:\n return True\n return False", "def test_program5_7(self):\n program = [3, 3, 1107, -1, 8, 3, 4, 3, 99]\n\n _, outputs = self.run_program(program, [7])\n self.assertListEqual(outputs, [1])\n\n _, outputs = self.run_program(program, [10])\n self.assertListEqual(outputs, [0])", "def test_print_correct_answer(self, capsys):\n\n app_functions.print_correct_answer(2, 3, \"sum\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"No! The sum of 2 and 3 is 5!\"\n\n app_functions.print_correct_answer(0, 6, \"sum\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"No! The sum of 0 and 6 is 6!\"\n\n app_functions.print_correct_answer(2, 3, \"difference\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"No! The difference between 2 and 3 is 1!\"\n\n app_functions.print_correct_answer(3, 2, \"difference\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"No! The difference between 3 and 2 is 1!\"\n\n app_functions.print_correct_answer(0, 6, \"difference\")\n captured = capsys.readouterr() # capture print output\n assert captured.out.strip() == \"No! 
The difference between 0 and 6 is 6!\"", "def test_is_correct_answer(self):\n actual = app_functions.is_correct_answer(4, 2, \"sum\", 6)\n assert actual == True\n\n actual = app_functions.is_correct_answer(4, 2, \"sum\", 3)\n assert actual == False\n\n actual = app_functions.is_correct_answer(4, 2, \"difference\", 2)\n assert actual == True\n\n actual = app_functions.is_correct_answer(4, 2, \"difference\", 3)\n assert actual == False\n\n actual = app_functions.is_correct_answer(2, 4, \"difference\", 2)\n assert actual == True", "def test_program5_6(self):\n program = [3, 3, 1108, -1, 8, 3, 4, 3, 99]\n\n _, outputs = self.run_program(program, [7])\n self.assertListEqual(outputs, [0])\n\n _, outputs = self.run_program(program, [8])\n self.assertListEqual(outputs, [1])", "def test_check_input_exit(self):\n self.assertTrue(self.utils.check_input('X', 'X'))\n self.assertTrue(self.utils.check_input('x', 'X'))\n self.assertTrue(self.utils.check_input('Exit', 'X'))\n self.assertTrue(self.utils.check_input('eXiT', 'X'))\n self.assertTrue(self.utils.check_input('EXIT', 'X'))\n self.assertFalse(self.utils.check_input('quit', 'X'))", "def check_vals(self, valid_vals, actual_val):\r\n\r\n\t\tk = 1\r\n\r\n\t\t# checking among each value of valid values\r\n\t\tfor val in valid_vals:\r\n\t\t\tif str(actual_val).strip().upper() == str(val).strip().upper():\r\n\t\t\t\tk = 0\r\n\t\t\t\treturn k\r\n\r\n\t\treturn k", "def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)", "def test_accepts_input_true(self):\n nose.assert_equal(self.dtm1.accepts_input('00001111'), True)", "def test_guess_correct():\n\n assert update_guess(1, 0.3, 0.1, 0.7) >= 0.3\n assert update_guess(1, 0.1, 0.3, 0.7) >= 0.1\n assert update_guess(1, 0.01, 0.01, 0.01) >= 0.01\n assert update_guess(1, 0.49, 0.49, 0.99) >= 0.49", "def test_round_verdict(inputs, expected):\n assert g1.round_verdict(inputs) == expected, \"Verdict has to match possible values.\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set params for different versions of emulator
def _set_params_by_version(self): pass
[ "def prepare_emulator(self):", "def test_set_system_param(self):\n pass", "def set_model_params(self, params):", "def _setSpecificEmulationOptions(self):\n\n # CondDB usage by L0Muon emulator\n if self.isPropertySet(\"IgnoreL0MuonCondDB\"):\n log.info(\"L0Muon emulator will use the event TCK to get the FOI values : %s\"%(self.getProp(\"IgnoreL0MuonCondDB\")))\n l0muon = emulateL0Muon()\n l0muon.IgnoreCondDB = self.getProp(\"IgnoreL0MuonCondDB\")\n \n # TCK used by the L0Muon emulator \n if self.isPropertySet(\"L0MuonUseTCKFromData\"):\n log.info(\"L0Muon emulator will use the event TCK to get the FOI values : %s\"%(self.getProp(\"L0MuonUseTCKFromData\")))\n l0muon = emulateL0Muon()\n l0muon.UseTCKFromData = self.getProp(\"L0MuonUseTCKFromData\")\n \n # Set l0context for the emulation\n if self.isPropertySet(\"L0EmulatorContext\"):\n l0context = self.getProp(\"L0EmulatorContext\")\n log.info( \"The results of the L0 emulation will be written at location+%s\"%(l0context) )\n emulateL0Calo().L0Context = l0context\n emulateL0Muon().L0Context = l0context\n emulateL0DU().L0Context = l0context\n\n if self.isPropertySet(\"L0MuonForceLUTVersion\"):\n lutversion = self.getProp(\"L0MuonForceLUTVersion\")\n emulateL0Muon().LUTVersion = lutversion\n\n # Set electron emulation and Hcal threshold depending on data type\n if self.isPropertySet(\"DataType\"):\n datatype = self.getProp(\"DataType\")\n if datatype == \"2011\" or datatype == \"2010\" or datatype == \"2009\":\n emulateL0Calo().HcalThreshold = 0\n emulateL0Calo().EcalThreshold = 0\n elif datatype == \"2012\":\n emulateL0Calo().HcalThreshold = 8 # (default)\n emulateL0Calo().EcalThreshold = 5 # (default)\n if datatype == \"2010\" or datatype == \"2009\":\n emulateL0Calo().UseNewElectron = False\n else:\n emulateL0Calo().UseNewElectron = True\n\n if not self.isPropertySet(\"L0MuonForceLUTVersion\"):\n if datatype == \"2009\" or datatype == \"2010\" or datatype == \"2011\":\n emulateL0Muon().LUTVersion = \"V1\"\n elif datatype == \"2012\":\n emulateL0Muon().LUTVersion = \"V3\"\n elif datatype == \"2015\":\n emulateL0Muon().LUTVersion = \"V8\"", "def setParameters():\n ip = '192.168.1.143'\n port = 9559\n myBroker = naoqi.ALBroker(\"myBroker\", \"0.0.0.0\", 0, ip, port)\n connector = RobotConnect(\"Naomi\")\n connector.setPostureProxy()\n connector.setMotionProxy()\n connector.setVideoProxy()\n return connector", "def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1", "def set_boot_os_specific_parameters(self, data):\n if len(data) > constants.BOOT_OS_SPECIFIC_PARAMETERS_MAX_LEN:\n raise exceptions.BootOsSpecificParametersPropertyExceededError()\n self.partition.update_properties({\n 'boot-os-specific-parameters': data\n })", "def sdk_configure(self, platform_name):\n\n pass", "def set_params(self, **params):\n for key, value in params.items():\n if hasattr(self, key):\n if key == 'layers':\n value = list(value)\n setattr(self, key, value)\n else:\n # accessing deep parameters\n param, sep, param_of_param = key.partition('__')\n if sep != '__':\n raise ValueError(key + ' is an invalid parameter a Theanets estimator')\n if param == 'trainers':\n index, sep, param = param_of_param.partition('_')\n index = int(index)\n if index >= len(self.trainers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n if param == '':\n # e.g. 
trainers__0 = {'optimize': 'sgd', 'learning_rate': 0.3}\n self.trainers[index] = value\n else:\n # e.g. trainers__0_optimize = 'sgd'\n self.trainers[index][param] = value\n elif param == 'layers':\n index = int(param_of_param)\n if index >= len(self.layers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n self.layers[index] = value\n elif param == 'scaler':\n try:\n self.scaler.set_params(**{param_of_param: value})\n except Exception:\n raise ValueError('was unable to set parameter {}={} '\n 'to scaler {}'.format(param_of_param, value, self.scaler))\n else:\n raise ValueError(key + ' is an invalid parameter for a Theanets estimator')", "def _PrepareQemuArgs(self, binary, net_type, window_scale, with_audio,\n with_boot_anim):\n self._emulator_start_args = [\n binary,\n '-ports', '%s,%s' % (self.emulator_telnet_port,\n self.emulator_adb_port),\n '-skin', self._metadata_pb.skin,\n '-timezone', 'America/Los_Angeles',\n '-cache', 'cache.img', # only respected via cmdline flag.\n '-memory', str(self._MemoryMb()),\n '-sdcard', 'sdcard.img',\n '-ramdisk', 'ramdisk.img',\n '-partition-size', '2047',\n '-no-snapshot-save',\n '-verbose',\n '-unix-pipe', 'sockets/qemu.mgmt',\n '-unix-pipe', 'sockets/device-forward-server',\n '-unix-pipe', 'sockets/tar-pull-server',\n '-unix-pipe', 'sockets/exec-server',\n '-unix-pipe', 'sockets/tar-push-server',\n '-unix-pipe', 'sockets/h2o',\n '-writable-system',\n '-show-kernel']\n\n if (self._metadata_pb.emulator_type ==\n emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU2):\n self._emulator_start_args.extend(['-engine', 'qemu2',\n '-kernel', self._KernelFileName()])\n self._emulator_start_args.extend(['-system', 'system.img'])\n\n if self._metadata_pb.emulator_architecture == 'x86':\n self._emulator_start_args.extend(['-feature', 'AllowSnapshotMigration'])\n self._emulator_start_args.extend(['-feature', '-GLDMA'])\n self._emulator_start_args.extend(['-feature', '-OnDemandSnapshotLoad'])\n\n if os.path.exists(self._VendorFile()):\n self._emulator_start_args.extend(['-vendor', 'vendor.img'])\n\n if os.path.exists(self._UserdataQemuFile()):\n self._emulator_start_args.extend(['-data', 'userdata-qemu.img'])\n\n if os.path.exists(self._EncryptionKeyImageFile()):\n self._emulator_start_args.extend(['-encryption-key', 'encryptionkey.img'])\n\n if self._display:\n open_gl_driver = self._display.open_gl_driver\n else:\n open_gl_driver = self.BestOpenGL()\n self._emulator_start_args.append('-no-window')\n\n # Most of our open_gl_driver options map directly to a -gpu option of the\n # emulator, but no_open_gl is weird -- the emulator has no such option, and\n # all Android devices have some form of OpenGL support. 
To preserve old\n # behavior, we map it to -gpu off.\n gpu = 'off' if open_gl_driver == NO_OPEN_GL else open_gl_driver\n self._emulator_start_args.extend(['-gpu', gpu])\n\n if (not self._enable_gps and\n self._metadata_pb.emulator_type ==\n emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU):\n self._emulator_start_args.extend(['-gps', 'null'])\n\n if not with_audio:\n if (self._metadata_pb.emulator_type ==\n emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU2):\n self._emulator_start_args.extend(['-no-audio'])\n else:\n self._emulator_start_args.extend(['-audio', 'none'])\n\n if not with_boot_anim:\n self._emulator_start_args.append('-no-boot-anim')\n\n se_linux_mode = [prop for prop in self._metadata_pb.boot_property\n if prop.name == 'ro.initial_se_linux_mode']\n\n if se_linux_mode:\n assert len(se_linux_mode) == 1, 'Too many values: %s' % se_linux_mode\n se_linux_mode = se_linux_mode[0].value\n valid_modes = ['disabled', 'permissive']\n assert se_linux_mode in valid_modes, ('%s invalid. Only options are: %s'\n '. If not specified and API > 19 '\n 'defaults to enforcing.' % (\n se_linux_mode, valid_modes))\n self._emulator_start_args.extend(['-selinux', se_linux_mode])\n\n if window_scale:\n self._emulator_start_args.extend(\n ['-scale', str(window_scale / 100.0)])\n if not window_scale or window_scale == 100:\n self._emulator_start_args.append('-fixed-scale')\n\n if net_type is None or net_type == 'off':\n net_delay = self._metadata_pb.net_delay\n net_speed = self._metadata_pb.net_speed\n else:\n net_delay = NET_TYPE_TO_DELAY[net_type]\n net_speed = NET_TYPE_TO_SPEED[net_type]\n self._emulator_start_args.extend(\n ['-netdelay', net_delay, '-netspeed', net_speed])\n\n avd_name = self._MakeAvd()\n self._emulator_start_args.extend(['-avd', avd_name])\n\n if self._sim_access_rules_file:\n self._emulator_start_args.extend(\n ['-sim-access-rules-file', self._sim_access_rules_file])\n\n if self._phone_number:\n self._emulator_start_args.extend(['-phone-number', self._phone_number])\n\n if (self._metadata_pb.qemu_arg or\n self._qemu_gdb_port or\n self._enable_single_step or\n net_type == 'off' or\n not self._enable_g3_monitor):\n self._emulator_start_args.append('-qemu')\n\n if self._metadata_pb.qemu_arg:\n self._emulator_start_args.extend(self._metadata_pb.qemu_arg)\n self._emulator_start_args.extend(\n ['-L', self.android_platform.MakeBiosDir(self._TempDir('bios'))])\n\n if self._qemu_gdb_port:\n self._emulator_start_args.extend(['-gdb',\n 'tcp::%d' % self._qemu_gdb_port])\n if self._enable_single_step:\n self._emulator_start_args.append('-S')\n\n if net_type == 'off':\n # TODO: fix this for IPV6\n # We always want to allow tcp connections to host for testing purpose.\n # BTW, there is a bug in emulator, so we have to use 1-65534 instead of\n # 1-65535.\n self._emulator_start_args.extend(['-drop-tcp', '-drop-udp',\n '-allow-tcp', '10.0.2.2:[1-65534]'])\n\n # init process of Android will set a system property begin with\n # 'ro.kernel' for every key=value pair added here.\n # See:\n # https://android.googlesource.com/platform/system/core/+/gingerbread/init/init.c#424\n kernel_args = []\n\n # g3_monitor is not supported in mini_boot mode.\n if self._mini_boot or not self._enable_g3_monitor:\n kernel_args.append('g3_monitor=0')\n\n kernel_args.append('enable_test_harness=%d' %\n (1 if FLAGS.enable_test_harness else 0))\n if kernel_args:\n self._emulator_start_args.extend(['-append', ' '.join(kernel_args)])", "def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if 
k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('c_'):\n pc[k[2:]] = v\n else:\n raise ValueError( # pragma: no cover\n f\"Unexpected parameter name '{k}'\")\n self.clus.set_params(**pc)\n self.estimator.set_params(**pe)", "def test_set_agent_parameter(self):\n pass", "def test_override_ua_parameters(self):\n\n with ThreadedWebServer(\n JavascriptRequestDetector,\n binding_address=self.GetBindingAddress()) as server:\n with self.CreateCobaltRunner(\n url=server.GetURL(file_name='testdata/override_ua_parameters.html'),\n target_params=[\n '--user_agent_client_hints='\\\n 'aux_field=foo.bar.baz.qux/21.2.1.41.0;'\\\n 'brand=Cobalt;'\\\n 'build_configuration=debug;'\\\n 'chipset_model_number=foobar0000;'\\\n 'cobalt_build_version_number=289852;'\\\n 'cobalt_version=21.lts.2;'\\\n 'connection_type=Wireless;'\\\n 'device_type=ATV;'\\\n 'evergreen_type=;'\\\n 'evergreen_version=;'\\\n 'javascript_engine_version=v8/7.7.299.8-jit;'\\\n 'firmware_version=;'\\\n 'model=QUUX;'\\\n 'model_year=2018;'\\\n 'original_design_manufacturer=Quuz;'\\\n 'os_name_and_version=Corge grault-v7a\\\\; Garply 7.1.2\\\\; '\\\n 'Waldo OS 6.0;'\\\n 'starboard_version=Starboard/12;'\\\n 'rasterizer_type=gles'\n ]) as runner:\n runner.WaitForJSTestsSetup()\n self.assertTrue(runner.JSTestsSucceeded())", "def setparms(hamtrain, spamtrain, hamtest=None, spamtest=None, seed=None):\n global HAMTEST, SPAMTEST, HAMTRAIN, SPAMTRAIN, SEED\n HAMTRAIN, SPAMTRAIN = hamtrain, spamtrain\n if hamtest is None:\n HAMTEST = HAMTRAIN\n else:\n HAMTEST = hamtest\n if spamtest is None:\n SPAMTEST = SPAMTRAIN\n else:\n SPAMTEST = spamtest\n if seed is not None:\n SEED = seed", "def update_board_params(self, **params):\n self.board_generator.set_board_params(**params)", "def do_set(self, args):\n\n split_args = args.split()\n if len(split_args) < 1:\n module_logger.error(\"You must provide at least one argument\".format(args))\n elif len(split_args) == 1:\n if split_args[0] == \"iface\":\n iface = interface.get_first_interface()\n\n if iface is not None:\n self._params.iface = iface\n else:\n module_logger.error(\"There are no wireless interfaces available.\")\n elif split_args[0] == 'macs':\n self._params.macs = []\n else:\n module_logger.error(\"Parameters require a value\".format(split_args[0]))\n elif split_args[0] in meta.Params.VALID_PARAMS:\n try:\n param = split_args[0]\n value = split_args[1]\n # Validate certain parameters\n if split_args[0] == \"iface\":\n self._params.iface = value\n elif param == \"duration\":\n self._params.duration = value\n elif param == \"degrees\":\n self._params.degrees = value\n elif param == \"bearing\":\n self._params.bearing_magnetic = value\n elif param == \"hop_int\":\n self._params.hop_int = value\n elif param == \"hop_dist\":\n self._params.hop_dist = value\n elif param == \"mac\":\n self._params.add_mac(value)\n elif param == \"macs\":\n # Load macs from provided file\n self._params.add_mac(localizer.load_macs(value))\n elif param == \"channel\":\n self._params.channel = value\n elif param == \"capture\":\n self._params.capture = value\n\n print(\"Parameter '{}' set to '{}'\".format(param, value))\n\n except (ValueError, FileNotFoundError) as e:\n module_logger.error(e)\n else:\n module_logger.error(\"Invalid parameter '{}'\".format(split_args[0]))\n\n self._update_prompt()", "def update_params_for_each_scenario(self, hyperbel_params=None, ots_params=None):\r\n\r\n for szenario in self.media_params:\r\n if hyperbel_params is not None:\r\n assert (len(hyperbel_params) == len(self.media))\r\n 
self.media_params[szenario]['Hyperbel'] = hyperbel_params # update hyperbel params\r\n if ots_params is not None:\r\n assert (len(ots_params) == len(self.media))\r\n self.media_params[szenario]['OTS-beta'] = ots_params # update ots-params\r", "def set_params(self, **kwargs):\n _api.warn_external(\n \"'set_params()' not defined for locator of type \" +\n str(type(self)))", "def set_parameters(self, params):\n self.param_list = params", "def set_opts(self, **kwargs):\n raise NotImplementedError('Function not implemented in base class.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update parent's window rectangle.
def _update_rect_from_parent_hwnd(self):
    if not self.parent_hwnd:
        return
    try:
        rect = win32gui.GetWindowRect(self.parent_hwnd)
        self.parent_x = rect[0]
        self.parent_y = rect[1]
        self.parent_width = rect[2] - rect[0]
        self.parent_height = rect[3] - rect[1]
    except pywintypes.error:
        pass
[ "def redraw_window(cls):\n lives_font = pg.font.SysFont(Config.font_style, 30)\n score_font = pg.font.SysFont(Config.font_style, 30)\n Game.screen.blit(Game.bg_obj, (0,0))\n lives_label = lives_font.render('Lives: ', 1, 'yellow')\n heart_x = lives_label.get_rect().right + 10\n for _ in range(Game.lives):\n Game.screen.blit(Game.heart_obj, (heart_x, 0.9*Config.resolution[1]))\n heart_x += 60\n\n score_label = score_font.render('Score: ' + str(Game.score), 1, 'yellow')\n rocket_rect = Game.rocket_obj.get_rect(center=(Config.resolution[0]/2, 0.9*Config.resolution[1]))\n\n Game.screen.blit(lives_label, (10, 0.9*Config.resolution[1]))\n Game.screen.blit(Game.rocket_obj, rocket_rect)\n Game.screen.blit(score_label, (0.9*Config.resolution[0], 0.9*Config.resolution[1]))", "def _redraw_now(self, rgn):\n if self._parent is None:\n return\n self._parent._redraw_now(rgn)", "def window_resize(self):\r\n\r\n offset = (self.canvas.winfo_width()-self.container.winfo_reqwidth(),\r\n self.canvas.winfo_height()-self.container.winfo_reqheight())\r\n self.check_scrollbar(self.hsb, offset[0])\r\n self.check_scrollbar(self.vsb, offset[1])\r\n self.canvas.itemconfig(\r\n self.window,\r\n width=max(self.canvas.winfo_width(), self.container.winfo_reqwidth()),\r\n height=max(self.canvas.winfo_height(), self.container.winfo_reqheight())\r\n )", "def update_screen_rect(self):\n self.rect.center = self.player.x, self.player.y", "def _redraw(self, rgn=None):\n if self._parent is None:\n return\n\n olddrawenviron = self._mac_setwin()\n\n # First do opaque subwindows, topmost first\n still_to_do = []\n for child in self._subwindows:\n if not child._transparent:\n child._redraw(rgn)\n else:\n still_to_do.append(child)\n\n # Next do ourselves\n saveclip = Qd.NewRgn()\n Qd.GetClip(saveclip)\n clip = self._mac_getclip()\n if not Qd.EmptyRgn(clip):\n Qd.SetClip(clip)\n if not self._outline_color is None:\n Qd.RGBForeColor(self._outline_color)\n rect = self.qdrect()\n Qd.FrameRect(rect)\n Qd.RGBBackColor(self._bgcolor)\n Qd.RGBForeColor(self._fgcolor)\n if self._redrawfunc:\n if not self._transparent:\n # XXX This causes flashing in movie windows and such.\n Qd.EraseRect(self.qdrect())\n self._redrawfunc(rgn)\n else:\n self._do_redraw(rgn)\n\n # Then do transparent children bottom-to-top\n still_to_do.reverse()\n for child in still_to_do:\n child._redraw(rgn)\n # Then do the transition on our full clip (including children)\n # XXX should only be done in topmost window\n clipincludingchildren = self._mac_getclip(includechildren=1)\n Qd.SetClip(clipincludingchildren)\n if self._transition and self._transition.ismaster(self):\n self._transition.changed()\n Qd.SetClip(saveclip)\n Qd.DisposeRgn(saveclip)\n\n self._mac_unsetwin(olddrawenviron)\n\n # Last, do the rubber box, if rubberboxing\n if self is _in_create_box:\n self._rb_redraw()", "def _updateSizes(self) -> None:\n print(f'QmlOffscreenRenderer._updateSizes: {self.size}')\n width, height = self.size.toTuple()\n self._window.setGeometry(0, 0, width, height)\n self._rootItem.setWidth(width)\n self._rootItem.setHeight(height)", "def recalcAspectRatio(self, window):\n # set the mainframe size to the window borders again\n self.frameMain[\"frameSize\"] = (\n base.a2dLeft, base.a2dRight,\n base.a2dTop, base.a2dBottom)\n\n # calculate new aspec tratio\n wp = window.getProperties()\n aspX = 1.0\n aspY = 1.0\n wpXSize = wp.getXSize()\n wpYSize = wp.getYSize()\n if wpXSize > wpYSize:\n aspX = wpXSize / float(wpYSize)\n else:\n aspY = wpYSize / float(wpXSize)\n # calculate new 
position/size/whatever of the gui items\n self.title.setPos(0.0, 0.0, base.a2dTop - self.textscale)\n self.menuBackground.setScale(1.0 * aspX, 1.0, 1.0 * aspY)\n self.cbVolumeMute.setPos(base.a2dRight - 0.15, 0, base.a2dBottom + 0.15)", "def adjust_window_to_current_width(self, event=None):\n width = self.root.winfo_width()\n self.set_window_size(width)", "def graphic_window(self):", "def windowEvent(self, *args, **kwargs):\n super().windowEvent(*args, **kwargs)\n\n for win, cam, pixel2d in self.forcedAspectWins:\n aspectRatio = self.getAspectRatio(win)\n cam.node().getLens().setAspectRatio(aspectRatio)\n\n # Fix pixel2d scale for new window size\n # Temporary hasattr for old Pandas\n if not hasattr(win, 'getSbsLeftXSize'):\n pixel2d.setScale(2.0 / win.getXSize(), 1.0, 2.0 / win.getYSize())\n else:\n pixel2d.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())", "def place_at_edge(parent, win, padding=10):\n\n # make sure windows are updated so we get current positions\n win.update_idletasks()\n parent.update_idletasks()\n\n # width and height of window to place\n width = win.winfo_width()\n height = win.winfo_height()\n\n # position and size of parent\n parent_x_upperleft = parent.winfo_rootx()\n parent_y_upperleft = parent.winfo_rooty()\n titlebar_height = parent.winfo_rooty() - parent.winfo_y()\n parent_width = parent.winfo_width()\n\n win.geometry(\n \"{}x{}+{}+{}\".format(\n width,\n height,\n parent_x_upperleft + parent_width + padding,\n parent_y_upperleft - titlebar_height,\n )\n )", "def user32_AdjustWindowRect(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpRect\", \"dwStyle\", \"bMenu\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def update(self):\n self.rect.topleft = (self.x * BOX_LENGTH, self.y * BOX_LENGTH)", "def resize(self, event):\n self.redraw()", "def set_window_rect(self, x=None, y=None, width=None, height=None):\n self._selenium_web_driver().set_window_rect(x, y, width, height)", "def update_unit_rect(self, unit):\n # Maps the tile and the GUI positions together to the units.\n x, y = unit.tilex, unit.tiley\n screen_x, screen_y = x*TILE_DIMENSION+10, y*TILE_DIMENSION+10\n unit.rect.x = screen_x\n unit.rect.y = screen_y", "def _scrollupdate(self, old_x, old_y):\n new_x = self._barx.GetControlValue()\n new_y = self._bary.GetControlValue()\n Qd.SetPort(self._onscreen_wid)\n # See whether we can use scrollrect. 
Only possible if no updates pending.\n updrgn = Qd.NewRgn()\n self._onscreen_wid.GetWindowUpdateRgn(updrgn)\n## self._onscreen_wid.GetWindowRegion(Windows.kWindowUpdateRgn, updrgn)\n if Qd.EmptyRgn(updrgn):\n # Scroll, and get the new vacated region back\n Qd.ScrollRect(self.qdrect(), old_x-new_x, old_y-new_y, updrgn)\n else:\n # ok, update the whole window\n Qd.RectRgn(updrgn, self.qdrect())\n self._onscreen_wid.InvalWindowRgn(updrgn)\n Qd.DisposeRgn(updrgn)\n self._canvaspos = new_x, new_y", "def OnPaint (self, event):\n scrollWindowOriginX, scrollWindowOriginY = self.CalcUnscrolledPosition (0, 0)\n\n paintDC = wxPaintDC (self)\n self.PrepareDC (paintDC)\n\n \"\"\"\n Calculate the rectangle that needs updating in scrolled coordinates\n \"\"\"\n updateRect = self.GetUpdateRegion().GetBox()\n bufferX = updateRect.GetLeft() + scrollWindowOriginX\n bufferY = updateRect.GetTop() + scrollWindowOriginY\n bufferWidth = updateRect.GetWidth()\n bufferHeight = updateRect.GetHeight()\n\n memoryDC = wxMemoryDC()\n offscreenBuffer = wxEmptyBitmap (bufferWidth, bufferHeight)\n memoryDC.SelectObject (offscreenBuffer)\n memoryDC.SetDeviceOrigin (-bufferX, -bufferY)\n\n \"\"\"\n Debugging code that makes it easy to see which areas are updating.\n \"\"\"\n if 0:\n success = paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n paintDC,\n bufferX,\n bufferY,\n wxSRC_INVERT)\n time.sleep(1)\n success = paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n paintDC,\n bufferX,\n bufferY,\n wxSRC_INVERT)\n\n\n memoryDC.BeginDrawing()\n\n self.DrawBackground (memoryDC)\n self.Draw (memoryDC)\n\n paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n memoryDC,\n bufferX,\n bufferY)\n\n memoryDC.EndDrawing()", "def update(self):\n\t\tself.gui.draw_image(self.image, self.getRealPos())\n\t\tWidget.update(self)", "def adjust_window_to_current_state(self, event=None):\n # zoomed to normal\n if((self.unit == self.root.winfo_screenheight()//GRID_ROWS-2 or\n self.unit == self.root.winfo_screenwidth()//GRID_COLUMNS) and\n self.root.state() == \"normal\"):\n width = DEFAULT_WIDTH_WINDOW\n self.set_window_size(width)\n # normal to zoomed\n if(not (self.unit == self.root.winfo_screenheight()//GRID_ROWS-2 or\n self.unit == self.root.winfo_screenwidth()//GRID_COLUMNS) and\n self.root.state() == \"zoomed\"):\n width = self.root.winfo_screenwidth()\n self.set_window_size(width)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get child window info.
def _get_emulator_window_info(self, hwnd, wildcard):
    if self.child_name in win32gui.GetWindowText(hwnd):
        self.hwnd = hwnd
        self._update_rect_from_main_hwnd()
[ "def _get_window(self):\r\n pass", "def getspectralwindowinfo(self):\n return _ms.ms_getspectralwindowinfo(self)", "def user32_GetWindowInfo(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hwnd\", \"pwi\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_window(self):\n return self.get_object(self.__winid)", "def _get_main_window(self):\n return self._window_ref()", "def user32_ChildWindowFromPointEx(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hwndParent\", \"pt\", \"uFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_window(self, event):\n if event.insaxes is None:\n return None\n return event.inaxes.figure.canvas.parent()", "def cmd_internal_windows(self):\r\n return [\r\n i.info() for i in self.windowMap.values()\r\n if isinstance(i, window.Internal)\r\n ]", "def user32_ChildWindowFromPoint(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWndParent\", \"Point\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def getWin( self ):\n return self.win", "def Window(*args, **kwargs):\n return _aui.AuiPaneInfo_Window(*args, **kwargs)", "def window(args):\n uid = args.get(\"win\", get_new_window_id())\n version = args.get(\"version\", 1)\n if uid is None:\n uid = get_new_window_id()\n opts = args.get(\"opts\", {})\n\n ptype = args[\"data\"][0][\"type\"]\n\n p = {\n \"command\": \"window\",\n \"version\": version,\n \"id\": str(uid),\n \"title\": opts.get(\"title\", \"\"),\n \"inflate\": opts.get(\"inflate\", True),\n \"width\": opts.get(\"width\"),\n \"height\": opts.get(\"height\"),\n \"contentID\": get_rand_id(), # to detected updated windows\n }\n\n if ptype == \"image_history\":\n p.update(\n {\n \"content\": [args[\"data\"][0][\"content\"]],\n \"selected\": 0,\n \"type\": ptype,\n \"show_slider\": opts.get(\"show_slider\", True),\n }\n )\n elif ptype in [\"image\", \"text\", \"properties\"]:\n p.update({\"content\": args[\"data\"][0][\"content\"], \"type\": ptype})\n elif ptype == \"network\":\n p.update(\n {\n \"content\": args[\"data\"][0][\"content\"],\n \"type\": ptype,\n \"directed\": opts.get(\"directed\", False),\n \"showEdgeLabels\": opts.get(\"showEdgeLabels\", \"hover\"),\n \"showVertexLabels\": opts.get(\"showVertexLabels\", \"hover\"),\n }\n )\n elif ptype in [\"embeddings\"]:\n p.update(\n {\n \"content\": args[\"data\"][0][\"content\"],\n \"type\": ptype,\n \"old_content\": [], # Used to cache previous to prevent recompute\n }\n )\n p[\"content\"][\"has_previous\"] = False\n else:\n p[\"content\"] = {\"data\": args[\"data\"], \"layout\": args[\"layout\"]}\n p[\"type\"] = \"plot\"\n\n return p", "def current_window(self) -> Optional[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_ACTIVE_WINDOW\"], type=self.atom[\"WINDOW\"]\n )\n return None if not result else self.create_window(window_id=cast(List[xlib.Window], result)[0])", "def mainWindow(self):\r\n\r\n return self.__mainWindow", "def get_window_names(self, lib_primary, window):\n self.logger.debug(\"Naming window %s\", window)\n name = c_char_p()\n xlib.XFetchName(lib_primary, window, byref(name))\n self.logger.silly(\"WM Name: %s\", name.value)\n props = XTextProperty()\n xlib.XGetWMIconName(lib_primary, window, byref(props))\n self.logger.silly(\"WM Icon Name: %s\", props.value)\n return [name.value, props.value]", "def get_focused_window(self):\n focus = 
self.display.get_input_focus()\n if focus.focus.get_wm_class() is None:\n # TODO Climb the tree until find something with a class property\n # (The immediate parent works well enough for now, for the few\n # cases I've encountered.)\n query = focus.focus.query_tree()\n window = query.parent if query else None\n else:\n window = focus.focus\n if not window:\n return (None, None)\n return (window.get_wm_class(), window.get_wm_name())", "def get_window_geometry(self, window: xlib.Window) -> Tuple[int, int, int, int]:\n root_ret = xlib.ffi.new(\"Window *\")\n x = xlib.ffi.new(\"int *\")\n y = xlib.ffi.new(\"int *\")\n w = xlib.ffi.new(\"unsigned int *\")\n h = xlib.ffi.new(\"unsigned int *\")\n border_width = xlib.ffi.new(\"unsigned int *\")\n depth = xlib.ffi.new(\"unsigned int *\")\n xlib.lib.XGetGeometry(self.dpy, window, root_ret, x, y, w, h, border_width, depth)\n return x[0], y[0], w[0], h[0]", "def get_current_window_handle(self):\n return self._selenium_web_driver().current_window_handle", "def get_top(self):\n return self.window", "def sdl_window_p(self) -> Any:\n return lib.TCOD_context_get_sdl_window(self._p)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is emulator's window minimized.
def is_minimized(self):
    return win32gui.IsIconic(self.parent_hwnd)
[ "def is_visible() -> bool:\n return win.winfo_ismapped()", "def minimized(self) -> bool:\n return False", "def should_show_windows(self) -> bool:\n return self._should_show_windows", "def has_windows(self):\n\n if self.wins.log_win:\n return True\n\n return False", "def _is_win_visible(self, i3_win) -> bool:\n try:\n xprop = check_output(['xprop', '-id', str(i3_win.window)]).decode()\n return '_NET_WM_STATE_HIDDEN' not in xprop\n except FileNotFoundError:\n # if xprop not found, fall back to just checking if tmux win is on our current worksapce:\n self.logger.debug('xprop utility is not found - please install it.')\n self.logger.debug('will decide visibility simply by checking if tmux is on our current workspace')\n return self._is_tmux_win_on_current_ws(i3_win)", "def activate_window_desktop(self, window: wrappers.Window) -> Optional[bool]:\n pass", "def has_minimize(self):\n return self._minimize is not None", "def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False", "def user32_IsWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def full_window():\n global window\n True if (len(window) >= MWS / MSS) else False", "def test_minimize_maximize(self):\n wrp = self.dlg.minimize()\n self.dlg.wait_not('active')\n self.assertEqual(wrp.is_minimized(), True)\n wrp.maximize()\n self.dlg.wait('active')\n self.assertEqual(wrp.is_maximized(), True)\n wrp.minimize()\n self.dlg.wait_not('active')\n wrp.restore()\n self.dlg.wait('active')\n self.assertEqual(wrp.is_normal(), True)", "def minimizeApp():\n pass", "def check_window_change(self) -> int:\n new_win_title = GetWindowText(GetForegroundWindow()) # get current window text\n new_win_tags = self.get_window_tags(new_win_title) # get current window tags\n\n hwnd = FindWindow(None, new_win_title)\n processid = win32process.GetWindowThreadProcessId(hwnd)\n pshandle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_VM_READ, False, processid[1])\n exename = win32process.GetModuleFileNameEx(pshandle, 0)\n\n # local log routine\n try:\n if not new_win_title:\n self.local_log.create_log_entry(\"Minimized\", self.normalize_string(exename))\n else:\n self.local_log.create_log_entry(self.normalize_string(new_win_title), self.normalize_string(exename))\n except:\n print('Could not log to local file!')\n\n \n\n if not new_win_title and not self.minimized: # stops current time entry if all windows are minimized\n # return -1 if not self.update_entry(True) else 1\n return 0\n\n elif new_win_title and not new_win_tags and new_win_title != self.last_seen['title']: # create untagged time entry in case no tags are found\n # return -1 if not self.update_entry(False, exename) else 1\n return 0\n\n elif new_win_title and new_win_tags != self.last_seen['tags']: # create regular tagged entry\n # return -1 if not self.update_entry(False, exename, new_win_tags) else 1\n return 0\n else:\n return 0", "def user32_IsWindowVisible(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def user32_IsHungAppWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def on_minimize(self, _widget, event):\n if not self.application.app_hidden and event.new_window_state & Gdk.WindowState.ICONIFIED:\n 
self.application.window.iconify()", "def isWin(self):\n return self.pos in self.data['win_states']", "def user32_IsWindowInDestroy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hwnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _is_visible(self, obj):\n # TODO: FINISH THIS\n window_w = SCREEN_W\n window_h = SCREEN_H\n return obj.right >= 0 and obj.left <= window_w and obj.top >= 0 and obj.bottom <= window_h", "def is_root_alive(root):\n try:\n answer = root.state() \n except Exception as e:\n # I guess all exceptions mean that the window is not live..\n return False \n## e = str(e).lower() \n## if e.endswith('application has been destroyed'):\n## return False\n## else:\n## raise \n else:\n return answer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get screen image from another image.
def get_image_from_image(image, ui_element):
    image = Image.fromarray(image)
    box = (ui_element.rect[0] * image.width, ui_element.rect[1] * image.height,
           ui_element.rect[2] * image.width, ui_element.rect[3] * image.height)
    screen = image.crop(box)
    return array(screen)
[ "def get_image(imageboard='danbooru', random=False, page=0):\n if(imageboard == 'danbooru'):\n result = danbooru.get_image(random=random,page=page)\n elif (imageboard == 'konachan'):\n result = konachan.get_image(random=random,page=page)\n elif(imageboard == 'yandere'):\n result = yandere.get_image(random=random,page=page)\n else:\n result = danbooru.get_image(random=random,page=page)\n\n return result", "def get_image(self, image_name):\n return self._image[image_name]", "def get_img():\n args = get_argument()\n img_path = args[\"image\"]\n img = cv2.imread(img_path)\n return img", "def select_image(self) -> pygame.Surface:\n if self.boosting:\n return self.image_boosting\n elif not self.intact:\n return self.image_normal\n else:\n return self.image_normal", "def get_image(self, image_id):\r\n images = self.list_images(ex_image_ids=[image_id])\r\n image = images[0]\r\n\r\n return image", "def _get_image(self, idx):\n if self.in_memory:\n image = self.images[idx]\n else:\n image = self._get_pil_image(self.image_filenames[idx])\n return image", "def get_image(self):", "def get_image_from_image_decorator(emulator, get_image_from_image):\n\n def wrapped(image, rect):\n box = (rect.global_rect[0] * emulator.width, rect.global_rect[1] * emulator.height,\n rect.global_rect[2] * emulator.width, rect.global_rect[3] * emulator.height)\n element = ElementOnScreen(name=\"\", box=box, color=ElementOnScreen.GREEN_COLOR)\n if emulator.screen_elements is not None:\n emulator.screen_elements.append(element)\n return get_image_from_image(image=image, rect=rect)\n\n return wrapped", "def get_image(self, x, y, width, height):\n\n # Create a new blank image\n image = pygame.Surface([width, height]).convert()\n\n # Copy the sprite from the large sheet onto the smaller image\n image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))\n # a = screen.blit(image, (0, 0))\n image = pygame.transform.scale(image, (200, 200))\n\n # Assuming black works as the transparent color\n ##image.set_colorkey((0, 0, 0))\n transColor = image.get_at((0, 0))\n image.set_colorkey(transColor)\n\n # Return the image\n return image # , a", "def get_random_image(self):\n return self.read_image(random.choice(self.image_paths))", "def get_image(self):\n image = self.image\n if not image:\n image = getattr(self.page.specific, 'feed_image', None)\n if not image:\n image = getattr(self.page.specific, 'header_image', None)\n\n return image", "def get_image(self, x, y, width, height, alpha=False):\n image = pg.Surface((width, height))\n image.blit(self.spritesheet, (0, 0), (x, y, width, height))\n image.set_colorkey((0, 0, 0))\n image.set_alpha(255)\n if alpha:\n return image.convert_alpha()\n return image.convert()", "def select_image():\n # First randomly select a mission to avoid all tweets being from\n # Cassini, which dominates the dataset.\n missions = set(IMAGES['Instrument Host Name'])\n missions.remove('HST') # Show images from deep space missions only (sorry Hubble)\n missions.remove('NH') # New Horizons pre-Pluto data is a bit dull?\n mask_mission = IMAGES['Instrument Host Name'] == random.sample(missions, 1)[0]\n # Having selected a mission, select a random image from that mission\n idx = random.randint(0, mask_mission.sum())\n return IMAGES[mask_mission].iloc[idx]", "def get_image(experiment, **kwargs):\n im_file, actual_time = experiment.image_files.nearest(**kwargs)\n img = ndimage.imread(str(im_file))\n return img", "def get_image(self):\n slave = 0xFE\n (status, active_image) = self.__device.get_active_image(slave)\n 
self.__device.decode_error_status(status, cmd='get_active_image(%s)' % slave, print_on_error=True)\n print \"Active image: %d\" % active_image", "def _get_tile(self):\r\n\r\n tile_url = \"https://mts1.google.com/vt/\"\r\n # tile_url = \"http://mt1.google.com/vt/\"\r\n params = {\r\n 'lyrs': 'y',\r\n 'x': self.x,\r\n 'y': self.y,\r\n 'z': self.zoom,\r\n 'src': 'app'}\r\n self.img = get_pic(requests.get(tile_url, params=params))\r\n return self.img", "def get_image():\r\n\r\n file = choose_file()\r\n \r\n if file == \"\":\r\n sys.exit(\"File Open cancelled, exiting program\")\r\n img = load_image(file)\r\n\r\n return img", "def get_image(type, id):\n cursor = get_db().cursor(dictionary=True)\n\n cursor.execute(\n 'SELECT p.id, name, user_id'\n ' FROM images p'\n ' WHERE p.id = %s',\n (id,))\n\n image = cursor.fetchone()\n\n if image is None:\n abort(404, \"Image doesn't exist.\".format(id))\n\n if image['user_id'] != g.user['id']:\n abort(403)\n\n dir = 'images' if type == 0 else ('thumbnails' if type == 1 else 'faces')\n\n return send_from_directory(dir, str(image[\"id\"]) + '.' + image[\"name\"].rsplit('.', 1)[1])", "def get_image(path, group, subject, run):\n group = group if isinstance(group, str) else '{:02d}'.format(group)\n glob_root = pjoin(path,\n 'group{}_sub{:02d}_run{}'.format(\n group, subject, run))\n image = glob(glob_root + '.nii')\n return image[0] if len(image) == 1 else None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get color from screen.
def get_screen_color(self, positions, screen=None):
    screen = screen if screen is not None else self._get_screen()
    return [screen.getpixel(position) for position in positions]
[ "def get_color(self):\n color = askcolor(color=(self.g, self.r, self.b))\n grb = color[0]\n if grb != None:\n self.g = grb[0]\n self.r = grb[1]\n self.b = grb[2]", "def _getPixelColor(self):\n hdc = windll.user32.GetDC(0)\n\n cursor_pos = self._getCursorPos()\n rgb = windll.gdi32.GetPixel(hdc, *cursor_pos)\n\n windll.user32.ReleaseDC(0, hdc)\n\n return (rgb & 0xff, (rgb >> 8) & 0xff, (rgb >> 16) & 0xff)", "def find_color():\n color.setup_color_sensor()\n sid_color= color.get_basic_color()\n\n if sid_color == '1':\n sid = 'b123456'\n elif sid_color == '2':\n sid = 'a123456'\n return sid", "def get_color(self):\r\n return self._player_color", "def _fetch_color(self, shader):\n return cmds.getAttr(shader + '.color')[0]", "def pixel_get_color(x, y):\r\n return _audll.AU3_PixelGetColor(x, y)", "def get_color(self):\n return self._text.color", "def GetColour(self):\n return self.__enteredColour", "def get(self, id):\r\n color = obtener_color(id)\r\n if not color:\r\n api.abort(404)\r\n else:\r\n return color", "def __int__(self):\n return self.color", "def screen_color_depth(self):\n return self._screen_color_depth", "def pixel_color(self, x, y):\n\n x = int(x)-1\n y = int(y)-1\n\n try:\n return self.canvas[x][y]\n except:\n return None", "def get_ambient_light_color(self):\n self.__send_command(CommandsBytes.GET_AMBIENT_LIGHT_COLOR)\n # Get the result\n result = self.__receive_string()\n if result == \"ok\":\n # Receive the color\n r = self.__receive_int()\n g = self.__receive_int()\n b = self.__receive_int()\n return r, g, b\n print(\"Error getting ambient light color\")\n return None", "def get_color_for(username):\n with sql.connect(database_user) as cur:\n res = cur.execute(f\"\"\"\n SELECT color\n From UserDatabase\n WHERE username='{username}';\n \"\"\")\n _color = res.fetchone()[0]\n return _color", "def get_color_code(self):\n if self.color == 'r':\n return (254, 0, 0)\n else:\n return (0, 0, 0)", "def xy_color(self):\n return self.device.state.get('color_xy')", "def get_color(self, stepind, lightind):\n light = self.get_light(stepind, lightind)\n return light.get_color()", "def getColor(dim, tag):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshModelGetColor(\n c_int(dim),\n c_int(tag),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)", "def getColor(name):\n api_r_ = c_int()\n api_g_ = c_int()\n api_b_ = c_int()\n api_a_ = c_int()\n ierr = c_int()\n lib.gmshOptionGetColor(\n c_char_p(name.encode()),\n byref(api_r_),\n byref(api_g_),\n byref(api_b_),\n byref(api_a_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionGetColor returned non-zero error code: \",\n ierr.value)\n return (\n api_r_.value,\n api_g_.value,\n api_b_.value,\n api_a_.value)", "def color(self):\n return definitions.player_colors[self.player_id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get (x,y) position inside screen rectangle. Because the position is drawn from a normal distribution, it will usually be near the rectangle's center.
def get_position_inside_screen_rectangle(self, rect, mean_mod=2, sigma_mod=5):
    if rect[0] == rect[2] and rect[1] == rect[3]:
        return int(rect[0] * self.width), int(rect[1] * self.height)
    x, y = get_position_inside_rectangle(rect=rect, mean_mod=mean_mod, sigma_mod=sigma_mod)
    return int(x * self.width), int(y * self.height)
[ "def get_position(self):\n return self._rect.x, self._rect.y", "def getCenter(self):\n (left, top), (right, bottom) = self.getCoords()\n x = left + (right - left) / 2\n y = top + (bottom - top) / 2\n return x, y", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def get_aim_pos(self):\n\n left_top_client = (self._window_rect.left, self._window_rect.top)\n right_bottom_client = (self._window_rect.right, self._window_rect.bottom)\n left_top_screen = win32gui.ClientToScreen(self._process_handle, left_top_client)\n right_bottom_screen = win32gui.ClientToScreen(self._process_handle, right_bottom_client)\n\n x_pos = 0.5 * (right_bottom_screen[0] + left_top_screen[0])\n y_pos = 0.5 * (right_bottom_screen[1] + left_top_screen[1])\n\n return x_pos, y_pos", "def get_pos(self):\r\n if self.pos is None:\r\n x = random.uniform(32.001, 32.999)\r\n y = random.uniform(35.001, 35.999)\r\n self.pos = (x, y, 0)\r\n return self.pos", "def calc_pos(self, gridpos):\n x,y = gridpos\n x = self.x_offset + self.x_u * x\n y = self.y_offset + self.y_u * y\n return x, y", "def get_center(self):\n x, y = self.pos\n ox, oy = self.origin\n w, h = self.size\n return (x - ox + w / 2, y - oy + h / 2)", "def getPos(self):\n\t\treturn self.__robot.x(), self.__robot.y(), self.__robot.a()", "def __random_coordinates(self):\n x = random.randint(self.__screen_min_x, self.__screen_max_x)\n y = random.randint(self.__screen_min_y, self.__screen_max_y)\n return x, y", "def find_center(rect):\r\n p = Point()\r\n p.x = rect.corner.x + rect.width / 2\r\n p.y = rect.corner.y + rect.height / 2\r\n return p", "def getPosition(self):\n return self.barycenter", "def xy2center(self, x, y):\n x = x - 10.97 / 2\n y = y - 23.78 / 2\n return x, y", "def canvasPos(self):\r\n\t\tpos = pos_minus(local.mouse.pos,self['screenpos'])\r\n\t\tpos = SafeSize(pos)\r\n\t\tpos = min(pos[0],self['size'][0]-1),min(pos[1],self['size'][1]-1)\r\n\t\treturn pos", "def get_physical_position(self):\n return self._pos", "def pos(self):\n return Point(*self.position())", "def get_position(self):\n return self.node.eval_script(\"node.getBoundingClientRect()\")", "def get_grid_position(self, point):\n x_grid = (point['x']-self.min_x)//self.gridsize_x\n y_grid = (point['y']-self.min_y)//self.gridsize_y\n return x_grid, y_grid", "def mousepos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def _get_random_location(self):\n\n width, height = self.world_size\n\n # # Generate a random (x, y) coordinate within the world's borders\n x = random.uniform(self.radius, width - self.radius)\n y = random.uniform(self.radius, height - self.radius)\n\n x -= width // 2\n y -= height // 2\n\n return x, y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get text from screen.
def get_screen_text(self, ui_element, screen=None):
    image = screen if screen is not None else self.get_screen_image(ui_element.rect)
    return get_text_from_image(image=image, threshold=ui_element.threshold, chars=ui_element.chars,
                               save_file=ui_element.save_file, max_height=ui_element.max_height)
[ "def get_text(self):\n data = self.txtbox.get(1.0, END)\n print(data)", "def get_text(self):\n data = self.txtbox.get(1.0, END)\n test = self.txtbox.selection_get()", "def findText(self, screenText):\n return self.cursesScreen.test_find_text(screenText)", "def get_text(self) -> str:\n return self.clipboard.wait_for_text()", "def GetText(self):", "def win_get_text(title, text=u'', bufsize=64 * 1024):\r\n buf = create_unicode_buffer(bufsize)\r\n _audll.AU3_WinGetText(title, text, buf, bufsize)\r\n return buf.value", "def get_text(self):\r\n\t\treturn self.text", "def get_text(self):\n\n user_input = self.text1.get(\"1.0\", END)\n\n return user_input", "def get_text(textname):\n try:\n textobj = Blender.Text.Get(textname)\n except Exception, err:\n print \"WARNING: \", err\n return \"\"\n else:\n return string.join(textobj.asLines(), \"\\n\")", "def get_text(self):\r\n editor = self._main.get_current_editor()\r\n if editor:\r\n return editor.get_text()\r\n return", "def get_text(self):\n return self.widget.GetValue()", "def get_text(self):\r\n return self.toPlainText()", "def fetch_user_input(self):\n user_text = self.entry.get()\n self.entry.delete(0, 'end')\n self.pipe_in_1(\"user_text\")\n return user_text", "def get_text_from_clipboard(self):\n\n self.mob_conn.get_clipboard()\n text_from_clipboard = self.mob_conn.get_clipboard_text()\n return text_from_clipboard", "def get_screen ( self ):\n\t\tself.msg(1,\"Generating the current TN3270 buffer in ASCII\")\n\t\tbuff = ''\n\t\ti = 1\n\n\t\tfor line in self.buffer:\n\t\t\tif line == \"\\0\":\n\t\t\t\tbuff += \" \"\n\t\t\telse:\n\t\t\t\tbuff += line.decode('EBCDIC-CP-BE').encode('utf-8')\n\t\t\tif i % 80 == 0:\n\t\t\t\tbuff += '\\n'\n\n\t\t\ti = i + 1\n\t\treturn buff", "def get_view_text(view):\n\tlog_utils.debug('retrieving text')\n\t\t\n\tbf = view.get_buffer()\n\n\tstart = bf.get_start_iter()\n\tend = bf.get_end_iter()\n\n\ttext = bf.get_text(start, end)\t\t\n\t\n\tlog_utils.debug('retrieved text')\n\n\treturn text", "def text(self):", "def get_current_page_text(self):\n return self.br.response().read()", "def draw_text(screen, text):\n font = pg.font.SysFont(\"Helvitca\", 32, True, False)\n text_object = font.render(text, False, pg.Color(\"turquoise\"))\n text_location = pg.Rect(0, 0, WIDTH, HEIGHT) \\\n .move(WIDTH / 2 - text_object.get_width() / 2, HEIGHT / 2 - text_object.get_height() / 2)\n screen.blit(text_object, text_location)\n text_object = font.render(text, False, pg.Color(\"dark blue\"))\n screen.blit(text_object, text_location.move(1, 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if image is on screen.
def is_image_on_screen(self, ui_element, screen=None):
    rect = ui_element.rect if ui_element.rect else ui_element.button
    screen_image = screen if screen is not None else self.get_screen_image(rect)
    return is_images_similar(screen_image, ui_element.image, ui_element.threshold,
                             save_file=ui_element.save_file)
[ "def is_onscreen(self):\n x, y = self.loc\n w, h = get_screen_size()\n\n screen = Rect(0, 0, w, h)\n actor = Rect(x, y, self.width, self.height)\n\n if screen.colliderect(actor):\n return True\n else:\n return False", "def is_onscreen(self):\n x,y = self.loc\n w,h = get_screen_size()\n\n screen = Rect(0, 0, w, h)\n actor = Rect(x, y, self.width, self.height)\n\n if screen.colliderect(actor): return True\n else: return False", "def exist(image):\r\n try:\r\n if len(pyautogui.locateCenterOnScreen(image)) == 2:\r\n return True\r\n except Exception:\r\n return False", "def determine_if_on_screen(self, component) -> bool:\r\n\r\n # make ranges that describe x- and y-coordinates on screen\r\n start_range = cfg.camera_pos[0] - (cfg.screen_width / 2), cfg.camera_pos[1] - (cfg.screen_height / 2)\r\n end_range = cfg.camera_pos[0] + (cfg.screen_width / 2), cfg.camera_pos[1] + (cfg.screen_height / 2)\r\n\r\n # make sure they are int, int\r\n xrange = range(int(start_range[0]), int(end_range[0]))\r\n yrange = range(int(start_range[1]), int(end_range[1]))\r\n\r\n if int(component.pos[0]) in xrange and int(component.pos[1]) in yrange:\r\n return True\r\n else:\r\n #print(\"NOT on screen pos:\", component.pos[0], component.pos[1], \"range:\", xrange, yrange)\r\n return False", "def in_screen(self, coord):\n\t\treturn coord.x >= 0 and coord.x < self.width and coord.y >= 0 and coord.y < self.height", "def _is_visible(self, obj):\n # TODO: FINISH THIS\n window_w = SCREEN_W\n window_h = SCREEN_H\n return obj.right >= 0 and obj.left <= window_w and obj.top >= 0 and obj.bottom <= window_h", "def mouse_on_screen(self, x_detected, y_detected):\n\t\treturn \\\n\t\t\tself.x_start_screen <= x_detected <= self.x_end_screen \\\n\t\t\tand self.y_start_screen <= y_detected <= self.y_end_screen", "def mobile_screen_should_contain(self, target):\n self._prepare()\n im_source = ac.imread(self._screen)\n im_search = ac.imread(target)\n re = ac.find_template(im_source, im_search, self.TH)\n if re:\n return True\n return False", "def is_visible() -> bool:\n return win.winfo_ismapped()", "def checkScreen(self):\r\n if not window.screen == self.screen:\r\n window.setScreen(self.screen)", "def check_current_screen(self, _):\n assert self.screenmanager\n\n hour = datetime.datetime.now().hour\n if hour in HOURS_SHOW_WEATHER_DEFAULT:\n self.screenmanager.show_weather_screen()\n else:\n self.screenmanager.show_slideshow()", "def missed_shot(img_screen):\n # TODO: template match to the button\n return False", "def is_image_on_screen_decorator(emulator, is_image_on_screen):\n\n def wrapped(ui_element, screen=None):\n box = (ui_element.image_rect.global_rect[0] * emulator.width,\n ui_element.image_rect.global_rect[1] * emulator.height,\n ui_element.image_rect.global_rect[2] * emulator.width,\n ui_element.image_rect.global_rect[3] * emulator.height)\n element = ElementOnScreen(name=ui_element.name, box=box, color=ElementOnScreen.CYAN_COLOR)\n on_screen = is_image_on_screen(ui_element=ui_element, screen=screen)\n element.color = ElementOnScreen.MAGENTA_COLOR if on_screen else element.color\n if emulator.screen_elements is not None:\n emulator.screen_elements.append(element)\n return on_screen\n\n return wrapped", "def is_levelup_screen(self):\n # This is implemented as reading some text on the screen instead of\n # using get_text() because checking every loop is really slow.\n\n address = 0xc50f\n values = [146, 143, 130, 139]\n\n for (index, value) in enumerate(values):\n if self.emulator.vba.read_memory_at(address + index) != 
value:\n return False\n else:\n return True", "def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)", "def is_evolved_screen(self):\n if not self.is_in_battle():\n return False\n\n address = 0x4bb1\n values = [164, 181, 174, 171, 181, 164, 163, 127, 168, 173, 179, 174, 79]\n\n for (index, value) in enumerate(values):\n if self.emulator.vba.read_memory_at(address + index) != value:\n return False\n else:\n return True", "def isScreenDoor(state: 'SoState') -> \"SbBool\":\n return _coin.SoShapeStyleElement_isScreenDoor(state)", "def is_image(self, name):\r\n ns = self.get_current_namespace()\r\n try:\r\n from spyderlib.pil_patch import Image\r\n return isinstance(ns[name], Image.Image)\r\n except ImportError:\r\n return False", "def image_is_loaded(self):\n try:\n self.image.size\n return True\n except AttributeError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if color in rects on screen is similar to given color.
def is_color_similar(self, color, rects, screen=None):
    positions = [self.get_position_inside_screen_rectangle(rect) for rect in rects]
    screen_colors = self.get_screen_color(positions=positions, screen=screen)
    similar = False
    for screen_color in screen_colors:
        similar = similar or True if is_color_similar(color, screen_color) else similar or False
    return similar
[ "def has_match(self, x, y, color):\n\n if self.pixel_color(x, y) == color:\n return True\n\n for x, y in self.pixels_around(x, y):\n if self.pixel_color(x, y) == color:\n return True\n return False", "def is_similar_color_rgb(color_a, color_b):\n difference = abs(rgb_luminance(color_a) - rgb_luminance(color_b))\n distance = color_distance(color_a, color_b)\n debug('[COLOR DIFF] ({0} <-> {1}) Diff: {2} - Distance: {3}'.format(color_a, color_b, difference, distance))\n return difference < COLOR_LUMINANCE_THRESHOLD and distance < COLOR_DISTANCE_THRESHOLD", "def is_checkmated(self, color):", "def check_color_tile(self, inp_row, inp_col, inp_color):\n if self.list_tiles[inp_row][inp_col] is not None:\n tile = self.list_tiles[inp_row][inp_col]\n if tile.color == inp_color:\n return True\n return False", "def match_color(frame, region, color):\n palette, color_frequency = get_palette(frame, region)\n debug('Palette: {0}'.format(palette))\n debug('Color frequency: {0}'.format(color_frequency))\n selected = is_color_in_palette(palette, color_frequency, color)\n\n if IMAGE_DEBUG_MODE:\n cv2.imwrite('matching_color_{0}.jpg'.format(time.time()), crop_image(frame, region))\n\n debug('Color matched: {0}, {1}'.format(selected, color))\n\n return selected", "def validate_pixel(pixel):\n \n valid_color = (pixel[0] < 150 and pixel[1] < 135 and pixel[2] < 135)\n return valid_color", "def isRed(img, circle):\n circle = [int(X) for X in circle]\n xc, yc, r = circle\n cropImg = img[yc-r:yc+r, xc-r:xc+r]\n average_color = cv2.mean(cropImg)\n if red_lower[0] <= average_color[0] <= red_upper[0] and red_lower[1] <= \\\n average_color[1] <= red_upper[1] and red_lower[2] <= \\\n average_color[2] <= red_upper[2]:\n return True\n else:\n return False", "def _iscolor(color):\n if color is None:\n return True\n if isinstance(color, (tuple, list, _Color)):\n return len(color) == 3\n if isinstance(color, _INTTYPES):\n return True\n return False", "def check_color(board: list) -> bool:\n starting_row = 0\n starting_column = 4\n for i in range(5):\n i = starting_row\n j = starting_column\n color_temp = []\n while i + j != 8:\n if board[i][j] != ' ':\n if board[i][j] in color_temp:\n return False\n else:\n color_temp.append(board[i][j])\n i += 1\n while i + j != 13:\n if board[i][j] != ' ':\n if board[i][j] in color_temp:\n return False\n else:\n color_temp.append(board[i][j])\n j += 1\n starting_row += 1\n starting_column -= 1\n return True", "def hasColor(*args, **kwargs):\n \n pass", "def are_colors_similar(self, color_index_a, palette_b, color_index_b,\n tolerance=50):\n color_a = self.colors[color_index_a]\n color_b = palette_b.colors[color_index_b % len(palette_b.colors)]\n r_diff = abs(color_a[0] - color_b[0])\n g_diff = abs(color_a[1] - color_b[1])\n b_diff = abs(color_a[2] - color_b[2])\n return (r_diff + g_diff + b_diff) <= tolerance", "def test_compare__same_colors_within_distance(self):\n size = (3, 5)\n pixelarray_result_color = pygame.Color(\"white\")\n surface_color = (127, 127, 127, 255)\n\n for depth in (8, 16, 24, 32):\n expected_pixelarray_surface = pygame.Surface(size, depth=depth)\n expected_pixelarray_surface.fill(pixelarray_result_color)\n\n # Copy the surface to ensure same dimensions/formatting.\n surf_a = expected_pixelarray_surface.copy()\n surf_a.fill(surface_color)\n # For non-32 bit depths, the actual color can be different from what\n # was filled.\n expected_surface_color = surf_a.get_at((0, 0))\n\n pixelarray_a = pygame.PixelArray(surf_a)\n pixelarray_b = pygame.PixelArray(surf_a.copy())\n\n 
for distance in (0.0, 0.01, 0.1, 1.0):\n pixelarray_result = pixelarray_a.compare(\n pixelarray_b, distance=distance\n )\n\n # Ensure the resulting pixelarray is correct and that the original\n # surfaces were not changed.\n self.assert_surfaces_equal(\n pixelarray_result.surface,\n expected_pixelarray_surface,\n (depth, distance),\n )\n self.assert_surface_filled(\n pixelarray_a.surface, expected_surface_color, (depth, distance)\n )\n self.assert_surface_filled(\n pixelarray_b.surface, expected_surface_color, (depth, distance)\n )\n\n pixelarray_a.close()\n pixelarray_b.close()\n pixelarray_result.close()", "def blackOrWhite(color):\n if (color[0] == 255 and color[1] == 255 and color [2] == 255):\n return 1\n elif (color[0] == 0 and color[1] == 0 and color [2] == 0):\n return 2\n return 0", "def check_winning_condition(_board: SimpleGoBoard, point: int, color: int) -> bool:\n\n consecutive_occurrence = 0\n # check horizontal: west to east\n for i in range(0, 9):\n if _constrained_index(_board, point - 4 + i) == color:\n consecutive_occurrence += 1\n if consecutive_occurrence >= 5:\n return True\n else:\n consecutive_occurrence = 0\n\n\n consecutive_occurrence = 0\n # check vertical: south to north\n for i in range(0, 9):\n if _constrained_index(_board, point + (-4 + i) * _board.NS) == color:\n consecutive_occurrence += 1\n if consecutive_occurrence >= 5:\n return True\n else:\n consecutive_occurrence = 0\n\n\n consecutive_occurrence = 0\n # check diagonal: southwest to northeast\n for i in range(0, 9):\n if _constrained_index(_board, point + (-4 + i) * _board.NS - 4 + i) == color:\n consecutive_occurrence += 1\n if consecutive_occurrence >= 5:\n return True\n else:\n consecutive_occurrence = 0\n\n\n consecutive_occurrence = 0\n # check diagonal: southeast to northwest\n for i in range(0, 9):\n if _constrained_index(_board, point + (-4 + i) * _board.NS + 4 - i) == color:\n consecutive_occurrence += 1\n if consecutive_occurrence >= 5:\n return True\n else:\n consecutive_occurrence = 0\n\n return False", "def __check_color_valid(color):\n\n if(len(color) < 3):\n return False, \"Color tuple has less than 3 elements\"\n if(len(color) > 3):\n return False, \"Color tuple has more than 3 elements\"\n for i in color:\n if(i > 255 or i < 0):\n return False, \"A RGB value is out of bounds(0-255)\"\n return True, \"\"", "def __eq__(self, u: 'SbColor') -> \"int\":\n return _coin.SbColor___eq__(self, u)", "def contains_clutter_class(segmap_colors, class_presence_threshold=1):\n indexes_red = np.where(np.all(segmap_colors == np.array([255,0,0]), axis=-1))\n \n if len(indexes_red[0]) >= class_presence_threshold:\n return True\n return False", "def color_is_checked(self, color):\n # for tests purposes there can be no king..\n if 'King' not in self.piece_types_left(color):\n return False\n poz_k = self.positions_of_piece('King', color)\n assert len(poz_k) == 1\n return (True, color) if self.under_attack(poz_k[0], color) else False", "def find_color(self):\n\n self.sym_mask = cv2.bitwise_and(self.symbol_img, self.symbol_img, mask=self.thresh_sym)\n sym_hsv = cv2.cvtColor(self.sym_mask, cv2.COLOR_BGR2HSV)\n norm_hsv = cv2.cvtColor(self.normalize_patch, cv2.COLOR_BGR2HSV)\n\n # sym_mean = np.mean(sym_hsv[:, :, 0], axis=(0, 1))\n norm_mean = np.mean(norm_hsv[:, :, 0], axis=(0, 1))\n\n hue = sym_hsv[:, :, 0]\n sym_mean = hue[hue > 0].flatten()\n # print(sym_mean)\n purple_mean = ((sym_mean > 115) & (sym_mean < 160)).sum()\n green_mean = ((sym_mean > 60) & (sym_mean < 90)).sum()\n red_mean = ((sym_mean > 1) & 
(sym_mean < 10)).sum() + (sym_mean > 175).sum()\n color_choice = np.array([red_mean, green_mean, purple_mean])\n\n self.color = np.argmax(color_choice)\n self.color_str = COLOR_STR[self.color]", "def test_compare__different_colors_within_distance(self):\n size = (3, 5)\n pixelarray_result_color = pygame.Color(\"white\")\n surface_a_color = (127, 127, 127, 255)\n surface_b_color = (128, 127, 127, 255)\n\n for depth in (8, 16, 24, 32):\n expected_pixelarray_surface = pygame.Surface(size, depth=depth)\n expected_pixelarray_surface.fill(pixelarray_result_color)\n\n # Copy the surface to ensure same dimensions/formatting.\n surf_a = expected_pixelarray_surface.copy()\n surf_a.fill(surface_a_color)\n # For non-32 bit depths, the actual color can be different from what\n # was filled.\n expected_surface_a_color = surf_a.get_at((0, 0))\n pixelarray_a = pygame.PixelArray(surf_a)\n\n surf_b = expected_pixelarray_surface.copy()\n surf_b.fill(surface_b_color)\n # For non-32 bit depths, the actual color can be different from what\n # was filled.\n expected_surface_b_color = surf_b.get_at((0, 0))\n pixelarray_b = pygame.PixelArray(surf_b)\n\n for distance in (0.2, 0.3, 0.5, 1.0):\n pixelarray_result = pixelarray_a.compare(\n pixelarray_b, distance=distance\n )\n\n # Ensure the resulting pixelarray is correct and that the original\n # surfaces were not changed.\n self.assert_surfaces_equal(\n pixelarray_result.surface,\n expected_pixelarray_surface,\n (depth, distance),\n )\n self.assert_surface_filled(\n pixelarray_a.surface, expected_surface_a_color, (depth, distance)\n )\n self.assert_surface_filled(\n pixelarray_b.surface, expected_surface_b_color, (depth, distance)\n )\n\n pixelarray_a.close()\n pixelarray_b.close()\n pixelarray_result.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click inside button rectangle.
def click_button(self, button_rect, min_duration=0.1, max_duration=0.25):
    duration = random.uniform(min_duration, max_duration)
    r_sleep(duration)
    x, y = self.get_position_inside_screen_rectangle(button_rect)
    self.autoit_control_click_by_handle(self.parent_hwnd, self.hwnd, x=x, y=y)
    r_sleep(duration * 2)
[ "def click(widget, view_index=None):\n pos = center(widget, view_index)\n robouser.click(pos)", "def button_press(self, x, y, button):\n base._Widget.button_press(self, x, y, button)\n if button == 1:\n icon = self.get_icon_in_position(x, y)\n if icon is not None:\n cmd = self.progs[icon][\"cmd\"]\n if cmd.startswith(\"qshell:\"):\n exec(cmd[7:].lstrip())\n else:\n self.qtile.spawn(cmd)\n self.draw()", "def press(self, button, wait=0.0, port=0):", "def test_button(self):\n # generic button\n button = Button(10, 20, 50, 60)\n self.assertEqual(pg.Rect(-15, 20, 50, 60), button.rect)\n self.assertEqual((10, 20), button.rect.midtop)\n self.assertEqual((255, 255, 255), button.return_color())\n\n # turn button\n turn_button = Button(0, 0, 20, 20, color=(255, 255, 255))\n self.assertEqual((255, 255, 255), turn_button.return_color())\n\n # end button\n end_button = Button(0, 0, 20, 20, color=(0, 0, 0))\n self.assertEqual((0, 0, 0), end_button.return_color())", "def OnButtonClick(self):\n pass", "def click_button(self, selector: str):\n with self.playwright.grpc_channel() as stub:\n response = stub.ClickButton(\n playwright_pb2.selectorRequest(selector=selector)\n )\n logger.info(response.log)", "def click(self,boton):\n cadena= \"el argumento 'boton' no es igual a: ('izquierdo', 'medio', 'derecho', 4, 5, 6, 7)\"\n assert boton in self.mapa_botones_mouse.keys(),cadena\n boton = self.mapa_botones_mouse[boton]\n fake_input(self._display, X.ButtonPress, boton)\n self._display.sync()\n fake_input(self._display, X.ButtonRelease, boton)\n self._display.sync()", "def on_button_press_event(self, window, event):\n event_rect = Gdk.Rectangle()\n event_rect.x, event_rect.y = event.get_root_coords()\n event_rect.width = 1\n event_rect.height = 1\n\n allocation = self.get_allocation()\n window_rect = Gdk.Rectangle()\n window_rect.x, window_rect.y = self._get_position()\n window_rect.width = allocation.width\n window_rect.height = allocation.height\n\n intersection = Gdk.rectangle_intersect(\n event_rect, window_rect)\n # if the click was outside this window, hide it\n if not intersection[0]:\n self.popdown()", "def click_button(self, key_id):\n select_id = self.key[key_id]\n select_key_value = resolv_dict(self.config, select_id)\n self.scroll_view(select_key_value)\n Click_button = click_button_id(self.driver, select_key_value)\n assert Click_button == True, 'Click button : %s ' % Click_button", "def click(self, text):\n [b for b in self.harness.css('button', self.element) if b.text == text][0].click()", "def button_click(self, button, x, y):\n if self.board.place(x, y, self.board.player):\n self.visual_place(button)\n\n if not self.board.is_multiplayer:\n actions = self.board.get_actions()\n action = self.ai.take_action(actions)\n (ai_x, ai_y, ai_player) = action\n self.board.place(ai_x, ai_y, ai_player)\n\n ai_button = self.buttons[ai_y][ai_x]\n self.visual_place(ai_button)", "def button1(msg,x,y,w,h,ic,ac,action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w>mouse[0]>x and y+h>mouse[1]>y:\n pygame.draw.rect(screen,ac,(x,y,w,h))\n if click[0]==1 and action!=None:\n action()\n else:\n pygame.draw.rect(screen,ic,(x,y,w,h))\n #Displays message on button\n small_text = pygame.font.SysFont(\"timesnewromanboldttf\",25)\n text_s = small_text.render(msg,True, (85,107,47))\n text_rect_s = text_s.get_rect()\n text_rect_s.center = (x+(w//2),y+(h//2))\n screen.blit(text_s, text_rect_s)", "def click( self, (x,y) ):\r\n for k in self.keys:\r\n if k.rect==None: continue\r\n x0,y0,w,h 
= k.rect\r\n if x >= x0 and x < x0+w and y >= y0 and y < y0+h:\r\n k.toggle_active()\r\n return k", "def test_click_input(self):\n self.button.click_input()\n self.assertEqual(self.label.window_text(), \"LeftClick\")", "def during_btn_press (self, event, epsilon_x, data_bounds=None):\n if event.inaxes is None: return\n ib = (data_bounds is not None)\n \n if ib:\n xmin, xmax, ymin, ymax = data_bounds\n \n # if the selection box is visible\n if self.get_visible():\n if (event.xdata, event.ydata) in self:\n self.move_box(True)\n self.set_box_start()\n return False\n \n\n # selection box is not visible and order is selected\n if event.inaxes is None: return False\n if ib:\n xs1, xs2, ys1, ys2 = self.modify_bounds(data_bounds)\n\n # if you click within epsilon of the left edge\n if np.abs(xmin - event.xdata) < 1.5 * epsilon_x and ib:\n self.create_from_left(True)\n self.set_box_start([xs1, xmin, ys1, ys2])\n # if you click within epsilon of the right edge\n elif np.abs(xmax - event.xdata) < 1.5 * epsilon_x and ib:\n self.create_from_right(True)\n self.set_box_start([xmax, xs2, ys1, ys2])\n\n else:\n self.create(True)\n\n self.update_box(None,False)\n self._ptime = time.time()\n return True", "def actionPerformed(self, e):\n if e != e:\n # Avoid the unused parameter warning \n twin.doClick(0)", "def OnButton(self,e):\n self.queue.put('button!')", "def test_button_click(self):\n label = self.dlg.by(class_name=\"Text\",\n name=\"TestLabel\").find()\n self.dlg.Apply.click()\n self.assertEqual(label.window_text(), \"ApplyClick\")", "def click(self, locator):\r\n self.find_element(locator).click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the currently opened app in the emulator.
def close_current_app(self):
    raise NotImplementedError
[ "def call_manager_exit(self):\n self.manager.client.close_com()\n self.manager.close_frame()", "def quit_apps():\n os.system(\"osascript -e 'tell app \\\"{}\\\" to quit saving no'\".format(\n MAPLE_CLIENT_APP_NAME\n ))\n os.system(\"osascript -e 'tell app \\\"Automator\\\" to quit saving no'\")", "async def close(self, ctx: commands.Context):\n await self.bot.logout()", "def close_animation(self):\n\n self.env.gym_env.close()", "def stop(self):\n self.on_fg = False\n self.device.execute_command(f\"am force-stop {self.package_name}\",\n shell=True) \\\n .validate(Exception(\"error stopping app\"))", "def app_closing(self, ID, unexpected = 0):\n# ClientEditor expects this to be defined, but EdSim doesn't ever\n# generate a close_app_cbk, so we don't ever expect to receive this.\n self.editor.disconnected()\n self.frame.disconnected()", "def close(self):\r\n pygame.quit()", "def quit_app(self):\n ans = askokcancel('Verify exit', 'Really quit?')\n if ans:\n datautils.save_data(self.datalist)\n self.parent.quit()", "def close(self):\r\n self.telnet.write('exit\\n')\r\n self.telnet.close()", "def exit_game(self):\n pygame.quit()\n sys.exit()", "def close_view_controller():\n \n ConsoleViewController.closePresentedViewController()\n ConsoleViewController.isMainLoopRunning = False", "def doQuit(self):\n\n self.mainWin2.destroy()", "def close_window(_):\n root.destroy()", "def _uninstall_android_app(package_name):\n args = [\"adb\", \"uninstall\", package_name]\n logging.info(\"Uninstall testapp: %s\", \" \".join(args))\n _run_with_retry(args, device=_DEVICE_ANDROID, type=_RESET_TYPE_REBOOT)", "def tearDown(self):\n self.app.kill()", "def close_window(self):\r\n Window.close()", "def closeViewController():\n \n PytoClasses.ConsoleViewController.visible.closePresentedViewController()\n PytoClasses.ConsoleViewController.isMainLoopRunning = False", "def close_game(self):\n\t\tfor key in self.RestReceiver.get_controlled_entities():\n\t\t\tself.client.unsubscribe(key)\n\t\tself.ACTIVE = False", "async def test_close_session_shuts_down_appsession(self):\n with self.patch_app_session():\n await self.runtime.start()\n\n # Create a session and get its associated AppSession object.\n session_id = self.runtime.create_session(\n client=MockSessionClient(), user_info=MagicMock()\n )\n app_session = self.runtime._get_session_info(session_id).session\n\n # Close the session. AppSession.shutdown should be called.\n self.runtime.close_session(session_id)\n app_session.shutdown.assert_called_once()", "def quit(self):\n self.joystick.close()\n if not game.serial is None:\n game.serial.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to SQLite file at dbfile and yields parsed tokens for each row.
def stream(self, dbfile):

    # Connection to database file
    db = sqlite3.connect(dbfile)
    cur = db.cursor()

    cur.execute("SELECT Text FROM sections")

    count = 0
    for section in cur:
        # Tokenize text
        tokens = Tokenizer.tokenize(section[0])

        count += 1
        if count % 1000 == 0:
            print(f"Streamed {count} documents", end="\r")

        # Skip documents with no tokens parsed
        if tokens:
            yield tokens

    print(f"Iterated over {count} total rows")

    # Free database resources
    db.close()
[ "def parse(file, conn): #formerly main\n global cursor\n cursor = [] #CRITICALLY IMPORTANT\n #TODO: Investigate and understand what the removal of these two lines does to the program. The cursor\n #appears to stay behind after the parser function has completed and pollutes the next call to parser,\n #will erratically ruin test cases\n\n #TODO: Remove global variables, make everything local\n\n c = conn.cursor()\n with open(file, 'r') as f:\n\n for line in f:\n line = line.strip('\\n') #take off the newline\n process(line, c)\n\n adjust_entries(stack[0], c)\n insert_into_db(stack[0], cursor, c)\n\n #go grab the sql tables\n print('\\nIndividuals:')\n print(from_db_cursor(c.execute('SELECT * FROM INDI ORDER BY ID ASC')))\n print('\\nFamilies:')\n print(from_db_cursor(c.execute('SELECT * FROM FAM ORDER BY ID ASC')))\n conn.commit() #save db every time it's run", "def tokens(dbfile):\n\n tokens = None\n\n # Stream tokens to temp working file\n with tempfile.NamedTemporaryFile(\n mode=\"w\", suffix=\".txt\", delete=False\n ) as output:\n # Save file path\n tokens = output.name\n\n for row in RowIterator(dbfile):\n output.write(\" \".join(row) + \"\\n\")\n\n return tokens", "def db_read(file):\n blocklist = {}\n if not Path(file).is_file():\n print(\"DB file does not exist.\")\n else:\n try:\n # pylint: disable=C0103\n with dbm.gnu.open(file, \"r\") as db:\n k = db.firstkey()\n while k is not None:\n blocklist[k.decode(\"utf-8\")] = db[k].decode(\"utf-8\")\n k = db.nextkey(k)\n except IOError as err:\n print(\"Error reading DB file:\", err)\n\n # note: dicts can't be sorted; we'd need to create a sorted list of tupels\n return blocklist", "def _open_sqlite_db(self, dbfile):\n try:\n db = sqlite3.connect(dbfile)\n cur = db.cursor()\n except sqlite3.Error, e:\n # self.logger.error(\"sqlite3 error: %s\", e.message)\n print (\"sqlite3 error: %s\" % e.message)\n sys.exit(1)\n return db, cur", "def query_from_file(*file_path: str):\n conn, cur = DbManager.get_db()\n queries = read_file(*file_path).split(\"-----\")\n for query in queries:\n cur.execute(query)\n conn.commit()\n cur.close()\n conn.close()", "def __call__(self, infile, dbfile):\n\n print(f\"Converting {infile} to {dbfile}\")\n\n # Delete existing file\n if os.path.exists(dbfile):\n os.remove(dbfile)\n\n # Create new database\n db = sqlite3.connect(dbfile)\n\n # Create database tables if necessary\n self.create(db, XML2DB.QUESTIONS, \"questions\")\n self.create(db, XML2DB.ANSWERS, \"answers\")\n\n count = 0\n with open(infile, encoding=\"utf-8\") as xml:\n context, root = self.xmlstream(xml)\n\n for event, row in context:\n if event == \"end\":\n # Execute insert statement\n self.insert(db, row)\n\n count += 1\n if count % 10000 == 0:\n print(f\"Inserted {count} rows\")\n\n # Free memory\n root.clear()\n\n print(f\"Total rows inserted: {count}\")\n\n # Commit changes\n db.commit()", "def read_task_db(fname):\n connection = sqlite3.connect(fname)\n # cursor = connection.cursor()\n # query = \"SELECT name FROM sqlite_master WHERE type='table';\"\n # tables = cursor.execute(query).fetchall()\n\n tasks = pandas.read_sql_query(\"SELECT * from tasks\", connection)\n events = pandas.read_sql_query(\"SELECT * from task_events\", connection)\n params = pandas.read_sql_query(\"SELECT * from task_parameters\", connection)\n\n return tasks, events, params", "def db_connect(file):\n\ttry: \n\t\tconn = sqlite3.connect(file)\n\t\treturn conn\n\texcept sqlite3.Error as e:\n\t\tprint(e)\n\n\treturn None", "def connect(self, 
dataFile):\r\n\t\tself.debug(\"-- connect -- \" + dataFile)\r\n\t\t\r\n\t\tdb = sqlite3.connect(dataFile)\r\n\t\tself.c = db.cursor()\r\n\t\treturn {'db':db, 'c':self.c}", "def stream(dbfile, maxsize):\n # Connection to database file\n db = sqlite3.connect(dbfile)\n cur = db.cursor()\n\n # Select tagged sentences without a NLP label. NLP labels are set for\n # non-informative sentences.\n query = Index.SECTION_QUERY + \" AND tags is not null\"\n\n if maxsize > 0:\n query += (\n \" AND article in (SELECT id FROM articles ORDER BY entry DESC LIMIT %d)\"\n % maxsize\n )\n\n # Run the query\n cur.execute(query)\n\n count = 0\n for row in cur:\n # Unpack row\n uid, name, text = row\n\n if not name or not re.search(Index.SECTION_FILTER, name.lower()):\n # Tokenize text\n tokens = Tokenizer.tokenize(text)\n\n document = (uid, tokens, None)\n\n count += 1\n if count % 1000 == 0:\n print(\"Streamed %d documents\" % (count), end=\"\\r\")\n\n # Skip documents with no tokens parsed\n if tokens:\n yield document\n\n print(\"Iterated over %d total rows\" % (count))\n\n # Free database resources\n db.close()", "def Open(self, filename):\n self.fn = filename\n self.conn = sqlite3.connect(self.fn)\n self.conn.row_factory = lambda cursor, row: row[0] # makes the output lists of values instead of tuples\n self.cursor = self.conn.cursor()", "def read_fsa_db(db,fp,org_id) :\n\n cdsseq=\"\"\n tag=\"\"\n for line in fp :\n if line[0] == '>' :\n loaddb(cdsseq,org_id,tag,db)\n\n tag = line[1:].strip().split()[0]\n tag=tag.replace(\"ORFN:\",\"ORFP_\")\n cdsseq = \"\"\n else :\n cdsseq += line.strip()\n \n loaddb(cdsseq,org_id,tag,db)", "def create_connection(self, db_file):\n\t\t conn = sqlite3.connect(db_file) \n\t\t return conn", "def run():\n with open('directTDoA_knownpoints.db') as h:\n global my_info1, my_info2, my_info3\n i = 3 # skip the 3x comment lines at start of the text file database\n lines = h.readlines()\n my_info1 = []\n my_info2 = []\n my_info3 = []\n while i < sum(1 for _ in open('directTDoA_knownpoints.db')):\n inforegexp = re.search(r\"(.*),(.*),(.*)\", lines[i])\n my_info1.append(inforegexp.group(1))\n my_info2.append(inforegexp.group(2))\n my_info3.append(inforegexp.group(3))\n i += 1", "def generate_db(tsv_file, db_file):\n logger.info(\"Converting tsv %s to db file %s\", tsv_file, db_file)\n if os.path.exists(db_file):\n os.remove(db_file)\n db = TinyDB(db_file)\n with codecs.open(tsv_file, \"rb\", encoding=\"utf-8\") as f:\n row = f.readline().split(\"\\t\")\n headers = [SanskritObject(x).canonical() for x in row[0:8]]\n logger.info(\"Found dhatu tsv headers: {}\".format(str(headers)))\n # FIXME - Rewrite from here\n for row in f:\n entries = row.split(\"\\t\")[:len(headers)]\n entries = [SanskritObject(e).canonical() for e in entries]\n j = dict(zip(headers, entries))\n db.insert(j)\n db.close()\n logger.info(\"Saved dhatus database\")", "def parse_db(self):\n if self.db is not None:\n return list(self.add_program(self.db))", "def load(cls, iterable):\n x = 1\n db = DB()\n for line in iterable:\n if not line.strip() or line[0] == '#':\n continue\n try:\n entry = Entry.loads(line.strip(), id=x)\n except LoadError:\n warn(\"unable to parse a line!\")\n continue\n if entry:\n db.entries.append(entry)\n x += 1\n\n db.entries.sort(cmp=lambda a, b: cmp(a.start, b.start))\n return db", "def css_connection():\n conn = sqlite3.connect(CSS_DATABASE)\n try:\n yield conn\n finally:\n conn.close()", "def read_csv_files():\n if os.path.exists(\"data_wrangling_project.db\"):\n print (\"\\nDatabase in 
order...\")\n else:\n print (\"\\nDatabase does not exist...\\n\")\n sys.exit()\n\n if not os.path.exists(\"nodes_tags.csv\"):\n print (\"Cannot find CSV files...\")\n sys.exit()\n\n try:\n con = sql.connect(\"data_wrangling_project.db\")\n print (\"Connected to database...\\n\")\n except:\n print (\"\\nError -- cannot connect to the database\")\n sys.exit()\n\n cur = con.cursor()\n\n nodes_row_count = 0\n nodes_tags_row_count = 0\n ways_row_count = 0\n ways_tags_row_count = 0\n ways_nodes_row_count = 0\n\n with open('nodes.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO nodes (id, lat, lon, user, uid, version, changeset, timestamp) \\\n VALUES (?, ?, ?, ?, ?, ?, ?, ?);\", row)\n nodes_row_count += 1\n\n print ('Nodes written to db...')\n print ('Nodes number of rows: {:,}'.format(nodes_row_count))\n csv_file.close()\n\n with open('nodes_tags.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO nodes_tags (id, key, value, type) VALUES (?, ?, ?, ?);\", row)\n nodes_tags_row_count += 1\n\n print ('\\nNodes Tags written to db...')\n print ('Nodes Tags number of rows: {:,}'.format(nodes_tags_row_count))\n csv_file.close()\n\n with open('ways.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways (id, user, uid, version, changeset, timestamp) \\\n VALUES (?, ?, ?, ?, ?, ?);\", row)\n ways_row_count += 1\n\n print ('\\nWays written to db...')\n print ('Ways number of rows: {:,}'.format(ways_row_count))\n csv_file.close()\n\n with open('ways_tags.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways_tags (id, key, value, type) VALUES (?, ?, ?, ?);\", row)\n ways_tags_row_count += 1\n\n print ('\\nWays Tags written to db...')\n print ('Ways Tags number of rows: {:,}'.format(ways_tags_row_count))\n csv_file.close()\n\n with open('ways_nodes.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways_nodes (id, node_id, position) VALUES (?, ?, ?);\", row)\n ways_nodes_row_count += 1\n\n print ('\\nWays Nodes written to db...')\n print ('Ways Nodes number of rows: {:,}'.format(ways_nodes_row_count))\n csv_file.close()\n\n con.commit()\n cur.close()\n con.close()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over each row in dbfile and writes parsed tokens to a temporary file for processing.
def tokens(dbfile):

    tokens = None

    # Stream tokens to temp working file
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".txt", delete=False
    ) as output:
        # Save file path
        tokens = output.name

        for row in RowIterator(dbfile):
            output.write(" ".join(row) + "\n")

    return tokens
[ "def stream(self, dbfile):\n\n # Connection to database file\n db = sqlite3.connect(dbfile)\n cur = db.cursor()\n\n cur.execute(\"SELECT Text FROM sections\")\n\n count = 0\n for section in cur:\n # Tokenize text\n tokens = Tokenizer.tokenize(section[0])\n\n count += 1\n if count % 1000 == 0:\n print(f\"Streamed {count} documents\", end=\"\\r\")\n\n # Skip documents with no tokens parsed\n if tokens:\n yield tokens\n\n print(f\"Iterated over {count} total rows\")\n\n # Free database resources\n db.close()", "def parse(file, conn): #formerly main\n global cursor\n cursor = [] #CRITICALLY IMPORTANT\n #TODO: Investigate and understand what the removal of these two lines does to the program. The cursor\n #appears to stay behind after the parser function has completed and pollutes the next call to parser,\n #will erratically ruin test cases\n\n #TODO: Remove global variables, make everything local\n\n c = conn.cursor()\n with open(file, 'r') as f:\n\n for line in f:\n line = line.strip('\\n') #take off the newline\n process(line, c)\n\n adjust_entries(stack[0], c)\n insert_into_db(stack[0], cursor, c)\n\n #go grab the sql tables\n print('\\nIndividuals:')\n print(from_db_cursor(c.execute('SELECT * FROM INDI ORDER BY ID ASC')))\n print('\\nFamilies:')\n print(from_db_cursor(c.execute('SELECT * FROM FAM ORDER BY ID ASC')))\n conn.commit() #save db every time it's run", "def ParseAndInsertTSVintoDB(path, cur):\n df = pd.read_csv(path, delimiter=\"\\t\")\n\n for _, row in tqdm(df.iterrows()):\n InsertRow(row, cur)", "def generate_db(tsv_file, db_file):\n logger.info(\"Converting tsv %s to db file %s\", tsv_file, db_file)\n if os.path.exists(db_file):\n os.remove(db_file)\n db = TinyDB(db_file)\n with codecs.open(tsv_file, \"rb\", encoding=\"utf-8\") as f:\n row = f.readline().split(\"\\t\")\n headers = [SanskritObject(x).canonical() for x in row[0:8]]\n logger.info(\"Found dhatu tsv headers: {}\".format(str(headers)))\n # FIXME - Rewrite from here\n for row in f:\n entries = row.split(\"\\t\")[:len(headers)]\n entries = [SanskritObject(e).canonical() for e in entries]\n j = dict(zip(headers, entries))\n db.insert(j)\n db.close()\n logger.info(\"Saved dhatus database\")", "def read_csv_files():\n if os.path.exists(\"data_wrangling_project.db\"):\n print (\"\\nDatabase in order...\")\n else:\n print (\"\\nDatabase does not exist...\\n\")\n sys.exit()\n\n if not os.path.exists(\"nodes_tags.csv\"):\n print (\"Cannot find CSV files...\")\n sys.exit()\n\n try:\n con = sql.connect(\"data_wrangling_project.db\")\n print (\"Connected to database...\\n\")\n except:\n print (\"\\nError -- cannot connect to the database\")\n sys.exit()\n\n cur = con.cursor()\n\n nodes_row_count = 0\n nodes_tags_row_count = 0\n ways_row_count = 0\n ways_tags_row_count = 0\n ways_nodes_row_count = 0\n\n with open('nodes.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO nodes (id, lat, lon, user, uid, version, changeset, timestamp) \\\n VALUES (?, ?, ?, ?, ?, ?, ?, ?);\", row)\n nodes_row_count += 1\n\n print ('Nodes written to db...')\n print ('Nodes number of rows: {:,}'.format(nodes_row_count))\n csv_file.close()\n\n with open('nodes_tags.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO nodes_tags (id, key, value, type) VALUES (?, ?, ?, ?);\", row)\n nodes_tags_row_count 
+= 1\n\n print ('\\nNodes Tags written to db...')\n print ('Nodes Tags number of rows: {:,}'.format(nodes_tags_row_count))\n csv_file.close()\n\n with open('ways.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways (id, user, uid, version, changeset, timestamp) \\\n VALUES (?, ?, ?, ?, ?, ?);\", row)\n ways_row_count += 1\n\n print ('\\nWays written to db...')\n print ('Ways number of rows: {:,}'.format(ways_row_count))\n csv_file.close()\n\n with open('ways_tags.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways_tags (id, key, value, type) VALUES (?, ?, ?, ?);\", row)\n ways_tags_row_count += 1\n\n print ('\\nWays Tags written to db...')\n print ('Ways Tags number of rows: {:,}'.format(ways_tags_row_count))\n csv_file.close()\n\n with open('ways_nodes.csv', 'r') as csv_file:\n reader = csv.reader(csv_file) # comma is default delimiter\n next(csv_file) # skip header row\n for row in reader:\n cur.execute(\"INSERT OR ABORT INTO ways_nodes (id, node_id, position) VALUES (?, ?, ?);\", row)\n ways_nodes_row_count += 1\n\n print ('\\nWays Nodes written to db...')\n print ('Ways Nodes number of rows: {:,}'.format(ways_nodes_row_count))\n csv_file.close()\n\n con.commit()\n cur.close()\n con.close()\n return", "def write_database(db, database_file):\n with open(database_file, 'w') as f:\n for job_id, (filename, status, date) in db.items():\n f.write(\"{}\\t{}\\t{}\\t{}\\n\".format(filename, job_id, status, date))", "def __call__(self, infile, dbfile):\n\n print(f\"Converting {infile} to {dbfile}\")\n\n # Delete existing file\n if os.path.exists(dbfile):\n os.remove(dbfile)\n\n # Create new database\n db = sqlite3.connect(dbfile)\n\n # Create database tables if necessary\n self.create(db, XML2DB.QUESTIONS, \"questions\")\n self.create(db, XML2DB.ANSWERS, \"answers\")\n\n count = 0\n with open(infile, encoding=\"utf-8\") as xml:\n context, root = self.xmlstream(xml)\n\n for event, row in context:\n if event == \"end\":\n # Execute insert statement\n self.insert(db, row)\n\n count += 1\n if count % 10000 == 0:\n print(f\"Inserted {count} rows\")\n\n # Free memory\n root.clear()\n\n print(f\"Total rows inserted: {count}\")\n\n # Commit changes\n db.commit()", "def process_snowflake_file(filepath, snowflake_writer):\n snowflake_parser = SnowflakeTableParser()\n data = snowflake_parser.parse_file(filepath=filepath)\n for item in data:\n record = {\n \"num_available_units\": item['available_units'],\n 'has_valid_address_parts': bool(item['valid_address_parts']),\n \"address_hash\": item['address_hash'],\n \"raw_apt_name\": item[\"raw_apt_name\"],\n \"raw_address\": item[\"raw_address\"],\n \"raw_city\": item[\"raw_city\"],\n \"raw_state\": item[\"raw_state\"],\n \"apt_name\": item[\"apt_name\"],\n \"address\": item[\"address\"],\n \"city\": item[\"city\"],\n \"state\": item[\"state\"],\n \"zip\": item[\"zip\"],\n \"load_datetime_string\": item[\"date_string\"],\n \"load_datetime\": item[\"date_object\"],\n }\n snowflake_writer.add_record(output_record=record)\n\n snowflake_writer.run_inserts()", "def run():\n with open('directTDoA_knownpoints.db') as h:\n global my_info1, my_info2, my_info3\n i = 3 # skip the 3x comment lines at start of the text file database\n lines = h.readlines()\n my_info1 = []\n my_info2 = []\n my_info3 = []\n while i < 
sum(1 for _ in open('directTDoA_knownpoints.db')):\n inforegexp = re.search(r\"(.*),(.*),(.*)\", lines[i])\n my_info1.append(inforegexp.group(1))\n my_info2.append(inforegexp.group(2))\n my_info3.append(inforegexp.group(3))\n i += 1", "def save_tokens_to_file(self, file_path):\n with open(file_path, 'w', encoding='utf-8') as fp:\n #for token in self.token2id.keys():\n for idd in range(self.size()): \n fp.write(self.id2token[idd] + '\\n')", "def fix_gtf_records(gtf_file, output_file):\n gene_id_regex = re.compile(r'(.*gene_id \")([^\"]+)(\".*)')\n transcript_id_regex = re.compile(r'(.*transcript_id \")([^\"]+)(\".*)')\n for line in open(gtf_file, 'rU'):\n gene_id_match = re.match(gene_id_regex, line)\n if gene_id_match:\n output_file.write(line)\n else:\n transcript_id_match = re.match(transcript_id_regex, line)\n if transcript_id_match:\n gene_id = transcript_id_match.group(2)\n else:\n gene_id = \"\"\n output_file.write('%s gene_id \"%s\";\\n' % (line.strip(), gene_id))", "def write_data(file_in, conn, table):\n\n # Create table and batch writer\n if not conn.table_exists(table):\n conn.create_table(table)\n writer = conn.create_batch_writer(table)\n\n # Iterate over file, add each mutation to the writer\n with open(file_in) as f:\n for line in f:\n (row, col_fam, col_qual, col_vis, val) = tuple(line.rstrip('\\n').split('\\t'))\n mutation = Mutation(row)\n mutation.put(cf=col_fam, cq=col_qual, cv=col_vis, val=val)\n writer.add_mutation(mutation)\n\n writer.close()", "def create_words_file(db_name, table_names):\r\n #opening the file\r\n file = open(\"Words2\\\\\"+db_name+\"_words.txt\", 'w', encoding='utf-8')\r\n conn = sql.connect(db_name)\r\n #parsing all tables in the database\r\n for table in table_names:\r\n write_comments_to_file(file,conn,table)", "def update_gene_expr_table(files):\n local_connector = LocalConnector(psql)\n conn, cur = local_connector.get_connection()\n s3 = boto3.client('s3', aws_access_key_id=__credential__.aws_access_key_id, \\\n aws_secret_access_key=__credential__.aws_secret_access_key)\n for f in files:\n try: # TODO: import Error\n # Stream-in files from S3 and parse to list\n obj = s3.get_object(Bucket='gdcdata', Key=f.filepath)\n body = obj['Body'].read()\n content = GzipFile(None, 'r', fileobj=BytesIO(body)).read().decode('utf-8')\n content = list(csv.reader(content.split('\\n'), delimiter='\\t'))\n\n # Filter all genes with expression level == 0\n # Truncate the gene id from for eg. 
`ENSG00000007080.9` to `ENSG00000007080`\n # Convert to list: case_id, gene_id, expr_val\n gene_list = filter(lambda x: x[2] > 0, \\\n map(lambda x: (f.caseid, x[0].split('.')[0], float(x[1])), \\\n filter(lambda x: len(x) > 1, content)))\n\n # Method 1\n # Write the list to temp csv file\n # Which is slow\n header = 'case_id\\tgene_id\\tgene_expr\\n'\n fd, path = mkstemp(suffix='.csv')\n with open(path, 'w', newline='') as tf:\n tf.write(header)\n writer = csv.writer(tf, delimiter='\\t')\n writer.writerows(gene_list)\n query = \"COPY gene_expr_table FROM STDIN DELIMITER '\\t' CSV HEADER\"\n with open(path, 'r') as tf:\n cur.copy_expert(query, tf)\n conn.commit()\n close(fd)\n\n '''\n # Method 2\n # Insert by each row\n # Even slower\n import psycopg2\n from psycopg2 import extras\n query = \"\"\"INSERT INTO gene_expr_table\n VALUES (%s, %s, %s)\"\"\"\n psycopg2.extras.execute_batch(cur, query, gene_list)\n conn.commit()\n '''\n\n except:\n print(\"Unable to retrieve file: gdcdata/%s\" % f.filepath)\n continue\n\n local_connector.close_connection()", "def stream(dbfile, maxsize):\n # Connection to database file\n db = sqlite3.connect(dbfile)\n cur = db.cursor()\n\n # Select tagged sentences without a NLP label. NLP labels are set for\n # non-informative sentences.\n query = Index.SECTION_QUERY + \" AND tags is not null\"\n\n if maxsize > 0:\n query += (\n \" AND article in (SELECT id FROM articles ORDER BY entry DESC LIMIT %d)\"\n % maxsize\n )\n\n # Run the query\n cur.execute(query)\n\n count = 0\n for row in cur:\n # Unpack row\n uid, name, text = row\n\n if not name or not re.search(Index.SECTION_FILTER, name.lower()):\n # Tokenize text\n tokens = Tokenizer.tokenize(text)\n\n document = (uid, tokens, None)\n\n count += 1\n if count % 1000 == 0:\n print(\"Streamed %d documents\" % (count), end=\"\\r\")\n\n # Skip documents with no tokens parsed\n if tokens:\n yield document\n\n print(\"Iterated over %d total rows\" % (count))\n\n # Free database resources\n db.close()", "def parse_table_to_elegant_file(self, filename: str) -> None:\n self.add_drifts()\n\n parse_table_to_elegant_file(self.name, self.table, filename)", "def File_to_DB():\n conn = mysql.connector.connect(\n user='root',\n password='MaximumHaze16',\n host='localhost',\n database='seniordesign'\n )\n cur = conn.cursor()\n fr = open(\"C:\\\\users\\\\sarah\\\\desktop\\\\dbtransfer2\\\\transferFile.txt\", 'r')\n count =0\n for line in fr:\n id = int(line[0:line.find(\"%\")])\n title= line[line.find(\"%\")+1:line.find(\"%%\")]\n author = line[line.find(\"%%\")+2:line.find(\"%%%\")]\n genre = line[line.find(\"%%%\")+3:line.find(\"%%%%\")]\n length = int(line[line.find(\"%%%%\")+4:line.find(\"%%%%%\")])\n cur.execute(\"insert into example values(%s,%s,%s,%s,%s)\",(id,title,author,genre,length))\n\n conn.commit()\n conn.close()\n fr.close()", "def refresh (self):\n\n # The cache directory must exist\n self.check_cache(confdir.cache)\n\n # these might speed-up indexing\n db_oids = []\n open_flags = 'nfu' \n while open_flags:\n try:\n # Issue #4: work on a temporary file to limit collisions\n db = dbm.open(self.__dbFileTmp, open_flags)\n db[\"__version__\"] = Database.version\n db[\"__source_path__\"] = os.path.abspath(self.sourceFile)\n db[\"__source_md5__\"] = md5sum(self.sourceFile)\n except Exception:\n open_flags = open_flags[:-1]\n if not open_flags:\n raise\n else:\n break\n\n text = open(self.sourceFile, 'rb')\n\n logger.debug ( 'Building index %s for data file %s (open flags \\\"%s\\\")', self.__dbFileTmp, self.sourceFile, 
open_flags );\n\n # Build the \"direct\" indexes to have direct access to values\n nb_direct = nb_next = 0\n lineNo = 0\n while 1:\n\n try:\n oid = line = None\n while not oid:\n line = text.readline()\n lineNo += 1\n if not line: break\n oid, tag, val = self.textParser.grammar.parse(line)\n if not oid: break\n except Exception:\n db.close()\n exc = sys.exc_info()[1]\n try:\n os.remove(self.__dbFileTmp)\n except OSError:\n pass\n raise Exception('Data error at %s:%d: %s' % ( self.sourceFile, lineNo, exc ) )\n\n try:\n _oid = self.textParser.evaluateOid(oid)\n except Exception:\n db.close()\n exc = sys.exc_info()[1]\n try:\n os.remove(self.__dbFileTmp)\n except OSError:\n pass\n raise Exception( 'OID error at %s:%d: %s' % ( self.sourceFile, lineNo, exc ) )\n\n try:\n _tag = self.textParser.evaluateTag(tag)\n except Exception:\n logger.warn ( 'Validation error at line %s, tag %r: %s', lineNo, tag, sys.exc_info()[1] );\n\n try:\n _val = self.textParser.evaluateValue( oid, tag, val, dataValidation=True)\n except Exception:\n logger.warn ( 'Validation error at line %s, value %r: %s', lineNo, val, sys.exc_info()[1] );\n\n # for lines serving subtrees, type is empty in tag field\n db[oid] = '%s,%d,%s,%s' % (oid2str(_oid), tag[0] == ':', _tag, _val)\n db_oids.append ( _oid );\n nb_direct = nb_direct+1\n\n # Build the \"next\" indexes to have direct access to next values\n\n # First we need oids splitted into nodes. We cannot sort them by string\n # comparison: \"1\"<\"10\"<\"2\" and we want 1<2<10\n db_oids.sort()\n for i in range(len(db_oids)-1):\n oid = db_oids[i]\n oid_txt = oid2str(oid)\n # The easy one\n key = \"next.\"+oid_txt\n db[key] = oid2str(db_oids[i+1])\n nb_next = nb_next+1\n # Now the parents: their next is current oid unless they already have one next\n nodes = oid[:-1]\n for n in range(len(nodes)):\n key = \"next.\" + oid2str(nodes[:n+1])\n if not db.has_key(key):\n db[key] = oid_txt\n nb_next = nb_next+1\n # The last one have no next\n key = \"next.\" + oid2str(db_oids[ len(db_oids)-1 ])\n db[key] = \"\"\n nb_next = nb_next+1\n\n text.close()\n db.close()\n logger.debug ( 'Index ok: %d direct entries, %d next entries' % (nb_direct,nb_next) );\n\n # Issue #4: Synchronizes access to the database\n self.__lock.acquire(True);\n try:\n self.close()\n if os.access(self.__dbFile, os.R_OK):\n os.remove(self.__dbFile);\n os.rename(self.__dbFileTmp, self.__dbFile);\n self.__dbType = whichdb(self.__dbFile)\n finally:\n self.__lock.release();", "def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return all the recipient email addresses
def recipient_addrs(self):
    tos = self.msg.get_all('to', [])
    ccs = self.msg.get_all('cc', [])
    bccs = self.msg.get_all('bcc', [])
    resent_tos = self.msg.get_all('resent-to', [])
    resent_ccs = self.msg.get_all('resent-cc', [])
    recipient_addrs = email.utils.getaddresses(tos + bccs + ccs + resent_tos + resent_ccs)
    return [String(a[1]) for a in recipient_addrs]
[ "def email_recipients(self) -> str:\n return self['emailRecipients']", "def get_emails(notification_rec):\n # Use a set instead of list as there could be duplicates.\n ret = []\n\n for recipient in notification_rec.recipients.all():\n ret.append(recipient.email)\n return ret", "def sendall_recipient_addresses() -> List[str]:\n return [to_address(0x1234)]", "async def get_email_recipients(self, email_id):\n async with self.postgres.acquire() as conn:\n async with conn.cursor() as curs:\n await curs.execute('''\n SELECT * FROM mailrecipient\n LEFT JOIN recipient on recipient.id = mailrecipient.recipientid\n WHERE mailrecipient.mailid=%s\n ''',\n (email_id,))\n recipient_records = await curs.fetchall()\n\n recipients = []\n for record in recipient_records:\n recipients.append(record[4])\n\n return recipients", "def get_associated_emails(self):\n email_addresses = set([note.author_email for note in self.get_notes()])\n email_addresses.add(self.author_email)\n return email_addresses", "def contact_email_list(self):\n u2s = self.userstosubjects_set.filter(available_for_contact=True)\n return [ s.user.email for s in u2s ]", "def get_recipients():\n _filter = request.args.get(\"q\", None)\n recipients = list(\n map(\n lambda u: (u.id, u.nickname if u.nickname else u.email),\n filter(\n lambda u: u.id != current_user.get_id(),\n UserModel.search_user_by_keyword(current_user.id, _filter),\n ),\n )\n )\n\n return jsonify(recipients=recipients)", "def get_mail_users(self):\n self.cursor.execute('SELECT address FROM email')\n return self.cursor.fetchall()", "def list_mail_addresses(self):\n self.cursor.execute('SELECT * from email')\n result = self.cursor.fetchall()\n print \"Listing mail addresses...\"\n for address in result:\n print \"Address:\\t\" + address[0]", "def extract_email_addresses(self, data):\n ret = []\n emails = data.get('emails')\n for email in emails:\n ret.append(EmailAddress(email=email['email'],\n verified=email['verified'],\n primary=email['primary']))\n return ret", "def get_emails(self):\n return self.db.all(\"\"\"\n SELECT *\n FROM emails\n WHERE participant_id=%s\n ORDER BY id\n \"\"\", (self.id,))", "def get_recipients(self):\n if self.recipients is not None:\n return self.recipients\n raise NotImplementedError(\"Notifier requires a recipients list\")", "def get_emailaddrs(self):\n mbox = [mailbox.mbox(each) for each in self.path]\n collector = set()\n for each_mbox in mbox:\n for each_record in each_mbox:\n if each_record.has_key('From'):\n collector.update(each_record['From'].split(','))\n if each_record.has_key('To'):\n collector.update(each_record['To'].split(','))\n if each_record.has_key('Cc'):\n collector.update(each_record['Cc'].split(','))\n return collector", "def get_verified_email_addresses(self):\n return [email.address for email in self.get_emails() if email.verified]", "def _transform_recipients(self):\n # Extract recipients\n addrs = email.utils.getaddresses(self._message.get_all(\"TO\", [])) + \\\n email.utils.getaddresses(self._message.get_all(\"CC\", [])) + \\\n email.utils.getaddresses(self._message.get_all(\"BCC\", []))\n self._recipients = [x[1] for x in addrs]\n self._message.__delitem__(\"bcc\")\n self._sender = self._message[\"from\"]", "def getEmailAddresses(self):\n uniqueDiscoveredPages = set(self.discoveredPages)\n if uniqueDiscoveredPages:\n for discoveredPage in uniqueDiscoveredPages:\n if not discoveredPage in self.openedPages:\n self.openUrl(discoveredPage)\n\n # Create a unique list of email addresses found in the domain\n uniqueList = 
set(self.emails)\n # Output to the screen\n if uniqueList:\n print 'Found these email addresses:'\n for email in uniqueList:\n print email\n else:\n print 'No email addresses found at %s' % self.args_dict['domain']", "def get_email_to_send(self, cr, uid, ids, context=None):\n model_obj = self.pool.get('ir.model.data')\n group = model_obj.get_object(\n cr, uid, 'base', 'group_hr_user'\n )\n mail_list = ''\n for user in group.users:\n mail_list += user.email and user.email + ', ' or ''\n return mail_list", "def get_to_addresses(self):\n addresses = self.get_all_addr_header('Resent-To')\n addresses.extend(self.get_all_addr_header('Resent-Cc'))\n if addresses:\n for address in addresses:\n yield address\n else:\n for key in TO_HEADERS:\n for address in self.get_all_addr_header(key):\n yield address", "def find_emails(self, source):\n\n emails = re.findall(r\"[\\w\\.-]+@\" + self.domain, source, re.I)\n\n if emails == None:\n emails = []\n\n return emails" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds NP (noun phrase) leaf nodes of a chunk tree.
def leaves(tree):
    for subtree in tree.subtrees(filter = lambda t: t.label()=='NP'):
        yield subtree.leaves()
[ "def find_non_clustered_leafs(module, task, msg):\n non_clustered_leafs = []\n cli = pn_cli(module)\n cli += ' cluster-show format cluster-node-1,cluster-node-2 '\n cli += ' no-show-headers '\n clustered_nodes = list(set(run_command(module, cli, task, msg).split()))\n\n for leaf in module.params['pn_leaf_list']:\n if leaf not in clustered_nodes:\n non_clustered_leafs.append(leaf)\n\n return non_clustered_leafs", "def traverse_tree(tree, return_flat_tree):\n tokens = []\n if (type(tree) == nltk.tree.Tree) and (tree.label() == NONE_NODE_LABEL):\n return tokens\n for subtree in tree:\n if type(subtree) == nltk.tree.Tree:\n #if subtree.label() == \"NP\":\n if subtree.label().startswith(\"NP\"):\n if tree.label() != NONE_NODE_LABEL:\n # If the subtree does not contain additional NP's, the leaves constitute a 'base NP'\n if not subtree_contains_np(subtree):\n if subtree.leaves() and len(subtree.leaves()) > 0: \n # Filter the base-NP tree (as it may contain -NONE- labels):\n filtered_leaves = filter_base_np(subtree)\n \n if len(filtered_leaves) > 0:\n if return_flat_tree:\n # use extend if a flat tree is desired:\n tokens.extend( [SONP_SYM] + filtered_leaves + [EONP_SYM])\n else:\n # base-NPs will be in a nested list\n tokens.append( [SONP_SYM] + filtered_leaves + [EONP_SYM])\n \n else:\n assert(False)\n \n else:\n # If the subtree contains NPs, continuing traversing in search of the base NP\n child_tokens = traverse_tree(subtree, return_flat_tree)\n if len(child_tokens) > 0:\n tokens.extend(child_tokens)\n else:\n child_tokens = traverse_tree(subtree, return_flat_tree)\n if len(child_tokens) > 0:\n tokens.extend(child_tokens)\n #tokens.append(traverse_tree(subtree))\n else:\n tokens.append(str(subtree))\n return tokens", "def get_grammar_chunks(doc, grammar=\"NP: {<N.*>+}\"):\n tokens = [t for t in tokenize(doc) if t]\n tagged = pos_tag(tokens)\n parser = nltk.RegexpParser(grammar)\n parsed = parser.parse(tagged)\n return [[token for token, annotation in subtree.leaves()] \n for subtree in parsed if hasattr(subtree, \"label\")]", "def findLeafNode(self, treeNodeList):\n leafNodeList = []\n for node in treeNodeList:\n if node.is_leaf:\n leafNodeList.append(node)\n #if node.is_leaf -ends\n #for node ends\n return leafNodeList", "def extract_noun_chunks(text: str, remove_stopwords: bool = True) -> List[str]:\n doc = nlp(text)\n chunks = [chunk.text for chunk in doc.noun_chunks]\n chunks = list(filter(lambda chunk: not nlp.vocab[chunk].is_stop, chunks)) if remove_stopwords else chunks\n\n return list(set(chunks))", "def n_splittable_leaf_nodes(tree: Tree) -> int:\n output = len(tree.splittable_leaf_nodes)\n return output", "def nt_search(seq, subseq): # -> list[str]:\n ...", "def noun_extraction(self, nlp_output):\n found_nouns = []\n root_noun = ''\n\n for key, value in nlp_output.items():\n if value[1] in ['NOUN', 'PROPN'] and value[3] in ['ROOT', 'pnc']:\n root_noun = value[0]\n found_nouns.append(value[0])\n if root_noun in value[4] and value[1] in ['PROPN', 'NOUN', 'X', 'NUM'] and value[3] in ['nk', 'pnc']:\n found_nouns.append(value[0])\n\n nouns = ' '.join(found_nouns)\n return nouns", "def get_np_keyphrases(abstract_sents):\n post_parses = [list(s_parse) for s_parse in list(parser.raw_parse_sents(abstract_sents))]\n keyphrases = []\n for sent_parse in post_parses:\n for tree in sent_parse[0]:\n for subtree in tree.subtrees():\n if filter_noun_phrases(subtree, keyphrases):\n keyphrases.append(subtree.leaves())\n return keyphrases", "def leaves(tree):\n if is_tree(tree):\n return 
[label(tree)]\n else:\n # List of leaf label for each branch\n return sum()", "def get_phrases(tree):\n phrases = []\n\n def recurse(t):\n try:\n phrases.append(t.label())\n except AttributeError:\n return\n for child in t:\n recurse(child)\n\n recurse(tree)\n return phrases", "def leaf_nodes_to_search_percent(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"leaf_nodes_to_search_percent\")", "def find_partial_trees(\n self, sentence: bytes, start: Optional[Production] = None\n ) -> Iterator[ParseTree[ParseTreeValue]]:\n if start is None:\n start = self.start\n for pm in start.partial_match(sentence): # type: ignore\n yield pm.tree", "def _get_leaf_nodes(self, EC_nr):\n EC_nr_toplvls = [eclvl for eclvl in EC_nr if eclvl != '-']\n depth = len(EC_nr) - len(EC_nr_toplvls) # how may iterations we need until we arrive at the leaf nodes\n\n # get the branch:\n curr_lvl_dict = self.class_to_id_EC\n for eclvl in EC_nr_toplvls:\n curr_lvl_dict = curr_lvl_dict[eclvl]\n file_path_list = []\n # get the filepaths:\n for path in _iter_paths(curr_lvl_dict):\n print(path)\n if path[-1] == 'path':\n item = curr_lvl_dict\n for i in range(len(path)):\n item = item[path[i]]\n file_path_list.append(item)\n return file_path_list", "def get_phyl_leafs(self, node):\n if not node.is_phyl_node:\n raise Exception(\"Node must be phylostratum node!!!\")\n\n leafs = []\n for child in node.children:\n if not child.is_phyl_node:\n leafs.extend(self.get_leafs_subtree(child))\n if node.is_leaf:\n leafs.append(node)\n return leafs", "def leaves(tree):\n if is_leaf(tree):\n return [label(tree)]\n return sum([leaves(b) for b in branches(tree)], [])", "def get_side_phyl_branch_leafs(self):\n phyl_2_leafs = {}\n\n for ph_node in self.phyl_nodes:\n leafs = []\n for child in ph_node.children:\n if not child.is_phyl_node:\n leafs.extend(self.get_leafs_subtree(child))\n if ph_node.is_leaf:\n leafs.append(ph_node)\n phyl_2_leafs[ph_node.id] = leafs\n return phyl_2_leafs", "def rtree_outgroup_labels(tree):\n node = None\n # add an n_leaves_under attribute\n for node in tree.postorder_node_iter():\n e = node.edge\n p = getattr(e, \"tail_node\", None)\n if p:\n p.n_leaves_under = getattr(p, \"n_leaves_under\", 0) + getattr(node, \"n_leaves_under\", 1)\n\n # find the child of the root with the largest number of descendants\n seed_node = tree.seed_node\n ch = seed_node.child_nodes()\n f = ch[0]\n f.in_biggest = False\n biggest_clade, bc_size = f, getattr(f, \"n_leaves_under\", 1)\n for nd in ch[1:]:\n nk = getattr(nd, \"n_leaves_under\", 1)\n if nd > bc_size:\n biggest_clade, bc_size = nd, nk\n nd.in_biggest = False\n # Mark the biggest clade, and accumulate out all unmarked leaf names\n biggest_clade.in_biggest = True\n outgroup_labels = []\n for node in tree.preorder_node_iter():\n par = node.parent_node\n if node == seed_node or par == seed_node:\n continue\n node.in_biggest = par.in_biggest\n if (not node.in_biggest) and (not node.child_nodes()):\n outgroup_labels.append(node.label)\n return outgroup_labels", "def get_all_leaf_nodes(node, leaf_nodes):\n if node.getchildren():\n for child in node:\n get_all_leaf_nodes(child, leaf_nodes)\n else:\n leaf_nodes.append(node)", "def test_build_paragraph_tree(self):\n text = \"This (a) is a good (1) test (2) of (3) some (b) body.\"\n self.assertEqual(\n self.regParser.build_tree(text),\n Node(\"This \", children=[\n Node(\"(a) is a good \", label=['a'], children=[\n Node(\"(1) test \", label=['a', '1']),\n Node(\"(2) of \", label=['a', '2']),\n Node(\"(3) some \", label=['a', 
'3'])\n ]),\n Node(\"(b) body.\", label=['b'])\n ])\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalises a word to lowercase and stems and lemmatizes it.
def normalise(word, stemmer, lemmatizer):
    word = word.lower()
    #word = stemmer.stem(word)
    word = lemmatizer.lemmatize(word)
    return word
[ "def words_normalize(words):\n normalized_words = []\n for word in words:\n wnormalized = word.lower()\n normalized_words.append((wnormalized))\n return normalized_words", "def spacy_normalizer(text, lemma=None):\n if not str(text).isupper() or not str(text).endswith(\"S\") or not len(text.split()) == 1:\n tokens = list(filter(lambda x: len(x) != 0, p.split(text.strip())))\n if lemma:\n lemma = lemma.split(\" \")\n text = \" \".join([stemmer.stem(lem) for lem in lemma])\n else:\n text = \" \".join([stemmer.stem(spacy_lemmatizer(t, \"NOUN\")[0]) for t in tokens])\n return text", "def normalize_ingredient_name(ingredient_name):\n words = ingredient_name.lower().strip().split()\n return ' '.join(LEMMATIZER.lemmatize(w) for w in words)", "def lemmatize_words(sentence):\n lemmatizer = WordNetLemmatizer()\n wordnet_map = {\"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"J\": wordnet.ADJ, \"R\": wordnet.ADV}\n\n pos_tagged_text = nltk.pos_tag(sentence.split())\n\n return \" \".join(\n [\n lemmatizer.lemmatize(word, wordnet_map.get(pos[0], wordnet.NOUN))\n for word, pos in pos_tagged_text\n ]\n )", "def lemmatize_word(word):\n return lemmatizer.lemmatize(word,'v')", "def lemmatize_stemming(self, word_without_punctuation):\n stemmer = SnowballStemmer('english')\n return stemmer.stem(WordNetLemmatizer().lemmatize(word_without_punctuation, pos='v'))", "def lemmatize_words(\n unlem: List[str],\n lemmatizer_name: str,\n pos_tag: Union[str, List[str]] = \"n\",\n verbose: bool = False,\n) -> List[str]:\n if lemmatizer_name == \"nltk\":\n lemmatized = nltk_lemmatize(unlem, pos_tag, verbose)\n elif lemmatizer_name == \"spacy\":\n lemmatized = spacy_lemmatize(unlem, pos_tag, verbose)\n elif lemmatizer_name == \"spacy_old\":\n lemmatized = old_spacy_lemmatize(unlem, verbose)\n elif lemmatizer_name == \"pymorphy-ru\":\n lemmatized = pymorphy_ru_lemmatize(unlem, verbose)\n else:\n raise ValueError(f\"Incorrect lemmatizer type: {lemmatizer_name}\")\n return lemmatized", "def lemmatize_text(self, text):\n stop = stopwords.words('english')\n return [self.lemmatizer.lemmatize(w) for w in self.w_tokenizer.tokenize(text) if w not in stop]", "def lemmatize_stemming(text):\n stemmer = PorterStemmer()\n return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos = 'v'))", "def lemmatize(word):\n global _lemmatizer\n if _lemmatizer is None:\n _lemmatizer = nltk.WordNetLemmatizer()\n return _lemmatizer.lemmatize(word)", "def lemmatize_text(src_tokens: list, dest_tokens: list):\n lem = WordNetLemmatizer()\n for w in src_tokens:\n # Lemmatize the alphabetical items\n if w.isalpha():\n w = lem.lemmatize(w)\n dest_tokens.append(w)\n # Retain non-alphabetical items\n else:\n dest_tokens.append(w)", "def normalize(text, lowercase=True, collapse=True, decompose=True,\n replace_categories=CATEGORY_DEFAULTS, lemma=False, pos_tag=\"n\"):\n if not isinstance(text, six.string_types):\n return\n\n # Python 3?\n if six.PY2 and not isinstance(text, six.text_type):\n text = text.decode('utf-8')\n\n if lowercase:\n # Yeah I made a Python package for this.\n text = text.lower()\n\n # if transliterate:\n # # Perform unicode-based transliteration, e.g. 
of cyricllic\n # # or CJK scripts into latin.\n # text = unidecode(text)\n # if six.PY2:\n # text = unicode(text)\n\n if decompose:\n # Apply a canonical unicoe normalization form, e.g.\n # transform all composite characters with diacritics\n # into a series of characters followed by their\n # diacritics as separate unicode codepoints.\n text = unicodedata.normalize('NFKD', text)\n\n # Perform unicode category-based character replacement. This is\n # used to filter out whole classes of characters, such as symbols,\n # punctuation, or whitespace-like characters.\n characters = []\n for character in text:\n category = unicodedata.category(character)[0]\n character = replace_categories.get(category, character)\n characters.append(character)\n text = u''.join(characters)\n # print(text)\n if collapse:\n # Remove consecutive whitespace.\n text = COLLAPSE.sub(WS, text).strip(WS)\n\n if lemma:\n text = lemmatize(text, pos=get_wordnet_pos(pos_tag))\n\n return text", "def normalize(text):\n\n return white_space_fix(remove_articles(remove_punc(lower(text))))", "def _stem_words(stemmer, words):\n return [stemmer.stem(word.lower()) for word in words]", "def lemmatize(input):\n lemmatizer=WordNetLemmatizer()\n input_str=word_tokenize(input)\n new_words = []\n for word in input_str:\n new_words.append(lemmatizer.lemmatize(word))\n return ' '.join(new_words)", "def luanerize_word(word):\n\n import nltk # pylint: disable=import-outside-toplevel\n\n wnl = nltk.stem.WordNetLemmatizer()\n lemma = wnl.lemmatize(word.strip(string.punctuation))\n return word.replace(lemma, LUAN)", "def to_lemmas(text):\n # TODO: Option to leave hypens in tact?\n # TODO: Some of this stuff should be optional.\n # TODO: Change name to lemmatize.\n # TODO: This should share code with tokenize.\n nlp_model = get_minimal_model()\n lemmas = []\n text = text.lower()\n for token in nlp_model(text):\n if (token.is_ascii \n and not token.is_punct \n and not token.is_stop \n and not token.is_digit\n and token.text.strip() \n and len(token.text) < 15):\n lemmas.append(token.lemma_.strip())\n return ' '.join(lemmas)", "def lemmatize_sentence(sentence):\n\n sentence = [(stem.WordNetLemmatizer().lemmatize(word, pos_tag), pos_tag) if pos_tag else (word, pos_tag)\n for word, pos_tag in sentence]\n\n return sentence", "def clean_text(text, lemma, en_stop = [], exclude_sent = [], minwords = 2,\r\n lemmatize = True):\r\n \r\n preprocessed_text = None\r\n \r\n text = str(text) #Some text is just numbers or empty\r\n text = text.lower() #lowercases every word \r\n text = re.sub('[%s]'% re.escape(string.punctuation),\"\",text) #removes punctuation\r\n text = re.sub('\\w*\\d\\w','', text) #removes digits\r\n tokens = text.split()\r\n tokens = [word for word in tokens if word not in en_stop]\r\n \r\n if lemma:\r\n tokens = [lemma.lemmatize(word) for word in tokens]\r\n\r\n if len(tokens) >= minwords and text not in exclude_sent: \r\n preprocessed_text = ' '.join(tokens)\r\n \r\n return preprocessed_text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a trained BDT
def _train_bdt():
    target, original = _generate()

    # Train a BDT to reweight
    print("train bdt")
    bdt = hep_ml.reweight.GBReweighter()
    bdt.fit(original=original, target=target)

    return bdt
[ "def getTrainingData(self):", "def load_bert_model(output_dir,\n bert_config_file='./model/uncased_L-12_H-768_A-12/bert_config.json',\n init_checkpoint='./tuned_model/model.ckpt-2461',\n num_labels=2,\n attn_processor_fn=None):\n bert_config = modeling.BertConfig.from_json_file(bert_config_file)\n tf.logging.info('Setting output dir to {} ...'.format(output_dir))\n # I don't expect to be running this model on a TPU so whatever...\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=None,\n master=None,\n model_dir=output_dir,\n save_checkpoints_steps=1000,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=1000,\n num_shards=8,\n per_host_input_for_training=is_per_host))\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=num_labels,\n init_checkpoint=init_checkpoint,\n learning_rate=1e-3,\n num_train_steps=2,\n num_warmup_steps=1,\n use_tpu=False,\n use_one_hot_embeddings=False)\n\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=False,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=32,\n eval_batch_size=8,\n predict_batch_size=8,\n export_to_tpu=False,\n params={'attn_processor_fn': attn_processor_fn})\n\n return estimator", "def load_generation_data():\n input_path = CODE_CORPUS / \"input\"\n # Create a field variable for each field that will be in our TSV file\n code_field = data.Field(sequential=True, tokenize=lambda s: s.split(\" \"),\n include_lengths=True, use_vocab=True)\n\n comm_field = data.Field(sequential=True, tokenize=lambda s: s.split(\" \"),\n include_lengths=True, use_vocab=True)\n\n # Used to create a tabular dataset from TSV\n train_val_fields = [(\"code\", code_field), (\"comm\", comm_field)]\n\n # Build the large tabular dataset using the defined fields\n tsv_file_path = input_path / \"generation_dataset.tsv\"\n tab_data = data.TabularDataset(str(tsv_file_path), \"TSV\", train_val_fields)\n\n # Split the large dataset into TRAIN, DEV, TEST portions\n train_data, dev_data, test_data = tab_data.split(split_ratio=[0.85, 0.05, 0.1])\n\n # Load the pretrained word embedding vectors\n code_vec_path = input_path / \"code-vectors.txt\"\n comm_vec_path = input_path / \"comm-vectors.txt\"\n code_vectors = vocab.Vectors(str(code_vec_path), str(input_path))\n comm_vectors = vocab.Vectors(str(comm_vec_path), str(input_path))\n\n # Builds the known word vocab for code and comments from the pretrained vectors\n code_field.build_vocab(train_data, dev_data, test_data, vectors=code_vectors)\n comm_field.build_vocab(train_data, dev_data, test_data, vectors=comm_vectors)\n\n # We need to return the test sets and the field pretrained vectors\n return (train_data, dev_data, test_data,\n code_field.vocab, comm_field.vocab)", "def train_decision_tree():\n return train_decision_tree_service()", "def _produce_train_dataset(self):\r\n pass", "def learn_test_data():\n return LearnTestData()", "def load(self):\n model_file, _ = self.get_model('.pt')\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n model = BertSentimentClassifier(n_classes=3)\n model.load_state_dict(torch.load(model_file, map_location=device))\n model.eval()\n self.model = model", "def get_checkpoint_data(self):\n pass", "def _load_turicreate_model(self, path):\n return tc.load_model(path)", "def load_trained_DQN(self, path):\r\n\r\n trained_file = pickle.load(open(path, 'rb'))\r\n model = trained_file['model']\r\n print \"Trained DQN Parameters:\", 
json.dumps(trained_file['params'], indent=2)\r\n return model", "def load_trained_DQN(self, path):\r\n\r\n trained_file = pickle.load(open(path, 'rb'))\r\n model = trained_file['model']\r\n\r\n print(\"trained DQN Parameters:\", json.dumps(trained_file['params'], indent=2))\r\n return model", "def get_train_data():\n df = pd.read_pickle(\"data_train.pkl\")\n data = {'post': list(df)[0], 'class': list(df)[1]}\n return pd.DataFrame(data)", "def load_breeze(path, start, end):\n breeze_df = pd.read_pickle(path)\n breeze_df = breeze_df[(breeze_df['Transaction_dtm'] >= start) & (breeze_df['Transaction_dtm'] <= end)]\n return breeze_df", "def blop() -> pd.DataFrame:\n return openinsider_model.get_print_insider_data(\"blop\")", "def test_bthe_b():\n test_path = tempfile.mkdtemp()\n x_train, metadata = bthe_b(test_path)\n try:\n assert x_train.shape == (100, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def build_eval_dataset(self):\n pass", "def fetch_pretrained_model(ckpt_pth):\n return torch.load(ckpt_pth)", "def build_budget():\n # pylint: disable=eval-used\n # TODO: figure out a better solution than eval\n if 'current_budget' in session:\n current_budget = session['current_budget']\n\n return BudgetSimulator(eval(current_budget),\n start_balance=start_balance())", "def start_tpot(automated_run, session, path):\n module = functions.import_string_code_as_module(automated_run.source)\n extraction = session.query(models.Extraction).first()\n X, y = extraction.return_train_dataset()\n\n tpot_learner = module.tpot_learner\n\n tpot_learner.fit(X, y)\n\n temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))\n tpot_learner.export(temp_filename)\n\n with open(temp_filename) as f:\n base_learner_source = f.read()\n\n base_learner_source = constants.tpot_learner_docstring + base_learner_source\n\n try:\n os.remove(temp_filename)\n except OSError:\n pass\n\n blo = models.BaseLearnerOrigin(\n source=base_learner_source,\n name='TPOT Learner',\n meta_feature_generator='predict'\n )\n\n session.add(blo)\n session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the empirical Fisher information matrix of some distribution. For a distribution p and M different d-dimensional vectors of distribution parameters theta, this function calculates the empirical Fisher information matrix over k datapoints (x_i, y_i), using the pdf value at theta, p(x_i, y_i; theta), and its derivative d/dtheta p(x_i, y_i; theta).
def empirical_fisher(p, dp):
    M = p.shape[0]
    res = []
    for m in range(M):
        res.append(empirical_fisher_(p[m], dp[m]))
    return np.array(res)
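In the notation of the query above, the per-parameter-set matrix computed by the empirical_fisher_ helper (a sketch; the symbols follow the description, with dp[i] standing for the gradient of the pdf at the i-th datapoint) is

$$
\hat{F}(\theta_m) \;=\; \frac{1}{k}\sum_{i=1}^{k}\frac{1}{p(x_i, y_i;\theta_m)^2}\,
\nabla_\theta\, p(x_i, y_i;\theta_m)\,\nabla_\theta\, p(x_i, y_i;\theta_m)^{\top},
$$

and empirical_fisher stacks this matrix over the M parameter vectors into an array of shape (M, d, d).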
[ "def compute_fisher_info(p, eta):\n global p_map, eta_map\n\n # Stack columns of p for next step\n p_stack = numpy.repeat(p, eta.size).reshape(p.size, eta.size)\n # Compute Fisher matrix\n fisher = numpy.dot(eta_map, p_stack * p_map) - numpy.outer(eta, eta)\n\n return fisher", "def fisher_information_matrix(self, params):\n n_params = len(np.atleast_1d(params))\n fisher = np.zeros(shape=(n_params, n_params))\n\n if not hasattr(self.mean, 'gradient'):\n _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)\n else:\n _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]\n\n grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]\n\n for i in range(n_params):\n for j in range(i, n_params):\n fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])\n fisher[j, i] = fisher[i, j]\n\n return fisher / self.var", "def empirical_fisher_(p, dp):\n k = p.size\n\n try:\n d = dp.shape[1]\n except IndexError:\n sum = 0\n for i in range(k):\n sum += 1 / p[i] ** 2 * dp[i] ** 2\n return np.array([sum / k])\n\n sum = np.zeros((d, d))\n for i in range(k):\n sum += 1 / p[i] ** 2 * np.outer(dp[i], dp[i])\n return sum / k", "def fisher_information_matrix(self, params):\n pass", "def _compute_fisher_information(result: AmplitudeEstimationResult, observed: bool = False) -> float:\n fisher_information = None\n mlv = result.mle # MLE in [0,1]\n m = result.num_evaluation_qubits\n M = 2**m # pylint: disable=invalid-name\n\n if observed:\n a_i = np.asarray(list(result.samples.keys()))\n p_i = np.asarray(list(result.samples.values()))\n\n # Calculate the observed Fisher information\n fisher_information = sum(p * derivative_log_pdf_a(a, mlv, m) ** 2 for p, a in zip(p_i, a_i))\n else:\n\n def integrand(x):\n return (derivative_log_pdf_a(x, mlv, m)) ** 2 * pdf_a(x, mlv, m)\n\n grid = np.sin(np.pi * np.arange(M / 2 + 1) / M) ** 2\n fisher_information = sum(integrand(x) for x in grid)\n\n return fisher_information", "def fisherMatrix(self):\n FisherM = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = self.param_names[j]\n FisherM[param_i, param_j] = (\n self.fisher_matrix_images[param_i, param_j].sum())\n return FisherM", "def fisherMatrixImages(self):\n FisherM_images = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = self.param_names[j]\n derivative1 = self.derivatives_images[param_i]\n derivative2 = self.derivatives_images[param_j]\n FisherM_images[param_i, param_j] = (\n derivative1 * derivative2 / self.var_noise)\n return FisherM_images", "def fisher_information_matrix(g, Nij,estimate=False):\n #TODO change this to also take an estimate flag\n n_nodes = len(g.nodes)\n F = np.zeros([n_nodes, n_nodes])\n\n if estimate:\n s_ij = form_edge_matrix(g, 'statistical_fluctuation_est', action='symmetrize')\n else:\n s_ij = form_edge_matrix(g, 'statistical_fluctuation', action='symmetrize')\n \n for (i,j) in g.edges:\n F[i,j] = -Nij[i,j] / s_ij[i,j]**2\n F[j,i] = F[i,j] # make symmetric\n for i in g.nodes:\n F[i,i] = -np.sum(F[i,:])\n \n return F", "def normalised_fisher(p, dp):\n M = p.shape[0]\n d = dp.shape[2]\n\n all_fishers = empirical_fisher(p, dp)\n fisher_trace = np.trace(np.sum(all_fishers, axis=0))\n normalisation = d * M / fisher_trace\n return normalisation * all_fishers", "def effective_dimension_(p, dp, gamma):\n M = p.shape[0]\n k = p.shape[1]\n d = dp.shape[2]\n\n f_hat = normalised_fisher(p, dp)\n mat = np.eye(d) + gamma * k / (2 * 
np.pi * np.log(k)) * f_hat\n\n # log(sqrt(det)) == log(det) / 2\n rootdet = np.linalg.slogdet(mat)[1] / 2 # slogdet is more stable than det\n add = np.log(np.sum(np.exp(rootdet))) - np.log(\n M\n ) # normalized sum over parameter space\n result = 2 * add / np.log(gamma * k / (2 * np.pi * np.log(k)))\n return result", "def fisher_vector(xx, gmm):\n xx = np.atleast_2d(xx)\n N = xx.shape[0]\n\n # Compute posterior probabilities.\n Q = gmm.predict_proba(xx) # NxK\n\n # Compute the sufficient statistics of descriptors.\n Q_sum = np.sum(Q, 0)[:, np.newaxis] / N\n Q_xx = np.dot(Q.T, xx) / N\n Q_xx_2 = np.dot(Q.T, xx ** 2) / N\n\n # Compute derivatives with respect to mixing weights, means and variances.\n d_pi = Q_sum.squeeze() - gmm.weights_\n d_mu = Q_xx - Q_sum * gmm.means_\n d_sigma = (\n - Q_xx_2\n - Q_sum * gmm.means_ ** 2\n #+ Q_sum * gmm.covars_\n + Q_sum * gmm.covariances_\n + 2 * Q_xx * gmm.means_)\n\n # Merge derivatives into a vector.\n return np.hstack((d_pi, d_mu.flatten(), d_sigma.flatten()))", "def expiH(H):\n\n # Diagonalise the matrices:\n evals, evecs = eigh(H)\n\n # Now we compute exp(i*H) = Q exp(i*D) Q^\\dagger where Q is the matrix of\n # eigenvectors (as columns) and D is the diagonal matrix of eigenvalues:\n\n Q = evecs\n Q_dagger = Q.conj().swapaxes(-1, -2) # Only transpose the matrix dimensions\n exp_iD_diags = np.exp(1j*evals)\n\n # Compute the 3-term matrix product Q*exp_iD_diags*Q_dagger using the\n # einsum function in order to specify which array axes of each array to\n # sum over:\n return np.einsum('...ik,...k,...kj->...ij', Q, exp_iD_diags, Q_dagger)", "def ifft_matrix(X_k):\n NN = X_k.shape[0] # length of transform\n nn = np.arange(NN,dtype='float') # initial array of length x\n kk = np.reshape(nn,(NN,1)) # transposed array of length x\n MM = np.exp(-2j * np.pi * nn * kk / NN) #transform matrix <3\n return np.dot(X_k,MM)/NN #dot prodtuct/soltion", "def katz_expansions(k0,p,ellp,mdash,n):\n S = Zmod(p**mdash)\n\n Ep1 = eisenstein_series_qexp(p-1, ellp, K=S, normalization=\"constant\")\n E4 = eisenstein_series_qexp(4, ellp, K=S, normalization=\"constant\")\n E6 = eisenstein_series_qexp(6, ellp, K=S, normalization=\"constant\")\n\n delta = delta_qexp(ellp, K=S)\n h = delta / E6**2\n hj = delta.parent()(1)\n e = []\n\n # We compute negative powers of E_(p-1) successively (this saves a great\n # deal of time). 
The effect is that Ep1mi = Ep1 ** (-i).\n Ep1m1 = ~Ep1\n Ep1mi = 1\n for i in range(0,n+1):\n Wi,hj = compute_Wi(k0 + i*(p-1),p,h,hj,E4,E6)\n for bis in Wi:\n eis = p**floor(i/(p+1)) * Ep1mi * bis\n e.append(eis)\n Ep1mi = Ep1mi * Ep1m1\n\n return e,Ep1", "def fisher_like(expt_name, zc, dh, dm, verbose=False):\n # Get pre-loaded Fisher data for this experiment\n expt_zc, expt_mean, expt_icov = expt_data[expt_name]\n \n # Convert distances in this model into H and D_A\n # Units: expects H ~ 100 km/s/Mpc, D_A ~ Gpc\n Hz = [model.C / dh[np.where(zc==_z)][0] / 1e2 for _z in expt_zc]\n DAz = [dm[np.where(zc==_z)][0] / (1.+_z) / 1e3 for _z in expt_zc]\n mean = np.concatenate((Hz, DAz))\n x = mean - expt_mean\n \n logL = -0.5 * np.dot(x, np.dot(expt_icov, x))\n if verbose: print(\"\\t%16s: %3.3f\" % (\"%s Fisher\" % expt_name, logL))\n return logL", "def test_Fisher_estimation(self):\n fe = glm.poisson_estimation(algo='Fisher', data=self._data, explanatories=[1], response=0)\n self.assertAlmostEqual(fe.estimated.predictor.alpha, -3.305, places=3)\n self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.164, places=3)", "def pdf_entropy(density_df, input_density_df,\n pseudocount, input_pseudocount,\n min_density_threshold=0):\n\n df_indices = density_df.index\n dfi_indices = input_density_df.index\n missing = set(df_indices) - set(dfi_indices)\n\n input_density_df = input_density_df.append(input_density_df.ix[missing])\n\n pdf = calculate_pdf(density_df, pseudocount, min_density_threshold)\n input_pdf = calculate_pdf(\n input_density_df, input_pseudocount, min_density_threshold\n )\n\n en = pdf.multiply(np.log2(pdf.div(input_pdf)))\n return en", "def calculate_fisher_information(\n self,\n x,\n theta=None,\n obs_weights=None,\n estimator_weights=None,\n n_events=1,\n mode=\"score\",\n calculate_covariance=True,\n sum_events=True,\n epsilon_shift=0.001,\n ):\n\n logger.debug(\"Evaluating Fisher information for %s estimators in ensemble\", self.n_estimators)\n\n # Check ensemble\n if self.estimator_type not in [\"score\", \"parameterized_ratio\"]:\n raise NotImplementedError(\n \"Fisher information calculation is only implemented for local score estimators \"\n \"(ScoreEstimator instances) and parameterized ratio estimators (parameterized_ratio instances).\"\n )\n\n # Check input\n if mode not in [\"score\", \"information\", \"modified_score\"]:\n raise ValueError(f\"Unknown mode {mode}!\")\n\n # Calculate estimator_weights of each estimator in vote\n if estimator_weights is None:\n estimator_weights = np.ones(self.n_estimators)\n\n assert len(estimator_weights) == self.n_estimators\n estimator_weights /= np.sum(estimator_weights)\n logger.debug(\"Estimator weights: %s\", estimator_weights)\n\n covariance = None\n\n # \"information\" mode\n if mode == \"information\":\n # Calculate estimator predictions\n predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n predictions.append(\n estimator.calculate_fisher_information(x=x, theta=theta, weights=obs_weights, n_events=n_events)\n )\n predictions = np.array(predictions)\n\n # Calculate weighted mean and covariance\n information = np.average(predictions, axis=0, weights=estimator_weights)\n\n predictions_flat = predictions.reshape((predictions.shape[0], -1))\n\n if calculate_covariance:\n covariance = np.cov(predictions_flat.T, aweights=estimator_weights)\n covariance_shape = (\n predictions.shape[1],\n predictions.shape[2],\n 
predictions.shape[1],\n predictions.shape[2],\n )\n covariance = covariance.reshape(covariance_shape)\n\n # \"modified_score\" mode:\n elif mode == \"modified_score\":\n # Load training data\n if isinstance(x, str):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i, score_predictions[-1][0, :])\n\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points between mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = (score_predictions - score_mean[np.newaxis, :, :]) / self.n_estimators**0.5\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params**2)\n covariance = np.cov(informations_individual.T)\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n # \"score\" mode:\n elif mode == \"score\":\n # Load training data\n if isinstance(x, str):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i, score_predictions[-1][0, :])\n\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points between mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = epsilon_shift * (score_predictions - 
score_mean[np.newaxis, :, :])\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params**2)\n covariance = np.cov(informations_individual.T)\n covariance /= epsilon_shift**2\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n else:\n raise RuntimeError(\"Unknown mode %s, has to be 'information', 'score', or 'modified_score'.\")\n\n return information, covariance", "def calculate_fisher_information(\n self,\n x,\n theta=None,\n obs_weights=None,\n estimator_weights=None,\n n_events=1,\n mode=\"score\",\n calculate_covariance=True,\n sum_events=True,\n epsilon_shift=0.001,\n ):\n logger.debug(\"Evaluating Fisher information for %s estimators in ensemble\", self.n_estimators)\n\n # Check ensemble\n if self.estimator_type not in [\"score\", \"parameterized_ratio\"]:\n raise NotImplementedError(\n \"Fisher information calculation is only implemented for local score estimators \"\n \"(ScoreEstimator instances) and parameterized ratio estimators (parameterized_ratio instances).\"\n )\n\n # Check input\n if mode not in [\"score\", \"information\"]:\n raise ValueError(\"Unknown mode {}, has to be 'score' or 'information'!\".format(mode))\n\n # Calculate estimator_weights of each estimator in vote\n if estimator_weights is None:\n estimator_weights = np.ones(self.n_estimators)\n assert len(estimator_weights) == self.n_estimators\n estimator_weights /= np.sum(estimator_weights)\n logger.debug(\"Estimator weights: %s\", estimator_weights)\n\n covariance = None\n\n # \"information\" mode\n if mode == \"information\":\n # Calculate estimator predictions\n predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n predictions.append(\n estimator.calculate_fisher_information(x=x, theta=theta, weights=obs_weights, n_events=n_events)\n )\n predictions = np.array(predictions)\n\n # Calculate weighted mean and covariance\n information = np.average(predictions, axis=0, weights=estimator_weights)\n\n predictions_flat = predictions.reshape((predictions.shape[0], -1))\n\n if calculate_covariance:\n covariance = np.cov(predictions_flat.T, aweights=estimator_weights)\n covariance_shape = (\n predictions.shape[1],\n predictions.shape[2],\n predictions.shape[1],\n predictions.shape[2],\n )\n 
covariance = covariance.reshape(covariance_shape)\n\n # \"modified_score\" mode:\n elif mode == \"modified_score\":\n # Load training data\n if isinstance(x, six.string_types):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i + 1, score_predictions[-1][0, :])\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points betweeen mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = (score_predictions - score_mean[np.newaxis, :, :]) / self.n_estimators ** 0.5\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params ** 2)\n covariance = np.cov(informations_individual.T)\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n # \"score\" mode:\n elif mode == \"score\":\n # Load training data\n if isinstance(x, six.string_types):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i + 1, score_predictions[-1][0, :])\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points betweeen mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = epsilon_shift * (score_predictions - score_mean[np.newaxis, 
:, :])\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params ** 2)\n covariance = np.cov(informations_individual.T)\n covariance /= epsilon_shift ** 2\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n else:\n raise RuntimeError(\"Unknown mode %s, has to be 'information', 'score', or 'modified_score'.\")\n\n return information, covariance" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the empirical Fisher information matrix of some distribution. For some distribution p and some d-dimensional vector of distribution parameters theta, this function calculates the empirical Fisher information matrix over k datapoints (x_i, y_i), using the pdf value at theta, p(x_i, y_i; theta), and its derivative d/dtheta p(x_i, y_i; theta).
def empirical_fisher_(p, dp):
    k = p.size

    try:
        d = dp.shape[1]
    except IndexError:
        sum = 0
        for i in range(k):
            sum += 1 / p[i] ** 2 * dp[i] ** 2
        return np.array([sum / k])

    sum = np.zeros((d, d))
    for i in range(k):
        sum += 1 / p[i] ** 2 * np.outer(dp[i], dp[i])
    return sum / k
[ "def compute_fisher_info(p, eta):\n global p_map, eta_map\n\n # Stack columns of p for next step\n p_stack = numpy.repeat(p, eta.size).reshape(p.size, eta.size)\n # Compute Fisher matrix\n fisher = numpy.dot(eta_map, p_stack * p_map) - numpy.outer(eta, eta)\n\n return fisher", "def empirical_fisher(p, dp):\n M = p.shape[0]\n res = []\n for m in range(M):\n res.append(empirical_fisher_(p[m], dp[m]))\n return np.array(res)", "def fisher_information_matrix(self, params):\n n_params = len(np.atleast_1d(params))\n fisher = np.zeros(shape=(n_params, n_params))\n\n if not hasattr(self.mean, 'gradient'):\n _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)\n else:\n _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]\n\n grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]\n\n for i in range(n_params):\n for j in range(i, n_params):\n fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])\n fisher[j, i] = fisher[i, j]\n\n return fisher / self.var", "def _compute_fisher_information(result: AmplitudeEstimationResult, observed: bool = False) -> float:\n fisher_information = None\n mlv = result.mle # MLE in [0,1]\n m = result.num_evaluation_qubits\n M = 2**m # pylint: disable=invalid-name\n\n if observed:\n a_i = np.asarray(list(result.samples.keys()))\n p_i = np.asarray(list(result.samples.values()))\n\n # Calculate the observed Fisher information\n fisher_information = sum(p * derivative_log_pdf_a(a, mlv, m) ** 2 for p, a in zip(p_i, a_i))\n else:\n\n def integrand(x):\n return (derivative_log_pdf_a(x, mlv, m)) ** 2 * pdf_a(x, mlv, m)\n\n grid = np.sin(np.pi * np.arange(M / 2 + 1) / M) ** 2\n fisher_information = sum(integrand(x) for x in grid)\n\n return fisher_information", "def fisher_information_matrix(self, params):\n pass", "def fisherMatrix(self):\n FisherM = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = self.param_names[j]\n FisherM[param_i, param_j] = (\n self.fisher_matrix_images[param_i, param_j].sum())\n return FisherM", "def normalised_fisher(p, dp):\n M = p.shape[0]\n d = dp.shape[2]\n\n all_fishers = empirical_fisher(p, dp)\n fisher_trace = np.trace(np.sum(all_fishers, axis=0))\n normalisation = d * M / fisher_trace\n return normalisation * all_fishers", "def fisher_information_matrix(g, Nij,estimate=False):\n #TODO change this to also take an estimate flag\n n_nodes = len(g.nodes)\n F = np.zeros([n_nodes, n_nodes])\n\n if estimate:\n s_ij = form_edge_matrix(g, 'statistical_fluctuation_est', action='symmetrize')\n else:\n s_ij = form_edge_matrix(g, 'statistical_fluctuation', action='symmetrize')\n \n for (i,j) in g.edges:\n F[i,j] = -Nij[i,j] / s_ij[i,j]**2\n F[j,i] = F[i,j] # make symmetric\n for i in g.nodes:\n F[i,i] = -np.sum(F[i,:])\n \n return F", "def pdf_entropy(density_df, input_density_df,\n pseudocount, input_pseudocount,\n min_density_threshold=0):\n\n df_indices = density_df.index\n dfi_indices = input_density_df.index\n missing = set(df_indices) - set(dfi_indices)\n\n input_density_df = input_density_df.append(input_density_df.ix[missing])\n\n pdf = calculate_pdf(density_df, pseudocount, min_density_threshold)\n input_pdf = calculate_pdf(\n input_density_df, input_pseudocount, min_density_threshold\n )\n\n en = pdf.multiply(np.log2(pdf.div(input_pdf)))\n return en", "def fisherMatrixImages(self):\n FisherM_images = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = 
self.param_names[j]\n derivative1 = self.derivatives_images[param_i]\n derivative2 = self.derivatives_images[param_j]\n FisherM_images[param_i, param_j] = (\n derivative1 * derivative2 / self.var_noise)\n return FisherM_images", "def effective_dimension_(p, dp, gamma):\n M = p.shape[0]\n k = p.shape[1]\n d = dp.shape[2]\n\n f_hat = normalised_fisher(p, dp)\n mat = np.eye(d) + gamma * k / (2 * np.pi * np.log(k)) * f_hat\n\n # log(sqrt(det)) == log(det) / 2\n rootdet = np.linalg.slogdet(mat)[1] / 2 # slogdet is more stable than det\n add = np.log(np.sum(np.exp(rootdet))) - np.log(\n M\n ) # normalized sum over parameter space\n result = 2 * add / np.log(gamma * k / (2 * np.pi * np.log(k)))\n return result", "def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density", "def fisher_like(expt_name, zc, dh, dm, verbose=False):\n # Get pre-loaded Fisher data for this experiment\n expt_zc, expt_mean, expt_icov = expt_data[expt_name]\n \n # Convert distances in this model into H and D_A\n # Units: expects H ~ 100 km/s/Mpc, D_A ~ Gpc\n Hz = [model.C / dh[np.where(zc==_z)][0] / 1e2 for _z in expt_zc]\n DAz = [dm[np.where(zc==_z)][0] / (1.+_z) / 1e3 for _z in expt_zc]\n mean = np.concatenate((Hz, DAz))\n x = mean - expt_mean\n \n logL = -0.5 * np.dot(x, np.dot(expt_icov, x))\n if verbose: print(\"\\t%16s: %3.3f\" % (\"%s Fisher\" % expt_name, logL))\n return logL", "def expiH(H):\n\n # Diagonalise the matrices:\n evals, evecs = eigh(H)\n\n # Now we compute exp(i*H) = Q exp(i*D) Q^\\dagger where Q is the matrix of\n # eigenvectors (as columns) and D is the diagonal matrix of eigenvalues:\n\n Q = evecs\n Q_dagger = Q.conj().swapaxes(-1, -2) # Only transpose the matrix dimensions\n exp_iD_diags = np.exp(1j*evals)\n\n # Compute the 3-term matrix product Q*exp_iD_diags*Q_dagger using the\n # einsum function in order to specify which array axes of each array to\n # sum over:\n return np.einsum('...ik,...k,...kj->...ij', Q, exp_iD_diags, Q_dagger)", "def fisher_vector(xx, gmm):\n xx = np.atleast_2d(xx)\n N = xx.shape[0]\n\n # Compute posterior probabilities.\n Q = gmm.predict_proba(xx) # NxK\n\n # Compute the sufficient statistics of descriptors.\n Q_sum = np.sum(Q, 0)[:, np.newaxis] / N\n Q_xx = np.dot(Q.T, xx) / N\n Q_xx_2 = np.dot(Q.T, xx ** 2) / N\n\n # Compute derivatives with respect to mixing weights, means and variances.\n d_pi = Q_sum.squeeze() - gmm.weights_\n d_mu = Q_xx - Q_sum * gmm.means_\n d_sigma = (\n - Q_xx_2\n - Q_sum * gmm.means_ ** 2\n #+ Q_sum * gmm.covars_\n + Q_sum * gmm.covariances_\n + 2 * Q_xx * gmm.means_)\n\n # Merge derivatives into a vector.\n return np.hstack((d_pi, d_mu.flatten(), d_sigma.flatten()))", "def calculate_fisher_information(\n self,\n x,\n theta=None,\n obs_weights=None,\n estimator_weights=None,\n n_events=1,\n mode=\"score\",\n calculate_covariance=True,\n sum_events=True,\n epsilon_shift=0.001,\n ):\n\n logger.debug(\"Evaluating Fisher information for %s estimators in ensemble\", self.n_estimators)\n\n # Check ensemble\n if self.estimator_type not in [\"score\", \"parameterized_ratio\"]:\n raise NotImplementedError(\n \"Fisher information calculation is only implemented for local score estimators \"\n \"(ScoreEstimator instances) and parameterized ratio estimators (parameterized_ratio instances).\"\n )\n\n # Check input\n if mode not in [\"score\", \"information\", \"modified_score\"]:\n 
raise ValueError(f\"Unknown mode {mode}!\")\n\n # Calculate estimator_weights of each estimator in vote\n if estimator_weights is None:\n estimator_weights = np.ones(self.n_estimators)\n\n assert len(estimator_weights) == self.n_estimators\n estimator_weights /= np.sum(estimator_weights)\n logger.debug(\"Estimator weights: %s\", estimator_weights)\n\n covariance = None\n\n # \"information\" mode\n if mode == \"information\":\n # Calculate estimator predictions\n predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n predictions.append(\n estimator.calculate_fisher_information(x=x, theta=theta, weights=obs_weights, n_events=n_events)\n )\n predictions = np.array(predictions)\n\n # Calculate weighted mean and covariance\n information = np.average(predictions, axis=0, weights=estimator_weights)\n\n predictions_flat = predictions.reshape((predictions.shape[0], -1))\n\n if calculate_covariance:\n covariance = np.cov(predictions_flat.T, aweights=estimator_weights)\n covariance_shape = (\n predictions.shape[1],\n predictions.shape[2],\n predictions.shape[1],\n predictions.shape[2],\n )\n covariance = covariance.reshape(covariance_shape)\n\n # \"modified_score\" mode:\n elif mode == \"modified_score\":\n # Load training data\n if isinstance(x, str):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i, score_predictions[-1][0, :])\n\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points between mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = (score_predictions - score_mean[np.newaxis, :, :]) / self.n_estimators**0.5\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params**2)\n covariance = np.cov(informations_individual.T)\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected 
score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n # \"score\" mode:\n elif mode == \"score\":\n # Load training data\n if isinstance(x, str):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators, start=1):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i, self.n_estimators)\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i, score_predictions[-1][0, :])\n\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points between mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = epsilon_shift * (score_predictions - score_mean[np.newaxis, :, :])\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params**2)\n covariance = np.cov(informations_individual.T)\n covariance /= epsilon_shift**2\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n else:\n raise RuntimeError(\"Unknown mode %s, has to be 'information', 'score', or 'modified_score'.\")\n\n return information, covariance", "def calculate_fisher_information(\n self,\n x,\n theta=None,\n obs_weights=None,\n estimator_weights=None,\n n_events=1,\n mode=\"score\",\n calculate_covariance=True,\n sum_events=True,\n epsilon_shift=0.001,\n ):\n logger.debug(\"Evaluating Fisher information for %s estimators in ensemble\", self.n_estimators)\n\n # Check ensemble\n if self.estimator_type not in [\"score\", \"parameterized_ratio\"]:\n raise NotImplementedError(\n \"Fisher information calculation is only implemented for local score estimators \"\n \"(ScoreEstimator instances) and parameterized ratio estimators (parameterized_ratio instances).\"\n )\n\n # Check input\n if mode not in [\"score\", \"information\"]:\n raise ValueError(\"Unknown mode {}, has to be 'score' or 'information'!\".format(mode))\n\n 
# Calculate estimator_weights of each estimator in vote\n if estimator_weights is None:\n estimator_weights = np.ones(self.n_estimators)\n assert len(estimator_weights) == self.n_estimators\n estimator_weights /= np.sum(estimator_weights)\n logger.debug(\"Estimator weights: %s\", estimator_weights)\n\n covariance = None\n\n # \"information\" mode\n if mode == \"information\":\n # Calculate estimator predictions\n predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n predictions.append(\n estimator.calculate_fisher_information(x=x, theta=theta, weights=obs_weights, n_events=n_events)\n )\n predictions = np.array(predictions)\n\n # Calculate weighted mean and covariance\n information = np.average(predictions, axis=0, weights=estimator_weights)\n\n predictions_flat = predictions.reshape((predictions.shape[0], -1))\n\n if calculate_covariance:\n covariance = np.cov(predictions_flat.T, aweights=estimator_weights)\n covariance_shape = (\n predictions.shape[1],\n predictions.shape[2],\n predictions.shape[1],\n predictions.shape[2],\n )\n covariance = covariance.reshape(covariance_shape)\n\n # \"modified_score\" mode:\n elif mode == \"modified_score\":\n # Load training data\n if isinstance(x, six.string_types):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i + 1, score_predictions[-1][0, :])\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points betweeen mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = (score_predictions - score_mean[np.newaxis, :, :]) / self.n_estimators ** 0.5\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params ** 2)\n covariance = np.cov(informations_individual.T)\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = 
[np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n # \"score\" mode:\n elif mode == \"score\":\n # Load training data\n if isinstance(x, six.string_types):\n x = load_and_check(x)\n n_samples = x.shape[0]\n\n # Calculate score predictions\n score_predictions = []\n for i, estimator in enumerate(self.estimators):\n logger.debug(\"Starting evaluation for estimator %s / %s in ensemble\", i + 1, self.n_estimators)\n\n score_predictions.append(estimator.evaluate_score(x=x, theta=np.array([theta for _ in x])))\n logger.debug(\"Estimator %s predicts t(x) = %s for first event\", i + 1, score_predictions[-1][0, :])\n score_predictions = np.array(score_predictions) # (n_estimators, n_events, n_parameters)\n\n # Get ensemble mean and ensemble covariance\n score_mean = np.mean(score_predictions, axis=0) # (n_events, n_parameters)\n\n # For uncertainty calculation: calculate points betweeen mean and original predictions with same mean and\n # variance / n compared to the original predictions\n score_shifted_predictions = epsilon_shift * (score_predictions - score_mean[np.newaxis, :, :])\n score_shifted_predictions = score_mean[np.newaxis, :, :] + score_shifted_predictions\n\n # Event weights\n if obs_weights is None:\n obs_weights = np.ones(n_samples)\n obs_weights /= np.sum(obs_weights)\n\n # Fisher information prediction (based on mean scores)\n if sum_events:\n information = float(n_events) * np.sum(\n obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :],\n axis=0,\n )\n else:\n information = (\n float(n_events)\n * obs_weights[:, np.newaxis, np.newaxis]\n * score_mean[:, :, np.newaxis]\n * score_mean[:, np.newaxis, :]\n )\n\n if calculate_covariance:\n # Fisher information predictions based on shifted scores\n informations_individual = float(n_events) * np.sum(\n obs_weights[np.newaxis, :, np.newaxis, np.newaxis]\n * score_shifted_predictions[:, :, :, np.newaxis]\n * score_shifted_predictions[:, :, np.newaxis, :],\n axis=1,\n ) # (n_estimators, n_parameters, n_parameters)\n\n n_params = score_mean.shape[1]\n informations_individual = informations_individual.reshape(-1, n_params ** 2)\n covariance = np.cov(informations_individual.T)\n covariance /= epsilon_shift ** 2\n covariance = covariance.reshape(n_params, n_params, n_params, n_params)\n\n # Let's check the expected score\n expected_score = [np.einsum(\"n,ni->i\", obs_weights, score_mean)]\n logger.debug(\"Expected per-event score (should be close to zero):\\n%s\", expected_score)\n\n else:\n raise RuntimeError(\"Unknown mode %s, has to be 'information', 'score', or 'modified_score'.\")\n\n return information, covariance", "def test_Fisher_estimation(self):\n fe = glm.poisson_estimation(algo='Fisher', data=self._data, explanatories=[1], response=0)\n self.assertAlmostEqual(fe.estimated.predictor.alpha, -3.305, places=3)\n self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.164, places=3)", "def katz_expansions(k0,p,ellp,mdash,n):\n S = Zmod(p**mdash)\n\n Ep1 = eisenstein_series_qexp(p-1, ellp, K=S, normalization=\"constant\")\n E4 = eisenstein_series_qexp(4, ellp, K=S, normalization=\"constant\")\n E6 = eisenstein_series_qexp(6, ellp, K=S, normalization=\"constant\")\n\n delta = delta_qexp(ellp, K=S)\n h = delta / E6**2\n hj = delta.parent()(1)\n e = []\n\n # We compute negative powers of E_(p-1) successively (this saves a great\n # deal of time). 
The effect is that Ep1mi = Ep1 ** (-i).\n Ep1m1 = ~Ep1\n Ep1mi = 1\n for i in range(0,n+1):\n Wi,hj = compute_Wi(k0 + i*(p-1),p,h,hj,E4,E6)\n for bis in Wi:\n eis = p**floor(i/(p+1)) * Ep1mi * bis\n e.append(eis)\n Ep1mi = Ep1mi * Ep1m1\n\n return e,Ep1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the empirical normalised Fisher information matrix of some distribution. For some distribution p and M different d-dimensional vectors of distribution parameters theta, this function calculates the empirical Fisher information matrix over k datapoints (x_i, y_i), using the pdf value at theta, p(x_i, y_i; theta), and its derivative d/dtheta p(x_i, y_i; theta).
def normalised_fisher(p, dp):
    M = p.shape[0]
    d = dp.shape[2]

    all_fishers = empirical_fisher(p, dp)
    fisher_trace = np.trace(np.sum(all_fishers, axis=0))
    normalisation = d * M / fisher_trace
    return normalisation * all_fishers
[ "def compute_fisher_info(p, eta):\n global p_map, eta_map\n\n # Stack columns of p for next step\n p_stack = numpy.repeat(p, eta.size).reshape(p.size, eta.size)\n # Compute Fisher matrix\n fisher = numpy.dot(eta_map, p_stack * p_map) - numpy.outer(eta, eta)\n\n return fisher", "def fisher_information_matrix(self, params):\n n_params = len(np.atleast_1d(params))\n fisher = np.zeros(shape=(n_params, n_params))\n\n if not hasattr(self.mean, 'gradient'):\n _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)\n else:\n _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]\n\n grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]\n\n for i in range(n_params):\n for j in range(i, n_params):\n fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])\n fisher[j, i] = fisher[i, j]\n\n return fisher / self.var", "def empirical_fisher(p, dp):\n M = p.shape[0]\n res = []\n for m in range(M):\n res.append(empirical_fisher_(p[m], dp[m]))\n return np.array(res)", "def empirical_fisher_(p, dp):\n k = p.size\n\n try:\n d = dp.shape[1]\n except IndexError:\n sum = 0\n for i in range(k):\n sum += 1 / p[i] ** 2 * dp[i] ** 2\n return np.array([sum / k])\n\n sum = np.zeros((d, d))\n for i in range(k):\n sum += 1 / p[i] ** 2 * np.outer(dp[i], dp[i])\n return sum / k", "def fisherMatrixImages(self):\n FisherM_images = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = self.param_names[j]\n derivative1 = self.derivatives_images[param_i]\n derivative2 = self.derivatives_images[param_j]\n FisherM_images[param_i, param_j] = (\n derivative1 * derivative2 / self.var_noise)\n return FisherM_images", "def fisherMatrix(self):\n FisherM = {}\n for i in range(self.num_params):\n for j in range(self.num_params):\n param_i = self.param_names[i]\n param_j = self.param_names[j]\n FisherM[param_i, param_j] = (\n self.fisher_matrix_images[param_i, param_j].sum())\n return FisherM", "def pdf_entropy(density_df, input_density_df,\n pseudocount, input_pseudocount,\n min_density_threshold=0):\n\n df_indices = density_df.index\n dfi_indices = input_density_df.index\n missing = set(df_indices) - set(dfi_indices)\n\n input_density_df = input_density_df.append(input_density_df.ix[missing])\n\n pdf = calculate_pdf(density_df, pseudocount, min_density_threshold)\n input_pdf = calculate_pdf(\n input_density_df, input_pseudocount, min_density_threshold\n )\n\n en = pdf.multiply(np.log2(pdf.div(input_pdf)))\n return en", "def _compute_fisher_information(result: AmplitudeEstimationResult, observed: bool = False) -> float:\n fisher_information = None\n mlv = result.mle # MLE in [0,1]\n m = result.num_evaluation_qubits\n M = 2**m # pylint: disable=invalid-name\n\n if observed:\n a_i = np.asarray(list(result.samples.keys()))\n p_i = np.asarray(list(result.samples.values()))\n\n # Calculate the observed Fisher information\n fisher_information = sum(p * derivative_log_pdf_a(a, mlv, m) ** 2 for p, a in zip(p_i, a_i))\n else:\n\n def integrand(x):\n return (derivative_log_pdf_a(x, mlv, m)) ** 2 * pdf_a(x, mlv, m)\n\n grid = np.sin(np.pi * np.arange(M / 2 + 1) / M) ** 2\n fisher_information = sum(integrand(x) for x in grid)\n\n return fisher_information", "def fisher_vector(xx, gmm):\n xx = np.atleast_2d(xx)\n N = xx.shape[0]\n\n # Compute posterior probabilities.\n Q = gmm.predict_proba(xx) # NxK\n\n # Compute the sufficient statistics of descriptors.\n Q_sum = np.sum(Q, 0)[:, np.newaxis] / N\n Q_xx = np.dot(Q.T, xx) / N\n Q_xx_2 = 
np.dot(Q.T, xx ** 2) / N\n\n # Compute derivatives with respect to mixing weights, means and variances.\n d_pi = Q_sum.squeeze() - gmm.weights_\n d_mu = Q_xx - Q_sum * gmm.means_\n d_sigma = (\n - Q_xx_2\n - Q_sum * gmm.means_ ** 2\n #+ Q_sum * gmm.covars_\n + Q_sum * gmm.covariances_\n + 2 * Q_xx * gmm.means_)\n\n # Merge derivatives into a vector.\n return np.hstack((d_pi, d_mu.flatten(), d_sigma.flatten()))", "def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density", "def pdf(X, m, S):\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None\n if not isinstance(m, np.ndarray) or len(m.shape) != 1:\n return None\n if not isinstance(S, np.ndarray) or len(S.shape) != 2:\n return None\n if X.shape[1] != m.shape[0] or X.shape[1] != S.shape[0]:\n return None\n if S.shape[0] != S.shape[1] or X.shape[1] != S.shape[1]:\n return None\n\n n, d = X.shape\n x_m = X - m\n # Inverted covariance matrix\n S_inv = np.linalg.inv(S)\n\n fac = np.einsum('...k,kl,...l->...', x_m, S_inv, x_m)\n P1 = 1. / (np.sqrt(((2 * np.pi)**d * np.linalg.det(S))))\n P2 = np.exp(-fac / 2)\n P = np.maximum((P1 * P2), 1e-300)\n\n return P", "def effective_dimension_(p, dp, gamma):\n M = p.shape[0]\n k = p.shape[1]\n d = dp.shape[2]\n\n f_hat = normalised_fisher(p, dp)\n mat = np.eye(d) + gamma * k / (2 * np.pi * np.log(k)) * f_hat\n\n # log(sqrt(det)) == log(det) / 2\n rootdet = np.linalg.slogdet(mat)[1] / 2 # slogdet is more stable than det\n add = np.log(np.sum(np.exp(rootdet))) - np.log(\n M\n ) # normalized sum over parameter space\n result = 2 * add / np.log(gamma * k / (2 * np.pi * np.log(k)))\n return result", "def poissonDist(avg, k):\n PMF = ((avg**k)*(np.exp(-1*avg))/(factorial(k)))\n return(PMF)", "def floater_hormann_adaptive(y, x, fx):\n assert x.ndim == y.ndim == 1\n assert x.shape == fx.shape\n n = x.size\n # sort x\n isort = sorted(range(n), key=x.__getitem__)\n x, fx = x[isort], fx[isort]\n # find the right order\n linf = zeros(n-2)\n for d in range(1, n-1):\n for i in range(n-1):\n xx = numpy.array(list(x[:i]) + list(x[i+2:]))\n fxx = numpy.array(list(fx[:i]) + list(fx[i+2:]))\n fxi = floater_hormann(x[i:i+2], xx, fxx, d)\n err = max(abs(fxi[0] - fx[i]), abs(fxi[1] - fx[i+1]))\n linf[d-1] = max(linf[d-1], err)\n print ' trying d = ', d, ', L_inf error = ', linf[d-1]\n if d >= 3 and linf[d-1] > linf[d-2] > linf[d-3]:\n linf[d:] = numpy.inf\n break\n d = linf.argmin() + 1\n print ' using d = ', d\n return floater_hormann(y, x, fx, d)", "def fisher_information_matrix(self, params):\n pass", "def entropy_fit(self, n_moments, tol=1e-10, verbose=False):\n # sum of probs constraint\n n_constraints = n_moments + 1\n\n # don't want to mess up the object...\n xs = self.xs.copy()\n p = self.agg_density.copy()\n # more aggressively de-fuzz\n p = np.where(abs(p) < 1e-16, 0, p)\n p = p / np.sum(p)\n p1 = p.copy()\n\n mtargets = np.zeros(n_constraints)\n for i in range(n_constraints):\n mtargets[i] = np.sum(p)\n p *= xs\n\n parm1 = np.zeros(n_constraints)\n x = np.array([xs ** i for i in range(n_constraints)])\n\n probs = np.exp(-x.T @ parm1)\n machieved = x @ probs\n der1 = -(x * probs) @ x.T\n\n er = 1\n iters = 0\n while er > tol:\n iters += 1\n try:\n parm1 = parm1 - inv(der1) @ (machieved - mtargets)\n except np.linalg.LinAlgError:\n print('Singluar matrix')\n print(der1)\n return None\n probs = np.exp(-x.T @ parm1)\n machieved 
= x @ probs\n der1 = -(x * probs) @ x.T\n er = (machieved - mtargets).dot(machieved - mtargets)\n if verbose:\n print(f'Error: {er}\\nParameter {parm1}')\n ans = pd.DataFrame(dict(xs=xs, agg=p1, fit=probs))\n ans = ans.set_index('xs')\n return dict(params=parm1, machieved=machieved, mtargets=mtargets, ans_df=ans)", "def fisher_like(expt_name, zc, dh, dm, verbose=False):\n # Get pre-loaded Fisher data for this experiment\n expt_zc, expt_mean, expt_icov = expt_data[expt_name]\n \n # Convert distances in this model into H and D_A\n # Units: expects H ~ 100 km/s/Mpc, D_A ~ Gpc\n Hz = [model.C / dh[np.where(zc==_z)][0] / 1e2 for _z in expt_zc]\n DAz = [dm[np.where(zc==_z)][0] / (1.+_z) / 1e3 for _z in expt_zc]\n mean = np.concatenate((Hz, DAz))\n x = mean - expt_mean\n \n logL = -0.5 * np.dot(x, np.dot(expt_icov, x))\n if verbose: print(\"\\t%16s: %3.3f\" % (\"%s Fisher\" % expt_name, logL))\n return logL", "def build_markov_transition_density(P: np.ndarray, pi: np.ndarray):\r\n P, _ = sanity_check_probabilities(P, P)\r\n na, ns = P.shape[0], P.shape[1]\r\n P_pi = np.zeros((ns, ns))\r\n for s in range(ns):\r\n for y in range(ns):\r\n P_pi[y, s] = np.dot(P[:, s, y], pi[s, :])\r\n return P_pi", "def test_Fisher_estimation(self):\n fe = glm.poisson_estimation(algo='Fisher', data=self._data, explanatories=[1], response=0)\n self.assertAlmostEqual(fe.estimated.predictor.alpha, -3.305, places=3)\n self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.164, places=3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Calculates the effective dimension of some distribution. For some distribution p, some M different ddimensional vector of distribution parameters theta, this function calculates the effective dimension over k datapoints (x_i, y_i) using the pdf value at theta p(x_i, y_i; theta) and it's derivate d/dtheta p(x_i, y_i; theta).
def effective_dimension_(p, dp, gamma):
    M = p.shape[0]
    k = p.shape[1]
    d = dp.shape[2]

    f_hat = normalised_fisher(p, dp)
    mat = np.eye(d) + gamma * k / (2 * np.pi * np.log(k)) * f_hat

    # log(sqrt(det)) == log(det) / 2
    rootdet = np.linalg.slogdet(mat)[1] / 2  # slogdet is more stable than det
    add = np.log(np.sum(np.exp(rootdet))) - np.log(M)  # normalized sum over parameter space
    result = 2 * add / np.log(gamma * k / (2 * np.pi * np.log(k)))
    return result
[ "def estimate_marginal_entropies(X, k=3):\n marginal_entropies = np.zeros(X.shape[1])\n for i in range(X.shape[1]):\n marginal_entropies[i] = estimate_entropy(X[:,i], k)\n return marginal_entropies", "def pDpk(self, x, k):\n k = np.array(k)\n return 2*c*c*k/(self._omega*self._omega)", "def calc_dK(self):\n K = 2*pi*sqrt(2*m_He*self.E)/h # m^-1\n #K = k*np.sin(incident_angle*pi/180) \n #K = 2*pi*sqrt(5*m_He*k_B*self.T)/h; # m^-1\n # Calculates the parallel momentum transfer in nm^-1\n self.DK = K*(np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )/1e9;\n # Calculate the projected k values\n self.kx = -K*( (np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )*np.cos(self.alpha*pi/180) )/1e9;\n self.ky = -K*(np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )*np.sin(self.alpha*pi/180)/1e9;", "def effective_dimension(L, K):\n return (K @ L.cholesky_inverse()).trace().item()", "def _infer_dimension_(spectrum, tr_sigma, n_samples, n_features, delta = None, alpha = 1, beta = 1):\n n_spectrum = len(spectrum)\n ll = np.empty(n_spectrum)\n unscaled_vhat = np.empty(n_spectrum)\n for rank in range(n_spectrum):\n if delta is not None:\n unscaled_vhat[rank] = tr_sigma - (rank * delta / (n_samples - 1) + spectrum[:rank].sum())\n #print('unscaled_vhat is : ', unscaled_vhat)\n else:\n unscaled_vhat[rank] = tr_sigma - spectrum[:rank].sum()\n\n ll[rank] = _assess_dimension_(spectrum, unscaled_vhat[rank], rank, n_samples, n_features, alpha = alpha, beta = beta)\n return np.nanargmax(ll)+1, ll, unscaled_vhat", "def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n return (sum([self.pmf(n) for n in range(k + 1)]))", "def multivariate_digamma(self, n, p):\n # Check for appropriate degree of freedom\n if not n > (p-1):\n raise ValueError('Degrees of freedom too low for dimensionality.')\n\n # Preallocate\n Pp = 0\n\n # Sum from d=1 to D\n for d in range(1, p+1):\n\n # Digamma function of degrees of freedom and dimension\n Pp += sp.digamma((n + 1 - d)/2)\n\n return Pp", "def getGplvmDimensionsC(self):\n return _core.CGPkronecker_getGplvmDimensionsC(self)", "def make_probability_dist(end_point_dic: dict) -> np.ndarray:\r\n\r\n #make the probability density function given the out_dic\r\n def norm(arrayIn: np.ndarray) -> np.ndarray: #### normalize\r\n \"\"\" This is a sub function to normalize the probability mass function. 
\r\n \r\n Args:\r\n arrayIn: Np.ndarray, the un-normalized probalility mass function.\r\n Returns:\r\n arrayIn: Np.ndarray, the array after normalization.\r\n \"\"\"\r\n normFact = sum(arrayIn[:, 1]) # sum all probabilities \r\n arrayIn[: ,1] = arrayIn[:, 1]/normFact # divide by the sum of all probabilities\r\n\r\n return arrayIn\r\n\r\n prob_mass_dist = np.zeros([len(end_point_dic), 2]) # creating empty array to be populated shortly\r\n\r\n for i ,endPoint in enumerate(end_point_dic): #### placing info from dictionary in numpy array\r\n prob_mass_dist[i][0] = endPoint\r\n prob_mass_dist[i][1] = end_point_dic[endPoint]\r\n\r\n return norm(prob_mass_dist) # normalizing and returning\r", "def get_param_dim(self):\n return self.input_dim * self.output_dim + self.output_dim ** 2", "def numVariablesDegree(d, n):\n\n return scipy.special.comb(d + n - 1, n - 1, exact = True)", "def dim(self):\n return self._dim", "def _dimensions(ds):\n ds = dshape(ds)\n if isdimension(ds[0]):\n return 1 + _dimensions(ds.subarray(1))\n if isinstance(ds[0], Record):\n return 1 + max(map(_dimensions, ds[0].types))\n if len(ds) == 1 and isunit(ds[0]):\n return 0\n raise NotImplementedError('Can not compute dimensions for %s' % ds)", "def __kl_divergence(self, params, P, degrees_of_freedom):\n MACHINE_EPSILON = np.finfo(np.double).eps\n \n X_embedded = params.reshape(self.n_samples, self.n_components)\n \n # compute low-dimensional affinity matrix\n dist = pdist(X_embedded, \"sqeuclidean\") / degrees_of_freedom\n dist += 1.0\n dist **= (degrees_of_freedom + 1.0) / -2.0\n Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)\n \n # KL divergence of P and Q\n kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))\n \n # gradient: dC/dY\n grad = np.ndarray((self.n_samples, self.n_components), dtype=params.dtype)\n PQd = squareform((P - Q) * dist)\n \n for i in range(self.n_samples):\n grad[i] = np.dot(np.ravel(PQd[i], order=\"K\"), X_embedded[i] - X_embedded)\n grad = grad.ravel()\n c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom\n grad *= c\n \n return kl_divergence, grad", "def calculate_column_density(self):\n self._calculate_thin_line_column_density()\n self._calculate_thick_line_column_density()\n self._calculate_H2_column_density()", "def pdf_entropy(density_df, input_density_df,\n pseudocount, input_pseudocount,\n min_density_threshold=0):\n\n df_indices = density_df.index\n dfi_indices = input_density_df.index\n missing = set(df_indices) - set(dfi_indices)\n\n input_density_df = input_density_df.append(input_density_df.ix[missing])\n\n pdf = calculate_pdf(density_df, pseudocount, min_density_threshold)\n input_pdf = calculate_pdf(\n input_density_df, input_pseudocount, min_density_threshold\n )\n\n en = pdf.multiply(np.log2(pdf.div(input_pdf)))\n return en", "def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n # print(self.pmf(k))\n e = 2.7182818285\n const = (e ** (-1 * self.lambtha))\n return self.pmf(k) + self.cdf(k - 1)", "def num_dimensions(self):\n return self.numDim.value", "def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the Typed Factory with the field to check and the factory each value corresponds to
def __init__(self, field, valueToFactory):
    self.field = field
    self.valueToFactory = valueToFactory
[ "def __init__(self, schema_factory):\n self._initialize()\n self.factory = schema_factory\n self.Input = type(schema_factory.mock_input())\n self.Output = type(schema_factory.mock_output())\n self.model = self._load_model()", "def factory(self, field_type: type[Field]) -> FieldDefaultFactory:\n factory = self._factories.get(field_type, None)\n if factory is not None:\n return factory\n\n for ft, factory in self._factories.items():\n if issubclass(field_type, ft):\n return factory\n\n return lambda f: f.value", "def test_source_trait_encoded_value_factory_create(self):\n source_trait_encoded_value = factories.SourceTraitEncodedValueFactory.create()\n self.assertIsInstance(source_trait_encoded_value, models.SourceTraitEncodedValue)", "def test_source_trait_encoded_value_factory_build(self):\n source_trait_encoded_value = factories.SourceTraitEncodedValueFactory.build()\n self.assertIsInstance(source_trait_encoded_value, models.SourceTraitEncodedValue)", "def __post_init__(self):\n for name, field_type in self.__annotations__.items():\n if not isinstance(self.__dict__[name], field_type):\n setattr(self, name, field_type(self.__dict__[name]))", "def gen_fake(self, field_name, fake):\r\n ...", "def addFactory(self, value, factory):\r\n self.valueToFactory[value] = factory", "def test_harmonized_trait_encoded_value_factory_create(self):\n harmonized_trait_encoded_value = factories.HarmonizedTraitEncodedValueFactory.create()\n self.assertIsInstance(harmonized_trait_encoded_value, models.HarmonizedTraitEncodedValue)", "def test_constructor(self):\n try:\n FieldType()\n self.fail()\n except:\n pass\n\n try:\n FieldType(None)\n self.fail()\n except:\n pass\n \n x = FieldType('foo')\n self.assertTrue(x.term == 'foo')\n\n x = FieldType('http://bar.com/foo')\n self.assertTrue(x.term == 'foo')\n\n x = FieldType('foo', index=0)\n self.assertTrue(x.index == 0)\n \n try:\n FieldType('foo', index='')\n except TypeError:\n pass\n\n x = FieldType('foo', default='bar')\n self.assertTrue(x.default == 'bar')\n\n x = FieldType('foo', index=0, default='bar')\n self.assertTrue(x.term == 'foo')\n self.assertTrue(x.index == 0)\n self.assertTrue(x.default == 'bar')", "def _get_translation_factory_and_field(self):\n raise NotImplementedError()", "def TypeInitializer(self) -> _n_5_t_19:", "def __init__(self, force_order=None, **fields):\n # check that each field value is a BaseType of FieldSet instance\n for value in fields.values():\n if not isinstance(value, (BaseType, FieldSet)):\n raise ValueError('%s is not a valid type' % value)\n self.data = None # loaded data should be stored here\n self.cache = {} # store converted values from data types\n self.loaded = False # flag to mark that data was loaded\n self.fields = SortedDict() # fields\n for name in _ordering(force_order, fields): # store fields ordered\n self.fields[name] = fields[name]", "def test_harmonized_trait_encoded_value_factory_build(self):\n harmonized_trait_encoded_value = factories.HarmonizedTraitEncodedValueFactory.build()\n self.assertIsInstance(harmonized_trait_encoded_value, models.HarmonizedTraitEncodedValue)", "def factory( self ):\n return self._factory", "def _create_fields(self):\r\n pass", "def test_factory(self):\n self.assertIsInstance(Extrapolator.factory(mode=\"window\"), WindowExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"poly\"), PolynomialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"diff_model\"), DifferentialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"pca\"), 
PCAExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"l1\"), SieveExtrapolator)\n self.assertRaises(QiskitNatureError, Extrapolator.factory, mode=\"unknown\")", "def test_all_betterself_factories(self):\n factories_to_test = [\n ActivityFactory,\n ActivityLogFactory,\n DailyProductivityLogFactory,\n IngredientFactory,\n IngredientCompositionFactory,\n MeasurementFactory,\n SleepLogFactory,\n SupplementFactory,\n SupplementLogFactory,\n SupplementStackFactory,\n SupplementStackCompositionFactory,\n WellBeingLogFactory,\n FoodFactory,\n FoodLogFactory,\n ]\n\n for factory in factories_to_test:\n created_instance = factory()\n self.assertIsNotNone(created_instance)", "def __init_flow_generator(self, entity):\n if entity.e_schema == 'Refund':\n if self._doc_type != \"refund\":\n self._doc_type = \"refund\"\n self._flow_generator = RefundFlowGenerator()\n elif entity.e_schema == 'Expense':\n if self._doc_type != \"expense\":\n self._doc_type = \"expense\"\n self._flow_generator = ExpenseFlowGenerator()\n else:\n self._doc_type = None\n self._flow_generator = None", "def _init_fields(self) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the factory to the mapping
def addFactory(self, value, factory):
    self.valueToFactory[value] = factory
[ "def add_factory(self, node_name, factory):\n self.factories[node_name] = factory", "def add_to_map(self):\n pass", "def set_factory(self, name, factory):\n self.factories[name] = factory", "def add(self, pattern, methods):\n self._mapping.append(RESTMapping(pattern, methods))", "def add_instance(self, instance):\n self.factories.append(instance)", "def add_factory(name, evaluation_factory):\n EvaluationFactory.factories[name] = evaluation_factory", "def add_mapping(self, periph, mapping):\n self._mappings[periph] = mapping", "def test_graph_orm_mapping_add(self):\n\n idx = self.orm.node_mapping.add(ORMtestTgf6, lambda x: x.get('key') == 'six')\n\n self.assertEqual(idx, 1)\n self.assertEqual(len(self.orm.node_mapping), 1)\n self.assertEqual(list(self.orm.node_mapping.keys()), [1])\n self.assertEqual(self.orm.node_mapping[1]['class'], ORMtestTgf6)\n self.assertEqual(self.orm.node_mapping[1]['mro_pos'], 0)", "def register(self, service, factory=..., instance=..., scope=..., **kwargs):\n ...", "def set_dynamic_mapping(self, collection):\n self.client.put_mapping(self.index, collection, {'dynamic': True})", "def register_factory(\n self, factory, iface_or_type=Interface, *, context=None, name=''\n ):\n iface = _iface_for_type(iface_or_type)\n context_iface = _iface_for_context(context)\n wants_context = context is not None\n\n info = ServiceFactoryInfo(factory, iface, context_iface, wants_context)\n factories = self._cache.get()\n _register_factory(info, factories, iface, context_iface, name)", "def set_factory(self, thing: type, value, overwrite=False):\n if thing in self.factories and not overwrite:\n raise DiayException(\"factory for %r already exists\" % thing)\n self.factories[thing] = value", "def _add_mapping(self, adapter, host_uuid, vm_uuid, vios_uuid,\n device_name):\n pv = pvm_stor.PV.bld(adapter, device_name)\n tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, vm_uuid, pv)", "def mapping(self, source):", "def _add_flfact_object(self, flfact: FLFACT) -> None:\n key = flfact.sid\n #assert key not in self.flfacts\n assert key > 0\n self.flfacts[key] = flfact # set id...\n self._type_to_id_map[flfact.type].append(key)", "def add(self, feature_name):\n\n self.__dictionary[feature_name] = feature_name\n self.__mapped = False", "def add_features(self, features):\n\n if isinstance(features, Mapping):\n features = features.items()\n\n super(Namespace, self).add_features(features)", "def add_actor(self, dict_actor):\r\n raise NotImplementedError", "def addDynamicPort(self, portname, factory):\n assertMainThread()\n if factory is InputPortInterface:\n factory = InputPort\n if factory is OutputPortInterface:\n # if InputPort or OutputPort classes are given as argument, make sure to actually use the factories\n factory = OutputPort\n self.addPort(factory(True, portname, self))\n self.createFilterAndUpdate(False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to an SR560 Amplifier.
def connect(self, server, port):
    print('Connecting to "%s" on port "%s"...' %(server.name, port))
    self.server = server
    self.ctx = server.context()
    self.port = port
    p = self.packet()
    p.open(port)
    # The following parameters match the default configuration of
    # the SR560 unit. You should go to the instrument menu and change
    # the settings below.
    p.baudrate(9600L)
    p.stopbits(2L)
    p.bytesize(8L)
    p.parity('N')
    p.rts(False)
    p.timeout(2 * units.s)
    # Clear out the read buffer. This is necessary for some devices.
    p.read_line()
    yield p.send()
[ "def sde_connect(self):\n if not self.sde_plugin:\n raise AlmException('Requires initialization')\n try:\n self.sde_plugin.connect()\n except APIError, err:\n raise AlmException('Unable to connect to SD Elements. Please review URL, id,'\n ' and password in configuration file. Reason: %s' % (str(err)))\n\n self.sde_validate_configuration()", "def connect_to_spec(self):\n try:\n self.solo_controller.ADXComm.tryReconnect(TryOnce=True, host=self.spec_address.get())\n except AttributeError:\n self.solo_controller = solocomm.initConnections(self, host=self.spec_address.get())", "def __connect_with_credentials(self):\n\t\tself.client_.username_pw_set(\"xgvutxaa\", \"9cMIpVoL4Ujj\")\n\t\tself.client_.connect('spectacular-pharmacist.cloudmqtt.com',1883,3600)", "def connect(self):\n if not isinstance(self.owf_serial, serial.Serial) or not self.owf_serial.is_open:\n if self.config[\"OCTOWIRE\"].getint(\"detect\"):\n self.owf_serial = self._manage_connection()\n else:\n port = self.config[\"OCTOWIRE\"][\"port\"]\n baudrate = self.config[\"OCTOWIRE\"].getint(\"baudrate\")\n timeout = self.config[\"OCTOWIRE\"].getint(\"read_timeout\")\n self.owf_serial = self._manage_connection(auto_connect=False, octowire_port=port,\n octowire_baudrate=baudrate, octowire_timeout=timeout)", "def __connect(self):\n self.session = xnatpy.connect(\n self.server, user=self.user, password=self.password\n )", "def connectToWifiSpot(wifiObject, password) :\n print(\"not yet implemented\")", "def connect(self, ap, **kwargs):\n if not isinstance(ap, FstAP):\n raise Exception(\"Bad AP object to connect to\")\n h = self.get_instance()\n hap = ap.get_instance()\n h.dump_monitor()\n h.connect(ap.get_ssid(), **kwargs)\n h.dump_monitor()\n self.connected = ap", "def connectionMade(self):\n self.connectToSelf = self.hijacker.clientBase.connect(MobileCodeClient(), \n self.hijacker.clientBase.getAddress(), \n 100)", "def connect(self):\n if self.__proxy is not None:\n self._close_connections()\n self.__proxy = naoqi.ALProxy(\"ALMotion\", self.ip, self.port)", "def setParameters():\n ip = '192.168.1.143'\n port = 9559\n myBroker = naoqi.ALBroker(\"myBroker\", \"0.0.0.0\", 0, ip, port)\n connector = RobotConnect(\"Naomi\")\n connector.setPostureProxy()\n connector.setMotionProxy()\n connector.setVideoProxy()\n return connector", "def __init__(self, _id: int, spc: SurePetcareAPI) -> None:\n super().__init__(_id, spc, DEVICE_CLASS_CONNECTIVITY, SureProductID.HUB)", "def connect(self):\n if self.__socket is not None:\n self._close_connections()\n # Start GAViewer with port, and load the body.\n self.__process_id = os.spawnlp(os.P_NOWAIT, self.GAVIEWER_PATH,\\\n self.GAVIEWER_NAME, \"-net\", str(self.port))\n self.__socket = socket.create_connection((self.ip, self.port))\n self.__socket.send(\"default_model(%s)$\" % self.MATH_MODELS[len(BASE)])\n self.__socket.send(self.body_setup())", "def connectToLegacyHub(adr, port):\n\tinsteon.setPort(IOPort(LegacyHubStream(adr, port)))", "def connect(self):\r\n self.connection = cql.connect('127.0.0.1', 9160, \"fourgm\", cql_version = '3.0.0')\r\n self.cursor = self.connection.cursor()\r\n\r\n self.connection2 = pyodbc.connect('DRIVER={SQL Server};SERVER=XXX.XXX.XXX.XXX;DATABASE=umlsSmall;UID=XXXXXX;PWD=XXXXXXXXX')\r\n self.cursor2=self.connection2.cursor()", "def connect():\n global connected\n try:\n device.open(VENDOR_ID, PRODUCT_ID)\n except IOError:\n connected = False\n raise\n else:\n connected = True", "def connect(self):\n self.logger.debug('Connecting to {}'.format(self))\n # This will close 
device and driver, ensuring it is ready to access a new camera\n self._driver.set_handle(handle=INVALID_HANDLE_VALUE)\n self._driver.open_driver()\n self._driver.open_device(self._address)\n self._driver.establish_link()\n link_status = self._driver.get_link_status()\n if not link_status['established']:\n raise error.PanError(\"Could not establish link to {}.\".format(self))\n self._handle = self._driver.get_driver_handle()\n if self._handle == INVALID_HANDLE_VALUE:\n raise error.PanError(\"Could not connect to {}.\".format(self))\n\n self._info = self._driver.get_ccd_info(self._handle)\n self.model = self.properties['camera name']\n if self.properties['colour']:\n if self.properties['Truesense']:\n self._filter_type = 'CRGB'\n else:\n self._filter_type = 'RGGB'\n else:\n self._filter_type = 'M'\n\n # Stop camera from skipping lowering of Vdd for exposures of 3 seconds of less\n self._driver.disable_vdd_optimized(self._handle)\n\n self._connected = True\n\n if self.filterwheel and not self.filterwheel.is_connected:\n # Need to defer connection of SBIG filter wheels until after camera is connected\n # so do it here.\n self.filterwheel.connect()", "def d_connect(self):\n self.drone = Drone()\n self.drone.startup()\n self.drone.reset()\n self.navigator = Navigator(self.drone)\n self.camera = Camera(\n self.drone,\n self.cam_width,\n self.cam_height,\n self.camera_event)\n self.camera.start()\n self.senActivate()\n self.render_map()\n self.act_drone_loc()\n for radio in self.radios:\n radio.config(bg= self.control_color_back,state=tk.NORMAL)\n self.route_selctn()", "def __connect(self):\n\n try:\n # in connect we use only 'json' content type\n\n rqst_dict = DSAPIRequests.connect(email=self.username, password=self.password)\n\n try:\n print(\"Connecting to DSpace API...\")\n result = self.send_request(rqst_dict, c_type='json')\n print(\"Connection request returned: \", result)\n self.api_token = result['api-token']\n except Exception as e:\n print(\"Failed to connect to DSpace API because of the following reason: \" + str(e))\n raise e\n\n except Exception as e:\n raise e", "def connect(self):\n self.serial_connection = serial.Serial(self.serial_name, timeout=self.time_out)\n self.serial_connection.flushInput()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of user roles scoped to a project or domain.
def roles_for_user(request, user, project=None, domain=None):
    ksclient = get_admin_ksclient()
    if keystone.VERSIONS.active < 3:
        return ksclient.roles.roles_for_user(user, project)
    else:
        return ksclient.roles.list(user=user, domain=domain, project=project)
[ "async def get_user_roles_for_domain(\n request: Request, user: UserId, domain: Domain, nocache: bool = False\n) -> typing.Set[int]:\n domain_id = domain if type(domain) is int else Mappings.domain_id_for(domain)\n\n user_roles = await get_all_user_roles(request, user, nocache)\n # Look up the list of role ids associated with the domain key. Return an\n # empty set of it does not exist.\n return set(user_roles.get(f\"d:{domain_id}\", []))", "def roles(self):\n roles = []\n for user_role in self.user_roles:\n roles.append(user_role.role)\n return roles", "def get_roles(self) -> List[str]:\n pass", "def getRoles(self):\n rolesList = []\n roles = self.userTree.find('roles')\n for role in roles.findall('role'):\n rolesList.append(role.text)\n return(rolesList)", "def get_global_roles(principal):\n if isinstance(principal, User):\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n user=principal, content_id=None, content_type=None)]\n else:\n if isinstance(principal, Group):\n principal = (principal,)\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n group__in=principal, content_id=None, content_type=None)]", "def get_roles(self, **options):\n return self._call(\"roles\", method=\"GET\", **options)", "def user_roles(request):\n return {\n 'ROLE_PATIENT': UserProfile.ROLE_PATIENT,\n 'ROLE_THERAPIST': UserProfile.ROLE_THERAPIST,\n 'ROLE_RESEARCHER': UserProfile.ROLE_RESEARCHER\n }", "def _find_projects(self, token=None):\n if not token:\n return []\n return deep_pluck(token['roles'], 'scope.project')", "def roles(self) -> Optional[Sequence['outputs.CloudServiceRoleProfilePropertiesResponse']]:\n return pulumi.get(self, \"roles\")", "def projects_permissions(self, user):\n locale_projects = []\n\n def create_users_map(qs, keyfield):\n \"\"\"\n Creates a map from query set\n \"\"\"\n user_map = defaultdict(list)\n for row in qs:\n key = row.pop(keyfield)\n user_map[key].append(row)\n return user_map\n\n project_locales = list(\n self.project_locale.visible()\n .visible_for(user)\n .prefetch_related(\"project\", \"translators_group\")\n .order_by(\"project__name\")\n .values(\n \"id\",\n \"project__pk\",\n \"project__name\",\n \"project__slug\",\n \"translators_group__pk\",\n \"has_custom_translators\",\n )\n )\n\n projects_translators = create_users_map(\n (\n User.objects.filter(\n groups__pk__in=[\n project_locale[\"translators_group__pk\"]\n for project_locale in project_locales\n ]\n )\n .exclude(email=\"\")\n .prefetch_related(\"groups\")\n .values(\"id\", \"first_name\", \"email\", \"groups__pk\")\n .distinct()\n .order_by(\"email\")\n ),\n \"groups__pk\",\n )\n\n for project_locale in project_locales:\n group_pk = project_locale[\"translators_group__pk\"]\n locale_projects.append(\n (\n project_locale[\"id\"],\n project_locale[\"project__slug\"],\n project_locale[\"project__name\"],\n projects_translators[group_pk],\n project_locale[\"has_custom_translators\"],\n )\n )\n\n return locale_projects", "def listroles(self):\n\n request_string = f\"{self.base_url}/directoryRoles\"\n response = requests.get(request_string, headers=self.header_params_GMC)\n data = response.json()\n roles = [x['displayName'] for x in data['value']]\n return roles", "def get_roles(self, model_id):\n doc = self.__get_model(model_id)\n return doc['roles']", "def get_staff_roles(request: Request):\n return staff_roles", "def all_roles(cls):\n return _CompilerRole.all()", "def list_roles_for_user(self, user):\r\n user_id = utils.get_id(user)\r\n uri = \"users/%s/roles\" % 
user_id\r\n resp, resp_body = self.method_get(uri)\r\n if resp.status_code in (401, 403):\r\n raise exc.AuthorizationFailure(\"You are not authorized to list \"\r\n \"user roles.\")\r\n roles = resp_body.get(\"roles\")\r\n return roles", "def get_player_roles(self) -> List[str]:\n pass", "def list(self, user=None, group=None, project=None, domain=None, role=None,\n effective=False):\n\n self._check_not_user_and_group(user, group)\n self._check_not_domain_and_project(domain, project)\n\n query_params = {}\n if user:\n query_params['user.id'] = base.getid(user)\n if group:\n query_params['group.id'] = base.getid(group)\n if project:\n query_params['scope.project.id'] = base.getid(project)\n if domain:\n query_params['scope.domain.id'] = base.getid(domain)\n if role:\n query_params['role.id'] = base.getid(role)\n if effective:\n query_params['effective'] = effective\n\n return super(RoleAssignmentManager, self).list(**query_params)", "def get_user_roles(self, user, project=None):\n params = {'User': user}\n if project:\n params['Project'] = project\n return self.apiconn.get_list('DescribeUserRoles',\n params,\n [('item', UserRole)])", "def get_local_roles(obj, principal):\n ctype = ContentType.objects.get_for_model(obj)\n\n if isinstance(principal, User):\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n user=principal, content_id=obj.id, content_type=ctype)]\n else:\n return [prr.role for prr in PrincipalRoleRelation.objects.filter(\n group=principal, content_id=obj.id, content_type=ctype)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a role for a group on a domain or project.
def add_gp_role(request, role, group, domain=None, project=None):
    ksclient = get_admin_ksclient()
    return ksclient.roles.grant(role=role, group=group, domain=domain, project=project)
[ "def test_add_role_to_ldap_group(self):\n pass", "def role(self, role):\n groups = Group.objects.filter(name__contains=\"| role |\")\n if role is None: self.groups.remove(*groups); return\n if not role in MEMBER_ROLES.keys():\n raise ValueError(\"No role with identifyer '{0}' found. Must be one of ['{1}'].\".format(\n str(role), \"', '\".join(MEMBER_ROLES.keys())))\n else:\n self.groups.add(Group.objects.get(name=MEMBER_ROLES[role][0]))", "def add_user_group(user_group_name, tenant_name, role, logger, client):\n graceful_msg = (\n 'User group `{0}` is already associated with tenant `{1}`'\n .format(user_group_name, tenant_name)\n )\n with handle_client_error(409, graceful_msg, logger):\n client.tenants.add_user_group(user_group_name, tenant_name, role)\n logger.info(\n 'User group `{0}` added successfully to tenant `{1}`'\n .format(user_group_name, tenant_name)\n )", "def add_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.add(user)", "def add_group(username, group_name, logger, client):\n client.users.add_to_group(username, group_name)\n logger.info('User `{0}` added successfully to group '\n '`{1}`'.format(username, group_name))", "async def addRole(guild, join_message, role: str) -> str:\n if role in getRoles(guild.id):\n return f'{role} already exists!'\n\n # Otherwise, create the role\n else:\n try:\n await guild.create_role(name=role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(task=\"create_role\", detail=role)\n\n getRoles(guild.id)[role] = join_message\n dumpConfigRoles(guild.id)\n return ''", "def assign_group_to_saml_user_roles(self, group_id, role, saml_roles):\n url = f'{self._okta.api}/apps/{self.id}/groups/{group_id}'\n payload = {'id': group_id, 'profile': {'role': role, 'samlRoles': saml_roles}}\n response = self._okta.session.put(url, json=payload)\n if not response.ok:\n self._logger.error(f'Assigning group to the saml user roles failed. 
Response: {response.text}')\n return response.ok", "def add_group(self, name):\n if self.in_group(name):\n # We're already in this group.\n return\n\n self.groups.append(Group.get_or_create(name=name))", "def add_user_role_to_tenant(request, project=None, user=None, role=None,\n group=None, domain=None):\n ksclient = get_admin_ksclient()\n if keystone.VERSIONS.active < 3:\n return ksclient.roles.add_user_role(user, role, project)\n else:\n return ksclient.roles.grant(role, user=user, project=project,\n group=group, domain=domain)", "def AddGroup(self, group):\n # Try to avoid grabbing the lock in the common case that a group already\n # exists.\n if self.GroupExists(group.group):\n logging.info('Not installing group \"%s\" because it already existed.',\n group.group)\n return\n\n # Clear the group cache to force ourselves to reparse.\n self._group_cache = None\n\n with locking.PortableLinkLock(self._group_db_file + '.lock',\n max_retry=self._DB_LOCK_RETRIES):\n # Check that |group| exists under the lock in case we're racing to create\n # this group.\n if self.GroupExists(group.group):\n logging.info('Not installing group \"%s\" because it already existed.',\n group.group)\n return\n\n self._groups[group.group] = group\n new_groups = sorted(self._groups.itervalues(), key=lambda g: g.gid)\n contents = '\\n'.join([GroupToEntry(g) for g in new_groups])\n osutils.WriteFile(self._group_db_file, contents, atomic=True, sudo=True)\n print('Added group \"%s\" to %s:' % (group.group, self._group_db_file))\n print(' - group id: %d' % group.gid)\n print(' - password entry: %s' % group.password)\n print(' - user list: %s' % ','.join(group.users))", "def add_role(self, RoleName):\n response = self.client.add_role_to_instance_profile(\n InstanceProfileName=self.ProfileName,\n RoleName=RoleName\n )", "def create_group(c, runner, group):\n if group_exists(c, group, runner=runner):\n return True\n\n cmd = \"groupadd {}\".format(group)\n return runner(cmd, hide=True, warn=True).ok", "def add_group_by_name(self, group_name):\n group = self._okta.get_group_by_name(group_name)\n if not group:\n raise InvalidGroup(group_name)\n url = f'{self._okta.api}/apps/{self.id}/groups/{group.id}'\n response = self._okta.session.put(url, data=json.dumps({}))\n if not response.ok:\n self._logger.error(f'Adding group failed. 
Response: {response.text}')\n return response.ok", "def test_add_role_to_user(self):\n pass", "def modify_policy_add_role(\n crm_service: str, project_id: str, role: str, member: str\n) -> None:\n\n policy = get_policy(crm_service, project_id)\n\n binding = None\n for b in policy[\"bindings\"]:\n if b[\"role\"] == role:\n binding = b\n break\n if binding is not None:\n binding[\"members\"].append(member)\n else:\n binding = {\"role\": role, \"members\": [member]}\n policy[\"bindings\"].append(binding)\n\n set_policy(crm_service, project_id, policy)", "def addGroup(self, groupName):\n self._validateValues(groupName=groupName, accountName=self.accountName)\n return self._callBitbucketRestAPI(BitbucketRESTCall.GROUPS, q.enumerators.RESTMethod.POST, uriParts=[self.accountName], data={'name': groupName})", "def addGroup(self, name, description, mutexc=False, required=False):\n\n if name in self._group_args:\n raise ValueError('group {0} is already defined'.format(name))\n\n self._groups.append((name, mutexc, required))\n self._group_desc[name] = description\n self._group_args[name] = []", "def add_group():\n form = AddResearchGroupForm(request.form)\n\n if form.validate_on_submit():\n url = form.website.data\n if not re.match(r'http(s?)\\:', url):\n url = 'http://' + url\n r = urlsplit(url) # canonicalize\n\n group = ResearchGroup(abbreviation=form.abbreviation.data,\n name=form.name.data,\n colour=form.colour.data,\n website=r.geturl(),\n active=True,\n creator_id=current_user.id,\n creation_timestamp=datetime.now())\n\n try:\n db.session.add(group)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n current_app.logger.exception(\"SQLAlchemyError exception\", exc_info=e)\n flash('Could not add this affiliation group because of a database error. Please contact a system '\n 'administrator', 'error')\n\n return redirect(url_for('admin.edit_groups'))\n\n return render_template('admin/edit_group.html', group_form=form, title='Add new affiliation')", "def add_local_role(obj, principal, role):\n ctype = ContentType.objects.get_for_model(obj)\n if isinstance(principal, User):\n try:\n PrincipalRoleRelation.objects.get(user=principal, role=role, content_id=obj.id, content_type=ctype)\n except PrincipalRoleRelation.DoesNotExist:\n PrincipalRoleRelation.objects.create(user=principal, role=role, content=obj)\n return True\n else:\n try:\n PrincipalRoleRelation.objects.get(group=principal, role=role, content_id=obj.id, content_type=ctype)\n except PrincipalRoleRelation.DoesNotExist:\n PrincipalRoleRelation.objects.create(group=principal, role=role, content=obj)\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a given single role for a group from a domain or project.
def remove_group_role(request, role, group, domain=None, project=None):
    ksclient = get_admin_ksclient()
    return ksclient.roles.revoke(role=role, group=group, project=project, domain=domain)
[ "def removeRole(self, role):\n pass", "def remove_role(principal, role):\n try:\n if isinstance(principal, User):\n ppr = PrincipalRoleRelation.objects.get(\n user=principal, role=role, content_id=None, content_type=None)\n else:\n ppr = PrincipalRoleRelation.objects.get(\n group=principal, role=role, content_id=None, content_type=None)\n\n except PrincipalRoleRelation.DoesNotExist:\n return False\n else:\n ppr.delete()\n\n return True", "def role(self, role):\n groups = Group.objects.filter(name__contains=\"| role |\")\n if role is None: self.groups.remove(*groups); return\n if not role in MEMBER_ROLES.keys():\n raise ValueError(\"No role with identifyer '{0}' found. Must be one of ['{1}'].\".format(\n str(role), \"', '\".join(MEMBER_ROLES.keys())))\n else:\n self.groups.add(Group.objects.get(name=MEMBER_ROLES[role][0]))", "def remove_role(user, role):\n return _assign_or_remove_role(user, role, \"remove_role_from_user\")", "def remove_group(self, group):\n self.groups.remove(group)", "async def _delmodrole(self, ctx: commands.Context, role: discord.Role):\n async with self.config.guild(ctx.guild).modroles() as modroles:\n if role.id in modroles:\n modroles.remove(role.id)\n await ctx.send(f\"{role} role has been removed.\")\n else:\n await ctx.send(\"That role isn't in the list.\")", "def test_delete_role_from_ldap_group(self):\n pass", "def remove_group(c, runner, group):\n if not group_exists(c, group, runner=runner):\n return True\n\n cmd = \"groupdel {}\".format(group)\n return runner(cmd, hide=True, warn=True).ok", "def remove_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.remove(user)", "async def remove_course_role(self, role_id):\n role = self.bot.get_guild(self.guild_id).get_role(role_id)\n if role is None:\n return logger.error(\"role is empty.\")\n\n await role.delete()", "def delete_role(self, user, role):\n if self.can_modify_roles(user) and role in self[\"roles\"] and role != \"default\":\n for permission in Permissions.ROLE_PERMISSIONS:\n if role in self[\"general\"][permission]:\n self[\"general\"][permission].remove(role)\n for path in self[\"files\"]:\n if role in self[\"files\"][path][\"roles_write\"]:\n self[\"files\"][path][\"roles_write\"].remove(role)\n del self[\"roles\"][role]\n return True\n return False", "def remove_permission_from_role(self, role, permission) -> None:\n raise NotImplementedError", "def delete_group(self, group):\n path = \"api/groups/{0}\".format(group)\n self._delete(path)", "async def remove(self, ctx, *, role_name):\n found_role = None\n for role in ctx.guild.roles:\n if role.name.lower() == role_name.lower():\n found_role = role\n if found_role:\n try:\n success = await \\\n self.bot.pg_utils.remove_autoassign_role(\n ctx.guild.id, found_role.id, self.bot.logger)\n except ValueError:\n local_embed = discord.Embed(\n title=f'{found_role.name} is already'\n ' not on the auto-assignable list',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n if success:\n local_embed = discord.Embed(\n title=f'Removed {found_role.name} '\n 'from auto-assignable roles',\n description=' ',\n color=0x419400\n )\n else:\n local_embed = discord.Embed(\n title=f'Internal error occured,'\n ' please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n else:\n local_embed = discord.Embed(\n title=f'Couldn\\'t find role {role_name}',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)", "def remove_group(self, auth=None):\n 
self._require_manager_permission(auth)\n group_id = self._id\n members = list(self.members.values_list('id', flat=True))\n nodes = self.nodes\n\n self.member_group.delete()\n self.manager_group.delete()\n self.delete()\n self.update_search(deleted_id=group_id)\n\n for user in OSFUser.objects.filter(id__in=members):\n for node in nodes:\n node.disconnect_addons(user, auth)\n params = {\n 'group': group_id,\n 'node': node._id,\n }\n self.add_corresponding_node_log(node, NodeLog.GROUP_REMOVED, params, auth)\n project_signals.contributor_removed.send(node, user=user)\n node.update_search()", "def remove_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n if targetUser in self[\"roles\"][role]:\n self[\"roles\"][role].remove(targetUser)\n return True\n return False", "def delete_device_role(self, device_role):\n try:\n device_role_id = self.get_device_roles(name=device_role)[0]['id']\n except IndexError:\n raise exceptions.NotFoundException({\"detail\": \"device-role: {}\".format(device_role)}) from None\n return self.netbox_con.delete('/dcim/device-roles/', device_role_id)", "def remove_user_role_frm_tenant(request, project=None, user=None, role=None,\n group=None, domain=None):\n ksclient = get_admin_ksclient()\n if keystone.VERSIONS.active < 3:\n return ksclient.roles.remove_user_role(user, role, project)\n else:\n return ksclient.roles.revoke(role, user=user, project=project,\n group=group, domain=domain)", "async def remove(self, ctx, *, role: discord.Role):\r\n\t\tdata = json_mngr().read('./data/settings.json')\r\n\t\tif str(ctx.guild.id) not in data.keys():\r\n\t\t\tdata[str(ctx.guild.id)] = {\r\n\t\t\t\t\"edit_roles\": [],\r\n\t\t\t\t\"view_roles\": [],\r\n\t\t\t\t\"log_channel\": None\r\n\t\t\t}\r\n\t\tif role:\r\n\t\t\tif role.id in data[str(ctx.guild.id)]['edit_roles']:\r\n\t\t\t\tdata[str(ctx.guild.id)]['edit_roles'].remove(role.id)\r\n\t\t\tjson_mngr().handle_modify('./data/settings.json', newdata=data, indent=2, backup=True)\r\n\t\t\tawait ctx.send(f\"removed {role.id} as an editing role.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a role for a user on a tenant.
def add_user_role_to_tenant(request, project=None, user=None, role=None, group=None, domain=None):
    ksclient = get_admin_ksclient()
    if keystone.VERSIONS.active < 3:
        return ksclient.roles.add_user_role(user, role, project)
    else:
        return ksclient.roles.grant(role, user=user, project=project, group=group, domain=domain)
[ "def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):\n raise exception.NotImplemented()", "def add_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.add(user)", "def assign_role(user, role):\n return _assign_or_remove_role(user, role, \"assign_role_to_user\")", "def add_user_role(self, user, role, project=None):\n return self.modify_user_role(user, role, project=project,\n operation='add')", "def add_user(username, tenant_name, role, logger, client):\n graceful_msg = (\n 'User `{0}` is already associated with tenant `{1}`'\n .format(username, tenant_name)\n )\n with handle_client_error(409, graceful_msg, logger):\n client.tenants.add_user(username, tenant_name, role)\n logger.info(\n 'User `{0}` added successfully to tenant `{1}` with `{2}` role'\n .format(username, tenant_name, role)\n )", "def test_add_role_to_user(self):\n pass", "async def AddColeadRole(self, user: User):\n await user.add_roles(self.coleaderRole)", "def grant_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n self[\"roles\"][role].append(targetUser)\n return True\n return False", "def test_add_roles_to_user(self):\n pass", "async def AddLeaderRole(self, user: User):\n await user.add_roles(self.leaderRole)", "def addusertorole(self, userguid, roleguid):\n\n header = {\n \"Content-type\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.access_token2\n }\n request_string = f\"{self.base_url}/directoryRoles/{roleguid}/members/$ref\"\n request_body = json.dumps({\n \"@odata.id\": f\"{self.base_url}/directoryObjects/{userguid}\"\n })\n response = requests.post(\n request_string,\n data=request_body,\n headers=header)\n if response.ok:\n return \"Sucess\"\n else:\n return f\"Failed to add userguid: {userguid}\"", "def create_role(self, user, role):\n if self.can_modify_roles(user) and role not in self[\"roles\"] and role != \"default\":\n self[\"roles\"][role] = []\n return True\n return False", "def add_user_to_role(roleuuid, useruuid):\n c = start_handler()\n URL = f'{APIVERSION}/roles/{roleuuid}/users'\n DATA = {'data': {'type': 'users', 'id': useruuid}}\n PF = json.dumps(DATA)\n c.setopt(c.POSTFIELDS, PF)\n perform_request(c, URL)", "async def AddStaticRole(self, user: User):\n await user.add_roles(self.memberRole)", "async def addrole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\":no_entry: You need to specify a user to give the role too.\")\n idk = []\n for user in users:\n await self.bot.add_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, gave user(s) `\" + \", \".join(idk) + \"` the role {0}\".format(role.name))", "def set_role(username, security_role, logger, client):\n logger.info('Setting new role for user {0}...'.format(username))\n client.users.set_role(username, security_role)\n logger.info('User deleted')", "def add_gp_role(request, role, group, domain=None, project=None):\n ksclient = get_admin_ksclient()\n return ksclient.roles.grant(role=role, group=group, domain=domain,\n project=project)", "def add_role(self, RoleName):\n response = self.client.add_role_to_instance_profile(\n InstanceProfileName=self.ProfileName,\n RoleName=RoleName\n )", "def set_role(self, role):\n self.role.set(role)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a given single role for a user from a tenant.
def remove_user_role_frm_tenant(request, project=None, user=None, role=None, group=None, domain=None):
    ksclient = get_admin_ksclient()
    if keystone.VERSIONS.active < 3:
        return ksclient.roles.remove_user_role(user, role, project)
    else:
        return ksclient.roles.revoke(role, user=user, project=project, group=group, domain=domain)
[ "def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):\n raise exception.NotImplemented()", "def remove_role(user, role):\n return _assign_or_remove_role(user, role, \"remove_role_from_user\")", "def delete_role_from_user(self, role, user):\r\n uri = \"users/%s/roles/OS-KSADM/%s\" % (utils.get_id(user),\r\n utils.get_id(role))\r\n resp, resp_body = self.method_delete(uri)", "def removeRole(self, role):\n pass", "def remove_user_role(channel, role, user):\n get_role_model(channel, role).group.user_set.remove(user)", "def delete_role(self, user, role):\n if self.can_modify_roles(user) and role in self[\"roles\"] and role != \"default\":\n for permission in Permissions.ROLE_PERMISSIONS:\n if role in self[\"general\"][permission]:\n self[\"general\"][permission].remove(role)\n for path in self[\"files\"]:\n if role in self[\"files\"][path][\"roles_write\"]:\n self[\"files\"][path][\"roles_write\"].remove(role)\n del self[\"roles\"][role]\n return True\n return False", "def remove_role(self, user, targetUser, role):\n if self.can_grant_permissions(user):\n if role in self[\"roles\"]:\n if targetUser in self[\"roles\"][role]:\n self[\"roles\"][role].remove(targetUser)\n return True\n return False", "def revoke(session: Session, username: str, role: Role):\n if not username or not role:\n raise ValueError('A username and a role name are required.')\n user_role: Optional[UserRole] = UserRoles.find_role(session, username, role)\n if user_role is None:\n return\n try:\n session.delete(user_role)\n session.commit()\n except:\n session.rollback()\n raise", "def delete_user_role(self, uid, rolename, meeting): \n if meeting !=\"\":\n status, resp = self.ucengine.request('DELETE',\n 'user/%s/roles/%s/%s' % (uid, rolename, meeting), \n params = {'uid':self.uid, 'sid': self.sid}\n )\n else:\n status, resp = self.ucengine.request('DELETE',\n 'user/%s/roles/%s' % (uid, rolename),\n params = {'uid':self.uid, 'sid': self.sid}\n )\n if status != 200:\n raise UCError(status, resp)", "async def _remove(self, ctx: commands.Context, user: discord.Member, role: discord.Role):\n async with self.config.member(user).temp_roles() as user_tr:\n if not (user_tr.get(str(role.id))):\n return await ctx.send(\n f\"That is not an active TempRole for {user.mention}.\",\n allowed_mentions=discord.AllowedMentions.none()\n )\n del user_tr[str(role.id)]\n message = f\"TempRole {role.mention} for {user.mention} has been removed.\"\n await ctx.send(\n message,\n allowed_mentions=discord.AllowedMentions.none()\n )\n await self._maybe_send_log(ctx.guild, message)\n await self._tr_end(user, role, admin=ctx.author)", "async def RemoveStaticRole(self, user: User): \n await user.remove_roles(self.memberRole)", "def test_remove_role_from_user(self):\n pass", "def remove_role(principal, role):\n try:\n if isinstance(principal, User):\n ppr = PrincipalRoleRelation.objects.get(\n user=principal, role=role, content_id=None, content_type=None)\n else:\n ppr = PrincipalRoleRelation.objects.get(\n group=principal, role=role, content_id=None, content_type=None)\n\n except PrincipalRoleRelation.DoesNotExist:\n return False\n else:\n ppr.delete()\n\n return True", "def delete_device_role(self, device_role):\n try:\n device_role_id = self.get_device_roles(name=device_role)[0]['id']\n except IndexError:\n raise exceptions.NotFoundException({\"detail\": \"device-role: {}\".format(device_role)}) from None\n return self.netbox_con.delete('/dcim/device-roles/', device_role_id)", "async def remove_course_role(self, role_id):\n role = 
self.bot.get_guild(self.guild_id).get_role(role_id)\n if role is None:\n return logger.error(\"role is empty.\")\n\n await role.delete()", "async def RemoveLeaderRole(self, user: User):\n await user.remove_roles(self.leaderRole)", "def test_staff_role_remove(self):\n\n flag = \"user\"\n api = \"permise.staff.role.remove\"\n role_id = 15\n\n result = self.access_api(flag = flag, api = api, role_id = role_id)", "def test_remove_user_role(self):\n pass", "async def removerole(self, ctx, role:discord.Role, *users:discord.User):\n if ctx.message.server.me.permissions_in(ctx.message.channel).manage_roles == False:\n await self.bot.say(\"Sorry, I do not have the manage_roles permission\\n**Aborting**\")\n return\n if len(users) == 0:\n await self.bot.say(\"You need to add a person to remove the role from!\")\n idk = []\n for user in users:\n await self.bot.remove_roles(user, role)\n idk.append(user.name)\n await self.bot.say(\"ok, removed the role {0} from user(s) `{1}`\".format(role.name, ', '.join(idk)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To see all volumes transfers as an admin pass in a special
def transfer_list_cinder(request, detailed=True, search_opts=None):
    c_client = get_cinder_client()
    return [cinder.VolumeTransfer(v) for v in c_client.transfers.list(
        detailed=detailed, search_opts=search_opts)]
[ "def transfer_list(request, detailed=True, search_opts=None):\n c_client = cinderclient(request)\n try:\n return [VolumeTransfer(v) for v in c_client.transfers.list(\n detailed=detailed, search_opts=search_opts)]\n except cinder_exception.Forbidden as error:\n LOG.error(error)\n return []", "def show_volume_transfer(self, transfer_id):\n url = \"%s/%s\" % (self.resource_path, transfer_id)\n resp, body = self.get(url)\n body = json.loads(body)\n schema = self.get_schema(self.schema_versions_info)\n self.validate_response(schema.show_volume_transfer, resp, body)\n return rest_client.ResponseBody(resp, body)", "def do_showVolumes(self, filer):\n\t\tcommand = 'ssh -qn admin@%s vol show' % self.filer\n\t\tproc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\t\tp_stdout = proc.communicate()[0]\n\t\tprint p_stdout", "def find_volumes():\n global email_message\n email_message += 'Finding volumes that match the requested filter: %(filter)s\\n\\n' % {\n 'filter': config.volumes['filter']\n }\n return conn.get_all_volumes(filters=config.volumes['filter'])", "def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def list(ctx):\n \"\"\"been added as volume metadata or block drives as well as drives that have not been added and are available.\"\"\"\n\n \n\n cli_utils.establish_connection(ctx)\n \n\n \n\n ctx.logger.info(\"\"\": \"\"\"+\"\"\";\"\"\"+\"\")\n try:\n _ListDrivesResult = ctx.element.list_drives()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n if ctx.json:\n print(simplejson.dumps(simplejson.loads(_ListDrivesResult), indent=4))\n return\n else:\n cli_utils.print_result(_ListDrivesResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def volume(ctx, *args, **kwargs):", "def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )", "def list(self,\n **kwargs\n ):\n\n # dont filter_name=None,\n # dont filter_value=None,\n # dryrun=False):\n\n #:param filter_name (string)\n #:param filter_value (string)\n #:param volume_ids (list): The volume IDs\n\n # filter = \"[[\n # {\n # 'Name': 'xyz',\n # 'Values': [\n # 'abc',\n # ]\n # },\n # ]\"\n\n # filter = eval(filter)\n\n #banner('print kwargs')\n #print(kwargs)\n #print(kwargs['output'])\n\n client = boto3.client('ec2')\n dryrun = kwargs['--dryrun']\n #region = kwargs['--region']\n #vm = kwargs['--vm']# will need vm id from mongo records\n result = client.describe_volumes(\n DryRun=dryrun,\n # Filters=[\n # {\n # 'Name': {},\n # 'Values': [\n # filter_value,\n # ]\n # },\n # ],\n )\n #banner(\"raw results\")\n #print(result)\n #banner(\"raw results end\")\n result = self.update_dict(result)\n\n #print(self.Print(result, kind='volume', output=kwargs['output']))\n\n return result", "def volume_list(mnode):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes\", httplib.OK, 
None)", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def show_volume(svm_name) -> None:\n print()\n print(\"Getting Volume Details\")\n print(\"===================\")\n try:\n for volume in Volume.get_collection(\n **{\"svm.name\": svm_name}, fields=\"uuid\"):\n print(\n \"Volume name:-%s ; Volume uuid:-%s \" %\n (volume.name, volume.uuid))\n except NetAppRestError as error:\n print(\"Error:- \" % error.http_err_response.http_response.text)\n print(\"Exception caught :\" + str(error))", "def display_attached_volumes(oci_sess, iscsiadm_session, disks, output_mode, details, truncate):\n #\n # todo: handle the None ocisession more elegantly.\n oci_vols = list()\n try:\n if bool(oci_sess):\n oci_vols = sorted(oci_sess.this_instance().all_volumes())\n except Exception as e:\n _logger.debug('Cannot get all volumes of this instance : %s', str(e))\n\n if not iscsiadm_session and len(oci_vols) > 0:\n print(\"Local iSCSI info not available. \")\n print(\"List info from Cloud instead(No boot volume).\")\n print(\"\")\n _display_oci_volume_list(oci_vols, output_mode, details, truncate)\n\n _columns = []\n if details:\n _columns.append(['Target', 32, 'target'])\n _columns.append(['Volume name', 32, 'name'])\n if details:\n _columns.append(['Volume OCID', 32, 'ocid'])\n _columns.append(['Persistent portal', 20, 'p_portal'])\n _columns.append(['Current portal', 20, 'c_portal'])\n _columns.append(['Session State', 13, 's_state'])\n _columns.append(['Attached device', 15, 'dev'])\n _columns.append(['Size', 6, 'size'])\n\n # this is only used in compatibility mode i.e using 'text'\n partitionPrinter = get_row_printer_impl('text')(title='Partitions',\n columns=(['Device', 8, 'dev_name'],\n ['Size', 6, 'size'],\n ['Filesystem', 12, 'fstype'],\n ['Mountpoint', 12, 'mountpoint']))\n _items = []\n for iqn in list(iscsiadm_session.keys()):\n _item = {}\n oci_vol = get_volume_by_iqn(oci_sess, iqn)\n _item['target'] = iqn\n if oci_vol is not None:\n _item['name'] = oci_vol.get_display_name()\n _item['ocid'] = oci_vol.get_ocid()\n _item['p_portal'] = \"%s:%s\" % (iscsiadm_session[iqn]['persistent_portal_ip'],\n iscsiadm_session[iqn]['persistent_portal_port'])\n _item['c_portal'] = \"%s:%s\" % (iscsiadm_session[iqn]['current_portal_ip'],\n iscsiadm_session[iqn]['current_portal_port'])\n _item['s_state'] = iscsiadm_session[iqn].get('session_state', 'n/a')\n device = iscsiadm_session[iqn].get('device', None)\n if device is None:\n _item['dev'] = '(not attached)'\n else:\n _item['dev'] = device\n if device in disks:\n _item['size'] = disks[device]['size']\n\n _items.append(_item)\n\n iscsi_dev_printer = None\n if len(_items) == 0:\n print('No iSCSI devices attached.')\n elif output_mode == 'compat':\n iscsi_dev_printer = get_row_printer_impl('text')(\n title='Currently attached iSCSI devices', columns=_columns, text_truncate=truncate)\n else:\n iscsi_dev_printer = get_row_printer_impl(output_mode)(\n title='Currently attached iSCSI devices', columns=_columns, text_truncate=truncate)\n if bool(iscsi_dev_printer):\n iscsi_dev_printer.printHeader()\n for _item in _items:\n iscsi_dev_printer.printRow(_item)\n if output_mode == 'compat':\n if 'partitions' not in 
disks[_item['dev']]:\n #\n # iscsi_dev_printer.printKeyValue('File system type', disks[_item['dev']]['fstype'])\n # iscsi_dev_printer.printKeyValue('Mountpoint', disks[_item['dev']]['mountpoint'])\n fstype = disks[_item['dev']]['fstype'] if bool(disks[_item['dev']]['fstype']) else 'Unknown'\n iscsi_dev_printer.printKeyValue('File system type', fstype)\n mntpoint = disks[_item['dev']]['mountpoint'] if bool(disks[_item['dev']]['mountpoint']) else 'Not mounted'\n iscsi_dev_printer.printKeyValue('Mountpoint', mntpoint)\n else:\n partitions = disks[device]['partitions']\n partitionPrinter.printHeader()\n for part in sorted(list(partitions.keys())):\n # add it as we need it during the print\n partitions[part]['dev_name'] = part\n partitionPrinter.printRow(partitions[part])\n partitionPrinter.rowBreak()\n partitionPrinter.printFooter()\n partitionPrinter.finish()\n iscsi_dev_printer.rowBreak()\n iscsi_dev_printer.printFooter()\n iscsi_dev_printer.finish()", "def test_get_all_available_volumes(self):\n expected = ['/dev/dms1234567']\n actual = self.connector.get_all_available_volumes(None)\n self.assertItemsEqual(expected, actual)", "def accept(self, context, transfer_id, auth_key):\n # We must use an elevated context to see the volume that is still\n # owned by the donor.\n context.authorize(policy.ACCEPT_POLICY)\n transfer = self.db.transfer_get(context.elevated(), transfer_id)\n\n crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)\n if crypt_hash != transfer['crypt_hash']:\n msg = (_(\"Attempt to transfer %s with invalid auth key.\") %\n transfer_id)\n LOG.error(msg)\n raise exception.InvalidAuthKey(reason=msg)\n\n volume_id = transfer['volume_id']\n vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id)\n if vol_ref['consistencygroup_id']:\n msg = _(\"Volume %s must not be part of a consistency \"\n \"group.\") % vol_ref['id']\n LOG.error(msg)\n raise exception.InvalidVolume(reason=msg)\n\n try:\n values = {'per_volume_gigabytes': vol_ref.size}\n QUOTAS.limit_check(context, project_id=context.project_id,\n **values)\n except exception.OverQuota as e:\n quotas = e.kwargs['quotas']\n raise exception.VolumeSizeExceedsLimit(\n size=vol_ref.size, limit=quotas['per_volume_gigabytes'])\n\n try:\n reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n vol_ref.volume_type_id)\n reservations = QUOTAS.reserve(context, **reserve_opts)\n except exception.OverQuota as e:\n quota_utils.process_reserve_over_quota(context, e,\n resource='volumes',\n size=vol_ref.size)\n try:\n donor_id = vol_ref['project_id']\n reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n vol_ref.volume_type_id)\n donor_reservations = QUOTAS.reserve(context.elevated(),\n project_id=donor_id,\n **reserve_opts)\n except Exception:\n donor_reservations = None\n LOG.exception(\"Failed to update quota donating volume\"\n \" transfer id %s\", transfer_id)\n\n snap_res = None\n snap_donor_res = None\n if transfer['no_snapshots'] is False:\n snapshots = objects.SnapshotList.get_all_for_volume(\n context.elevated(), volume_id)\n volume_type_id = vol_ref.volume_type_id\n snap_res, snap_donor_res = self._handle_snapshot_quota(\n context, snapshots, volume_type_id, vol_ref['project_id'])\n\n volume_utils.notify_about_volume_usage(context, vol_ref,\n \"transfer.accept.start\")\n\n encryption_key_transferred = False\n try:\n # Transfer ownership of the volume now, must use an elevated\n # context.\n 
self.volume_api.accept_transfer(context,\n vol_ref,\n context.user_id,\n context.project_id,\n transfer['no_snapshots'])\n if vol_ref.encryption_key_id is not None:\n key_transfer.transfer_accept(context, vol_ref, conf=CONF)\n encryption_key_transferred = True\n\n self.db.transfer_accept(context.elevated(),\n transfer_id,\n context.user_id,\n context.project_id,\n transfer['no_snapshots'])\n QUOTAS.commit(context, reservations)\n if snap_res:\n QUOTAS.commit(context, snap_res)\n if donor_reservations:\n QUOTAS.commit(context, donor_reservations, project_id=donor_id)\n if snap_donor_res:\n QUOTAS.commit(context, snap_donor_res, project_id=donor_id)\n LOG.info(\"Volume %s has been transferred.\", volume_id)\n except Exception:\n # If an exception occurs after the encryption key was transferred\n # then we need to transfer the key *back* to the service project.\n # This is done by making another key transfer request.\n if encryption_key_transferred:\n key_transfer.transfer_create(context, vol_ref, conf=CONF)\n with excutils.save_and_reraise_exception():\n QUOTAS.rollback(context, reservations)\n if snap_res:\n QUOTAS.rollback(context, snap_res)\n if donor_reservations:\n QUOTAS.rollback(context, donor_reservations,\n project_id=donor_id)\n if snap_donor_res:\n QUOTAS.rollback(context, snap_donor_res,\n project_id=donor_id)\n\n vol_ref = objects.Volume.get_by_id(context.elevated(),\n volume_id)\n volume_utils.notify_about_volume_usage(context, vol_ref,\n \"transfer.accept.end\")\n return {'id': transfer_id,\n 'display_name': transfer['display_name'],\n 'volume_id': vol_ref['id']}", "def volume_list_paged(request, search_opts=None, marker=None, paginate=False,\n sort_dir=\"desc\"):\n has_more_data = False\n has_prev_data = False\n volumes = []\n\n # To support filtering with group_id, we need to use the microversion.\n c_client = _cinderclient_with_generic_groups(request)\n if c_client is None:\n return volumes, has_more_data, has_prev_data\n\n # build a dictionary of volume_id -> transfer\n transfers = {t.volume_id: t\n for t in transfer_list(request, search_opts=search_opts)}\n\n if paginate:\n page_size = utils.get_page_size(request)\n # sort_key and sort_dir deprecated in kilo, use sort\n # if pagination is true, we use a single sort parameter\n # by default, it is \"created_at\"\n sort = 'created_at:' + sort_dir\n for v in c_client.volumes.list(search_opts=search_opts,\n limit=page_size + 1,\n marker=marker,\n sort=sort):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n volumes, has_more_data, has_prev_data = update_pagination(\n volumes, page_size, marker, sort_dir)\n else:\n for v in c_client.volumes.list(search_opts=search_opts):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n\n return volumes, has_more_data, has_prev_data", "def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )", "def test_azure_service_api_volumes_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot KDEs of the same feature and different classes together, so as to compare the different classes.
def plot_fits(kdes, features, classes, model_dir):
    values = np.linspace(-3, 2, 100)
    values = values.reshape(len(values), 1)

    for feature_name in features:
        f, ax = plt.subplots(figsize=(FIG_WIDTH, FIG_HEIGHT), dpi=DPI)
        ax.set_title(feature_name)
        plt.xlabel(feature_name, fontsize=10)
        plt.ylabel("Probability Density", fontsize=10)

        # Plot KDE fit for this feature, per class
        for class_name in classes:
            kde = kdes[class_name][feature_name]
            if kde is not None:
                # sample probabilities for the range of values
                probabilities = np.exp(kde.score_samples(values))
                ax.plot(values, probabilities, label=class_name)

        ax.legend()
        util.save_plot(model_dir, feature_name)
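A minimal usage sketch for the function above, with made-up class and feature names and random data; it assumes the module-level pieces the function relies on (matplotlib's plt, FIG_WIDTH, FIG_HEIGHT, DPI, and util.save_plot) are defined alongside it.

# Hypothetical setup: one KernelDensity per (class, feature), fit on standardized values.
import numpy as np
from sklearn.neighbors import KernelDensity

classes = ['healthy', 'faulty']          # assumed class labels
features = ['temperature', 'vibration']  # assumed feature names

rng = np.random.default_rng(0)
kdes = {
    c: {f: KernelDensity(bandwidth=0.2).fit(rng.normal(size=(200, 1))) for f in features}
    for c in classes
}

plot_fits(kdes, features, classes, model_dir='plots/')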
[ "def draw_data(X):\n dist = k_dist(X, k=3)\n plt.plot(dist)\n plt.text(700, dist[700], 'k=3')\n\n dist = k_dist(X, k=7)\n plt.plot(dist)\n plt.text(800, dist[700], 'k=7')\n\n dist = k_dist(X, k=13)\n plt.plot(dist)\n plt.text(900, dist[700], 'k=13')\n plt.title('k-dist plot')\n plt.ylabel('dist')\n plt.xlabel('num')\n plt.savefig('k-dist.pdf')\n\n plt.close()\n\n dbs = DBSCAN(eps=0.045, min_samples=7)\n clusters = dbs.fit_predict(X)\n colors = [\"#ffffff\", \"#33cc33\", \"#ccff33\", \"#0033cc\", \"#cc33ff\",\n \"#ff6600\", \"#ff0000\", \"#663300\", \"#660033\", \"#ff00ff\",\n \"#00ffff\", \"#666699\", \"#333300\", \"#669999\", \"#0000cc\"]\n for i, g in enumerate(clusters):\n plt.scatter(X[i][0], X[i][1], color=colors[int(g) + 1], edgecolors='black')\n plt.title('eps=0.045, min samples=7')\n plt.savefig('groups.pdf')", "def vizualization():\n X = np.array(pandas.read_csv(\"dbscan-paintedData.csv\", sep=\"\\t\"))\n plt.figure()\n plt.subplot(2, 1, 1)\n for k in [1, 3, 15]:\n dists = k_dist(X, k=k)\n plt.plot(dists, label=\"k=%d\" % k)\n plt.legend()\n plt.xlabel(\"i-ti primer\")\n plt.ylabel(\"razdalja\")\n # plt.show()\n plt.subplot(2, 1, 2)\n dbs = DBSCAN(3, 0.07)\n clusters = dbs.fit_predict(X)\n classes = np.unique(clusters)\n for cls in classes:\n mask = clusters == cls\n plt.scatter(X[mask, 0], X[mask, 1], 10, label=\"Noise\" if cls == -1 else cls)\n plt.legend()\n plt.show()", "def kdeplot(self, x, y, ax=None, **kwargs):\n validate_ax(ax)\n\n x = np.ravel(x)\n y = np.ravel(y)\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n x, y = self._reverse_if_vertical(x, y)\n\n return sns.kdeplot(x=x, y=y, ax=ax, clip=self.kde_clip, **kwargs)", "def plot_dist_features(df, features, save=False, path=str):\n\n import matplotlib as mpl\n import matplotlib.cm as cm\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n for i in features:\n fig, axs = plt.subplots(1, figsize=(9,6))\n sns.set(style='white', palette='deep')\n\n x = df[i]\n if i == 'duration_ms':\n x = x/1000\n sns.kdeplot(x, label = 'duration in seconds', shade=True).set_title(\"Distribution of Feature Duration\")\n else:\n sns.kdeplot(x, label = i, shade=True).set_title(\"Distribution of Feature \"+i)\n\n if save:\n filename='distribution_plot_'+i\n fig.savefig(path+filename)\n\n return", "def plot_ks_statistic(clf, X, y, title='KS Statistic Plot', do_split=True,\n test_split_ratio=0.33, random_state=None, ax=None):\n if not hasattr(clf, 'predict_proba'):\n raise TypeError('\"predict_proba\" method not in classifier. 
Cannot calculate ROC Curve.')\n\n if not do_split:\n if len(clf.classes_) != 2:\n raise ValueError('Cannot calculate KS statistic for data with '\n '{} category/ies'.format(len(clf.classes_)))\n probas = clf.predict_proba(X)\n y_true = y\n\n else:\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_ratio,\n stratify=y, random_state=random_state)\n clf_clone = clone(clf)\n clf_clone.fit(X_train, y_train)\n if len(clf_clone.classes_) != 2:\n raise ValueError('Cannot calculate KS statistic for data with '\n '{} category/ies'.format(len(clf_clone.classes_)))\n probas = clf_clone.predict_proba(X_test)\n y_true = y_test\n\n # Compute KS Statistic curves\n thresholds, pct1, pct2, ks_statistic, \\\n max_distance_at, classes = binary_ks_curve(y_true, probas[:, 1].ravel())\n\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n\n ax.set_title(title)\n\n ax.plot(thresholds, pct1, lw=3, label='Class {}'.format(classes[0]))\n ax.plot(thresholds, pct2, lw=3, label='Class {}'.format(classes[1]))\n idx = np.where(thresholds == max_distance_at)[0][0]\n ax.axvline(max_distance_at, *sorted([pct1[idx], pct2[idx]]),\n label='KS Statistic: {:.3f} at {:.3f}'.format(ks_statistic, max_distance_at),\n linestyle=':', lw=3, color='black')\n\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.0])\n\n ax.set_xlabel('Threshold')\n ax.set_ylabel('Percentage below threshold')\n ax.legend(loc='lower right')\n\n return ax", "def plot_distributions(X_true, X_fake, y_true=None, y_fake=None):\n no_vars = X_true.shape[1]\n combinations = [(x,y) for x in range(no_vars) for y in range(no_vars) if y>x]\n\n if y_true:\n minority_true = X_true[y_true==1,:]\n minority_fake = X_fake[y_fake==1,:]\n majority_true = X_true[y_true==0,:]\n majority_fake = X_fake[y_fake==0,:]\n else:\n minority_true = X_true\n minority_fake = X_fake\n\n\n fig, axes = plt.subplots(nrows=no_vars, ncols=no_vars, sharex=True,\\\n squeeze=True,figsize=(10,10))\n for y in axes:\n for x in y:\n x.set_xticklabels([])\n x.set_yticklabels([])\n\n # Plot univariate minority distribution on diagonal\n for i in range(no_vars):\n print(f\"Plotting univariate distribution {i+1}/{no_vars}\")\n sns.kdeplot(minority_true[:,i], alpha=0.5, shade=True, color=\"blue\",\\\n ax=axes[(i,i)])\n sns.kdeplot(minority_fake[:,i], alpha=0.5, shade=True, color=\"green\",\\\n ax=axes[(i,i)])\n\n # Plot conditional distributions in the lower and upper triangles\n for i,j in combinations:\n print(f\"Plotting univariate distribution {i},{j}\")\n axes[(i,j)].set_ylim(0,1)\n # majority (upper right)\n if y_true is not None:\n sns.kdeplot(majority_real[0:1000,i], majority_real[0:1000,j],\\\n alpha=0.5, cmap=\"Blues\", ax=axes[(i,j)])\n sns.kdeplot(majority_fake[:,i], majority_fake[:,j],\\\n alpha=0.5, cmap=\"Greens\", ax=axes[(i,j)], )\n\n # minority (lower left)\n sns.kdeplot(minority_true[:,i], minority_true[:,j], alpha=0.5,\\\n cmap=\"Blues\", ax=axes[(j,i)])\n sns.kdeplot(minority_fake[:,i], minority_fake[:,j], alpha=0.5,\\\n cmap=\"Greens\", ax=axes[(j,i)])\n\n return fig", "def plot(self):\n\n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n for _ in range (self.n_estimators):\n plt.subplot(2, 5, _+1 )\n x_min, x_max = self.split_x[_].iloc[:, 0].min() - 1, self.split_x[_].iloc[:, 0].max() + 1\n y_min, y_max = self.split_x[_].iloc[:, 1].min() - 1, self.split_x[_].iloc[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n print(xx,yy)\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n Z = 
self.estimators_list[_].predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n for i, color in zip(range(n_classes), plot_colors):\n idx = np.where(self.split_y[_] == i)\n for i in range (len(idx[0])):\n plt.scatter(self.split_x[_].loc[idx[0][i]][0], self.split_x[_].loc[idx[0][i]][1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n plt.suptitle(\"RandomForestClassifier:Decision surface of a decision tree using two features\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig1 = plt\n\n # Figure 2\n print(\"Printing combining decision surface \")\n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n x_min, x_max = self.data.iloc[:, 0].min() - 1, self.data.iloc[:, 0].max() + 1\n y_min, y_max = self.data.iloc[:, 1].min() - 1, self.data.iloc[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n Z = self.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = np.array(Z)\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n for i, color in zip(range(n_classes), plot_colors):\n idx = np.where(self.labels == i)\n for i in range (len(idx[0])):\n plt.scatter(self.data.loc[idx[0][i]][0], self.data.loc[idx[0][i]][1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n plt.suptitle(\"RandomForestClassifier:Decision surface by combining all the estimators\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig2 = plt\n\n return [fig1,fig2]", "def plotFeatureValuesAsHistogram(dataSet, featureColID, classColID, classList=[], classNames = {}, removeNans=True, sigmaWidth=6):\n colorArray = ['red', 'blue', 'green', 'cyan', 'magenta', 'yellow', 'black', 'orange']\n for classID in classList:\n\n #data cleaning\n dataFilteredByClass = dataSet[dataSet[:, classColID] == classID]\n if removeNans:\n dataFilteredByClass = dataFilteredByClass[~np.isnan(dataFilteredByClass[:, featureColID])]\n dataToPlot = dataFilteredByClass[:, featureColID]\n\n #gaussian fit\n mu, sigma, alpha = fitGaussian(dataToPlot)\n\n #plotting\n color=colorArray[classList.index(classID)]\n plt.plot(\n [x for x in range(int(mu - sigmaWidth * sigma), int(mu + sigmaWidth * sigma))],\n [gaussCurve(x, mu, sigma, alpha) for x in\n range(int(mu - sigmaWidth * sigma), int(mu + sigmaWidth * sigma))],\n color=color,\n label=str(classNames[classID])\n )\n plt.hist(dataToPlot, len(set(dataToPlot)), color=color)\n\n legend = plt.legend(shadow=True)\n plt.show()", "def KDE_fit(x,y):\n \n data = np.vstack([x, y]).T\n #Grid search for best KDE bandwidth\n params = {'bandwidth': np.linspace(np.min(np.diff(y)),np.max(np.diff(y)),100)}\n grid = GridSearchCV(KernelDensity(), params)\n grid.fit(data)\n \n KDE_bandwidth = grid.best_estimator_.bandwidth\n \n kde = grid.best_estimator_\n X, Y = np.meshgrid(np.linspace(np.min(x),np.max(x),100), np.linspace(np.min(y),np.max(y),100))\n\n xy = np.vstack([X.ravel(), Y.ravel()]).T\n #compute the KDE on a 100x100 grid of points\n Z = np.exp(kde.score_samples(xy)).reshape(X.shape)\n \n #fit KDE estimation with 2 Gaussian model\n g2D_init1 = Gaussian2D(amplitude=np.max(Z), x_mean=X[np.unravel_index(np.argmax(Z),Z.shape)], y_mean=Y[np.unravel_index(np.argmax(Z),Z.shape)], x_stddev=np.std(x), y_stddev=np.std(y), theta=0, bounds={'theta': (0,np.pi),'x_mean': (np.min(x),np.max(x)),'y_mean': 
(np.min(y),np.max(y)),'x_stddev':(0.001,1),'y_stddev':(0.001,1)})\n g2D_init2 = Gaussian2D(amplitude=np.median(Z), x_mean=np.median(x), y_mean=np.median(y), x_stddev=np.std(x), y_stddev=np.std(y), theta=0, bounds={'theta': (0,np.pi),'x_mean': (np.min(x),np.max(x)),'y_mean': (np.min(y),np.max(y)),'x_stddev':(0.001,1),'y_stddev':(0.001,1)})\n g2D_init = g2D_init1 + g2D_init2\n\n fitter = fitting.LevMarLSQFitter()\n \n g2D = fitter(g2D_init, X, Y, Z)\n \n KD_fit_sqresid = np.mean(np.power(Z-g2D(X,Y),2.0))\n \n #Sort by largest and smallest amplitude gaussian\n i_large = np.argmax([g2D.amplitude_0,g2D.amplitude_1])\n i_small = np.argmin([g2D.amplitude_0,g2D.amplitude_1])\n g2D_large = g2D[i_large]\n g2D_small = g2D[i_small]\n \n amp_0 = g2D_large.amplitude.value\n amp_1 = g2D_small.amplitude.value\n \n xmean_0 = g2D_large.x_mean.value\n xmean_1 = g2D_small.x_mean.value\n \n ymean_0 = g2D_large.y_mean.value\n ymean_1 = g2D_small.y_mean.value\n \n if g2D_large.x_stddev >= g2D_large.y_stddev:\n \n major_std_0 = g2D_large.x_stddev.value\n theta_0 = g2D_large.theta.value\n ecc_0 = np.sqrt(1.0 - (g2D_large.y_stddev.value/g2D_large.x_stddev.value)**2.0)\n \n else:\n \n major_std_0 = g2D_large.y_stddev.value\n \n if g2D_large.theta <= np.pi/2:\n theta_0 = np.pi/2 + g2D_large.theta.value\n \n elif g2D_large.theta > np.pi/2:\n theta_0 = g2D_large.theta.value - np.pi/2\n \n ecc_0 = np.sqrt(1.0 - (g2D_large.x_stddev.value/g2D_large.y_stddev.value)**2.0)\n \n if g2D_small.x_stddev >= g2D_small.y_stddev:\n \n major_std_1 = g2D_small.x_stddev.value\n theta_1 = g2D_small.theta.value\n ecc_1 = np.sqrt(1.0 - (g2D_small.y_stddev.value/g2D_small.x_stddev.value)**2.0)\n \n else:\n \n major_std_1 = g2D_small.y_stddev.value\n \n if g2D_small.theta <= np.pi/2:\n theta_1 = np.pi/2 + g2D_small.theta.value\n \n elif g2D_small.theta > np.pi/2:\n theta_1 = g2D_small.theta.value - np.pi/2\n \n ecc_1 = np.sqrt(1.0 - (g2D_small.x_stddev.value/g2D_small.y_stddev.value)**2.0)\n \n return (KDE_bandwidth, KD_fit_sqresid, amp_0, xmean_0, ymean_0, major_std_0, theta_0,\n ecc_0, amp_1, xmean_1, ymean_1, major_std_1, theta_1, ecc_1)", "def plot_k(self, k, xlabel=\"Feature 1\", ylabel=\"Feature 2\"):\n fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True, figsize=(12,9))\n axes = [ax1, ax2, ax3, ax4, ax5, ax6]\n\n for i in range(2, k+1):\n kmeans = Kmeans(i)\n kmeans.fit(self._data)\n\n cycol = cycle('bgrcmk')\n\n # Plot points belonging to clusters\n for j in range(kmeans._k):\n cluster_i_inds = kmeans._groups[kmeans._groups[:, 2] == j]\n axes[i-2].scatter(cluster_i_inds[:, 0], cluster_i_inds[:, 1], c=next(cycol), s=8)\n\n # Plot cluster centres\n for j in range(i):\n axes[i-2].scatter(kmeans._centroids[:, 0], kmeans._centroids[:, 1], c='w', marker='x', s=100)\n\n axes[i-2].set_title(f'K-means clustering with {i} clusters')\n axes[i-2].set_xlabel(xlabel)\n axes[i-2].set_ylabel(ylabel)\n\n plt.savefig(f\"./plots/clustering_results/kmeans_clusters_2-{k}_plots.pdf\")\n plt.show()\n plt.close()", "def plot(self):\n \n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n for _ in range (self.n_estimators):\n \n plt.subplot(2, 5, _+1 )\n \n x_min, x_max = self.split_x[_].iloc[:, 0].min() - 1, self.split_x[_].iloc[:, 0].max() + 1\n y_min, y_max = self.split_x[_].iloc[:, 1].min() - 1, self.split_x[_].iloc[:, 1].max() + 1\n \n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n \n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n \n Z = 
self.estimators_list[_].predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n \n for i, color in zip(range(n_classes), plot_colors):\n # idx = np.where(self.split_y[_] == i)\n plt.scatter(self.split_x[_][0], self.split_x[_][1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n \n plt.suptitle(\"RandomForestRegressor: Decision surface of a decision tree using two features\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig1 = plt\n\n # Figure 2\n print(\"Printing decision surface by combining the individual estimators\")\n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n x_min, x_max = self.data.iloc[:, 0].min() - 1, self.data.iloc[:, 0].max() + 1\n y_min, y_max = self.data.iloc[:, 1].min() - 1, self.data.iloc[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n Z = self.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = np.array(Z)\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n for i, color in zip(range(n_classes), plot_colors):\n # idx = np.where(self.labels == i)\n plt.scatter(self.data[0], self.data[1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n plt.suptitle(\"RandomForestRegressor: Decision surface by combining all the estimators\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig2 = plt\n\n return [fig1,fig2]", "def feature_distributions(self, viz_type='hist', bins=None, max_features=None, grid_size=4):\n self.print_message('Generating feature distribution plots...')\n\n if viz_type == 'hist':\n hist = True\n kde = False\n elif viz_type == 'kde':\n hist = False\n kde = True\n elif viz_type == 'both':\n hist = True\n kde = True\n else:\n raise Exception('Visualization type not supported.')\n\n data = self.data.fillna(0)\n\n if max_features:\n data = data.iloc[:, :max_features]\n\n n_features = len(data.columns)\n plot_size = grid_size ** 2\n n_plots = n_features // plot_size if n_features % plot_size == 0 else n_features // plot_size + 1\n\n for i in range(n_plots):\n fig, ax = plt.subplots(grid_size, grid_size, figsize=(self.fig_size, self.fig_size / 2))\n for j in range(plot_size):\n index = (i * plot_size) + j\n if index < n_features:\n if type(data.iloc[0, index]) is str:\n sb.countplot(x=data.columns[index], data=data,\n ax=ax[j // grid_size, j % grid_size])\n else:\n sb.distplot(a=data.iloc[:, index], bins=bins, hist=hist, kde=kde,\n label=data.columns[index], ax=ax[j // grid_size, j % grid_size],\n kde_kws={\"shade\": True})\n fig.tight_layout()\n\n self.print_message('Plot generation complete.')", "def overlapping_density(input_vars):\n # Set size of figure\n fig = plt.figure(figsize=(16, 10), dpi=80)\n\n # Starter code for figuring out which package to use\n for variable in input_vars:\n sns.kdeplot(variable[1], shade=True, color=variable[2],\n label=variable[0], figure=fig)\n return fig", "def plot_features(path, erf_df, show_fig=False):\r\n\r\n\r\n f = sns.boxplot(x='Condition', y='PPG_Rate', hue='Gender', data=erf_df)\r\n f.set_xlabel('Emoce')\r\n f.set_ylabel('Rozdíl středních hodnot tepu [bpm]')\r\n f.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n f.set_xticklabels(['Neutrální', 'Nuda', 'Pozitivní', 'Radost', 'Strach', 'Zmatek', 'Znechucení'])\r\n\r\n handles, _ = f.get_legend_handles_labels()\r\n f.legend(handles, ['Muži', 'Ženy'], 
loc=\"best\") # Associate\r\n plt.savefig('{}/heart_rate.png'.format(path))\r\n\r\n if show_fig:\r\n plt.show()\r\n\r\n f.clear()\r\n plt.cla()\r\n plt.clf()\r\n\r\n\r\n g = sns.boxplot(x='Condition', y='EDA_Tonic', hue='Gender', data=erf_df)\r\n g.set_xlabel('Emoce')\r\n g.set_ylabel('EDA tónická složka [uS]')\r\n g.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n g.set_xticklabels(['Neutrální', 'Nuda', 'Pozitivní', 'Radost', 'Strach', 'Zmatek', 'Znechucení'])\r\n\r\n handles, _ = g.get_legend_handles_labels()\r\n g.legend(handles, ['Muži', 'Ženy'], loc=\"best\") # Associate\r\n plt.savefig('{}/eda_tonic.png'.format(path))\r\n\r\n if show_fig:\r\n plt.show()\r\n\r\n g.clear()\r\n plt.cla()\r\n plt.clf()\r\n\r\n\r\n h = sns.boxplot(x='Condition', y='EDA_Phasic', hue='Gender', data=erf_df)\r\n h.set_xlabel('Emoce')\r\n h.set_ylabel('EDA fázická složka [uS]')\r\n h.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n h.set_xticklabels(['Neutrální', 'Nuda', 'Pozitivní', 'Radost', 'Strach', 'Zmatek', 'Znechucení'])\r\n\r\n handles, _ = h.get_legend_handles_labels()\r\n h.legend(handles, ['Muži', 'Ženy'], loc=\"best\") # Associate\r\n plt.savefig('{}/eda_phasic.png'.format(path))\r\n\r\n if show_fig:\r\n plt.show()\r\n\r\n h.clear()\r\n plt.cla()\r\n plt.clf()", "def plot_scatter(X: np.ndarray, y: np.ndarray, idx_to_class: Dict[int, str]):\n # Choose a color palette with seaborn\n n_classes: int = len(idx_to_class)\n palette: np.ndarray = np.array(sns.color_palette(\"hls\", n_classes))\n\n # Create a scatter plot\n figure = plt.figure()\n ax = plt.subplot(aspect=\"equal\")\n scatter = ax.scatter(\n X[:, 0], X[:, 1], lw=0, s=40, c=palette[y.astype(np.int)]\n )\n ax.axis(\"tight\")\n ax.axis(\"off\")\n\n # Add the labels for each sample corresponding to the label\n for i in range(n_classes):\n # Position of each label at median of data points\n x_text, y_text = np.median(X[y == i, :], axis=0)\n text = ax.text(x_text, y_text, str(i), fontsize=20)\n text.set_path_effects([\n PathEffects.Stroke(linewidth=5, foreground=\"w\"),\n PathEffects.Normal()\n ])\n\n # Add legends for each class\n annotations: List = []\n for i in range(n_classes):\n circle = Line2D([0], [0], marker='o', color=palette[i], label=f\"{i}: {idx_to_class[i]}\")\n annotations.append(circle)\n plt.legend(handles=annotations, loc=\"best\")\n\n return figure, ax, scatter", "def kernel_plot(cls, X, y, c_plot, remove_val=[np.nan], rug_sample=1000):\r\n y_vals = y.unique() if y is not None else []\r\n \r\n n_plots = len(c_plot)\r\n ax = cls.get_figure(n_plots)\r\n \r\n for i, c in enumerate(c_plot): \r\n axi = ax[i] if n_plots > 1 else ax\r\n n = []\r\n if len(y_vals) > 1: \r\n for j, v in enumerate(y_vals):\r\n x = X.loc[y == v, c]\r\n x = x[~x.isin(remove_val)]\r\n name = f'{v} ({cls.fnum(x.mean())}, {cls.fnum(x.std())})' \r\n x.name = name\r\n n.append(x.size)\r\n plot = sns.kdeplot(x, shade=True, ax=axi)\r\n \r\n if rug_sample > 0:\r\n if len(plot.get_lines()) > j:\r\n color = plot.get_lines()[j].get_c()\r\n s = x.sample(n=rug_sample) if x.size > rug_sample else x\r\n s.name = name\r\n sns.rugplot(s, ax=axi, color=color)\r\n else: \r\n x = X.loc[~X[c].isin(remove_val), c]\r\n n.append(x.size)\r\n plot = sns.kdeplot(x, shade=True, ax=axi)\r\n\r\n if rug_sample > 0:\r\n s = x.sample(n=rug_sample) if x.size > rug_sample else x\r\n sns.rugplot(s, ax=axi)\r\n\r\n plot.set_title(f'{c} - {str(remove_val)}, n={cls.fnum(sum(n))} {str(list(map(cls.fnum, n)))}')\r\n \r\n plt.show()", "def plot_stackdist(data, size=.5, aspect=12, 
x_labels=None,\n y_labels=None, palette=None, g=None):\n\n n_feat = data[0].shape[1]\n x = np.concatenate([d.ravel() for d in data], axis=0)\n f = np.concatenate([(np.ones_like(d) * np.arange(d.shape[1])).ravel()\n for d in data], axis=0)\n g = np.concatenate([i * np.ones_like(d.ravel()).astype(int)\n for i, d in enumerate(data)], axis=0)\n df = pd.DataFrame(dict(x=x, f=f, g=g))\n\n if not palette:\n palette = list(palettes.msme_rgb.values())[::-1]\n\n # Initialize the FacetGrid object\n g = sns.FacetGrid(df, row=\"g\", col=\"f\", hue=\"f\",\n aspect=aspect, size=size, palette=palette)\n\n # Draw the densities in a few steps\n global row_count, col_count\n col_count = 0\n row_count = 0\n\n def kdeplot(x, color='w', **kwargs):\n global row_count, col_count\n\n if color != 'w':\n color = sns.light_palette(\n color, n_colors=len(data) + 1)[row_count + 1]\n sns.kdeplot(x, color=color, **kwargs)\n\n col_count = (col_count + 1) % n_feat\n\n if col_count == 0:\n row_count = (row_count + 1) % len(data)\n\n g.map(kdeplot, \"x\", clip_on=False, shade=True, alpha=1., bw=.2)\n g.map(kdeplot, \"x\", clip_on=False, color='w', lw=2, bw=.2)\n\n # Add y labels\n g.set_titles(\"\")\n g.set_xlabels(\"\")\n for i, ax in enumerate(g.axes):\n if y_labels is not None:\n ax[0].text(0, .2, y_labels[i], fontweight=\"bold\", color='k',\n ha=\"left\", va=\"center\", transform=ax[0].transAxes)\n for j, a in enumerate(ax):\n a.set_facecolor((0, 0, 0, 0))\n if i == 0 and x_labels is not None:\n a.set_title(x_labels[j])\n\n # Set the subplots to overlap\n g.fig.subplots_adjust(hspace=-.25, wspace=0.1)\n\n # Remove axes details that don't play will with overlap\n g.set(yticks=[])\n g.set(xticks=[])\n g.despine(bottom=False, left=True)\n return g", "def fit(self, X,y):\n self.ndims = X.shape[1]\n self.kdes = {}\n for c in np.unique(y):\n stdx = np.min(np.std(X[y==c], axis=0))\n if \"bandwidth\" in self.kwargs.keys():\n bw = self.kwargs[\"bandwidth\"]\n kwargs = {k:v for k,v in self.kwargs.items() if k!=\"bandwidth\"}\n else:\n bw = 1.06*stdx*len(X)**-.2 if stdx != 0 else 1.\n kwargs = self.kwargs\n self.kdes[c] = KernelDensity(bandwidth=bw, **kwargs)\n self.kdes[c].fit(X[y==c])\n \n self.classes = list(self.kdes.keys())\n \n # build probability maps for each class\n n = int(np.power(1e5, 1/self.ndims))\n c = np.r_[[np.linspace(np.min(X[:,c]), np.max(X[:,c]),n) for c in range(X.shape[1])]]\n self.data_linspaces = c\n \n dV = np.product(c[:,1]-c[:,0])\n self.data_meshgrid = np.meshgrid(*c)\n c = np.r_[[i.flatten() for i in self.data_meshgrid]].T\n self.log_probmaps = [i.reshape( [n]*self.ndims) for i in self.kde_logprobs(c).T]\n self.probmaps = np.exp(self.log_probmaps) \n \n # compute kl divergences between each pair of classes\n kldivs = np.zeros((len(self.classes), len(self.classes)))\n epsilon = 1e-50\n for c1 in range(0, len(self.classes)):\n for c2 in range(0, len(self.classes)):\n if c1==c2:\n continue\n kldivs[c1,c2] = -dV*np.sum(self.probmaps[c1]*\\\n (np.log(self.probmaps[c2]+epsilon)-np.log(self.probmaps[c1]+epsilon)))\n self.kldivs = kldivs \n \n self.kldivs = pd.DataFrame(self.kldivs, index=self.classes, columns = self.classes)\n self.kldivs.index.name = \"class\"\n self.kldivs.columns.name = \"KL divergence\"\n \n return self", "def knnplot(num = 100, c1 = -1., sigma1 = 1.5,\n c2 = 1., sigma2 = 1.5, k = 5):\n\n # The number of data drawn from each Gaussian, and the density of\n # the background grid.\n halfnum = int(num / 2)\n gridsize = 0.03\n\n # Randomly draw from the normal distribution, to get x and y\n # 
values for the two categories.\n x1 = norm.rvs(size = halfnum, loc = c1, scale = sigma1)\n y1 = norm.rvs(size = halfnum, loc = c1, scale = sigma1)\n x2 = norm.rvs(size = halfnum, loc = c2, scale = sigma2)\n y2 = norm.rvs(size = halfnum, loc = c2, scale = sigma2)\n\n # Join the data into a single dataset.\n z1 = np.c_[x1, y1]\n z2 = np.c_[x2, y2]\n x = np.concatenate((z1, z2))\n\n # Create the labels for the data.\n y = np.concatenate(( np.zeros(halfnum), np.zeros(halfnum) + 1))\n\n # Create the kNN model, and fit it.\n model = skn.KNeighborsClassifier(k)\n model = model.fit(x, y)\n\n # Get the max and min values of the data.\n minx = min(x[:,0]); maxx = max(x[:,0])\n miny = min(x[:,1]); maxy = max(x[:,1])\n\n # Create a grid based on the domain, and the grid size.\n xx, yy = np.meshgrid(np.arange(minx, maxx, gridsize),\n np.arange(miny, maxy, gridsize))\n\n # Calculate the predicted values for each grid point.\n z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Create some colour maps.\n cmap_light = colors.ListedColormap(['#FFAAAA', '#AAAAFF'])\n cmap_bold = colors.ListedColormap(['#FF0000', '#0000FF'])\n\n # Reshape the predicted data, and plot it as the background.\n z = z.reshape(xx.shape)\n plt.pcolormesh(xx, yy, z, cmap = cmap_light)\n \n # Plot also the training points.\n plt.scatter(x[:, 0], x[:, 1], c = y, cmap = cmap_bold)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n\n # Tighten up, and save the figure.\n plt.tight_layout(0.1)\n plt.savefig('knndemo_k=' + str(k) + '.pdf')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single source document by slug.
def get(self, slug):
    try:
        return self._cache[slug]
    except KeyError:
        raise SourceFile.DoesNotExist
[ "def get_source(slug):\n from .models import SOURCES\n for cls in SOURCES:\n if cls.slug == slug:\n return cls", "def data_source_from_slug(slug: str) -> Optional[str]:\n if NAMESPACE_DELIMITER in slug:\n splitted = slug.split(NAMESPACE_DELIMITER)\n assert len(splitted) == 2, f'Unexpected slug structure {slug}'\n return splitted[0]\n else:\n return None", "def get_doc_from_shorturl(self, shortURL):\n doc = self.collection.find_one({'_id': shortURL})\n return doc", "def get_article(slug):\n article = Article.objects.all().filter(slug=slug).first()\n return article", "async def get_single(self, slug: str):\n q = 'SELECT postable.id, {table}.title, {table}.slug, {table}.author_id, {table}.body, postable.created_at, ' \\\n 'postable.updated_at FROM {schema}{table} INNER JOIN {schema}postable ON {table}.id = postable.id WHERE ' \\\n '{table}.slug = $1'\n\n return await self.fetch(q, args=(slug,), single=True, flatten=True)", "def get_source(self, uuid, **kwargs):\n return es.get_source(\n index=self.Meta.index, doc_type=self.Meta.doc_types, id=uuid, **kwargs\n )", "def get_article(slug):\n return Article.objects.get(slug=slug)", "def get_object_or_none(slug):\n app.logger.debug(\"get_object_or_none(%s)\" % slug)\n conn = get_p2p_connection()\n slug = slug.strip()\n\n try:\n app.logger.debug(\"conn.get_fancy_content_item(%s)\" % slug)\n # Custom query params to get the data we need\n query = {\n \"product_affiliate_code\": conn.product_affiliate_code,\n \"source_code\": conn.source_code,\n \"content_item_state_code\": \"all\",\n \"include\": [\n \"static_collections\",\n \"related_items\",\n \"embedded_items\",\n \"parent_related_items\",\n \"programmed_custom_params\",\n \"web_url\",\n \"geocodes\",\n \"notes\"\n ],\n }\n return conn.get_fancy_content_item(slug, query)\n except p2p.P2PNotFound:\n return None", "def getDocumentById(self, request):\n R = Resource.objects.getResourceById(request)\n D = Document.objects.get(resource=R)\n return D", "def get_short(self, s) -> Optional[Document]:\n if not is_short_uuid(s):\n raise Exception(\"Not a valid short uid.\")\n for d in self.index:\n if d.get(\"uid\").startswith(s):\n return self.get_doc(d.get(\"uid\"))\n return None", "def get_by_slug(service_slug):\n return Service.all().filter('slug = ', service_slug).get()", "def get_page(slug):\n return Page.objects.get(slug=slug)", "def _by_slug(slug, skip_deleted=True):\n if skip_deleted:\n query = (\n Match\n .query\n .filter_by(\n slug=slug,\n is_visible=True,\n is_deleted=False\n )\n )\n else:\n query = (\n Match\n .query\n .filter_by(\n slug=slug,\n is_visible=True,\n )\n )\n\n return query.first()", "def get_photo_by_slug_if_exists(self, slug):\n try:\n photo = self.db_session.query(SSTPhoto).filter(SSTPhoto.slug == slug).first()\n if photo:\n return photo\n else:\n return False\n except Exception as e:\n # print(f'Failure going to database: {sys.exc_info()}') # Remove\n # sys.stdout.flush() # Remove ########################################\n raise e", "def get_series_by_slug(slug):\n\n series_key = get_series_key(slug)\n series = series_key.get()\n return series", "def getBySlug( self, company_slug, load_level = 'light' ):\n qry = \"\"\"SELECT * FROM\n `%s`.`companies` \n WHERE `slug` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( company_slug ) )\n company = Mysql.ex( qry )\n if len( company ) == 0:\n return False\n company = self.getLoadLevel( company[0], load_level )\n return company", "def get_object(self):\n queryset = self.get_queryset()\n place = self.kwargs['place']\n return 
get_object_or_404(queryset, slug__iexact=place)", "def first_or_404(self):\n instance = self.first()\n if instance is None:\n abort(404)\n return instance", "def get_by_slug(self, slug, published_only=False):\n\n query = self._get_post_query()\n\n if published_only:\n post = query.filter(\n sqlalchemy.and_(\n app.models.Post.slug == slug,\n app.models.Post.published == True\n )\n ).one_or_none()\n else:\n post = cls.query.filter(app.models.Post.slug == slug).one_or_none()\n return post" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use dask to parallelize applying textacy Doc object creation to a dataframe
def dask_df_apply(df, text_col, textacy_col_name='textacy_doc', ncores=None, inplace=False):
    # If no number of cores to work with, default to max
    if not ncores:
        nCores = cpu_count() - 1
        nCores

    # Partition dask dataframe and map textacy doc apply
    # Sometimes this fails because it can't infer the dtypes correctly
    # meta=pd.Series(name=0, dtype='object') is a start
    # This is also a start https://stackoverflow.com/questions/40019905/how-to-map-a-column-with-dask?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
    # Possibly both the inner lambda apply and outer lambda df both need metadata?
    textacy_series = dd.from_pandas(df, npartitions=nCores).map_partitions(
        lambda df : df[text_col].apply(lambda x : textacy.doc.Doc(x, lang=nlp))).compute(get=get)

    # Name the series
    textacy_series.name = textacy_col_name

    # If inplace return the dataframe and textacy Series
    if inplace:
        return pd.concat([df, textacy_series], axis=1)

    # Else return just the Textacy series
    else:
        return textacy_series
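A minimal call sketch for the function above. It assumes the imports the function itself relies on (pandas as pd, dask.dataframe as dd, multiprocessing's cpu_count, a dask get scheduler, textacy, and a loaded spaCy nlp object) are already in scope; the example dataframe is made up.

# Hypothetical call: build textacy Docs for a small text column in parallel.
import pandas as pd

df = pd.DataFrame({'text': ['First document to parse.', 'Second document to parse.']})

# Returns just the Series of textacy Docs (ncores is left at its default)
doc_series = dask_df_apply(df, 'text')

# Returns the original dataframe with a new 'textacy_doc' column appended
df_with_docs = dask_df_apply(df, 'text', inplace=True)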
[ "def extract_text_features(content_df: pd.DataFrame, pos_file: str, ents_file: str):\n\n start_time = time.time()\n\n # Prepare spaCy model and document pipeline\n nlp = spacy.load(\"en_core_web_lg\")\n review_ids, texts = content_df[\"reviewid\"].values, content_df[\"content\"].values\n doc_generator = nlp.pipe(texts, disable=[\"parser\"], batch_size=32)\n doc_pipeline = zip(review_ids, doc_generator)\n pos_path, ents_path = f\"../data/{pos_file}\", f\"../data/{ents_file}\"\n\n with open(ents_path, \"w\") as pos_file, open(pos_path, \"w\") as ents_file:\n\n for i, (review_id, doc) in enumerate(doc_pipeline, start=1):\n\n desired_pos = [\"ADJ\", \"NOUN\", \"VERB\"]\n pos_tags_in_text = [\n \",\".join([str(review_id), tok.text, tok.pos_, \"\\n\"])\n for tok in doc\n if tok.pos_ in desired_pos\n ]\n\n desired_labels = [\"ORG\", \"PERSON\", \"GPE\", \"NORP\", \"EVENT\"]\n ents_in_text = [\n \",\".join([str(review_id), ent.text, ent.label_, \"\\n\"])\n for ent in doc.ents\n if ent.label_ in desired_labels\n ]\n\n pos_file.writelines(pos_tags_in_text)\n ents_file.writelines(ents_in_text)\n\n # Every 1000 docs, report time elapsed\n if i % 1000 == 0:\n print(\n f\"Finished {i} reviews. Time elapsed: {time.time() - start_time}s\"\n )\n\n return None", "def nlp_on_dataframe(\n self,\n df: pd.DataFrame,\n text_column_name: str,\n new_column_name: str = \"text_found\",\n main_document_keyword: str = None,\n ) -> pd.DataFrame:\n func = partial(\n self._check_diagnostic_report,\n main_document_keyword=main_document_keyword,\n )\n texts = [row for row in df[text_column_name].values]\n tqdm_text = f\"Searching for Sentences with {self.target_regex}\"\n if self.negation_regex is not None:\n tqdm_text += f\" and without {self.negation_regex}\"\n if self.num_processes > 1:\n pool = multiprocessing.Pool(self.num_processes)\n results = [\n result\n for result in tqdm(\n pool.imap(func, texts),\n total=len(df),\n desc=tqdm_text,\n )\n ]\n pool.close()\n pool.join()\n else:\n results = [\n result\n for result in tqdm(\n [func(text) for text in texts],\n total=len(df),\n desc=tqdm_text,\n )\n ]\n\n df[new_column_name + \"_sentences\"] = results\n df[new_column_name] = ~df[new_column_name + \"_sentences\"].isna()\n return df", "def get_corpus_text(engine, post_ids, raw_corpus_file):\n \n print('Downloading title and selftext of posts in random sample from database...')\n sql = (\n \"SELECT \"\n \"subreddit, \"\n \"fullname, \"\n \"CONCAT(title, ', ', selftext) as concat_text \"\n \"FROM all_posts \"\n \"WHERE fullname IN ('{}') \".format(\"', '\".join(post_ids['fullname']))\n )\n post_text = pd.read_sql(sql,\n engine,\n )\n\n '''\n print('Concatenating titles and selftext of each post...')\n post_text['concat_text'] = post_text[['title', 'selftext']].apply(lambda x: ' '.join(x).strip(),\n axis=1,\n )\n '''\n\n print('Concatenating posts for each subreddit into single string...')\n subreddit_text = post_text.groupby(['subreddit'])['concat_text'].apply(lambda x: ', '.join(x).strip())\n post_id_list = post_text.groupby(['subreddit'])['fullname'].apply(lambda x: x.tolist())\n subreddit_text = pd.concat([subreddit_text,\n post_id_list,\n ],\n axis=1,\n ).reset_index()\n subreddit_text.columns = ['subreddit', 'text', 'posts']\n print('Raw subreddit corpus data: \\n{}'.format(subreddit_text))\n print(subreddit_text.columns)\n\n print('Saving raw sample text to pickle file...')\n pd.to_pickle(subreddit_text, raw_corpus_file)\n\n return subreddit_text", "def getTextTagged(dataset):\n \n# 
nlp=spacy.load('fr_core_news_sm') #Load the pre-existed french model of spacy\n data={\"Name\":dataset[\"Name\"],\"TextTagged\":[]}\n texttagged=[]\n for text in dataset[\"Text\"]:\n ret=tagging(text,nlp)\n texttagged.append(ret[0])\n data[\"TextTagged\"]=texttagged\n return pd.DataFrame(data)", "def parse(self, texts, output_dir=None):\n if self.n_jobs == 1:\n return self.process_batch(texts, output_dir)\n partitions = minibatch(texts, size=self.batch_size)\n executor = Parallel(n_jobs=self.n_jobs, backend=\"multiprocessing\", prefer=\"processes\")\n do = delayed(partial(self.process_batch))\n tasks = (do(batch, output_dir, batch_i) for batch_i, batch in enumerate(partitions))\n return [doc for batch in executor(tasks) for doc in batch]", "def textblob_semantic_analysis(output_file, data):\n\n test_data, results = prepare_test_dataset()\n final_test = textblob_evaluation(test_data)\n\n accuracy = calculate_accuracy(final_test, results)\n logging.info(\"TextBlob library accuracy: {}.\".format(accuracy))\n\n final = textblob_evaluation(data)\n df = pd.DataFrame(final, columns=['Text', 'Feedback'])\n df.to_csv(output_file, index=False)\n\n return df", "def build_corpus_text_df(train_tagged_sentences):\n sentences_and_tags_dicts = []\n for sentence in train_tagged_sentences:\n concat_sen = ''\n concat_tags = ''\n for word, tag in sentence:\n concat_sen += ' ' + word\n concat_tags += ' ' + tag\n temp_dict = {'text': concat_sen, 'tags': concat_tags}\n # temp_dict = {'text': concat_sen}\n sentences_and_tags_dicts.append(temp_dict)\n\n return pd.DataFrame(sentences_and_tags_dicts)", "def build_embedding(df):\n def _tokenize(abstract_str):\n \"\"\"\n Tokenize a abstract string as a lit of words.\n input: str\n output: list[str]\n \"\"\"\n abstract_list = nltk.word_tokenize(abstract1_str)\n return abstract_list\n\n nb_features = 10\n embedding = np.zeros((len(df), len(df), nb_features))\n\n for i1, row1 in df.iterrows():\n for i2, row2 in df.iterrows():\n if i1 == i2:\n continue\n word1, abstract1_str = row1[\"title\"].lower(), row1[\"abstract\"].lower()\n word2, abstract2_str = row2[\"title\"].lower(), row2[\"abstract\"].lower()\n \n # Transform abstracts strings into lists of tokens\n abstract1 = _tokenize(abstract1_str)\n abstract2 = _tokenize(abstract2_str)\n\n # Surface features\n # Not implemented\n\n # Word N-gramms features\n # Not implemented\n \n # 3.2.2 Wikipedia abstract features\n # Il faut créer un pandas avec les abstracts des articles contenant l'un des mots.\n embedding[i1, i2, 0] = 1 if word1 in abstract2 else 0\n\n # Presence and distance\n if word1 in abstract2 and word2 in abstract2:\n # distance = abs(abstract2.index(word1) - abstract2.index(word2))\n distance = min(\n [abs(pos_word1 - pos_word2)\n for (pos_word1, pos_word2)\n in zip(\n [pos_word1 for pos_word1, word in enumerate(abstract2)\n if word == word1],\n [pos_word2 for pos_word2, word in enumerate(abstract2)\n if word == word2])\n ])\n embedding[i1, i2, 1] = 1 if distance < 20 else 0\n\n # count\n \n # min distance\n\n # Patern\n return embedding", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\n nlp = load_model(model_name)\n return nlp(text)", "def annotate_documents(language, name_to_id):\n\n # Configure MERPY, preprocessing the selected lexicon \n lexicon_name = \"icd10cm\" + language\n \n merpy.create_lexicon(name_to_id.keys(), lexicon_name)\n merpy.create_mappings(name_to_id, lexicon_name)\n merpy.process_lexicon(lexicon_name)\n\n # Configure sentence segmenter\n spacy_models = {\"en\": 
\"en_core_web_md\", \"es\": \"es_core_news_md\", \n \"pt\": \"pt_core_news_md\"}\n nlp = spacy.load(spacy_models[language])\n\n abstracts_dir = \"./scielo_abstracts/\" \n doc_w_ann_count = int()\n entity_count = int()\n linked_mentions = int()\n \n abstracts = os.listdir(abstracts_dir)\n pbar = tqdm(total=int(len(abstracts)/3))\n \n logging.info(\"Annotating the abstracts...\")\n\n for abstract in abstracts:\n \n if abstract[-2:] == language:\n out_ann = str()\n out_txt = str()\n\n with open(abstracts_dir + abstract, 'r') as input_file:\n text = input_file.read()\n input_file.close()\n document_ent_count = int()\n \n current_pos = int()\n text_spacy = nlp(text)\n \n for sent in text_spacy.sents:\n entities = merpy.get_entities(sent.text, lexicon_name)\n \n for entity in entities:\n\n if entity != ['']:\n entity_count += 1\n document_ent_count += 1\n \n begin_pos = str(current_pos + int(entity[0]))\n end_pos = str(current_pos + int(entity[1]))\n \n if len(entity) == 4: \n # Linked mentions with ICD code\n linked_mentions += 1\n out_ann += \"T\" + str(document_ent_count) \\\n + \"\\t\" + begin_pos + \" \" + end_pos + \"\\t\"\\\n + entity[2] + \"\\t\" + entity[3] + \"\\n\"\n \n elif len(entity) == 3: \n # Mentions without ICD code\n out_ann += \"T\" + str(document_ent_count) \\\n + \"\\t\" + begin_pos + \" \" + end_pos + \"\\t\"\\\n + entity[2] + \"\\n\"\n \n out_txt += sent.text + \" \"\n current_pos += len(sent.text) + 1\n\n if document_ent_count > 0:\n doc_w_ann_count += 1\n \n # Generate text file, to ensure that annotations spans match\n # the text \n with open(abstracts_dir + abstract, 'w') as out_txt_file:\n out_txt_file.write(out_txt)\n out_txt_file.close()\n \n # Generate annotations file\n out_ann_filename = \"./mer_annotations/\" + language + \"/\" \\\n + abstract + \".ann\"\n\n with open(out_ann_filename, 'w') as out_ann_file:\n out_ann_file.write(out_ann)\n out_ann_file.close()\n \n pbar.update(1)\n \n pbar.close()\n logging.info(\"Done!\")\n\n # Calculate overall annotations stats\n try:\n mentions_ratio = float(entity_count/doc_w_ann_count)\n doc_linked_ratio = float(linked_mentions/doc_w_ann_count)\n linked_ratio = float(linked_mentions/entity_count)\n \n except:\n mentions_ratio = 0.0\n doc_linked_ratio = 0.0\n linked_ratio = 0.0\n\n stats = \"DOCUMENTS WITH ANNOTATIONS: \" + str(doc_w_ann_count) + \"\\n\"\n stats += \"TOTAL ENTITY MENTIONS: \" + str(entity_count) + \"\\n\"\n stats += \"ENTITY MENTIONS PER DOCUMENT: \" + str(mentions_ratio) + \"\\n\"\n stats += \"LINKED ENTITY MENTIONS: \" + str(linked_mentions) + \"\\n\"\n stats += \"LINKED ENTITY MENTIONS PER DOCUMENT: \" \\\n + str(doc_linked_ratio) + \"\\n\"\n stats += \"RATIO OF LINKED ENTITY MENTIONS: \" + str(linked_ratio)\n\n stats_filename = \"mer_annotation_stats_\" + language\n\n with open(stats_filename, \"w\") as output:\n output.write(stats)\n output.close()\n \n logging.info(\"Generated \" + stats_filename)", "def process_in_parallel(table_rows, processes=20):\n # Convert to strings for multiprocessing\n table_rows_str = [str(r) for r in table_rows]\n\n # Process each article in paralllel\n pool = Pool(processes=processes)\n results = []\n start = timer()\n for i, r in enumerate(pool.imap_unordered(process_entry, table_rows_str)):\n # Report progress\n print(f'{100 * i / len(table_rows_str):.2f}% complete.', end='\\r')\n results.append(r)\n pool.close()\n pool.join()\n end = timer()\n print(\n f'Processed {len(table_rows_str)} articles in {end-start:.2f} seconds.')\n\n # Convert to dataframe\n df = 
pd.DataFrame(results)\n # Rename ratio\n df.rename(columns={'ratio': 'read_ratio'}, inplace=True)\n # Add extra columns with more data\n df['claps_per_word'] = df['claps'] / df['word_count']\n df['editing_days'] = ((df['published_date'] - df['started_date']\n ).dt.total_seconds() / (60 * 60 * 24)).astype(int)\n\n # Rounding\n df['published_date'] = df['published_date'].dt.round('min')\n df['started_date'] = df['started_date'].dt.round('min')\n df['read_ratio'] = df['read_ratio'].round(2)\n\n # 5 most common tags (might want to include more tags)\n n = 5\n all_tags = list(chain(*df['tags'].tolist()))\n tag_counts = Counter(all_tags)\n tags = tag_counts.most_common(n)\n\n # Adding columns with indication of tag\n for tag, count in tags:\n flag = [1 if tag in tags else 0 for tags in df['tags']]\n df.loc[:, f'<tag>{tag}'] = flag\n\n df.sort_values('published_date', inplace=True)\n return df", "def preprocess_text_spacy(\n texts,\n n_jobs=1,\n batch_size=100\n):\n texts = [preprocess(t) for t in texts]\n return list(get_spacy_parse(texts, batch_size=batch_size, n_threads=n_jobs))", "def analyze_text(target_text, model):\n print('开始提取目标文本关键词...')\n time1 = time.time()\n kw_list = jieba.analyse.extract_tags(target_text, topK=500, withWeight=True,\n allowPOS=['n', 'v', 'nr', 'ns', 'vn', 'a', 'l'])\n print('提取关键词时间 %f s' % (time.time() - time1))\n kw_weight = pd.Series({k: v for k, v in kw_list}, name='weight')\n kw_vector = pd.DataFrame({k: model.wv[k] for k, v in kw_list if k in model.wv}).transpose()\n n_kw_vector = kw_vector.div(kw_vector.std(axis=1), axis=0)\n filtered_kw_weight = kw_weight[n_kw_vector.index]\n ac = AgglomerativeClustering(30)\n ac.fit(kw_vector)\n kw_label = pd.Series(ac.labels_, index=kw_vector.index, name='label')\n tsne = TSNE()\n print('开始进行t-SNE降维...')\n time1 = time.time()\n kw_tsne_v = tsne.fit_transform(n_kw_vector)\n print('t-SNE降维时间: %f s' % (time.time() - time1))\n kw_tsne_df = pd.DataFrame(kw_tsne_v, index=n_kw_vector.index, columns=['x', 'y'])\n kw_df = pd.concat([kw_label, kw_tsne_df, filtered_kw_weight], axis=1)\n return kw_df", "def sequence_to_text(self, padded, index_word):\n # create a empty list\n word_list = []\n for idx, i in enumerate(padded):\n # for every word in self.question_padded[0]=sentence, put it into a list\n words = np.array([index_word.get(word) for word in i])\n # insert into list\n word_list.append(words)\n # create a dictionary to build DataFrame\n dic = {\"sequence_to_text\": word_list}\n word_df = pd.DataFrame(dic)\n return word_df", "def convert_bibliographic(df: pd.DataFrame, proc_index: int = 0) -> pd.DataFrame:\n # Here we handle the 'Pages' column: its meaning can be\n # very ambiguous if the entity's macro_category is not 'article'.\n # Hence, in that case we discard its content, while in case\n # of articles we try to extract from it the begin_page and the end_page\n articles_mask: pd.Series = df['label'].isin(['journal article', 'conference paper'])\n non_articles_mask: pd.Series = ~articles_mask\n df.loc[non_articles_mask, 'Pages'] = None\n df.loc[articles_mask, 'Pages'] = df.loc[articles_mask, 'Pages'].map(split_range_optional)\n\n # Column 'venue' evaluation\n df['venue'] = pd.Series(dtype='object')\n is_journal: pd.Series = df['label'].isin(['journal article', 'conference paper'])\n\n # Swap Title and Chapter: Title <--> Chapter\n book_chapters: pd.Series = df['label'] == 'book chapter'\n df.loc[book_chapters, 'venue'] = df.loc[book_chapters, 'Title']\n df.loc[book_chapters, 'Title'] = df.loc[book_chapters, 'Chapter']\n\n 
df.loc[is_journal, 'venue'] = df.loc[is_journal, 'Periodical']\n\n df.drop(['Chapter', 'Periodical'], axis=1)\n\n # Convert column 'type_of_citation'\n wikicode_to_ocdm_mapping: Dict[str, str] = {'journal article': 'journal article',\n 'conference paper': 'proceedings article',\n 'book': 'book',\n 'book part': 'book part',\n 'book chapter': 'book chapter'\n }\n # df['type_of_citation'] = df['label'].map(wikicode_to_ocdm_mapping)\n df['label'] = df['label'].map(wikicode_to_ocdm_mapping)\n\n df = df.rename({'pmc': 'pmcid'}, axis=1) # meta recognizes 'pmcid' instead of 'pmc'\n id_schemes: Set[str] = allowed_id_schemes.difference({'pmc'}).union({'pmcid'})\n\n # Stringify 'ID_list' column (enriched with 'tmp' identifiers)\n # and add identifiers to the 'venue' column where needed\n stringify_venue_identifiers(df)\n df['tmp'] = f\"bib_{proc_index}_\" + df.index.astype(str) # this adds a 'tmp' column\n # The following line removes identifier columns and adds a 'ID_list' column\n df = collapse_id_list(df, 'ID_list', ['tmp', *id_schemes], do_not_drop={'tmp'})\n df['id'] = df['ID_list'].map(stringify_id_list) # this adds an 'id'\n\n # Stringify 'Authors' column\n df['author'] = df['Authors'].map(stringify_authors)\n\n # Stringify 'Pages' column\n df['Pages'] = df['Pages'].str.join('-')\n\n # Add the missing column 'editor' even if we don't have data for it\n df['editor'] = None\n\n df = df.rename({'Title': 'title',\n 'Date': 'pub_date',\n 'Volume': 'volume',\n 'Issue': 'issue',\n 'Pages': 'page',\n 'PublisherName': 'publisher',\n # 'type_of_citation': 'type',\n 'label': 'type'\n }, axis=1)\n return df", "def _tokenize_df(df, target=\"sentence\"):\n tqdm.pandas()\n assert type(target) is str, \"target must be a string\"\n assert target in df.columns, \"dataframe must have a {} column (user specified) to tokenize\".format(target)\n df[\"tokenized_text\"] = df[target].progress_apply(ReviewApp._tokenize_lematize)\n return df", "def write_word_loads_table(stn_df, loads_csv, in_docx, engine):\n import os\n\n import numpy as np\n import pandas as pd\n from docx import Document\n from docx.enum.text import WD_ALIGN_PARAGRAPH\n from docx.shared import Pt\n\n # Get year from loads_csv\n year = int(os.path.split(loads_csv)[1].split(\"_\")[-1][:-4])\n\n # Read loads data\n lds_df = pd.read_csv(loads_csv, index_col=0)\n\n # Chem pars of interest\n par_list = [\n \"SPM\",\n \"TOC\",\n \"PO4-P\",\n \"TOTP\",\n \"NO3-N\",\n \"NH4-N\",\n \"TOTN\",\n \"SiO2\",\n \"Ag\",\n \"As\",\n \"Pb\",\n \"Cd\",\n \"Cu\",\n \"Zn\",\n \"Ni\",\n \"Cr\",\n \"Hg\",\n ]\n\n # Open the Word document\n doc = Document(in_docx)\n\n # Set styles for 'Normal' template in this doc\n style = doc.styles[\"Normal\"]\n\n font = style.font\n font.name = \"Times New Roman\"\n font.size = Pt(8)\n\n p_format = style.paragraph_format\n p_format.space_before = Pt(0)\n p_format.space_after = Pt(0)\n\n # Get table obj\n tab = doc.tables[0]\n\n # Extract text to index rows\n row_dict = {}\n for idx, cell in enumerate(tab.column_cells(0)):\n for paragraph in cell.paragraphs:\n row_dict[paragraph.text] = idx\n\n # Extract text to index cols\n col_dict = {}\n for idx, cell in enumerate(tab.row_cells(0)):\n for paragraph in cell.paragraphs:\n col_dict[paragraph.text] = idx\n\n # Loop over sites\n print(\"Processing:\")\n for stn_id in stn_df[\"station_id\"]:\n # Get name and code\n name = stn_df.query(\"station_id == @stn_id\")[\"station_name\"].values[0]\n code = stn_df.query(\"station_id == @stn_id\")[\"station_code\"].values[0]\n\n print(\" %s (%s)...\" % 
(name, code))\n\n # Allow for sites with the same name\n if name in [\"Børselva\", \"Oselva\"]:\n name = \"%s (%s)\" % (name, code)\n\n # Get flow data\n q_df = extract_discharge(\n stn_id, \"%s-01-01\" % year, \"%s-12-31\" % year, engine, plot=False\n )\n\n # Average daily flow vol in 1000s m3/day\n q_av = q_df.mean()[\"flow_m3/s\"]\n v_av = q_av * 24 * 60 * 60 / 1000\n\n # Update the table with flow\n update_cell(name, \"Flow rate\", v_av, col_dict, row_dict, tab)\n\n # Loop over chem pars\n for par in par_list:\n # Get col for loads df\n if par == \"Hg\":\n par_l = \"Hg_kg\"\n else:\n par_l = par + \"_tonnes\"\n\n # Get load value\n load = lds_df.at[stn_id, par_l]\n\n # Update table\n update_cell(name, par, load, col_dict, row_dict, tab)\n\n # Save after each table\n doc.save(in_docx)\n\n print(\"Finished.\")", "def fetch_query_corpus(arg_tuple): \n \n # Destructure the tuple (needed for multiprocessing)\n path, query_text, key = arg_tuple\n\n # Open file and fetch all lines of URLs\n with open(BASE_PATH + path) as url_file: \n lines = url_file.read().split('\\n')\n \n results = []\n\n print(\"Processing \" + query_text)\n\n for line in lines:\n result = fetch_article_text(line, key)\n if result != None: \n results.append(result)\n\n # Print results to file\n filename = \"CORPUS/%s.json\" % (query_text)\n with open(filename, 'w') as outfile:\n json.dump(results, outfile, indent=4)", "def main() -> None:\n configure_pandas()\n\n df = pd.read_csv(f\"{DATAPATH}ton.csv\", low_memory=False)\n\n # Remove outlier text lengths\n df = df[df[\"text\"].str.len() < 3000]\n\n # Only keep full records of party_name and text\n df = df[[\"party_name\", \"text\"]].dropna(how=\"any\")\n print(f\"{len(df)} full records.\")\n\n # Limit dataset for faster iterations during testing\n N = 10000\n df = df.loc[:N, :]\n\n # Split train/test\n X_train, X_test, y_train, y_test = train_test_split(\n df[\"text\"],\n df[\"party_name\"],\n test_size=0.3,\n random_state=1,\n stratify=df[\"party_name\"])\n\n # Encode targets based on train\n target_encoder = LabelEncoder()\n target_encoder.fit(y_train)\n y_train = target_encoder.transform(y_train)\n y_test = target_encoder.transform(y_test)\n\n # Features + model pipeline\n pipeline = Pipeline(\n [\n (\"vect\", TfidfVectorizer(\n tokenizer=spacy_tokenizer,\n analyzer=\"word\",\n max_df=0.9,\n min_df=5,\n ngram_range=(1, 2)\n )),\n (\"clf\", XGBClassifier()),\n ]\n )\n\n # Train\n pipeline.fit(X_train, y_train)\n\n # Measure on test set\n y_pred = pipeline.predict(X_test)\n print(metrics.classification_report(y_test, y_pred, target_names=target_encoder.classes_))\n\n # TODO grid search, hyperparameters, etc." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an entity and a textacy corpus, return a list of all the sentences in which this entity occurs
def list_of_entity_statements(corpus, entity):
    entity_sentences = [list(entity_statements(doc, entity=entity)) for doc in corpus
                        if list(entity_statements(doc, entity=entity))]
    # If statement that removes null sentences
    entity_sentences = [item for sublist in entity_sentences for item in sublist]
    return entity_sentences
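A brief usage sketch, assuming `corpus` is an iterable of already-parsed textacy/spaCy Doc objects and that the `entity_statements` helper the function depends on is importable; the entity name is invented for illustration.

# Hypothetical call: collect every sentence that mentions one entity across the corpus.
acme_sentences = list_of_entity_statements(corpus, entity='Acme Corp')
for sent in acme_sentences:
    print(sent)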
[ "def sentence_entities(sentence):\n\n\n nlp = Rating.nlp_load(sentence)\n return [(ent.text, ent.label_) for ent in nlp.ents]", "def get_texts_from_entities(entities):\n texts = []\n for e in entities:\n texts.append(e.text)\n return texts", "def get_entity_sentences(db: Session, db_entity: models.Entity, skip: int = 0, limit: int = 100):\n return db.query(models.Sentence.text).filter((models.Sentence.id == models.association_table.c.sent_id) &\n (models.association_table.c.ent_id == db_entity.id)).offset(\n skip).limit(limit).all()", "def find_sentences_with_entity(requested_entity, text):\n\n accepted_splits = []\n \n for m in re.finditer(requested_entity, text): \n #goal here is to get the sentence itself instead of cutting it off in the middle, doesn't work perfectly yet\n search_area = text[m.start()-300:m.end()+300]\n splits = search_area.split('.')\n # splits = splits[1:-1]\n for split in splits:\n if requested_entity in split:\n if split not in accepted_splits:\n # st.write(split)\n accepted_splits.append(split)\n \n accepted_splits = list(set(accepted_splits))\n\n return accepted_splits", "def extract_entities_w_spacy(text):\n assert type(text) == str, \"Input text must be of type string.\"\n assert text != \"\", \"Input text cannot be empty.\"\n\n nlp = en_core_web_sm.load()\n doc = nlp(text)\n entities = pd.DataFrame(\n [(X.text, X.label_) for X in doc.ents], columns=[\"text\", \"entity\"]\n )\n return entities.drop_duplicates([\"entity\", \"text\"], keep=\"first\")", "def extract_sentences(text: str) -> List[str]:\n doc = nlp(text)\n return [sent.text for sent in doc.sents]", "def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n # tokens = nltk.word_tokenize(sentence)\n tokens = GetNounPhrases(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)", "def sentence_extractor(self):\n self.text_sentences = []\n for text in self.texts:\n sentences = nltk.sent_tokenize(text)\n tokens_sentences = []\n for sentence in sentences:\n tokens = nltk.word_tokenize(sentence)\n if self.text_cleaner is not None:\n tokens = self.text_cleaner(tokens)\n if self.stem_words:\n tokens = stem_words(tokens)\n \n tokens_sentences.append(tokens)\n self.text_sentences.append(tokens_sentences)", "def get_sentences(raw_text):\n return [\"\".join(s['sentences']) for s in format_doc(raw_text)]", "def extract_entities(text):\n\n text = nlp_de(text)\n entities_nr = len(text.ents)\n # print(entities_nr, \"Entities in diesem Text.\")\n entities_labels = Counter([x.label_ for x in text.ents])\n entities_top3 = Counter([x.text for x in text.ents]).most_common(3)\n entities_list = [(X.text, X.label_) for X in text.ents]\n\n return (\n entities_nr,\n \"Entities in diesem Text:\",\n entities_labels,\n \"Die 3 häufigsten Entities:\\n\",\n entities_top3,\n \"Identifizierte Entities:\\n\",\n entities_list,\n )", "def extract_entities(body: str) -> list[str]:\n\n body = NLP(body)\n\n entities = [ent.text for ent in body.ents]\n \n return entities", "def get_target_sentences(self, domain: str) -> List[str]:\n return self._get_sentences(domain, self._target_suffix)", "def test_sqlitedb_get_sentences():\n # Query sentence with entity specified\n sqlitedb = Database(SqliteDB(db_file))\n entities = sqlitedb.get_sentences(\"google\")\n\n 
assert len(entities) == 1, \"number of sentence with 'google' entity is not 1\"", "def annotate(model_path, corpus_sents):\n nlp = spacy.load(model_path)\n annotated = []\n\n for text in corpus_sents:\n doc = nlp(text)\n # breakpoint()\n ents = []\n for ent in doc.ents:\n ent_pos = text.index(ent.text)\n ent_end_pos = ent_pos + len(ent.text)\n ents.append((ent_pos, ent_end_pos, ent.label_))\n # if len(doc.ents) > 0:\n # print(\"Tokens\", [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n # print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\n # print(annotated)\n\n annotated.append((text, {\"entities\": ents}))\n\n return annotated", "def get_sentences(corpus_file):\n\n # Read all the sentences in the file\n with open(corpus_file, 'r', errors='ignore') as f_in:\n\n s = []\n\n for line in f_in:\n line = line\n\n # Ignore start and end of doc\n if '<text' in line or '</text' in line or '<s>' in line:\n continue\n # End of sentence\n elif '</s>' in line:\n yield s\n s = []\n else:\n try:\n word, lemma, pos, index, parent, dep = line.split()\n s.append((word, lemma, pos, int(index), parent, dep))\n # One of the items is a space - ignore this token\n except Exception as e:\n print (str(e))\n continue", "def ToSentences(paragraph, include_token=True):\n s_gen = SnippetGen(paragraph, SENTENCE_START, SENTENCE_END, include_token)\n return [s for s in s_gen]", "def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences", "def ent_query(item: Query):\n doc = nlp(item.corpus)\n return [\n {\"start\": ent.start_char, \"end\": ent.end_char,\n \"label\": ent.label_, \"id\": ent.ent_id_, \"text\": ent.text}\n for ent in doc.ents\n ]", "def get_sentences_from_parsed_text(doc):\n \n # Open file and split at sentence boarders \n with open(doc,\"r\") as infile:\n infile = infile.read()\n sentences = infile.split('\\n\\n')[:-1]\n\n return sentences", "def _merge_sentences(text):\n\n return [word for sentence in text for word in sentence]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function adds a specific point of the timeseries to the tree. Parameters: point, parent_ind, max_depth, tree, borders
def _add_point_to_tree(self, point, parent_ind, max_depth, tree, borders):
    # Stop if point is too deep.
    if point['depth'] > max_depth:
        return
    # Add point to tree
    curr_ts_name = self._timescales[point['depth']]['name']
    tree.append(dict(short_name=point['name_short'],
                     full_name=point['name_full'],
                     timescale=curr_ts_name,
                     parent_index=parent_ind))
    # Update borders.
    curr_borders = borders.get(curr_ts_name)
    if curr_borders is None:
        borders[curr_ts_name] = (point['name_full'], point['name_full'])
    else:
        borders[curr_ts_name] = (borders[curr_ts_name][0], point['name_full'])
    # Collect points from lower timescales recursively.
    point_ind = len(tree) - 1
    for child in point['children']:
        self._add_point_to_tree(child, point_ind, max_depth, tree, borders)
[ "def set_root(self, df):\n\t\tif df.index.name == \"time\":\n\t\t\tpass\n\t\telse:\n\t\t\tdf = df.set_index(\"time\")\n\t\tdf.index = pandas.to_datetime(df.index)\t\t\n\t\tself.root = df\n\t\treturn", "def addParentAddedDagPathCallback(*args, **kwargs):\n \n pass", "def __insert_data(self, plotting=False):\n\n for index_point in range(0, len(self.__pointer_data)):\n if (index_point != 0) and (plotting is True):\n plot_tree_fin(self.__tree)\n plot_birch_leaves(self.__tree, data=self.__pointer_data)\n\n print(\"\\n\")\n print(\"\\n\")\n print(\"index: \", index_point)\n point = self.__pointer_data[index_point]\n print(\"point \", point)\n self.__tree.insert_cluster([point])\n\n if self.__tree.amount_entries > self.__entry_size_limit:\n print(\"rebuilding tree\")\n self.__tree = self.__rebuild_tree(index_point)\n\n # self.__tree.show_feature_distribution(self.__pointer_data);", "def addChildReorderedDagPathCallback(*args, **kwargs):\n \n pass", "def add_to_tree(self, node):\n # print(node.state)\n # if(node.parent_node is not None):\n # print(\"parent:\", node.parent_node.state)\n # print()\n self.tree.append(node)\n return", "def addChildAddedDagPathCallback(*args, **kwargs):\n \n pass", "def addNodeSet(self, name, object=None, parent=None, mouseBinding={},\\\n hasChildren=False, firstExpand_cb=None, nodeClass=Node):\n \n if (type(object) is not list) or \\\n (type(name) is not list) or \\\n (type(hasChildren) is not list):\n warn(\"List of children needed, non-list type found\")\n return None\n \n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding)\n\n num = len(name)\n nodeList=[]\n for i in range(num):\n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding[i])\n node = nodeClass(name[i], object[i], \\\n hasChildren=hasChildren[i], firstExpand_cb=firstExpand_cb)\n nodeList.append(node)\n node.tree = self\n try:\n hash(object[i])\n node.objectKey = object[i]\n except TypeError:\n node.objectKey = self.objIndex\n self.objIndex +=1\n\n ## if type(object) is not types.InstanceType:\n ## node.objectKey = self.objIndex\n ## self.objIndex +=1\n ## else:\n ## node.objectKey = object\n\n if self.obj2Node:\n self.objToNode[node.objectKey] = node\n\n self.numberOfNodes += 1\n node.uniqueID = self.numberOfNodes\n node.tag = [str(node.uniqueID)]\n \n # if parent given as a string, find the Node obj of the parent\n if type(parent) is bytes:\n input=parent\n parent = self.findNodeFromName(parent)\n if parent is None:\n node.parentFullname = None\n warn( \"error in addNode, check name of parent: \"+ input) \n return\n else:\n node.parentFullname = input\n elif parent in self.objToNode:\n parent = self.objToNode[parent]\n elif not isinstance(parent, Node) and parent is not None:\n raise RuntimeError('bad parent')\n\n # if parent is given as None,we have a new root node\n # The new root is added to the end(bottom) of the tree\n if parent is None:\n node.parentFullname = None\n h = 0\n for r in self.roots:\n if r.name == name :\n warn(\"The node with name\"+name + \"already exists\")\n return\n h += r.height\n # calc the Y offset of current node\n node.y += h * OFFSET + self.offy \n node.x += self.offx\n self.roots.append(node)\n else:\n assert isinstance(parent, Node)\n if parent.parentFullname != None:\n node.parentFullname = parent.parentFullname + '|' + \\\n parent.name\n else:\n node.parentFullname = parent.name\n\n node.parent = parent\n \n if parent is not None:\n # check duplicated node\n # FIXME ... 
this is expensive\n## for c in parent.children:\n## if c.name == node.name:\n## print \"The node with name\", name, \"already exists\"\n## return \n\n for node in nodeList:\n node.x = parent.x + OFFSET\n parent.children.append(node)\n if parent.expanded:\n parent.increaseParentHeight(offset=num)\n parent.inserted = True\n self.updateY()\n if parent.inserted:\n parent.draw_new_insert(num=num, mode = 'batch')\n parent.inserted = False\n parent.draw()\n else:\n for i in range(num):\n self.draw_new_root(nodeList[i])\n \n bb = self.canvas.bbox(tkinter.ALL)\n self.canvas.configure(scrollregion=(0, 0,bb[2]+OFFSET, bb[3]+OFFSET))\n \n return nodeList", "def draw_pine_tree(x, y):\n\n # Draw the triangle on top of the trunk.\n # We need three x, y points for the triangle.\n arcade.draw_triangle_filled(x + 40, y, # Point 1\n x, y - 100, # Point 2\n x + 80, y - 100, # Point 3\n arcade.color.DARK_GREEN)\n\n # Draw the trunk\n arcade.draw_lrtb_rectangle_filled(x + 30, x + 50, y - 100, y - 140,\n arcade.color.DARK_BROWN)", "def create_tree(raw_tree, Samples, index):\n\t#initialize index of sample\n\tcount = 0\n\tif count == index: count += 1 #index to be skipped\n\t#initialize final tree\n\tfinal_tree = Tree()\n\t#add each sample to final tree in proper format\n\tfor origin in raw_tree:\n\t\t#add node\n\t\tfinal_tree.nodes.append(Node(origin, Samples[count]))\n\t\t#add to index\n\t\tcount += 1\n\t\tif count == index: count += 1 #index to be skipped\n\t#don't append tree if has loops\n\tfinal_tree.to_dict()\n\tif final_tree.loops(): return None\n\t#if pairs of samples from same time point exist, change the format to include and internode\n\tfinal_tree = get_internodes(final_tree)\n if final_tree.double_progenitor(): return None\n\t#sort nodes\n\tfinal_tree.sort_nodes()\n\t#return\n\treturn final_tree", "def draw_tree(order, theta, sz, posn, heading, color=(0,0,0), depth=0):\n trunk_ratio = 0.3 # The relative ratio of the trunk to the whole tree. \n\n # Length of the trunk \n trunk = sz * trunk_ratio \n delta_x = trunk * math.cos(heading) \n delta_y = trunk * math.sin(heading) \n (u, v) = posn \n newpos = (u + delta_x, v + delta_y) \n pygame.draw.line(main_surface, color, posn, newpos) \n\n if order > 0: \n \"\"\" Make 2 halfs for the fractal tree symmetrical around the trunk.\n \"\"\"\n if depth == 0: \n color1 = (255, 0, 0) \n color2 = (0, 0, 255) \n else: \n color1 = color \n color2 = color \n\n # make the recursive calls, which can be considered as zooming into the fractal pattern. 
\n newsz = sz*(1 - trunk_ratio) \n draw_tree(order-1, theta, newsz, newpos, heading-theta, color1, depth+1) \n draw_tree(order-1, theta, newsz, newpos, heading+theta, color2, depth+1)", "def add(self, transaction):\n point = self._root\n # print(\"Transactions: \", transaction , end=\" \")\n for item in transaction:\n # try:\n # self._count+=1\n # visual_fptree(self._root.tree, str(self._count))\n # except Exception as e:\n # print(e)\n\n # print(item, \" \", end=\"\")\n\n next_point = point.search(item)\n if next_point:\n # There is already a node in this tree for the current\n # transaction item; reuse it.\n next_point.increment()\n else:\n # Create a new point and add it as a child of the point we're\n # currently looking at.\n next_point = FPNode(self, item)\n point.add(next_point)\n\n # Update the route of nodes that contain this item to include\n # our new node.\n self._update_route(next_point)\n\n point = next_point\n print()", "def _create_nested_star(branches, level):\n import numpy as np\n global v_id, spos\n v_id = 0\n spos = np.zeros((2, 3000))\n cexp.new_graph()\n _nested_stars(np.zeros(2), branches, level)\n cexp.finalize_graph()", "def create_location_shapes(resolved_location_tree, parent_shape=None):\n shapes = []\n for child in resolved_location_tree.get('children', []):\n new_shape = None\n shape_value = None\n country_shape = None\n location = child['location']\n matching_countries = world_df[world_df.iso_a2 == location.get('countryCode', 'NoCountry')]\n excess_value = child['value'] - sum(child2['value'] for child2 in child.get('children', []))\n # Excess value could be slightly negative due to floating point error.\n if excess_value < -0.01:\n print(child)\n print([child2['value'] for child2 in child['children']])\n raise Exception('Bad tree')\n excess_value = max(0, excess_value)\n if len(matching_countries):\n country_shape = matching_countries.geometry.iloc[0]\n if location.get('featureCode', '').startswith('PCL') and country_shape:\n new_shape = country_shape\n else:\n # Plot the location as a circle clipped by its containing location.\n lon_lat = location.get('longitude'), location.get('latitude')\n if lon_lat[0] and lon_lat[1]:\n # default area 10 square km\n shape_area = location.get('area', 10e3)\n if len(matching_countries) and 'population' in location:\n # Set the shape area as a proportion of the country area\n # equal to the proportion of the country's population it contains.\n country_portion = float(location['population']) / matching_countries.pop_est.iloc[0]\n shape_area = max(country_shape.area * country_portion, shape_area)\n new_shape = create_projected_point(lon_lat).buffer(math.sqrt(shape_area / math.pi))\n if parent_shape:\n new_shape = new_shape.intersection(parent_shape)\n if new_shape.area == 0:\n new_shape = None\n child_shapes = create_location_shapes(child, new_shape)\n child_shapes_out = []\n for child_shape_val in child_shapes:\n if child_shape_val[0] is None:\n # If the child location has no shape absorb it into the\n # parent location.\n excess_value += child_shape_val[1]\n else:\n child_shapes_out.append(child_shape_val)\n # # Remove overlapping child shape regions from parent shape.\n # if len(child_shapes_out) > 0:\n # all_child_shape = child_shapes_out[0][0]\n # for child_shape, value in child_shapes_out[1:]:\n # all_child_shape = all_child_shape.union(child_shape)\n # new_shape = new_shape.difference(all_child_shape)\n shapes.append((new_shape, excess_value,))\n shapes += child_shapes_out\n return shapes", "def set_leaf(self, i, 
value):\n self.tree[i + self.size - 1] = value", "def add_leaf(self, tree, leaf):\n node = tree\n for n in self._path.split('.'):\n if n not in node['children']:\n node['children'][n] = {'node': None, 'children': {}}\n node = node['children'][n]\n node['node'] = leaf", "def addNode(self, name, object=None, parent=None, mouseBinding={},\\\n hasChildren=False, firstExpand_cb=None, nodeClass=Node):\n # the '|' is not allowed as name of the node\n if name.find('|')!=-1:\n warn( \"No '|' is allowed in node name \")\n return\n\n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding)\n\n node = nodeClass(name, object, mouseBinding=mouseBinding, \\\n hasChildren=hasChildren, firstExpand_cb=firstExpand_cb)\n\n node.tree = self\n try:\n hash(object)\n node.objectKey = object\n except TypeError:\n node.objectKey = self.objIndex\n self.objIndex +=1\n \n## if type(object) is not types.InstanceType:\n## node.objectKey = self.objIndex\n## self.objIndex +=1\n## else:\n## node.objectKey = object\n \n if self.obj2Node:\n self.objToNode[node.objectKey] = node\n \n self.numberOfNodes += 1\n node.uniqueID = self.numberOfNodes\n node.tag = [str(node.uniqueID)]\n \n # if parent is given as None,we have a new root node\n # The new root is added to the end(bottom) of the tree\n if parent is None:\n node.parentFullname = None\n h = 0\n for r in self.roots:\n if r.name == name :\n warn( \"The node with name\"+ name + \"already exists\")\n return\n h += r.height\n # calc the Y offset of current node\n node.y += h * OFFSET + self.offy \n node.x += self.offx\n self.roots.append(node)\n self.draw_new_root(node)\n \n else:\n # if parent given as a string, find the Node obj of the parent\n if type(parent) is bytes:\n input=parent\n parent = self.findNodeFromName(parent)\n if parent is None:\n node.parentFullname = None\n warn( \"error in addNode, check name of parent:\"+ input)\n return \n elif parent in self.objToNode:\n parent = self.objToNode[parent]\n elif not isinstance(parent, Node):\n raise RuntimeError('bad parent')\n #else:\n # # only Node type is accepted.\n # assert isinstance(parent, Node)\n\n if parent.parentFullname != None:\n node.parentFullname = parent.parentFullname + '|' + parent.name\n else:\n node.parentFullname = parent.name\n \n node.parent = parent \n # check duplicated node\n # FIXME ... 
this is expensive\n## for c in parent.children:\n## if c.name == node.name:\n## print \"The node with name\", name, \"already exists\"\n## return \n node.x = parent.x + OFFSET\n parent.children.append(node)\n if parent.expanded:\n parent.increaseParentHeight()\n parent.inserted = True\n self.updateY()\n if parent.inserted:\n parent.draw_new_insert()\n parent.inserted = False\n # FIXME erasing the parent is very expensif, we only need to\n # draw from node to end of children and move everything below\n # parent down\n parent.draw() \n \n bb = self.canvas.bbox(tkinter.ALL)\n self.canvas.configure(\n scrollregion=(0, 0,bb[2]+OFFSET, bb[3]+OFFSET))\n \n return node", "def addPoint(self, pt: 'SbVec3f', userdata: 'void *const'=None) -> \"int\":\n return _coin.SbBSPTree_addPoint(self, pt, userdata)", "def PositionNests(self):\n\n if self.CrateIndex == None:\n return \"This operation must occur after placing the piece.\"\n \n for nest in self.nests:\n nest.ExtremePoints[0] = \"eggs\"\n\n # THEN ADD THE NESTS TO THE LIST OF OPEN CRATES...", "def write_rootgraph(vectorx, vectory, graphtitle, xtitle, ytitle, sectorscurrents, rootdirectory):\n\n\tarrayx = array('d')\n\tarrayy = array('d')\n\n\tfor x in vectorx:\n\t\tarrayx.append(x)\n\n\tfor y in vectory:\n\t\tarrayy.append(y)\n\n\n\t#How many graph points\n\tn = len(vectorx)\n\n\tMyTGraph = TGraph(n, arrayx, arrayy)\n\tMyTGraph.SetName(graphtitle)\n\n\tif ytitle == \"i\":\n\t\tytitle = ytitle+\" (uA)\"\n\t\tcolor = 2\n\t\toffset = 1.\n\t\tminimum = -0.5\n\t\tmaximum = int(np.max(vectory)+1.5)\n\t\tlineup = TLine(float(np.min(vectorx)), 500, float(np.max(vectorx)), 500)\n\t\tlineup.SetLineColor(2)\n\t\tlineup.SetLineStyle(2)\n\t\tlinedown = TLine(float(np.min(vectorx)), 500., float(np.max(vectorx)), 500.)\n\t\tlinedown.SetLineColor(8)\n\t\tlinedown.SetLineStyle(2)\n\t\tfor entry in range(len(sectorscurrents)):\n\t\t\tif vectory[entry] > 0.01: \n\t\t\t\tlatex = TLatex(MyTGraph.GetX()[entry], MyTGraph.GetY()[entry], sectorscurrents[entry])\n\t\t\t\tlatex.SetTextSize(0.02)\n\t\t\t\tMyTGraph.GetListOfFunctions().Add(latex)\n\t\t\telse:\n\t\t\t\tlatex = TLatex(MyTGraph.GetX()[entry], MyTGraph.GetY()[entry], \"\")\n\t\t\t\tlatex.SetTextSize(0.02)\n\t\t\t\tMyTGraph.GetListOfFunctions().Add(latex)\n\n\tif ytitle == \"v\":\n\t\tytitle = ytitle+\" (V)\"\n\t\tcolor = 4\n\t\toffset = 0.9\n\t\tminimum = 400\n\t\tmaximum = 600\n\t\tlineup = TLine(float(np.min(vectorx)), 580., float(np.max(vectorx)), 580.)\n\t\tlineup.SetLineColor(2)\n\t\tlineup.SetLineStyle(2)\n\t\tlinedown = TLine(float(np.min(vectorx)), 530., float(np.max(vectorx)), 530.)\n\t\tlinedown.SetLineColor(8)\n\t\tlinedown.SetLineStyle(2)\n\n\t\tfor entry in range(len(sectorscurrents)):\n\t\t\tif vectory[entry] > 569.0:\n\t\t\t\tlatex = TLatex(MyTGraph.GetX()[entry], MyTGraph.GetY()[entry], \"\")\n\t\t\telse:\n\t\t\t\tlatex = TLatex(MyTGraph.GetX()[entry], MyTGraph.GetY()[entry], sectorscurrents[entry])\n\t\t\tlatex.SetTextSize(0.02)\n\t\t\tMyTGraph.GetListOfFunctions().Add(latex)\n\t\n\t\n\t#Draw + DrawOptions\n\tStyle = gStyle\n\tStyle.SetPadLeftMargin(2.0)\n\tXAxis = MyTGraph.GetXaxis() #TGraphfasthescin\n\t#XAxis.SetTitleOffset(offset)\n\tXAxis.SetTitle(xtitle)\n\tMyTGraph.SetMarkerColor(color)\n\tMyTGraph.SetMarkerStyle(1)\n\tMyTGraph.SetMarkerSize(1)\n\tMyTGraph.SetLineColor(color)\n\tMyTGraph.SetTitle(graphtitle)\n\t#XAxis.SetTitle(xtitle)\n\tYAxis = 
MyTGraph.GetYaxis()\n\tYAxis.SetTitleOffset(offset)\n\tYAxis.SetTitle(ytitle)\n\tMyTGraph.GetHistogram().SetMinimum(minimum)\n\tMyTGraph.GetHistogram().SetMaximum(maximum)\n\trootdirectory.WriteTObject(MyTGraph)\n\tc = TCanvas()\n\tc.SetName(graphtitle+\"_canvas\")\n\tMyTGraph.Draw(\"APL\")\n\tlineup.Draw(\"\")\n\tlinedown.Draw(\"\")\n\trootdirectory.WriteTObject(c)\n\t\n\t#MyTGraph.Write(graphtitle)\n\tMyTGraph.Draw(\"APL\")\n\t#gPad.SaveAs(\"current-\"+graphtitle+\".pdf\")\n\tgPad.Close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Singleton to create the gui if it doesn't exist, or show it if it does
def show_gui():
    global INSTANCE
    if not INSTANCE:
        INSTANCE = TeamSelectorDialog()
    INSTANCE.show()
    return INSTANCE
[ "def show_gui(self):\n self.gui = Browser_GUI(self)", "def createGuiWorker() -> ghidra.util.worker.Worker:\n ...", "def init_main_window(self):\r\n gui_main = Tk()\r\n gui_main.geometry(f\"{WIDTH}x{HEIGHT}\")\r\n gui_main.resizable(width=False, height=False)\r\n gui_main.title(\"HUJI Boggle!\")\r\n gui_main.configure(background=BG_COLOR)\r\n return gui_main", "def create_wf_gui():\n\n root = tk.Tk()\n root.title('Workflow Creator')\n root.geometry('475x495+400+300')\n my_gui = WorkFlowGUI()\n my_gui.mainloop()\n opt = my_gui.Options\n\n root.iconify() # Not super solution but only way to make it close on Mac\n root.destroy()\n\n return opt", "def show():\r\n auto_collision_gui = GUI()\r\n auto_collision_gui.show(dockable=True)\r\n return auto_collision_gui", "def ui():\n win = ControlWindow()\n win.show()", "def gui():\n logger.info('Starting Keithley GUI') \n k = KeithleyForceUI()\n k.configure_traits()", "def __init__(self, **params):\n self.name = 'python_qt'\n if not 'iCeDeROM' in params:\n raise KeyError('iCeDeROM parameter reference mandatory!')\n if not 'gui' in params['iCeDeROM'].modules:\n raise RuntimeError('Python Console QtWidget requires GUI running!')\n self.iCeDeROM = params['iCeDeROM']\n self.tabs = dict()\n self.texts = dict()\n self.layouts = dict()\n self.window = None\n self.command = None\n self.history_index = 0\n self.history = list()\n self.createQtWidget(**params)\n self.setupQtWidget(**params)", "def __init__(self, parent=None):\n QtGui.QMainWindow.__init__(self, parent)\n self.db = getDatabase()\n self.addToLibWidget = LibWidget(self, self.db)\n self.setGeometry(100, 100, 800, 800)\n self.setWindowTitle('Media Browser')\n self.setWindowStyle()\n self.setupUI()", "def create_window(plotobj, window_class=Main, **kwargs):\n app_created = False\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QtWidgets.QApplication(sys.argv)\n app_created = True\n app.references = set()\n window = window_class(plotobj, **kwargs)\n app.references.add(window)\n window.show()\n if app_created:\n app.exec_()\n return window", "def launch_single(self) -> None:\n print('launching single extraction gui')\n self.master.title('Python Data Extractor - Single')\n\n destroy_child_widgets(self.central_widget)\n SingleAppGui(self.central_widget)", "def __init__(self, logic_controller):\n \n self._logic_controller = logic_controller\n builder = Gtk.Builder()\n builder.add_from_file(\"gui/gui.glade\")\n \n self._stores_per_type = {gobjects.GContract : wrappers.contract.StoreWrapper(),\n gobjects.GProject : wrappers.project.StoreWrapper(),\n gobjects.GCategory : wrappers.category.StoreWrapper(),\n gobjects.GWorktime : wrappers.worktime.StoreWrapper() }\n\n self._widgets_per_type = {gobjects.GContract : widgets.ContractWidget(self, builder, self._stores_per_type),\n gobjects.GProject : widgets.ProjectWidget(self, builder, self._stores_per_type),\n gobjects.GCategory : widgets.CategoryWidget(self, builder, self._stores_per_type),\n gobjects.GWorktime : widgets.WorktimeWidget(self, builder, self._stores_per_type) }\n \n\n self._notebook = wrappers.NotebookWrapper(builder.get_object(\"main-notebook\"))\n \n builder.get_object(\"main-window\").connect(\"destroy\", lambda x: self.shutdown_gui())\n builder.get_object(\"quit-button\").connect(\"clicked\", lambda x: self.shutdown_gui())\n builder.get_object(\"new-button\").connect(\"clicked\", lambda x: self._new_button_clicked())\n builder.get_object(\"save-button\").connect(\"clicked\", lambda x: self._save_button_clicked())\n 
builder.get_object(\"delete-button\").connect(\"clicked\", lambda x: self._delete_button_clicked())\n\n builder.get_object(\"main-window\").show()", "def create_window(window):\n\tapp_created = False\n\tapp = QtCore.QCoreApplication.instance()\n\tif app is None:\n\t\tapp = QtGui.QApplication(sys.argv)\n\t\tapp_created = True\n\tapp.references = set()\n\tapp.references.add(window)\n\twindow.show()\n\tif app_created:\n\t\tapp.exec_()\n\treturn window", "def h_app_show(self, *_):\n if self.window is None:\n self.window=self.ui_window_class(self.glade_file)\n self.do_updates()", "def make_extra_widgets(self):\n\n # text window (if None (for now), new windows will be created)\n # self.gvars.Text_help = None\n self.gvars.Text_help = QLIB.TextWindow(parent=self)\n self.gvars.Text_script = QLIB.TextWindow(parent=self)\n\n # note whether we have a browser via import\n self.gvars.browser = None\n try: \n import webbrowser\n self.gvars.browser = webbrowser\n if self.verb > 1: print '++ have browser'\n except:\n if self.verb > 1: print '-- NO browser'", "def gui(self):\n if 'SamapGui' not in self.__dict__:\n try:\n from samalg.gui import SAMGUI\n except ImportError:\n raise ImportError('Please install SAMGUI dependencies. See the README in the SAM github repository.')\n\n sg = SAMGUI(sam = [self.sam1,self.sam2], title = [self.id1,self.id2],default_proj='X_umap_samap')\n self.SamapGui = sg\n return sg.SamPlot\n else:\n return self.SamapGui.SamPlot", "def build_screen(self, control_manager, panes):\n\t\treturn GuiScreen(control_manager, panes)", "def _loadGui(self, type, guifile, imports):\n\t\tif type == 'MAIN':\n\t\t\tself._mainmenu = menuhandler.MenuHandler(guifile, self)\n\t\telif type == 'HUD':\n\t\t\tself._hud = hudhandler.HUDHandler(guifile, self)\n\t\telif type == 'SETTINGS':\n\t\t\tself._settingsmenu = settingshandler.SettingsHandler(guifile, self)\n\t\telif type == 'ABOUT':\n\t\t\tself._aboutmenu = abouthandler.AboutHandler(guifile, self)\n\t\telif type == 'PAUSE':\n\t\t\tself._pause = pychan.loadXML('gui/' + guifile + '.xml')\n\t\t\tif imports:\n\t\t\t\tguiinit = __import__('scripts.gui.' + guifile)\n\t\t\t\tguiinit.run()\n\t\telif type == 'LOAD':\n\t\t\tself._loadingmenu = pychan.loadXML('gui/' + guifile + '.xml')\n\t\t\tif imports:\n\t\t\t\tguiinit = __import__('scripts.gui.' + guifile)\n\t\t\t\tguiinit.run()\n\t\telse:\n\t\t\tpass", "def toggle_wicd_gui(self):\n if not self.gui_win:\n self.gui_win = gui.appGui(tray=self)\n elif not self.gui_win.is_visible:\n self.gui_win.show_win()\n else:\n self.gui_win.exit()\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``read_version()`` takes one or more file path components pointing to a Python source file to parse. The path components will be joined together with ``os.path.join()``, and then, if the path isn't absolute, the path to the directory containing the script calling ``read_version()`` will be prepended to the path. (No more ``join(dirname(__file__), ...)`` boilerplate needed!) ``read_version()`` then parses the given Python file and searches through the parse tree for any assignments to a variable named ``__version__``, returning the last value assigned. The ``variable`` keyword argument can be set to the name of a variable other than ``__version__`` to search for assignments to a different variable instead. If no assignments to the variable are found, a ``ValueError`` is raised. To instead return a default value when this happens, set the ``default`` keyword argument.
def read_version(*fpath, **kwargs):
    if not fpath:
        raise ValueError('No filepath passed to read_version()')
    fpath = os.path.join(*fpath)
    if not os.path.isabs(fpath):
        caller_file = inspect.stack()[1][0].f_globals["__file__"]
        fpath = os.path.join(os.path.dirname(caller_file), fpath)
    with open(fpath, 'rb') as fp:
        src = fp.read()
    top_level = ast.parse(src)
    variable = kwargs.get("variable", "__version__")
    try:
        result = kwargs["default"]
    except KeyError:
        pass
    for statement in top_level.body:
        if isinstance(statement, ast.Assign):
            for target in statement.targets:
                if isinstance(target, ast.Tuple):
                    if any(isinstance(t, ast.Name) and t.id == variable for t in target.elts):
                        value = ast.literal_eval(statement.value)
                        for t, v in zip(target.elts, value):
                            if isinstance(t, ast.Name) and t.id == variable:
                                result = v
                elif isinstance(target, ast.Name) and target.id == variable:
                    result = ast.literal_eval(statement.value)
    try:
        return result
    except NameError:
        raise ValueError('No assignment to {!r} found in file'.format(variable))
[ "def get_version():\n with open(VERSION_FILE) as handle:\n lines = handle.read()\n result = VERSION_REGEX.search(lines)\n if result:\n return result.groupdict()[\"version\"]\n else:\n raise ValueError(\"Unable to determine __version__\")", "def get_version(file):\n __version__ = \"\"\n\n if os.path.isfile(file):\n with open(file, \"r\", encoding=\"utf-8\") as fp:\n __version__ = fp.read().strip()\n\n return __version__", "def find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")", "def __get_version_from_version_txt(path):\n file = os.path.split(__file__)[0]\n paths = [file,\n os.path.join(file, \"..\"),\n os.path.join(file, \"..\", \"..\"),\n os.path.join(file, \"..\", \"..\", \"..\"),\n path]\n for p in paths:\n fp = os.path.join(p, \"version.txt\")\n if os.path.exists(fp):\n with open(fp, \"r\") as f:\n return int(f.read().strip(\" \\n\\r\\t\"))\n raise FileNotFoundError(\n \"unable to find version.txt in\\n\" + \"\\n\".join(paths))", "def version_from_path(self):\n try:\n self.version_label = self.path.split(\"/\")[1]\n (self.major, self.minor, self.revision) = [\n int(s) for s in self.version_label.lstrip(\"v\").split(\".\")\n ]\n except (IndexError, ValueError):\n return \"\"", "def get_version():\n version_file = repository_root / f\"{package_root}/{package_name}/__init__.py\"\n initfile_lines = version_file.open(\"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n return \"unknown\"", "def read_current_version():\n config = RawConfigParser()\n config.add_section('bumpversion')\n config.read_file(io.open('.bumpversion.cfg', 'rt', encoding='utf-8'))\n items = dict(config.items('bumpversion'))\n current_version = items.get('current_version')\n return current_version", "def find_current_version():\n with open(VERSION_FILE) as v:\n return v.read()", "def read(self):\n with self._path.open(mode='r') as fh:\n version = fh.read().strip()\n version = semver.parse(version)\n return Version(version)", "def version(self):\n try:\n print((\"Reading version from\", self.version_file))\n with open(self.version_file) as fh:\n version = fh.read().strip()\n except FileNotFoundError:\n self.log.debug(f\"No version file found at {self.version_file}\")\n return \"0.0.0\"\n return version", "def get_asdf_standard_version(filepath):\n file_obj = file_factory(filepath)\n return file_obj.get_asdf_standard_version()", "def get_current_version(file_path):\n\n return get_local_working_version(file_path)", "def read_version():\n finder = VersionFinder()\n finder.visit(\n ast.parse(local_file('httpretty', '__init__.py').encode('utf-8')))\n return finder.version", "def read_version_from_file(file_path, app_version):\n with open(file_path, 'r') as stream:\n filename, file_extension = os.path.splitext(file_path)\n\n if file_extension == '.py': # Case setup.py files\n file_content = stream.read()\n current_version = get_setup_py_version(file_content)\n file_type = 'python'\n elif file_extension == '.yaml' or file_extension == '.yml': # Case Helm chart files\n try:\n yaml = YAML()\n file_content = yaml.load(stream)\n except YAMLError as exc:\n print(exc)\n # Make sure Helm chart is valid and contains minimal mandatory keys\n if is_valid_helm_chart(file_content):\n file_type = 
'helm_chart'\n if app_version:\n current_version = file_content.get('appVersion', None)\n\n # user passed the 'app-version' flag, but helm chart file does not contain the 'appVersion' field\n if not current_version:\n raise ValueError(\n \"Could not find 'appVersion' field in helm chart.yaml file: {}\".format(file_content)\n )\n else:\n current_version = file_content['version']\n else:\n raise ValueError(\"Input file is not a valid Helm chart.yaml: {0}\".format(file_content))\n else: # Case file name is just 'VERSION'\n if os.path.basename(filename) == 'VERSION':\n # A version file should ONLY contain a valid semantic version string\n file_content = None\n current_version = stream.read()\n file_type = 'plain_version'\n else:\n raise ValueError(\"File name or extension not known to this app: {}{}\"\n .format(os.path.basename(filename), file_extension))\n\n return {'file_content': file_content, 'version': current_version, 'file_type': file_type}", "def old_version():\n with open(version_file, 'r') as file_:\n for line in file_.readlines():\n if \"__version__\" in line:\n version = line.strip().split('=')[-1].strip(\" '\\\"\")\n break\n else:\n raise ValueError(\"Could not read or generate version\")\n return version", "def parse_version(parser, header, version):\n version = parse_string(parser, header, 'APP_VERSION', version)\n if parser.has_option(header, 'GIT'):\n git = parser.get(header, 'GIT')\n try:\n # current hash can be found in the link in HEAD-file in git-folder\n # The file is specified by: 'ref: <location>'\n git_head = os.path.join(git, 'HEAD')\n if os.path.isfile(git_head):\n git_file = (open(git_head).read().rsplit(': ', 1)[1]).rstrip()\n # read the git-version\n version_file = os.path.join(git, git_file)\n if os.path.exists(version_file):\n version = open(version_file).read()\n # cut version to at most 6 chars\n return version[:6]\n else:\n # Return \"dummy\" version in case of no git version file found\n log(\"Folder {} not found, using dummy version: {}\".format(git_head, version))\n return version\n except IOError:\n log(\"Error reading one of the files to retrieve the current git-version.\")\n raise\n return version", "def _get_version() -> str:\n _dirpath = path.split(path.realpath(__file__))[0]\n version = \"UNKNOWN???\"\n for _ in range(3):\n _filepath = path.join(_dirpath, \"pyproject.toml\")\n if path.exists(_filepath):\n with open(_filepath, encoding=\"utf8\") as f:\n version = (\n [ln for ln in f.read().split(\"\\n\") if \"version\" in ln][0]\n .replace(\"version = \", \"\")\n .strip('\"')\n )\n return version\n _dirpath = path.split(_dirpath)[0]\n return version", "def read_sdk_version() -> str:\n file = 'DatadogCore/Sources/Versioning.swift'\n regex = r'^internal let __sdkVersion = \\\"(.*)?\\\"$'\n\n with open(file) as version_file:\n for line in version_file.readlines():\n if match := re.match(regex, line):\n return match.group(1)\n \n raise Exception(f'Expected `__sdkVersion` not found in {file}')", "def get_version():\n with open(os.path.join(\n os.path.dirname(__file__), MODULE_NAME, '__init__.py')\n ) as init:\n for line in init.readlines():\n res = re.match(r'^__version__ = [\\'\"](.*)[\\'\"]$', line)\n if res:\n return res.group(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a copy of this RequestParser with the same set of arguments
def copy(self):
    parser_copy = self.__class__(self.schema_class, self.argument_class, self.result_class)
    parser_copy.args = deepcopy(self.args)
    parser_copy.trim = self.trim
    parser_copy.bundle_errors = self.bundle_errors
    return parser_copy
[ "def copy(self):\n self.make_body_seekable()\n env = self.environ.copy()\n new_req = self.__class__(env)\n new_req.copy_body()\n return new_req", "def duplicate(self):\n return Params(copy.deepcopy(self.params), history=self.history, ext_vars=self.ext_vars)", "def createFromGlobals(cls):\n request = cls(sys.argv, dict());\n return request;", "def replace_http_args(self, method='POST', *args, **kwargs):\n nreq = BaseRequest.from_values(method=method, *args, **kwargs)\n self.args = nreq.args\n self.form = nreq.form\n self.files = nreq.files", "def copy(self, url=None, **fields):\n\n # Copy the URL unless another was specified.\n if url is None:\n url = self.__url\n # Copy fields, and update with any that were specified\n # additionally. \n new_fields = self.__fields.copy()\n new_fields.update(fields)\n # Make the request.\n new_request = apply(WebRequest, (url, ), new_fields)\n # Copy the client address, if present.\n if hasattr(self, \"client_address\"):\n new_request.client_address = self.client_address\n\n return new_request", "def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)", "def parse_args(self, req=None):\r\n if req is None:\r\n req = request\r\n\r\n namespace = self.namespace_class()\r\n\r\n for arg in self.args:\r\n namespace[arg.dest or arg.name] = arg.parse(req)\r\n\r\n return namespace", "def copy(self):\n p = IonParamManager()\n p.pdict = self.pdict\n p.size = self.size\n return p", "def __init__(self, **requests_kwargs):\n self.requests_kwargs = requests_kwargs", "def copy(self, *args, **kwargs):\n if not args and not kwargs:\n return self.__class__(**self.__dict__)\n else:\n return super().copy(self, *args, **kwargs)", "def clone(self):\n return ResourceRequest(self.name, self.type, self.force_validate,\n self.save_state, **self.kwargs)", "def original_args(self) -> dict[str, Any]:\n original_args = getattr(self, \"_original_args\", {})\n return deepcopy(original_args)", "def constructor(self, **kwargs):\n if len(kwargs) > 0:\n self.__dict__.update(kwargs)", "def create(cls, argv):\n request = cls(argv, dict());\n return request;", "def new_comm_kwargs(cls, *args, **kwargs):\n kwargs.setdefault('address', 'address')\n return args, kwargs", "def __attrs_post_init__(self) -> None:\n self.parse = urlparse(self.url)\n self.bolton = urlutils.URL(self.url)", "def from_arguments(cls, argstring):\n\n obj = object.__new__(cls)\n obj.parse(argstring)\n return obj", "def copy(self):\n # seq length will be provided when copying, no need to pass\n return CyclerParams(sequence=self.sequence, mutation_probability=self.mutation_probability)", "def __init__(self):\n self._parser = flask_restful.reqparse.RequestParser()\n self._parser.add_argument(\n 'name', type=str, help='The name of the playlist'\n )\n self._parser.add_argument(\n 'tracks', type=int, action='append', help='The tracks for the playlist'\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)
def get_exif_location(self, exif_data):
    lat = None
    lon = None
    alt = None
    gps_latitude = _get_if_exist(exif_data, 'GPS GPSLatitude')
    gps_latitude_ref = _get_if_exist(exif_data, 'GPS GPSLatitudeRef')
    gps_longitude = _get_if_exist(exif_data, 'GPS GPSLongitude')
    gps_longitude_ref = _get_if_exist(exif_data, 'GPS GPSLongitudeRef')
    gps_altitude = _get_if_exist(exif_data, 'GPS GPSAltitude')
    if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
        lat = convert_to_degrees(gps_latitude)
        if gps_latitude_ref.values[0] != 'N':
            lat = 0 - lat
        lon = convert_to_degrees(gps_longitude)
        if gps_longitude_ref.values[0] != 'E':
            lon = 0 - lon
    if gps_altitude:
        alt = ratio_to_float(gps_altitude.values[0])
    return lat, lon, alt
[ "def get_latitude(filepath):\n image_file = open(filepath, 'rb')\n tags = exifread.process_file(image_file)\n return tags['GPS GPSLatitude']", "def get_gps_dms(exif_data):\n img_gps = {}\n lat_ref = ''\n lat = 0.0\n long_ref = ''\n long = 0.0\n try:\n for key in exif_data['GPSInfo'].keys():\n decoded_value = ExifTags.GPSTAGS.get(key)\n img_gps[decoded_value] = exif_data['GPSInfo'][key]\n # logger.info(exif['GPSInfo'[key]])\n long_ref = img_gps.get('GPSLongitudeRef')\n lat_ref = img_gps.get('GPSLatitudeRef')\n\n long = img_gps.get('GPSLongitude')\n lat = img_gps.get('GPSLatitude')\n except AttributeError:\n # logger.debug('Image has no GPSInfo metadata: {}'.format())\n pass\n\n return lat_ref, lat, long_ref, long", "def get_longitude(filepath):\n image_file = open(filepath, 'rb')\n tags = exifread.process_file(image_file)\n return tags['GPS GPSLongitude']", "def get_exif_data(self):\n exif_data = {}\n info = self.image._getexif()\n if info:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n self.exif_data = exif_data\n return exif_data", "def get_timestamp(exif_data):\n dt = None\n utc = pytz.utc\n try:\n if not \"GPSInfo\" in exif_data:\n return None\n gps_info = exif_data[\"GPSInfo\"]\n gps_time_stamp = gps_info.get('GPSTimeStamp')\n if 'GPSDateStamp' in gps_info:\n gps_date = [int(i) for i in gps_info['GPSDateStamp'].split(':')]\n elif 29 in gps_info:\n gps_date = [int(i) for i in gps_info[29].split(':')]\n else:\n gps_date = None\n if gps_time_stamp and gps_date:\n yy = gps_date[0]\n mm = gps_date[1]\n dd = gps_date[2]\n h = int(float(gps_time_stamp[0][0]) / float(gps_time_stamp[0][1]))\n m = int(float(gps_time_stamp[1][0]) / float(gps_time_stamp[1][1]))\n s = int(float(gps_time_stamp[2][0]) / float(gps_time_stamp[2][1]))\n dt = utc.localize(datetime.datetime(yy, mm,dd,h,m,s))\n except:\n return None\n if dt is None:\n return None\n return dt.strftime('%s')", "def get_lat_lon(meta):\n length, width = int(meta['LENGTH']), int(meta['WIDTH'])\n lat0, lat1, lon0, lon1 = get_bounding_box(meta)\n if 'Y_FIRST' in meta.keys():\n lat, lon = np.mgrid[lat0:lat1:length*1j, lon0:lon1:width*1j]\n else:\n lat, lon = get_lat_lon_rdc(meta)\n return lat, lon", "def get_gps_point(\n exif: Dict[str, Any], reference: geo.TopocentricConverter\n) -> Tuple[np.ndarray, np.ndarray]:\n gps = exif[\"gps\"]\n altitude = 0\n direction = np.array([0, 0, 1])\n return (\n reference.to_topocentric(gps[\"latitude\"], gps[\"longitude\"], altitude),\n direction,\n )", "def get_altitude(exif_data):\n alt = None\n try:\n if not \"GPSInfo\" in exif_data:\n return None\n gps_info = exif_data[\"GPSInfo\"]\n gps_altitude = gps_info.get('GPSAltitude')\n gps_altitude_ref = gps_info.get('GPSAltitudeRef')\n if gps_altitude:\n alt = float(gps_altitude[0]) / float(gps_altitude[1])\n if gps_altitude_ref == 1:\n alt *=-1\n except:\n return None\n return alt", "def get_lat_lon_values():\n refcube = xr.open_dataset(settings.ACCESS_G_PATH + settings.access_g_filename('20190101'))\n return refcube.lat.values, refcube.lon.values", "def getGeoLocation(self, photo_id):\n geo_data = {'latitude': None, 'longitude': None, 'accuracy': None,\n 'locality': '', 'county': '', 'region': '', 'country': ''}\n try:\n result = self.flickr.photos_geo_getLocation(photo_id=photo_id)\n except flickrapi.FlickrError:\n return geo_data\n\n 
geo_data['latitude'] = float(result.photo[0].location[0]['latitude'])\n geo_data['longitude'] = float(result.photo[0].location[0]['longitude'])\n geo_data['accuracy'] = result.photo[0].location[0]['accuracy']\n\n for bit in ('locality', 'county', 'region', 'country',):\n if hasattr(result.photo[0].location[0], bit):\n geo_data[bit] = getattr(result.photo[0].location[0], bit)[0].text\n\n return geo_data", "def populate_exif(event):\n url = event.fields['url'].new_value\n response = requests.get(url)\n image_file = StringIO(response.content)\n raw_exif = exifread.process_file(image_file, details=False)\n\n # EXIF data\n exclude = ('Thumbnail', 'Interoperability', 'MakerNote', 'GPS')\n exif_data = {key: val.printable for key, val in raw_exif.items()\n if key.split()[0] not in exclude}\n event.set_field_value('exif', exif_data)\n\n # GPS data\n loc_field = event.fields.get('location')\n if not loc_field or not loc_field.new_value:\n gps_data = {key.split()[-1]: val.values\n for key, val in raw_exif.items()\n if key.startswith('GPS')}\n lat, lon = get_lat_lon(gps_data)\n event.set_field_value('location', {'lat': lat, 'lon': lon})", "def exif_coord(self):\n from pyexiv2.utils import Rational\n (d,m,s) = self.dms\n return ( Rational(abs(d),1),Rational(m,1),Rational(s * 1e7,1e7) )", "def get_lat_lng(self):\n if self.lat and self.lng:\n return self.lat, self.lng\n return None", "def get_exif(photo):\n cmd = [\n '/usr/bin/exiftool',\n '-d',\n '%Y-%m-%d %H:%M:%S',\n '-coordFormat',\n '%+.6f',\n '-json',\n ]\n cmd += TAGS\n cmd.append(photo)\n for_some_reason_a_list = json.loads(subprocess.check_output(cmd))\n return for_some_reason_a_list[0]", "def get_latlon():\r\n\t\tmagtag.url = get_data_source_url(api=\"forecast5\", location=secrets[\"openweather_location\"])\r\n\t\tmagtag.json_path = [\"city\"]\r\n\t\traw_data = magtag.fetch()\r\n\t\treturn raw_data[\"coord\"][\"lat\"], raw_data[\"coord\"][\"lon\"]", "def gps_data():\n imagesData = {}\n\n logging.info(\"loading images data from JPG files\")\n\n for image in glob.glob('../images/*.JPG'):\n\n try:\n gps_info = Image.open(image)._getexif()[0x8825]\n except TypeError:\n log.warning(\"GPSInfo not found\")\n\n gps_latitude_ref = gps_info[1]\n gps_latitude = gps_info[2]\n gps_longitude_ref = gps_info[3]\n gps_longitude = gps_info[4]\n gps_altitude_ref = gps_info[5]\n gps_altitude = gps_info[6]\n\n lat = _convert_to_degress(gps_latitude)\n if gps_latitude_ref != \"N\":\n lat = 0 - lat\n\n lon = _convert_to_degress(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0 - lon\n\n alt = _convert_alt_to_degrees(gps_altitude)\n if ord(gps_altitude_ref) != 0:\n alt = 0 - alt\n\n imagesData[image[10:]] = [lon, lat, alt]\n\n return imagesData", "def get_exif_data(fname):\n ret = {}\n try:\n img = Image.open(fname)\n if hasattr( img, '_getexif' ):\n \t# raw data\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n # fo.write(ret[decoded]+\"\\n\")\n except IOError:\n print 'IOERROR ' + fname\n # fo.close() \n return ret", "def get_imageset_gps_lats(ibs, imageset_rowid_list):\n id_iter = imageset_rowid_list\n colnames = (IMAGESET_GPS_LAT,)\n imageset_gps_lat_list = ibs.db.get(\n const.IMAGESET_TABLE, colnames, id_iter, id_colname='rowid'\n )\n return imageset_gps_lat_list", "def get_exif_data(fname):\n ret = {}\n try:\n img = Image.open(fname)\n if hasattr( img, '_getexif' ):\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n 
decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except IOError:\n print('IOERROR ' + fname)\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clone a layer and pass its parameters through the function g.
def newlayer(layer, g):
    layer = copy.deepcopy(layer)
    layer.weight = torch.nn.Parameter(g(layer.weight))
    layer.bias = torch.nn.Parameter(g(layer.bias))
    return layer
[ "def _copy_layer(layer):\n if not isinstance(layer, tf.keras.layers.Layer):\n raise TypeError('layer is not a keras layer: %s' % str(layer))\n\n # pylint:disable=unidiomatic-typecheck\n if type(layer) == tf.compat.v1.keras.layers.DenseFeatures:\n raise ValueError('DenseFeatures V1 is not supported. '\n 'Use tf.compat.v2.keras.layers.DenseFeatures instead.')\n if layer.built:\n logging.warn(\n 'Beware: Copying a layer that has already been built: \\'%s\\'. '\n 'This can lead to subtle bugs because the original layer\\'s weights '\n 'will not be used in the copy.', layer.name)\n # Get a fresh copy so we don't modify an incoming layer in place. Weights\n # will not be shared.\n return type(layer).from_config(layer.get_config())", "def clonelayers(layer, N: int):\n return nn.ModuleList([copy.deepcopy(layer) for _ in range(N)])", "def copy (self):\n copy = NFFG(id=self.id, name=self.name, version=self.version)\n copy.network = self.network.copy()\n return copy", "def copy(self, g):\n\n g.score = self.score\n g.fitness = self.fitness\n g.evaluator = self.evaluator\n g.initializator = self.initializator\n g.mutator = self.mutator\n g.crossover = self.crossover\n g.internalParams = self.internalParams", "def clone(self, name: str = None) -> \"Network\":\n # pylint: disable=protected-access\n net = object.__new__(Network)\n net._init_fields()\n net.name = name if name is not None else self.name\n net.static_kwargs = util.EasyDict(self.static_kwargs)\n net._build_module_src = self._build_module_src\n net._build_func_name = self._build_func_name\n net._build_func = self._build_func\n net._init_graph()\n net.copy_vars_from(self)\n return net", "def copy(self):\n phi = self.jg.copy()\n h = phi.codomain\n b = [ phi.map[a] for a in self.bd ]\n return RibbonGraph(h,b)", "def cloned_feature(feature, num_layers):\n # feature[2] is the same for bert, but it didn't work for\n # older versions of transformers for xlnet\n # feature = feature[2]\n feature = feature.hidden_states\n if num_layers is None:\n feature = torch.stack(feature[-4:-1], axis=3).sum(axis=3) / 4\n else:\n feature = torch.stack(feature[-num_layers:], axis=3)\n return feature.clone().detach()", "def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, clone_grad", "def copy(self):\n rv = Network(self.n_max)\n rv.generate(self.G.copy())\n rv.G_infected = self.G_infected.copy()\n return rv", "def clone(self):\n # instantiate new pointcloud with the representation which is not None\n # (either list or tensor) to save compute.\n new_points, new_normals, new_features = None, None, None\n if self._points_list is not None:\n new_points = [v.clone() for v in self.points_list()]\n normals_list = self.normals_list()\n features_list = self.features_list()\n if normals_list is not None:\n new_normals = [n.clone() for n in normals_list]\n if features_list is not None:\n new_features = [f.clone() for f in features_list]\n elif self._points_padded is not None:\n new_points = self.points_padded().clone()\n normals_padded = self.normals_padded()\n features_padded = self.features_padded()\n if normals_padded is not None:\n new_normals = self.normals_padded().clone()\n if features_padded is not None:\n new_features = self.features_padded().clone()\n other = self.__class__(\n points=new_points, 
normals=new_normals, features=new_features\n )\n for k in self._INTERNAL_TENSORS:\n v = getattr(self, k)\n if torch.is_tensor(v):\n setattr(other, k, v.clone())\n return other", "def clone(orig: Model[InT, OutT], n: int) -> Model[InT, OutT]:\n if n == 0:\n return cast(Model[InT, OutT], noop())\n elif n == 1:\n return orig\n layers: List[Model] = [orig]\n for i in range(n - 1):\n layers.append(orig.copy())\n return cast(Model[InT, OutT], chain(*layers))", "def clone_with_theta(self, theta):\n ...", "def copy(self, g):\n g.parent = self.parent\n g.childs = self.childs[:]", "def clone(self, data):", "def clone(self):\n poly = self.polydata(False)\n polyCopy = vtk.vtkPolyData()\n polyCopy.DeepCopy(poly)\n\n cloned = Mesh(polyCopy)\n pr = vtk.vtkProperty()\n pr.DeepCopy(self.GetProperty())\n cloned.SetProperty(pr)\n\n # assign the same transformation to the copy\n cloned.SetOrigin(self.GetOrigin())\n cloned.SetScale(self.GetScale())\n cloned.SetOrientation(self.GetOrientation())\n cloned.SetPosition(self.GetPosition())\n\n cloned._mapper.SetScalarVisibility(self._mapper.GetScalarVisibility())\n cloned._mapper.SetScalarRange(self._mapper.GetScalarRange())\n cloned._mapper.SetColorMode(self._mapper.GetColorMode())\n lsr = self._mapper.GetUseLookupTableScalarRange()\n cloned._mapper.SetUseLookupTableScalarRange(lsr)\n cloned._mapper.SetScalarMode(self._mapper.GetScalarMode())\n lut = self._mapper.GetLookupTable()\n if lut:\n cloned._mapper.SetLookupTable(lut)\n\n cloned.base = self.base\n cloned.top = self.top\n cloned.name = self.name\n if self.trail:\n n = len(self.trailPoints)\n cloned.addTrail(self.trailOffset, self.trailSegmentSize*n, n,\n None, None, self.trail.GetProperty().GetLineWidth())\n if self.shadow:\n cloned.addShadow(self.shadowX, self.shadowY, self.shadowZ,\n self.shadow.GetProperty().GetColor(),\n self.shadow.GetProperty().GetOpacity())\n return cloned", "def clone(self):\n return self.__clone(True)", "def _gen_layer(self, l_dict):\n # TODO Handle variable layer configuration\n new_layer = Layer(l_dict['size'], l_dict['act_func'], l_dict['type'], l_dict, self.lr)\n return new_layer", "def replace_layer_with_copy(feat: BaseFeature, target_layer: torch.nn.Module):\n with torch.no_grad():\n # Get the original encoder object and a mapping from param names to the params themselves.\n orig_encoder_obj = feat.encoder_obj\n orig_encoder_obj_state_dict = orig_encoder_obj.state_dict()\n\n # Deep copy the original encoder object and set the copy as this feature's encoder object.\n copy_encoder_obj = deepcopy(orig_encoder_obj)\n feat.encoder_obj = copy_encoder_obj\n\n # We have to get the absolute module key in order to do string matching because the target_layer keys are\n # relative to itself. If we were to leave it as-is and attempt to suffix match, we may get duplicates for\n # common layers i.e. \"LayerNorm.weight\" and \"LayerNorm.bias\". Getting the absolute module key ensures we\n # use values like \"transformer.module.embedding.LayerNorm.weight\" instead.\n keys_to_keep_copy = get_absolute_module_key_from_submodule(orig_encoder_obj, target_layer)\n\n # Get the tensors to keep from the copied encoder object. These are the tensors in the target layer.\n for key, param in copy_encoder_obj.named_parameters():\n if key not in keys_to_keep_copy:\n param.data = orig_encoder_obj_state_dict[key].data", "def clone(self, name, **attr):\n\t\tobj = copy.deepcopy(self._objects.get(name))\n\t\tobj.__dict__.update(attr)\n\t\treturn obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }